Compare commits
15 Commits
ad7b2a02cb
...
v0.2.4
| Author | SHA1 | Date | |
|---|---|---|---|
| d5ca7a8be1 | |||
| 476dc79e42 | |||
| bb6590fc5e | |||
| 899db3421b | |||
| caf01d6ada | |||
| bb366cb4cd | |||
| a2745ff2ee | |||
| 28cb656d94 | |||
| 3c44152fc6 | |||
| 397515edce | |||
| 980fced7e4 | |||
| bae5009ec4 | |||
| 233780617f | |||
| fd8fb21517 | |||
| c6cbe822e1 |
@@ -11,5 +11,3 @@ htmlcov
|
||||
logs
|
||||
data
|
||||
tmp
|
||||
myfsio_core/target
|
||||
myfsio-engine/target
|
||||
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -26,13 +26,6 @@ dist/
|
||||
*.egg-info/
|
||||
.eggs/
|
||||
|
||||
# Rust / maturin build artifacts
|
||||
myfsio_core/target/
|
||||
myfsio_core/Cargo.lock
|
||||
|
||||
# Rust engine build artifacts
|
||||
myfsio-engine/target/
|
||||
|
||||
# Local runtime artifacts
|
||||
logs/
|
||||
*.log
|
||||
|
||||
31
Dockerfile
31
Dockerfile
@@ -1,39 +1,27 @@
|
||||
FROM python:3.14.3-slim
|
||||
# syntax=docker/dockerfile:1.7
|
||||
FROM python:3.12.12-slim
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential curl \
|
||||
&& curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal \
|
||||
# Install build deps for any wheels that need compilation, then clean up
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
COPY requirements.txt ./
|
||||
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN pip install --no-cache-dir maturin \
|
||||
&& cd myfsio_core \
|
||||
&& maturin build --release \
|
||||
&& pip install target/wheels/*.whl \
|
||||
&& cd ../myfsio-engine \
|
||||
&& cargo build --release \
|
||||
&& cp target/release/myfsio-server /usr/local/bin/myfsio-server \
|
||||
&& cd .. \
|
||||
&& rm -rf myfsio_core/target myfsio-engine/target \
|
||||
&& pip uninstall -y maturin \
|
||||
&& rustup self uninstall -y
|
||||
|
||||
# Make entrypoint executable
|
||||
RUN chmod +x docker-entrypoint.sh
|
||||
|
||||
# Create data directory and set permissions
|
||||
RUN mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
USER myfsio
|
||||
@@ -41,8 +29,7 @@ USER myfsio
|
||||
EXPOSE 5000 5100
|
||||
ENV APP_HOST=0.0.0.0 \
|
||||
FLASK_ENV=production \
|
||||
FLASK_DEBUG=0 \
|
||||
ENGINE=rust
|
||||
FLASK_DEBUG=0
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
|
||||
|
||||
@@ -72,11 +72,6 @@ source .venv/bin/activate
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
|
||||
# (Optional) Build Rust native extension for better performance
|
||||
# Requires Rust toolchain: https://rustup.rs
|
||||
pip install maturin
|
||||
cd myfsio_core && maturin develop --release && cd ..
|
||||
|
||||
# Start both servers
|
||||
python run.py
|
||||
|
||||
@@ -85,7 +80,7 @@ python run.py --mode api # API only (port 5000)
|
||||
python run.py --mode ui # UI only (port 5100)
|
||||
```
|
||||
|
||||
**Credentials:** Generated automatically on first run and printed to the console. If missed, check the IAM config file at `<STORAGE_ROOT>/.myfsio.sys/config/iam.json`.
|
||||
**Default Credentials:** `localadmin` / `localadmin`
|
||||
|
||||
- **Web Console:** http://127.0.0.1:5100/ui
|
||||
- **API Endpoint:** http://127.0.0.1:5000
|
||||
|
||||
332
app/__init__.py
332
app/__init__.py
@@ -1,25 +1,20 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import html as html_module
|
||||
import itertools
|
||||
import logging
|
||||
import mimetypes
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from datetime import timedelta
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from flask import Flask, Response, g, has_request_context, redirect, render_template, request, url_for
|
||||
from flask import Flask, g, has_request_context, redirect, render_template, request, url_for
|
||||
from flask_cors import CORS
|
||||
from flask_wtf.csrf import CSRFError
|
||||
from werkzeug.middleware.proxy_fix import ProxyFix
|
||||
|
||||
import io
|
||||
|
||||
from .access_logging import AccessLoggingService
|
||||
from .operation_metrics import OperationMetricsCollector, classify_endpoint
|
||||
from .compression import GzipMiddleware
|
||||
@@ -31,77 +26,14 @@ from .encryption import EncryptionManager
|
||||
from .extensions import limiter, csrf
|
||||
from .iam import IamService
|
||||
from .kms import KMSManager
|
||||
from .gc import GarbageCollector
|
||||
from .integrity import IntegrityChecker
|
||||
from .lifecycle import LifecycleManager
|
||||
from .notifications import NotificationService
|
||||
from .object_lock import ObjectLockService
|
||||
from .replication import ReplicationManager
|
||||
from .secret_store import EphemeralSecretStore
|
||||
from .site_registry import SiteRegistry, SiteInfo
|
||||
from .storage import ObjectStorage, StorageError
|
||||
from .storage import ObjectStorage
|
||||
from .version import get_version
|
||||
from .website_domains import WebsiteDomainStore
|
||||
|
||||
_request_counter = itertools.count(1)
|
||||
|
||||
|
||||
class _ChunkedTransferMiddleware:
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
|
||||
def __call__(self, environ, start_response):
|
||||
if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
|
||||
return self.app(environ, start_response)
|
||||
|
||||
transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
|
||||
content_length = environ.get("CONTENT_LENGTH")
|
||||
|
||||
if "chunked" in transfer_encoding.lower():
|
||||
if content_length:
|
||||
del environ["HTTP_TRANSFER_ENCODING"]
|
||||
else:
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw:
|
||||
try:
|
||||
if hasattr(raw, "seek"):
|
||||
raw.seek(0)
|
||||
body = raw.read()
|
||||
except Exception:
|
||||
body = b""
|
||||
if body:
|
||||
environ["wsgi.input"] = io.BytesIO(body)
|
||||
environ["CONTENT_LENGTH"] = str(len(body))
|
||||
del environ["HTTP_TRANSFER_ENCODING"]
|
||||
|
||||
content_length = environ.get("CONTENT_LENGTH")
|
||||
if not content_length or content_length == "0":
|
||||
sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
|
||||
decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
|
||||
content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
|
||||
if ("STREAMING" in sha256.upper() or decoded_len
|
||||
or "aws-chunked" in content_encoding.lower()):
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw:
|
||||
try:
|
||||
if hasattr(raw, "seek"):
|
||||
raw.seek(0)
|
||||
body = raw.read()
|
||||
except Exception:
|
||||
body = b""
|
||||
if body:
|
||||
environ["wsgi.input"] = io.BytesIO(body)
|
||||
environ["CONTENT_LENGTH"] = str(len(body))
|
||||
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw and hasattr(raw, "seek"):
|
||||
try:
|
||||
raw.seek(0)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return self.app(environ, start_response)
|
||||
|
||||
|
||||
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
|
||||
@@ -158,20 +90,12 @@ def create_app(
|
||||
app.config.setdefault("WTF_CSRF_ENABLED", False)
|
||||
|
||||
# Trust X-Forwarded-* headers from proxies
|
||||
num_proxies = app.config.get("NUM_TRUSTED_PROXIES", 1)
|
||||
if num_proxies:
|
||||
if "NUM_TRUSTED_PROXIES" not in os.environ:
|
||||
logging.getLogger(__name__).warning(
|
||||
"NUM_TRUSTED_PROXIES not set, defaulting to 1. "
|
||||
"Set NUM_TRUSTED_PROXIES=0 if not behind a reverse proxy."
|
||||
)
|
||||
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)
|
||||
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_prefix=1)
|
||||
|
||||
# Enable gzip compression for responses (10-20x smaller JSON payloads)
|
||||
if app.config.get("ENABLE_GZIP", True):
|
||||
app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)
|
||||
|
||||
app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)
|
||||
|
||||
_configure_cors(app)
|
||||
_configure_logging(app)
|
||||
|
||||
@@ -180,11 +104,10 @@ def create_app(
|
||||
|
||||
storage = ObjectStorage(
|
||||
Path(app.config["STORAGE_ROOT"]),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
|
||||
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
|
||||
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
|
||||
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
|
||||
meta_read_cache_max=app.config.get("META_READ_CACHE_MAX", 2048),
|
||||
)
|
||||
|
||||
if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
|
||||
@@ -194,7 +117,6 @@ def create_app(
|
||||
Path(app.config["IAM_CONFIG"]),
|
||||
auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
|
||||
auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
|
||||
encryption_key=app.config.get("SECRET_KEY"),
|
||||
)
|
||||
bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
|
||||
secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))
|
||||
@@ -285,31 +207,6 @@ def create_app(
|
||||
)
|
||||
lifecycle_manager.start()
|
||||
|
||||
gc_collector = None
|
||||
if app.config.get("GC_ENABLED", False):
|
||||
gc_collector = GarbageCollector(
|
||||
storage_root=storage_root,
|
||||
interval_hours=app.config.get("GC_INTERVAL_HOURS", 6.0),
|
||||
temp_file_max_age_hours=app.config.get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0),
|
||||
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
|
||||
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
|
||||
dry_run=app.config.get("GC_DRY_RUN", False),
|
||||
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
|
||||
)
|
||||
gc_collector.start()
|
||||
|
||||
integrity_checker = None
|
||||
if app.config.get("INTEGRITY_ENABLED", False):
|
||||
integrity_checker = IntegrityChecker(
|
||||
storage_root=storage_root,
|
||||
interval_hours=app.config.get("INTEGRITY_INTERVAL_HOURS", 24.0),
|
||||
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
|
||||
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
|
||||
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
|
||||
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
|
||||
)
|
||||
integrity_checker.start()
|
||||
|
||||
app.extensions["object_storage"] = storage
|
||||
app.extensions["iam"] = iam
|
||||
app.extensions["bucket_policies"] = bucket_policies
|
||||
@@ -321,26 +218,11 @@ def create_app(
|
||||
app.extensions["kms"] = kms_manager
|
||||
app.extensions["acl"] = acl_service
|
||||
app.extensions["lifecycle"] = lifecycle_manager
|
||||
app.extensions["gc"] = gc_collector
|
||||
app.extensions["integrity"] = integrity_checker
|
||||
app.extensions["object_lock"] = object_lock_service
|
||||
app.extensions["notifications"] = notification_service
|
||||
app.extensions["access_logging"] = access_logging_service
|
||||
app.extensions["site_registry"] = site_registry
|
||||
|
||||
website_domains_store = None
|
||||
if app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
website_domains_path = config_dir / "website_domains.json"
|
||||
website_domains_store = WebsiteDomainStore(website_domains_path)
|
||||
app.extensions["website_domains"] = website_domains_store
|
||||
|
||||
from .s3_client import S3ProxyClient
|
||||
api_base = app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
|
||||
app.extensions["s3_proxy"] = S3ProxyClient(
|
||||
api_base_url=api_base,
|
||||
region=app.config.get("AWS_REGION", "us-east-1"),
|
||||
)
|
||||
|
||||
operation_metrics_collector = None
|
||||
if app.config.get("OPERATION_METRICS_ENABLED", False):
|
||||
operation_metrics_collector = OperationMetricsCollector(
|
||||
@@ -381,37 +263,11 @@ def create_app(
|
||||
|
||||
@app.errorhandler(500)
|
||||
def internal_error(error):
|
||||
wants_html = request.accept_mimetypes.accept_html
|
||||
path = request.path or ""
|
||||
if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
|
||||
return render_template('500.html'), 500
|
||||
error_xml = (
|
||||
'<?xml version="1.0" encoding="UTF-8"?>'
|
||||
'<Error>'
|
||||
'<Code>InternalError</Code>'
|
||||
'<Message>An internal server error occurred</Message>'
|
||||
f'<Resource>{path}</Resource>'
|
||||
f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
|
||||
'</Error>'
|
||||
)
|
||||
return error_xml, 500, {'Content-Type': 'application/xml'}
|
||||
return render_template('500.html'), 500
|
||||
|
||||
@app.errorhandler(CSRFError)
|
||||
def handle_csrf_error(e):
|
||||
wants_html = request.accept_mimetypes.accept_html
|
||||
path = request.path or ""
|
||||
if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
|
||||
return render_template('csrf_error.html', reason=e.description), 400
|
||||
error_xml = (
|
||||
'<?xml version="1.0" encoding="UTF-8"?>'
|
||||
'<Error>'
|
||||
'<Code>CSRFError</Code>'
|
||||
f'<Message>{e.description}</Message>'
|
||||
f'<Resource>{path}</Resource>'
|
||||
f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
|
||||
'</Error>'
|
||||
)
|
||||
return error_xml, 400, {'Content-Type': 'application/xml'}
|
||||
return render_template('csrf_error.html', reason=e.description), 400
|
||||
|
||||
@app.template_filter("filesizeformat")
|
||||
def filesizeformat(value: int) -> str:
|
||||
@@ -575,174 +431,30 @@ def _configure_logging(app: Flask) -> None:
|
||||
|
||||
@app.before_request
|
||||
def _log_request_start() -> None:
|
||||
g.request_id = f"{os.getpid():x}{next(_request_counter):012x}"
|
||||
g.request_id = uuid.uuid4().hex
|
||||
g.request_started_at = time.perf_counter()
|
||||
g.request_bytes_in = request.content_length or 0
|
||||
|
||||
@app.before_request
|
||||
def _maybe_serve_website():
|
||||
if not app.config.get("WEBSITE_HOSTING_ENABLED"):
|
||||
return None
|
||||
if request.method not in {"GET", "HEAD"}:
|
||||
return None
|
||||
host = request.host
|
||||
if ":" in host:
|
||||
host = host.rsplit(":", 1)[0]
|
||||
host = host.lower()
|
||||
store = app.extensions.get("website_domains")
|
||||
if not store:
|
||||
return None
|
||||
bucket = store.get_bucket(host)
|
||||
if not bucket:
|
||||
return None
|
||||
storage = app.extensions["object_storage"]
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _website_error_response(404, "Not Found")
|
||||
website_config = storage.get_bucket_website(bucket)
|
||||
if not website_config:
|
||||
return _website_error_response(404, "Not Found")
|
||||
index_doc = website_config.get("index_document", "index.html")
|
||||
error_doc = website_config.get("error_document")
|
||||
req_path = request.path.lstrip("/")
|
||||
if not req_path or req_path.endswith("/"):
|
||||
object_key = req_path + index_doc
|
||||
else:
|
||||
object_key = req_path
|
||||
try:
|
||||
obj_path = storage.get_object_path(bucket, object_key)
|
||||
except (StorageError, OSError):
|
||||
if object_key == req_path:
|
||||
try:
|
||||
obj_path = storage.get_object_path(bucket, req_path + "/" + index_doc)
|
||||
object_key = req_path + "/" + index_doc
|
||||
except (StorageError, OSError):
|
||||
return _serve_website_error(storage, bucket, error_doc, 404)
|
||||
else:
|
||||
return _serve_website_error(storage, bucket, error_doc, 404)
|
||||
content_type = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
|
||||
is_encrypted = False
|
||||
try:
|
||||
metadata = storage.get_object_metadata(bucket, object_key)
|
||||
is_encrypted = "x-amz-server-side-encryption" in metadata
|
||||
except (StorageError, OSError):
|
||||
pass
|
||||
if is_encrypted and hasattr(storage, "get_object_data"):
|
||||
try:
|
||||
data, _ = storage.get_object_data(bucket, object_key)
|
||||
file_size = len(data)
|
||||
except (StorageError, OSError):
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
else:
|
||||
data = None
|
||||
try:
|
||||
stat = obj_path.stat()
|
||||
file_size = stat.st_size
|
||||
except OSError:
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
if request.method == "HEAD":
|
||||
response = Response(status=200)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Content-Type"] = content_type
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
from .s3_api import _parse_range_header
|
||||
range_header = request.headers.get("Range")
|
||||
if range_header:
|
||||
ranges = _parse_range_header(range_header, file_size)
|
||||
if ranges is None:
|
||||
return Response(status=416, headers={"Content-Range": f"bytes */{file_size}"})
|
||||
start, end = ranges[0]
|
||||
length = end - start + 1
|
||||
if data is not None:
|
||||
partial_data = data[start:end + 1]
|
||||
response = Response(partial_data, status=206, mimetype=content_type)
|
||||
else:
|
||||
def _stream_range(file_path, start_pos, length_to_read):
|
||||
with file_path.open("rb") as f:
|
||||
f.seek(start_pos)
|
||||
remaining = length_to_read
|
||||
while remaining > 0:
|
||||
chunk = f.read(min(262144, remaining))
|
||||
if not chunk:
|
||||
break
|
||||
remaining -= len(chunk)
|
||||
yield chunk
|
||||
response = Response(_stream_range(obj_path, start, length), status=206, mimetype=content_type, direct_passthrough=True)
|
||||
response.headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
|
||||
response.headers["Content-Length"] = length
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
if data is not None:
|
||||
response = Response(data, mimetype=content_type)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
def _stream(file_path):
|
||||
with file_path.open("rb") as f:
|
||||
while True:
|
||||
chunk = f.read(65536)
|
||||
if not chunk:
|
||||
break
|
||||
yield chunk
|
||||
response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
|
||||
def _serve_website_error(storage, bucket, error_doc_key, status_code):
|
||||
if not error_doc_key:
|
||||
return _website_error_response(status_code, "Not Found" if status_code == 404 else "Error")
|
||||
try:
|
||||
obj_path = storage.get_object_path(bucket, error_doc_key)
|
||||
except (StorageError, OSError):
|
||||
return _website_error_response(status_code, "Not Found")
|
||||
content_type = mimetypes.guess_type(error_doc_key)[0] or "text/html"
|
||||
is_encrypted = False
|
||||
try:
|
||||
metadata = storage.get_object_metadata(bucket, error_doc_key)
|
||||
is_encrypted = "x-amz-server-side-encryption" in metadata
|
||||
except (StorageError, OSError):
|
||||
pass
|
||||
if is_encrypted and hasattr(storage, "get_object_data"):
|
||||
try:
|
||||
data, _ = storage.get_object_data(bucket, error_doc_key)
|
||||
response = Response(data, status=status_code, mimetype=content_type)
|
||||
response.headers["Content-Length"] = len(data)
|
||||
return response
|
||||
except (StorageError, OSError):
|
||||
return _website_error_response(status_code, "Not Found")
|
||||
try:
|
||||
data = obj_path.read_bytes()
|
||||
response = Response(data, status=status_code, mimetype=content_type)
|
||||
response.headers["Content-Length"] = len(data)
|
||||
return response
|
||||
except OSError:
|
||||
return _website_error_response(status_code, "Not Found")
|
||||
|
||||
def _website_error_response(status_code, message):
|
||||
safe_msg = html_module.escape(str(message))
|
||||
safe_code = html_module.escape(str(status_code))
|
||||
body = f"<html><head><title>{safe_code} {safe_msg}</title></head><body><h1>{safe_code} {safe_msg}</h1></body></html>"
|
||||
return Response(body, status=status_code, mimetype="text/html")
|
||||
app.logger.info(
|
||||
"Request started",
|
||||
extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
|
||||
)
|
||||
|
||||
@app.after_request
|
||||
def _log_request_end(response):
|
||||
duration_ms = 0.0
|
||||
if hasattr(g, "request_started_at"):
|
||||
duration_ms = (time.perf_counter() - g.request_started_at) * 1000
|
||||
request_id = getattr(g, "request_id", f"{os.getpid():x}{next(_request_counter):012x}")
|
||||
request_id = getattr(g, "request_id", uuid.uuid4().hex)
|
||||
response.headers.setdefault("X-Request-ID", request_id)
|
||||
if app.logger.isEnabledFor(logging.INFO):
|
||||
app.logger.info(
|
||||
"Request completed",
|
||||
extra={
|
||||
"path": request.path,
|
||||
"method": request.method,
|
||||
"remote_addr": request.remote_addr,
|
||||
},
|
||||
)
|
||||
app.logger.info(
|
||||
"Request completed",
|
||||
extra={
|
||||
"path": request.path,
|
||||
"method": request.method,
|
||||
"remote_addr": request.remote_addr,
|
||||
},
|
||||
)
|
||||
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
|
||||
response.headers["Server"] = "MyFSIO"
|
||||
|
||||
operation_metrics = app.extensions.get("operation_metrics")
|
||||
if operation_metrics:
|
||||
|
||||
314
app/admin_api.py
314
app/admin_api.py
@@ -1,7 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import socket
|
||||
@@ -14,12 +13,9 @@ from flask import Blueprint, Response, current_app, jsonify, request
|
||||
|
||||
from .connections import ConnectionStore
|
||||
from .extensions import limiter
|
||||
from .gc import GarbageCollector
|
||||
from .integrity import IntegrityChecker
|
||||
from .iam import IamError, Principal
|
||||
from .replication import ReplicationManager
|
||||
from .site_registry import PeerSite, SiteInfo, SiteRegistry
|
||||
from .website_domains import WebsiteDomainStore, normalize_domain, is_valid_domain
|
||||
|
||||
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
@@ -358,10 +354,6 @@ def update_peer_site(site_id: str):
|
||||
if region_error:
|
||||
return _json_error("ValidationError", region_error, 400)
|
||||
|
||||
if "connection_id" in payload:
|
||||
if payload["connection_id"] and not _connections().get(payload["connection_id"]):
|
||||
return _json_error("ValidationError", f"Connection '{payload['connection_id']}' not found", 400)
|
||||
|
||||
peer = PeerSite(
|
||||
site_id=site_id,
|
||||
endpoint=payload.get("endpoint", existing.endpoint),
|
||||
@@ -676,309 +668,3 @@ def check_bidirectional_status(site_id: str):
|
||||
result["is_fully_configured"] = len(error_issues) == 0 and len(local_bidir_rules) > 0
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
def _website_domains() -> WebsiteDomainStore:
|
||||
return current_app.extensions["website_domains"]
|
||||
|
||||
|
||||
def _storage():
|
||||
return current_app.extensions["object_storage"]
|
||||
|
||||
|
||||
def _require_iam_action(action: str):
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return None, error
|
||||
try:
|
||||
_iam().authorize(principal, None, action)
|
||||
return principal, None
|
||||
except IamError:
|
||||
return None, _json_error("AccessDenied", f"Requires {action} permission", 403)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_list_users():
|
||||
principal, error = _require_iam_action("iam:list_users")
|
||||
if error:
|
||||
return error
|
||||
return jsonify({"users": _iam().list_users()})
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_get_user(identifier):
|
||||
principal, error = _require_iam_action("iam:get_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
user_id = _iam().resolve_user_id(identifier)
|
||||
return jsonify(_iam().get_user_by_id(user_id))
|
||||
except IamError as exc:
|
||||
return _json_error("NotFound", str(exc), 404)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_get_user_policies(identifier):
|
||||
principal, error = _require_iam_action("iam:get_policy")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
return jsonify({"policies": _iam().get_user_policies(identifier)})
|
||||
except IamError as exc:
|
||||
return _json_error("NotFound", str(exc), 404)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_create_access_key(identifier):
|
||||
principal, error = _require_iam_action("iam:create_key")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
result = _iam().create_access_key(identifier)
|
||||
logger.info("Access key created for %s by %s", identifier, principal.access_key)
|
||||
return jsonify(result), 201
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_delete_access_key(identifier, access_key):
|
||||
principal, error = _require_iam_action("iam:delete_key")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().delete_access_key(access_key)
|
||||
logger.info("Access key %s deleted by %s", access_key, principal.access_key)
|
||||
return "", 204
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_disable_user(identifier):
|
||||
principal, error = _require_iam_action("iam:disable_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().disable_user(identifier)
|
||||
logger.info("User %s disabled by %s", identifier, principal.access_key)
|
||||
return jsonify({"status": "disabled"})
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_enable_user(identifier):
|
||||
principal, error = _require_iam_action("iam:disable_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().enable_user(identifier)
|
||||
logger.info("User %s enabled by %s", identifier, principal.access_key)
|
||||
return jsonify({"status": "enabled"})
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def list_website_domains():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
return jsonify(_website_domains().list_all())
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def create_website_domain():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
domain = normalize_domain(payload.get("domain") or "")
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not domain:
|
||||
return _json_error("ValidationError", "domain is required", 400)
|
||||
if not is_valid_domain(domain):
|
||||
return _json_error("ValidationError", f"Invalid domain: '{domain}'", 400)
|
||||
if not bucket:
|
||||
return _json_error("ValidationError", "bucket is required", 400)
|
||||
storage = _storage()
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
|
||||
store = _website_domains()
|
||||
existing = store.get_bucket(domain)
|
||||
if existing:
|
||||
return _json_error("Conflict", f"Domain '{domain}' is already mapped to bucket '{existing}'", 409)
|
||||
store.set_mapping(domain, bucket)
|
||||
logger.info("Website domain mapping created: %s -> %s", domain, bucket)
|
||||
return jsonify({"domain": domain, "bucket": bucket}), 201
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def get_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
bucket = _website_domains().get_bucket(domain)
|
||||
if not bucket:
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["PUT"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def update_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not bucket:
|
||||
return _json_error("ValidationError", "bucket is required", 400)
|
||||
storage = _storage()
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
|
||||
store = _website_domains()
|
||||
if not store.get_bucket(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
store.set_mapping(domain, bucket)
|
||||
logger.info("Website domain mapping updated: %s -> %s", domain, bucket)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["DELETE"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def delete_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
if not _website_domains().delete_mapping(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
logger.info("Website domain mapping deleted: %s", domain)
|
||||
return Response(status=204)
|
||||
|
||||
|
||||
def _gc() -> Optional[GarbageCollector]:
|
||||
return current_app.extensions.get("gc")
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_status():
    """Report the garbage collector's current status (admin only)."""
    principal, error = _require_admin()
    if error:
        return error
    collector = _gc()
    if not collector:
        # GC was never registered on the app; tell the operator how to enable it.
        return jsonify({"enabled": False, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})
    return jsonify(collector.get_status())
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_run_now():
    """Kick off a garbage-collection run in the background (admin only).

    Optional JSON body key ``dry_run`` overrides the collector's configured
    default. Returns 400 when GC is disabled and 409 when a run is already
    in progress.
    """
    principal, error = _require_admin()
    if error:
        return error
    gc = _gc()
    if not gc:
        return _json_error("InvalidRequest", "GC is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    started = gc.run_async(dry_run=payload.get("dry_run"))
    if not started:
        # Fix: previously the audit log line was emitted before this check,
        # recording a "manual run" even for rejected (409) requests.
        return _json_error("Conflict", "GC is already in progress", 409)
    logger.info("GC manual run by %s", principal.access_key)
    return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_history():
    """Return past GC execution records, paginated via ?limit= and ?offset=.

    ``limit`` is clamped to 0..200 and ``offset`` to >= 0; non-numeric
    values yield a 400 instead of an unhandled ValueError (500).
    """
    principal, error = _require_admin()
    if error:
        return error
    gc = _gc()
    if not gc:
        return jsonify({"executions": []})
    try:
        limit = int(request.args.get("limit", 50))
        offset = int(request.args.get("offset", 0))
    except (TypeError, ValueError):
        # Fix: int() on a garbage query parameter used to escape as a 500.
        return _json_error("ValidationError", "limit and offset must be integers", 400)
    limit = max(0, min(limit, 200))
    offset = max(0, offset)
    records = gc.get_history(limit=limit, offset=offset)
    return jsonify({"executions": records})
|
||||
|
||||
|
||||
def _integrity() -> Optional[IntegrityChecker]:
    """Fetch the app-wide IntegrityChecker, or None when it is not enabled."""
    return current_app.extensions.get("integrity")
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_status():
    """Report the integrity checker's current status (admin only)."""
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        # Checker was never registered on the app; tell the operator how to enable it.
        return jsonify({"enabled": False, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})
    return jsonify(checker.get_status())
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_run_now():
    """Start a background integrity scan (admin only).

    Optional JSON body keys ``dry_run`` and ``auto_heal`` override the
    checker's configured defaults; absent/None means "use the default".
    Returns 400 when the checker is disabled and 409 when a scan is
    already running.
    """
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        return _json_error("InvalidRequest", "Integrity checker is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    # Fix: the original `x if x is not None else None` expressions were
    # no-op conditionals; pass the payload values through directly.
    started = checker.run_async(
        auto_heal=payload.get("auto_heal"),
        dry_run=payload.get("dry_run"),
    )
    if not started:
        # Fix: previously the audit log line was emitted before this check,
        # recording a "manual run" even for rejected (409) requests.
        return _json_error("Conflict", "A scan is already in progress", 409)
    logger.info("Integrity manual run by %s", principal.access_key)
    return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_history():
    """Return past integrity-scan records, paginated via ?limit= and ?offset=.

    ``limit`` is clamped to 0..200 and ``offset`` to >= 0; non-numeric
    values yield a 400 instead of an unhandled ValueError (500).
    """
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        return jsonify({"executions": []})
    try:
        limit = int(request.args.get("limit", 50))
        offset = int(request.args.get("offset", 0))
    except (TypeError, ValueError):
        # Fix: int() on a garbage query parameter used to escape as a 500.
        return _json_error("ValidationError", "limit and offset must be integers", 400)
    limit = max(0, min(limit, 200))
    offset = max(0, offset)
    records = checker.get_history(limit=limit, offset=offset)
    return jsonify({"executions": records})
|
||||
|
||||
|
||||
|
||||
@@ -2,12 +2,10 @@ from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from fnmatch import fnmatch, translate
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
|
||||
|
||||
@@ -15,14 +13,9 @@ from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
|
||||
RESOURCE_PREFIX = "arn:aws:s3:::"
|
||||
|
||||
|
||||
@lru_cache(maxsize=256)
|
||||
def _compile_pattern(pattern: str) -> Pattern[str]:
|
||||
return re.compile(translate(pattern), re.IGNORECASE)
|
||||
|
||||
|
||||
def _match_string_like(value: str, pattern: str) -> bool:
    """Case-insensitive StringLike match of *value* against an fnmatch pattern."""
    return _compile_pattern(pattern).match(value) is not None
|
||||
|
||||
|
||||
def _ip_in_cidr(ip_str: str, cidr: str) -> bool:
|
||||
@@ -76,7 +69,7 @@ def _evaluate_condition_operator(
|
||||
expected_null = condition_values[0].lower() in ("true", "1", "yes") if condition_values else True
|
||||
return is_null == expected_null
|
||||
|
||||
return False
|
||||
return True
|
||||
|
||||
ACTION_ALIASES = {
|
||||
"s3:listbucket": "list",
|
||||
@@ -269,7 +262,7 @@ class BucketPolicyStore:
|
||||
self._last_mtime = self._current_mtime()
|
||||
# Performance: Avoid stat() on every request
|
||||
self._last_stat_check = 0.0
|
||||
self._stat_check_interval = float(os.environ.get("BUCKET_POLICY_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
|
||||
self._stat_check_interval = 1.0 # Only check mtime every 1 second
|
||||
|
||||
def maybe_reload(self) -> None:
|
||||
# Performance: Skip stat check if we checked recently
|
||||
|
||||
@@ -36,11 +36,10 @@ class GzipMiddleware:
|
||||
content_type = None
|
||||
content_length = None
|
||||
should_compress = False
|
||||
passthrough = False
|
||||
exc_info_holder = [None]
|
||||
|
||||
def custom_start_response(status: str, headers: List[Tuple[str, str]], exc_info=None):
|
||||
nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress, passthrough
|
||||
nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress
|
||||
response_started = True
|
||||
status_code = int(status.split(' ', 1)[0])
|
||||
response_headers = list(headers)
|
||||
@@ -51,32 +50,18 @@ class GzipMiddleware:
|
||||
if name_lower == 'content-type':
|
||||
content_type = value.split(';')[0].strip().lower()
|
||||
elif name_lower == 'content-length':
|
||||
try:
|
||||
content_length = int(value)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
content_length = int(value)
|
||||
elif name_lower == 'content-encoding':
|
||||
passthrough = True
|
||||
return start_response(status, headers, exc_info)
|
||||
elif name_lower == 'x-stream-response':
|
||||
passthrough = True
|
||||
should_compress = False
|
||||
return start_response(status, headers, exc_info)
|
||||
|
||||
if content_type and content_type in COMPRESSIBLE_MIMES:
|
||||
if content_length is None or content_length >= self.min_size:
|
||||
should_compress = True
|
||||
else:
|
||||
passthrough = True
|
||||
return start_response(status, headers, exc_info)
|
||||
|
||||
return None
|
||||
|
||||
app_iter = self.app(environ, custom_start_response)
|
||||
|
||||
if passthrough:
|
||||
return app_iter
|
||||
|
||||
response_body = b''.join(app_iter)
|
||||
response_body = b''.join(self.app(environ, custom_start_response))
|
||||
|
||||
if not response_started:
|
||||
return [response_body]
|
||||
|
||||
@@ -25,7 +25,7 @@ def _calculate_auto_connection_limit() -> int:
|
||||
|
||||
|
||||
def _calculate_auto_backlog(connection_limit: int) -> int:
|
||||
return max(128, min(connection_limit * 2, 4096))
|
||||
return max(64, min(connection_limit * 2, 4096))
|
||||
|
||||
|
||||
def _validate_rate_limit(value: str) -> str:
|
||||
@@ -115,7 +115,6 @@ class AppConfig:
|
||||
server_connection_limit: int
|
||||
server_backlog: int
|
||||
server_channel_timeout: int
|
||||
server_max_buffer_size: int
|
||||
server_threads_auto: bool
|
||||
server_connection_limit_auto: bool
|
||||
server_backlog_auto: bool
|
||||
@@ -136,7 +135,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds: float
|
||||
object_key_max_length_bytes: int
|
||||
object_cache_max_size: int
|
||||
meta_read_cache_max: int
|
||||
bucket_config_cache_ttl_seconds: float
|
||||
object_tag_limit: int
|
||||
encryption_chunk_size_bytes: int
|
||||
@@ -151,20 +149,6 @@ class AppConfig:
|
||||
num_trusted_proxies: int
|
||||
allowed_redirect_hosts: list[str]
|
||||
allow_internal_endpoints: bool
|
||||
website_hosting_enabled: bool
|
||||
gc_enabled: bool
|
||||
gc_interval_hours: float
|
||||
gc_temp_file_max_age_hours: float
|
||||
gc_multipart_max_age_days: int
|
||||
gc_lock_file_max_age_hours: float
|
||||
gc_dry_run: bool
|
||||
gc_io_throttle_ms: int
|
||||
integrity_enabled: bool
|
||||
integrity_interval_hours: float
|
||||
integrity_batch_size: int
|
||||
integrity_auto_heal: bool
|
||||
integrity_dry_run: bool
|
||||
integrity_io_throttle_ms: int
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
|
||||
@@ -256,7 +240,7 @@ class AppConfig:
|
||||
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
|
||||
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
|
||||
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 5))
|
||||
|
||||
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
|
||||
@@ -297,7 +281,6 @@ class AppConfig:
|
||||
server_backlog_auto = False
|
||||
|
||||
server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
|
||||
server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
|
||||
site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
|
||||
site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
|
||||
@@ -316,7 +299,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
|
||||
object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
|
||||
object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
|
||||
meta_read_cache_max = int(_get("META_READ_CACHE_MAX", 2048))
|
||||
bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
|
||||
object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
|
||||
encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
|
||||
@@ -331,24 +313,10 @@ class AppConfig:
|
||||
site_region = str(_get("SITE_REGION", "us-east-1"))
|
||||
site_priority = int(_get("SITE_PRIORITY", 100))
|
||||
ratelimit_admin = _validate_rate_limit(str(_get("RATE_LIMIT_ADMIN", "60 per minute")))
|
||||
num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 1))
|
||||
num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 0))
|
||||
allowed_redirect_hosts_raw = _get("ALLOWED_REDIRECT_HOSTS", "")
|
||||
allowed_redirect_hosts = [h.strip() for h in str(allowed_redirect_hosts_raw).split(",") if h.strip()]
|
||||
allow_internal_endpoints = str(_get("ALLOW_INTERNAL_ENDPOINTS", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
website_hosting_enabled = str(_get("WEBSITE_HOSTING_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_enabled = str(_get("GC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_interval_hours = float(_get("GC_INTERVAL_HOURS", 6.0))
|
||||
gc_temp_file_max_age_hours = float(_get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0))
|
||||
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
|
||||
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
|
||||
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
|
||||
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
|
||||
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
|
||||
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
|
||||
|
||||
return cls(storage_root=storage_root,
|
||||
max_upload_size=max_upload_size,
|
||||
@@ -402,7 +370,6 @@ class AppConfig:
|
||||
server_connection_limit=server_connection_limit,
|
||||
server_backlog=server_backlog,
|
||||
server_channel_timeout=server_channel_timeout,
|
||||
server_max_buffer_size=server_max_buffer_size,
|
||||
server_threads_auto=server_threads_auto,
|
||||
server_connection_limit_auto=server_connection_limit_auto,
|
||||
server_backlog_auto=server_backlog_auto,
|
||||
@@ -423,7 +390,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
|
||||
object_key_max_length_bytes=object_key_max_length_bytes,
|
||||
object_cache_max_size=object_cache_max_size,
|
||||
meta_read_cache_max=meta_read_cache_max,
|
||||
bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
|
||||
object_tag_limit=object_tag_limit,
|
||||
encryption_chunk_size_bytes=encryption_chunk_size_bytes,
|
||||
@@ -437,21 +403,7 @@ class AppConfig:
|
||||
ratelimit_admin=ratelimit_admin,
|
||||
num_trusted_proxies=num_trusted_proxies,
|
||||
allowed_redirect_hosts=allowed_redirect_hosts,
|
||||
allow_internal_endpoints=allow_internal_endpoints,
|
||||
website_hosting_enabled=website_hosting_enabled,
|
||||
gc_enabled=gc_enabled,
|
||||
gc_interval_hours=gc_interval_hours,
|
||||
gc_temp_file_max_age_hours=gc_temp_file_max_age_hours,
|
||||
gc_multipart_max_age_days=gc_multipart_max_age_days,
|
||||
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
|
||||
gc_dry_run=gc_dry_run,
|
||||
gc_io_throttle_ms=gc_io_throttle_ms,
|
||||
integrity_enabled=integrity_enabled,
|
||||
integrity_interval_hours=integrity_interval_hours,
|
||||
integrity_batch_size=integrity_batch_size,
|
||||
integrity_auto_heal=integrity_auto_heal,
|
||||
integrity_dry_run=integrity_dry_run,
|
||||
integrity_io_throttle_ms=integrity_io_throttle_ms)
|
||||
allow_internal_endpoints=allow_internal_endpoints)
|
||||
|
||||
def validate_and_report(self) -> list[str]:
|
||||
"""Validate configuration and return a list of warnings/issues.
|
||||
@@ -516,12 +468,10 @@ class AppConfig:
|
||||
issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
|
||||
if not (10 <= self.server_connection_limit <= 1000):
|
||||
issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
|
||||
if not (128 <= self.server_backlog <= 4096):
|
||||
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
|
||||
if not (64 <= self.server_backlog <= 4096):
|
||||
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
|
||||
if not (10 <= self.server_channel_timeout <= 300):
|
||||
issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
|
||||
if self.server_max_buffer_size < 1024 * 1024:
|
||||
issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")
|
||||
|
||||
if sys.platform != "win32":
|
||||
try:
|
||||
@@ -559,15 +509,12 @@ class AppConfig:
|
||||
print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
|
||||
if self.kms_enabled:
|
||||
print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
|
||||
if self.website_hosting_enabled:
|
||||
print(f" WEBSITE_HOSTING: Enabled")
|
||||
def _auto(flag: bool) -> str:
|
||||
return " (auto)" if flag else ""
|
||||
print(f" SERVER_THREADS: {self.server_threads}{_auto(self.server_threads_auto)}")
|
||||
print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
|
||||
print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
|
||||
print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
|
||||
print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
|
||||
print("=" * 60)
|
||||
|
||||
issues = self.validate_and_report()
|
||||
@@ -633,7 +580,6 @@ class AppConfig:
|
||||
"SERVER_CONNECTION_LIMIT": self.server_connection_limit,
|
||||
"SERVER_BACKLOG": self.server_backlog,
|
||||
"SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
|
||||
"SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
|
||||
"SITE_SYNC_ENABLED": self.site_sync_enabled,
|
||||
"SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
|
||||
"SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
|
||||
@@ -651,7 +597,6 @@ class AppConfig:
|
||||
"SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
|
||||
"OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
|
||||
"OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
|
||||
"META_READ_CACHE_MAX": self.meta_read_cache_max,
|
||||
"BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
|
||||
"OBJECT_TAG_LIMIT": self.object_tag_limit,
|
||||
"ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
|
||||
@@ -666,18 +611,4 @@ class AppConfig:
|
||||
"NUM_TRUSTED_PROXIES": self.num_trusted_proxies,
|
||||
"ALLOWED_REDIRECT_HOSTS": self.allowed_redirect_hosts,
|
||||
"ALLOW_INTERNAL_ENDPOINTS": self.allow_internal_endpoints,
|
||||
"WEBSITE_HOSTING_ENABLED": self.website_hosting_enabled,
|
||||
"GC_ENABLED": self.gc_enabled,
|
||||
"GC_INTERVAL_HOURS": self.gc_interval_hours,
|
||||
"GC_TEMP_FILE_MAX_AGE_HOURS": self.gc_temp_file_max_age_hours,
|
||||
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
|
||||
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
|
||||
"GC_DRY_RUN": self.gc_dry_run,
|
||||
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
|
||||
"INTEGRITY_ENABLED": self.integrity_enabled,
|
||||
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
|
||||
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
|
||||
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
|
||||
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
|
||||
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
|
||||
}
|
||||
|
||||
@@ -189,16 +189,7 @@ class EncryptedObjectStorage:
|
||||
|
||||
def list_objects(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects(bucket_name, **kwargs)
|
||||
|
||||
def list_objects_shallow(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects_shallow(bucket_name, **kwargs)
|
||||
|
||||
def iter_objects_shallow(self, bucket_name: str, **kwargs):
|
||||
return self.storage.iter_objects_shallow(bucket_name, **kwargs)
|
||||
|
||||
def search_objects(self, bucket_name: str, query: str, **kwargs):
|
||||
return self.storage.search_objects(bucket_name, query, **kwargs)
|
||||
|
||||
|
||||
def list_objects_all(self, bucket_name: str):
|
||||
return self.storage.list_objects_all(bucket_name)
|
||||
|
||||
@@ -279,15 +270,9 @@ class EncryptedObjectStorage:
|
||||
|
||||
def get_bucket_quota(self, bucket_name: str):
|
||||
return self.storage.get_bucket_quota(bucket_name)
|
||||
|
||||
|
||||
def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
|
||||
return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
|
||||
|
||||
def get_bucket_website(self, bucket_name: str):
|
||||
return self.storage.get_bucket_website(bucket_name)
|
||||
|
||||
def set_bucket_website(self, bucket_name: str, website_config):
|
||||
return self.storage.set_bucket_website(bucket_name, website_config)
|
||||
|
||||
def _compute_etag(self, path: Path) -> str:
|
||||
return self.storage._compute_etag(path)
|
||||
|
||||
@@ -19,17 +19,6 @@ from cryptography.hazmat.primitives import hashes
|
||||
if sys.platform != "win32":
|
||||
import fcntl
|
||||
|
||||
try:
|
||||
import myfsio_core as _rc
|
||||
if not all(hasattr(_rc, f) for f in (
|
||||
"encrypt_stream_chunked", "decrypt_stream_chunked",
|
||||
)):
|
||||
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
|
||||
_HAS_RUST = True
|
||||
except ImportError:
|
||||
_rc = None
|
||||
_HAS_RUST = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -349,69 +338,6 @@ class StreamingEncryptor:
|
||||
output.seek(0)
|
||||
return output
|
||||
|
||||
    def encrypt_file(self, input_path: str, output_path: str) -> EncryptionMetadata:
        """Encrypt *input_path* to *output_path* with chunked AES-GCM.

        On-disk format (Python fallback path): a 4-byte big-endian chunk
        count header, then for each chunk a HEADER_SIZE big-endian length
        prefix followed by the AES-GCM ciphertext of that chunk. Each chunk
        uses a nonce derived from *base_nonce* and its index via
        ``_derive_chunk_nonce``. The Rust fast path is presumed to write
        the same layout — TODO confirm against myfsio_core.

        Returns an EncryptionMetadata carrying the base nonce and the
        wrapped (encrypted) per-object data key.
        """
        # Fresh data key per object; only the wrapped form is persisted.
        data_key, encrypted_data_key = self.provider.generate_data_key()
        base_nonce = secrets.token_bytes(12)

        if _HAS_RUST:
            # Native implementation handles the whole stream in one call.
            _rc.encrypt_stream_chunked(
                input_path, output_path, data_key, base_nonce, self.chunk_size
            )
        else:
            with open(input_path, "rb") as stream:
                aesgcm = AESGCM(data_key)
                with open(output_path, "wb") as out:
                    # Placeholder for the chunk count; patched after the loop
                    # once the real number of chunks is known.
                    out.write(b"\x00\x00\x00\x00")
                    chunk_index = 0
                    while True:
                        chunk = stream.read(self.chunk_size)
                        if not chunk:
                            break
                        # Unique nonce per chunk prevents nonce reuse under one key.
                        chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
                        encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
                        out.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
                        out.write(encrypted_chunk)
                        chunk_index += 1
                    # Back-patch the 4-byte header with the final chunk count.
                    out.seek(0)
                    out.write(chunk_index.to_bytes(4, "big"))

        return EncryptionMetadata(
            algorithm="AES256",
            # Providers without a KEY_ID attribute are recorded as "local".
            key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
            nonce=base_nonce,
            encrypted_data_key=encrypted_data_key,
        )
|
||||
|
||||
    def decrypt_file(self, input_path: str, output_path: str,
                     metadata: EncryptionMetadata) -> None:
        """Decrypt a file produced by ``encrypt_file`` into *output_path*.

        Reads the 4-byte big-endian chunk count header, then per chunk a
        HEADER_SIZE length prefix and the ciphertext, decrypting each with
        a nonce derived from ``metadata.nonce`` and the chunk index.

        Raises EncryptionError for a missing/truncated header, a truncated
        or incomplete chunk, or an authentication/decryption failure.
        """
        # Unwrap the per-object data key via the configured provider.
        data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)
        base_nonce = metadata.nonce

        if _HAS_RUST:
            # Native implementation handles the whole stream in one call.
            _rc.decrypt_stream_chunked(input_path, output_path, data_key, base_nonce)
        else:
            with open(input_path, "rb") as stream:
                chunk_count_bytes = stream.read(4)
                if len(chunk_count_bytes) < 4:
                    raise EncryptionError("Invalid encrypted stream: missing header")
                chunk_count = int.from_bytes(chunk_count_bytes, "big")
                aesgcm = AESGCM(data_key)
                with open(output_path, "wb") as out:
                    for chunk_index in range(chunk_count):
                        size_bytes = stream.read(self.HEADER_SIZE)
                        if len(size_bytes) < self.HEADER_SIZE:
                            raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
                        chunk_size = int.from_bytes(size_bytes, "big")
                        encrypted_chunk = stream.read(chunk_size)
                        if len(encrypted_chunk) < chunk_size:
                            raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")
                        # Must mirror the nonce derivation used at encrypt time.
                        chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
                        try:
                            decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
                            out.write(decrypted_chunk)
                        except Exception as exc:
                            # AES-GCM auth failure (or a write error) surfaces here;
                            # wrap it with the chunk index for diagnosability.
                            raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc
||||
|
||||
|
||||
class EncryptionManager:
|
||||
"""Manages encryption providers and operations."""
|
||||
|
||||
@@ -175,21 +175,13 @@ def handle_app_error(error: AppError) -> Response:
|
||||
|
||||
def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
|
||||
g.s3_error_code = "SlowDown"
|
||||
if request.path.startswith("/ui") or request.path.startswith("/buckets"):
|
||||
wants_json = (
|
||||
request.is_json or
|
||||
request.headers.get("X-Requested-With") == "XMLHttpRequest" or
|
||||
"application/json" in request.accept_mimetypes.values()
|
||||
)
|
||||
if wants_json:
|
||||
return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
|
||||
error = Element("Error")
|
||||
SubElement(error, "Code").text = "SlowDown"
|
||||
SubElement(error, "Message").text = "Please reduce your request rate."
|
||||
SubElement(error, "Resource").text = request.path
|
||||
SubElement(error, "RequestId").text = getattr(g, "request_id", "")
|
||||
xml_bytes = tostring(error, encoding="utf-8")
|
||||
return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
|
||||
return Response(xml_bytes, status=429, mimetype="application/xml")
|
||||
|
||||
|
||||
def register_error_handlers(app):
|
||||
|
||||
596
app/gc.py
596
app/gc.py
@@ -1,596 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class GCResult:
    """Counters accumulated over a single garbage-collection pass."""

    temp_files_deleted: int = 0
    temp_bytes_freed: int = 0
    multipart_uploads_deleted: int = 0
    multipart_bytes_freed: int = 0
    lock_files_deleted: int = 0
    orphaned_metadata_deleted: int = 0
    orphaned_versions_deleted: int = 0
    orphaned_version_bytes_freed: int = 0
    empty_dirs_removed: int = 0
    errors: List[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0

    def to_dict(self) -> dict:
        """Serialize all counters into a plain dict for logging/persistence."""
        return dict(
            temp_files_deleted=self.temp_files_deleted,
            temp_bytes_freed=self.temp_bytes_freed,
            multipart_uploads_deleted=self.multipart_uploads_deleted,
            multipart_bytes_freed=self.multipart_bytes_freed,
            lock_files_deleted=self.lock_files_deleted,
            orphaned_metadata_deleted=self.orphaned_metadata_deleted,
            orphaned_versions_deleted=self.orphaned_versions_deleted,
            orphaned_version_bytes_freed=self.orphaned_version_bytes_freed,
            empty_dirs_removed=self.empty_dirs_removed,
            errors=self.errors,
            execution_time_seconds=self.execution_time_seconds,
        )

    @property
    def total_bytes_freed(self) -> int:
        """Sum of every bytes-freed counter."""
        return sum((
            self.temp_bytes_freed,
            self.multipart_bytes_freed,
            self.orphaned_version_bytes_freed,
        ))

    @property
    def has_work(self) -> bool:
        """True when at least one deletion/removal counter is positive."""
        counters = (
            self.temp_files_deleted,
            self.multipart_uploads_deleted,
            self.lock_files_deleted,
            self.orphaned_metadata_deleted,
            self.orphaned_versions_deleted,
            self.empty_dirs_removed,
        )
        return any(count > 0 for count in counters)
||||
|
||||
|
||||
@dataclass
class GCExecutionRecord:
    """One persisted GC run: when it ran, its result dict, and the dry-run flag."""

    timestamp: float
    result: dict
    dry_run: bool

    def to_dict(self) -> dict:
        """Serialize for JSON persistence."""
        return dict(timestamp=self.timestamp, result=self.result, dry_run=self.dry_run)

    @classmethod
    def from_dict(cls, data: dict) -> GCExecutionRecord:
        """Rebuild a record from its serialized form; dry_run defaults to False."""
        dry = data.get("dry_run", False)
        return cls(timestamp=data["timestamp"], result=data["result"], dry_run=dry)
|
||||
|
||||
|
||||
class GCHistoryStore:
    """JSON-file-backed store of the most recent GC execution records.

    Records live under ``<storage_root>/.myfsio.sys/config/gc_history.json``;
    only the newest ``max_records`` entries are kept on save.
    """

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        # Serializes read-modify-write cycles in add().
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        """Location of the history JSON file under the system config dir."""
        return self.storage_root / ".myfsio.sys" / "config" / "gc_history.json"

    def load(self) -> List[GCExecutionRecord]:
        """Read all persisted records; best-effort — returns [] on any error."""
        history_path = self._get_path()
        if not history_path.exists():
            return []
        try:
            raw = json.loads(history_path.read_text(encoding="utf-8"))
            return [GCExecutionRecord.from_dict(entry) for entry in raw.get("executions", [])]
        except (OSError, ValueError, KeyError) as exc:
            logger.error("Failed to load GC history: %s", exc)
            return []

    def save(self, records: List[GCExecutionRecord]) -> None:
        """Persist at most ``max_records`` records; errors are logged, not raised."""
        history_path = self._get_path()
        history_path.parent.mkdir(parents=True, exist_ok=True)
        payload = {"executions": [rec.to_dict() for rec in records[: self.max_records]]}
        try:
            history_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")
        except OSError as exc:
            logger.error("Failed to save GC history: %s", exc)

    def add(self, record: GCExecutionRecord) -> None:
        """Prepend a record (newest first) and persist, under the lock."""
        with self._lock:
            existing = self.load()
            existing.insert(0, record)
            self.save(existing)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[GCExecutionRecord]:
        """Return a page of records, newest first."""
        return self.load()[offset : offset + limit]
|
||||
|
||||
|
||||
def _dir_size(path: Path) -> int:
|
||||
total = 0
|
||||
try:
|
||||
for f in path.rglob("*"):
|
||||
if f.is_file():
|
||||
try:
|
||||
total += f.stat().st_size
|
||||
except OSError:
|
||||
pass
|
||||
except OSError:
|
||||
pass
|
||||
return total
|
||||
|
||||
|
||||
def _file_age_hours(path: Path) -> float:
|
||||
try:
|
||||
mtime = path.stat().st_mtime
|
||||
return (time.time() - mtime) / 3600.0
|
||||
except OSError:
|
||||
return 0.0
|
||||
|
||||
|
||||
class GarbageCollector:
|
||||
SYSTEM_ROOT = ".myfsio.sys"
|
||||
SYSTEM_TMP_DIR = "tmp"
|
||||
SYSTEM_MULTIPART_DIR = "multipart"
|
||||
SYSTEM_BUCKETS_DIR = "buckets"
|
||||
BUCKET_META_DIR = "meta"
|
||||
BUCKET_VERSIONS_DIR = "versions"
|
||||
INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
storage_root: Path,
|
||||
interval_hours: float = 6.0,
|
||||
temp_file_max_age_hours: float = 24.0,
|
||||
multipart_max_age_days: int = 7,
|
||||
lock_file_max_age_hours: float = 1.0,
|
||||
dry_run: bool = False,
|
||||
max_history: int = 50,
|
||||
io_throttle_ms: int = 10,
|
||||
) -> None:
|
||||
self.storage_root = Path(storage_root)
|
||||
self.interval_seconds = interval_hours * 3600.0
|
||||
self.temp_file_max_age_hours = temp_file_max_age_hours
|
||||
self.multipart_max_age_days = multipart_max_age_days
|
||||
self.lock_file_max_age_hours = lock_file_max_age_hours
|
||||
self.dry_run = dry_run
|
||||
self._timer: Optional[threading.Timer] = None
|
||||
self._shutdown = False
|
||||
self._lock = threading.Lock()
|
||||
self._scanning = False
|
||||
self._scan_start_time: Optional[float] = None
|
||||
self._io_throttle = max(0, io_throttle_ms) / 1000.0
|
||||
self.history_store = GCHistoryStore(storage_root, max_records=max_history)
|
||||
|
||||
def start(self) -> None:
|
||||
if self._timer is not None:
|
||||
return
|
||||
self._shutdown = False
|
||||
self._schedule_next()
|
||||
logger.info(
|
||||
"GC started: interval=%.1fh, temp_max_age=%.1fh, multipart_max_age=%dd, lock_max_age=%.1fh, dry_run=%s",
|
||||
self.interval_seconds / 3600.0,
|
||||
self.temp_file_max_age_hours,
|
||||
self.multipart_max_age_days,
|
||||
self.lock_file_max_age_hours,
|
||||
self.dry_run,
|
||||
)
|
||||
|
||||
def stop(self) -> None:
|
||||
self._shutdown = True
|
||||
if self._timer:
|
||||
self._timer.cancel()
|
||||
self._timer = None
|
||||
logger.info("GC stopped")
|
||||
|
||||
def _schedule_next(self) -> None:
|
||||
if self._shutdown:
|
||||
return
|
||||
self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
|
||||
self._timer.daemon = True
|
||||
self._timer.start()
|
||||
|
||||
def _run_cycle(self) -> None:
    """Timer callback: run one GC pass, then re-arm the timer."""
    if self._shutdown:
        return
    try:
        self.run_now()
    except Exception as exc:
        # Broad catch is deliberate: a failing cycle must not kill scheduling.
        logger.error("GC cycle failed: %s", exc)
    finally:
        self._schedule_next()
def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
    """Execute one full GC pass synchronously.

    Args:
        dry_run: Per-call override of the configured dry-run flag; None
            means "use the instance default".

    Returns:
        The populated GCResult for this pass.

    Raises:
        RuntimeError: if another pass is already running (non-blocking lock).
    """
    if not self._lock.acquire(blocking=False):
        raise RuntimeError("GC is already in progress")

    effective_dry_run = dry_run if dry_run is not None else self.dry_run

    try:
        self._scanning = True
        self._scan_start_time = time.time()

        start = self._scan_start_time
        result = GCResult()

        # The cleaner helpers read self.dry_run, so the per-call override is
        # applied by temporarily swapping the attribute and restoring it in
        # the finally.  NOTE(review): this is briefly visible to concurrent
        # readers such as get_status() — confirm that is acceptable.
        original_dry_run = self.dry_run
        self.dry_run = effective_dry_run
        try:
            self._clean_temp_files(result)
            self._clean_orphaned_multipart(result)
            self._clean_stale_locks(result)
            self._clean_orphaned_metadata(result)
            self._clean_orphaned_versions(result)
            # Empty-dir pruning runs last so dirs emptied above are caught.
            self._clean_empty_dirs(result)
        finally:
            self.dry_run = original_dry_run

        result.execution_time_seconds = time.time() - start

        # Only log when something happened (or went wrong) to keep logs quiet.
        if result.has_work or result.errors:
            logger.info(
                "GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
                "locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
                result.execution_time_seconds,
                result.temp_files_deleted,
                result.temp_bytes_freed / (1024 * 1024),
                result.multipart_uploads_deleted,
                result.multipart_bytes_freed / (1024 * 1024),
                result.lock_files_deleted,
                result.orphaned_metadata_deleted,
                result.orphaned_versions_deleted,
                result.orphaned_version_bytes_freed / (1024 * 1024),
                result.empty_dirs_removed,
                len(result.errors),
                " (dry run)" if effective_dry_run else "",
            )

        # Every run — including no-op and dry runs — is recorded in history.
        record = GCExecutionRecord(
            timestamp=time.time(),
            result=result.to_dict(),
            dry_run=effective_dry_run,
        )
        self.history_store.add(record)

        return result
    finally:
        self._scanning = False
        self._scan_start_time = None
        self._lock.release()
def run_async(self, dry_run: Optional[bool] = None) -> bool:
    """Start run_now() on a background daemon thread.

    Returns False (and starts nothing) when a scan is already active.
    """
    if self._scanning:
        return False
    worker = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
    worker.start()
    return True
def _system_path(self) -> Path:
    """Absolute path of the hidden system area under the storage root."""
    root = self.storage_root
    return root / self.SYSTEM_ROOT
def _throttle(self) -> bool:
    """Sleep the configured I/O pause; return True when shutdown was requested."""
    if self._shutdown:
        return True
    pause = self._io_throttle
    if pause > 0:
        time.sleep(pause)
    # Shutdown may have been requested while we slept.
    return self._shutdown
def _list_bucket_names(self) -> List[str]:
    """Names of bucket directories under the storage root.

    The hidden system directory is excluded; an unreadable root yields
    whatever was collected before the error.
    """
    found: List[str] = []
    try:
        for child in self.storage_root.iterdir():
            if child.name == self.SYSTEM_ROOT:
                continue
            if child.is_dir():
                found.append(child.name)
    except OSError:
        pass
    return found
def _clean_temp_files(self, result: GCResult) -> None:
    """Delete files in the system tmp dir older than temp_file_max_age_hours."""
    tmp_dir = self._system_path() / self.SYSTEM_TMP_DIR
    if not tmp_dir.exists():
        return
    max_age = self.temp_file_max_age_hours
    try:
        for candidate in tmp_dir.iterdir():
            if self._throttle():
                return
            if not candidate.is_file():
                continue
            if _file_age_hours(candidate) < max_age:
                continue
            try:
                freed = candidate.stat().st_size
                if not self.dry_run:
                    candidate.unlink()
                result.temp_files_deleted += 1
                result.temp_bytes_freed += freed
            except OSError as exc:
                result.errors.append(f"temp file {candidate.name}: {exc}")
    except OSError as exc:
        result.errors.append(f"scan tmp dir: {exc}")
def _clean_orphaned_multipart(self, result: GCResult) -> None:
    """Remove multipart upload directories idle longer than the cutoff.

    Both the system-area layout and the legacy in-bucket ``.multipart``
    layout are scanned for every bucket.
    """
    cutoff_hours = self.multipart_max_age_days * 24.0
    bucket_names = self._list_bucket_names()

    for bucket_name in bucket_names:
        if self._shutdown:
            return
        for multipart_root in (
            self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
            self.storage_root / bucket_name / ".multipart",
        ):
            if not multipart_root.exists():
                continue
            try:
                for upload_dir in multipart_root.iterdir():
                    if self._throttle():
                        return
                    if not upload_dir.is_dir():
                        continue
                    self._maybe_clean_upload(upload_dir, cutoff_hours, result)
            except OSError as e:
                result.errors.append(f"scan multipart {bucket_name}: {e}")
def _maybe_clean_upload(self, upload_dir: Path, cutoff_hours: float, result: GCResult) -> None:
    """Delete one multipart upload directory once it is older than the cutoff.

    Age is taken from ``manifest.json`` when present (last-activity marker),
    otherwise from the directory itself.

    Args:
        upload_dir: Directory holding one upload's parts and manifest.
        cutoff_hours: Maximum idle age before deletion.
        result: Accumulator for counters and errors.
    """
    manifest_path = upload_dir / "manifest.json"
    age = _file_age_hours(manifest_path) if manifest_path.exists() else _file_age_hours(upload_dir)

    if age < cutoff_hours:
        return

    dir_bytes = _dir_size(upload_dir)
    try:
        if not self.dry_run:
            # Fix: previously rmtree was called with ignore_errors=True, which
            # made the OSError handler below dead code — failed removals were
            # silently counted as successful deletions.  Let errors propagate
            # so they are recorded instead.
            shutil.rmtree(upload_dir)
        result.multipart_uploads_deleted += 1
        result.multipart_bytes_freed += dir_bytes
    except OSError as e:
        result.errors.append(f"multipart {upload_dir.name}: {e}")
def _clean_stale_locks(self, result: GCResult) -> None:
    """Delete ``*.lock`` files older than lock_file_max_age_hours.

    Scans the per-bucket ``locks`` directories under the system area.
    """
    buckets_root = self._system_path() / self.SYSTEM_BUCKETS_DIR
    if not buckets_root.exists():
        return

    try:
        for bucket_dir in buckets_root.iterdir():
            if self._shutdown:
                return
            if not bucket_dir.is_dir():
                continue
            locks_dir = bucket_dir / "locks"
            if not locks_dir.exists():
                continue
            try:
                for lock_file in locks_dir.iterdir():
                    if self._throttle():
                        return
                    # Only regular files with a .lock suffix are candidates.
                    if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
                        continue
                    age = _file_age_hours(lock_file)
                    if age < self.lock_file_max_age_hours:
                        continue
                    try:
                        if not self.dry_run:
                            # missing_ok: the lock holder may release it mid-scan.
                            lock_file.unlink(missing_ok=True)
                        result.lock_files_deleted += 1
                    except OSError as e:
                        result.errors.append(f"lock {lock_file.name}: {e}")
            except OSError as e:
                result.errors.append(f"scan locks {bucket_dir.name}: {e}")
    except OSError as e:
        result.errors.append(f"scan buckets for locks: {e}")
def _clean_orphaned_metadata(self, result: GCResult) -> None:
    """Remove metadata entries whose object files no longer exist.

    Covers both the legacy in-bucket ``.meta`` tree and the newer index
    layout under the system area.
    """
    for bucket_name in self._list_bucket_names():
        if self._shutdown:
            return
        legacy_root = self.storage_root / bucket_name / ".meta"
        if legacy_root.exists():
            self._clean_legacy_metadata(bucket_name, legacy_root, result)

        index_root = (
            self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        )
        if index_root.exists():
            self._clean_index_metadata(bucket_name, index_root, result)
def _clean_legacy_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
    """Delete ``*.meta.json`` sidecars whose object file is gone (legacy layout)."""
    bucket_path = self.storage_root / bucket_name
    try:
        for meta_file in meta_root.rglob("*.meta.json"):
            if self._throttle():
                return
            if not meta_file.is_file():
                continue
            try:
                rel = meta_file.relative_to(meta_root)
                # Sidecar path mirrors the object key plus a .meta.json suffix.
                object_key = rel.as_posix().removesuffix(".meta.json")
                object_path = bucket_path / object_key
                if not object_path.exists():
                    if not self.dry_run:
                        meta_file.unlink(missing_ok=True)
                    result.orphaned_metadata_deleted += 1
            except (OSError, ValueError) as e:
                result.errors.append(f"legacy meta {bucket_name}/{meta_file.name}: {e}")
    except OSError as e:
        result.errors.append(f"scan legacy meta {bucket_name}: {e}")
def _clean_index_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
    """Prune entries for missing objects from per-directory ``_index.json`` files.

    Each ``_index.json`` maps key names (relative to its directory) to
    metadata.  Entries whose object file no longer exists are dropped; an
    index left empty is deleted outright.
    """
    bucket_path = self.storage_root / bucket_name
    try:
        for index_file in meta_root.rglob("_index.json"):
            if self._throttle():
                return
            if not index_file.is_file():
                continue
            try:
                with open(index_file, "r", encoding="utf-8") as f:
                    index_data = json.load(f)
            except (OSError, json.JSONDecodeError):
                # Unreadable/corrupt index: leave it for other tooling.
                continue

            keys_to_remove = []
            for key in index_data:
                # Reconstruct the full object key from the index's directory.
                rel_dir = index_file.parent.relative_to(meta_root)
                if rel_dir == Path("."):
                    full_key = key
                else:
                    full_key = rel_dir.as_posix() + "/" + key
                object_path = bucket_path / full_key
                if not object_path.exists():
                    keys_to_remove.append(key)

            if keys_to_remove:
                if not self.dry_run:
                    for k in keys_to_remove:
                        index_data.pop(k, None)
                    if index_data:
                        try:
                            with open(index_file, "w", encoding="utf-8") as f:
                                json.dump(index_data, f)
                        except OSError as e:
                            result.errors.append(f"write index {bucket_name}: {e}")
                            # Write failed: skip counting these removals.
                            continue
                    else:
                        # Index became empty — remove the file itself.
                        try:
                            index_file.unlink(missing_ok=True)
                        except OSError:
                            pass
                # Dry runs count what *would* have been removed.
                result.orphaned_metadata_deleted += len(keys_to_remove)
    except OSError as e:
        result.errors.append(f"scan index meta {bucket_name}: {e}")
def _clean_orphaned_versions(self, result: GCResult) -> None:
    """Remove version files for objects that no longer exist.

    Both the system-area versions layout and the legacy in-bucket
    ``.versions`` layout are scanned for every bucket.
    """
    bucket_names = self._list_bucket_names()

    for bucket_name in bucket_names:
        if self._shutdown:
            return
        bucket_path = self.storage_root / bucket_name
        for versions_root in (
            self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
            self.storage_root / bucket_name / ".versions",
        ):
            if not versions_root.exists():
                continue
            try:
                for key_dir in versions_root.iterdir():
                    if self._throttle():
                        return
                    if not key_dir.is_dir():
                        continue
                    self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
            except OSError as e:
                result.errors.append(f"scan versions {bucket_name}: {e}")
def _clean_versions_for_key(
    self, bucket_path: Path, versions_root: Path, key_dir: Path, result: GCResult
) -> None:
    """Delete all version files under *key_dir* when its object is gone.

    The directory name relative to *versions_root* mirrors the object key;
    if that object still exists the versions are kept untouched.
    """
    try:
        rel = key_dir.relative_to(versions_root)
    except ValueError:
        return

    object_path = bucket_path / rel
    if object_path.exists():
        return

    # Version payloads (*.bin) and their metadata (*.json).
    version_files = list(key_dir.glob("*.bin")) + list(key_dir.glob("*.json"))
    if not version_files:
        return

    for vf in version_files:
        try:
            # Only payload files contribute to the freed-bytes counter.
            size = vf.stat().st_size if vf.suffix == ".bin" else 0
            if not self.dry_run:
                vf.unlink(missing_ok=True)
            if vf.suffix == ".bin":
                result.orphaned_version_bytes_freed += size
            result.orphaned_versions_deleted += 1
        except OSError as e:
            result.errors.append(f"version file {vf.name}: {e}")
def _clean_empty_dirs(self, result: GCResult) -> None:
    """Prune empty directory trees under the system and legacy housekeeping roots."""
    system = self._system_path()
    targets = [
        system / self.SYSTEM_TMP_DIR,
        system / self.SYSTEM_MULTIPART_DIR,
        system / self.SYSTEM_BUCKETS_DIR,
    ]
    for bucket_name in self._list_bucket_names():
        bucket = self.storage_root / bucket_name
        targets.extend(bucket / sub for sub in (".meta", ".versions", ".multipart"))

    for root in targets:
        if root.exists():
            self._remove_empty_dirs_recursive(root, root, result)
def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
    """Depth-first removal of empty directories under *path*.

    Returns True when *path* is (or, in dry-run mode, would become) empty
    and was removed/counted.  *stop_at* itself is never removed.
    """
    if self._shutdown:
        return False
    if not path.is_dir():
        return False

    try:
        children = list(path.iterdir())
    except OSError:
        return False

    all_empty = True
    for child in children:
        if self._throttle():
            return False
        if child.is_dir():
            if not self._remove_empty_dirs_recursive(child, stop_at, result):
                all_empty = False
        else:
            # Any regular file keeps the whole subtree alive.
            all_empty = False

    if all_empty and path != stop_at:
        try:
            if not self.dry_run:
                path.rmdir()
            # In dry-run mode we still count and return True so parents
            # can be reported as would-be-empty too.
            result.empty_dirs_removed += 1
            return True
        except OSError:
            return False
    return all_empty
def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
    """Return a page of recent GC execution records as plain dicts."""
    page = self.history_store.get_history(limit, offset)
    return [record.to_dict() for record in page]
def get_status(self) -> dict:
    """Snapshot of scheduler/scan state plus the effective configuration."""
    status: Dict[str, Any] = {
        # NOTE(review): this parses as (not self._shutdown) or (timer is not
        # None), so "enabled" is True for a freshly constructed, never-started
        # instance — confirm that is the intended meaning.
        "enabled": not self._shutdown or self._timer is not None,
        "running": self._timer is not None and not self._shutdown,
        "scanning": self._scanning,
        "interval_hours": self.interval_seconds / 3600.0,
        "temp_file_max_age_hours": self.temp_file_max_age_hours,
        "multipart_max_age_days": self.multipart_max_age_days,
        "lock_file_max_age_hours": self.lock_file_max_age_hours,
        "dry_run": self.dry_run,
        "io_throttle_ms": round(self._io_throttle * 1000),
    }
    if self._scanning and self._scan_start_time:
        status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
    return status
||||
722
app/iam.py
722
app/iam.py
File diff suppressed because it is too large
Load Diff
995
app/integrity.py
995
app/integrity.py
@@ -1,995 +0,0 @@
|
||||
from __future__ import annotations

import hashlib
import json
import logging
import os
import threading
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

# Optional native acceleration: use the myfsio_core extension for file hashing
# when it is importable AND new enough to expose md5_file.  An outdated build
# is rejected (re-raised as ImportError) so we fall back to pure Python
# instead of crashing later on a missing attribute.
try:
    import myfsio_core as _rc
    if not hasattr(_rc, "md5_file"):
        raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
    _HAS_RUST = True
except ImportError:
    _HAS_RUST = False

logger = logging.getLogger(__name__)
|
||||
|
||||
def _compute_etag(path: Path) -> str:
    """MD5 hex digest of *path*, via the Rust extension when available."""
    if _HAS_RUST:
        return _rc.md5_file(str(path))
    digest = hashlib.md5()
    with path.open("rb") as stream:
        while chunk := stream.read(8192):
            digest.update(chunk)
    return digest.hexdigest()
||||
@dataclass
class IntegrityIssue:
    """A single integrity finding for one object, plus its healing outcome."""

    issue_type: str
    bucket: str
    key: str
    detail: str
    healed: bool = False
    heal_action: str = ""

    def to_dict(self) -> dict:
        """Serialize for JSON storage and API responses."""
        field_names = ("issue_type", "bucket", "key", "detail", "healed", "heal_action")
        return {name: getattr(self, name) for name in field_names}
||||
@dataclass
class IntegrityResult:
    """Aggregated counters, issues, and errors for one integrity run."""

    corrupted_objects: int = 0
    orphaned_objects: int = 0
    phantom_metadata: int = 0
    stale_versions: int = 0
    etag_cache_inconsistencies: int = 0
    legacy_metadata_drifts: int = 0
    issues_healed: int = 0
    issues: List[IntegrityIssue] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    objects_scanned: int = 0
    buckets_scanned: int = 0
    execution_time_seconds: float = 0.0

    # Counter fields that together define total_issues.
    _ISSUE_COUNTERS = (
        "corrupted_objects",
        "orphaned_objects",
        "phantom_metadata",
        "stale_versions",
        "etag_cache_inconsistencies",
        "legacy_metadata_drifts",
    )

    def to_dict(self) -> dict:
        """Serialize, flattening each issue via IntegrityIssue.to_dict()."""
        payload = {name: getattr(self, name) for name in self._ISSUE_COUNTERS}
        payload["issues_healed"] = self.issues_healed
        payload["issues"] = [issue.to_dict() for issue in self.issues]
        payload["errors"] = self.errors
        payload["objects_scanned"] = self.objects_scanned
        payload["buckets_scanned"] = self.buckets_scanned
        payload["execution_time_seconds"] = self.execution_time_seconds
        return payload

    @property
    def total_issues(self) -> int:
        """Sum of every per-category issue counter."""
        return sum(getattr(self, name) for name in self._ISSUE_COUNTERS)

    @property
    def has_issues(self) -> bool:
        """True when any issue category is non-zero."""
        return self.total_issues > 0
||||
@dataclass
class IntegrityExecutionRecord:
    """One persisted integrity run: when it ran, its result, and its flags."""

    timestamp: float
    result: dict
    dry_run: bool
    auto_heal: bool

    def to_dict(self) -> dict:
        """Serialize for the history file."""
        return {
            name: getattr(self, name)
            for name in ("timestamp", "result", "dry_run", "auto_heal")
        }

    @classmethod
    def from_dict(cls, data: dict) -> IntegrityExecutionRecord:
        """Rebuild a record; flags missing from old files default to False."""
        return cls(
            data["timestamp"],
            data["result"],
            data.get("dry_run", False),
            data.get("auto_heal", False),
        )
||||
class IntegrityHistoryStore:
    """JSON-file-backed list of recent integrity runs, newest first."""

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        # Guards the read-modify-write in add(); load()/save() alone are unlocked.
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        # History lives in the hidden system config area under the storage root.
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_history.json"

    def load(self) -> List[IntegrityExecutionRecord]:
        """Read all persisted records; a corrupt or missing file yields []."""
        path = self._get_path()
        if not path.exists():
            return []
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            return [IntegrityExecutionRecord.from_dict(d) for d in data.get("executions", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error("Failed to load integrity history: %s", e)
            return []

    def save(self, records: List[IntegrityExecutionRecord]) -> None:
        """Persist at most max_records records; failures are logged, not raised."""
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"executions": [r.to_dict() for r in records[: self.max_records]]}
        try:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error("Failed to save integrity history: %s", e)

    def add(self, record: IntegrityExecutionRecord) -> None:
        """Prepend a record (newest first) under the store lock and persist."""
        with self._lock:
            records = self.load()
            records.insert(0, record)
            self.save(records)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[IntegrityExecutionRecord]:
        """Return one page of records, newest first."""
        return self.load()[offset : offset + limit]
||||
class IntegrityCursorStore:
    """Persists per-bucket scan progress so batched runs can resume.

    The cursor file maps each bucket to its last-scanned timestamp, the last
    object key processed (when a scan was interrupted mid-bucket), and a
    completion flag.
    """

    def __init__(self, storage_root: Path) -> None:
        self.storage_root = storage_root
        # Guards read-modify-write in update_bucket()/clean_stale().
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"

    def load(self) -> Dict[str, Any]:
        """Read the cursor file; any problem yields a fresh empty structure."""
        path = self._get_path()
        if not path.exists():
            return {"buckets": {}}
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            # Defensive: reject files whose shape is wrong rather than crash later.
            if not isinstance(data.get("buckets"), dict):
                return {"buckets": {}}
            return data
        except (OSError, ValueError, KeyError):
            return {"buckets": {}}

    def save(self, data: Dict[str, Any]) -> None:
        """Persist the cursor; failures are logged, not raised."""
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        try:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error("Failed to save integrity cursor: %s", e)

    def update_bucket(
        self,
        bucket_name: str,
        timestamp: float,
        last_key: Optional[str] = None,
        completed: bool = False,
    ) -> None:
        """Record progress for one bucket.

        Args:
            bucket_name: Bucket being scanned.
            timestamp: Time of this update.
            last_key: Last object key processed (ignored when completed).
            completed: True marks the bucket fully scanned and clears last_key.
        """
        with self._lock:
            data = self.load()
            entry = data["buckets"].get(bucket_name, {})
            if completed:
                entry["last_scanned"] = timestamp
                entry.pop("last_key", None)
                entry["completed"] = True
            else:
                entry["last_scanned"] = timestamp
                if last_key is not None:
                    entry["last_key"] = last_key
                entry["completed"] = False
            data["buckets"][bucket_name] = entry
            self.save(data)

    def clean_stale(self, existing_buckets: List[str]) -> None:
        """Drop cursor entries for buckets that no longer exist."""
        with self._lock:
            data = self.load()
            existing_set = set(existing_buckets)
            stale_keys = [k for k in data["buckets"] if k not in existing_set]
            if stale_keys:
                for k in stale_keys:
                    del data["buckets"][k]
                self.save(data)

    def get_last_key(self, bucket_name: str) -> Optional[str]:
        """Last key processed for *bucket_name*, or None when unknown/complete."""
        data = self.load()
        entry = data.get("buckets", {}).get(bucket_name)
        if entry is None:
            return None
        return entry.get("last_key")

    def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
        """Order buckets so interrupted/never-scanned ones are scanned first.

        Within each group, buckets are sorted by how long ago they were
        last scanned (oldest first).
        """
        data = self.load()
        buckets_info = data.get("buckets", {})

        incomplete = []
        complete = []
        for name in bucket_names:
            entry = buckets_info.get(name)
            if entry is None:
                # Never scanned: highest priority.
                incomplete.append((name, 0.0))
            elif entry.get("last_key") is not None:
                # Interrupted mid-bucket.
                incomplete.append((name, entry.get("last_scanned", 0.0)))
            else:
                complete.append((name, entry.get("last_scanned", 0.0)))

        incomplete.sort(key=lambda x: x[1])
        complete.sort(key=lambda x: x[1])

        return [n for n, _ in incomplete] + [n for n, _ in complete]

    def get_info(self) -> Dict[str, Any]:
        """Summarize tracked buckets for status/debug endpoints."""
        data = self.load()
        buckets = data.get("buckets", {})
        return {
            "tracked_buckets": len(buckets),
            "buckets": {
                name: {
                    "last_scanned": info.get("last_scanned"),
                    "last_key": info.get("last_key"),
                    "completed": info.get("completed", False),
                }
                for name, info in buckets.items()
            },
        }
||||
|
||||
# Cap on per-run stored issue details; counters keep counting past this.
MAX_ISSUES = 500
||||
|
||||
|
||||
class IntegrityChecker:
    """Periodic, resumable scanner that detects (and optionally heals)
    inconsistencies between object files and their metadata."""

    # Hidden system area under the storage root.
    SYSTEM_ROOT = ".myfsio.sys"
    SYSTEM_BUCKETS_DIR = "buckets"
    # Subdirectory names inside the per-bucket system area.
    BUCKET_META_DIR = "meta"
    BUCKET_VERSIONS_DIR = "versions"
    # Legacy in-bucket housekeeping folders, never treated as object data.
    INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}
||||
def __init__(
    self,
    storage_root: Path,
    interval_hours: float = 24.0,
    batch_size: int = 1000,
    auto_heal: bool = False,
    dry_run: bool = False,
    max_history: int = 50,
    io_throttle_ms: int = 10,
) -> None:
    """Configure the checker.

    Args:
        storage_root: Root directory holding bucket data and the system area.
        interval_hours: Delay between automatic scan cycles.
        batch_size: Maximum objects examined per run (resumed via cursor).
        auto_heal: When True, fixable issues are repaired during the scan.
        dry_run: When True, report issues without modifying anything.
        max_history: Number of execution records kept by the history store.
        io_throttle_ms: Sleep inserted between filesystem operations.
    """
    self.storage_root = Path(storage_root)
    self.interval_seconds = interval_hours * 3600.0
    self.batch_size = batch_size
    self.auto_heal = auto_heal
    self.dry_run = dry_run
    # Timer for the next scheduled cycle; None when not armed.
    self._timer: Optional[threading.Timer] = None
    self._shutdown = False
    # Serializes scans: run_now() refuses to overlap with itself.
    self._lock = threading.Lock()
    self._scanning = False
    self._scan_start_time: Optional[float] = None
    # Per-operation sleep in seconds; negative input clamps to 0.
    self._io_throttle = max(0, io_throttle_ms) / 1000.0
    self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
    self.cursor_store = IntegrityCursorStore(self.storage_root)
def start(self) -> None:
    """Begin periodic scanning; calling again while armed is a no-op."""
    if self._timer is not None:
        return
    self._shutdown = False
    self._schedule_next()
    interval_h = self.interval_seconds / 3600.0
    logger.info(
        "Integrity checker started: interval=%.1fh, batch_size=%d, auto_heal=%s, dry_run=%s",
        interval_h,
        self.batch_size,
        self.auto_heal,
        self.dry_run,
    )
def stop(self) -> None:
    """Cancel any pending timer and mark the checker as shut down."""
    self._shutdown = True
    timer = self._timer
    if timer:
        timer.cancel()
        self._timer = None
    logger.info("Integrity checker stopped")
def _schedule_next(self) -> None:
    """Arm a daemon timer for the next cycle unless shutting down."""
    if self._shutdown:
        return
    timer = threading.Timer(self.interval_seconds, self._run_cycle)
    timer.daemon = True
    self._timer = timer
    timer.start()
def _run_cycle(self) -> None:
    """Timer callback: run one scan, then re-arm the timer."""
    if self._shutdown:
        return
    try:
        self.run_now()
    except Exception as exc:
        # Broad catch is deliberate: a failing cycle must not kill scheduling.
        logger.error("Integrity check cycle failed: %s", exc)
    finally:
        self._schedule_next()
def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
    """Execute one batched integrity scan synchronously.

    Buckets are visited in cursor-priority order; each bucket's position is
    persisted so the next run resumes where the batch budget ran out.

    Args:
        auto_heal: Per-call override of the configured auto-heal flag.
        dry_run: Per-call override of the configured dry-run flag.

    Returns:
        The populated IntegrityResult for this run.

    Raises:
        RuntimeError: if another scan is already running (non-blocking lock).
    """
    if not self._lock.acquire(blocking=False):
        raise RuntimeError("Integrity scan is already in progress")

    try:
        self._scanning = True
        self._scan_start_time = time.time()

        effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
        effective_dry_run = dry_run if dry_run is not None else self.dry_run

        start = self._scan_start_time
        result = IntegrityResult()

        bucket_names = self._list_bucket_names()
        # Drop cursors for deleted buckets, then prioritize interrupted ones.
        self.cursor_store.clean_stale(bucket_names)
        ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)

        for bucket_name in ordered_buckets:
            if self._batch_exhausted(result):
                break
            result.buckets_scanned += 1
            cursor_key = self.cursor_store.get_last_key(bucket_name)
            # The three key-ordered checks each return the last key they
            # reached when the batch budget ran out, or None when done.
            key_corrupted = self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            key_orphaned = self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            key_phantom = self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
            self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
            self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
            returned_keys = [k for k in (key_corrupted, key_orphaned, key_phantom) if k is not None]
            bucket_exhausted = self._batch_exhausted(result)
            if bucket_exhausted and returned_keys:
                # Resume next run from the smallest key any check reached,
                # so no check skips keys it has not yet seen.
                self.cursor_store.update_bucket(bucket_name, time.time(), last_key=min(returned_keys))
            else:
                self.cursor_store.update_bucket(bucket_name, time.time(), completed=True)

        result.execution_time_seconds = time.time() - start

        # Only log when something was found (or went wrong) to keep logs quiet.
        if result.has_issues or result.errors:
            logger.info(
                "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
                "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
                result.execution_time_seconds,
                result.corrupted_objects,
                result.orphaned_objects,
                result.phantom_metadata,
                result.stale_versions,
                result.etag_cache_inconsistencies,
                result.legacy_metadata_drifts,
                result.issues_healed,
                len(result.errors),
                " (dry run)" if effective_dry_run else "",
            )

        record = IntegrityExecutionRecord(
            timestamp=time.time(),
            result=result.to_dict(),
            dry_run=effective_dry_run,
            auto_heal=effective_auto_heal,
        )
        self.history_store.add(record)

        return result
    finally:
        self._scanning = False
        self._scan_start_time = None
        self._lock.release()
def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
    """Start run_now() on a background daemon thread.

    Returns False (and starts nothing) when a scan is already active.
    """
    if self._scanning:
        return False
    worker = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
    worker.start()
    return True
||||
def _system_path(self) -> Path:
    """Absolute path of the hidden system area under the storage root."""
    root = self.storage_root
    return root / self.SYSTEM_ROOT
def _list_bucket_names(self) -> List[str]:
    """Names of bucket directories under the storage root.

    The hidden system directory is excluded; an unreadable root yields
    whatever was collected before the error.
    """
    found: List[str] = []
    try:
        for child in self.storage_root.iterdir():
            if child.name == self.SYSTEM_ROOT:
                continue
            if child.is_dir():
                found.append(child.name)
    except OSError:
        pass
    return found
def _throttle(self) -> bool:
    """Sleep the configured I/O pause; return True when shutdown was requested."""
    if self._shutdown:
        return True
    pause = self._io_throttle
    if pause > 0:
        time.sleep(pause)
    # Shutdown may have been requested while we slept.
    return self._shutdown
def _batch_exhausted(self, result: IntegrityResult) -> bool:
    """True once shutdown was requested or this run hit its object budget."""
    if self._shutdown:
        return True
    return result.objects_scanned >= self.batch_size
def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
    """Record issue detail, capped at MAX_ISSUES to bound report size."""
    if len(result.issues) >= MAX_ISSUES:
        return
    result.issues.append(issue)
def _collect_index_keys(
    self, meta_root: Path, cursor_key: Optional[str] = None,
) -> Dict[str, Dict[str, Any]]:
    """Gather every indexed object key under *meta_root*.

    Args:
        meta_root: Root of the per-bucket metadata index tree.
        cursor_key: When set, only keys lexicographically after it are
            returned (resume point for batched scans).

    Returns:
        Mapping of full object key -> {"entry": index entry,
        "index_file": owning ``_index.json`` Path, "key_name": key relative
        to that index file's directory}.
    """
    all_keys: Dict[str, Dict[str, Any]] = {}
    if not meta_root.exists():
        return all_keys
    try:
        for index_file in meta_root.rglob("_index.json"):
            if not index_file.is_file():
                continue
            rel_dir = index_file.parent.relative_to(meta_root)
            dir_prefix = "" if rel_dir == Path(".") else rel_dir.as_posix()
            if cursor_key is not None and dir_prefix:
                # Prune whole directories that sort entirely before the
                # cursor (the cursor neither lies inside nor after them).
                full_prefix = dir_prefix + "/"
                if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                    continue
            try:
                index_data = json.loads(index_file.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                # Unreadable/corrupt index files are skipped silently here.
                continue
            for key_name, entry in index_data.items():
                full_key = (dir_prefix + "/" + key_name) if dir_prefix else key_name
                # Skip keys at or before the resume point.
                if cursor_key is not None and full_key <= cursor_key:
                    continue
                all_keys[full_key] = {
                    "entry": entry,
                    "index_file": index_file,
                    "key_name": key_name,
                }
    except OSError:
        pass
    return all_keys
||||
def _walk_bucket_files_sorted(
    self, bucket_path: Path, cursor_key: Optional[str] = None,
):
    """Yield object keys under *bucket_path* in lexicographic key order.

    Directory entries are sorted with a trailing "/" appended to directory
    names so traversal order matches how full keys (with "/" separators)
    compare as strings.  Top-level internal housekeeping folders are
    skipped, and when *cursor_key* is given only keys after it are yielded,
    with whole subtrees that sort before the cursor pruned early.
    """
    def _walk(dir_path: Path, prefix: str):
        try:
            entries = list(os.scandir(dir_path))
        except OSError:
            return

        def _sort_key(e):
            # Append "/" for directories so sibling files and directory
            # subtrees compare the same way their full keys do.
            if e.is_dir(follow_symlinks=False):
                return e.name + "/"
            return e.name

        entries.sort(key=_sort_key)

        for entry in entries:
            if entry.is_dir(follow_symlinks=False):
                if not prefix and entry.name in self.INTERNAL_FOLDERS:
                    continue
                new_prefix = (prefix + "/" + entry.name) if prefix else entry.name
                if cursor_key is not None:
                    # Prune subtrees that sort entirely before the cursor.
                    full_prefix = new_prefix + "/"
                    if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                        continue
                yield from _walk(Path(entry.path), new_prefix)
            elif entry.is_file(follow_symlinks=False):
                full_key = (prefix + "/" + entry.name) if prefix else entry.name
                if cursor_key is not None and full_key <= cursor_key:
                    continue
                yield full_key

    yield from _walk(bucket_path, "")
||||
def _check_corrupted_objects(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect objects whose on-disk content no longer matches the indexed etag.

    Scans the bucket's index entries in sorted key order (resuming after
    *cursor_key* when given), recomputes each object's etag and compares it
    to the ``__etag__`` stored in the metadata index.  With *auto_heal* and
    not *dry_run*, the index entry is rewritten with the actual etag/size/
    mtime — the file on disk is treated as authoritative.

    Returns the last key scanned (the resume cursor), or ``None`` if the
    scan did not start (batch already exhausted or no metadata root).
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    if not meta_root.exists():
        return None

    last_key = None
    try:
        all_keys = self._collect_index_keys(meta_root, cursor_key)
        sorted_keys = sorted(all_keys.keys())

        for full_key in sorted_keys:
            # Bail out early (returning the cursor) if throttled or the
            # per-batch issue budget is spent.
            if self._throttle():
                return last_key
            if self._batch_exhausted(result):
                return last_key

            info = all_keys[full_key]
            entry = info["entry"]
            index_file = info["index_file"]
            key_name = info["key_name"]

            object_path = bucket_path / full_key
            if not object_path.exists():
                # Missing files are phantom metadata — handled elsewhere.
                continue

            result.objects_scanned += 1
            last_key = full_key

            meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
            stored_etag = meta.get("__etag__")
            if not stored_etag:
                # No recorded etag → nothing to compare against.
                continue

            try:
                actual_etag = _compute_etag(object_path)
            except OSError:
                continue

            if actual_etag != stored_etag:
                result.corrupted_objects += 1
                issue = IntegrityIssue(
                    issue_type="corrupted_object",
                    bucket=bucket_name,
                    key=full_key,
                    detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
                )

                if auto_heal and not dry_run:
                    try:
                        stat = object_path.stat()
                        # Disk content wins: refresh etag/size/mtime in the index.
                        meta["__etag__"] = actual_etag
                        meta["__size__"] = str(stat.st_size)
                        meta["__last_modified__"] = str(stat.st_mtime)
                        try:
                            index_data = json.loads(index_file.read_text(encoding="utf-8"))
                        except (OSError, json.JSONDecodeError):
                            index_data = {}
                        index_data[key_name] = {"metadata": meta}
                        self._atomic_write_index(index_file, index_data)
                        issue.healed = True
                        issue.heal_action = "updated etag in index"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check corrupted {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_orphaned_objects(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect files on disk that have no corresponding metadata index entry.

    Walks the bucket's data files in sorted order (resuming after
    *cursor_key*) and checks each key against the ``_index.json`` for its
    parent directory.  With *auto_heal* and not *dry_run*, a fresh metadata
    entry (etag/size/mtime) is synthesized from the file itself.

    Returns the last key scanned, to be used as the resume cursor.
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    last_key = None
    try:
        for full_key in self._walk_bucket_files_sorted(bucket_path, cursor_key):
            if self._throttle():
                return last_key
            if self._batch_exhausted(result):
                return last_key

            result.objects_scanned += 1
            last_key = full_key
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent

            # Index files are per-directory: bucket-root keys live in the
            # root _index.json, nested keys in their parent's.
            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"

            has_entry = False
            if index_path.exists():
                try:
                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                    has_entry = key_name in index_data
                except (OSError, json.JSONDecodeError):
                    # Unreadable index → treat as missing entry below.
                    pass

            if not has_entry:
                result.orphaned_objects += 1
                issue = IntegrityIssue(
                    issue_type="orphaned_object",
                    bucket=bucket_name,
                    key=full_key,
                    detail="file exists without metadata entry",
                )

                if auto_heal and not dry_run:
                    try:
                        object_path = bucket_path / full_key
                        etag = _compute_etag(object_path)
                        stat = object_path.stat()
                        meta = {
                            "__etag__": etag,
                            "__size__": str(stat.st_size),
                            "__last_modified__": str(stat.st_mtime),
                        }
                        # Merge into the existing index rather than clobbering
                        # sibling entries.
                        index_data = {}
                        if index_path.exists():
                            try:
                                index_data = json.loads(index_path.read_text(encoding="utf-8"))
                            except (OSError, json.JSONDecodeError):
                                pass
                        index_data[key_name] = {"metadata": meta}
                        self._atomic_write_index(index_path, index_data)
                        issue.healed = True
                        issue.heal_action = "created metadata entry"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal orphaned {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check orphaned {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_phantom_metadata(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect index entries whose backing file no longer exists on disk.

    With *auto_heal* and not *dry_run*, stale entries are batched per index
    file and removed in one rewrite after the scan loop; an index file that
    becomes empty is deleted outright.  Note issues are optimistically
    marked healed during the loop — if the later rewrite fails, the error
    is recorded in ``result.errors``.

    Returns the last key scanned (resume cursor) or ``None``.
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    if not meta_root.exists():
        return None

    last_key = None
    try:
        all_keys = self._collect_index_keys(meta_root, cursor_key)
        sorted_keys = sorted(all_keys.keys())

        # index_file -> list of key names to drop from that file.
        heal_by_index: Dict[Path, List[str]] = {}

        for full_key in sorted_keys:
            if self._batch_exhausted(result):
                break

            result.objects_scanned += 1
            last_key = full_key

            object_path = bucket_path / full_key
            if not object_path.exists():
                result.phantom_metadata += 1
                info = all_keys[full_key]
                issue = IntegrityIssue(
                    issue_type="phantom_metadata",
                    bucket=bucket_name,
                    key=full_key,
                    detail="metadata entry without file on disk",
                )
                if auto_heal and not dry_run:
                    index_file = info["index_file"]
                    heal_by_index.setdefault(index_file, []).append(info["key_name"])
                    issue.healed = True
                    issue.heal_action = "removed stale index entry"
                    result.issues_healed += 1
                self._add_issue(result, issue)

        if heal_by_index and auto_heal and not dry_run:
            # One read-modify-write per affected index file.
            for index_file, keys_to_remove in heal_by_index.items():
                try:
                    index_data = json.loads(index_file.read_text(encoding="utf-8"))
                    for k in keys_to_remove:
                        index_data.pop(k, None)
                    if index_data:
                        self._atomic_write_index(index_file, index_data)
                    else:
                        index_file.unlink(missing_ok=True)
                except OSError as e:
                    result.errors.append(f"heal phantom {bucket_name}: {e}")
    except OSError as e:
        result.errors.append(f"check phantom {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_stale_versions(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect version files missing their pair (data ``.bin`` vs manifest ``.json``).

    Each versioned key directory is expected to hold matched ``<stem>.bin``
    and ``<stem>.json`` pairs.  An unmatched file on either side is flagged
    as ``stale_version`` and, with *auto_heal* and not *dry_run*, deleted.
    """
    if self._batch_exhausted(result):
        return
    versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR

    if not versions_root.exists():
        return

    try:
        for key_dir in versions_root.rglob("*"):
            if self._throttle():
                return
            if self._batch_exhausted(result):
                return
            if not key_dir.is_dir():
                continue

            bin_files = {f.stem: f for f in key_dir.glob("*.bin")}
            json_files = {f.stem: f for f in key_dir.glob("*.json")}

            # Pass 1: data blobs without a manifest.
            for stem, bin_file in bin_files.items():
                if self._batch_exhausted(result):
                    return
                result.objects_scanned += 1
                if stem not in json_files:
                    result.stale_versions += 1
                    issue = IntegrityIssue(
                        issue_type="stale_version",
                        bucket=bucket_name,
                        key=f"{key_dir.relative_to(versions_root).as_posix()}/{bin_file.name}",
                        detail="version data without manifest",
                    )
                    if auto_heal and not dry_run:
                        try:
                            bin_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "removed orphaned version data"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal stale version {bin_file}: {e}")
                    self._add_issue(result, issue)

            # Pass 2: manifests without a data blob.
            for stem, json_file in json_files.items():
                if self._batch_exhausted(result):
                    return
                result.objects_scanned += 1
                if stem not in bin_files:
                    result.stale_versions += 1
                    issue = IntegrityIssue(
                        issue_type="stale_version",
                        bucket=bucket_name,
                        key=f"{key_dir.relative_to(versions_root).as_posix()}/{json_file.name}",
                        detail="version manifest without data",
                    )
                    if auto_heal and not dry_run:
                        try:
                            json_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "removed orphaned version manifest"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal stale version {json_file}: {e}")
                    self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check stale versions {bucket_name}: {e}")
|
||||
|
||||
def _check_etag_cache(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect etag cache entries that disagree with the metadata index.

    Compares every entry in the bucket's ``etag_index.json`` against the
    ``__etag__`` recorded in the per-directory metadata index.  Healing is
    all-or-nothing: on any mismatch the whole cache file is deleted
    (presumably rebuilt lazily elsewhere — not visible from here) and all
    of this bucket's mismatch issues are marked healed.
    """
    if self._batch_exhausted(result):
        return
    etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"

    if not etag_index_path.exists():
        return

    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
    if not meta_root.exists():
        return

    try:
        etag_cache = json.loads(etag_index_path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        # Unreadable cache: nothing to validate.
        return

    found_mismatch = False

    for full_key, cached_etag in etag_cache.items():
        if self._batch_exhausted(result):
            break
        result.objects_scanned += 1
        key_path = Path(full_key)
        key_name = key_path.name
        parent = key_path.parent

        # Locate the per-directory index holding this key's metadata.
        if parent == Path("."):
            index_path = meta_root / "_index.json"
        else:
            index_path = meta_root / parent / "_index.json"

        if not index_path.exists():
            continue

        try:
            index_data = json.loads(index_path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            continue

        entry = index_data.get(key_name)
        if not entry:
            continue

        meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
        stored_etag = meta.get("__etag__")

        if stored_etag and cached_etag != stored_etag:
            result.etag_cache_inconsistencies += 1
            found_mismatch = True
            issue = IntegrityIssue(
                issue_type="etag_cache_inconsistency",
                bucket=bucket_name,
                key=full_key,
                detail=f"cached_etag={cached_etag} index_etag={stored_etag}",
            )
            self._add_issue(result, issue)

    if found_mismatch and auto_heal and not dry_run:
        try:
            # The index is authoritative; drop the whole stale cache file.
            etag_index_path.unlink(missing_ok=True)
            for issue in result.issues:
                if issue.issue_type == "etag_cache_inconsistency" and issue.bucket == bucket_name and not issue.healed:
                    issue.healed = True
                    issue.heal_action = "deleted etag_index.json"
                    result.issues_healed += 1
        except OSError as e:
            result.errors.append(f"heal etag cache {bucket_name}: {e}")
|
||||
|
||||
def _check_legacy_metadata(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect leftover legacy ``.meta.json`` files under ``<bucket>/.meta``.

    Two drift cases are flagged as ``legacy_metadata_drift``:
    - the key has no entry in the new index → heal by migrating the legacy
      metadata into the index and deleting the legacy file;
    - the key has an index entry that differs from the legacy file → heal
      by deleting the legacy file (the index is authoritative).
    """
    if self._batch_exhausted(result):
        return
    legacy_meta_root = self.storage_root / bucket_name / ".meta"
    if not legacy_meta_root.exists():
        return

    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    try:
        for meta_file in legacy_meta_root.rglob("*.meta.json"):
            if self._throttle():
                return
            if self._batch_exhausted(result):
                return
            if not meta_file.is_file():
                continue

            result.objects_scanned += 1
            try:
                rel = meta_file.relative_to(legacy_meta_root)
            except ValueError:
                continue

            # "<key>.meta.json" → object key.
            full_key = rel.as_posix().removesuffix(".meta.json")
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent

            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"

            try:
                legacy_data = json.loads(meta_file.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                continue

            index_entry = None
            if index_path.exists():
                try:
                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                    index_entry = index_data.get(key_name)
                except (OSError, json.JSONDecodeError):
                    pass

            if index_entry is None:
                # Case 1: never migrated — the legacy file is the only copy.
                result.legacy_metadata_drifts += 1
                issue = IntegrityIssue(
                    issue_type="legacy_metadata_drift",
                    bucket=bucket_name,
                    key=full_key,
                    detail="unmigrated legacy .meta.json",
                )

                if auto_heal and not dry_run:
                    try:
                        index_data = {}
                        if index_path.exists():
                            try:
                                index_data = json.loads(index_path.read_text(encoding="utf-8"))
                            except (OSError, json.JSONDecodeError):
                                pass
                        index_data[key_name] = {"metadata": legacy_data}
                        self._atomic_write_index(index_path, index_data)
                        meta_file.unlink(missing_ok=True)
                        issue.healed = True
                        issue.heal_action = "migrated to index and deleted legacy file"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal legacy {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
            else:
                # Case 2: both exist — flag only when contents diverge.
                index_meta = index_entry.get("metadata", {}) if isinstance(index_entry, dict) else {}
                if legacy_data != index_meta:
                    result.legacy_metadata_drifts += 1
                    issue = IntegrityIssue(
                        issue_type="legacy_metadata_drift",
                        bucket=bucket_name,
                        key=full_key,
                        detail="legacy .meta.json differs from index entry",
                    )

                    if auto_heal and not dry_run:
                        try:
                            meta_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "deleted legacy file (index is authoritative)"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal legacy drift {bucket_name}/{full_key}: {e}")

                    self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check legacy meta {bucket_name}: {e}")
|
||||
|
||||
@staticmethod
|
||||
def _atomic_write_index(index_path: Path, data: Dict[str, Any]) -> None:
|
||||
index_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
tmp_path = index_path.with_suffix(".tmp")
|
||||
try:
|
||||
with open(tmp_path, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f)
|
||||
os.replace(str(tmp_path), str(index_path))
|
||||
except BaseException:
|
||||
try:
|
||||
tmp_path.unlink(missing_ok=True)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
    """Return up to *limit* past scan records (skipping *offset*) as plain dicts."""
    return [
        record.to_dict()
        for record in self.history_store.get_history(limit, offset)
    ]
|
||||
|
||||
def get_status(self) -> dict:
    """Return a snapshot of the scanner's configuration and runtime state.

    Includes enable/run/scan flags, the configured interval, batch size,
    heal mode, throttle, elapsed time of an in-progress scan (only while
    scanning), and the resume-cursor info from the cursor store.
    """
    status: Dict[str, Any] = {
        # "enabled" stays true while a timer exists even during shutdown.
        "enabled": not self._shutdown or self._timer is not None,
        "running": self._timer is not None and not self._shutdown,
        "scanning": self._scanning,
        "interval_hours": self.interval_seconds / 3600.0,
        "batch_size": self.batch_size,
        "auto_heal": self.auto_heal,
        "dry_run": self.dry_run,
        # _io_throttle is stored in seconds; report milliseconds.
        "io_throttle_ms": round(self._io_throttle * 1000),
    }
    if self._scanning and self._scan_start_time is not None:
        status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
    status["cursor"] = self.cursor_store.get_info()
    return status
|
||||
30
app/kms.py
30
app/kms.py
@@ -160,7 +160,6 @@ class KMSManager:
|
||||
self.generate_data_key_max_bytes = generate_data_key_max_bytes
|
||||
self._keys: Dict[str, KMSKey] = {}
|
||||
self._master_key: bytes | None = None
|
||||
self._master_aesgcm: AESGCM | None = None
|
||||
self._loaded = False
|
||||
|
||||
@property
|
||||
@@ -192,7 +191,6 @@ class KMSManager:
|
||||
msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
else:
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
self._master_aesgcm = AESGCM(self._master_key)
|
||||
return self._master_key
|
||||
|
||||
def _load_keys(self) -> None:
|
||||
@@ -233,16 +231,18 @@ class KMSManager:
|
||||
_set_secure_file_permissions(self.keys_path)
|
||||
|
||||
def _encrypt_key_material(self, key_material: bytes) -> bytes:
|
||||
_ = self.master_key
|
||||
"""Encrypt key material with the master key."""
|
||||
aesgcm = AESGCM(self.master_key)
|
||||
nonce = secrets.token_bytes(12)
|
||||
ciphertext = self._master_aesgcm.encrypt(nonce, key_material, None)
|
||||
ciphertext = aesgcm.encrypt(nonce, key_material, None)
|
||||
return nonce + ciphertext
|
||||
|
||||
|
||||
def _decrypt_key_material(self, encrypted: bytes) -> bytes:
|
||||
_ = self.master_key
|
||||
"""Decrypt key material with the master key."""
|
||||
aesgcm = AESGCM(self.master_key)
|
||||
nonce = encrypted[:12]
|
||||
ciphertext = encrypted[12:]
|
||||
return self._master_aesgcm.decrypt(nonce, ciphertext, None)
|
||||
return aesgcm.decrypt(nonce, ciphertext, None)
|
||||
|
||||
def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
|
||||
"""Create a new KMS key."""
|
||||
@@ -404,6 +404,22 @@ class KMSManager:
|
||||
plaintext, _ = self.decrypt(encrypted_key, context)
|
||||
return plaintext
|
||||
|
||||
def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
|
||||
"""Get an encryption provider for a specific key."""
|
||||
self._load_keys()
|
||||
|
||||
if key_id is None:
|
||||
if not self._keys:
|
||||
key = self.create_key("Default KMS Key")
|
||||
key_id = key.key_id
|
||||
else:
|
||||
key_id = next(iter(self._keys.keys()))
|
||||
|
||||
if key_id not in self._keys:
|
||||
raise EncryptionError(f"Key not found: {key_id}")
|
||||
|
||||
return KMSEncryptionProvider(self, key_id)
|
||||
|
||||
def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
|
||||
source_context: Dict[str, str] | None = None,
|
||||
destination_context: Dict[str, str] | None = None) -> bytes:
|
||||
|
||||
@@ -15,23 +15,29 @@ from typing import Any, Dict, List, Optional
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
from urllib3.util.connection import create_connection as _urllib3_create_connection
|
||||
|
||||
|
||||
def _resolve_and_check_url(url: str, allow_internal: bool = False) -> Optional[str]:
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
"""Check if a URL is safe to make requests to (not internal/private).
|
||||
|
||||
Args:
|
||||
url: The URL to check.
|
||||
allow_internal: If True, allows internal/private IP addresses.
|
||||
Use for self-hosted deployments on internal networks.
|
||||
"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
hostname = parsed.hostname
|
||||
if not hostname:
|
||||
return None
|
||||
return False
|
||||
cloud_metadata_hosts = {
|
||||
"metadata.google.internal",
|
||||
"169.254.169.254",
|
||||
}
|
||||
if hostname.lower() in cloud_metadata_hosts:
|
||||
return None
|
||||
return False
|
||||
if allow_internal:
|
||||
return hostname
|
||||
return True
|
||||
blocked_hosts = {
|
||||
"localhost",
|
||||
"127.0.0.1",
|
||||
@@ -40,46 +46,17 @@ def _resolve_and_check_url(url: str, allow_internal: bool = False) -> Optional[s
|
||||
"[::1]",
|
||||
}
|
||||
if hostname.lower() in blocked_hosts:
|
||||
return None
|
||||
return False
|
||||
try:
|
||||
resolved_ip = socket.gethostbyname(hostname)
|
||||
ip = ipaddress.ip_address(resolved_ip)
|
||||
if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
|
||||
return None
|
||||
return resolved_ip
|
||||
return False
|
||||
except (socket.gaierror, ValueError):
|
||||
return None
|
||||
return False
|
||||
return True
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
return _resolve_and_check_url(url, allow_internal) is not None
|
||||
|
||||
|
||||
_dns_pin_lock = threading.Lock()
|
||||
|
||||
|
||||
def _pinned_post(url: str, pinned_ip: str, **kwargs: Any) -> requests.Response:
|
||||
parsed = urlparse(url)
|
||||
hostname = parsed.hostname or ""
|
||||
session = requests.Session()
|
||||
original_create = _urllib3_create_connection
|
||||
|
||||
def _create_pinned(address: Any, *args: Any, **kw: Any) -> Any:
|
||||
host, req_port = address
|
||||
if host == hostname:
|
||||
return original_create((pinned_ip, req_port), *args, **kw)
|
||||
return original_create(address, *args, **kw)
|
||||
|
||||
import urllib3.util.connection as _conn_mod
|
||||
with _dns_pin_lock:
|
||||
_conn_mod.create_connection = _create_pinned
|
||||
try:
|
||||
return session.post(url, **kwargs)
|
||||
finally:
|
||||
_conn_mod.create_connection = original_create
|
||||
|
||||
return False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -367,18 +344,16 @@ class NotificationService:
|
||||
self._queue.task_done()
|
||||
|
||||
def _send_notification(self, event: NotificationEvent, destination: WebhookDestination) -> None:
|
||||
resolved_ip = _resolve_and_check_url(destination.url, allow_internal=self._allow_internal_endpoints)
|
||||
if not resolved_ip:
|
||||
raise RuntimeError(f"Blocked request (SSRF protection): {destination.url}")
|
||||
if not _is_safe_url(destination.url, allow_internal=self._allow_internal_endpoints):
|
||||
raise RuntimeError(f"Blocked request to cloud metadata service (SSRF protection): {destination.url}")
|
||||
payload = event.to_s3_event()
|
||||
headers = {"Content-Type": "application/json", **destination.headers}
|
||||
|
||||
last_error = None
|
||||
for attempt in range(destination.retry_count):
|
||||
try:
|
||||
response = _pinned_post(
|
||||
response = requests.post(
|
||||
destination.url,
|
||||
resolved_ip,
|
||||
json=payload,
|
||||
headers=headers,
|
||||
timeout=destination.timeout_seconds,
|
||||
|
||||
@@ -2,17 +2,13 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
MAX_LATENCY_SAMPLES = 5000
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -26,17 +22,6 @@ class OperationStats:
|
||||
latency_max_ms: float = 0.0
|
||||
bytes_in: int = 0
|
||||
bytes_out: int = 0
|
||||
latency_samples: List[float] = field(default_factory=list)
|
||||
|
||||
@staticmethod
|
||||
def _compute_percentile(sorted_data: List[float], p: float) -> float:
|
||||
if not sorted_data:
|
||||
return 0.0
|
||||
k = (len(sorted_data) - 1) * (p / 100.0)
|
||||
f = int(k)
|
||||
c = min(f + 1, len(sorted_data) - 1)
|
||||
d = k - f
|
||||
return sorted_data[f] + d * (sorted_data[c] - sorted_data[f])
|
||||
|
||||
def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
|
||||
self.count += 1
|
||||
@@ -51,17 +36,10 @@ class OperationStats:
|
||||
self.latency_max_ms = latency_ms
|
||||
self.bytes_in += bytes_in
|
||||
self.bytes_out += bytes_out
|
||||
if len(self.latency_samples) < MAX_LATENCY_SAMPLES:
|
||||
self.latency_samples.append(latency_ms)
|
||||
else:
|
||||
j = random.randint(0, self.count - 1)
|
||||
if j < MAX_LATENCY_SAMPLES:
|
||||
self.latency_samples[j] = latency_ms
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
|
||||
min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
|
||||
sorted_latencies = sorted(self.latency_samples)
|
||||
return {
|
||||
"count": self.count,
|
||||
"success_count": self.success_count,
|
||||
@@ -69,9 +47,6 @@ class OperationStats:
|
||||
"latency_avg_ms": round(avg_latency, 2),
|
||||
"latency_min_ms": round(min_latency, 2),
|
||||
"latency_max_ms": round(self.latency_max_ms, 2),
|
||||
"latency_p50_ms": round(self._compute_percentile(sorted_latencies, 50), 2),
|
||||
"latency_p95_ms": round(self._compute_percentile(sorted_latencies, 95), 2),
|
||||
"latency_p99_ms": round(self._compute_percentile(sorted_latencies, 99), 2),
|
||||
"bytes_in": self.bytes_in,
|
||||
"bytes_out": self.bytes_out,
|
||||
}
|
||||
@@ -87,11 +62,6 @@ class OperationStats:
|
||||
self.latency_max_ms = other.latency_max_ms
|
||||
self.bytes_in += other.bytes_in
|
||||
self.bytes_out += other.bytes_out
|
||||
combined = self.latency_samples + other.latency_samples
|
||||
if len(combined) > MAX_LATENCY_SAMPLES:
|
||||
random.shuffle(combined)
|
||||
combined = combined[:MAX_LATENCY_SAMPLES]
|
||||
self.latency_samples = combined
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -139,8 +109,8 @@ class OperationMetricsCollector:
|
||||
self.interval_seconds = interval_minutes * 60
|
||||
self.retention_hours = retention_hours
|
||||
self._lock = threading.Lock()
|
||||
self._by_method: Dict[str, OperationStats] = defaultdict(OperationStats)
|
||||
self._by_endpoint: Dict[str, OperationStats] = defaultdict(OperationStats)
|
||||
self._by_method: Dict[str, OperationStats] = {}
|
||||
self._by_endpoint: Dict[str, OperationStats] = {}
|
||||
self._by_status_class: Dict[str, int] = {}
|
||||
self._error_codes: Dict[str, int] = {}
|
||||
self._totals = OperationStats()
|
||||
@@ -212,8 +182,8 @@ class OperationMetricsCollector:
|
||||
self._prune_old_snapshots()
|
||||
self._save_history()
|
||||
|
||||
self._by_method = defaultdict(OperationStats)
|
||||
self._by_endpoint = defaultdict(OperationStats)
|
||||
self._by_method.clear()
|
||||
self._by_endpoint.clear()
|
||||
self._by_status_class.clear()
|
||||
self._error_codes.clear()
|
||||
self._totals = OperationStats()
|
||||
@@ -233,7 +203,12 @@ class OperationMetricsCollector:
|
||||
status_class = f"{status_code // 100}xx"
|
||||
|
||||
with self._lock:
|
||||
if method not in self._by_method:
|
||||
self._by_method[method] = OperationStats()
|
||||
self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
|
||||
|
||||
if endpoint_type not in self._by_endpoint:
|
||||
self._by_endpoint[endpoint_type] = OperationStats()
|
||||
self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)
|
||||
|
||||
self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1
|
||||
|
||||
@@ -176,12 +176,11 @@ class ReplicationFailureStore:
|
||||
self.storage_root = storage_root
|
||||
self.max_failures_per_bucket = max_failures_per_bucket
|
||||
self._lock = threading.Lock()
|
||||
self._cache: Dict[str, List[ReplicationFailure]] = {}
|
||||
|
||||
def _get_failures_path(self, bucket_name: str) -> Path:
|
||||
return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "replication_failures.json"
|
||||
|
||||
def _load_from_disk(self, bucket_name: str) -> List[ReplicationFailure]:
|
||||
def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
|
||||
path = self._get_failures_path(bucket_name)
|
||||
if not path.exists():
|
||||
return []
|
||||
@@ -193,7 +192,7 @@ class ReplicationFailureStore:
|
||||
logger.error(f"Failed to load replication failures for {bucket_name}: {e}")
|
||||
return []
|
||||
|
||||
def _save_to_disk(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
|
||||
def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
|
||||
path = self._get_failures_path(bucket_name)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
data = {"failures": [f.to_dict() for f in failures[:self.max_failures_per_bucket]]}
|
||||
@@ -203,18 +202,6 @@ class ReplicationFailureStore:
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to save replication failures for {bucket_name}: {e}")
|
||||
|
||||
def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
|
||||
if bucket_name in self._cache:
|
||||
return list(self._cache[bucket_name])
|
||||
failures = self._load_from_disk(bucket_name)
|
||||
self._cache[bucket_name] = failures
|
||||
return list(failures)
|
||||
|
||||
def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
|
||||
trimmed = failures[:self.max_failures_per_bucket]
|
||||
self._cache[bucket_name] = trimmed
|
||||
self._save_to_disk(bucket_name, trimmed)
|
||||
|
||||
def add_failure(self, bucket_name: str, failure: ReplicationFailure) -> None:
|
||||
with self._lock:
|
||||
failures = self.load_failures(bucket_name)
|
||||
@@ -240,7 +227,6 @@ class ReplicationFailureStore:
|
||||
|
||||
def clear_failures(self, bucket_name: str) -> None:
|
||||
with self._lock:
|
||||
self._cache.pop(bucket_name, None)
|
||||
path = self._get_failures_path(bucket_name)
|
||||
if path.exists():
|
||||
path.unlink()
|
||||
|
||||
954
app/s3_api.py
954
app/s3_api.py
File diff suppressed because it is too large
Load Diff
296
app/s3_client.py
296
app/s3_client.py
@@ -1,296 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Generator, Optional
|
||||
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError, EndpointConnectionError, ConnectionClosedError
|
||||
from flask import current_app, session
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# User-Agent suffix attached to boto3 clients so the API server can
# recognize requests proxied on behalf of the web UI.
UI_PROXY_USER_AGENT = "MyFSIO-UIProxy/1.0"

# Maps botocore ClientError codes to the HTTP status the proxy surfaces.
_BOTO_ERROR_MAP = {
    "NoSuchBucket": 404,
    "NoSuchKey": 404,
    "NoSuchUpload": 404,
    "BucketAlreadyExists": 409,
    "BucketAlreadyOwnedByYou": 409,
    "BucketNotEmpty": 409,
    "AccessDenied": 403,
    "InvalidAccessKeyId": 403,
    "SignatureDoesNotMatch": 403,
    "InvalidBucketName": 400,
    "InvalidArgument": 400,
    "MalformedXML": 400,
    "EntityTooLarge": 400,
    "QuotaExceeded": 403,
}

# Upload-registry entries older than this many seconds are stale (24 h).
_UPLOAD_REGISTRY_MAX_AGE = 86400
# Minimum spacing between opportunistic stale-entry sweeps (1 h).
_UPLOAD_REGISTRY_CLEANUP_INTERVAL = 3600
|
||||
|
||||
|
||||
class UploadRegistry:
    """Thread-safe map of multipart-upload ids to their (bucket, key).

    Entries expire after ``_UPLOAD_REGISTRY_MAX_AGE`` seconds.  Expired
    entries are dropped lazily on lookup, plus in a rate-limited sweep
    (at most once per ``_UPLOAD_REGISTRY_CLEANUP_INTERVAL`` seconds)
    triggered by ``register``.
    """

    def __init__(self) -> None:
        # upload_id -> (bucket_name, object_key, monotonic timestamp)
        self._entries: dict[str, tuple[str, str, float]] = {}
        self._lock = threading.Lock()
        self._last_cleanup = time.monotonic()

    def register(self, upload_id: str, bucket_name: str, object_key: str) -> None:
        """Record the bucket/key owning an in-flight upload id."""
        with self._lock:
            self._entries[upload_id] = (bucket_name, object_key, time.monotonic())
            self._maybe_cleanup()

    def get_key(self, upload_id: str, bucket_name: str) -> Optional[str]:
        """Return the object key for *upload_id*, or ``None`` if unknown,
        owned by a different bucket, or expired."""
        with self._lock:
            record = self._entries.get(upload_id)
            if record is None:
                return None
            owner_bucket, object_key, registered_at = record
            if owner_bucket != bucket_name:
                # Upload ids are only valid for the bucket they were created in.
                return None
            if time.monotonic() - registered_at > _UPLOAD_REGISTRY_MAX_AGE:
                # Expired — evict eagerly so the registry stays small.
                del self._entries[upload_id]
                return None
            return object_key

    def remove(self, upload_id: str) -> None:
        """Forget an upload id (no-op when absent)."""
        with self._lock:
            self._entries.pop(upload_id, None)

    def _maybe_cleanup(self) -> None:
        # Rate-limited sweep of expired entries; caller holds the lock.
        now = time.monotonic()
        if now - self._last_cleanup < _UPLOAD_REGISTRY_CLEANUP_INTERVAL:
            return
        self._last_cleanup = now
        cutoff = now - _UPLOAD_REGISTRY_MAX_AGE
        expired = [
            uid for uid, (_, _, stamp) in self._entries.items() if stamp < cutoff
        ]
        for uid in expired:
            del self._entries[uid]
||||
|
||||
|
||||
class S3ProxyClient:
    """Builds boto3 S3 clients pointed at the internal API endpoint.

    Also owns the shared :class:`UploadRegistry` used by the UI proxy for
    multipart uploads.
    """

    def __init__(self, api_base_url: str, region: str = "us-east-1") -> None:
        if not api_base_url:
            raise ValueError("api_base_url is required for S3ProxyClient")
        self._api_base_url = api_base_url.rstrip("/")
        self._region = region
        self.upload_registry = UploadRegistry()

    @property
    def api_base_url(self) -> str:
        """Base URL of the S3 API, normalized without a trailing slash."""
        return self._api_base_url

    def get_client(self, access_key: str, secret_key: str) -> Any:
        """Create a SigV4-signed boto3 S3 client for the given credentials.

        Raises ValueError when either credential is missing or empty.
        """
        if not (access_key and secret_key):
            raise ValueError("Both access_key and secret_key are required")
        client_config = Config(
            user_agent_extra=UI_PROXY_USER_AGENT,
            connect_timeout=5,
            read_timeout=30,
            retries={"max_attempts": 0},
            signature_version="s3v4",
            s3={"addressing_style": "path"},
            request_checksum_calculation="when_required",
            response_checksum_validation="when_required",
        )
        return boto3.client(
            "s3",
            endpoint_url=self._api_base_url,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            region_name=self._region,
            config=client_config,
        )
|
||||
|
||||
|
||||
def _get_proxy() -> S3ProxyClient:
    """Fetch the app-wide ``S3ProxyClient``, raising if it was never set up."""
    proxy = current_app.extensions.get("s3_proxy")
    if proxy is not None:
        return proxy
    raise RuntimeError(
        "S3 proxy not configured. Set API_BASE_URL or run both API and UI servers."
    )
|
||||
|
||||
|
||||
def _get_session_creds() -> tuple[str, str]:
    """Return ``(access_key, secret_key)`` for the current UI session.

    Raises PermissionError when the session has no credential token, the
    token has expired, or the stored payload is incomplete.
    """
    store = current_app.extensions["secret_store"]
    store.purge_expired()
    token = session.get("cred_token")
    if not token:
        raise PermissionError("Not authenticated")
    payload = store.peek(token)
    if not payload:
        raise PermissionError("Session expired")
    access_key = payload.get("access_key", "")
    secret_key = payload.get("secret_key", "")
    if not (access_key and secret_key):
        raise PermissionError("Invalid session credentials")
    return access_key, secret_key
|
||||
|
||||
|
||||
def get_session_s3_client() -> Any:
    """Return a boto3 S3 client signed with the current session's credentials."""
    proxy = _get_proxy()
    ak, sk = _get_session_creds()
    return proxy.get_client(ak, sk)
|
||||
|
||||
|
||||
def get_upload_registry() -> UploadRegistry:
    """Return the shared multipart-upload registry held by the app proxy."""
    proxy = _get_proxy()
    return proxy.upload_registry
|
||||
|
||||
|
||||
def handle_client_error(exc: ClientError) -> tuple[dict[str, str], int]:
    """Translate a botocore ClientError into a ``(json_payload, status)`` pair.

    The status comes from ``_BOTO_ERROR_MAP`` when the error code is known,
    otherwise from the HTTP status embedded in the boto3 response metadata
    (defaulting to 500).
    """
    err = exc.response.get("Error", {})
    code = err.get("Code", "InternalError")
    message = err.get("Message") or "S3 operation failed"
    status = _BOTO_ERROR_MAP.get(code)
    if status is None:
        status = exc.response.get("ResponseMetadata", {}).get("HTTPStatusCode", 500)
    return {"error": message}, status
|
||||
|
||||
|
||||
def handle_connection_error(exc: Exception) -> tuple[dict[str, str], int]:
    """Log a connectivity failure against the API server and return a 502 payload."""
    logger.error("S3 API connection failed: %s", exc)
    payload = {"error": "S3 API server is unreachable. Ensure the API server is running."}
    return payload, 502
|
||||
|
||||
|
||||
def format_datetime_display(dt: Any, display_tz: str = "UTC") -> str:
    """Proxy to the UI blueprint's human-readable datetime formatter."""
    # Imported lazily to avoid a circular import with the UI module.
    from .ui import _format_datetime_display as _fmt
    return _fmt(dt, display_tz)
|
||||
|
||||
|
||||
def format_datetime_iso(dt: Any, display_tz: str = "UTC") -> str:
    """Proxy to the UI blueprint's ISO-8601 datetime formatter."""
    # Imported lazily to avoid a circular import with the UI module.
    from .ui import _format_datetime_iso as _fmt
    return _fmt(dt, display_tz)
|
||||
|
||||
|
||||
def build_url_templates(bucket_name: str) -> dict[str, str]:
    """Build per-object UI route templates for ``bucket_name``.

    Each URL embeds ``KEY_PLACEHOLDER`` (and ``VERSION_ID_PLACEHOLDER`` for
    version restores) for client-side substitution with the actual key.
    """
    from flask import url_for

    def route(endpoint: str, **extra: str) -> str:
        # All object routes share the bucket and the key placeholder.
        return url_for(
            endpoint,
            bucket_name=bucket_name,
            object_key="KEY_PLACEHOLDER",
            **extra,
        )

    preview = route("ui.object_preview")
    return {
        "preview": preview,
        "download": preview + "?download=1",
        "presign": route("ui.object_presign"),
        "delete": route("ui.delete_object"),
        "versions": route("ui.object_versions"),
        "restore": route(
            "ui.restore_object_version",
            version_id="VERSION_ID_PLACEHOLDER",
        ),
        "tags": route("ui.object_tags"),
        "copy": route("ui.copy_object"),
        "move": route("ui.move_object"),
        "metadata": route("ui.object_metadata"),
    }
|
||||
|
||||
|
||||
def translate_list_objects(
    boto3_response: dict[str, Any],
    url_templates: dict[str, str],
    display_tz: str = "UTC",
    versioning_enabled: bool = False,
) -> dict[str, Any]:
    """Convert a boto3 ``list_objects_v2`` response into the UI's JSON shape.

    Pagination fields (``IsTruncated`` / ``NextContinuationToken``) and the
    server-reported ``KeyCount`` pass through; ETags are unquoted.
    """
    objects_data = [
        {
            "key": obj["Key"],
            "size": obj["Size"],
            "last_modified": obj["LastModified"].isoformat(),
            "last_modified_display": format_datetime_display(obj["LastModified"], display_tz),
            "last_modified_iso": format_datetime_iso(obj["LastModified"], display_tz),
            "etag": obj.get("ETag", "").strip('"'),
        }
        for obj in boto3_response.get("Contents", [])
    ]
    return {
        "objects": objects_data,
        "is_truncated": boto3_response.get("IsTruncated", False),
        "next_continuation_token": boto3_response.get("NextContinuationToken"),
        "total_count": boto3_response.get("KeyCount", len(objects_data)),
        "versioning_enabled": versioning_enabled,
        "url_templates": url_templates,
    }
|
||||
|
||||
|
||||
def get_versioning_via_s3(client: Any, bucket_name: str) -> bool:
    """Return True when the bucket's versioning status is ``"Enabled"``.

    Any ClientError yields False; unexpected error codes (anything other
    than NoSuchBucket) are logged at WARNING level.
    """
    try:
        status = client.get_bucket_versioning(Bucket=bucket_name).get("Status")
    except ClientError as exc:
        code = exc.response.get("Error", {}).get("Code", "")
        if code != "NoSuchBucket":
            logger.warning("Failed to check versioning for %s: %s", bucket_name, code)
        return False
    return status == "Enabled"
|
||||
|
||||
|
||||
def stream_objects_ndjson(
    client: Any,
    bucket_name: str,
    prefix: Optional[str],
    url_templates: dict[str, str],
    display_tz: str = "UTC",
    versioning_enabled: bool = False,
    delimiter: Optional[str] = None,
) -> Generator[str, None, None]:
    """Yield newline-delimited JSON records for an S3 object listing.

    Record order: one ``meta`` line, an initial ``count`` line (total 0),
    then per page of results: ``folder`` lines (CommonPrefixes), ``object``
    lines, and a running ``count`` line.  The stream ends with either a
    ``done`` line or a single ``error`` line (which terminates it early).
    """
    meta_line = json.dumps({
        "type": "meta",
        "versioning_enabled": versioning_enabled,
        "url_templates": url_templates,
    }) + "\n"
    yield meta_line

    # Emit an immediate zero count so the client can render before any
    # listing data arrives.
    yield json.dumps({"type": "count", "total_count": 0}) + "\n"

    kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
    if prefix:
        kwargs["Prefix"] = prefix
    if delimiter:
        kwargs["Delimiter"] = delimiter

    running_count = 0
    try:
        paginator = client.get_paginator("list_objects_v2")
        for page in paginator.paginate(**kwargs):
            # CommonPrefixes are only present when a delimiter was requested.
            for cp in page.get("CommonPrefixes", []):
                yield json.dumps({
                    "type": "folder",
                    "prefix": cp["Prefix"],
                }) + "\n"
            page_contents = page.get("Contents", [])
            for obj in page_contents:
                last_mod = obj["LastModified"]
                yield json.dumps({
                    "type": "object",
                    "key": obj["Key"],
                    "size": obj["Size"],
                    "last_modified": last_mod.isoformat(),
                    "last_modified_display": format_datetime_display(last_mod, display_tz),
                    "last_modified_iso": format_datetime_iso(last_mod, display_tz),
                    "etag": obj.get("ETag", "").strip('"'),
                }) + "\n"
            # Running count only tracks objects, not folder prefixes.
            running_count += len(page_contents)
            yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
    except ClientError as exc:
        error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
        yield json.dumps({"type": "error", "error": error_msg}) + "\n"
        return
    except (EndpointConnectionError, ConnectionClosedError):
        yield json.dumps({"type": "error", "error": "S3 API server is unreachable"}) + "\n"
        return

    yield json.dumps({"type": "done"}) + "\n"
|
||||
@@ -18,18 +18,6 @@ class EphemeralSecretStore:
|
||||
self._store[token] = (payload, expires_at)
|
||||
return token
|
||||
|
||||
    def peek(self, token: str | None) -> Any | None:
        """Return the payload for ``token`` without consuming it.

        Expired entries are evicted lazily on access.  Returns ``None`` for
        a missing, unknown, or expired token.
        """
        if not token:
            return None
        entry = self._store.get(token)
        if not entry:
            return None
        payload, expires_at = entry
        if expires_at < time.time():
            # Lazy eviction: drop the expired entry the first time it is read.
            self._store.pop(token, None)
            return None
        return payload
|
||||
|
||||
def pop(self, token: str | None) -> Any | None:
|
||||
if not token:
|
||||
return None
|
||||
|
||||
1391
app/storage.py
1391
app/storage.py
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
APP_VERSION = "0.4.3"
|
||||
APP_VERSION = "0.2.4"
|
||||
|
||||
|
||||
def get_version() -> str:
|
||||
|
||||
@@ -1,108 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
# Hostname syntax check: lowercase labels of letters/digits with interior
# hyphens, joined by dots.  Assumes RFC-1035-style names — no IDN handling.
_DOMAIN_RE = re.compile(
    r"^(?!-)[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
)


def normalize_domain(raw: str) -> str:
    """Reduce a user-supplied URL or host string to a bare lowercase host.

    Strips any http/https scheme, then path, query, fragment, and finally a
    trailing ``:port`` suffix.
    """
    host = raw.strip().lower()
    for scheme in ("https://", "http://"):
        if host.startswith(scheme):
            host = host[len(scheme):]
    for separator in ("/", "?", "#"):
        host = host.split(separator, 1)[0]
    if ":" in host:
        host = host.rsplit(":", 1)[0]
    return host


def is_valid_domain(domain: str) -> bool:
    """Check that ``domain`` is a syntactically valid DNS name (≤ 253 chars)."""
    if not domain or len(domain) > 253:
        return False
    return _DOMAIN_RE.match(domain) is not None
|
||||
|
||||
|
||||
class WebsiteDomainStore:
    """JSON-file-backed mapping of custom website domains to bucket names.

    The file holds a flat ``{domain: bucket}`` object.  Reads compare the
    file's mtime and hot-reload when it changes on disk; public methods are
    serialized with an internal lock.  Domain keys are lowercased on load
    and on every mutation.
    """

    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._lock = threading.Lock()
        self._domains: Dict[str, str] = {}
        self._last_mtime: float = 0.0
        self.reload()

    def _load_file(self) -> Dict[str, str]:
        """Stat, read, and parse the config file, updating ``_last_mtime``.

        Shared by ``reload`` and ``_maybe_reload`` so the parse/normalize
        logic lives in one place.  Raises OSError / json.JSONDecodeError on
        failure; callers decide how to degrade.
        """
        self._last_mtime = self.config_path.stat().st_mtime
        with open(self.config_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        if isinstance(data, dict):
            return {k.lower(): v for k, v in data.items()}
        return {}

    def reload(self) -> None:
        """Force a reload from disk; a missing or unreadable file yields an empty map."""
        if not self.config_path.exists():
            self._domains = {}
            self._last_mtime = 0.0
            return
        try:
            self._domains = self._load_file()
        except (OSError, json.JSONDecodeError):
            self._domains = {}

    def _maybe_reload(self) -> None:
        """Reload when the file's mtime changed; best-effort, never raises.

        Caller must hold ``_lock``.  On read/parse errors the previous
        mappings are kept (unlike ``reload``, which clears them).
        """
        try:
            if self.config_path.exists():
                if self.config_path.stat().st_mtime != self._last_mtime:
                    self._domains = self._load_file()
            elif self._domains:
                # File was deleted out from under us: drop all mappings.
                self._domains = {}
                self._last_mtime = 0.0
        except (OSError, json.JSONDecodeError):
            pass

    def _save(self) -> None:
        """Persist the current mappings; caller must hold ``_lock``."""
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(self._domains, f, indent=2)
        # Record our own write so the next read does not trigger a reload.
        self._last_mtime = self.config_path.stat().st_mtime

    def list_all(self) -> List[Dict[str, str]]:
        """Return all mappings as ``[{"domain": ..., "bucket": ...}, ...]``."""
        with self._lock:
            self._maybe_reload()
            return [{"domain": d, "bucket": b} for d, b in self._domains.items()]

    def get_bucket(self, domain: str) -> Optional[str]:
        """Return the bucket mapped to ``domain`` (case-insensitive), if any."""
        with self._lock:
            self._maybe_reload()
            return self._domains.get(domain.lower())

    def get_domains_for_bucket(self, bucket: str) -> List[str]:
        """Return every domain currently mapped to ``bucket``."""
        with self._lock:
            self._maybe_reload()
            return [d for d, b in self._domains.items() if b == bucket]

    def set_mapping(self, domain: str, bucket: str) -> None:
        """Create or overwrite the mapping for ``domain`` and persist it."""
        with self._lock:
            self._domains[domain.lower()] = bucket
            self._save()

    def delete_mapping(self, domain: str) -> bool:
        """Remove ``domain``'s mapping; returns False when it was absent."""
        with self._lock:
            key = domain.lower()
            if key not in self._domains:
                return False
            del self._domains[key]
            self._save()
            return True
|
||||
@@ -1,6 +1,5 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
ENGINE="${ENGINE:-rust}"
|
||||
|
||||
exec python run.py --prod --engine "$ENGINE"
|
||||
# Run both services using the python runner in production mode
|
||||
exec python run.py --prod
|
||||
|
||||
426
docs.md
426
docs.md
@@ -7,7 +7,7 @@ This document expands on the README to describe the full workflow for running, c
|
||||
MyFSIO ships two Flask entrypoints that share the same storage, IAM, and bucket-policy state:
|
||||
|
||||
- **API server** – Implements the S3-compatible REST API, policy evaluation, and Signature Version 4 presign service.
|
||||
- **UI server** – Provides the browser console for buckets, IAM, and policies. It proxies all storage operations through the S3 API via boto3 (SigV4-signed), mirroring the architecture used by MinIO and Garage.
|
||||
- **UI server** – Provides the browser console for buckets, IAM, and policies. It proxies to the API for presign operations.
|
||||
|
||||
Both servers read `AppConfig`, so editing JSON stores on disk instantly affects both surfaces.
|
||||
|
||||
@@ -136,24 +136,21 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `MAX_UPLOAD_SIZE` | `1073741824` (1 GiB) | Bytes. Caps incoming uploads in both API + UI. |
|
||||
| `UI_PAGE_SIZE` | `100` | `MaxKeys` hint shown in listings. |
|
||||
| `SECRET_KEY` | Auto-generated | Flask session key. Auto-generates and persists if not set. **Set explicitly in production.** |
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. |
|
||||
| `API_BASE_URL` | `None` | Public URL for presigned URLs. Required behind proxies. |
|
||||
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
|
||||
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
|
||||
| `DISPLAY_TIMEZONE` | `UTC` | Timezone for timestamps in the web UI (e.g., `US/Eastern`, `Asia/Tokyo`). |
|
||||
|
||||
### IAM & Security
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. Encrypted at rest when `SECRET_KEY` is set. |
|
||||
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
|
||||
| `BUCKET_POLICY_PATH` | `data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
|
||||
| `AUTH_MAX_ATTEMPTS` | `5` | Failed login attempts before lockout. |
|
||||
| `AUTH_LOCKOUT_MINUTES` | `15` | Lockout duration after max failed attempts. |
|
||||
| `SESSION_LIFETIME_DAYS` | `30` | How long UI sessions remain valid. |
|
||||
| `SECRET_TTL_SECONDS` | `300` | TTL for ephemeral secrets (presigned URLs). |
|
||||
| `UI_ENFORCE_BUCKET_POLICIES` | `false` | Whether the UI should enforce bucket policies. |
|
||||
| `ADMIN_ACCESS_KEY` | (none) | Custom access key for the admin user on first run or credential reset. If unset, a random key is generated. |
|
||||
| `ADMIN_SECRET_KEY` | (none) | Custom secret key for the admin user on first run or credential reset. If unset, a random key is generated. |
|
||||
|
||||
### CORS (Cross-Origin Resource Sharing)
|
||||
|
||||
@@ -173,16 +170,15 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/<bucket>`). |
|
||||
| `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `/<bucket>/<key>`). |
|
||||
| `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). |
|
||||
| `RATE_LIMIT_ADMIN` | `60 per minute` | Rate limit for admin API endpoints (`/admin/*`). |
|
||||
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
|
||||
|
||||
### Server Configuration
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `SERVER_THREADS` | `0` (auto) | Granian blocking threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
|
||||
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent requests per worker (10-1000). Set to `0` for auto-calculation based on available RAM. |
|
||||
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (128-4096). Set to `0` for auto-calculation (connection_limit × 2). |
|
||||
| `SERVER_THREADS` | `0` (auto) | Waitress worker threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
|
||||
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent connections (10-1000). Set to `0` for auto-calculation based on available RAM. |
|
||||
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (64-4096). Set to `0` for auto-calculation (connection_limit × 2). |
|
||||
| `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |
|
||||
|
||||
### Logging
|
||||
@@ -252,60 +248,6 @@ Once enabled, configure lifecycle rules via:
|
||||
</LifecycleConfiguration>
|
||||
```
|
||||
|
||||
## Garbage Collection
|
||||
|
||||
The garbage collector (GC) automatically cleans up orphaned data that accumulates over time: stale temporary files from failed uploads, abandoned multipart uploads, stale lock files, orphaned metadata entries, orphaned version files, and empty directories.
|
||||
|
||||
### Enabling GC
|
||||
|
||||
By default, GC is disabled. Enable it by setting:
|
||||
|
||||
```bash
|
||||
GC_ENABLED=true python run.py
|
||||
```
|
||||
|
||||
Or in your `myfsio.env` file:
|
||||
```
|
||||
GC_ENABLED=true
|
||||
GC_INTERVAL_HOURS=6 # Run every 6 hours (default)
|
||||
GC_TEMP_FILE_MAX_AGE_HOURS=24 # Delete temp files older than 24h
|
||||
GC_MULTIPART_MAX_AGE_DAYS=7 # Delete orphaned multipart uploads older than 7 days
|
||||
GC_LOCK_FILE_MAX_AGE_HOURS=1 # Delete stale lock files older than 1h
|
||||
GC_DRY_RUN=false # Set to true to log without deleting
|
||||
```
|
||||
|
||||
### What Gets Cleaned
|
||||
|
||||
| Type | Location | Condition |
|
||||
|------|----------|-----------|
|
||||
| **Temp files** | `.myfsio.sys/tmp/` | Older than `GC_TEMP_FILE_MAX_AGE_HOURS` |
|
||||
| **Orphaned multipart uploads** | `.myfsio.sys/multipart/` and `<bucket>/.multipart/` | Older than `GC_MULTIPART_MAX_AGE_DAYS` |
|
||||
| **Stale lock files** | `.myfsio.sys/buckets/<bucket>/locks/` | Older than `GC_LOCK_FILE_MAX_AGE_HOURS` |
|
||||
| **Orphaned metadata** | `.myfsio.sys/buckets/<bucket>/meta/` and `<bucket>/.meta/` | Object file no longer exists |
|
||||
| **Orphaned versions** | `.myfsio.sys/buckets/<bucket>/versions/` and `<bucket>/.versions/` | Main object no longer exists |
|
||||
| **Empty directories** | Various internal directories | Directory is empty after cleanup |
|
||||
|
||||
### Admin API
|
||||
|
||||
All GC endpoints require admin (`iam:*`) permissions.
|
||||
|
||||
| Method | Route | Description |
|
||||
|--------|-------|-------------|
|
||||
| `GET` | `/admin/gc/status` | Get GC status and configuration |
|
||||
| `POST` | `/admin/gc/run` | Trigger a manual GC run (body: `{"dry_run": true}` for preview) |
|
||||
| `GET` | `/admin/gc/history` | Get GC execution history (query: `?limit=50&offset=0`) |
|
||||
|
||||
### Dry Run Mode
|
||||
|
||||
Set `GC_DRY_RUN=true` to log what would be deleted without actually removing anything. You can also trigger a one-time dry run via the admin API:
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:5000/admin/gc/run" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"dry_run": true}'
|
||||
```
|
||||
|
||||
### Performance Tuning
|
||||
|
||||
| Variable | Default | Notes |
|
||||
@@ -314,12 +256,6 @@ curl -X POST "http://localhost:5000/admin/gc/run" \
|
||||
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
|
||||
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
|
||||
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
|
||||
| `BULK_DOWNLOAD_MAX_BYTES` | `1073741824` (1 GiB) | Maximum total size for bulk ZIP downloads. |
|
||||
| `OBJECT_CACHE_TTL` | `60` | Seconds to cache object metadata. |
|
||||
|
||||
#### Gzip Compression
|
||||
|
||||
API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-compressed when the client sends `Accept-Encoding: gzip`. Compression activates for responses larger than 500 bytes and is handled by a WSGI middleware (`app/compression.py`). Binary object downloads and streaming responses are never compressed. No configuration is needed.
|
||||
|
||||
### Server Settings
|
||||
|
||||
@@ -333,14 +269,13 @@ API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-co
|
||||
|
||||
Before deploying to production, ensure you:
|
||||
|
||||
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`). This also enables IAM config encryption at rest.
|
||||
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`)
|
||||
2. **Restrict CORS** - Set `CORS_ORIGINS` to your specific domains instead of `*`
|
||||
3. **Configure `API_BASE_URL`** - Required for correct presigned URLs behind proxies
|
||||
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
|
||||
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
|
||||
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
|
||||
7. **Use `--prod` flag** - Runs with Granian instead of Flask dev server
|
||||
8. **Set credential expiry** - Assign `expires_at` to non-admin users for time-limited access
|
||||
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
|
||||
|
||||
### Proxy Configuration
|
||||
|
||||
@@ -350,75 +285,6 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
|
||||
|
||||
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `NUM_TRUSTED_PROXIES` | `1` | Number of trusted reverse proxies for `X-Forwarded-*` header processing. |
|
||||
| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. |
|
||||
| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** |
|
||||
|
||||
## Integrity Scanner
|
||||
|
||||
The integrity scanner detects and optionally auto-repairs data inconsistencies: corrupted objects (ETag mismatch), orphaned files without metadata, phantom metadata without files, stale version archives, ETag cache drift, and unmigrated legacy `.meta.json` files.
|
||||
|
||||
### Enabling Integrity Scanner
|
||||
|
||||
By default, the integrity scanner is disabled. Enable it by setting:
|
||||
|
||||
```bash
|
||||
INTEGRITY_ENABLED=true python run.py
|
||||
```
|
||||
|
||||
Or in your `myfsio.env` file:
|
||||
```
|
||||
INTEGRITY_ENABLED=true
|
||||
INTEGRITY_INTERVAL_HOURS=24 # Run every 24 hours (default)
|
||||
INTEGRITY_BATCH_SIZE=1000 # Max objects to scan per cycle
|
||||
INTEGRITY_AUTO_HEAL=false # Automatically repair detected issues
|
||||
INTEGRITY_DRY_RUN=false # Set to true to log without healing
|
||||
```
|
||||
|
||||
### What Gets Checked
|
||||
|
||||
| Check | Detection | Heal Action |
|
||||
|-------|-----------|-------------|
|
||||
| **Corrupted objects** | File MD5 does not match stored `__etag__` | Update `__etag__` in index (disk data is authoritative) |
|
||||
| **Orphaned objects** | File exists on disk without metadata entry | Create index entry with computed MD5/size/mtime |
|
||||
| **Phantom metadata** | Index entry exists but file is missing from disk | Remove stale entry from `_index.json` |
|
||||
| **Stale versions** | `.json` manifest without `.bin` data or vice versa | Remove orphaned version file |
|
||||
| **ETag cache inconsistency** | `etag_index.json` entry differs from metadata `__etag__` | Delete `etag_index.json` (auto-rebuilt on next list) |
|
||||
| **Legacy metadata drift** | Legacy `.meta.json` differs from index or is unmigrated | Migrate to index and delete legacy file |
|
||||
|
||||
### Admin API
|
||||
|
||||
All integrity endpoints require admin (`iam:*`) permissions.
|
||||
|
||||
| Method | Route | Description |
|
||||
|--------|-------|-------------|
|
||||
| `GET` | `/admin/integrity/status` | Get scanner status and configuration |
|
||||
| `POST` | `/admin/integrity/run` | Trigger a manual scan (body: `{"dry_run": true, "auto_heal": true}`) |
|
||||
| `GET` | `/admin/integrity/history` | Get scan history (query: `?limit=50&offset=0`) |
|
||||
|
||||
### Dry Run Mode
|
||||
|
||||
Set `INTEGRITY_DRY_RUN=true` to log detected issues without making any changes. You can also trigger a one-time dry run via the admin API:
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:5000/admin/integrity/run" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"dry_run": true, "auto_heal": true}'
|
||||
```
|
||||
|
||||
### Configuration Reference
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `INTEGRITY_ENABLED` | `false` | Enable background integrity scanning |
|
||||
| `INTEGRITY_INTERVAL_HOURS` | `24` | Hours between scan cycles |
|
||||
| `INTEGRITY_BATCH_SIZE` | `1000` | Max objects to scan per cycle |
|
||||
| `INTEGRITY_AUTO_HEAL` | `false` | Automatically repair detected issues |
|
||||
| `INTEGRITY_DRY_RUN` | `false` | Log issues without healing |
|
||||
|
||||
## 4. Upgrading and Updates
|
||||
|
||||
### Version Checking
|
||||
@@ -753,16 +619,13 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
|
||||
|
||||
### Getting Started
|
||||
|
||||
1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. You can set `ADMIN_ACCESS_KEY` and `ADMIN_SECRET_KEY` environment variables to use custom credentials instead of random ones. If `SECRET_KEY` is configured, the IAM config file is encrypted at rest using AES (Fernet). To reset admin credentials later, run `python run.py --reset-cred`.
|
||||
2. Sign into the UI using the generated credentials, then open **IAM**:
|
||||
- **Create user**: supply a display name, optional JSON inline policy array, and optional credential expiry date.
|
||||
- **Set expiry**: assign an expiration date to any user's credentials. Expired credentials are rejected at authentication time. The UI shows expiry badges and preset durations (1h, 24h, 7d, 30d, 90d).
|
||||
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
|
||||
2. Sign into the UI using those credentials, then open **IAM**:
|
||||
- **Create user**: supply a display name and optional JSON inline policy array.
|
||||
- **Rotate secret**: generates a new secret key; the UI surfaces it once.
|
||||
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. An optional `"prefix"` field restricts object-level actions to a key prefix (e.g., `"uploads/"`). Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
|
||||
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
|
||||
3. Wildcard action `iam:*` is supported for admin user definitions.
|
||||
|
||||
> **Breaking Change (v0.2.0+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.
|
||||
|
||||
### Authentication
|
||||
|
||||
The API expects every request to include authentication headers. The UI persists them in the Flask session after login.
|
||||
@@ -774,11 +637,8 @@ The API expects every request to include authentication headers. The UI persists
|
||||
|
||||
**Security Features:**
|
||||
- **Lockout Protection**: After `AUTH_MAX_ATTEMPTS` (default: 5) failed login attempts, the account is locked for `AUTH_LOCKOUT_MINUTES` (default: 15 minutes).
|
||||
- **Credential Expiry**: Each user can have an optional `expires_at` timestamp (ISO 8601). Once expired, all API requests using those credentials are rejected. Set or clear expiry via the UI or API.
|
||||
- **IAM Config Encryption**: When `SECRET_KEY` is set, the IAM config file (`iam.json`) is encrypted at rest using Fernet (AES-256-CBC with HMAC). Existing plaintext configs are automatically encrypted on next load.
|
||||
- **Session Management**: UI sessions remain valid for `SESSION_LIFETIME_DAYS` (default: 30 days).
|
||||
- **Hot Reload**: IAM configuration changes take effect immediately without restart.
|
||||
- **Credential Reset**: Run `python run.py --reset-cred` to reset admin credentials. Supports `ADMIN_ACCESS_KEY` and `ADMIN_SECRET_KEY` env vars for deterministic keys.
|
||||
|
||||
### Permission Model
|
||||
|
||||
@@ -797,23 +657,13 @@ Both layers are evaluated for each request. A user must have permission in their
|
||||
| --- | --- | --- |
|
||||
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
|
||||
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
|
||||
| `write` | Upload objects, manage object tags | `s3:PutObject`, `s3:PutObjectTagging`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
|
||||
| `delete` | Remove objects and versions | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteObjectTagging` |
|
||||
| `create_bucket` | Create new buckets | `s3:CreateBucket` |
|
||||
| `delete_bucket` | Delete buckets | `s3:DeleteBucket` |
|
||||
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
|
||||
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
|
||||
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
|
||||
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
|
||||
| `versioning` | Manage bucket versioning configuration | `s3:GetBucketVersioning`, `s3:PutBucketVersioning` |
|
||||
| `tagging` | Manage bucket-level tags | `s3:GetBucketTagging`, `s3:PutBucketTagging`, `s3:DeleteBucketTagging` |
|
||||
| `encryption` | Manage bucket encryption configuration | `s3:GetEncryptionConfiguration`, `s3:PutEncryptionConfiguration`, `s3:DeleteEncryptionConfiguration` |
|
||||
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
|
||||
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
|
||||
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
|
||||
| `quota` | Manage bucket storage quotas | `s3:GetBucketQuota`, `s3:PutBucketQuota`, `s3:DeleteBucketQuota` |
|
||||
| `object_lock` | Manage object lock, retention, and legal holds | `s3:GetObjectLockConfiguration`, `s3:PutObjectLockConfiguration`, `s3:PutObjectRetention`, `s3:GetObjectRetention`, `s3:PutObjectLegalHold`, `s3:GetObjectLegalHold` |
|
||||
| `notification` | Manage bucket event notifications | `s3:GetBucketNotificationConfiguration`, `s3:PutBucketNotificationConfiguration`, `s3:DeleteBucketNotificationConfiguration` |
|
||||
| `logging` | Manage bucket access logging | `s3:GetBucketLogging`, `s3:PutBucketLogging`, `s3:DeleteBucketLogging` |
|
||||
| `website` | Manage static website hosting configuration | `s3:GetBucketWebsite`, `s3:PutBucketWebsite`, `s3:DeleteBucketWebsite` |
|
||||
|
||||
#### IAM Actions (User Management)
|
||||
|
||||
@@ -824,31 +674,25 @@ Both layers are evaluated for each request. A user must have permission in their
|
||||
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
|
||||
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
|
||||
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
|
||||
| `iam:create_key` | Create additional access keys for a user | `iam:CreateAccessKey` |
|
||||
| `iam:delete_key` | Delete an access key from a user | `iam:DeleteAccessKey` |
|
||||
| `iam:get_user` | View user details and access keys | `iam:GetUser` |
|
||||
| `iam:get_policy` | View user policy configuration | `iam:GetPolicy` |
|
||||
| `iam:disable_user` | Temporarily disable/enable a user account | `iam:DisableUser` |
|
||||
| `iam:*` | **Admin wildcard** – grants all IAM actions | — |
|
||||
|
||||
#### Wildcards
|
||||
|
||||
| Wildcard | Scope | Description |
|
||||
| --- | --- | --- |
|
||||
| `*` (in actions) | All S3 actions | Grants all 19 S3 actions including `list`, `read`, `write`, `delete`, `create_bucket`, `delete_bucket`, `share`, `policy`, `versioning`, `tagging`, `encryption`, `lifecycle`, `cors`, `replication`, `quota`, `object_lock`, `notification`, `logging`, `website` |
|
||||
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
|
||||
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
|
||||
| `*` (in bucket) | All buckets | Policy applies to every bucket |
|
||||
|
||||
### IAM Policy Structure
|
||||
|
||||
User policies are stored as a JSON array of policy objects. Each object specifies a bucket, the allowed actions, and an optional prefix for object-level scoping:
|
||||
User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"bucket": "<bucket-name-or-wildcard>",
|
||||
"actions": ["<action1>", "<action2>", ...],
|
||||
"prefix": "<optional-key-prefix>"
|
||||
"actions": ["<action1>", "<action2>", ...]
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -856,13 +700,12 @@ User policies are stored as a JSON array of policy objects. Each object specifie
|
||||
**Fields:**
|
||||
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
|
||||
- `actions`: Array of action strings (simple names or AWS aliases)
|
||||
- `prefix`: *(optional)* Restrict object-level actions to keys starting with this prefix. Defaults to `*` (all objects). Example: `"uploads/"` restricts to keys under `uploads/`
|
||||
|
||||
### Example User Policies
|
||||
|
||||
**Full Administrator (complete system access):**
|
||||
```json
|
||||
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "create_bucket", "delete_bucket", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website", "iam:*"]}]
|
||||
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
|
||||
```
|
||||
|
||||
**Read-Only User (browse and download only):**
|
||||
@@ -875,11 +718,6 @@ User policies are stored as a JSON array of policy objects. Each object specifie
|
||||
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
|
||||
```
|
||||
|
||||
**Operator (data operations + bucket management, no config):**
|
||||
```json
|
||||
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket"]}]
|
||||
```
|
||||
|
||||
**Multiple Bucket Access (different permissions per bucket):**
|
||||
```json
|
||||
[
|
||||
@@ -889,14 +727,9 @@ User policies are stored as a JSON array of policy objects. Each object specifie
|
||||
]
|
||||
```
|
||||
|
||||
**Prefix-Scoped Access (restrict to a folder inside a shared bucket):**
|
||||
```json
|
||||
[{"bucket": "shared-data", "actions": ["list", "read", "write", "delete"], "prefix": "team-a/"}]
|
||||
```
|
||||
|
||||
**IAM Manager (manage users but no data access):**
|
||||
```json
|
||||
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy", "iam:create_key", "iam:delete_key", "iam:get_user", "iam:get_policy", "iam:disable_user"]}]
|
||||
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
|
||||
```
|
||||
|
||||
**Replication Operator (manage replication only):**
|
||||
@@ -916,10 +749,10 @@ User policies are stored as a JSON array of policy objects. Each object specifie
|
||||
|
||||
**Bucket Administrator (full bucket config, no IAM access):**
|
||||
```json
|
||||
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket", "share", "policy", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website"]}]
|
||||
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
|
||||
```
|
||||
|
||||
**Upload-Only User (write but cannot create/delete buckets):**
|
||||
**Upload-Only User (write but cannot read back):**
|
||||
```json
|
||||
[{"bucket": "drop-box", "actions": ["write"]}]
|
||||
```
|
||||
@@ -965,8 +798,7 @@ curl -X POST http://localhost:5000/iam/users \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-d '{
|
||||
"display_name": "New User",
|
||||
"policies": [{"bucket": "*", "actions": ["list", "read"]}],
|
||||
"expires_at": "2026-12-31T23:59:59Z"
|
||||
"policies": [{"bucket": "*", "actions": ["list", "read"]}]
|
||||
}'
|
||||
|
||||
# Rotate user secret (requires iam:rotate_key)
|
||||
@@ -979,45 +811,9 @@ curl -X PUT http://localhost:5000/iam/users/<access-key>/policies \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-d '[{"bucket": "*", "actions": ["list", "read", "write"]}]'
|
||||
|
||||
# Update credential expiry (requires iam:update_policy)
|
||||
curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-d 'expires_at=2026-12-31T23:59:59Z'
|
||||
|
||||
# Remove credential expiry (never expires)
|
||||
curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-d 'expires_at='
|
||||
|
||||
# Delete a user (requires iam:delete_user)
|
||||
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
|
||||
|
||||
# Get user details (requires iam:get_user) — via Admin API
|
||||
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key> \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
|
||||
# Get user policies (requires iam:get_policy) — via Admin API
|
||||
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key>/policies \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
|
||||
# Create additional access key for a user (requires iam:create_key)
|
||||
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/keys \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
|
||||
# Delete an access key (requires iam:delete_key)
|
||||
curl -X DELETE http://localhost:5000/admin/iam/users/<user-id>/keys/<access-key> \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
|
||||
# Disable a user account (requires iam:disable_user)
|
||||
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/disable \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
|
||||
# Re-enable a user account (requires iam:disable_user)
|
||||
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/enable \
|
||||
-H "Authorization: AWS4-HMAC-SHA256 ..."
|
||||
```
|
||||
|
||||
### Permission Precedence
|
||||
@@ -1026,9 +822,8 @@ When a request is made, permissions are evaluated in this order:
|
||||
|
||||
1. **Authentication** – Verify the access key and secret key are valid
|
||||
2. **Lockout Check** – Ensure the account is not locked due to failed attempts
|
||||
3. **Expiry Check** – Reject requests if the user's credentials have expired (`expires_at`)
|
||||
4. **IAM Policy Check** – Verify the user has the required action for the target bucket
|
||||
5. **Bucket Policy Check** – If a bucket policy exists, verify it allows the action
|
||||
3. **IAM Policy Check** – Verify the user has the required action for the target bucket
|
||||
4. **Bucket Policy Check** – If a bucket policy exists, verify it allows the action
|
||||
|
||||
A request is allowed only if:
|
||||
- The IAM policy grants the action, AND
|
||||
@@ -1115,7 +910,7 @@ Objects with forward slashes (`/`) in their keys are displayed as a folder hiera
|
||||
|
||||
- Select multiple objects using checkboxes
|
||||
- **Bulk Delete**: Delete multiple objects at once
|
||||
- **Bulk Download**: Download selected objects as a single ZIP archive (up to `BULK_DOWNLOAD_MAX_BYTES`, default 1 GiB)
|
||||
- **Bulk Download**: Download selected objects as individual files
|
||||
|
||||
#### Search & Filter
|
||||
|
||||
@@ -1188,7 +983,6 @@ MyFSIO supports **server-side encryption at rest** to protect your data. When en
|
||||
|------|-------------|
|
||||
| **AES-256 (SSE-S3)** | Server-managed encryption using a local master key |
|
||||
| **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS |
|
||||
| **SSE-C** | Server-side encryption with customer-provided keys (per-request) |
|
||||
|
||||
### Enabling Encryption
|
||||
|
||||
@@ -1287,44 +1081,6 @@ encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key)
|
||||
decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key)
|
||||
```
|
||||
|
||||
### SSE-C (Customer-Provided Keys)
|
||||
|
||||
With SSE-C, you provide your own 256-bit AES encryption key with each request. The server encrypts/decrypts using your key but never stores it. You must supply the same key for both upload and download.
|
||||
|
||||
**Required headers:**
|
||||
|
||||
| Header | Value |
|
||||
|--------|-------|
|
||||
| `x-amz-server-side-encryption-customer-algorithm` | `AES256` |
|
||||
| `x-amz-server-side-encryption-customer-key` | Base64-encoded 256-bit key |
|
||||
| `x-amz-server-side-encryption-customer-key-MD5` | Base64-encoded MD5 of the key *(optional, recommended for integrity verification)* |
|
||||
|
||||
```bash
|
||||
# Generate a 256-bit key
|
||||
KEY=$(openssl rand -base64 32)
|
||||
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
|
||||
|
||||
# Upload with SSE-C
|
||||
curl -X PUT "http://localhost:5000/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
|
||||
--data-binary @secret.txt
|
||||
|
||||
# Download with SSE-C (same key required)
|
||||
curl "http://localhost:5000/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
- SSE-C does not require `ENCRYPTION_ENABLED` or `KMS_ENABLED` — the key is provided per-request
|
||||
- If you lose your key, the data is irrecoverable
|
||||
- The MD5 header is optional but recommended for integrity verification
|
||||
|
||||
### Important Notes
|
||||
|
||||
- **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted
|
||||
@@ -1794,9 +1550,6 @@ GET /<bucket>?notification # Get event notifications
|
||||
PUT /<bucket>?notification # Set event notifications (webhooks)
|
||||
GET /<bucket>?object-lock # Get object lock configuration
|
||||
PUT /<bucket>?object-lock # Set object lock configuration
|
||||
GET /<bucket>?website # Get website configuration
|
||||
PUT /<bucket>?website # Set website configuration
|
||||
DELETE /<bucket>?website # Delete website configuration
|
||||
GET /<bucket>?uploads # List active multipart uploads
|
||||
GET /<bucket>?versions # List object versions
|
||||
GET /<bucket>?location # Get bucket location/region
|
||||
@@ -1841,11 +1594,6 @@ PUT /admin/sites/<site_id> # Update peer site
|
||||
DELETE /admin/sites/<site_id> # Unregister peer site
|
||||
GET /admin/sites/<site_id>/health # Check peer health
|
||||
GET /admin/topology # Get cluster topology
|
||||
GET /admin/website-domains # List domain mappings
|
||||
POST /admin/website-domains # Create domain mapping
|
||||
GET /admin/website-domains/<domain> # Get domain mapping
|
||||
PUT /admin/website-domains/<domain> # Update domain mapping
|
||||
DELETE /admin/website-domains/<domain> # Delete domain mapping
|
||||
|
||||
# KMS API
|
||||
GET /kms/keys # List KMS keys
|
||||
@@ -2201,20 +1949,6 @@ curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
|
||||
-H "x-amz-meta-newkey: newvalue"
|
||||
```
|
||||
|
||||
### MoveObject (UI)
|
||||
|
||||
Move an object to a different key or bucket. This is a UI-only convenience operation that performs a copy followed by a delete of the source. Requires `read` and `delete` on the source, and `write` on the destination.
|
||||
|
||||
```bash
|
||||
# Move via UI API
|
||||
curl -X POST "http://localhost:5100/ui/buckets/my-bucket/objects/old-path/file.txt/move" \
|
||||
-H "Content-Type: application/json" \
|
||||
--cookie "session=..." \
|
||||
-d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'
|
||||
```
|
||||
|
||||
The move is safe from the caller's perspective but not strictly atomic: if the copy succeeds but the delete fails, the object temporarily exists in both locations (no data loss).
|
||||
|
||||
### UploadPartCopy
|
||||
|
||||
Copy data from an existing object into a multipart upload part:
|
||||
@@ -2493,113 +2227,3 @@ curl "http://localhost:5000/my-bucket?list-type=2&start-after=photos/2024/" \
|
||||
| `start-after` | Start listing after this key |
|
||||
| `fetch-owner` | Include owner info in response |
|
||||
| `encoding-type` | Set to `url` for URL-encoded keys |
|
||||
|
||||
## 26. Static Website Hosting
|
||||
|
||||
MyFSIO can serve S3 buckets as static websites via custom domain mappings. When a request arrives with a `Host` header matching a mapped domain, MyFSIO resolves the bucket and serves objects directly.
|
||||
|
||||
### Enabling
|
||||
|
||||
Set the environment variable:
|
||||
|
||||
```bash
|
||||
WEBSITE_HOSTING_ENABLED=true
|
||||
```
|
||||
|
||||
When disabled, all website hosting endpoints return 400 and domain-based serving is skipped.
|
||||
|
||||
### Configuration
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `WEBSITE_HOSTING_ENABLED` | `false` | Master switch for website hosting |
|
||||
|
||||
### Setting Up a Website
|
||||
|
||||
**Step 1: Configure the bucket website settings**
|
||||
|
||||
```bash
|
||||
curl -X PUT "http://localhost:5000/my-site?website" \
|
||||
-H "Authorization: ..." \
|
||||
-d '<?xml version="1.0" encoding="UTF-8"?>
|
||||
<WebsiteConfiguration>
|
||||
<IndexDocument><Suffix>index.html</Suffix></IndexDocument>
|
||||
<ErrorDocument><Key>404.html</Key></ErrorDocument>
|
||||
</WebsiteConfiguration>'
|
||||
```
|
||||
|
||||
- `IndexDocument` with `Suffix` is required (must not contain `/`)
|
||||
- `ErrorDocument` is optional
|
||||
|
||||
**Step 2: Map a domain to the bucket**
|
||||
|
||||
```bash
|
||||
curl -X POST "http://localhost:5000/admin/website-domains" \
|
||||
-H "Authorization: ..." \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"domain": "example.com", "bucket": "my-site"}'
|
||||
```
|
||||
|
||||
**Step 3: Point your domain to MyFSIO**
|
||||
|
||||
For HTTP-only (direct access), point DNS to the MyFSIO host on port 5000.
|
||||
|
||||
For HTTPS (recommended), use a reverse proxy. The critical requirement is passing the original `Host` header so MyFSIO can match the domain to a bucket.
|
||||
|
||||
**nginx example:**
|
||||
|
||||
```nginx
|
||||
server {
|
||||
server_name example.com;
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /etc/ssl/certs/example.com.pem;
|
||||
ssl_certificate_key /etc/ssl/private/example.com.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:5000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`proxy_set_header Host $host;` is required — without it, MyFSIO cannot match the incoming domain to a bucket. You do not need any path-based routing rules; MyFSIO handles all object resolution internally.
|
||||
|
||||
### How Domain Routing Works
|
||||
|
||||
1. A request arrives with `Host: example.com`
|
||||
2. MyFSIO's `before_request` hook strips the port and looks up the domain in the `WebsiteDomainStore`
|
||||
3. If a match is found, it loads the bucket's website config (index/error documents)
|
||||
4. Object key resolution:
|
||||
- `/` or trailing `/` → append `index_document` (e.g., `index.html`)
|
||||
- `/path` → try exact match, then try `path/index_document`
|
||||
- Not found → serve `error_document` with 404 status
|
||||
5. If no domain match is found, the request falls through to normal S3 API / UI routing
|
||||
|
||||
### Domain Mapping Admin API
|
||||
|
||||
All endpoints require admin (`iam:*`) permissions.
|
||||
|
||||
| Method | Route | Body | Description |
|
||||
|--------|-------|------|-------------|
|
||||
| `GET` | `/admin/website-domains` | — | List all mappings |
|
||||
| `POST` | `/admin/website-domains` | `{"domain": "...", "bucket": "..."}` | Create mapping |
|
||||
| `GET` | `/admin/website-domains/<domain>` | — | Get single mapping |
|
||||
| `PUT` | `/admin/website-domains/<domain>` | `{"bucket": "..."}` | Update mapping |
|
||||
| `DELETE` | `/admin/website-domains/<domain>` | — | Delete mapping |
|
||||
|
||||
### Bucket Website API
|
||||
|
||||
| Method | Route | Description |
|
||||
|--------|-------|-------------|
|
||||
| `PUT` | `/<bucket>?website` | Set website config (XML body) |
|
||||
| `GET` | `/<bucket>?website` | Get website config (XML response) |
|
||||
| `DELETE` | `/<bucket>?website` | Remove website config |
|
||||
|
||||
### Web UI
|
||||
|
||||
- **Per-bucket config:** Bucket Details → Properties tab → "Static Website Hosting" card
|
||||
- **Domain management:** Sidebar → "Domains" (visible when hosting is enabled and user is admin)
|
||||
|
||||
3443
myfsio-engine/Cargo.lock
generated
3443
myfsio-engine/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,45 +0,0 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/myfsio-common",
|
||||
"crates/myfsio-auth",
|
||||
"crates/myfsio-crypto",
|
||||
"crates/myfsio-storage",
|
||||
"crates/myfsio-xml",
|
||||
"crates/myfsio-server",
|
||||
]
|
||||
|
||||
[workspace.dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
axum = { version = "0.8" }
|
||||
tower = { version = "0.5" }
|
||||
tower-http = { version = "0.6", features = ["cors", "trace"] }
|
||||
hyper = { version = "1" }
|
||||
bytes = "1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
md-5 = "0.10"
|
||||
hex = "0.4"
|
||||
aes = "0.8"
|
||||
aes-gcm = "0.10"
|
||||
cbc = { version = "0.1", features = ["alloc"] }
|
||||
hkdf = "0.12"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
parking_lot = "0.12"
|
||||
lru = "0.14"
|
||||
percent-encoding = "2"
|
||||
regex = "1"
|
||||
unicode-normalization = "0.1"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = "0.3"
|
||||
thiserror = "2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
base64 = "0.22"
|
||||
tokio-util = { version = "0.7", features = ["io"] }
|
||||
futures = "0.3"
|
||||
dashmap = "6"
|
||||
crc32fast = "1"
|
||||
duckdb = { version = "1", features = ["bundled"] }
|
||||
@@ -1,26 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-auth"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
hmac = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
aes = { workspace = true }
|
||||
cbc = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
pbkdf2 = "0.12"
|
||||
lru = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
percent-encoding = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
@@ -1,80 +0,0 @@
|
||||
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, KeyIvInit};
|
||||
use base64::{engine::general_purpose::URL_SAFE, Engine};
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
|
||||
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
pub fn derive_fernet_key(secret: &str) -> String {
|
||||
let mut derived = [0u8; 32];
|
||||
pbkdf2::pbkdf2_hmac::<Sha256>(
|
||||
secret.as_bytes(),
|
||||
b"myfsio-iam-encryption",
|
||||
100_000,
|
||||
&mut derived,
|
||||
);
|
||||
URL_SAFE.encode(derived)
|
||||
}
|
||||
|
||||
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
|
||||
let key_bytes = URL_SAFE
|
||||
.decode(key_b64)
|
||||
.map_err(|_| "invalid fernet key base64")?;
|
||||
if key_bytes.len() != 32 {
|
||||
return Err("fernet key must be 32 bytes");
|
||||
}
|
||||
|
||||
let signing_key = &key_bytes[..16];
|
||||
let encryption_key = &key_bytes[16..];
|
||||
|
||||
let token_bytes = URL_SAFE
|
||||
.decode(token)
|
||||
.map_err(|_| "invalid fernet token base64")?;
|
||||
|
||||
if token_bytes.len() < 57 {
|
||||
return Err("fernet token too short");
|
||||
}
|
||||
|
||||
if token_bytes[0] != 0x80 {
|
||||
return Err("invalid fernet version");
|
||||
}
|
||||
|
||||
let hmac_offset = token_bytes.len() - 32;
|
||||
let payload = &token_bytes[..hmac_offset];
|
||||
let expected_hmac = &token_bytes[hmac_offset..];
|
||||
|
||||
let mut mac =
|
||||
HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
|
||||
mac.update(payload);
|
||||
mac.verify_slice(expected_hmac)
|
||||
.map_err(|_| "HMAC verification failed")?;
|
||||
|
||||
let iv = &token_bytes[9..25];
|
||||
let ciphertext = &token_bytes[25..hmac_offset];
|
||||
|
||||
let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
|
||||
.decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
|
||||
.map_err(|_| "AES-CBC decryption failed")?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// The derived key must decode to exactly 32 bytes of key material.
    #[test]
    fn test_derive_fernet_key_format() {
        let derived = derive_fernet_key("test-secret");
        assert_eq!(URL_SAFE.decode(&derived).unwrap().len(), 32);
    }

    /// Same 32-byte property must hold for the key shared with Python.
    #[test]
    fn test_roundtrip_with_python_compat() {
        let derived = derive_fernet_key("dev-secret-key");
        assert_eq!(URL_SAFE.decode(&derived).unwrap().len(), 32);
    }
}
|
||||
@@ -1,812 +0,0 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use myfsio_common::types::Principal;
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Instant, SystemTime};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct IamConfig {
|
||||
#[serde(default = "default_version")]
|
||||
pub version: u32,
|
||||
#[serde(default)]
|
||||
pub users: Vec<IamUser>,
|
||||
}
|
||||
|
||||
fn default_version() -> u32 {
|
||||
2
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct IamUser {
|
||||
pub user_id: String,
|
||||
pub display_name: String,
|
||||
#[serde(default = "default_enabled")]
|
||||
pub enabled: bool,
|
||||
#[serde(default)]
|
||||
pub expires_at: Option<String>,
|
||||
#[serde(default)]
|
||||
pub access_keys: Vec<AccessKey>,
|
||||
#[serde(default)]
|
||||
pub policies: Vec<IamPolicy>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
struct RawIamConfig {
|
||||
#[serde(default)]
|
||||
pub users: Vec<RawIamUser>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
struct RawIamUser {
|
||||
pub user_id: Option<String>,
|
||||
pub display_name: Option<String>,
|
||||
#[serde(default = "default_enabled")]
|
||||
pub enabled: bool,
|
||||
#[serde(default)]
|
||||
pub expires_at: Option<String>,
|
||||
pub access_key: Option<String>,
|
||||
pub secret_key: Option<String>,
|
||||
#[serde(default)]
|
||||
pub access_keys: Vec<AccessKey>,
|
||||
#[serde(default)]
|
||||
pub policies: Vec<IamPolicy>,
|
||||
}
|
||||
|
||||
impl RawIamUser {
|
||||
fn normalize(self) -> IamUser {
|
||||
let mut access_keys = self.access_keys;
|
||||
if access_keys.is_empty() {
|
||||
if let (Some(ak), Some(sk)) = (self.access_key, self.secret_key) {
|
||||
access_keys.push(AccessKey {
|
||||
access_key: ak,
|
||||
secret_key: sk,
|
||||
status: "active".to_string(),
|
||||
created_at: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
let display_name = self.display_name.unwrap_or_else(|| {
|
||||
access_keys.first().map(|k| k.access_key.clone()).unwrap_or_else(|| "unknown".to_string())
|
||||
});
|
||||
let user_id = self.user_id.unwrap_or_else(|| {
|
||||
format!("u-{}", display_name.to_ascii_lowercase().replace(' ', "-"))
|
||||
});
|
||||
IamUser {
|
||||
user_id,
|
||||
display_name,
|
||||
enabled: self.enabled,
|
||||
expires_at: self.expires_at,
|
||||
access_keys,
|
||||
policies: self.policies,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// serde default: users are enabled unless explicitly disabled.
fn default_enabled() -> bool {
    true
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AccessKey {
|
||||
pub access_key: String,
|
||||
pub secret_key: String,
|
||||
#[serde(default = "default_status")]
|
||||
pub status: String,
|
||||
#[serde(default)]
|
||||
pub created_at: Option<String>,
|
||||
}
|
||||
|
||||
fn default_status() -> String {
|
||||
"active".to_string()
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct IamPolicy {
|
||||
pub bucket: String,
|
||||
pub actions: Vec<String>,
|
||||
#[serde(default = "default_prefix")]
|
||||
pub prefix: String,
|
||||
}
|
||||
|
||||
fn default_prefix() -> String {
|
||||
"*".to_string()
|
||||
}
|
||||
|
||||
struct IamState {
|
||||
key_secrets: HashMap<String, String>,
|
||||
key_index: HashMap<String, String>,
|
||||
key_status: HashMap<String, String>,
|
||||
user_records: HashMap<String, IamUser>,
|
||||
file_mtime: Option<SystemTime>,
|
||||
last_check: Instant,
|
||||
}
|
||||
|
||||
pub struct IamService {
|
||||
config_path: PathBuf,
|
||||
state: Arc<RwLock<IamState>>,
|
||||
check_interval: std::time::Duration,
|
||||
fernet_key: Option<String>,
|
||||
}
|
||||
|
||||
impl IamService {
|
||||
pub fn new(config_path: PathBuf) -> Self {
|
||||
Self::new_with_secret(config_path, None)
|
||||
}
|
||||
|
||||
pub fn new_with_secret(config_path: PathBuf, secret_key: Option<String>) -> Self {
|
||||
let fernet_key = secret_key.map(|s| crate::fernet::derive_fernet_key(&s));
|
||||
let service = Self {
|
||||
config_path,
|
||||
state: Arc::new(RwLock::new(IamState {
|
||||
key_secrets: HashMap::new(),
|
||||
key_index: HashMap::new(),
|
||||
key_status: HashMap::new(),
|
||||
user_records: HashMap::new(),
|
||||
file_mtime: None,
|
||||
last_check: Instant::now(),
|
||||
})),
|
||||
check_interval: std::time::Duration::from_secs(2),
|
||||
fernet_key,
|
||||
};
|
||||
service.reload();
|
||||
service
|
||||
}
|
||||
|
||||
fn reload_if_needed(&self) {
|
||||
{
|
||||
let state = self.state.read();
|
||||
if state.last_check.elapsed() < self.check_interval {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let current_mtime = std::fs::metadata(&self.config_path)
|
||||
.and_then(|m| m.modified())
|
||||
.ok();
|
||||
|
||||
let needs_reload = {
|
||||
let state = self.state.read();
|
||||
match (&state.file_mtime, ¤t_mtime) {
|
||||
(None, Some(_)) => true,
|
||||
(Some(old), Some(new)) => old != new,
|
||||
(Some(_), None) => true,
|
||||
(None, None) => state.key_secrets.is_empty(),
|
||||
}
|
||||
};
|
||||
|
||||
if needs_reload {
|
||||
self.reload();
|
||||
}
|
||||
|
||||
self.state.write().last_check = Instant::now();
|
||||
}
|
||||
|
||||
    /// Unconditionally (re)load the IAM config from disk into memory.
    ///
    /// Any failure (read, decrypt, UTF-8, parse) is logged and leaves the
    /// previous in-memory state untouched.
    fn reload(&self) {
        let content = match std::fs::read_to_string(&self.config_path) {
            Ok(c) => c,
            Err(e) => {
                tracing::warn!("Failed to read IAM config {}: {}", self.config_path.display(), e);
                return;
            }
        };

        // Encrypted configs carry a magic prefix; the remainder is a Fernet
        // token that decrypts to the JSON document.
        let raw = if content.starts_with("MYFSIO_IAM_ENC:") {
            let encrypted_token = &content["MYFSIO_IAM_ENC:".len()..];
            match &self.fernet_key {
                Some(key) => match crate::fernet::decrypt(key, encrypted_token.trim()) {
                    Ok(plaintext) => match String::from_utf8(plaintext) {
                        Ok(s) => s,
                        Err(e) => {
                            tracing::error!("Decrypted IAM config is not valid UTF-8: {}", e);
                            return;
                        }
                    },
                    Err(e) => {
                        tracing::error!("Failed to decrypt IAM config: {}. SECRET_KEY may have changed.", e);
                        return;
                    }
                },
                None => {
                    tracing::error!("IAM config is encrypted but no SECRET_KEY configured");
                    return;
                }
            }
        } else {
            content
        };

        let raw_config: RawIamConfig = match serde_json::from_str(&raw) {
            Ok(c) => c,
            Err(e) => {
                tracing::error!("Failed to parse IAM config: {}", e);
                return;
            }
        };

        // Normalize legacy (v1 flat) records into the v2 user shape.
        let users: Vec<IamUser> = raw_config.users.into_iter().map(|u| u.normalize()).collect();

        // Build all lookup tables before taking the write lock, so the
        // critical section is just a handful of moves.
        let mut key_secrets = HashMap::new();
        let mut key_index = HashMap::new();
        let mut key_status = HashMap::new();
        let mut user_records = HashMap::new();

        for user in &users {
            user_records.insert(user.user_id.clone(), user.clone());
            for ak in &user.access_keys {
                key_secrets.insert(ak.access_key.clone(), ak.secret_key.clone());
                key_index.insert(ak.access_key.clone(), user.user_id.clone());
                key_status.insert(ak.access_key.clone(), ak.status.clone());
            }
        }

        // Record the mtime we just loaded so reload_if_needed can detect change.
        let file_mtime = std::fs::metadata(&self.config_path)
            .and_then(|m| m.modified())
            .ok();

        let mut state = self.state.write();
        state.key_secrets = key_secrets;
        state.key_index = key_index;
        state.key_status = key_status;
        state.user_records = user_records;
        state.file_mtime = file_mtime;
        state.last_check = Instant::now();

        tracing::info!("IAM config reloaded: {} users, {} keys",
            users.len(),
            state.key_secrets.len());
    }
|
||||
|
||||
    /// Return the secret for `access_key`, or `None` when the key is unknown,
    /// inactive, its owner is disabled, or the owner's account has expired.
    ///
    /// NOTE(review): the key/user/expiry gating here duplicates
    /// `get_principal`; keep the two in sync if either changes.
    pub fn get_secret_key(&self, access_key: &str) -> Option<String> {
        self.reload_if_needed();
        let state = self.state.read();

        // Key-level gate: only "active" keys may authenticate.
        let status = state.key_status.get(access_key)?;
        if status != "active" {
            return None;
        }

        // User-level gate: owner must exist and be enabled.
        let user_id = state.key_index.get(access_key)?;
        let user = state.user_records.get(user_id)?;
        if !user.enabled {
            return None;
        }

        // Optional account expiry; unparseable timestamps are ignored (no expiry).
        if let Some(ref expires_at) = user.expires_at {
            if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
                if Utc::now() > exp {
                    return None;
                }
            }
        }

        state.key_secrets.get(access_key).cloned()
    }
|
||||
|
||||
    /// Resolve an access key to its `Principal`, applying the same key-status,
    /// user-enabled, and expiry gates as `get_secret_key`.
    pub fn get_principal(&self, access_key: &str) -> Option<Principal> {
        self.reload_if_needed();
        let state = self.state.read();

        // Key-level gate: only "active" keys resolve.
        let status = state.key_status.get(access_key)?;
        if status != "active" {
            return None;
        }

        // User-level gate: owner must exist and be enabled.
        let user_id = state.key_index.get(access_key)?;
        let user = state.user_records.get(user_id)?;
        if !user.enabled {
            return None;
        }

        // Optional account expiry; unparseable timestamps are ignored.
        if let Some(ref expires_at) = user.expires_at {
            if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
                if Utc::now() > exp {
                    return None;
                }
            }
        }

        // Admin = any policy granting every action on every bucket.
        let is_admin = user.policies.iter().any(|p| {
            p.bucket == "*" && p.actions.iter().any(|a| a == "*")
        });

        Some(Principal::new(
            access_key.to_string(),
            user.user_id.clone(),
            user.display_name.clone(),
            is_admin,
        ))
    }
|
||||
|
||||
pub fn authenticate(&self, access_key: &str, secret_key: &str) -> Option<Principal> {
|
||||
let stored_secret = self.get_secret_key(access_key)?;
|
||||
if !crate::sigv4::constant_time_compare(&stored_secret, secret_key) {
|
||||
return None;
|
||||
}
|
||||
self.get_principal(access_key)
|
||||
}
|
||||
|
||||
    /// Decide whether `principal` may perform `action` on `bucket_name`
    /// (and optionally `object_key`).
    ///
    /// Admins short-circuit to allow. Otherwise the user's policies are
    /// scanned; the first policy whose bucket, action, and (when a key is
    /// given) prefix all match grants access. Default deny.
    pub fn authorize(
        &self,
        principal: &Principal,
        bucket_name: Option<&str>,
        action: &str,
        object_key: Option<&str>,
    ) -> bool {
        self.reload_if_needed();

        if principal.is_admin {
            return true;
        }

        // Matching is case-insensitive on bucket and action; a missing
        // bucket is treated as the wildcard "*".
        let normalized_bucket = bucket_name
            .unwrap_or("*")
            .trim()
            .to_ascii_lowercase();
        let normalized_action = action.trim().to_ascii_lowercase();

        let state = self.state.read();
        let user = match state.user_records.get(&principal.user_id) {
            Some(u) => u,
            None => return false,
        };

        // Re-check enablement/expiry: the config may have changed since the
        // principal was resolved.
        if !user.enabled {
            return false;
        }

        if let Some(ref expires_at) = user.expires_at {
            if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
                if Utc::now() > exp {
                    return false;
                }
            }
        }

        for policy in &user.policies {
            if !bucket_matches(&policy.bucket, &normalized_bucket) {
                continue;
            }
            if !action_matches(&policy.actions, &normalized_action) {
                continue;
            }
            // Prefix restriction only applies when an object key is involved.
            if let Some(key) = object_key {
                if !prefix_matches(&policy.prefix, key) {
                    continue;
                }
            }
            return true;
        }

        false
    }
|
||||
|
||||
pub async fn list_users(&self) -> Vec<serde_json::Value> {
|
||||
self.reload_if_needed();
|
||||
let state = self.state.read();
|
||||
state
|
||||
.user_records
|
||||
.values()
|
||||
.map(|u| {
|
||||
serde_json::json!({
|
||||
"user_id": u.user_id,
|
||||
"display_name": u.display_name,
|
||||
"enabled": u.enabled,
|
||||
"access_keys": u.access_keys.iter().map(|k| {
|
||||
serde_json::json!({
|
||||
"access_key": k.access_key,
|
||||
"status": k.status,
|
||||
"created_at": k.created_at,
|
||||
})
|
||||
}).collect::<Vec<_>>(),
|
||||
"policy_count": u.policies.len(),
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub async fn get_user(&self, identifier: &str) -> Option<serde_json::Value> {
|
||||
self.reload_if_needed();
|
||||
let state = self.state.read();
|
||||
|
||||
let user = state
|
||||
.user_records
|
||||
.get(identifier)
|
||||
.or_else(|| {
|
||||
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
|
||||
})?;
|
||||
|
||||
Some(serde_json::json!({
|
||||
"user_id": user.user_id,
|
||||
"display_name": user.display_name,
|
||||
"enabled": user.enabled,
|
||||
"expires_at": user.expires_at,
|
||||
"access_keys": user.access_keys.iter().map(|k| {
|
||||
serde_json::json!({
|
||||
"access_key": k.access_key,
|
||||
"status": k.status,
|
||||
"created_at": k.created_at,
|
||||
})
|
||||
}).collect::<Vec<_>>(),
|
||||
"policies": user.policies,
|
||||
}))
|
||||
}
|
||||
|
||||
    /// Enable or disable a user (found by user id or any of their access
    /// keys), persisting the change back to the config file and reloading.
    ///
    /// NOTE(review): this reads and writes the config as plaintext JSON.
    /// If the file was written encrypted ("MYFSIO_IAM_ENC:" prefix — see
    /// `reload`), the parse here fails; confirm whether mutating encrypted
    /// configs is meant to be supported.
    pub async fn set_user_enabled(&self, identifier: &str, enabled: bool) -> Result<(), String> {
        let content = std::fs::read_to_string(&self.config_path)
            .map_err(|e| format!("Failed to read IAM config: {}", e))?;

        let raw: RawIamConfig = serde_json::from_str(&content)
            .map_err(|e| format!("Failed to parse IAM config: {}", e))?;
        // Normalize to the v2 shape so the file is upgraded on write.
        let mut config = IamConfig {
            version: 2,
            users: raw.users.into_iter().map(|u| u.normalize()).collect(),
        };

        let user = config
            .users
            .iter_mut()
            .find(|u| {
                u.user_id == identifier
                    || u.access_keys.iter().any(|k| k.access_key == identifier)
            })
            .ok_or_else(|| "User not found".to_string())?;

        user.enabled = enabled;

        let json = serde_json::to_string_pretty(&config)
            .map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
        std::fs::write(&self.config_path, json)
            .map_err(|e| format!("Failed to write IAM config: {}", e))?;

        // Make the change visible to in-memory lookups immediately.
        self.reload();
        Ok(())
    }
|
||||
|
||||
pub fn get_user_policies(&self, identifier: &str) -> Option<Vec<serde_json::Value>> {
|
||||
self.reload_if_needed();
|
||||
let state = self.state.read();
|
||||
let user = state
|
||||
.user_records
|
||||
.get(identifier)
|
||||
.or_else(|| {
|
||||
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
|
||||
})?;
|
||||
Some(
|
||||
user.policies
|
||||
.iter()
|
||||
.map(|p| serde_json::to_value(p).unwrap_or_default())
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
    /// Mint a new access/secret key pair for a user (found by user id or an
    /// existing access key), persist it, and return both halves — the only
    /// time the secret is ever returned.
    ///
    /// NOTE(review): like `set_user_enabled`, this assumes a plaintext JSON
    /// config file and will fail to parse an encrypted one.
    pub fn create_access_key(&self, identifier: &str) -> Result<serde_json::Value, String> {
        let content = std::fs::read_to_string(&self.config_path)
            .map_err(|e| format!("Failed to read IAM config: {}", e))?;
        let raw: RawIamConfig = serde_json::from_str(&content)
            .map_err(|e| format!("Failed to parse IAM config: {}", e))?;
        // Normalize to the v2 shape so the file is upgraded on write.
        let mut config = IamConfig {
            version: 2,
            users: raw.users.into_iter().map(|u| u.normalize()).collect(),
        };

        let user = config
            .users
            .iter_mut()
            .find(|u| {
                u.user_id == identifier
                    || u.access_keys.iter().any(|k| k.access_key == identifier)
            })
            .ok_or_else(|| format!("User '{}' not found", identifier))?;

        // Random UUID-backed credentials with AK/SK prefixes.
        let new_ak = format!("AK{}", uuid::Uuid::new_v4().simple());
        let new_sk = format!("SK{}", uuid::Uuid::new_v4().simple());

        let key = AccessKey {
            access_key: new_ak.clone(),
            secret_key: new_sk.clone(),
            status: "active".to_string(),
            created_at: Some(chrono::Utc::now().to_rfc3339()),
        };
        user.access_keys.push(key);

        let json = serde_json::to_string_pretty(&config)
            .map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
        std::fs::write(&self.config_path, json)
            .map_err(|e| format!("Failed to write IAM config: {}", e))?;

        // Make the new key usable immediately.
        self.reload();
        Ok(serde_json::json!({
            "access_key": new_ak,
            "secret_key": new_sk,
        }))
    }
|
||||
|
||||
    /// Remove an access key from whichever user owns it, persist, and reload.
    /// Refuses to delete a user's last remaining key (would lock them out).
    ///
    /// NOTE(review): like `set_user_enabled`, this assumes a plaintext JSON
    /// config file and will fail to parse an encrypted one.
    pub fn delete_access_key(&self, access_key: &str) -> Result<(), String> {
        let content = std::fs::read_to_string(&self.config_path)
            .map_err(|e| format!("Failed to read IAM config: {}", e))?;
        let raw: RawIamConfig = serde_json::from_str(&content)
            .map_err(|e| format!("Failed to parse IAM config: {}", e))?;
        // Normalize to the v2 shape so the file is upgraded on write.
        let mut config = IamConfig {
            version: 2,
            users: raw.users.into_iter().map(|u| u.normalize()).collect(),
        };

        let mut found = false;
        for user in &mut config.users {
            if user.access_keys.iter().any(|k| k.access_key == access_key) {
                // Guard: never strip a user of their only credential.
                if user.access_keys.len() <= 1 {
                    return Err("Cannot delete the last access key".to_string());
                }
                user.access_keys.retain(|k| k.access_key != access_key);
                found = true;
                break;
            }
        }
        if !found {
            return Err(format!("Access key '{}' not found", access_key));
        }

        let json = serde_json::to_string_pretty(&config)
            .map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
        std::fs::write(&self.config_path, json)
            .map_err(|e| format!("Failed to write IAM config: {}", e))?;

        // Revoke the key from in-memory lookups immediately.
        self.reload();
        Ok(())
    }
|
||||
}
|
||||
|
||||
/// True when a policy's bucket pattern covers `bucket` (caller passes the
/// bucket already lowercased); "*" matches everything.
fn bucket_matches(policy_bucket: &str, bucket: &str) -> bool {
    match policy_bucket.trim().to_ascii_lowercase().as_str() {
        "*" => true,
        pattern => pattern == bucket,
    }
}
|
||||
|
||||
/// True when any policy action covers `action` (caller passes the action
/// already lowercased). "*" matches everything; "iam:*" matches any
/// iam-namespaced action.
fn action_matches(policy_actions: &[String], action: &str) -> bool {
    policy_actions.iter().any(|policy_action| {
        let pa = policy_action.trim().to_ascii_lowercase();
        pa == "*" || pa == action || (pa == "iam:*" && action.starts_with("iam:"))
    })
}
|
||||
|
||||
/// True when `object_key` falls under the policy's prefix restriction.
/// Empty or "*" means no restriction; a trailing '*' is treated as a glob
/// and stripped before the prefix comparison.
fn prefix_matches(policy_prefix: &str, object_key: &str) -> bool {
    match policy_prefix.trim() {
        "" | "*" => true,
        p => object_key.starts_with(p.trim_end_matches('*')),
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Minimal v2 config: one enabled admin user with a single active key.
    fn test_iam_json() -> String {
        serde_json::json!({
            "version": 2,
            "users": [{
                "user_id": "u-test1234",
                "display_name": "admin",
                "enabled": true,
                "access_keys": [{
                    "access_key": "AKIAIOSFODNN7EXAMPLE",
                    "secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
                    "status": "active",
                    "created_at": "2024-01-01T00:00:00Z"
                }],
                "policies": [{
                    "bucket": "*",
                    "actions": ["*"],
                    "prefix": "*"
                }]
            }]
        })
        .to_string()
    }

    // Loading a config resolves an access key to its stored secret.
    #[test]
    fn test_load_and_lookup() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(test_iam_json().as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        let secret = svc.get_secret_key("AKIAIOSFODNN7EXAMPLE");
        assert_eq!(
            secret.unwrap(),
            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
        );
    }

    // An all-bucket/all-action policy marks the principal as admin.
    #[test]
    fn test_get_principal() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(test_iam_json().as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        let principal = svc.get_principal("AKIAIOSFODNN7EXAMPLE").unwrap();
        assert_eq!(principal.display_name, "admin");
        assert_eq!(principal.user_id, "u-test1234");
        assert!(principal.is_admin);
    }

    // Correct access/secret pair yields the principal.
    #[test]
    fn test_authenticate_success() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(test_iam_json().as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        let principal = svc
            .authenticate(
                "AKIAIOSFODNN7EXAMPLE",
                "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            )
            .unwrap();
        assert_eq!(principal.display_name, "admin");
    }

    // Wrong secret is rejected.
    #[test]
    fn test_authenticate_wrong_secret() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(test_iam_json().as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        assert!(svc.authenticate("AKIAIOSFODNN7EXAMPLE", "wrongsecret").is_none());
    }

    // Unknown keys resolve to neither a secret nor a principal.
    #[test]
    fn test_unknown_key_returns_none() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(test_iam_json().as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        assert!(svc.get_secret_key("NONEXISTENTKEY").is_none());
        assert!(svc.get_principal("NONEXISTENTKEY").is_none());
    }

    // A disabled user's keys never authenticate, even when "active".
    #[test]
    fn test_disabled_user() {
        let json = serde_json::json!({
            "version": 2,
            "users": [{
                "user_id": "u-disabled",
                "display_name": "disabled-user",
                "enabled": false,
                "access_keys": [{
                    "access_key": "DISABLED_KEY",
                    "secret_key": "secret123",
                    "status": "active"
                }],
                "policies": []
            }]
        })
        .to_string();

        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(json.as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        assert!(svc.get_secret_key("DISABLED_KEY").is_none());
    }

    // An inactive key never authenticates, even for an enabled user.
    #[test]
    fn test_inactive_key() {
        let json = serde_json::json!({
            "version": 2,
            "users": [{
                "user_id": "u-test",
                "display_name": "test",
                "enabled": true,
                "access_keys": [{
                    "access_key": "INACTIVE_KEY",
                    "secret_key": "secret123",
                    "status": "inactive"
                }],
                "policies": []
            }]
        })
        .to_string();

        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(json.as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        assert!(svc.get_secret_key("INACTIVE_KEY").is_none());
    }

    // Legacy v1 flat user records are normalized and remain usable.
    #[test]
    fn test_v1_flat_format() {
        let json = serde_json::json!({
            "users": [{
                "access_key": "test",
                "secret_key": "secret",
                "display_name": "Test User",
                "policies": [{"bucket": "*", "actions": ["*"], "prefix": "*"}]
            }]
        })
        .to_string();

        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(json.as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        let secret = svc.get_secret_key("test");
        assert_eq!(secret.unwrap(), "secret");

        let principal = svc.get_principal("test").unwrap();
        assert_eq!(principal.display_name, "Test User");
        assert!(principal.is_admin);
    }

    // Policy gating: bucket, action, and prefix must all match; any
    // mismatch denies.
    #[test]
    fn test_authorize_allows_matching_policy() {
        let json = serde_json::json!({
            "version": 2,
            "users": [{
                "user_id": "u-reader",
                "display_name": "reader",
                "enabled": true,
                "access_keys": [{
                    "access_key": "READER_KEY",
                    "secret_key": "reader-secret",
                    "status": "active"
                }],
                "policies": [{
                    "bucket": "docs",
                    "actions": ["read"],
                    "prefix": "reports/"
                }]
            }]
        })
        .to_string();

        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(json.as_bytes()).unwrap();
        tmp.flush().unwrap();

        let svc = IamService::new(tmp.path().to_path_buf());
        let principal = svc.get_principal("READER_KEY").unwrap();

        assert!(svc.authorize(
            &principal,
            Some("docs"),
            "read",
            Some("reports/2026.csv"),
        ));
        assert!(!svc.authorize(
            &principal,
            Some("docs"),
            "write",
            Some("reports/2026.csv"),
        ));
        assert!(!svc.authorize(
            &principal,
            Some("docs"),
            "read",
            Some("private/2026.csv"),
        ));
        assert!(!svc.authorize(
            &principal,
            Some("other"),
            "read",
            Some("reports/2026.csv"),
        ));
    }
}
|
||||
@@ -1,4 +0,0 @@
|
||||
// AWS Signature V4 verification and signing-key derivation.
pub mod sigv4;
// Principal type re-export (see principal.rs).
pub mod principal;
// File-backed IAM store with hot reload.
pub mod iam;
// Internal only: Fernet-compatible encrypt/decrypt helpers.
mod fernet;
|
||||
@@ -1 +0,0 @@
|
||||
// Re-export the shared Principal so callers can use `crate::principal::Principal`.
pub use myfsio_common::types::Principal;
|
||||
@@ -1,258 +0,0 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;

/// A cached signing key plus its creation time (for TTL expiry).
struct CacheEntry {
    // Derived AWS4 signing key bytes.
    key: Vec<u8>,
    // When the key was derived; compared against CACHE_TTL_SECS.
    created: Instant,
}
|
||||
|
||||
// LRU cache keyed by (secret, date_stamp, region, service) — i.e. one entry
// per credential scope — bounded at 256 entries.
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));

// Entries older than this are re-derived even on a cache hit.
const CACHE_TTL_SECS: u64 = 60;

// Percent-encode everything except the RFC 3986 unreserved characters,
// matching AWS SigV4 URI encoding (note: '/' IS encoded, so this set is
// for query-string components, not paths).
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
    .remove(b'-')
    .remove(b'_')
    .remove(b'.')
    .remove(b'~');
|
||||
|
||||
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
|
||||
mac.update(msg);
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
fn sha256_hex(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
/// Percent-encode a string per AWS SigV4 rules (unreserved chars pass through).
fn aws_uri_encode(input: &str) -> String {
    percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}
|
||||
|
||||
/// Derive the SigV4 signing key for (secret, date, region, service),
/// memoized in a TTL-bounded LRU so repeated requests within the same
/// credential scope skip the four HMAC rounds.
pub fn derive_signing_key_cached(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    let cache_key = (
        secret_key.to_owned(),
        date_stamp.to_owned(),
        region.to_owned(),
        service.to_owned(),
    );

    // Fast path: fresh cache hit. Stale entries are evicted eagerly so the
    // slow path below repopulates them.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        if let Some(entry) = cache.get(&cache_key) {
            if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
                return entry.key.clone();
            }
            cache.pop(&cache_key);
        }
    }

    // Standard AWS4 derivation chain:
    // kDate = HMAC("AWS4"+secret, date); then region, service, "aws4_request".
    let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, service.as_bytes());
    let k_signing = hmac_sha256(&k_service, b"aws4_request");

    // Store outside the derivation to keep the lock hold time minimal.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        cache.put(
            cache_key,
            CacheEntry {
                key: k_signing.clone(),
                created: Instant::now(),
            },
        );
    }

    k_signing
}
|
||||
|
||||
/// Timing-safe byte-slice equality: accumulates XOR differences over every
/// byte instead of short-circuiting (the length check is the only early exit,
/// and length is not secret).
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter().zip(b.iter()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}
|
||||
|
||||
/// Rebuild the SigV4 canonical request / string-to-sign from the parsed
/// request parts, compute the expected signature, and compare it to the
/// client-provided one in constant time.
///
/// `header_values` must be the signed headers in the order listed by
/// `signed_headers_str`; `payload_hash` is the x-amz-content-sha256 value
/// (or "UNSIGNED-PAYLOAD").
pub fn verify_sigv4_signature(
    method: &str,
    canonical_uri: &str,
    query_params: &[(String, String)],
    signed_headers_str: &str,
    header_values: &[(String, String)],
    payload_hash: &str,
    amz_date: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
    secret_key: &str,
    provided_signature: &str,
) -> bool {
    // Canonical query string: params sorted by name then value, both
    // AWS-percent-encoded.
    let mut sorted_params = query_params.to_vec();
    sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));

    let canonical_query_string = sorted_params
        .iter()
        .map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
        .collect::<Vec<_>>()
        .join("&");

    // Canonical headers: lowercase name, whitespace-collapsed value, one
    // per line. An empty Expect header is treated as "100-continue"
    // (clients sign the value even though proxies may strip the header).
    let mut canonical_headers = String::new();
    for (name, value) in header_values {
        let lower_name = name.to_lowercase();
        let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
        let final_value = if lower_name == "expect" && normalized.is_empty() {
            "100-continue"
        } else {
            &normalized
        };
        canonical_headers.push_str(&lower_name);
        canonical_headers.push(':');
        canonical_headers.push_str(final_value);
        canonical_headers.push('\n');
    }

    // Exact SigV4 canonical-request layout — every byte matters.
    let canonical_request = format!(
        "{}\n{}\n{}\n{}\n{}\n{}",
        method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str,
        payload_hash
    );

    let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    );

    let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
    let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let calculated_hex = hex::encode(&calculated);

    // Constant-time compare so a mismatch position is not observable.
    constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
|
||||
|
||||
/// Public alias for `derive_signing_key_cached` (kept for API stability).
pub fn derive_signing_key(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    derive_signing_key_cached(secret_key, date_stamp, region, service)
}
|
||||
|
||||
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
|
||||
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
|
||||
hex::encode(sig)
|
||||
}
|
||||
|
||||
pub fn build_string_to_sign(
|
||||
amz_date: &str,
|
||||
credential_scope: &str,
|
||||
canonical_request: &str,
|
||||
) -> String {
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
)
|
||||
}
|
||||
|
||||
/// Timing-safe string equality (see `constant_time_compare_inner`).
pub fn constant_time_compare(a: &str, b: &str) -> bool {
    constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}
|
||||
|
||||
/// Drop every cached signing key (e.g. after credentials are rotated).
pub fn clear_signing_key_cache() {
    SIGNING_KEY_CACHE.lock().clear();
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // The derived AWS4 key is always a 32-byte HMAC-SHA256 output.
    #[test]
    fn test_derive_signing_key() {
        let key = derive_signing_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", "20130524", "us-east-1", "s3");
        assert_eq!(key.len(), 32);
    }

    // Cache hit must return the same key as the original derivation.
    #[test]
    fn test_derive_signing_key_cached() {
        let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
        let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
        assert_eq!(key1, key2);
    }

    // Equal, unequal, and length-mismatched inputs.
    #[test]
    fn test_constant_time_compare() {
        assert!(constant_time_compare("abc", "abc"));
        assert!(!constant_time_compare("abc", "abd"));
        assert!(!constant_time_compare("abc", "abcd"));
    }

    // String-to-sign carries the fixed algorithm header and the timestamp.
    #[test]
    fn test_build_string_to_sign() {
        let result = build_string_to_sign("20130524T000000Z", "20130524/us-east-1/s3/aws4_request", "GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD");
        assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
        assert!(result.contains("20130524T000000Z"));
    }

    // Unreserved chars pass through; space and '/' are percent-encoded.
    #[test]
    fn test_aws_uri_encode() {
        assert_eq!(aws_uri_encode("hello world"), "hello%20world");
        assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
        assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
    }

    // Sign with the helpers, then verify with the verifier: must round-trip.
    #[test]
    fn test_verify_sigv4_roundtrip() {
        let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let date_stamp = "20130524";
        let region = "us-east-1";
        let service = "s3";
        let amz_date = "20130524T000000Z";

        let signing_key = derive_signing_key(secret, date_stamp, region, service);

        let canonical_request = "GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
        let string_to_sign = build_string_to_sign(amz_date, &format!("{}/{}/{}/aws4_request", date_stamp, region, service), canonical_request);

        let signature = compute_signature(&signing_key, &string_to_sign);

        let result = verify_sigv4_signature(
            "GET",
            "/",
            &[],
            "host",
            &[("host".to_string(), "examplebucket.s3.amazonaws.com".to_string())],
            "UNSIGNED-PAYLOAD",
            amz_date,
            date_stamp,
            region,
            service,
            secret,
            &signature,
        );
        assert!(result);
    }
}
|
||||
@@ -1,11 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-common"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
@@ -1,20 +0,0 @@
|
||||
// Layout of the server's internal bookkeeping directory tree.
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
pub const BUCKET_META_DIR: &str = "meta";
pub const BUCKET_VERSIONS_DIR: &str = "versions";
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
pub const STATS_FILE: &str = "stats.json";
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
pub const INDEX_FILE: &str = "_index.json";
pub const MANIFEST_FILE: &str = "manifest.json";

// Folder names that must never be exposed as user objects.
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

// Defaults used when building/validating SigV4 credential scopes.
pub const DEFAULT_REGION: &str = "us-east-1";
pub const AWS_SERVICE: &str = "s3";

// Listing and transfer tuning knobs.
pub const DEFAULT_MAX_KEYS: usize = 1000;
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;
||||
@@ -1,221 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
/// S3-compatible error codes; each maps to an HTTP status, a wire-format
/// code string, and a default message (see the `impl` below).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S3ErrorCode {
    AccessDenied,
    BucketAlreadyExists,
    BucketNotEmpty,
    EntityTooLarge,
    InternalError,
    InvalidAccessKeyId,
    InvalidArgument,
    InvalidBucketName,
    InvalidKey,
    InvalidRange,
    InvalidRequest,
    MalformedXML,
    MethodNotAllowed,
    NoSuchBucket,
    NoSuchKey,
    NoSuchUpload,
    NoSuchVersion,
    NoSuchTagSet,
    PreconditionFailed,
    NotModified,
    QuotaExceeded,
    SignatureDoesNotMatch,
    SlowDown,
}
|
||||
|
||||
impl S3ErrorCode {
    /// HTTP status code this error is served with.
    pub fn http_status(&self) -> u16 {
        match self {
            Self::AccessDenied => 403,
            Self::BucketAlreadyExists => 409,
            Self::BucketNotEmpty => 409,
            Self::EntityTooLarge => 413,
            Self::InternalError => 500,
            Self::InvalidAccessKeyId => 403,
            Self::InvalidArgument => 400,
            Self::InvalidBucketName => 400,
            Self::InvalidKey => 400,
            Self::InvalidRange => 416,
            Self::InvalidRequest => 400,
            Self::MalformedXML => 400,
            Self::MethodNotAllowed => 405,
            Self::NoSuchBucket => 404,
            Self::NoSuchKey => 404,
            Self::NoSuchUpload => 404,
            Self::NoSuchVersion => 404,
            Self::NoSuchTagSet => 404,
            Self::PreconditionFailed => 412,
            Self::NotModified => 304,
            Self::QuotaExceeded => 403,
            Self::SignatureDoesNotMatch => 403,
            Self::SlowDown => 429,
        }
    }

    /// Wire-format `<Code>` string used in the XML error body.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::AccessDenied => "AccessDenied",
            Self::BucketAlreadyExists => "BucketAlreadyExists",
            Self::BucketNotEmpty => "BucketNotEmpty",
            Self::EntityTooLarge => "EntityTooLarge",
            Self::InternalError => "InternalError",
            Self::InvalidAccessKeyId => "InvalidAccessKeyId",
            Self::InvalidArgument => "InvalidArgument",
            Self::InvalidBucketName => "InvalidBucketName",
            Self::InvalidKey => "InvalidKey",
            Self::InvalidRange => "InvalidRange",
            Self::InvalidRequest => "InvalidRequest",
            Self::MalformedXML => "MalformedXML",
            Self::MethodNotAllowed => "MethodNotAllowed",
            Self::NoSuchBucket => "NoSuchBucket",
            Self::NoSuchKey => "NoSuchKey",
            Self::NoSuchUpload => "NoSuchUpload",
            Self::NoSuchVersion => "NoSuchVersion",
            Self::NoSuchTagSet => "NoSuchTagSet",
            Self::PreconditionFailed => "PreconditionFailed",
            Self::NotModified => "NotModified",
            Self::QuotaExceeded => "QuotaExceeded",
            Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
            Self::SlowDown => "SlowDown",
        }
    }

    /// Human-readable message used when the caller supplies none.
    pub fn default_message(&self) -> &'static str {
        match self {
            Self::AccessDenied => "Access Denied",
            Self::BucketAlreadyExists => "The requested bucket name is not available",
            Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
            Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
            Self::InternalError => "We encountered an internal error. Please try again.",
            Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
            Self::InvalidArgument => "Invalid argument",
            Self::InvalidBucketName => "The specified bucket is not valid",
            Self::InvalidKey => "The specified key is not valid",
            Self::InvalidRange => "The requested range is not satisfiable",
            Self::InvalidRequest => "Invalid request",
            Self::MalformedXML => "The XML you provided was not well-formed",
            Self::MethodNotAllowed => "The specified method is not allowed against this resource",
            Self::NoSuchBucket => "The specified bucket does not exist",
            Self::NoSuchKey => "The specified key does not exist",
            Self::NoSuchUpload => "The specified multipart upload does not exist",
            Self::NoSuchVersion => "The specified version does not exist",
            Self::NoSuchTagSet => "The TagSet does not exist",
            Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
            Self::NotModified => "Not Modified",
            Self::QuotaExceeded => "The bucket quota has been exceeded",
            Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
            Self::SlowDown => "Please reduce your request rate",
        }
    }
}
|
||||
|
||||
impl fmt::Display for S3ErrorCode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
/// A concrete S3 error response: a code plus the contextual fields that
/// appear in the XML error document.
#[derive(Debug, Clone)]
pub struct S3Error {
    /// Machine-readable error code.
    pub code: S3ErrorCode,
    /// Human-readable message (defaults to the code's canonical message).
    pub message: String,
    /// Resource path the error refers to; empty if not set.
    pub resource: String,
    /// Request id of the failing request; empty if not set.
    pub request_id: String,
}
|
||||
|
||||
impl S3Error {
    /// Create an error with an explicit message; resource and request id
    /// start empty and can be filled with the builder methods below.
    pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
        Self {
            code,
            message: message.into(),
            resource: String::new(),
            request_id: String::new(),
        }
    }

    /// Create an error using the code's canonical default message.
    pub fn from_code(code: S3ErrorCode) -> Self {
        Self::new(code, code.default_message())
    }

    /// Builder: attach the resource path the error refers to.
    pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
        self.resource = resource.into();
        self
    }

    /// Builder: attach the id of the failing request.
    pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
        self.request_id = request_id.into();
        self
    }

    /// HTTP status associated with this error's code.
    pub fn http_status(&self) -> u16 {
        self.code.http_status()
    }

    /// Render the S3-style XML error document. Dynamic fields are passed
    /// through `xml_escape`; the code itself is a fixed identifier and
    /// needs no escaping.
    pub fn to_xml(&self) -> String {
        format!(
            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
            <Error>\
            <Code>{}</Code>\
            <Message>{}</Message>\
            <Resource>{}</Resource>\
            <RequestId>{}</RequestId>\
            </Error>",
            self.code.as_str(),
            xml_escape(&self.message),
            xml_escape(&self.resource),
            xml_escape(&self.request_id),
        )
    }
}
|
||||
|
||||
impl fmt::Display for S3Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}: {}", self.code, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: S3Error carries its own message via Display; no source() override needed.
impl std::error::Error for S3Error {}
|
||||
|
||||
/// Escape the five XML special characters so arbitrary strings can be
/// embedded safely in XML element content or attribute values.
///
/// NOTE: `&` must be replaced first — otherwise the ampersands introduced
/// by the other replacements would themselves be escaped.
/// (As rendered in the diff view the entities had been un-escaped back to
/// the bare characters, making the function a no-op; this restores them.)
fn xml_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&apos;")
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_error_codes() {
|
||||
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
|
||||
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
|
||||
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_to_xml() {
|
||||
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
|
||||
.with_resource("/test-bucket/test-key")
|
||||
.with_request_id("abc123");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<Code>NoSuchKey</Code>"));
|
||||
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
|
||||
assert!(xml.contains("<RequestId>abc123</RequestId>"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_xml_escape() {
|
||||
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
|
||||
.with_resource("/bucket/key&");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<test>"));
|
||||
assert!(xml.contains("&"));
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
pub mod constants;
|
||||
pub mod error;
|
||||
pub mod types;
|
||||
@@ -1,176 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Persisted metadata for a stored object.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
    /// Object key within its bucket.
    pub key: String,
    /// Object size in bytes.
    pub size: u64,
    /// Last-modified timestamp (UTC).
    pub last_modified: DateTime<Utc>,
    /// ETag, if computed.
    pub etag: Option<String>,
    /// MIME content type, if known.
    pub content_type: Option<String>,
    /// Storage class label (defaults to "STANDARD" in `new`).
    pub storage_class: Option<String>,
    /// User-supplied metadata headers.
    pub metadata: HashMap<String, String>,
}
|
||||
|
||||
impl ObjectMeta {
    /// Construct metadata with the required fields; optional fields start
    /// empty except storage class, which defaults to "STANDARD".
    pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
        Self {
            key,
            size,
            last_modified,
            etag: None,
            content_type: None,
            storage_class: Some("STANDARD".to_string()),
            metadata: HashMap::new(),
        }
    }
}
|
||||
|
||||
/// Persisted metadata for a bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
    /// Bucket name.
    pub name: String,
    /// Creation timestamp (UTC).
    pub creation_date: DateTime<Utc>,
}
|
||||
|
||||
/// Aggregate usage counters for a bucket, split between current objects
/// and non-current (archived) versions.
#[derive(Debug, Clone, Default)]
pub struct BucketStats {
    /// Number of current objects.
    pub objects: u64,
    /// Bytes held by current objects.
    pub bytes: u64,
    /// Number of non-current versions.
    pub version_count: u64,
    /// Bytes held by non-current versions.
    pub version_bytes: u64,
}
|
||||
|
||||
impl BucketStats {
|
||||
pub fn total_objects(&self) -> u64 {
|
||||
self.objects + self.version_count
|
||||
}
|
||||
|
||||
pub fn total_bytes(&self) -> u64 {
|
||||
self.bytes + self.version_bytes
|
||||
}
|
||||
}
|
||||
|
||||
/// Result page for a flat (non-delimited) object listing.
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
    /// Objects on this page.
    pub objects: Vec<ObjectMeta>,
    /// True when more results remain after this page.
    pub is_truncated: bool,
    /// Token to fetch the next page, when truncated.
    pub next_continuation_token: Option<String>,
}
|
||||
|
||||
/// Result page for a delimited ("shallow") listing that groups keys under
/// common prefixes.
#[derive(Debug, Clone)]
pub struct ShallowListResult {
    /// Objects directly under the requested prefix.
    pub objects: Vec<ObjectMeta>,
    /// Prefixes grouping deeper keys (directory-like entries).
    pub common_prefixes: Vec<String>,
    /// True when more results remain after this page.
    pub is_truncated: bool,
    /// Token to fetch the next page, when truncated.
    pub next_continuation_token: Option<String>,
}
|
||||
|
||||
/// Parameters for a flat object listing.
#[derive(Debug, Clone, Default)]
pub struct ListParams {
    /// Maximum keys to return in one page.
    pub max_keys: usize,
    /// Continuation token from a previous truncated page.
    pub continuation_token: Option<String>,
    /// Only list keys beginning with this prefix.
    pub prefix: Option<String>,
    /// Start listing strictly after this key.
    pub start_after: Option<String>,
}
|
||||
|
||||
/// Parameters for a delimited ("shallow") listing.
#[derive(Debug, Clone, Default)]
pub struct ShallowListParams {
    /// Only list keys beginning with this prefix.
    pub prefix: String,
    /// Delimiter used to group keys into common prefixes (typically "/").
    pub delimiter: String,
    /// Maximum keys to return in one page.
    pub max_keys: usize,
    /// Continuation token from a previous truncated page.
    pub continuation_token: Option<String>,
}
|
||||
|
||||
/// Persisted metadata for one uploaded multipart part.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PartMeta {
    /// 1-based part number.
    pub part_number: u32,
    /// ETag of the part's data.
    pub etag: String,
    /// Part size in bytes.
    pub size: u64,
    /// Upload timestamp, if recorded.
    pub last_modified: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// Part reference as supplied by a CompleteMultipartUpload request.
#[derive(Debug, Clone)]
pub struct PartInfo {
    /// 1-based part number.
    pub part_number: u32,
    /// ETag the client expects for the part.
    pub etag: String,
}
|
||||
|
||||
/// An in-progress multipart upload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultipartUploadInfo {
    /// Server-assigned upload id.
    pub upload_id: String,
    /// Destination object key.
    pub key: String,
    /// When the upload was initiated (UTC).
    pub initiated: DateTime<Utc>,
}
|
||||
|
||||
/// A single version of an object in a versioned bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
    /// Version identifier.
    pub version_id: String,
    /// Object key.
    pub key: String,
    /// Version size in bytes.
    pub size: u64,
    /// Last-modified timestamp (UTC).
    pub last_modified: DateTime<Utc>,
    /// ETag, if computed.
    pub etag: Option<String>,
    /// True when this is the current version.
    pub is_latest: bool,
}
|
||||
|
||||
/// A single key/value tag, as used in bucket and object tag sets.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
    /// Tag key.
    pub key: String,
    /// Tag value.
    pub value: String,
}
|
||||
|
||||
/// Per-bucket configuration. Most sub-configurations are stored as opaque
/// JSON so new features can round-trip without schema changes; each field
/// is `#[serde(default)]` so older serialized configs still deserialize.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
    /// Whether object versioning is enabled.
    #[serde(default)]
    pub versioning_enabled: bool,
    /// Bucket tag set.
    #[serde(default)]
    pub tags: Vec<Tag>,
    /// CORS configuration (opaque JSON).
    #[serde(default)]
    pub cors: Option<serde_json::Value>,
    /// Default-encryption configuration (opaque JSON).
    #[serde(default)]
    pub encryption: Option<serde_json::Value>,
    /// Lifecycle rules (opaque JSON).
    #[serde(default)]
    pub lifecycle: Option<serde_json::Value>,
    /// Static-website configuration (opaque JSON).
    #[serde(default)]
    pub website: Option<serde_json::Value>,
    /// Storage quota limits, if set.
    #[serde(default)]
    pub quota: Option<QuotaConfig>,
    /// Access-control list (opaque JSON).
    #[serde(default)]
    pub acl: Option<serde_json::Value>,
    /// Event-notification configuration (opaque JSON).
    #[serde(default)]
    pub notification: Option<serde_json::Value>,
    /// Access-logging configuration (opaque JSON).
    #[serde(default)]
    pub logging: Option<serde_json::Value>,
    /// Object-lock configuration (opaque JSON).
    #[serde(default)]
    pub object_lock: Option<serde_json::Value>,
    /// Bucket policy document (opaque JSON).
    #[serde(default)]
    pub policy: Option<serde_json::Value>,
    /// Replication configuration (opaque JSON).
    #[serde(default)]
    pub replication: Option<serde_json::Value>,
}
|
||||
|
||||
/// Optional per-bucket quota limits; `None` means unlimited.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
    /// Maximum total bytes allowed in the bucket.
    pub max_bytes: Option<u64>,
    /// Maximum object count allowed in the bucket.
    pub max_objects: Option<u64>,
}
|
||||
|
||||
/// The authenticated identity making a request.
#[derive(Debug, Clone)]
pub struct Principal {
    /// Access key the request was signed with.
    pub access_key: String,
    /// Internal user identifier.
    pub user_id: String,
    /// Display name for listings and ownership.
    pub display_name: String,
    /// True when the principal has administrative privileges.
    pub is_admin: bool,
}
|
||||
|
||||
impl Principal {
    /// Construct a principal from its identity fields.
    pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
        Self {
            access_key,
            user_id,
            display_name,
            is_admin,
        }
    }
}
|
||||
@@ -1,24 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-crypto"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
md-5 = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
aes-gcm = { workspace = true }
|
||||
hkdf = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
rand = "0.8"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
|
||||
tempfile = "3"
|
||||
@@ -1,238 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Seek, SeekFrom, Write};
|
||||
use std::path::Path;
|
||||
use thiserror::Error;
|
||||
|
||||
const DEFAULT_CHUNK_SIZE: usize = 65536;
|
||||
const HEADER_SIZE: usize = 4;
|
||||
|
||||
/// Errors produced by the chunked AES-256-GCM streaming codec.
#[derive(Debug, Error)]
pub enum CryptoError {
    /// Underlying filesystem or stream I/O failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// A key was supplied that is not exactly 32 bytes (AES-256).
    #[error("Invalid key size: expected 32 bytes, got {0}")]
    InvalidKeySize(usize),
    /// A base nonce was supplied that is not exactly 12 bytes (GCM).
    #[error("Invalid nonce size: expected 12 bytes, got {0}")]
    InvalidNonceSize(usize),
    /// AEAD encryption failed; carries the cipher's error text.
    #[error("Encryption failed: {0}")]
    EncryptionFailed(String),
    /// AEAD authentication/decryption failed on the given chunk index
    /// (wrong key, wrong nonce, or tampered ciphertext).
    #[error("Decryption failed at chunk {0}")]
    DecryptionFailed(u32),
    /// HKDF output expansion failed while deriving a per-chunk nonce.
    #[error("HKDF expand failed: {0}")]
    HkdfFailed(String),
}
|
||||
|
||||
/// Fill `buf` as far as the reader allows, retrying on `Interrupted`.
/// Returns the number of bytes actually read; the count is short of
/// `buf.len()` only when EOF is reached.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        let n = match reader.read(&mut buf[total..]) {
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
            Ok(n) => n,
        };
        if n == 0 {
            break; // EOF
        }
        total += n;
    }
    Ok(total)
}
|
||||
|
||||
/// Derive a unique 12-byte GCM nonce for one chunk via HKDF-SHA256:
/// salt = base nonce, IKM = the literal "chunk_nonce", info = the
/// big-endian chunk index. The derivation is deterministic, so the same
/// base nonce and index always yield the same nonce — this exact scheme
/// is part of the on-disk format and must not change.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
    Ok(okm)
}
|
||||
|
||||
/// Encrypt `input_path` to `output_path` with chunked AES-256-GCM.
///
/// On-disk format: a 4-byte big-endian chunk count header, then for each
/// chunk a 4-byte big-endian ciphertext length followed by the ciphertext
/// (plaintext chunk + GCM tag). Each chunk uses a nonce derived from
/// `base_nonce` and its index (see `derive_chunk_nonce`).
///
/// `key` must be 32 bytes and `base_nonce` 12 bytes; `chunk_size` defaults
/// to `DEFAULT_CHUNK_SIZE`. Returns the number of chunks written.
pub fn encrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }

    let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
    // Lengths were validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());

    let mut infile = File::open(input_path)?;
    let mut outfile = File::create(output_path)?;

    // Placeholder chunk-count header; backpatched after the loop once the
    // real count is known.
    outfile.write_all(&[0u8; 4])?;

    let mut buf = vec![0u8; chunk_size];
    let mut chunk_index: u32 = 0;

    loop {
        let n = read_exact_chunk(&mut infile, &mut buf)?;
        if n == 0 {
            break;
        }

        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);

        let encrypted = cipher
            .encrypt(nonce, &buf[..n])
            .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

        // Per-chunk framing: length prefix then ciphertext (incl. GCM tag).
        let size = encrypted.len() as u32;
        outfile.write_all(&size.to_be_bytes())?;
        outfile.write_all(&encrypted)?;

        chunk_index += 1;
    }

    // Backpatch the header with the actual chunk count.
    outfile.seek(SeekFrom::Start(0))?;
    outfile.write_all(&chunk_index.to_be_bytes())?;

    Ok(chunk_index)
}
|
||||
|
||||
/// Decrypt a file written by `encrypt_stream_chunked` back to plaintext.
///
/// Reads the 4-byte chunk-count header, then per chunk its 4-byte length
/// prefix and ciphertext, authenticating each chunk with the nonce derived
/// from `base_nonce` and the chunk index. Returns the chunk count.
///
/// Fails with `DecryptionFailed(i)` on the first chunk whose GCM tag does
/// not verify (wrong key/nonce or tampering).
///
/// NOTE(review): the per-chunk length prefix is trusted for allocation
/// (`vec![0u8; chunk_size]`); a corrupted/hostile file can request up to a
/// 4 GiB buffer before authentication fails — consider bounding it.
pub fn decrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }

    // Lengths were validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());

    let mut infile = File::open(input_path)?;
    let mut outfile = File::create(output_path)?;

    let mut header = [0u8; HEADER_SIZE];
    infile.read_exact(&mut header)?;
    let chunk_count = u32::from_be_bytes(header);

    let mut size_buf = [0u8; HEADER_SIZE];
    for chunk_index in 0..chunk_count {
        infile.read_exact(&mut size_buf)?;
        let chunk_size = u32::from_be_bytes(size_buf) as usize;

        let mut encrypted = vec![0u8; chunk_size];
        infile.read_exact(&mut encrypted)?;

        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);

        let decrypted = cipher
            .decrypt(nonce, encrypted.as_ref())
            .map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;

        outfile.write_all(&decrypted)?;
    }

    Ok(chunk_count)
}
|
||||
|
||||
/// Async wrapper around `encrypt_stream_chunked`: runs the blocking file
/// encryption on Tokio's blocking thread pool. A join failure (panicked or
/// cancelled task) is surfaced as `CryptoError::Io`.
pub async fn encrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
    // Owned copies so the closure can be 'static.
    let input_path = input_path.to_owned();
    let output_path = output_path.to_owned();
    let key = key.to_vec();
    let base_nonce = base_nonce.to_vec();
    tokio::task::spawn_blocking(move || {
        encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
    })
    .await
    .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
|
||||
|
||||
/// Async wrapper around `decrypt_stream_chunked`: runs the blocking file
/// decryption on Tokio's blocking thread pool. A join failure (panicked or
/// cancelled task) is surfaced as `CryptoError::Io`.
pub async fn decrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    // Owned copies so the closure can be 'static.
    let input_path = input_path.to_owned();
    let output_path = output_path.to_owned();
    let key = key.to_vec();
    let base_nonce = base_nonce.to_vec();
    tokio::task::spawn_blocking(move || {
        decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
    })
    .await
    .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as IoWrite;

    /// Encrypt then decrypt with a tiny chunk size to force multiple chunks.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
        std::fs::File::create(&input).unwrap().write_all(data).unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];

        // chunk_size=16 splits the payload into several chunks.
        let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
        assert!(chunks > 0);

        let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
        assert_eq!(chunks, chunks2);

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// A 16-byte key must be rejected before any I/O happens.
    #[test]
    fn test_invalid_key_size() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        std::fs::File::create(&input).unwrap().write_all(b"test").unwrap();

        let result = encrypt_stream_chunked(&input, &dir.path().join("out"), &[0u8; 16], &[0u8; 12], None);
        assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
    }

    /// GCM authentication must fail when decrypting with the wrong key.
    #[test]
    fn test_wrong_key_fails_decrypt() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        std::fs::File::create(&input).unwrap().write_all(b"secret data").unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();

        let wrong_key = [0x43u8; 32];
        let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
        assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
    }
}
|
||||
@@ -1,375 +0,0 @@
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use rand::RngCore;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use crate::aes_gcm::{
|
||||
encrypt_stream_chunked, decrypt_stream_chunked, CryptoError,
|
||||
};
|
||||
use crate::kms::KmsService;
|
||||
|
||||
/// Server-side-encryption mode requested for an object.
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
    /// SSE-S3: server-managed key (AES256).
    Aes256,
    /// SSE-KMS: data key wrapped by a KMS key.
    AwsKms,
    /// SSE-C: customer supplies the 32-byte key per request.
    CustomerProvided,
}
|
||||
|
||||
impl SseAlgorithm {
    /// Header value reported for this mode. Note SSE-C is also reported as
    /// "AES256", matching the x-amz-server-side-encryption-customer-algorithm
    /// convention.
    pub fn as_str(&self) -> &'static str {
        match self {
            SseAlgorithm::Aes256 => "AES256",
            SseAlgorithm::AwsKms => "aws:kms",
            SseAlgorithm::CustomerProvided => "AES256",
        }
    }
}
|
||||
|
||||
/// Per-request encryption parameters chosen by the caller.
#[derive(Debug, Clone)]
pub struct EncryptionContext {
    /// Which SSE mode to use.
    pub algorithm: SseAlgorithm,
    /// KMS key id; required when `algorithm` is `AwsKms`.
    pub kms_key_id: Option<String>,
    /// Raw 32-byte customer key; required when `algorithm` is `CustomerProvided`.
    pub customer_key: Option<Vec<u8>>,
}
|
||||
|
||||
/// Encryption details persisted alongside an object so it can later be
/// decrypted. Serialized into the object's metadata map (see
/// `to_metadata_map` / `from_metadata`).
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
    /// Algorithm label ("AES256" or "aws:kms").
    pub algorithm: String,
    /// Base64-encoded 12-byte base nonce.
    pub nonce: String,
    /// Base64-encoded wrapped data key (absent for SSE-C).
    pub encrypted_data_key: Option<String>,
    /// KMS key id used to wrap the data key (SSE-KMS only).
    pub kms_key_id: Option<String>,
}
|
||||
|
||||
impl EncryptionMetadata {
    /// Serialize into metadata-map entries. The key strings are part of the
    /// stored format and must stay in sync with `from_metadata` and
    /// `clean_metadata`.
    pub fn to_metadata_map(&self) -> HashMap<String, String> {
        let mut map = HashMap::new();
        map.insert(
            "x-amz-server-side-encryption".to_string(),
            self.algorithm.clone(),
        );
        map.insert("x-amz-encryption-nonce".to_string(), self.nonce.clone());
        if let Some(ref dk) = self.encrypted_data_key {
            map.insert("x-amz-encrypted-data-key".to_string(), dk.clone());
        }
        if let Some(ref kid) = self.kms_key_id {
            map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
        }
        map
    }

    /// Reconstruct from a metadata map; `None` when the algorithm or nonce
    /// entry is missing (i.e. the object is not encrypted).
    pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
        let algorithm = meta.get("x-amz-server-side-encryption")?;
        let nonce = meta.get("x-amz-encryption-nonce")?;
        Some(Self {
            algorithm: algorithm.clone(),
            nonce: nonce.clone(),
            encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
            kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
        })
    }

    /// True when the metadata map marks the object as encrypted.
    pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
        meta.contains_key("x-amz-server-side-encryption")
    }

    /// Strip all encryption-related entries from a metadata map (used when
    /// returning user-visible metadata).
    pub fn clean_metadata(meta: &mut HashMap<String, String>) {
        meta.remove("x-amz-server-side-encryption");
        meta.remove("x-amz-encryption-nonce");
        meta.remove("x-amz-encrypted-data-key");
        meta.remove("x-amz-encryption-key-id");
    }
}
|
||||
|
||||
/// Envelope-encryption service: generates per-object data keys, wraps them
/// under a master key (or delegates wrapping to KMS), and drives the
/// chunked AES-GCM file codec.
pub struct EncryptionService {
    /// 32-byte master key used to wrap/unwrap SSE-S3 data keys.
    master_key: [u8; 32],
    /// Optional KMS backend for SSE-KMS key wrapping.
    kms: Option<std::sync::Arc<KmsService>>,
}
|
||||
|
||||
impl EncryptionService {
    /// Create a service from a master key and an optional KMS backend.
    pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
        Self { master_key, kms }
    }

    /// Generate a fresh random (32-byte data key, 12-byte base nonce) pair.
    /// NOTE(review): uses `rand::thread_rng` — presumably a CSPRNG per the
    /// rand crate's ThreadRng; confirm the pinned rand version guarantees it.
    pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
        let mut data_key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut data_key);
        rand::thread_rng().fill_bytes(&mut nonce);
        (data_key, nonce)
    }

    /// Wrap (encrypt) a data key under the master key with AES-256-GCM.
    /// Output is base64 of: 12-byte random nonce || ciphertext+tag.
    pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let cipher = Aes256Gcm::new((&self.master_key).into());
        let mut nonce_bytes = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut nonce_bytes);
        let nonce = Nonce::from_slice(&nonce_bytes);

        let encrypted = cipher
            .encrypt(nonce, data_key.as_slice())
            .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

        // Prepend the nonce so unwrap_data_key can recover it.
        let mut combined = Vec::with_capacity(12 + encrypted.len());
        combined.extend_from_slice(&nonce_bytes);
        combined.extend_from_slice(&encrypted);
        Ok(B64.encode(&combined))
    }

    /// Reverse of `wrap_data_key`: base64-decode, split off the 12-byte
    /// nonce, decrypt under the master key, and check the result is a
    /// 32-byte key.
    pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let combined = B64.decode(wrapped_b64).map_err(|e| {
            CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
        })?;
        if combined.len() < 12 {
            return Err(CryptoError::EncryptionFailed(
                "Wrapped key too short".to_string(),
            ));
        }

        let (nonce_bytes, ciphertext) = combined.split_at(12);
        let cipher = Aes256Gcm::new((&self.master_key).into());
        let nonce = Nonce::from_slice(nonce_bytes);

        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_| CryptoError::DecryptionFailed(0))?;

        if plaintext.len() != 32 {
            return Err(CryptoError::InvalidKeySize(plaintext.len()));
        }
        let mut key = [0u8; 32];
        key.copy_from_slice(&plaintext);
        Ok(key)
    }

    /// Encrypt a file according to `ctx`:
    /// - SSE-S3: fresh data key, wrapped under the master key;
    /// - SSE-KMS: fresh data key, wrapped via KMS under `ctx.kms_key_id`;
    /// - SSE-C: the caller's 32-byte key is used directly and never stored.
    /// Returns the metadata needed later by `decrypt_object`.
    pub async fn encrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        ctx: &EncryptionContext,
    ) -> Result<EncryptionMetadata, CryptoError> {
        let (data_key, nonce) = self.generate_data_key();

        let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
            SseAlgorithm::Aes256 => {
                let wrapped = self.wrap_data_key(&data_key)?;
                (Some(wrapped), None)
            }
            SseAlgorithm::AwsKms => {
                let kms = self
                    .kms
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
                let kid = ctx
                    .kms_key_id
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
                let ciphertext = kms.encrypt_data(kid, &data_key).await?;
                (Some(B64.encode(&ciphertext)), Some(kid.clone()))
            }
            SseAlgorithm::CustomerProvided => {
                // SSE-C: nothing is stored; the customer must re-supply the key.
                (None, None)
            }
        };

        // For SSE-C the generated data key is discarded in favor of the
        // customer's key; the generated nonce is still used.
        let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
            let ck = ctx.customer_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No customer key provided".into())
            })?;
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else {
            data_key
        };

        // Run the blocking file codec off the async runtime.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        let ak = actual_key;
        let n = nonce;
        tokio::task::spawn_blocking(move || {
            encrypt_stream_chunked(&ip, &op, &ak, &n, None)
        })
        .await
        .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(EncryptionMetadata {
            algorithm: ctx.algorithm.as_str().to_string(),
            nonce: B64.encode(nonce),
            encrypted_data_key,
            kms_key_id,
        })
    }

    /// Decrypt a file using stored metadata. Key resolution order:
    /// 1. an explicit `customer_key` (SSE-C) if supplied,
    /// 2. KMS unwrap when the metadata algorithm is "aws:kms",
    /// 3. otherwise unwrap under the local master key (SSE-S3).
    pub async fn decrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        enc_meta: &EncryptionMetadata,
        customer_key: Option<&[u8]>,
    ) -> Result<(), CryptoError> {
        let nonce_bytes = B64.decode(&enc_meta.nonce).map_err(|e| {
            CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e))
        })?;
        if nonce_bytes.len() != 12 {
            return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
        }

        let data_key: [u8; 32] = if let Some(ck) = customer_key {
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else if enc_meta.algorithm == "aws:kms" {
            let kms = self
                .kms
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
            let kid = enc_meta
                .kms_key_id
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
            let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            let ct = B64.decode(encrypted_dk).map_err(|e| {
                CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
            })?;
            let dk = kms.decrypt_data(kid, &ct).await?;
            if dk.len() != 32 {
                return Err(CryptoError::InvalidKeySize(dk.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(&dk);
            k
        } else {
            let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            self.unwrap_data_key(wrapped)?
        };

        // Run the blocking file codec off the async runtime.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        // Length was validated above, so the conversion cannot fail.
        let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
        tokio::task::spawn_blocking(move || {
            decrypt_stream_chunked(&ip, &op, &data_key, &nb)
        })
        .await
        .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Fixed master key for deterministic wrap/unwrap tests.
    fn test_master_key() -> [u8; 32] {
        [0x42u8; 32]
    }

    /// Wrapping then unwrapping a data key must round-trip exactly.
    #[test]
    fn test_wrap_unwrap_data_key() {
        let svc = EncryptionService::new(test_master_key(), None);
        let dk = [0xAAu8; 32];
        let wrapped = svc.wrap_data_key(&dk).unwrap();
        let unwrapped = svc.unwrap_data_key(&wrapped).unwrap();
        assert_eq!(dk, unwrapped);
    }

    /// SSE-S3 round trip: data key is wrapped and stored in metadata.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_s3() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-S3 encrypted content for testing!";
        std::fs::File::create(&input).unwrap().write_all(data).unwrap();

        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::Aes256,
            kms_key_id: None,
            customer_key: None,
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert_eq!(meta.algorithm, "AES256");
        assert!(meta.encrypted_data_key.is_some());

        svc.decrypt_object(&encrypted, &decrypted, &meta, None)
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// SSE-C round trip: no data key stored; caller re-supplies the key.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_c() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-C encrypted content!";
        std::fs::File::create(&input).unwrap().write_all(data).unwrap();

        let customer_key = [0xBBu8; 32];
        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::CustomerProvided,
            kms_key_id: None,
            customer_key: Some(customer_key.to_vec()),
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert!(meta.encrypted_data_key.is_none());

        svc.decrypt_object(&encrypted, &decrypted, &meta, Some(&customer_key))
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// Metadata must survive serialization to a map and back.
    #[test]
    fn test_encryption_metadata_roundtrip() {
        let meta = EncryptionMetadata {
            algorithm: "AES256".to_string(),
            nonce: "dGVzdG5vbmNlMTI=".to_string(),
            encrypted_data_key: Some("c29tZWtleQ==".to_string()),
            kms_key_id: None,
        };
        let map = meta.to_metadata_map();
        let restored = EncryptionMetadata::from_metadata(&map).unwrap();
        assert_eq!(restored.algorithm, "AES256");
        assert_eq!(restored.nonce, meta.nonce);
        assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
    }

    /// is_encrypted keys purely off the server-side-encryption entry.
    #[test]
    fn test_is_encrypted() {
        let mut meta = HashMap::new();
        assert!(!EncryptionMetadata::is_encrypted(&meta));
        meta.insert("x-amz-server-side-encryption".to_string(), "AES256".to_string());
        assert!(EncryptionMetadata::is_encrypted(&meta));
    }
}
|
||||
@@ -1,132 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use sha2::Sha256;
|
||||
use std::io::Read;
|
||||
use std::path::Path;
|
||||
|
||||
const CHUNK_SIZE: usize = 65536;
|
||||
|
||||
pub fn md5_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn md5_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut md5_hasher = Md5::new();
|
||||
let mut sha_hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
md5_hasher.update(&buf[..n]);
|
||||
sha_hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok((
|
||||
format!("{:x}", md5_hasher.finalize()),
|
||||
format!("{:x}", sha_hasher.finalize()),
|
||||
))
|
||||
}
|
||||
|
||||
/// Async wrapper around `md5_file`: hashes on Tokio's blocking pool.
/// A join failure (panicked/cancelled task) is surfaced as an io::Error.
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
    let path = path.to_owned();
    tokio::task::spawn_blocking(move || md5_file(&path))
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
|
||||
|
||||
/// Async wrapper around `sha256_file`: hashes on Tokio's blocking pool.
/// A join failure (panicked/cancelled task) is surfaced as an io::Error.
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
    let path = path.to_owned();
    tokio::task::spawn_blocking(move || sha256_file(&path))
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
|
||||
|
||||
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
|
||||
let path = path.to_owned();
|
||||
tokio::task::spawn_blocking(move || md5_sha256_file(&path))
|
||||
.await
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Reference digests of the byte string "hello".
    const HELLO_MD5: &str = "5d41402abc4b2a76b9719d911017c592";
    const HELLO_SHA256: &str =
        "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824";

    /// Write "hello" into a fresh temp file and hand it back.
    fn hello_file() -> tempfile::NamedTempFile {
        let mut file = tempfile::NamedTempFile::new().unwrap();
        file.write_all(b"hello").unwrap();
        file.flush().unwrap();
        file
    }

    #[test]
    fn test_md5_bytes() {
        // The empty-input digest is the well-known MD5 of "".
        assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
        assert_eq!(md5_bytes(b"hello"), HELLO_MD5);
    }

    #[test]
    fn test_sha256_bytes() {
        assert_eq!(sha256_bytes(b"hello"), HELLO_SHA256);
    }

    #[test]
    fn test_md5_file() {
        let file = hello_file();
        assert_eq!(md5_file(file.path()).unwrap(), HELLO_MD5);
    }

    #[test]
    fn test_md5_sha256_file() {
        let file = hello_file();
        let (md5_hex, sha_hex) = md5_sha256_file(file.path()).unwrap();
        assert_eq!(md5_hex, HELLO_MD5);
        assert_eq!(sha_hex, HELLO_SHA256);
    }

    #[tokio::test]
    async fn test_md5_file_async() {
        let file = hello_file();
        assert_eq!(md5_file_async(file.path()).await.unwrap(), HELLO_MD5);
    }
}
|
||||
@@ -1,453 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use chrono::{DateTime, Utc};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::aes_gcm::CryptoError;
|
||||
|
||||
/// A single KMS key record as persisted in `kms_keys.json`.
///
/// Serde renames give the JSON AWS-KMS-style PascalCase field names.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KmsKey {
    // UUID string identifying this key.
    #[serde(rename = "KeyId")]
    pub key_id: String,
    // AWS-style ARN derived from key_id (see KmsService::create_key).
    #[serde(rename = "Arn")]
    pub arn: String,
    #[serde(rename = "Description")]
    pub description: String,
    #[serde(rename = "CreationDate")]
    pub creation_date: DateTime<Utc>,
    // Whether the key may currently be used for encrypt/decrypt.
    #[serde(rename = "Enabled")]
    pub enabled: bool,
    // "Enabled" or "Disabled" — kept in sync with `enabled`.
    #[serde(rename = "KeyState")]
    pub key_state: String,
    #[serde(rename = "KeyUsage")]
    pub key_usage: String,
    #[serde(rename = "KeySpec")]
    pub key_spec: String,
    // base64(nonce || AES-256-GCM ciphertext) of the raw 32-byte key
    // material, wrapped under the service master key.
    #[serde(rename = "EncryptedKeyMaterial")]
    pub encrypted_key_material: String,
}
|
||||
|
||||
/// On-disk wrapper for the key list serialized to `kms_keys.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KmsStore {
    keys: Vec<KmsKey>,
}
|
||||
|
||||
/// File-backed KMS emulation: per-key material is wrapped under a local
/// master key, and the key list is persisted as pretty-printed JSON.
pub struct KmsService {
    // Path to kms_keys.json where the key list is persisted.
    keys_path: PathBuf,
    // 32-byte AES-256-GCM master key used to wrap per-key material.
    master_key: Arc<RwLock<[u8; 32]>>,
    // In-memory copy of the key store; flushed to disk by `save`.
    keys: Arc<RwLock<Vec<KmsKey>>>,
}
|
||||
|
||||
impl KmsService {
|
||||
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
|
||||
let keys_path = keys_dir.join("kms_keys.json");
|
||||
|
||||
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
|
||||
|
||||
let keys = if keys_path.exists() {
|
||||
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
|
||||
let store: KmsStore = serde_json::from_str(&data)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
|
||||
store.keys
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
keys_path,
|
||||
master_key: Arc::new(RwLock::new(master_key)),
|
||||
keys: Arc::new(RwLock::new(keys)),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
plaintext_key: &[u8],
|
||||
) -> Result<String, CryptoError> {
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext_key)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut combined = Vec::with_capacity(12 + ciphertext.len());
|
||||
combined.extend_from_slice(&nonce_bytes);
|
||||
combined.extend_from_slice(&ciphertext);
|
||||
Ok(B64.encode(&combined))
|
||||
}
|
||||
|
||||
fn decrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
encrypted_b64: &str,
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let combined = B64.decode(encrypted_b64).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
|
||||
})?;
|
||||
if combined.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Encrypted key material too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let (nonce_bytes, ciphertext) = combined.split_at(12);
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
async fn save(&self) -> Result<(), CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let store = KmsStore {
|
||||
keys: keys.clone(),
|
||||
};
|
||||
let json = serde_json::to_string_pretty(&store)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
|
||||
let key_id = uuid::Uuid::new_v4().to_string();
|
||||
let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);
|
||||
|
||||
let mut plaintext_key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;
|
||||
|
||||
let kms_key = KmsKey {
|
||||
key_id: key_id.clone(),
|
||||
arn,
|
||||
description: description.to_string(),
|
||||
creation_date: Utc::now(),
|
||||
enabled: true,
|
||||
key_state: "Enabled".to_string(),
|
||||
key_usage: "ENCRYPT_DECRYPT".to_string(),
|
||||
key_spec: "SYMMETRIC_DEFAULT".to_string(),
|
||||
encrypted_key_material: encrypted,
|
||||
};
|
||||
|
||||
self.keys.write().await.push(kms_key.clone());
|
||||
self.save().await?;
|
||||
Ok(kms_key)
|
||||
}
|
||||
|
||||
pub async fn list_keys(&self) -> Vec<KmsKey> {
|
||||
self.keys.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
|
||||
let keys = self.keys.read().await;
|
||||
keys.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
let len_before = keys.len();
|
||||
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
|
||||
let removed = keys.len() < len_before;
|
||||
drop(keys);
|
||||
if removed {
|
||||
self.save().await?;
|
||||
}
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = true;
|
||||
key.key_state = "Enabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = false;
|
||||
key.key_state = "Disabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let key = keys
|
||||
.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;
|
||||
|
||||
if !key.enabled {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"KMS key is disabled".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
Self::decrypt_key_material(&master, &key.encrypted_key_material)
|
||||
}
|
||||
|
||||
pub async fn encrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
plaintext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut result = Vec::with_capacity(12 + ciphertext.len());
|
||||
result.extend_from_slice(&nonce_bytes);
|
||||
result.extend_from_slice(&ciphertext);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub async fn decrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
ciphertext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
if ciphertext.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Ciphertext too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let (nonce_bytes, ct) = ciphertext.split_at(12);
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ct)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
pub async fn generate_data_key(
|
||||
&self,
|
||||
key_id: &str,
|
||||
num_bytes: usize,
|
||||
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
|
||||
let kms_key = self.decrypt_data_key(key_id).await?;
|
||||
if kms_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(kms_key.len()));
|
||||
}
|
||||
|
||||
let mut plaintext_key = vec![0u8; num_bytes];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let key_arr: [u8; 32] = kms_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let encrypted = cipher
|
||||
.encrypt(nonce, plaintext_key.as_slice())
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut wrapped = Vec::with_capacity(12 + encrypted.len());
|
||||
wrapped.extend_from_slice(&nonce_bytes);
|
||||
wrapped.extend_from_slice(&encrypted);
|
||||
|
||||
Ok((plaintext_key, wrapped))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
let path = keys_dir.join("master.key");
|
||||
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a KmsService rooted in the given temporary directory.
    async fn service_in(dir: &tempfile::TempDir) -> KmsService {
        KmsService::new(dir.path()).await.unwrap()
    }

    #[tokio::test]
    async fn test_create_and_list_keys() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let created = service.create_key("test key").await.unwrap();
        assert!(created.enabled);
        assert_eq!(created.description, "test key");
        assert!(created.key_id.len() > 0);

        let all = service.list_keys().await;
        assert_eq!(all.len(), 1);
        assert_eq!(all[0].key_id, created.key_id);
    }

    #[tokio::test]
    async fn test_enable_disable_key() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let created = service.create_key("toggle").await.unwrap();
        assert!(created.enabled);

        service.disable_key(&created.key_id).await.unwrap();
        assert!(!service.get_key(&created.key_id).await.unwrap().enabled);

        service.enable_key(&created.key_id).await.unwrap();
        assert!(service.get_key(&created.key_id).await.unwrap().enabled);
    }

    #[tokio::test]
    async fn test_delete_key() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let doomed = service.create_key("doomed").await.unwrap();
        assert!(service.delete_key(&doomed.key_id).await.unwrap());
        assert!(service.get_key(&doomed.key_id).await.is_none());
        assert_eq!(service.list_keys().await.len(), 0);
    }

    #[tokio::test]
    async fn test_encrypt_decrypt_data() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let created = service.create_key("enc-key").await.unwrap();
        let message = b"Hello, KMS!";

        let sealed = service.encrypt_data(&created.key_id, message).await.unwrap();
        assert_ne!(&sealed, message);

        let opened = service.decrypt_data(&created.key_id, &sealed).await.unwrap();
        assert_eq!(opened, message);
    }

    #[tokio::test]
    async fn test_generate_data_key() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let created = service.create_key("data-key-gen").await.unwrap();
        let (raw, wrapped) = service.generate_data_key(&created.key_id, 32).await.unwrap();

        assert_eq!(raw.len(), 32);
        // Wrapping adds the nonce and authentication tag overhead.
        assert!(wrapped.len() > 32);
    }

    #[tokio::test]
    async fn test_disabled_key_cannot_encrypt() {
        let tmpdir = tempfile::tempdir().unwrap();
        let service = service_in(&tmpdir).await;

        let created = service.create_key("disabled").await.unwrap();
        service.disable_key(&created.key_id).await.unwrap();

        let attempt = service.encrypt_data(&created.key_id, b"test").await;
        assert!(attempt.is_err());
    }

    #[tokio::test]
    async fn test_persistence_across_reload() {
        let tmpdir = tempfile::tempdir().unwrap();

        // First instance creates a key, then goes out of scope.
        let saved_id = {
            let first = service_in(&tmpdir).await;
            first.create_key("persistent").await.unwrap().key_id
        };

        // A new instance over the same directory must see the saved key.
        let second = service_in(&tmpdir).await;
        let reloaded = second.get_key(&saved_id).await;
        assert!(reloaded.is_some());
        assert_eq!(reloaded.unwrap().description, "persistent");
    }

    #[tokio::test]
    async fn test_master_key_roundtrip() {
        let tmpdir = tempfile::tempdir().unwrap();
        let first = load_or_create_master_key(tmpdir.path()).await.unwrap();
        let second = load_or_create_master_key(tmpdir.path()).await.unwrap();
        assert_eq!(first, second);
    }
}
|
||||
@@ -1,4 +0,0 @@
|
||||
pub mod hashing;
|
||||
pub mod aes_gcm;
|
||||
pub mod kms;
|
||||
pub mod encryption;
|
||||
@@ -1,39 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-server"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
myfsio-auth = { path = "../myfsio-auth" }
|
||||
myfsio-crypto = { path = "../myfsio-crypto" }
|
||||
myfsio-storage = { path = "../myfsio-storage" }
|
||||
myfsio-xml = { path = "../myfsio-xml" }
|
||||
base64 = { workspace = true }
|
||||
axum = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tower = { workspace = true }
|
||||
tower-http = { workspace = true }
|
||||
hyper = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
http-body-util = "0.1"
|
||||
percent-encoding = { workspace = true }
|
||||
quick-xml = { workspace = true }
|
||||
mime_guess = "2"
|
||||
crc32fast = { workspace = true }
|
||||
duckdb = { workspace = true }
|
||||
roxmltree = "0.20"
|
||||
parking_lot = { workspace = true }
|
||||
regex = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
@@ -1,117 +0,0 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Runtime configuration for the server, resolved from environment
/// variables by `ServerConfig::from_env`.
#[derive(Debug, Clone)]
pub struct ServerConfig {
    // Address/port the HTTP listener binds to (HOST / PORT).
    pub bind_addr: SocketAddr,
    // Root directory for stored data (STORAGE_ROOT).
    pub storage_root: PathBuf,
    // Region string (AWS_REGION), defaulting to "us-east-1".
    pub region: String,
    // Location of the IAM JSON config (IAM_CONFIG, or a default under
    // storage_root/.myfsio.sys/config/iam.json).
    pub iam_config_path: PathBuf,
    // Allowed clock skew for SigV4 request timestamps, in seconds.
    pub sigv4_timestamp_tolerance_secs: u64,
    // Lower/upper bounds for presigned URL expiry, in seconds.
    pub presigned_url_min_expiry: u64,
    pub presigned_url_max_expiry: u64,
    // Application secret; None when neither SECRET_KEY nor the on-disk
    // .secret file yields a usable value.
    pub secret_key: Option<String>,
    // Feature flags, each driven by its own *_ENABLED env var.
    pub encryption_enabled: bool,
    pub kms_enabled: bool,
    pub gc_enabled: bool,
    pub integrity_enabled: bool,
    pub metrics_enabled: bool,
    pub lifecycle_enabled: bool,
    pub website_hosting_enabled: bool,
}
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn from_env() -> Self {
|
||||
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
|
||||
let port: u16 = std::env::var("PORT")
|
||||
.unwrap_or_else(|_| "5000".to_string())
|
||||
.parse()
|
||||
.unwrap_or(5000);
|
||||
let storage_root = std::env::var("STORAGE_ROOT")
|
||||
.unwrap_or_else(|_| "./data".to_string());
|
||||
let region = std::env::var("AWS_REGION")
|
||||
.unwrap_or_else(|_| "us-east-1".to_string());
|
||||
|
||||
let storage_path = PathBuf::from(&storage_root);
|
||||
let iam_config_path = std::env::var("IAM_CONFIG")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| {
|
||||
storage_path.join(".myfsio.sys").join("config").join("iam.json")
|
||||
});
|
||||
|
||||
let sigv4_timestamp_tolerance_secs: u64 = std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
|
||||
.unwrap_or_else(|_| "900".to_string())
|
||||
.parse()
|
||||
.unwrap_or(900);
|
||||
|
||||
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "1".to_string())
|
||||
.parse()
|
||||
.unwrap_or(1);
|
||||
|
||||
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "604800".to_string())
|
||||
.parse()
|
||||
.unwrap_or(604800);
|
||||
|
||||
let secret_key = {
|
||||
let env_key = std::env::var("SECRET_KEY").ok();
|
||||
match env_key {
|
||||
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
|
||||
_ => {
|
||||
let secret_file = storage_path
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join(".secret");
|
||||
std::fs::read_to_string(&secret_file).ok().map(|s| s.trim().to_string())
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let encryption_enabled = std::env::var("ENCRYPTION_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let kms_enabled = std::env::var("KMS_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let gc_enabled = std::env::var("GC_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let integrity_enabled = std::env::var("INTEGRITY_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let metrics_enabled = std::env::var("OPERATION_METRICS_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let lifecycle_enabled = std::env::var("LIFECYCLE_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
let website_hosting_enabled = std::env::var("WEBSITE_HOSTING_ENABLED")
|
||||
.unwrap_or_else(|_| "false".to_string())
|
||||
.to_lowercase() == "true";
|
||||
|
||||
Self {
|
||||
bind_addr: SocketAddr::new(host.parse().unwrap(), port),
|
||||
storage_root: storage_path,
|
||||
region,
|
||||
iam_config_path,
|
||||
sigv4_timestamp_tolerance_secs,
|
||||
presigned_url_min_expiry,
|
||||
presigned_url_max_expiry,
|
||||
secret_key,
|
||||
encryption_enabled,
|
||||
kms_enabled,
|
||||
gc_enabled,
|
||||
integrity_enabled,
|
||||
metrics_enabled,
|
||||
lifecycle_enabled,
|
||||
website_hosting_enabled,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,704 +0,0 @@
|
||||
use axum::body::Body;
|
||||
use axum::extract::{Path, State};
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum::Extension;
|
||||
use myfsio_common::types::Principal;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::services::site_registry::{PeerSite, SiteInfo};
|
||||
use crate::services::website_domains::{is_valid_domain, normalize_domain};
|
||||
use crate::state::AppState;
|
||||
|
||||
/// Wrap an already-built JSON value in an HTTP response with `status`
/// and an `application/json` content type.
fn json_response(status: StatusCode, value: serde_json::Value) -> Response {
    let headers = [("content-type", "application/json")];
    let body = value.to_string();
    (status, headers, body).into_response()
}
|
||||
|
||||
fn json_error(code: &str, message: &str, status: StatusCode) -> Response {
|
||||
json_response(
|
||||
status,
|
||||
serde_json::json!({"error": {"code": code, "message": message}}),
|
||||
)
|
||||
}
|
||||
|
||||
fn require_admin(principal: &Principal) -> Option<Response> {
|
||||
if !principal.is_admin {
|
||||
return Some(json_error("AccessDenied", "Admin access required", StatusCode::FORBIDDEN));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
|
||||
let bytes = http_body_util::BodyExt::collect(body).await.ok()?.to_bytes();
|
||||
serde_json::from_slice(&bytes).ok()
|
||||
}
|
||||
|
||||
/// Validate a site identifier; returns Some(reason) when invalid,
/// None when acceptable (1-63 chars, alphanumeric first char, then
/// alphanumeric/hyphen/underscore only).
fn validate_site_id(site_id: &str) -> Option<String> {
    if site_id.is_empty() || site_id.len() > 63 {
        return Some("site_id must be 1-63 characters".to_string());
    }
    let mut chars = site_id.chars();
    // Non-empty was just checked, so next() always yields a char.
    if !chars.next().unwrap().is_ascii_alphanumeric() {
        return Some("site_id must start with alphanumeric".to_string());
    }
    let charset_ok = site_id
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_');
    if charset_ok {
        None
    } else {
        Some("site_id must contain only alphanumeric, hyphens, underscores".to_string())
    }
}
|
||||
|
||||
/// Require an http(s) URL; returns Some(reason) when invalid.
fn validate_endpoint(endpoint: &str) -> Option<String> {
    let has_scheme = endpoint.starts_with("http://") || endpoint.starts_with("https://");
    match has_scheme {
        true => None,
        false => Some("Endpoint must be http or https URL".to_string()),
    }
}
|
||||
|
||||
fn validate_region(region: &str) -> Option<String> {
|
||||
let re = regex::Regex::new(r"^[a-z]{2,}-[a-z]+-\d+$").unwrap();
|
||||
if !re.is_match(region) {
|
||||
return Some("Region must match format like us-east-1".to_string());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Require a priority in [0, 1000]; returns Some(reason) when invalid.
fn validate_priority(priority: i64) -> Option<String> {
    match priority {
        0..=1000 => None,
        _ => Some("Priority must be between 0 and 1000".to_string()),
    }
}
|
||||
|
||||
pub async fn get_local_site(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
|
||||
if let Some(ref registry) = state.site_registry {
|
||||
if let Some(local) = registry.get_local_site() {
|
||||
return json_response(StatusCode::OK, serde_json::to_value(&local).unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
json_error("NotFound", "Local site not configured", StatusCode::NOT_FOUND)
|
||||
}
|
||||
|
||||
/// PUT handler: create or replace this node's local site record
/// (admin-only).
///
/// site_id is mandatory; endpoint/priority/region/display_name are
/// optional with defaults. An existing record's created_at is carried
/// over. Validation failures return 400 with a ValidationError code.
pub async fn update_local_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
    body: Body,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
    };

    let payload = match read_json_body(body).await {
        Some(v) => v,
        None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
    };

    // site_id is the only mandatory field.
    let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
    };

    if let Some(err) = validate_site_id(&site_id) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    // The local site may legitimately have no endpoint; validate only
    // when one is supplied.
    let endpoint = payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or("").to_string();
    if !endpoint.is_empty() {
        if let Some(err) = validate_endpoint(&endpoint) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
        if let Some(err) = validate_priority(p) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
        if let Some(err) = validate_region(r) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    // Rebuild the record from the payload, defaulting region/priority/
    // display_name, and preserving created_at from any prior record.
    let existing = registry.get_local_site();
    let site = SiteInfo {
        site_id: site_id.clone(),
        endpoint,
        region: payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string(),
        priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100) as i32,
        display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
        created_at: existing.and_then(|e| e.created_at),
    };

    registry.set_local_site(site.clone());
    json_response(StatusCode::OK, serde_json::to_value(&site).unwrap())
}
|
||||
|
||||
pub async fn list_all_sites(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_response(StatusCode::OK, serde_json::json!({"local": null, "peers": [], "total_peers": 0})),
|
||||
};
|
||||
|
||||
let local = registry.get_local_site();
|
||||
let peers = registry.list_peers();
|
||||
|
||||
json_response(StatusCode::OK, serde_json::json!({
|
||||
"local": local,
|
||||
"peers": peers,
|
||||
"total_peers": peers.len(),
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn register_peer_site(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let payload = match read_json_body(body).await {
|
||||
Some(v) => v,
|
||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
|
||||
Some(s) => s.to_string(),
|
||||
None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
if let Some(err) = validate_site_id(&site_id) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let endpoint = match payload.get("endpoint").and_then(|v| v.as_str()) {
|
||||
Some(e) => e.to_string(),
|
||||
None => return json_error("ValidationError", "endpoint is required", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
if let Some(err) = validate_endpoint(&endpoint) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let region = payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string();
|
||||
if let Some(err) = validate_region(®ion) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let priority = payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100);
|
||||
if let Some(err) = validate_priority(priority) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
if registry.get_peer(&site_id).is_some() {
|
||||
return json_error("AlreadyExists", &format!("Peer site '{}' already exists", site_id), StatusCode::CONFLICT);
|
||||
}
|
||||
|
||||
let peer = PeerSite {
|
||||
site_id: site_id.clone(),
|
||||
endpoint,
|
||||
region,
|
||||
priority: priority as i32,
|
||||
display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
|
||||
connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()),
|
||||
created_at: Some(chrono::Utc::now().to_rfc3339()),
|
||||
is_healthy: false,
|
||||
last_health_check: None,
|
||||
};
|
||||
|
||||
registry.add_peer(peer.clone());
|
||||
json_response(StatusCode::CREATED, serde_json::to_value(&peer).unwrap())
|
||||
}
|
||||
|
||||
pub async fn get_peer_site(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(site_id): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
match registry.get_peer(&site_id) {
|
||||
Some(peer) => json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap()),
|
||||
None => json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update_peer_site(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(site_id): Path<String>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
let existing = match registry.get_peer(&site_id) {
|
||||
Some(p) => p,
|
||||
None => return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
let payload = match read_json_body(body).await {
|
||||
Some(v) => v,
|
||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
if let Some(ep) = payload.get("endpoint").and_then(|v| v.as_str()) {
|
||||
if let Some(err) = validate_endpoint(ep) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
}
|
||||
if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
|
||||
if let Some(err) = validate_priority(p) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
}
|
||||
if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
|
||||
if let Some(err) = validate_region(r) {
|
||||
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
|
||||
}
|
||||
}
|
||||
|
||||
let peer = PeerSite {
|
||||
site_id: site_id.clone(),
|
||||
endpoint: payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or(&existing.endpoint).to_string(),
|
||||
region: payload.get("region").and_then(|v| v.as_str()).unwrap_or(&existing.region).to_string(),
|
||||
priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(existing.priority as i64) as i32,
|
||||
display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&existing.display_name).to_string(),
|
||||
connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()).or(existing.connection_id),
|
||||
created_at: existing.created_at,
|
||||
is_healthy: existing.is_healthy,
|
||||
last_health_check: existing.last_health_check,
|
||||
};
|
||||
|
||||
registry.update_peer(peer.clone());
|
||||
json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap())
|
||||
}
|
||||
|
||||
pub async fn delete_peer_site(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(site_id): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
if !registry.delete_peer(&site_id) {
|
||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
||||
}
|
||||
StatusCode::NO_CONTENT.into_response()
|
||||
}
|
||||
|
||||
pub async fn check_peer_health(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(site_id): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
if registry.get_peer(&site_id).is_none() {
|
||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
json_response(StatusCode::OK, serde_json::json!({
|
||||
"site_id": site_id,
|
||||
"is_healthy": false,
|
||||
"error": "Health check not implemented in standalone mode",
|
||||
"checked_at": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn get_topology(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_response(StatusCode::OK, serde_json::json!({"sites": [], "total": 0, "healthy_count": 0})),
|
||||
};
|
||||
|
||||
let local = registry.get_local_site();
|
||||
let peers = registry.list_peers();
|
||||
|
||||
let mut sites: Vec<serde_json::Value> = Vec::new();
|
||||
if let Some(l) = local {
|
||||
let mut v = serde_json::to_value(&l).unwrap();
|
||||
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(true));
|
||||
v.as_object_mut().unwrap().insert("is_healthy".to_string(), serde_json::json!(true));
|
||||
sites.push(v);
|
||||
}
|
||||
for p in &peers {
|
||||
let mut v = serde_json::to_value(p).unwrap();
|
||||
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(false));
|
||||
sites.push(v);
|
||||
}
|
||||
|
||||
sites.sort_by_key(|s| s.get("priority").and_then(|v| v.as_i64()).unwrap_or(100));
|
||||
|
||||
let healthy_count = sites.iter().filter(|s| s.get("is_healthy").and_then(|v| v.as_bool()).unwrap_or(false)).count();
|
||||
|
||||
json_response(StatusCode::OK, serde_json::json!({
|
||||
"sites": sites,
|
||||
"total": sites.len(),
|
||||
"healthy_count": healthy_count,
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn check_bidirectional_status(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(site_id): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let registry = match &state.site_registry {
|
||||
Some(r) => r,
|
||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
||||
};
|
||||
|
||||
if registry.get_peer(&site_id).is_none() {
|
||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
let local = registry.get_local_site();
|
||||
json_response(StatusCode::OK, serde_json::json!({
|
||||
"site_id": site_id,
|
||||
"local_site_id": local.as_ref().map(|l| &l.site_id),
|
||||
"local_endpoint": local.as_ref().map(|l| &l.endpoint),
|
||||
"local_bidirectional_rules": [],
|
||||
"local_site_sync_enabled": false,
|
||||
"remote_status": null,
|
||||
"issues": [{"code": "NOT_IMPLEMENTED", "message": "Bidirectional status check not implemented in standalone mode", "severity": "warning"}],
|
||||
"is_fully_configured": false,
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn iam_list_users(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let users = state.iam.list_users().await;
|
||||
json_response(StatusCode::OK, serde_json::json!({"users": users}))
|
||||
}
|
||||
|
||||
pub async fn iam_get_user(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(identifier): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.get_user(&identifier).await {
|
||||
Some(user) => json_response(StatusCode::OK, user),
|
||||
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn iam_get_user_policies(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(identifier): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.get_user_policies(&identifier) {
|
||||
Some(policies) => json_response(StatusCode::OK, serde_json::json!({"policies": policies})),
|
||||
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn iam_create_access_key(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(identifier): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.create_access_key(&identifier) {
|
||||
Ok(result) => json_response(StatusCode::CREATED, result),
|
||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn iam_delete_access_key(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path((_identifier, access_key)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.delete_access_key(&access_key) {
|
||||
Ok(()) => StatusCode::NO_CONTENT.into_response(),
|
||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn iam_disable_user(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(identifier): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.set_user_enabled(&identifier, false).await {
|
||||
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "disabled"})),
|
||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn iam_enable_user(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(identifier): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match state.iam.set_user_enabled(&identifier, true).await {
|
||||
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "enabled"})),
|
||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list_website_domains(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let store = match &state.website_domains {
|
||||
Some(s) => s,
|
||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
json_response(StatusCode::OK, serde_json::json!(store.list_all()))
|
||||
}
|
||||
|
||||
pub async fn create_website_domain(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let store = match &state.website_domains {
|
||||
Some(s) => s,
|
||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let payload = match read_json_body(body).await {
|
||||
Some(v) => v,
|
||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let domain = normalize_domain(payload.get("domain").and_then(|v| v.as_str()).unwrap_or(""));
|
||||
if domain.is_empty() {
|
||||
return json_error("ValidationError", "domain is required", StatusCode::BAD_REQUEST);
|
||||
}
|
||||
if !is_valid_domain(&domain) {
|
||||
return json_error("ValidationError", &format!("Invalid domain: '{}'", domain), StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
|
||||
if bucket.is_empty() {
|
||||
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
match state.storage.bucket_exists(&bucket).await {
|
||||
Ok(true) => {}
|
||||
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
|
||||
}
|
||||
|
||||
if store.get_bucket(&domain).is_some() {
|
||||
return json_error("Conflict", &format!("Domain '{}' is already mapped", domain), StatusCode::CONFLICT);
|
||||
}
|
||||
|
||||
store.set_mapping(&domain, &bucket);
|
||||
json_response(StatusCode::CREATED, serde_json::json!({"domain": domain, "bucket": bucket}))
|
||||
}
|
||||
|
||||
pub async fn get_website_domain(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(domain): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let store = match &state.website_domains {
|
||||
Some(s) => s,
|
||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let domain = normalize_domain(&domain);
|
||||
match store.get_bucket(&domain) {
|
||||
Some(bucket) => json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket})),
|
||||
None => json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update_website_domain(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(domain): Path<String>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let store = match &state.website_domains {
|
||||
Some(s) => s,
|
||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let domain = normalize_domain(&domain);
|
||||
let payload = match read_json_body(body).await {
|
||||
Some(v) => v,
|
||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
|
||||
if bucket.is_empty() {
|
||||
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
match state.storage.bucket_exists(&bucket).await {
|
||||
Ok(true) => {}
|
||||
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
|
||||
}
|
||||
|
||||
if store.get_bucket(&domain).is_none() {
|
||||
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
store.set_mapping(&domain, &bucket);
|
||||
json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket}))
|
||||
}
|
||||
|
||||
pub async fn delete_website_domain(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
Path(domain): Path<String>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let store = match &state.website_domains {
|
||||
Some(s) => s,
|
||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let domain = normalize_domain(&domain);
|
||||
if !store.delete_mapping(&domain) {
|
||||
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
|
||||
}
|
||||
StatusCode::NO_CONTENT.into_response()
|
||||
}
|
||||
|
||||
/// Common query-string pagination parameters (`?limit=&offset=`).
#[derive(serde::Deserialize, Default)]
pub struct PaginationQuery {
    /// Maximum number of items to return, if supplied.
    pub limit: Option<usize>,
    /// Number of items to skip from the start of the listing, if supplied.
    pub offset: Option<usize>,
}
|
||||
|
||||
pub async fn gc_status(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match &state.gc {
|
||||
Some(gc) => json_response(StatusCode::OK, gc.status().await),
|
||||
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn gc_run(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let gc = match &state.gc {
|
||||
Some(gc) => gc,
|
||||
None => return json_error("InvalidRequest", "GC is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
|
||||
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
|
||||
|
||||
match gc.run_now(dry_run).await {
|
||||
Ok(result) => json_response(StatusCode::OK, result),
|
||||
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn gc_history(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match &state.gc {
|
||||
Some(gc) => json_response(StatusCode::OK, serde_json::json!({"executions": gc.history().await})),
|
||||
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn integrity_status(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match &state.integrity {
|
||||
Some(checker) => json_response(StatusCode::OK, checker.status().await),
|
||||
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn integrity_run(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
let checker = match &state.integrity {
|
||||
Some(c) => c,
|
||||
None => return json_error("InvalidRequest", "Integrity checker is not enabled", StatusCode::BAD_REQUEST),
|
||||
};
|
||||
|
||||
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
|
||||
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
|
||||
let auto_heal = payload.get("auto_heal").and_then(|v| v.as_bool()).unwrap_or(false);
|
||||
|
||||
match checker.run_now(dry_run, auto_heal).await {
|
||||
Ok(result) => json_response(StatusCode::OK, result),
|
||||
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn integrity_history(
|
||||
State(state): State<AppState>,
|
||||
Extension(principal): Extension<Principal>,
|
||||
) -> Response {
|
||||
if let Some(err) = require_admin(&principal) { return err; }
|
||||
match &state.integrity {
|
||||
Some(checker) => json_response(StatusCode::OK, serde_json::json!({"executions": checker.history().await})),
|
||||
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,278 +0,0 @@
|
||||
use axum::body::Body;
|
||||
use axum::extract::State;
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use serde_json::json;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
fn json_ok(value: serde_json::Value) -> Response {
|
||||
(
|
||||
StatusCode::OK,
|
||||
[("content-type", "application/json")],
|
||||
value.to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
fn json_err(status: StatusCode, msg: &str) -> Response {
|
||||
(
|
||||
status,
|
||||
[("content-type", "application/json")],
|
||||
json!({"error": msg}).to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub async fn list_keys(State(state): State<AppState>) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
let keys = kms.list_keys().await;
|
||||
let keys_json: Vec<serde_json::Value> = keys
|
||||
.iter()
|
||||
.map(|k| {
|
||||
json!({
|
||||
"KeyId": k.key_id,
|
||||
"Arn": k.arn,
|
||||
"Description": k.description,
|
||||
"CreationDate": k.creation_date.to_rfc3339(),
|
||||
"Enabled": k.enabled,
|
||||
"KeyState": k.key_state,
|
||||
"KeyUsage": k.key_usage,
|
||||
"KeySpec": k.key_spec,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
json_ok(json!({"keys": keys_json}))
|
||||
}
|
||||
|
||||
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||
Ok(c) => c.to_bytes(),
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
||||
};
|
||||
|
||||
let description = if body_bytes.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
|
||||
Ok(v) => v
|
||||
.get("Description")
|
||||
.or_else(|| v.get("description"))
|
||||
.and_then(|d| d.as_str())
|
||||
.unwrap_or("")
|
||||
.to_string(),
|
||||
Err(_) => String::new(),
|
||||
}
|
||||
};
|
||||
|
||||
match kms.create_key(&description).await {
|
||||
Ok(key) => json_ok(json!({
|
||||
"KeyId": key.key_id,
|
||||
"Arn": key.arn,
|
||||
"Description": key.description,
|
||||
"CreationDate": key.creation_date.to_rfc3339(),
|
||||
"Enabled": key.enabled,
|
||||
"KeyState": key.key_state,
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
match kms.get_key(&key_id).await {
|
||||
Some(key) => json_ok(json!({
|
||||
"KeyId": key.key_id,
|
||||
"Arn": key.arn,
|
||||
"Description": key.description,
|
||||
"CreationDate": key.creation_date.to_rfc3339(),
|
||||
"Enabled": key.enabled,
|
||||
"KeyState": key.key_state,
|
||||
"KeyUsage": key.key_usage,
|
||||
"KeySpec": key.key_spec,
|
||||
})),
|
||||
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
match kms.delete_key(&key_id).await {
|
||||
Ok(true) => StatusCode::NO_CONTENT.into_response(),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn enable_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
match kms.enable_key(&key_id).await {
|
||||
Ok(true) => json_ok(json!({"status": "enabled"})),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disable_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
match kms.disable_key(&key_id).await {
|
||||
Ok(true) => json_ok(json!({"status": "disabled"})),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||
Ok(c) => c.to_bytes(),
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
||||
};
|
||||
|
||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
||||
};
|
||||
|
||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
||||
};
|
||||
let plaintext_b64 = match req.get("Plaintext").and_then(|v| v.as_str()) {
|
||||
Some(p) => p,
|
||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing Plaintext"),
|
||||
};
|
||||
let plaintext = match B64.decode(plaintext_b64) {
|
||||
Ok(p) => p,
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64 Plaintext"),
|
||||
};
|
||||
|
||||
match kms.encrypt_data(key_id, &plaintext).await {
|
||||
Ok(ct) => json_ok(json!({
|
||||
"KeyId": key_id,
|
||||
"CiphertextBlob": B64.encode(&ct),
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||
Ok(c) => c.to_bytes(),
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
||||
};
|
||||
|
||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
||||
};
|
||||
|
||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
||||
};
|
||||
let ct_b64 = match req.get("CiphertextBlob").and_then(|v| v.as_str()) {
|
||||
Some(c) => c,
|
||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing CiphertextBlob"),
|
||||
};
|
||||
let ciphertext = match B64.decode(ct_b64) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64"),
|
||||
};
|
||||
|
||||
match kms.decrypt_data(key_id, &ciphertext).await {
|
||||
Ok(pt) => json_ok(json!({
|
||||
"KeyId": key_id,
|
||||
"Plaintext": B64.encode(&pt),
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match &state.kms {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
||||
};
|
||||
|
||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||
Ok(c) => c.to_bytes(),
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
||||
};
|
||||
|
||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
||||
};
|
||||
|
||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
||||
Some(k) => k,
|
||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
||||
};
|
||||
let num_bytes = req
|
||||
.get("NumberOfBytes")
|
||||
.and_then(|v| v.as_u64())
|
||||
.unwrap_or(32) as usize;
|
||||
|
||||
if num_bytes < 1 || num_bytes > 1024 {
|
||||
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
|
||||
}
|
||||
|
||||
match kms.generate_data_key(key_id, num_bytes).await {
|
||||
Ok((plaintext, wrapped)) => json_ok(json!({
|
||||
"KeyId": key_id,
|
||||
"Plaintext": B64.encode(&plaintext),
|
||||
"CiphertextBlob": B64.encode(&wrapped),
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,552 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use axum::body::Body;
|
||||
use axum::http::{HeaderMap, HeaderName, StatusCode};
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::Engine;
|
||||
use bytes::Bytes;
|
||||
use crc32fast::Hasher;
|
||||
use duckdb::types::ValueRef;
|
||||
use duckdb::Connection;
|
||||
use futures::stream;
|
||||
use http_body_util::BodyExt;
|
||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
// On Windows builds, emit a link directive for Rstrtmgr (Restart Manager);
// the extern block is empty because only the `#[link]` attribute is needed.
// NOTE(review): why this library is required is not evident from this file —
// confirm against the build docs.
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}

/// 64 KiB chunk size. Presumably the read/emit granularity for select
/// results — its usage is not visible in this part of the file; confirm.
const CHUNK_SIZE: usize = 65_536;
|
||||
|
||||
/// S3 SelectObjectContent: run the request's expression against a single
/// object and stream the results back as a sequence of framed events
/// ("Records" chunks, then "Stats", then "End").
pub async fn post_select_object_content(
    state: &AppState,
    bucket: &str,
    key: &str,
    headers: &HeaderMap,
    body: Body,
) -> Response {
    // The request body must be XML; reject other content types up front.
    if let Some(resp) = require_xml_content_type(headers) {
        return resp;
    }

    let body_bytes = match body.collect().await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            // A failed body read is reported the same way as unparseable XML.
            return s3_error_response(S3Error::new(
                S3ErrorCode::MalformedXML,
                "Unable to parse XML document",
            ));
        }
    };

    let request = match parse_select_request(&body_bytes) {
        Ok(r) => r,
        Err(err) => return s3_error_response(err),
    };

    // Resolve the object to a filesystem path; any storage-layer error is
    // surfaced as NoSuchKey.
    let object_path = match state.storage.get_object_path(bucket, key).await {
        Ok(path) => path,
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::NoSuchKey,
                "Object not found",
            ));
        }
    };

    // Run the query on a blocking thread (execute_select_query is synchronous).
    let join_res = tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
    let chunks = match join_res {
        Ok(Ok(chunks)) => chunks,
        // Query-level failure: the engine's message goes back as InvalidRequest.
        Ok(Err(message)) => {
            return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
        }
        // Join error: the blocking task panicked or was cancelled.
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::InternalError,
                "SelectObjectContent execution failed",
            ));
        }
    };

    // Frame every result chunk as a "Records" event, then append the "Stats"
    // and terminal "End" events. bytes_returned is computed before the chunks
    // are consumed because the Stats payload needs it.
    let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
    let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
    for chunk in chunks {
        events.push(Bytes::from(encode_select_event("Records", &chunk)));
    }

    // NOTE(review): the first argument (bytes scanned) is hard-coded to 0 —
    // presumably scan accounting is not implemented; confirm.
    let stats_payload = build_stats_xml(0, bytes_returned);
    events.push(Bytes::from(encode_select_event("Stats", stats_payload.as_bytes())));
    events.push(Bytes::from(encode_select_event("End", b"")));

    // Stream the pre-built event frames; each is an infallible Ok item.
    let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
    let body = Body::from_stream(stream);

    let mut response = (StatusCode::OK, body).into_response();
    response.headers_mut().insert(
        HeaderName::from_static("content-type"),
        "application/octet-stream".parse().unwrap(),
    );
    response.headers_mut().insert(
        HeaderName::from_static("x-amz-request-charged"),
        "requester".parse().unwrap(),
    );
    response
}
|
||||
|
||||
/// A parsed SelectObjectContent request: the SQL expression plus how to
/// deserialize the stored object and serialize the result rows.
#[derive(Clone)]
struct SelectRequest {
    expression: String,
    input_format: InputFormat,
    output_format: OutputFormat,
}
|
||||
|
||||
/// Supported input serializations for the stored object.
#[derive(Clone)]
enum InputFormat {
    Csv(CsvInputConfig),
    Json(JsonInputConfig),
    Parquet,
}
|
||||
|
||||
/// CSV input options, mirroring the request's InputSerialization/CSV node.
#[derive(Clone)]
struct CsvInputConfig {
    // Uppercased FileHeaderInfo value: "NONE", "USE", or "IGNORE".
    file_header_info: String,
    field_delimiter: String,
    quote_character: String,
}
|
||||
|
||||
/// JSON input options, mirroring the request's InputSerialization/JSON node.
#[derive(Clone)]
struct JsonInputConfig {
    // Uppercased Type value: "DOCUMENT" or "LINES".
    json_type: String,
}
|
||||
|
||||
/// Supported output serializations for the result rows.
#[derive(Clone)]
enum OutputFormat {
    Csv(CsvOutputConfig),
    Json(JsonOutputConfig),
}
|
||||
|
||||
/// CSV output options, mirroring the request's OutputSerialization/CSV node.
#[derive(Clone)]
struct CsvOutputConfig {
    field_delimiter: String,
    record_delimiter: String,
    quote_character: String,
}
|
||||
|
||||
/// JSON output options, mirroring the request's OutputSerialization/JSON node.
#[derive(Clone)]
struct JsonOutputConfig {
    record_delimiter: String,
}
|
||||
|
||||
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
|
||||
let xml = String::from_utf8_lossy(payload);
|
||||
let doc = roxmltree::Document::parse(&xml)
|
||||
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
|
||||
|
||||
let root = doc.root_element();
|
||||
if root.tag_name().name() != "SelectObjectContentRequest" {
|
||||
return Err(S3Error::new(
|
||||
S3ErrorCode::MalformedXML,
|
||||
"Root element must be SelectObjectContentRequest",
|
||||
));
|
||||
}
|
||||
|
||||
let expression = child_text(&root, "Expression")
|
||||
.filter(|v| !v.is_empty())
|
||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
|
||||
|
||||
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
|
||||
if !expression_type.eq_ignore_ascii_case("SQL") {
|
||||
return Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"Only SQL expression type is supported",
|
||||
));
|
||||
}
|
||||
|
||||
let input_node = child(&root, "InputSerialization")
|
||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "InputSerialization is required"))?;
|
||||
let output_node = child(&root, "OutputSerialization")
|
||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "OutputSerialization is required"))?;
|
||||
|
||||
let input_format = parse_input_format(&input_node)?;
|
||||
let output_format = parse_output_format(&output_node)?;
|
||||
|
||||
Ok(SelectRequest {
|
||||
expression,
|
||||
input_format,
|
||||
output_format,
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
|
||||
if let Some(csv_node) = child(node, "CSV") {
|
||||
return Ok(InputFormat::Csv(CsvInputConfig {
|
||||
file_header_info: child_text(&csv_node, "FileHeaderInfo")
|
||||
.unwrap_or_else(|| "NONE".to_string())
|
||||
.to_ascii_uppercase(),
|
||||
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
|
||||
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(json_node) = child(node, "JSON") {
|
||||
return Ok(InputFormat::Json(JsonInputConfig {
|
||||
json_type: child_text(&json_node, "Type")
|
||||
.unwrap_or_else(|| "DOCUMENT".to_string())
|
||||
.to_ascii_uppercase(),
|
||||
}));
|
||||
}
|
||||
|
||||
if child(node, "Parquet").is_some() {
|
||||
return Ok(InputFormat::Parquet);
|
||||
}
|
||||
|
||||
Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"InputSerialization must specify CSV, JSON, or Parquet",
|
||||
))
|
||||
}
|
||||
|
||||
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
|
||||
if let Some(csv_node) = child(node, "CSV") {
|
||||
return Ok(OutputFormat::Csv(CsvOutputConfig {
|
||||
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
|
||||
record_delimiter: child_text(&csv_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
|
||||
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(json_node) = child(node, "JSON") {
|
||||
return Ok(OutputFormat::Json(JsonOutputConfig {
|
||||
record_delimiter: child_text(&json_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"OutputSerialization must specify CSV or JSON",
|
||||
))
|
||||
}
|
||||
|
||||
/// First child element of `node` whose tag name equals `name`, if any.
fn child<'a, 'input>(node: &'a roxmltree::Node<'a, 'input>, name: &str) -> Option<roxmltree::Node<'a, 'input>> {
    for candidate in node.children() {
        if candidate.is_element() && candidate.tag_name().name() == name {
            return Some(candidate);
        }
    }
    None
}
|
||||
|
||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||
child(node, name)
|
||||
.and_then(|n| n.text())
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
/// Run the select expression against the object at `path` in an in-memory
/// DuckDB database. Returns the serialized result, already split into
/// output chunks, or a human-readable error message.
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
    let conn = Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;

    // Materialize the object into a table named `data`.
    load_input_table(&conn, &path, &request.input_format)?;

    // S3 Select SQL refers to the object as "s3object"; map it to the local
    // table name. NOTE(review): this is a plain substring replacement that
    // only covers the "s3object"/"S3Object" spellings and also rewrites
    // matches inside string literals — confirm that is acceptable.
    let expression = request
        .expression
        .replace("s3object", "data")
        .replace("S3Object", "data");

    let mut stmt = conn
        .prepare(&expression)
        .map_err(|e| format!("SQL execution error: {}", e))?;
    let mut rows = stmt
        .query([])
        .map_err(|e| format!("SQL execution error: {}", e))?;
    // Column metadata is read from the statement backing the row cursor.
    let stmt_ref = rows
        .as_ref()
        .ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
    let col_count = stmt_ref.column_count();
    let mut columns: Vec<String> = Vec::with_capacity(col_count);
    for i in 0..col_count {
        // Fall back to a positional name ("_0", "_1", ...) when the column
        // name is unavailable.
        let name = stmt_ref
            .column_name(i)
            .map(|s| s.to_string())
            .unwrap_or_else(|_| format!("_{}", i));
        columns.push(name);
    }

    match request.output_format {
        OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
        OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
    }
}
|
||||
|
||||
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
|
||||
let path_str = path.to_string_lossy().replace('\\', "/");
|
||||
match input {
|
||||
InputFormat::Csv(cfg) => {
|
||||
let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
|
||||
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
|
||||
let quote = normalize_single_char(&cfg.quote_character, '"');
|
||||
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
|
||||
sql_escape(&path_str),
|
||||
if header { "true" } else { "false" },
|
||||
sql_escape(&delimiter),
|
||||
sql_escape("e)
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
|
||||
}
|
||||
InputFormat::Json(cfg) => {
|
||||
let format = if cfg.json_type == "LINES" {
|
||||
"newline_delimited"
|
||||
} else {
|
||||
"array"
|
||||
};
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
|
||||
sql_escape(&path_str),
|
||||
format
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
|
||||
}
|
||||
InputFormat::Parquet => {
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
|
||||
sql_escape(&path_str)
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Escape a value for embedding inside a single-quoted SQL string literal
/// by doubling every embedded single quote.
fn sql_escape(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch == '\'' {
            escaped.push('\'');
        }
        escaped.push(ch);
    }
    escaped
}
|
||||
|
||||
/// Reduce a user-supplied delimiter/quote option to a single character:
/// the first character of `value`, or `default_char` when `value` is empty.
fn normalize_single_char(value: &str, default_char: char) -> String {
    match value.chars().next() {
        Some(first) => first.to_string(),
        None => default_char.to_string(),
    }
}
|
||||
|
||||
fn collect_csv_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
cfg: CsvOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let delimiter = cfg.field_delimiter;
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let quote = cfg.quote_character;
|
||||
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
|
||||
let mut fields: Vec<String> = Vec::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
if matches!(value, ValueRef::Null) {
|
||||
fields.push(String::new());
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut text = value_ref_to_string(value);
|
||||
if text.contains(&delimiter) || text.contains("e) || text.contains(&record_delimiter) {
|
||||
text = text.replace("e, &(quote.clone() + "e));
|
||||
text = format!("{}{}{}", quote, text, quote);
|
||||
}
|
||||
fields.push(text);
|
||||
}
|
||||
buffer.push_str(&fields.join(&delimiter));
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
fn collect_json_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
columns: &[String],
|
||||
cfg: JsonOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
|
||||
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
let key = columns
|
||||
.get(i)
|
||||
.cloned()
|
||||
.unwrap_or_else(|| format!("_{}", i));
|
||||
record.insert(key, value_ref_to_json(value));
|
||||
}
|
||||
let line = serde_json::to_string(&record)
|
||||
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
|
||||
buffer.push_str(&line);
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
/// Render a DuckDB value as plain text for CSV output.
///
/// NULL becomes the empty string, BLOBs are base64-encoded, numeric and
/// boolean types use their standard Display formatting, and any unhandled
/// variant falls back to its Debug representation.
fn value_ref_to_string(value: ValueRef<'_>) -> String {
    match value {
        ValueRef::Null => String::new(),
        ValueRef::Boolean(v) => v.to_string(),
        ValueRef::TinyInt(v) => v.to_string(),
        ValueRef::SmallInt(v) => v.to_string(),
        ValueRef::Int(v) => v.to_string(),
        ValueRef::BigInt(v) => v.to_string(),
        ValueRef::UTinyInt(v) => v.to_string(),
        ValueRef::USmallInt(v) => v.to_string(),
        ValueRef::UInt(v) => v.to_string(),
        ValueRef::UBigInt(v) => v.to_string(),
        ValueRef::Float(v) => v.to_string(),
        ValueRef::Double(v) => v.to_string(),
        ValueRef::Decimal(v) => v.to_string(),
        // Non-UTF-8 bytes in text are replaced, not rejected.
        ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
        ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
        _ => format!("{:?}", value),
    }
}
|
||||
|
||||
/// Convert a DuckDB value into a `serde_json::Value` for JSON output.
///
/// Decimals are emitted as strings (to avoid precision loss), BLOBs as
/// base64 strings, and any unhandled variant falls back to a string of its
/// Debug representation.
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
    match value {
        ValueRef::Null => serde_json::Value::Null,
        ValueRef::Boolean(v) => serde_json::Value::Bool(v),
        ValueRef::TinyInt(v) => serde_json::json!(v),
        ValueRef::SmallInt(v) => serde_json::json!(v),
        ValueRef::Int(v) => serde_json::json!(v),
        ValueRef::BigInt(v) => serde_json::json!(v),
        ValueRef::UTinyInt(v) => serde_json::json!(v),
        ValueRef::USmallInt(v) => serde_json::json!(v),
        ValueRef::UInt(v) => serde_json::json!(v),
        ValueRef::UBigInt(v) => serde_json::json!(v),
        ValueRef::Float(v) => serde_json::json!(v),
        ValueRef::Double(v) => serde_json::json!(v),
        ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
        // Non-UTF-8 bytes in text are replaced, not rejected.
        ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
        ValueRef::Blob(v) => serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v)),
        _ => serde_json::Value::String(format!("{:?}", value)),
    }
}
|
||||
|
||||
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
|
||||
let value = headers
|
||||
.get("content-type")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("")
|
||||
.trim();
|
||||
if value.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let lowered = value.to_ascii_lowercase();
|
||||
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
|
||||
return None;
|
||||
}
|
||||
Some(s3_error_response(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"Content-Type must be application/xml or text/xml",
|
||||
)))
|
||||
}
|
||||
|
||||
/// Render an `S3Error` as an S3-style XML error response with a freshly
/// generated request id.
fn s3_error_response(err: S3Error) -> Response {
    // Fall back to 500 if the error carries a status outside the valid range.
    let status = StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    // An empty resource is reported as "/".
    let resource = if err.resource.is_empty() {
        "/".to_string()
    } else {
        err.resource.clone()
    };
    let body = err
        .with_resource(resource)
        .with_request_id(uuid::Uuid::new_v4().simple().to_string())
        .to_xml();
    (
        status,
        [("content-type", "application/xml")],
        body,
    )
        .into_response()
}
|
||||
|
||||
/// Build the `<Stats>` XML payload for the Stats event.
///
/// BytesProcessed is reported equal to BytesScanned, matching the single
/// counter this implementation tracks.
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
    let mut xml = String::from("<Stats>");
    xml.push_str(&format!("<BytesScanned>{}</BytesScanned>", bytes_scanned));
    xml.push_str(&format!("<BytesProcessed>{}</BytesProcessed>", bytes_scanned));
    xml.push_str(&format!("<BytesReturned>{}</BytesReturned>", bytes_returned));
    xml.push_str("</Stats>");
    xml
}
|
||||
|
||||
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
|
||||
let mut headers = Vec::new();
|
||||
headers.extend(encode_select_header(":event-type", event_type));
|
||||
if event_type == "Records" {
|
||||
headers.extend(encode_select_header(":content-type", "application/octet-stream"));
|
||||
} else if event_type == "Stats" {
|
||||
headers.extend(encode_select_header(":content-type", "text/xml"));
|
||||
}
|
||||
headers.extend(encode_select_header(":message-type", "event"));
|
||||
|
||||
let headers_len = headers.len() as u32;
|
||||
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
|
||||
|
||||
let mut message = Vec::with_capacity(total_len);
|
||||
let mut prelude = Vec::with_capacity(8);
|
||||
prelude.extend((total_len as u32).to_be_bytes());
|
||||
prelude.extend(headers_len.to_be_bytes());
|
||||
|
||||
let prelude_crc = crc32(&prelude);
|
||||
message.extend(prelude);
|
||||
message.extend(prelude_crc.to_be_bytes());
|
||||
message.extend(headers);
|
||||
message.extend(payload);
|
||||
|
||||
let msg_crc = crc32(&message);
|
||||
message.extend(msg_crc.to_be_bytes());
|
||||
message
|
||||
}
|
||||
|
||||
/// Encode one event-stream header: 1-byte name length, name bytes, 1-byte
/// value type (7 = string), 2-byte big-endian value length, value bytes.
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
    let mut out = Vec::with_capacity(1 + name.len() + 1 + 2 + value.len());
    out.push(name.len() as u8);
    out.extend_from_slice(name.as_bytes());
    out.push(7); // value type 7: string
    out.extend_from_slice(&(value.len() as u16).to_be_bytes());
    out.extend_from_slice(value.as_bytes());
    out
}
|
||||
|
||||
/// CRC-32 of `data`, as required by the event-stream prelude and message
/// checksums.
fn crc32(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}
|
||||
@@ -1,73 +0,0 @@
|
||||
pub mod config;
|
||||
pub mod handlers;
|
||||
pub mod middleware;
|
||||
pub mod services;
|
||||
pub mod state;
|
||||
|
||||
use axum::Router;
|
||||
|
||||
/// Value of the `Server` response header, e.g. "MyFSIO-Rust/0.2.4".
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
/// Build the full application router: the S3 API surface, optional KMS
/// endpoints, the admin surface, and the auth + Server-header middleware.
pub fn create_router(state: state::AppState) -> Router {
    // Core S3 surface: service root, bucket-level and object-level routes.
    let mut router = Router::new()
        .route("/", axum::routing::get(handlers::list_buckets))
        .route(
            "/{bucket}",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        .route(
            "/{bucket}/{*key}",
            axum::routing::put(handlers::put_object)
                .get(handlers::get_object)
                .delete(handlers::delete_object)
                .head(handlers::head_object)
                .post(handlers::post_object),
        );

    // KMS endpoints are mounted only when enabled in the config.
    if state.config.kms_enabled {
        router = router
            .route("/kms/keys", axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key))
            .route("/kms/keys/{key_id}", axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key))
            .route("/kms/keys/{key_id}/enable", axum::routing::post(handlers::kms::enable_key))
            .route("/kms/keys/{key_id}/disable", axum::routing::post(handlers::kms::disable_key))
            .route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
            .route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
            .route("/kms/generate-data-key", axum::routing::post(handlers::kms::generate_data_key));
    }

    // Admin surface (always mounted): site topology/peers, IAM, website
    // domains, GC and integrity maintenance.
    router = router
        .route("/admin/site/local", axum::routing::get(handlers::admin::get_local_site).put(handlers::admin::update_local_site))
        .route("/admin/site/all", axum::routing::get(handlers::admin::list_all_sites))
        .route("/admin/site/peers", axum::routing::post(handlers::admin::register_peer_site))
        .route("/admin/site/peers/{site_id}", axum::routing::get(handlers::admin::get_peer_site).put(handlers::admin::update_peer_site).delete(handlers::admin::delete_peer_site))
        .route("/admin/site/peers/{site_id}/health", axum::routing::post(handlers::admin::check_peer_health))
        .route("/admin/site/topology", axum::routing::get(handlers::admin::get_topology))
        .route("/admin/site/peers/{site_id}/bidirectional-status", axum::routing::get(handlers::admin::check_bidirectional_status))
        .route("/admin/iam/users", axum::routing::get(handlers::admin::iam_list_users))
        .route("/admin/iam/users/{identifier}", axum::routing::get(handlers::admin::iam_get_user))
        .route("/admin/iam/users/{identifier}/policies", axum::routing::get(handlers::admin::iam_get_user_policies))
        .route("/admin/iam/users/{identifier}/access-keys", axum::routing::post(handlers::admin::iam_create_access_key))
        .route("/admin/iam/users/{identifier}/access-keys/{access_key}", axum::routing::delete(handlers::admin::iam_delete_access_key))
        .route("/admin/iam/users/{identifier}/disable", axum::routing::post(handlers::admin::iam_disable_user))
        .route("/admin/iam/users/{identifier}/enable", axum::routing::post(handlers::admin::iam_enable_user))
        .route("/admin/website-domains", axum::routing::get(handlers::admin::list_website_domains).post(handlers::admin::create_website_domain))
        .route("/admin/website-domains/{domain}", axum::routing::get(handlers::admin::get_website_domain).put(handlers::admin::update_website_domain).delete(handlers::admin::delete_website_domain))
        .route("/admin/gc/status", axum::routing::get(handlers::admin::gc_status))
        .route("/admin/gc/run", axum::routing::post(handlers::admin::gc_run))
        .route("/admin/gc/history", axum::routing::get(handlers::admin::gc_history))
        .route("/admin/integrity/status", axum::routing::get(handlers::admin::integrity_status))
        .route("/admin/integrity/run", axum::routing::post(handlers::admin::integrity_run))
        .route("/admin/integrity/history", axum::routing::get(handlers::admin::integrity_history));

    // Auth runs on every route; the Server header layer wraps outermost.
    router
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn(middleware::server_header))
        .with_state(state)
}
|
||||
@@ -1,97 +0,0 @@
|
||||
use myfsio_server::config::ServerConfig;
|
||||
use myfsio_server::state::AppState;
|
||||
|
||||
/// Entry point: configure logging, build application state from the
/// environment, start the optional background services, and serve the
/// router until shutdown. Background tasks are aborted on every exit path.
#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();

    let config = ServerConfig::from_env();
    let bind_addr = config.bind_addr;

    tracing::info!("MyFSIO Rust Engine starting on {}", bind_addr);
    tracing::info!("Storage root: {}", config.storage_root.display());
    tracing::info!("Region: {}", config.region);
    tracing::info!(
        "Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics: {}",
        config.encryption_enabled,
        config.kms_enabled,
        config.gc_enabled,
        config.lifecycle_enabled,
        config.integrity_enabled,
        config.metrics_enabled
    );

    // Encryption/KMS state needs async initialization.
    let state = if config.encryption_enabled || config.kms_enabled {
        AppState::new_with_encryption(config.clone()).await
    } else {
        AppState::new(config.clone())
    };

    // Handles for all background services; aborted before process exit.
    let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();

    if let Some(ref gc) = state.gc {
        bg_handles.push(gc.clone().start_background());
        tracing::info!("GC background service started");
    }

    if let Some(ref integrity) = state.integrity {
        bg_handles.push(integrity.clone().start_background());
        tracing::info!("Integrity checker background service started");
    }

    if let Some(ref metrics) = state.metrics {
        bg_handles.push(metrics.clone().start_background());
        tracing::info!("Metrics collector background service started");
    }

    if config.lifecycle_enabled {
        let lifecycle = std::sync::Arc::new(
            myfsio_server::services::lifecycle::LifecycleService::new(
                state.storage.clone(),
                myfsio_server::services::lifecycle::LifecycleConfig::default(),
            ),
        );
        bg_handles.push(lifecycle.start_background());
        tracing::info!("Lifecycle manager background service started");
    }

    let app = myfsio_server::create_router(state);

    let listener = match tokio::net::TcpListener::bind(bind_addr).await {
        Ok(listener) => listener,
        Err(err) => {
            // Clearer message for the common "port busy" case.
            if err.kind() == std::io::ErrorKind::AddrInUse {
                tracing::error!("Port already in use: {}", bind_addr);
            } else {
                tracing::error!("Failed to bind {}: {}", bind_addr, err);
            }
            for handle in bg_handles {
                handle.abort();
            }
            std::process::exit(1);
        }
    };
    tracing::info!("Listening on {}", bind_addr);

    // Serve until Ctrl+C triggers the graceful-shutdown future.
    if let Err(err) = axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
    {
        tracing::error!("Server exited with error: {}", err);
        for handle in bg_handles {
            handle.abort();
        }
        std::process::exit(1);
    }

    // Normal shutdown: stop background services before returning.
    for handle in bg_handles {
        handle.abort();
    }
}
|
||||
|
||||
/// Resolve once Ctrl+C is received; used as axum's graceful-shutdown
/// trigger. Panics if the signal handler cannot be installed.
async fn shutdown_signal() {
    tokio::signal::ctrl_c()
        .await
        .expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutdown signal received");
}
|
||||
@@ -1,569 +0,0 @@
|
||||
use axum::extract::{Request, State};
|
||||
use axum::http::{Method, StatusCode};
|
||||
use axum::middleware::Next;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use myfsio_auth::sigv4;
|
||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||
use myfsio_common::types::Principal;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
pub async fn auth_layer(
|
||||
State(state): State<AppState>,
|
||||
mut req: Request,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
let uri = req.uri().clone();
|
||||
let path = uri.path().to_string();
|
||||
|
||||
if path == "/" && req.method() == axum::http::Method::GET {
|
||||
match try_auth(&state, &req) {
|
||||
AuthResult::Ok(principal) => {
|
||||
if let Err(err) = authorize_request(&state, &principal, &req) {
|
||||
return error_response(err, &path);
|
||||
}
|
||||
req.extensions_mut().insert(principal);
|
||||
}
|
||||
AuthResult::Denied(err) => return error_response(err, &path),
|
||||
AuthResult::NoAuth => {
|
||||
return error_response(
|
||||
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
|
||||
&path,
|
||||
);
|
||||
}
|
||||
}
|
||||
return next.run(req).await;
|
||||
}
|
||||
|
||||
match try_auth(&state, &req) {
|
||||
AuthResult::Ok(principal) => {
|
||||
if let Err(err) = authorize_request(&state, &principal, &req) {
|
||||
return error_response(err, &path);
|
||||
}
|
||||
req.extensions_mut().insert(principal);
|
||||
next.run(req).await
|
||||
}
|
||||
AuthResult::Denied(err) => error_response(err, &path),
|
||||
AuthResult::NoAuth => {
|
||||
error_response(
|
||||
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
|
||||
&path,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of attempting to authenticate a request.
enum AuthResult {
    /// Authenticated successfully as this principal.
    Ok(Principal),
    /// Credentials were presented but rejected.
    Denied(S3Error),
    /// No recognizable credentials were found on the request.
    NoAuth,
}
|
||||
|
||||
/// Map the request's path, method, and query string onto an IAM
/// (bucket, action, key) check for `principal`.
fn authorize_request(state: &AppState, principal: &Principal, req: &Request) -> Result<(), S3Error> {
    let path = req.uri().path();
    // Service root: listing buckets.
    if path == "/" {
        if state.iam.authorize(principal, None, "list", None) {
            return Ok(());
        }
        return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
    }

    // NOTE(review): admin and KMS routes bypass IAM authorization here
    // (authentication already happened in try_auth). Confirm per-endpoint
    // checks exist elsewhere, or this grants every authenticated principal
    // full admin access.
    if path.starts_with("/admin/") || path.starts_with("/kms/") {
        return Ok(());
    }

    // First path segment is the bucket; the rest (if any) is the object key.
    let mut segments = path.trim_start_matches('/').split('/').filter(|s| !s.is_empty());
    let bucket = match segments.next() {
        Some(b) => b,
        None => {
            return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
        }
    };
    let remaining: Vec<&str> = segments.collect();
    let query = req.uri().query().unwrap_or("");

    // Bucket-level request (no object key).
    if remaining.is_empty() {
        let action = resolve_bucket_action(req.method(), query);
        if state.iam.authorize(principal, Some(bucket), action, None) {
            return Ok(());
        }
        return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
    }

    let object_key = remaining.join("/");
    // CopyObject: the caller needs read on the source AND write on the
    // destination.
    if req.method() == Method::PUT {
        if let Some(copy_source) = req
            .headers()
            .get("x-amz-copy-source")
            .and_then(|v| v.to_str().ok())
        {
            let source = copy_source.strip_prefix('/').unwrap_or(copy_source);
            if let Some((src_bucket, src_key)) = source.split_once('/') {
                let source_allowed =
                    state.iam.authorize(principal, Some(src_bucket), "read", Some(src_key));
                let dest_allowed =
                    state.iam.authorize(principal, Some(bucket), "write", Some(&object_key));
                if source_allowed && dest_allowed {
                    return Ok(());
                }
                return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
            }
        }
    }

    // Plain object-level request.
    let action = resolve_object_action(req.method(), query);
    if state
        .iam
        .authorize(principal, Some(bucket), action, Some(&object_key))
    {
        return Ok(());
    }

    Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
}
|
||||
|
||||
fn resolve_bucket_action(method: &Method, query: &str) -> &'static str {
|
||||
if has_query_key(query, "versioning") {
|
||||
return "versioning";
|
||||
}
|
||||
if has_query_key(query, "tagging") {
|
||||
return "tagging";
|
||||
}
|
||||
if has_query_key(query, "cors") {
|
||||
return "cors";
|
||||
}
|
||||
if has_query_key(query, "location") {
|
||||
return "list";
|
||||
}
|
||||
if has_query_key(query, "encryption") {
|
||||
return "encryption";
|
||||
}
|
||||
if has_query_key(query, "lifecycle") {
|
||||
return "lifecycle";
|
||||
}
|
||||
if has_query_key(query, "acl") {
|
||||
return "share";
|
||||
}
|
||||
if has_query_key(query, "policy") || has_query_key(query, "policyStatus") {
|
||||
return "policy";
|
||||
}
|
||||
if has_query_key(query, "replication") {
|
||||
return "replication";
|
||||
}
|
||||
if has_query_key(query, "quota") {
|
||||
return "quota";
|
||||
}
|
||||
if has_query_key(query, "website") {
|
||||
return "website";
|
||||
}
|
||||
if has_query_key(query, "object-lock") {
|
||||
return "object_lock";
|
||||
}
|
||||
if has_query_key(query, "notification") {
|
||||
return "notification";
|
||||
}
|
||||
if has_query_key(query, "logging") {
|
||||
return "logging";
|
||||
}
|
||||
if has_query_key(query, "versions") || has_query_key(query, "uploads") {
|
||||
return "list";
|
||||
}
|
||||
if has_query_key(query, "delete") {
|
||||
return "delete";
|
||||
}
|
||||
|
||||
match *method {
|
||||
Method::GET => "list",
|
||||
Method::HEAD => "read",
|
||||
Method::PUT => "create_bucket",
|
||||
Method::DELETE => "delete_bucket",
|
||||
Method::POST => "write",
|
||||
_ => "list",
|
||||
}
|
||||
}
|
||||
|
||||
fn resolve_object_action(method: &Method, query: &str) -> &'static str {
|
||||
if has_query_key(query, "tagging") {
|
||||
return if *method == Method::GET { "read" } else { "write" };
|
||||
}
|
||||
if has_query_key(query, "acl") {
|
||||
return if *method == Method::GET { "read" } else { "write" };
|
||||
}
|
||||
if has_query_key(query, "retention") || has_query_key(query, "legal-hold") {
|
||||
return "object_lock";
|
||||
}
|
||||
if has_query_key(query, "attributes") {
|
||||
return "read";
|
||||
}
|
||||
if has_query_key(query, "uploads") || has_query_key(query, "uploadId") {
|
||||
return match *method {
|
||||
Method::GET => "read",
|
||||
_ => "write",
|
||||
};
|
||||
}
|
||||
if has_query_key(query, "select") {
|
||||
return "read";
|
||||
}
|
||||
|
||||
match *method {
|
||||
Method::GET | Method::HEAD => "read",
|
||||
Method::PUT => "write",
|
||||
Method::DELETE => "delete",
|
||||
Method::POST => "write",
|
||||
_ => "read",
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the raw query string contains `key`, either as a bare flag
/// (`?key`) or with a value (`?key=...`). Exact-key match only — prefixes
/// of longer keys do not count.
fn has_query_key(query: &str, key: &str) -> bool {
    if query.is_empty() {
        return false;
    }
    let with_value = format!("{}=", key);
    query
        .split('&')
        .any(|part| !part.is_empty() && (part == key || part.starts_with(&with_value)))
}
|
||||
|
||||
/// Try each supported authentication mechanism in order:
/// 1. SigV4 `Authorization` header,
/// 2. SigV4 presigned query string (`X-Amz-Algorithm=AWS4-HMAC-SHA256`),
/// 3. the custom `x-access-key` / `x-secret-key` header pair.
/// Returns `NoAuth` when none of these are present.
fn try_auth(state: &AppState, req: &Request) -> AuthResult {
    if let Some(auth_header) = req.headers().get("authorization") {
        if let Ok(auth_str) = auth_header.to_str() {
            if auth_str.starts_with("AWS4-HMAC-SHA256 ") {
                return verify_sigv4_header(state, req, auth_str);
            }
        }
    }

    let query = req.uri().query().unwrap_or("");
    if query.contains("X-Amz-Algorithm=AWS4-HMAC-SHA256") {
        return verify_sigv4_query(state, req);
    }

    // Simple static-credential headers: authenticate directly against IAM.
    if let (Some(ak), Some(sk)) = (
        req.headers().get("x-access-key").and_then(|v| v.to_str().ok()),
        req.headers().get("x-secret-key").and_then(|v| v.to_str().ok()),
    ) {
        return match state.iam.authenticate(ak, sk) {
            Some(principal) => AuthResult::Ok(principal),
            None => AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
            ),
        };
    }

    AuthResult::NoAuth
}
|
||||
|
||||
/// Verify a request signed with SigV4 via the `Authorization` header.
///
/// Parses the header into Credential / SignedHeaders / Signature, checks
/// timestamp freshness, recomputes the signature with the stored secret
/// key, and resolves the principal on success.
fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthResult {
    // Header shape:
    // "AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=..."
    // The caller (try_auth) only invokes us when the prefix is present,
    // so this unwrap cannot fail.
    let parts: Vec<&str> = auth_str
        .strip_prefix("AWS4-HMAC-SHA256 ")
        .unwrap()
        .split(", ")
        .collect();

    if parts.len() != 3 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed Authorization header"),
        );
    }

    let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
    let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
    let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");

    // Credential scope: access-key/date/region/service/aws4_request.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }

    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];
    // cred_parts[4] ("aws4_request" terminator) is not validated here.

    // Request timestamp: x-amz-date preferred, Date header as fallback.
    let amz_date = req
        .headers()
        .get("x-amz-date")
        .or_else(|| req.headers().get("date"))
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");

    if amz_date.is_empty() {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::AccessDenied, "Missing Date header"),
        );
    }

    // Replay protection: reject timestamps outside the configured skew.
    if let Some(err) = check_timestamp_freshness(amz_date, state.config.sigv4_timestamp_tolerance_secs) {
        return AuthResult::Denied(err);
    }

    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };

    let method = req.method().as_str();
    let canonical_uri = req.uri().path();

    let query_params = parse_query_params(req.uri().query().unwrap_or(""));

    // A missing x-amz-content-sha256 is treated as an unsigned payload.
    let payload_hash = req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("UNSIGNED-PAYLOAD");

    // Collect the values of exactly the headers the client signed, in the
    // order listed; absent headers contribute an empty value.
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();

    // Recompute the signature server-side and compare with the provided one.
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );

    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }

    // Signature is valid; resolve the key to a principal for authorization.
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
|
||||
|
||||
/// Verify a SigV4 presigned-URL request (signature parameters carried in
/// the query string: X-Amz-Credential, X-Amz-Date, X-Amz-Expires,
/// X-Amz-SignedHeaders, X-Amz-Signature).
fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
    let query = req.uri().query().unwrap_or("");
    let params = parse_query_params(query);
    // Borrowing map for O(1) lookups over the decoded parameter list.
    let param_map: std::collections::HashMap<&str, &str> = params
        .iter()
        .map(|(k, v)| (k.as_str(), v.as_str()))
        .collect();

    let credential = match param_map.get("X-Amz-Credential") {
        Some(c) => *c,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Credential"),
            );
        }
    };

    // Presigned URLs must sign at least "host"; default accordingly.
    let signed_headers_str = param_map
        .get("X-Amz-SignedHeaders")
        .copied()
        .unwrap_or("host");
    let provided_signature = match param_map.get("X-Amz-Signature") {
        Some(s) => *s,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Signature"),
            );
        }
    };
    let amz_date = match param_map.get("X-Amz-Date") {
        Some(d) => *d,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Date"),
            );
        }
    };
    let expires_str = match param_map.get("X-Amz-Expires") {
        Some(e) => *e,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Expires"),
            );
        }
    };

    // Credential scope: access-key/date/region/service/aws4_request.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }

    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];

    let expires: u64 = match expires_str.parse() {
        Ok(e) => e,
        Err(_) => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Invalid X-Amz-Expires"),
            );
        }
    };

    // Enforce the server-configured expiry window for presigned URLs.
    if expires < state.config.presigned_url_min_expiry
        || expires > state.config.presigned_url_max_expiry
    {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "X-Amz-Expires out of range"),
        );
    }

    // Expiry + clock-skew check.
    // NOTE(review): an X-Amz-Date that fails to parse skips this whole
    // check (fail-open); the signature still covers the date string, but
    // such a URL would never expire — confirm this is intended.
    if let Ok(request_time) =
        NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
    {
        let request_utc = request_time.and_utc();
        let now = Utc::now();
        let elapsed = (now - request_utc).num_seconds();
        if elapsed > expires as i64 {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request has expired"),
            );
        }
        // Reject URLs dated further in the future than the tolerated skew.
        if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request is too far in the future"),
            );
        }
    }

    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };

    let method = req.method().as_str();
    let canonical_uri = req.uri().path();

    // The canonical request is computed over every query parameter except
    // the signature itself.
    let query_params_no_sig: Vec<(String, String)> = params
        .iter()
        .filter(|(k, _)| k != "X-Amz-Signature")
        .cloned()
        .collect();

    // Presigned URLs always use an unsigned payload.
    let payload_hash = "UNSIGNED-PAYLOAD";

    // Collect the values of the signed headers, in the order listed;
    // absent headers contribute an empty value.
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();

    // Recompute the signature server-side and compare with the provided one.
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params_no_sig,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );

    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }

    // Signature is valid; resolve the key to a principal for authorization.
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
|
||||
|
||||
fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Error> {
|
||||
let request_time = NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ").ok()?;
|
||||
let request_utc = request_time.and_utc();
|
||||
let now = Utc::now();
|
||||
let diff = (now - request_utc).num_seconds().unsigned_abs();
|
||||
|
||||
if diff > tolerance_secs {
|
||||
return Some(S3Error::new(
|
||||
S3ErrorCode::AccessDenied,
|
||||
"Request timestamp too old or too far in the future",
|
||||
));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn parse_query_params(query: &str) -> Vec<(String, String)> {
|
||||
if query.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
query
|
||||
.split('&')
|
||||
.filter_map(|pair| {
|
||||
let mut parts = pair.splitn(2, '=');
|
||||
let key = parts.next()?;
|
||||
let value = parts.next().unwrap_or("");
|
||||
Some((
|
||||
urlencoding_decode(key),
|
||||
urlencoding_decode(value),
|
||||
))
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Percent-decode `s`; invalid UTF-8 sequences become U+FFFD rather than
/// failing (lossy decode), so this never errors.
fn urlencoding_decode(s: &str) -> String {
    percent_encoding::percent_decode_str(s)
        .decode_utf8_lossy()
        .into_owned()
}
|
||||
|
||||
/// Render an `S3Error` as an AWS-style XML error response, attaching the
/// request resource path and a freshly generated request id.
fn error_response(err: S3Error, resource: &str) -> Response {
    let status =
        StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    // 32-char lowercase hex (no hyphens), matching the S3 request-id style.
    let request_id = uuid::Uuid::new_v4().simple().to_string();
    let body = err
        .with_resource(resource.to_string())
        .with_request_id(request_id)
        .to_xml();
    (status, [("content-type", "application/xml")], body).into_response()
}
|
||||
@@ -1,16 +0,0 @@
|
||||
mod auth;
|
||||
|
||||
pub use auth::auth_layer;
|
||||
|
||||
use axum::extract::Request;
|
||||
use axum::middleware::Next;
|
||||
use axum::response::Response;
|
||||
|
||||
pub async fn server_header(req: Request, next: Next) -> Response {
|
||||
let mut resp = next.run(req).await;
|
||||
resp.headers_mut().insert(
|
||||
"server",
|
||||
crate::SERVER_HEADER.parse().unwrap(),
|
||||
);
|
||||
resp
|
||||
}
|
||||
@@ -1,263 +0,0 @@
|
||||
use serde_json::{json, Value};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tunables for the background garbage-collection service.
pub struct GcConfig {
    /// How often the background GC cycle runs, in hours.
    pub interval_hours: f64,
    /// Temp files older than this many hours are deleted.
    pub temp_file_max_age_hours: f64,
    /// Multipart upload directories older than this many days are deleted.
    pub multipart_max_age_days: u64,
    /// Lock files older than this many hours are deleted.
    pub lock_file_max_age_hours: f64,
    /// When true, report what would be deleted without deleting anything.
    pub dry_run: bool,
}
|
||||
|
||||
impl Default for GcConfig {
    /// Defaults: run every 6 h; reap temp files after 24 h, multipart
    /// uploads after 7 days, lock files after 1 h; deletions enabled.
    fn default() -> Self {
        Self {
            interval_hours: 6.0,
            temp_file_max_age_hours: 24.0,
            multipart_max_age_days: 7,
            lock_file_max_age_hours: 1.0,
            dry_run: false,
        }
    }
}
|
||||
|
||||
/// Background garbage collector for temp files, stale multipart uploads,
/// abandoned lock files, and empty directories under the storage root.
pub struct GcService {
    /// Root of the object store (contains the `.myfsio.sys` tree).
    storage_root: PathBuf,
    config: GcConfig,
    /// Guards against overlapping GC cycles.
    running: Arc<RwLock<bool>>,
    /// In-memory record of past executions (capped at 50 entries).
    history: Arc<RwLock<Vec<Value>>>,
    /// Where `history` is persisted as JSON.
    history_path: PathBuf,
}
|
||||
|
||||
impl GcService {
    /// Build a service rooted at `storage_root`, loading any previously
    /// persisted execution history from
    /// `.myfsio.sys/config/gc_history.json`. Missing or corrupt history is
    /// silently treated as empty.
    pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("gc_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage_root,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// Current configuration plus whether a cycle is in flight, as JSON.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "temp_file_max_age_hours": self.config.temp_file_max_age_hours,
            "multipart_max_age_days": self.config.multipart_max_age_days,
            "lock_file_max_age_hours": self.config.lock_file_max_age_hours,
            "dry_run": self.config.dry_run,
        })
    }

    /// Past execution records (oldest first), as JSON.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }

    /// Run one GC cycle immediately and record it in the history log.
    ///
    /// `dry_run` is OR-ed with the configured flag, so a configured
    /// dry-run can never be overridden into real deletions. Errors if a
    /// cycle is already running.
    pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
        // Claim the `running` flag, dropping the write guard before the
        // (potentially long) cycle so status queries stay responsive.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("GC already running".to_string());
            }
            *running = true;
        }

        let start = Instant::now();
        let result = self.execute_gc(dry_run || self.config.dry_run).await;
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;

        // Append the elapsed time to the copy of the result stored in
        // history; the caller still gets the unadorned result.
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }

        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run || self.config.dry_run,
            "result": result_json,
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Keep only the 50 most recent executions.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result)
    }

    /// One sweep over the storage tree. In dry-run mode nothing is
    /// deleted, but the per-category counters are still incremented to
    /// report what WOULD be removed (except empty-dir removal, which is
    /// skipped entirely in dry-run).
    async fn execute_gc(&self, dry_run: bool) -> Value {
        let mut temp_files_deleted = 0u64;
        let mut temp_bytes_freed = 0u64;
        let mut multipart_uploads_deleted = 0u64;
        let mut lock_files_deleted = 0u64;
        let mut empty_dirs_removed = 0u64;
        let mut errors: Vec<String> = Vec::new();

        let now = std::time::SystemTime::now();
        let temp_max_age = std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
        let multipart_max_age = std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
        let lock_max_age = std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);

        // Pass 1: stale temp files under .myfsio.sys/tmp. Entries whose
        // metadata/mtime cannot be read are silently skipped.
        let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
        if tmp_dir.exists() {
            match std::fs::read_dir(&tmp_dir) {
                Ok(entries) => {
                    for entry in entries.flatten() {
                        if let Ok(metadata) = entry.metadata() {
                            if let Ok(modified) = metadata.modified() {
                                if let Ok(age) = now.duration_since(modified) {
                                    if age > temp_max_age {
                                        let size = metadata.len();
                                        if !dry_run {
                                            if let Err(e) = std::fs::remove_file(entry.path()) {
                                                errors.push(format!("Failed to remove temp file: {}", e));
                                                continue;
                                            }
                                        }
                                        temp_files_deleted += 1;
                                        temp_bytes_freed += size;
                                    }
                                }
                            }
                        }
                    }
                }
                Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
            }
        }

        // Pass 2: abandoned multipart upload directories laid out as
        // .myfsio.sys/multipart/<bucket>/<upload>; removal errors are
        // deliberately ignored (best effort).
        let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
        if multipart_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
                        for upload in uploads.flatten() {
                            if let Ok(metadata) = upload.metadata() {
                                if let Ok(modified) = metadata.modified() {
                                    if let Ok(age) = now.duration_since(modified) {
                                        if age > multipart_max_age {
                                            if !dry_run {
                                                let _ = std::fs::remove_dir_all(upload.path());
                                            }
                                            multipart_uploads_deleted += 1;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // Pass 3: stale per-bucket lock files under
        // .myfsio.sys/buckets/<bucket>/locks.
        let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
        if buckets_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    let locks_dir = bucket_entry.path().join("locks");
                    if locks_dir.exists() {
                        if let Ok(locks) = std::fs::read_dir(&locks_dir) {
                            for lock in locks.flatten() {
                                if let Ok(metadata) = lock.metadata() {
                                    if let Ok(modified) = metadata.modified() {
                                        if let Ok(age) = now.duration_since(modified) {
                                            if age > lock_max_age {
                                                if !dry_run {
                                                    let _ = std::fs::remove_file(lock.path());
                                                }
                                                lock_files_deleted += 1;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // Pass 4: remove first-level directories under tmp/multipart that
        // are now empty. Skipped in dry-run, so the counter stays 0 there.
        if !dry_run {
            for dir in [&tmp_dir, &multipart_dir] {
                if dir.exists() {
                    if let Ok(entries) = std::fs::read_dir(dir) {
                        for entry in entries.flatten() {
                            if entry.path().is_dir() {
                                if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
                                    if contents.next().is_none() {
                                        let _ = std::fs::remove_dir(entry.path());
                                        empty_dirs_removed += 1;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        json!({
            "temp_files_deleted": temp_files_deleted,
            "temp_bytes_freed": temp_bytes_freed,
            "multipart_uploads_deleted": multipart_uploads_deleted,
            "lock_files_deleted": lock_files_deleted,
            "empty_dirs_removed": empty_dirs_removed,
            "errors": errors,
        })
    }

    /// Persist the history log as pretty-printed JSON. Best-effort: write
    /// and mkdir failures are ignored.
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(&self.history_path, serde_json::to_string_pretty(&data).unwrap_or_default());
    }

    /// Spawn the periodic GC loop. The interval timer's first tick fires
    /// immediately and is consumed before the loop, so the first real
    /// cycle runs one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("GC cycle starting");
                match self.run_now(false).await {
                    Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("GC cycle failed: {}", e),
                }
            }
        })
    }
}
|
||||
@@ -1,204 +0,0 @@
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
use serde_json::{json, Value};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tunables for the background object-integrity scanner.
pub struct IntegrityConfig {
    /// How often the background scan runs, in hours.
    pub interval_hours: f64,
    /// Maximum number of objects listed (and therefore scanned) per bucket.
    pub batch_size: usize,
    /// Whether healing is requested for background runs.
    /// NOTE(review): the scanner currently only reports — see
    /// `check_integrity`, which ignores this flag.
    pub auto_heal: bool,
    /// When true, requested runs report without modifying anything.
    pub dry_run: bool,
}
|
||||
|
||||
impl Default for IntegrityConfig {
    /// Defaults: daily scan, 1000 objects per bucket, no healing, real run.
    fn default() -> Self {
        Self {
            interval_hours: 24.0,
            batch_size: 1000,
            auto_heal: false,
            dry_run: false,
        }
    }
}
|
||||
|
||||
/// Background scanner that verifies stored objects against their recorded
/// ETags and reports corruption / missing data files.
pub struct IntegrityService {
    /// Storage backend used to enumerate buckets/objects and resolve paths.
    storage: Arc<FsStorageBackend>,
    config: IntegrityConfig,
    /// Guards against overlapping scans.
    running: Arc<RwLock<bool>>,
    /// In-memory record of past executions (capped at 50 entries).
    history: Arc<RwLock<Vec<Value>>>,
    /// Where `history` is persisted as JSON.
    history_path: PathBuf,
}
|
||||
|
||||
impl IntegrityService {
    /// Build a service over `storage`, loading persisted execution history
    /// from `.myfsio.sys/config/integrity_history.json` under
    /// `storage_root`. Missing or corrupt history is treated as empty.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: &std::path::Path,
        config: IntegrityConfig,
    ) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("integrity_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// Current configuration plus whether a scan is in flight, as JSON.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "batch_size": self.config.batch_size,
            "auto_heal": self.config.auto_heal,
            "dry_run": self.config.dry_run,
        })
    }

    /// Past execution records (oldest first), as JSON.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }

    /// Run one integrity scan immediately and record it in the history
    /// log. Errors if a scan is already running.
    pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
        // Claim the `running` flag, dropping the write guard before the
        // (potentially long) scan so status queries stay responsive.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Integrity check already running".to_string());
            }
            *running = true;
        }

        let start = Instant::now();
        let result = self.check_integrity(dry_run, auto_heal).await;
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;

        // Append the elapsed time to the copy of the result stored in
        // history; the caller still gets the unadorned result.
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }

        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run,
            "auto_heal": auto_heal,
            "result": result_json,
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Keep only the 50 most recent executions.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result)
    }

    /// Scan every bucket, comparing each object's on-disk content hash to
    /// its recorded ETag.
    ///
    /// NOTE(review): `_dry_run` and `_auto_heal` are accepted but unused —
    /// the scan only reports; healing is not implemented here.
    /// NOTE(review): only the first `batch_size` objects per bucket are
    /// examined (no pagination loop) — confirm that is intended.
    async fn check_integrity(&self, _dry_run: bool, _auto_heal: bool) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(b) => b,
            Err(e) => return json!({"error": e.to_string()}),
        };

        let mut objects_scanned = 0u64;
        let mut corrupted = 0u64;
        let mut phantom_metadata = 0u64;
        let mut errors: Vec<String> = Vec::new();

        for bucket in &buckets {
            let params = myfsio_common::types::ListParams {
                max_keys: self.config.batch_size,
                ..Default::default()
            };
            let objects = match self.storage.list_objects(&bucket.name, &params).await {
                Ok(r) => r.objects,
                Err(e) => {
                    errors.push(format!("{}: {}", bucket.name, e));
                    continue;
                }
            };

            for obj in &objects {
                objects_scanned += 1;
                match self.storage.get_object_path(&bucket.name, &obj.key).await {
                    Ok(path) => {
                        if !path.exists() {
                            // Metadata exists but the data file is gone.
                            phantom_metadata += 1;
                        } else if let Some(ref expected_etag) = obj.etag {
                            // Recompute the file hash and compare to the
                            // stored ETag; mismatch means corruption.
                            match myfsio_crypto::hashing::md5_file(&path) {
                                Ok(actual_etag) => {
                                    if &actual_etag != expected_etag {
                                        corrupted += 1;
                                    }
                                }
                                Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                            }
                        }
                    }
                    Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                }
            }
        }

        json!({
            "objects_scanned": objects_scanned,
            "buckets_scanned": buckets.len(),
            "corrupted_objects": corrupted,
            "phantom_metadata": phantom_metadata,
            "errors": errors,
        })
    }

    /// Persist the history log as pretty-printed JSON. Best-effort: write
    /// and mkdir failures are ignored.
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }

    /// Spawn the periodic scan loop. The interval timer's first tick fires
    /// immediately and is consumed before the loop, so the first real scan
    /// runs one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Integrity check starting");
                match self.run_now(false, false).await {
                    Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
                    Err(e) => tracing::warn!("Integrity check failed: {}", e),
                }
            }
        })
    }
}
|
||||
@@ -1,153 +0,0 @@
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
use serde_json::{json, Value};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tunables for the bucket-lifecycle evaluation service.
pub struct LifecycleConfig {
    /// How often lifecycle rules are evaluated, in seconds.
    pub interval_seconds: u64,
}
|
||||
|
||||
impl Default for LifecycleConfig {
    /// Default: evaluate lifecycle rules hourly.
    fn default() -> Self {
        Self {
            interval_seconds: 3600,
        }
    }
}
|
||||
|
||||
/// Background service that applies per-bucket S3 lifecycle rules
/// (object expiration and incomplete-multipart abort).
pub struct LifecycleService {
    /// Storage backend used to list/delete objects and abort uploads.
    storage: Arc<FsStorageBackend>,
    config: LifecycleConfig,
    /// Guards against overlapping evaluation cycles.
    running: Arc<RwLock<bool>>,
}
|
||||
|
||||
impl LifecycleService {
    /// Build a service over `storage` with the given schedule.
    pub fn new(storage: Arc<FsStorageBackend>, config: LifecycleConfig) -> Self {
        Self {
            storage,
            config,
            running: Arc::new(RwLock::new(false)),
        }
    }

    /// Evaluate all bucket lifecycle rules once. Errors if a cycle is
    /// already running.
    pub async fn run_cycle(&self) -> Result<Value, String> {
        // Claim the `running` flag, dropping the write guard before the
        // (potentially long) evaluation.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Lifecycle already running".to_string());
            }
            *running = true;
        }

        let result = self.evaluate_rules().await;
        *self.running.write().await = false;
        Ok(result)
    }

    /// Walk every bucket's lifecycle configuration and apply enabled
    /// rules: `Expiration.Days` deletes objects older than the cutoff,
    /// `AbortIncompleteMultipartUpload.DaysAfterInitiation` aborts stale
    /// uploads. Buckets without (or with unparseable) lifecycle config are
    /// skipped silently.
    ///
    /// NOTE(review): expiration only considers the first 1000 matching
    /// objects per rule (no pagination loop) — confirm that is intended.
    async fn evaluate_rules(&self) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(b) => b,
            Err(e) => return json!({"error": e.to_string()}),
        };

        let mut total_expired = 0u64;
        let mut total_multipart_aborted = 0u64;
        let mut errors: Vec<String> = Vec::new();

        for bucket in &buckets {
            let config = match self.storage.get_bucket_config(&bucket.name).await {
                Ok(c) => c,
                Err(_) => continue,
            };

            let lifecycle = match &config.lifecycle {
                Some(lc) => lc,
                None => continue,
            };

            // Lifecycle is stored as a JSON string; parse it to a Value.
            let rules = match lifecycle.as_str().and_then(|s| serde_json::from_str::<Value>(s).ok()) {
                Some(v) => v,
                None => continue,
            };

            let rules_arr = match rules.get("Rules").and_then(|r| r.as_array()) {
                Some(a) => a.clone(),
                None => continue,
            };

            for rule in &rules_arr {
                if rule.get("Status").and_then(|s| s.as_str()) != Some("Enabled") {
                    continue;
                }

                // Newer `Filter.Prefix` form, falling back to the legacy
                // top-level `Prefix`; empty prefix matches everything.
                let prefix = rule
                    .get("Filter")
                    .and_then(|f| f.get("Prefix"))
                    .and_then(|p| p.as_str())
                    .or_else(|| rule.get("Prefix").and_then(|p| p.as_str()))
                    .unwrap_or("");

                // Expiration.Days: delete objects last modified before the
                // cutoff.
                if let Some(exp) = rule.get("Expiration") {
                    if let Some(days) = exp.get("Days").and_then(|d| d.as_u64()) {
                        let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
                        let params = myfsio_common::types::ListParams {
                            max_keys: 1000,
                            prefix: if prefix.is_empty() { None } else { Some(prefix.to_string()) },
                            ..Default::default()
                        };
                        if let Ok(result) = self.storage.list_objects(&bucket.name, &params).await {
                            for obj in &result.objects {
                                if obj.last_modified < cutoff {
                                    match self.storage.delete_object(&bucket.name, &obj.key).await {
                                        Ok(()) => total_expired += 1,
                                        Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                                    }
                                }
                            }
                        }
                    }
                }

                // AbortIncompleteMultipartUpload: abort uploads initiated
                // before the cutoff.
                if let Some(abort) = rule.get("AbortIncompleteMultipartUpload") {
                    if let Some(days) = abort.get("DaysAfterInitiation").and_then(|d| d.as_u64()) {
                        let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
                        if let Ok(uploads) = self.storage.list_multipart_uploads(&bucket.name).await {
                            for upload in &uploads {
                                if upload.initiated < cutoff {
                                    match self.storage.abort_multipart(&bucket.name, &upload.upload_id).await {
                                        Ok(()) => total_multipart_aborted += 1,
                                        Err(e) => errors.push(format!("abort {}: {}", upload.upload_id, e)),
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        json!({
            "objects_expired": total_expired,
            "multipart_aborted": total_multipart_aborted,
            "buckets_evaluated": buckets.len(),
            "errors": errors,
        })
    }

    /// Spawn the periodic evaluation loop. The interval timer's first tick
    /// fires immediately and is consumed before the loop, so the first
    /// real cycle runs one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_seconds);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Lifecycle evaluation starting");
                match self.run_cycle().await {
                    Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("Lifecycle cycle failed: {}", e),
                }
            }
        })
    }
}
|
||||
@@ -1,219 +0,0 @@
|
||||
use serde_json::{json, Value};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tunables for the operation-metrics collector.
pub struct MetricsConfig {
    /// Length of one aggregation window, in minutes.
    pub interval_minutes: u64,
    /// How long snapshots are retained, in hours.
    pub retention_hours: u64,
}
|
||||
|
||||
impl Default for MetricsConfig {
    /// Defaults: 5-minute windows, 24 hours of retention.
    fn default() -> Self {
        Self {
            interval_minutes: 5,
            retention_hours: 24,
        }
    }
}
|
||||
|
||||
/// Per-HTTP-method counters accumulated within one metrics window.
struct MethodStats {
    /// Total requests seen.
    count: u64,
    /// Requests with status < 400.
    success_count: u64,
    /// Requests with status >= 400.
    error_count: u64,
    /// Total request-body bytes received.
    bytes_in: u64,
    /// Total response-body bytes sent.
    bytes_out: u64,
    /// Raw latency samples (ms), one per request, for percentile math.
    latencies: Vec<f64>,
}
|
||||
|
||||
impl MethodStats {
    /// Zeroed stats for a method not seen before in this window.
    fn new() -> Self {
        Self {
            count: 0,
            success_count: 0,
            error_count: 0,
            bytes_in: 0,
            bytes_out: 0,
            latencies: Vec::new(),
        }
    }

    /// Summarize the counters plus latency statistics as JSON.
    ///
    /// Percentiles are nearest-rank over a sorted copy of the samples:
    /// p50 is the upper median (index `len / 2`); p95/p99 indices are
    /// clamped to the last element. All latency fields are 0 when no
    /// samples were recorded.
    fn to_json(&self) -> Value {
        let (min, max, avg, p50, p95, p99) = if self.latencies.is_empty() {
            (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        } else {
            let mut sorted = self.latencies.clone();
            // NaN-tolerant sort: incomparable pairs are treated as equal.
            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
            let len = sorted.len();
            let sum: f64 = sorted.iter().sum();
            (
                sorted[0],
                sorted[len - 1],
                sum / len as f64,
                sorted[len / 2],
                sorted[((len as f64 * 0.95) as usize).min(len - 1)],
                sorted[((len as f64 * 0.99) as usize).min(len - 1)],
            )
        };

        json!({
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
            "latency_min_ms": min,
            "latency_max_ms": max,
            "latency_avg_ms": avg,
            "latency_p50_ms": p50,
            "latency_p95_ms": p95,
            "latency_p99_ms": p99,
        })
    }
}
|
||||
|
||||
/// Mutable accumulator for the in-progress metrics window.
struct CurrentWindow {
    /// Per-HTTP-method stats.
    by_method: HashMap<String, MethodStats>,
    /// Request counts keyed by status class ("2xx", "4xx", ...).
    by_status_class: HashMap<String, u64>,
    /// When this window started accumulating.
    start_time: Instant,
}
|
||||
|
||||
impl CurrentWindow {
    /// Start a fresh, empty window anchored at "now".
    fn new() -> Self {
        Self {
            by_method: HashMap::new(),
            by_status_class: HashMap::new(),
            start_time: Instant::now(),
        }
    }

    /// Clear all counters and restart the window clock (used after a flush).
    fn reset(&mut self) {
        self.by_method.clear();
        self.by_status_class.clear();
        self.start_time = Instant::now();
    }
}
|
||||
|
||||
/// Collects per-request metrics into a rolling window and periodically
/// flushes summarized snapshots to disk.
pub struct MetricsService {
    config: MetricsConfig,
    /// Window currently accumulating live requests.
    current: Arc<RwLock<CurrentWindow>>,
    /// Flushed historical snapshots, oldest first.
    snapshots: Arc<RwLock<Vec<Value>>>,
    /// Persistence location under `.myfsio.sys/config/operation_metrics.json`.
    snapshots_path: PathBuf,
}
|
||||
|
||||
impl MetricsService {
|
||||
pub fn new(storage_root: &std::path::Path, config: MetricsConfig) -> Self {
|
||||
let snapshots_path = storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("operation_metrics.json");
|
||||
|
||||
let snapshots = if snapshots_path.exists() {
|
||||
std::fs::read_to_string(&snapshots_path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
|
||||
.and_then(|v| v.get("snapshots").and_then(|s| s.as_array().cloned()))
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
Self {
|
||||
config,
|
||||
current: Arc::new(RwLock::new(CurrentWindow::new())),
|
||||
snapshots: Arc::new(RwLock::new(snapshots)),
|
||||
snapshots_path,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn record(&self, method: &str, status: u16, latency_ms: f64, bytes_in: u64, bytes_out: u64) {
|
||||
let mut window = self.current.write().await;
|
||||
let stats = window.by_method.entry(method.to_string()).or_insert_with(MethodStats::new);
|
||||
stats.count += 1;
|
||||
if status < 400 {
|
||||
stats.success_count += 1;
|
||||
} else {
|
||||
stats.error_count += 1;
|
||||
}
|
||||
stats.bytes_in += bytes_in;
|
||||
stats.bytes_out += bytes_out;
|
||||
stats.latencies.push(latency_ms);
|
||||
|
||||
let class = format!("{}xx", status / 100);
|
||||
*window.by_status_class.entry(class).or_insert(0) += 1;
|
||||
}
|
||||
|
||||
pub async fn snapshot(&self) -> Value {
|
||||
let window = self.current.read().await;
|
||||
let mut by_method = serde_json::Map::new();
|
||||
for (method, stats) in &window.by_method {
|
||||
by_method.insert(method.clone(), stats.to_json());
|
||||
}
|
||||
|
||||
let snapshots = self.snapshots.read().await;
|
||||
json!({
|
||||
"enabled": true,
|
||||
"current_window": {
|
||||
"by_method": by_method,
|
||||
"by_status_class": window.by_status_class,
|
||||
"window_start_elapsed_secs": window.start_time.elapsed().as_secs_f64(),
|
||||
},
|
||||
"snapshots": *snapshots,
|
||||
})
|
||||
}
|
||||
|
||||
async fn flush_window(&self) {
|
||||
let snap = {
|
||||
let mut window = self.current.write().await;
|
||||
let mut by_method = serde_json::Map::new();
|
||||
for (method, stats) in &window.by_method {
|
||||
by_method.insert(method.clone(), stats.to_json());
|
||||
}
|
||||
let snap = json!({
|
||||
"timestamp": chrono::Utc::now().to_rfc3339(),
|
||||
"window_seconds": self.config.interval_minutes * 60,
|
||||
"by_method": by_method,
|
||||
"by_status_class": window.by_status_class,
|
||||
});
|
||||
window.reset();
|
||||
snap
|
||||
};
|
||||
|
||||
let max_snapshots = (self.config.retention_hours * 60 / self.config.interval_minutes) as usize;
|
||||
{
|
||||
let mut snapshots = self.snapshots.write().await;
|
||||
snapshots.push(snap);
|
||||
if snapshots.len() > max_snapshots {
|
||||
let excess = snapshots.len() - max_snapshots;
|
||||
snapshots.drain(..excess);
|
||||
}
|
||||
}
|
||||
self.save_snapshots().await;
|
||||
}
|
||||
|
||||
async fn save_snapshots(&self) {
|
||||
let snapshots = self.snapshots.read().await;
|
||||
let data = json!({ "snapshots": *snapshots });
|
||||
if let Some(parent) = self.snapshots_path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
let _ = std::fs::write(
|
||||
&self.snapshots_path,
|
||||
serde_json::to_string_pretty(&data).unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
|
||||
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
|
||||
tokio::spawn(async move {
|
||||
let mut timer = tokio::time::interval(interval);
|
||||
timer.tick().await;
|
||||
loop {
|
||||
timer.tick().await;
|
||||
self.flush_window().await;
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
pub mod gc;
|
||||
pub mod lifecycle;
|
||||
pub mod integrity;
|
||||
pub mod metrics;
|
||||
pub mod site_registry;
|
||||
pub mod website_domains;
|
||||
@@ -1,143 +0,0 @@
|
||||
use chrono::Utc;
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Identity of the local site as stored in the site registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SiteInfo {
    pub site_id: String,
    /// Address used to reach this site.
    pub endpoint: String,
    /// Defaults to "us-east-1" when absent from the JSON.
    #[serde(default = "default_region")]
    pub region: String,
    /// Defaults to 100 when absent from the JSON.
    #[serde(default = "default_priority")]
    pub priority: i32,
    #[serde(default)]
    pub display_name: String,
    /// Creation timestamp, when recorded.
    #[serde(default)]
    pub created_at: Option<String>,
}
|
||||
|
||||
/// Serde default for `region` when the field is missing from stored JSON.
fn default_region() -> String {
    String::from("us-east-1")
}

/// Serde default for `priority` when the field is missing from stored JSON.
fn default_priority() -> i32 {
    100
}
|
||||
|
||||
/// A remote peer site known to this site's registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerSite {
    pub site_id: String,
    /// Address used to reach the peer.
    pub endpoint: String,
    #[serde(default = "default_region")]
    pub region: String,
    /// Relative priority among peers; defaults to 100. (Ordering semantics
    /// — lower vs. higher wins — are decided by the consumer, not here.)
    #[serde(default = "default_priority")]
    pub priority: i32,
    #[serde(default)]
    pub display_name: String,
    /// Identifier of an active connection, when one exists.
    #[serde(default)]
    pub connection_id: Option<String>,
    #[serde(default)]
    pub created_at: Option<String>,
    /// Result of the most recent health check (set by `update_health`).
    #[serde(default)]
    pub is_healthy: bool,
    /// RFC 3339 timestamp of the most recent health check.
    #[serde(default)]
    pub last_health_check: Option<String>,
}
|
||||
|
||||
/// On-disk shape of `site_registry.json`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct RegistryData {
    /// This site's own identity, if one has been configured.
    #[serde(default)]
    local: Option<SiteInfo>,
    /// Known remote peers.
    #[serde(default)]
    peers: Vec<PeerSite>,
}
|
||||
|
||||
/// JSON-file-backed registry of the local site identity and remote peers.
/// Mutating methods persist eagerly via `save`.
pub struct SiteRegistry {
    /// Location of `site_registry.json`.
    path: PathBuf,
    /// Shared in-memory copy of the registry contents.
    data: Arc<RwLock<RegistryData>>,
}
|
||||
|
||||
impl SiteRegistry {
|
||||
pub fn new(storage_root: &std::path::Path) -> Self {
|
||||
let path = storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("site_registry.json");
|
||||
let data = if path.exists() {
|
||||
std::fs::read_to_string(&path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str(&s).ok())
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
RegistryData::default()
|
||||
};
|
||||
Self {
|
||||
path,
|
||||
data: Arc::new(RwLock::new(data)),
|
||||
}
|
||||
}
|
||||
|
||||
fn save(&self) {
|
||||
let data = self.data.read();
|
||||
if let Some(parent) = self.path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
if let Ok(json) = serde_json::to_string_pretty(&*data) {
|
||||
let _ = std::fs::write(&self.path, json);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_local_site(&self) -> Option<SiteInfo> {
|
||||
self.data.read().local.clone()
|
||||
}
|
||||
|
||||
pub fn set_local_site(&self, site: SiteInfo) {
|
||||
self.data.write().local = Some(site);
|
||||
self.save();
|
||||
}
|
||||
|
||||
pub fn list_peers(&self) -> Vec<PeerSite> {
|
||||
self.data.read().peers.clone()
|
||||
}
|
||||
|
||||
pub fn get_peer(&self, site_id: &str) -> Option<PeerSite> {
|
||||
self.data.read().peers.iter().find(|p| p.site_id == site_id).cloned()
|
||||
}
|
||||
|
||||
pub fn add_peer(&self, peer: PeerSite) {
|
||||
self.data.write().peers.push(peer);
|
||||
self.save();
|
||||
}
|
||||
|
||||
pub fn update_peer(&self, peer: PeerSite) {
|
||||
let mut data = self.data.write();
|
||||
if let Some(existing) = data.peers.iter_mut().find(|p| p.site_id == peer.site_id) {
|
||||
*existing = peer;
|
||||
}
|
||||
drop(data);
|
||||
self.save();
|
||||
}
|
||||
|
||||
pub fn delete_peer(&self, site_id: &str) -> bool {
|
||||
let mut data = self.data.write();
|
||||
let len_before = data.peers.len();
|
||||
data.peers.retain(|p| p.site_id != site_id);
|
||||
let removed = data.peers.len() < len_before;
|
||||
drop(data);
|
||||
if removed {
|
||||
self.save();
|
||||
}
|
||||
removed
|
||||
}
|
||||
|
||||
pub fn update_health(&self, site_id: &str, is_healthy: bool) {
|
||||
let mut data = self.data.write();
|
||||
if let Some(peer) = data.peers.iter_mut().find(|p| p.site_id == site_id) {
|
||||
peer.is_healthy = is_healthy;
|
||||
peer.last_health_check = Some(Utc::now().to_rfc3339());
|
||||
}
|
||||
drop(data);
|
||||
self.save();
|
||||
}
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// On-disk shape of `website_domains.json`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct DomainData {
    /// Maps a domain name to the bucket that serves it.
    #[serde(default)]
    mappings: HashMap<String, String>,
}
|
||||
|
||||
/// JSON-file-backed mapping of website domains to bucket names.
/// Mutating methods persist eagerly via `save`.
pub struct WebsiteDomainStore {
    /// Location of `website_domains.json`.
    path: PathBuf,
    /// Shared in-memory copy of the mappings.
    data: Arc<RwLock<DomainData>>,
}
|
||||
|
||||
impl WebsiteDomainStore {
|
||||
pub fn new(storage_root: &std::path::Path) -> Self {
|
||||
let path = storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("website_domains.json");
|
||||
let data = if path.exists() {
|
||||
std::fs::read_to_string(&path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str(&s).ok())
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
DomainData::default()
|
||||
};
|
||||
Self {
|
||||
path,
|
||||
data: Arc::new(RwLock::new(data)),
|
||||
}
|
||||
}
|
||||
|
||||
fn save(&self) {
|
||||
let data = self.data.read();
|
||||
if let Some(parent) = self.path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
if let Ok(json) = serde_json::to_string_pretty(&*data) {
|
||||
let _ = std::fs::write(&self.path, json);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn list_all(&self) -> Vec<serde_json::Value> {
|
||||
self.data
|
||||
.read()
|
||||
.mappings
|
||||
.iter()
|
||||
.map(|(domain, bucket)| {
|
||||
serde_json::json!({
|
||||
"domain": domain,
|
||||
"bucket": bucket,
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn get_bucket(&self, domain: &str) -> Option<String> {
|
||||
self.data.read().mappings.get(domain).cloned()
|
||||
}
|
||||
|
||||
pub fn set_mapping(&self, domain: &str, bucket: &str) {
|
||||
self.data.write().mappings.insert(domain.to_string(), bucket.to_string());
|
||||
self.save();
|
||||
}
|
||||
|
||||
pub fn delete_mapping(&self, domain: &str) -> bool {
|
||||
let removed = self.data.write().mappings.remove(domain).is_some();
|
||||
if removed {
|
||||
self.save();
|
||||
}
|
||||
removed
|
||||
}
|
||||
}
|
||||
|
||||
/// Canonicalize a domain for lookup: strip surrounding whitespace and
/// lowercase ASCII letters (non-ASCII characters are left untouched).
pub fn normalize_domain(domain: &str) -> String {
    let stripped = domain.trim();
    stripped.to_ascii_lowercase()
}
|
||||
|
||||
/// Syntactic domain-name check: total length 1..=253, at least two labels,
/// each label 1..=63 chars of ASCII alphanumerics/hyphens with no leading
/// or trailing hyphen.
pub fn is_valid_domain(domain: &str) -> bool {
    if domain.is_empty() || domain.len() > 253 {
        return false;
    }
    let labels: Vec<&str> = domain.split('.').collect();
    if labels.len() < 2 {
        return false;
    }
    labels.iter().all(|label| {
        let length_ok = !label.is_empty() && label.len() <= 63;
        let charset_ok = label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-');
        let edges_ok = !label.starts_with('-') && !label.ends_with('-');
        length_ok && charset_ok && edges_ok
    })
}
|
||||
@@ -1,121 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::config::ServerConfig;
|
||||
use crate::services::gc::GcService;
|
||||
use crate::services::integrity::IntegrityService;
|
||||
use crate::services::metrics::MetricsService;
|
||||
use crate::services::site_registry::SiteRegistry;
|
||||
use crate::services::website_domains::WebsiteDomainStore;
|
||||
use myfsio_auth::iam::IamService;
|
||||
use myfsio_crypto::encryption::EncryptionService;
|
||||
use myfsio_crypto::kms::KmsService;
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
|
||||
/// Shared application state handed to request handlers. Optional services
/// are `None` when disabled by configuration (or when initialization
/// failed, for encryption/KMS).
#[derive(Clone)]
pub struct AppState {
    pub config: ServerConfig,
    pub storage: Arc<FsStorageBackend>,
    pub iam: Arc<IamService>,
    /// Populated by `new_with_encryption` when enabled and key material loads.
    pub encryption: Option<Arc<EncryptionService>>,
    /// Populated by `new_with_encryption` when KMS is enabled.
    pub kms: Option<Arc<KmsService>>,
    pub gc: Option<Arc<GcService>>,
    pub integrity: Option<Arc<IntegrityService>>,
    pub metrics: Option<Arc<MetricsService>>,
    /// Always `Some` as constructed by `AppState::new`.
    pub site_registry: Option<Arc<SiteRegistry>>,
    pub website_domains: Option<Arc<WebsiteDomainStore>>,
}
|
||||
|
||||
impl AppState {
    /// Construct core state synchronously. Encryption and KMS are left
    /// unset; use `new_with_encryption` to initialize them.
    pub fn new(config: ServerConfig) -> Self {
        let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
        let iam = Arc::new(IamService::new_with_secret(
            config.iam_config_path.clone(),
            config.secret_key.clone(),
        ));

        // Each optional service is gated on its own config flag.
        let gc = if config.gc_enabled {
            Some(Arc::new(GcService::new(
                config.storage_root.clone(),
                crate::services::gc::GcConfig::default(),
            )))
        } else {
            None
        };

        let integrity = if config.integrity_enabled {
            Some(Arc::new(IntegrityService::new(
                storage.clone(),
                &config.storage_root,
                crate::services::integrity::IntegrityConfig::default(),
            )))
        } else {
            None
        };

        let metrics = if config.metrics_enabled {
            Some(Arc::new(MetricsService::new(
                &config.storage_root,
                crate::services::metrics::MetricsConfig::default(),
            )))
        } else {
            None
        };

        // The site registry is not feature-gated; it is always constructed.
        let site_registry = Some(Arc::new(SiteRegistry::new(&config.storage_root)));

        let website_domains = if config.website_hosting_enabled {
            Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
        } else {
            None
        };

        Self {
            config,
            storage,
            iam,
            encryption: None,
            kms: None,
            gc,
            integrity,
            metrics,
            site_registry,
            website_domains,
        }
    }

    /// Construct state and additionally initialize KMS and encryption from
    /// key material under `<storage_root>/.myfsio.sys/keys`.
    ///
    /// Initialization failures are logged and leave the corresponding
    /// service disabled rather than aborting startup.
    pub async fn new_with_encryption(config: ServerConfig) -> Self {
        let mut state = Self::new(config.clone());

        let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");

        let kms = if config.kms_enabled {
            match KmsService::new(&keys_dir).await {
                Ok(k) => Some(Arc::new(k)),
                Err(e) => {
                    tracing::error!("Failed to initialize KMS: {}", e);
                    None
                }
            }
        } else {
            None
        };

        // Encryption wraps the master key and, when available, the KMS.
        let encryption = if config.encryption_enabled {
            match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
                Ok(master_key) => {
                    Some(Arc::new(EncryptionService::new(master_key, kms.clone())))
                }
                Err(e) => {
                    tracing::error!("Failed to initialize encryption: {}", e);
                    None
                }
            }
        } else {
            None
        };

        state.encryption = encryption;
        state.kms = kms;
        state
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,26 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-storage"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
myfsio-crypto = { path = "../myfsio-crypto" }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
dashmap = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
regex = { workspace = true }
|
||||
unicode-normalization = { workspace = true }
|
||||
md-5 = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
|
||||
tempfile = "3"
|
||||
@@ -1,59 +0,0 @@
|
||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors surfaced by the storage layer; mapped onto S3 wire errors by the
/// `From<StorageError> for S3Error` conversion.
#[derive(Debug, Error)]
pub enum StorageError {
    #[error("Bucket not found: {0}")]
    BucketNotFound(String),
    #[error("Bucket already exists: {0}")]
    BucketAlreadyExists(String),
    #[error("Bucket not empty: {0}")]
    BucketNotEmpty(String),
    #[error("Object not found: {bucket}/{key}")]
    ObjectNotFound { bucket: String, key: String },
    #[error("Invalid bucket name: {0}")]
    InvalidBucketName(String),
    #[error("Invalid object key: {0}")]
    InvalidObjectKey(String),
    /// Multipart upload id is unknown or already completed/aborted.
    #[error("Upload not found: {0}")]
    UploadNotFound(String),
    #[error("Quota exceeded: {0}")]
    QuotaExceeded(String),
    /// Wraps filesystem-level failures.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Wraps metadata (de)serialization failures.
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),
    #[error("Internal error: {0}")]
    Internal(String),
}
|
||||
|
||||
impl From<StorageError> for S3Error {
    /// Map a storage-layer error onto the S3 error code (and, where
    /// available, the resource path) that the wire protocol expects.
    /// Io/Json/Internal all collapse to `InternalError`.
    fn from(err: StorageError) -> Self {
        match err {
            StorageError::BucketNotFound(name) => {
                S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
            }
            StorageError::BucketAlreadyExists(name) => {
                S3Error::from_code(S3ErrorCode::BucketAlreadyExists)
                    .with_resource(format!("/{}", name))
            }
            StorageError::BucketNotEmpty(name) => {
                S3Error::from_code(S3ErrorCode::BucketNotEmpty)
                    .with_resource(format!("/{}", name))
            }
            StorageError::ObjectNotFound { bucket, key } => {
                S3Error::from_code(S3ErrorCode::NoSuchKey)
                    .with_resource(format!("/{}/{}", bucket, key))
            }
            StorageError::InvalidBucketName(msg) => S3Error::new(S3ErrorCode::InvalidBucketName, msg),
            StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
            StorageError::UploadNotFound(id) => {
                S3Error::new(S3ErrorCode::NoSuchUpload, format!("Upload {} not found", id))
            }
            StorageError::QuotaExceeded(msg) => S3Error::new(S3ErrorCode::QuotaExceeded, msg),
            StorageError::Io(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
            StorageError::Json(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
            StorageError::Internal(msg) => S3Error::new(S3ErrorCode::InternalError, msg),
        }
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +0,0 @@
|
||||
pub mod validation;
|
||||
pub mod traits;
|
||||
pub mod error;
|
||||
pub mod fs_backend;
|
||||
@@ -1,125 +0,0 @@
|
||||
use crate::error::StorageError;
|
||||
use myfsio_common::types::*;
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::pin::Pin;
|
||||
use tokio::io::AsyncRead;
|
||||
|
||||
/// Result alias for storage operations.
pub type StorageResult<T> = Result<T, StorageError>;
/// Boxed async byte stream used for object bodies.
pub type AsyncReadStream = Pin<Box<dyn AsyncRead + Send>>;

/// Unified interface implemented by storage backends: buckets, objects,
/// multipart uploads, bucket config, versioning, and tagging.
#[allow(async_fn_in_trait)]
pub trait StorageEngine: Send + Sync {
    // --- Bucket operations ---
    async fn list_buckets(&self) -> StorageResult<Vec<BucketMeta>>;
    async fn create_bucket(&self, name: &str) -> StorageResult<()>;
    async fn delete_bucket(&self, name: &str) -> StorageResult<()>;
    async fn bucket_exists(&self, name: &str) -> StorageResult<bool>;
    async fn bucket_stats(&self, name: &str) -> StorageResult<BucketStats>;

    // --- Object operations ---
    /// Store an object from a byte stream with optional user metadata.
    async fn put_object(
        &self,
        bucket: &str,
        key: &str,
        stream: AsyncReadStream,
        metadata: Option<HashMap<String, String>>,
    ) -> StorageResult<ObjectMeta>;

    async fn get_object(&self, bucket: &str, key: &str) -> StorageResult<(ObjectMeta, AsyncReadStream)>;

    /// Filesystem path of the stored object (for zero-copy serving).
    async fn get_object_path(&self, bucket: &str, key: &str) -> StorageResult<PathBuf>;

    /// Metadata only, without opening the object body.
    async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;

    async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<()>;

    async fn copy_object(
        &self,
        src_bucket: &str,
        src_key: &str,
        dst_bucket: &str,
        dst_key: &str,
    ) -> StorageResult<ObjectMeta>;

    async fn get_object_metadata(
        &self,
        bucket: &str,
        key: &str,
    ) -> StorageResult<HashMap<String, String>>;

    async fn put_object_metadata(
        &self,
        bucket: &str,
        key: &str,
        metadata: &HashMap<String, String>,
    ) -> StorageResult<()>;

    // --- Listing ---
    async fn list_objects(&self, bucket: &str, params: &ListParams) -> StorageResult<ListObjectsResult>;

    /// Single-level (non-recursive) listing.
    async fn list_objects_shallow(
        &self,
        bucket: &str,
        params: &ShallowListParams,
    ) -> StorageResult<ShallowListResult>;

    // --- Multipart uploads ---
    /// Start a multipart upload; returns the new upload id.
    async fn initiate_multipart(
        &self,
        bucket: &str,
        key: &str,
        metadata: Option<HashMap<String, String>>,
    ) -> StorageResult<String>;

    /// Upload one part; returns the part's ETag.
    async fn upload_part(
        &self,
        bucket: &str,
        upload_id: &str,
        part_number: u32,
        stream: AsyncReadStream,
    ) -> StorageResult<String>;

    /// Assemble the listed parts into the final object.
    async fn complete_multipart(
        &self,
        bucket: &str,
        upload_id: &str,
        parts: &[PartInfo],
    ) -> StorageResult<ObjectMeta>;

    async fn abort_multipart(&self, bucket: &str, upload_id: &str) -> StorageResult<()>;

    async fn list_parts(&self, bucket: &str, upload_id: &str) -> StorageResult<Vec<PartMeta>>;

    async fn list_multipart_uploads(
        &self,
        bucket: &str,
    ) -> StorageResult<Vec<MultipartUploadInfo>>;

    // --- Bucket configuration & versioning ---
    async fn get_bucket_config(&self, bucket: &str) -> StorageResult<BucketConfig>;
    async fn set_bucket_config(&self, bucket: &str, config: &BucketConfig) -> StorageResult<()>;

    async fn is_versioning_enabled(&self, bucket: &str) -> StorageResult<bool>;
    async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()>;

    async fn list_object_versions(
        &self,
        bucket: &str,
        key: &str,
    ) -> StorageResult<Vec<VersionInfo>>;

    // --- Object tagging ---
    async fn get_object_tags(
        &self,
        bucket: &str,
        key: &str,
    ) -> StorageResult<Vec<Tag>>;

    async fn set_object_tags(
        &self,
        bucket: &str,
        key: &str,
        tags: &[Tag],
    ) -> StorageResult<()>;

    async fn delete_object_tags(
        &self,
        bucket: &str,
        key: &str,
    ) -> StorageResult<()>;
}
|
||||
@@ -1,194 +0,0 @@
|
||||
use std::sync::LazyLock;
|
||||
use unicode_normalization::UnicodeNormalization;
|
||||
|
||||
/// Base names Windows refuses as file names (compared case-insensitively
/// after trimming trailing dots/spaces — see `validate_object_key`).
const WINDOWS_RESERVED: &[&str] = &[
    "CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
    "COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
    "LPT9",
];

/// Characters Windows forbids in path segments.
const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];

/// Top-level folder names reserved for internal bookkeeping inside a bucket.
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
/// Root folder reserved for system-wide state.
const SYSTEM_ROOT: &str = ".myfsio.sys";

/// Matches dotted-quad strings, used to reject IP-shaped bucket names.
/// Intentionally loose: octet value ranges are not checked.
static IP_REGEX: LazyLock<regex::Regex> =
    LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());
|
||||
|
||||
/// Validate an object key for safe storage on the local filesystem.
///
/// Returns `Some(reason)` when the key is rejected, `None` when acceptable.
/// Checks, in order: non-empty, no NUL bytes, NFC-normalized byte length,
/// no leading slash, per-segment rules (no `..`/`.`, no control characters,
/// Windows restrictions when `is_windows`), then reserved top-level prefixes.
///
/// NOTE(review): the length and segment checks run on the NFC-normalized
/// form while the caller presumably stores the original key — confirm the
/// caller normalizes keys consistently.
pub fn validate_object_key(
    object_key: &str,
    max_length_bytes: usize,
    is_windows: bool,
    reserved_prefixes: Option<&[&str]>,
) -> Option<String> {
    if object_key.is_empty() {
        return Some("Object key required".to_string());
    }

    if object_key.contains('\0') {
        return Some("Object key contains null bytes".to_string());
    }

    // Unicode NFC normalization so visually identical keys validate alike.
    let normalized: String = object_key.nfc().collect();

    if normalized.len() > max_length_bytes {
        return Some(format!(
            "Object key exceeds maximum length of {} bytes",
            max_length_bytes
        ));
    }

    if normalized.starts_with('/') || normalized.starts_with('\\') {
        return Some("Object key cannot start with a slash".to_string());
    }

    // '\\' is also a separator when the host OS or the target is Windows.
    let parts: Vec<&str> = if cfg!(windows) || is_windows {
        normalized.split(['/', '\\']).collect()
    } else {
        normalized.split('/').collect()
    };

    for part in &parts {
        if part.is_empty() {
            continue;
        }

        if *part == ".." {
            return Some("Object key contains parent directory references".to_string());
        }

        if *part == "." {
            return Some("Object key contains invalid segments".to_string());
        }

        // Reject ASCII control characters (codepoints below 0x20).
        if part.chars().any(|c| (c as u32) < 32) {
            return Some("Object key contains control characters".to_string());
        }

        if is_windows {
            if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
                return Some(
                    "Object key contains characters not supported on Windows filesystems"
                        .to_string(),
                );
            }
            if part.ends_with(' ') || part.ends_with('.') {
                return Some(
                    "Object key segments cannot end with spaces or periods on Windows".to_string(),
                );
            }
            // Windows ignores trailing dots/spaces when matching reserved
            // device names, hence the trim before the lookup.
            let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
            if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
                return Some(format!("Invalid filename segment: {}", part));
            }
        }
    }

    // Reserved prefixes apply only to the first non-empty path segment.
    let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
    if let Some(top) = non_empty_parts.first() {
        if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
            return Some("Object key uses a reserved prefix".to_string());
        }

        if let Some(prefixes) = reserved_prefixes {
            for prefix in prefixes {
                if *top == *prefix {
                    return Some("Object key uses a reserved prefix".to_string());
                }
            }
        }
    }

    None
}
|
||||
|
||||
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
|
||||
let len = bucket_name.len();
|
||||
if len < 3 || len > 63 {
|
||||
return Some("Bucket name must be between 3 and 63 characters".to_string());
|
||||
}
|
||||
|
||||
let bytes = bucket_name.as_bytes();
|
||||
if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
for &b in bytes {
|
||||
if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
|
||||
return Some(
|
||||
"Bucket name can only contain lowercase letters, digits, dots, and hyphens"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if bucket_name.contains("..") {
|
||||
return Some("Bucket name must not contain consecutive periods".to_string());
|
||||
}
|
||||
|
||||
if IP_REGEX.is_match(bucket_name) {
|
||||
return Some("Bucket name must not be formatted as an IP address".to_string());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for bucket-name and object-key validation.
    use super::*;

    #[test]
    fn test_valid_bucket_names() {
        assert!(validate_bucket_name("my-bucket").is_none());
        assert!(validate_bucket_name("test123").is_none());
        assert!(validate_bucket_name("my.bucket.name").is_none());
    }

    #[test]
    fn test_invalid_bucket_names() {
        // Too short, uppercase, bad edges, double dot, IP-shaped.
        assert!(validate_bucket_name("ab").is_some());
        assert!(validate_bucket_name("My-Bucket").is_some());
        assert!(validate_bucket_name("-bucket").is_some());
        assert!(validate_bucket_name("bucket-").is_some());
        assert!(validate_bucket_name("my..bucket").is_some());
        assert!(validate_bucket_name("192.168.1.1").is_some());
    }

    #[test]
    fn test_valid_object_keys() {
        assert!(validate_object_key("file.txt", 1024, false, None).is_none());
        assert!(validate_object_key("path/to/file.txt", 1024, false, None).is_none());
        assert!(validate_object_key("a", 1024, false, None).is_none());
    }

    #[test]
    fn test_invalid_object_keys() {
        // Empty, leading slash, traversal, reserved system prefixes.
        assert!(validate_object_key("", 1024, false, None).is_some());
        assert!(validate_object_key("/leading-slash", 1024, false, None).is_some());
        assert!(validate_object_key("path/../escape", 1024, false, None).is_some());
        assert!(validate_object_key(".myfsio.sys/secret", 1024, false, None).is_some());
        assert!(validate_object_key(".meta/data", 1024, false, None).is_some());
    }

    #[test]
    fn test_object_key_max_length() {
        // Boundary: exactly max is accepted, one over is rejected.
        let long_key = "a".repeat(1025);
        assert!(validate_object_key(&long_key, 1024, false, None).is_some());
        let ok_key = "a".repeat(1024);
        assert!(validate_object_key(&ok_key, 1024, false, None).is_none());
    }

    #[test]
    fn test_windows_validation() {
        // Reserved device name, illegal character, trailing space.
        assert!(validate_object_key("CON", 1024, true, None).is_some());
        assert!(validate_object_key("file<name", 1024, true, None).is_some());
        assert!(validate_object_key("file.txt ", 1024, true, None).is_some());
    }
}
|
||||
@@ -1,10 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-xml"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
quick-xml = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
@@ -1,14 +0,0 @@
|
||||
pub mod response;
|
||||
pub mod request;
|
||||
|
||||
use quick_xml::Writer;
|
||||
use std::io::Cursor;
|
||||
|
||||
/// Serialize a single `<tag>text</tag>` element, XML-escaping `text`.
///
/// The two `unwrap`s are expected never to fire here: writing to an
/// in-memory `Cursor<Vec<u8>>` should not fail, and the escaped output
/// should remain valid UTF-8 — TODO confirm against quick-xml's guarantees.
pub fn write_xml_element(tag: &str, text: &str) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));
    writer
        .create_element(tag)
        .write_text_content(quick_xml::events::BytesText::new(text))
        .unwrap();
    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
||||
@@ -1,159 +0,0 @@
|
||||
use quick_xml::events::Event;
|
||||
use quick_xml::Reader;
|
||||
|
||||
/// Parsed body of an S3 `DeleteObjects` (batch delete) request.
#[derive(Debug, Default)]
pub struct DeleteObjectsRequest {
    /// Objects to delete.
    pub objects: Vec<ObjectIdentifier>,
    /// Quiet mode flag from the request body.
    pub quiet: bool,
}

/// One object referenced in a batch delete.
#[derive(Debug)]
pub struct ObjectIdentifier {
    pub key: String,
    /// Specific version to delete, when present in the request.
    pub version_id: Option<String>,
}

/// Parsed body of an S3 `CompleteMultipartUpload` request.
#[derive(Debug, Default)]
pub struct CompleteMultipartUpload {
    /// Parts listed in the request, sorted by part number after parsing.
    pub parts: Vec<CompletedPart>,
}

/// One previously uploaded part listed in a completion request.
#[derive(Debug)]
pub struct CompletedPart {
    pub part_number: u32,
    /// ETag from the request, stored without surrounding quotes.
    pub etag: String,
}
|
||||
|
||||
pub fn parse_complete_multipart_upload(xml: &str) -> Result<CompleteMultipartUpload, String> {
|
||||
let mut reader = Reader::from_str(xml);
|
||||
let mut result = CompleteMultipartUpload::default();
|
||||
let mut buf = Vec::new();
|
||||
let mut current_tag = String::new();
|
||||
let mut part_number: Option<u32> = None;
|
||||
let mut etag: Option<String> = None;
|
||||
let mut in_part = false;
|
||||
|
||||
loop {
|
||||
match reader.read_event_into(&mut buf) {
|
||||
Ok(Event::Start(ref e)) => {
|
||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||
current_tag = name.clone();
|
||||
if name == "Part" {
|
||||
in_part = true;
|
||||
part_number = None;
|
||||
etag = None;
|
||||
}
|
||||
}
|
||||
Ok(Event::Text(ref e)) => {
|
||||
if in_part {
|
||||
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
||||
match current_tag.as_str() {
|
||||
"PartNumber" => {
|
||||
part_number = Some(text.trim().parse().map_err(|e: std::num::ParseIntError| e.to_string())?);
|
||||
}
|
||||
"ETag" => {
|
||||
etag = Some(text.trim().trim_matches('"').to_string());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Event::End(ref e)) => {
|
||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||
if name == "Part" && in_part {
|
||||
if let (Some(pn), Some(et)) = (part_number.take(), etag.take()) {
|
||||
result.parts.push(CompletedPart {
|
||||
part_number: pn,
|
||||
etag: et,
|
||||
});
|
||||
}
|
||||
in_part = false;
|
||||
}
|
||||
}
|
||||
Ok(Event::Eof) => break,
|
||||
Err(e) => return Err(format!("XML parse error: {}", e)),
|
||||
_ => {}
|
||||
}
|
||||
buf.clear();
|
||||
}
|
||||
|
||||
result.parts.sort_by_key(|p| p.part_number);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
||||
let mut reader = Reader::from_str(xml);
|
||||
let mut result = DeleteObjectsRequest::default();
|
||||
let mut buf = Vec::new();
|
||||
let mut current_tag = String::new();
|
||||
let mut current_key: Option<String> = None;
|
||||
let mut current_version_id: Option<String> = None;
|
||||
let mut in_object = false;
|
||||
|
||||
loop {
|
||||
match reader.read_event_into(&mut buf) {
|
||||
Ok(Event::Start(ref e)) => {
|
||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||
current_tag = name.clone();
|
||||
if name == "Object" {
|
||||
in_object = true;
|
||||
current_key = None;
|
||||
current_version_id = None;
|
||||
}
|
||||
}
|
||||
Ok(Event::Text(ref e)) => {
|
||||
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
||||
match current_tag.as_str() {
|
||||
"Key" if in_object => {
|
||||
current_key = Some(text.trim().to_string());
|
||||
}
|
||||
"VersionId" if in_object => {
|
||||
current_version_id = Some(text.trim().to_string());
|
||||
}
|
||||
"Quiet" => {
|
||||
result.quiet = text.trim() == "true";
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Ok(Event::End(ref e)) => {
|
||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||
if name == "Object" && in_object {
|
||||
if let Some(key) = current_key.take() {
|
||||
result.objects.push(ObjectIdentifier {
|
||||
key,
|
||||
version_id: current_version_id.take(),
|
||||
});
|
||||
}
|
||||
in_object = false;
|
||||
}
|
||||
}
|
||||
Ok(Event::Eof) => break,
|
||||
Err(e) => return Err(format!("XML parse error: {}", e)),
|
||||
_ => {}
|
||||
}
|
||||
buf.clear();
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_complete_multipart() {
        // Parts arrive out of order; the parser must sort by part number
        // and strip the quotes around each ETag.
        let xml = r#"<CompleteMultipartUpload>
<Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
<Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
</CompleteMultipartUpload>"#;

        let parsed = parse_complete_multipart_upload(xml).unwrap();
        assert_eq!(parsed.parts.len(), 2);
        assert_eq!(parsed.parts[0].part_number, 1);
        assert_eq!(parsed.parts[0].etag, "etag1");
        assert_eq!(parsed.parts[1].part_number, 2);
        assert_eq!(parsed.parts[1].etag, "etag2");
    }
}
|
||||
@@ -1,363 +0,0 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use myfsio_common::types::{BucketMeta, ObjectMeta};
|
||||
use quick_xml::events::{BytesDecl, BytesEnd, BytesStart, BytesText, Event};
|
||||
use quick_xml::Writer;
|
||||
use std::io::Cursor;
|
||||
|
||||
/// Format a timestamp the way S3 responses do: ISO-8601 UTC with
/// millisecond precision and a literal `Z`, e.g. `2024-01-02T03:04:05.678Z`.
pub fn format_s3_datetime(dt: &DateTime<Utc>) -> String {
    let stamp = dt.format("%Y-%m-%dT%H:%M:%S%.3fZ");
    stamp.to_string()
}
|
||||
|
||||
pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("ListAllMyBucketsResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
|
||||
writer.write_event(Event::Start(BytesStart::new("Owner"))).unwrap();
|
||||
write_text_element(&mut writer, "ID", owner_id);
|
||||
write_text_element(&mut writer, "DisplayName", owner_name);
|
||||
writer.write_event(Event::End(BytesEnd::new("Owner"))).unwrap();
|
||||
|
||||
writer.write_event(Event::Start(BytesStart::new("Buckets"))).unwrap();
|
||||
for bucket in buckets {
|
||||
writer.write_event(Event::Start(BytesStart::new("Bucket"))).unwrap();
|
||||
write_text_element(&mut writer, "Name", &bucket.name);
|
||||
write_text_element(&mut writer, "CreationDate", &format_s3_datetime(&bucket.creation_date));
|
||||
writer.write_event(Event::End(BytesEnd::new("Bucket"))).unwrap();
|
||||
}
|
||||
writer.write_event(Event::End(BytesEnd::new("Buckets"))).unwrap();
|
||||
|
||||
writer.write_event(Event::End(BytesEnd::new("ListAllMyBucketsResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn list_objects_v2_xml(
|
||||
bucket_name: &str,
|
||||
prefix: &str,
|
||||
delimiter: &str,
|
||||
max_keys: usize,
|
||||
objects: &[ObjectMeta],
|
||||
common_prefixes: &[String],
|
||||
is_truncated: bool,
|
||||
continuation_token: Option<&str>,
|
||||
next_continuation_token: Option<&str>,
|
||||
key_count: usize,
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("ListBucketResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
|
||||
write_text_element(&mut writer, "Name", bucket_name);
|
||||
write_text_element(&mut writer, "Prefix", prefix);
|
||||
if !delimiter.is_empty() {
|
||||
write_text_element(&mut writer, "Delimiter", delimiter);
|
||||
}
|
||||
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
||||
write_text_element(&mut writer, "KeyCount", &key_count.to_string());
|
||||
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
||||
|
||||
if let Some(token) = continuation_token {
|
||||
write_text_element(&mut writer, "ContinuationToken", token);
|
||||
}
|
||||
if let Some(token) = next_continuation_token {
|
||||
write_text_element(&mut writer, "NextContinuationToken", token);
|
||||
}
|
||||
|
||||
for obj in objects {
|
||||
writer.write_event(Event::Start(BytesStart::new("Contents"))).unwrap();
|
||||
write_text_element(&mut writer, "Key", &obj.key);
|
||||
write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
|
||||
if let Some(ref etag) = obj.etag {
|
||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
|
||||
}
|
||||
write_text_element(&mut writer, "Size", &obj.size.to_string());
|
||||
write_text_element(&mut writer, "StorageClass", obj.storage_class.as_deref().unwrap_or("STANDARD"));
|
||||
writer.write_event(Event::End(BytesEnd::new("Contents"))).unwrap();
|
||||
}
|
||||
|
||||
for prefix in common_prefixes {
|
||||
writer.write_event(Event::Start(BytesStart::new("CommonPrefixes"))).unwrap();
|
||||
write_text_element(&mut writer, "Prefix", prefix);
|
||||
writer.write_event(Event::End(BytesEnd::new("CommonPrefixes"))).unwrap();
|
||||
}
|
||||
|
||||
writer.write_event(Event::End(BytesEnd::new("ListBucketResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn list_objects_v1_xml(
|
||||
bucket_name: &str,
|
||||
prefix: &str,
|
||||
marker: &str,
|
||||
delimiter: &str,
|
||||
max_keys: usize,
|
||||
objects: &[ObjectMeta],
|
||||
common_prefixes: &[String],
|
||||
is_truncated: bool,
|
||||
next_marker: Option<&str>,
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
|
||||
writer
|
||||
.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None)))
|
||||
.unwrap();
|
||||
|
||||
let start = BytesStart::new("ListBucketResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
|
||||
write_text_element(&mut writer, "Name", bucket_name);
|
||||
write_text_element(&mut writer, "Prefix", prefix);
|
||||
write_text_element(&mut writer, "Marker", marker);
|
||||
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
||||
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
||||
|
||||
if !delimiter.is_empty() {
|
||||
write_text_element(&mut writer, "Delimiter", delimiter);
|
||||
}
|
||||
if !delimiter.is_empty() && is_truncated {
|
||||
if let Some(nm) = next_marker {
|
||||
if !nm.is_empty() {
|
||||
write_text_element(&mut writer, "NextMarker", nm);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for obj in objects {
|
||||
writer
|
||||
.write_event(Event::Start(BytesStart::new("Contents")))
|
||||
.unwrap();
|
||||
write_text_element(&mut writer, "Key", &obj.key);
|
||||
write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
|
||||
if let Some(ref etag) = obj.etag {
|
||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
|
||||
}
|
||||
write_text_element(&mut writer, "Size", &obj.size.to_string());
|
||||
writer
|
||||
.write_event(Event::End(BytesEnd::new("Contents")))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
for cp in common_prefixes {
|
||||
writer
|
||||
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
|
||||
.unwrap();
|
||||
write_text_element(&mut writer, "Prefix", cp);
|
||||
writer
|
||||
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
writer
|
||||
.write_event(Event::End(BytesEnd::new("ListBucketResult")))
|
||||
.unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
fn write_text_element(writer: &mut Writer<Cursor<Vec<u8>>>, tag: &str, text: &str) {
|
||||
writer.write_event(Event::Start(BytesStart::new(tag))).unwrap();
|
||||
writer.write_event(Event::Text(BytesText::new(text))).unwrap();
|
||||
writer.write_event(Event::End(BytesEnd::new(tag))).unwrap();
|
||||
}
|
||||
|
||||
pub fn initiate_multipart_upload_xml(bucket: &str, key: &str, upload_id: &str) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("InitiateMultipartUploadResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
write_text_element(&mut writer, "Bucket", bucket);
|
||||
write_text_element(&mut writer, "Key", key);
|
||||
write_text_element(&mut writer, "UploadId", upload_id);
|
||||
writer.write_event(Event::End(BytesEnd::new("InitiateMultipartUploadResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn complete_multipart_upload_xml(
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
etag: &str,
|
||||
location: &str,
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("CompleteMultipartUploadResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
write_text_element(&mut writer, "Location", location);
|
||||
write_text_element(&mut writer, "Bucket", bucket);
|
||||
write_text_element(&mut writer, "Key", key);
|
||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
|
||||
writer.write_event(Event::End(BytesEnd::new("CompleteMultipartUploadResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("CopyObjectResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
|
||||
write_text_element(&mut writer, "LastModified", last_modified);
|
||||
writer.write_event(Event::End(BytesEnd::new("CopyObjectResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn delete_result_xml(
|
||||
deleted: &[(String, Option<String>)],
|
||||
errors: &[(String, String, String)],
|
||||
quiet: bool,
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("DeleteResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
|
||||
if !quiet {
|
||||
for (key, version_id) in deleted {
|
||||
writer.write_event(Event::Start(BytesStart::new("Deleted"))).unwrap();
|
||||
write_text_element(&mut writer, "Key", key);
|
||||
if let Some(vid) = version_id {
|
||||
write_text_element(&mut writer, "VersionId", vid);
|
||||
}
|
||||
writer.write_event(Event::End(BytesEnd::new("Deleted"))).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
for (key, code, message) in errors {
|
||||
writer.write_event(Event::Start(BytesStart::new("Error"))).unwrap();
|
||||
write_text_element(&mut writer, "Key", key);
|
||||
write_text_element(&mut writer, "Code", code);
|
||||
write_text_element(&mut writer, "Message", message);
|
||||
writer.write_event(Event::End(BytesEnd::new("Error"))).unwrap();
|
||||
}
|
||||
|
||||
writer.write_event(Event::End(BytesEnd::new("DeleteResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn list_multipart_uploads_xml(
|
||||
bucket: &str,
|
||||
uploads: &[myfsio_common::types::MultipartUploadInfo],
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("ListMultipartUploadsResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
write_text_element(&mut writer, "Bucket", bucket);
|
||||
|
||||
for upload in uploads {
|
||||
writer.write_event(Event::Start(BytesStart::new("Upload"))).unwrap();
|
||||
write_text_element(&mut writer, "Key", &upload.key);
|
||||
write_text_element(&mut writer, "UploadId", &upload.upload_id);
|
||||
write_text_element(&mut writer, "Initiated", &format_s3_datetime(&upload.initiated));
|
||||
writer.write_event(Event::End(BytesEnd::new("Upload"))).unwrap();
|
||||
}
|
||||
|
||||
writer.write_event(Event::End(BytesEnd::new("ListMultipartUploadsResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
pub fn list_parts_xml(
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
upload_id: &str,
|
||||
parts: &[myfsio_common::types::PartMeta],
|
||||
) -> String {
|
||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
||||
|
||||
let start = BytesStart::new("ListPartsResult")
|
||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
||||
writer.write_event(Event::Start(start)).unwrap();
|
||||
write_text_element(&mut writer, "Bucket", bucket);
|
||||
write_text_element(&mut writer, "Key", key);
|
||||
write_text_element(&mut writer, "UploadId", upload_id);
|
||||
|
||||
for part in parts {
|
||||
writer.write_event(Event::Start(BytesStart::new("Part"))).unwrap();
|
||||
write_text_element(&mut writer, "PartNumber", &part.part_number.to_string());
|
||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", part.etag));
|
||||
write_text_element(&mut writer, "Size", &part.size.to_string());
|
||||
if let Some(ref lm) = part.last_modified {
|
||||
write_text_element(&mut writer, "LastModified", &format_s3_datetime(lm));
|
||||
}
|
||||
writer.write_event(Event::End(BytesEnd::new("Part"))).unwrap();
|
||||
}
|
||||
|
||||
writer.write_event(Event::End(BytesEnd::new("ListPartsResult"))).unwrap();
|
||||
|
||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;

    #[test]
    fn test_list_buckets_xml() {
        let buckets = vec![BucketMeta {
            name: "test-bucket".to_string(),
            creation_date: Utc::now(),
        }];
        let body = list_buckets_xml("owner-id", "owner-name", &buckets);
        assert!(body.contains("<Name>test-bucket</Name>"));
        assert!(body.contains("<ID>owner-id</ID>"));
        assert!(body.contains("ListAllMyBucketsResult"));
    }

    #[test]
    fn test_list_objects_v2_xml() {
        let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
        let body = list_objects_v2_xml(
            "my-bucket", "", "/", 1000, &objects, &[], false, None, None, 1,
        );
        assert!(body.contains("<Key>file.txt</Key>"));
        assert!(body.contains("<Size>1024</Size>"));
        assert!(body.contains("<IsTruncated>false</IsTruncated>"));
    }

    #[test]
    fn test_list_objects_v1_xml() {
        let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
        let body = list_objects_v1_xml(
            "my-bucket",
            "",
            "",
            "/",
            1000,
            &objects,
            &[],
            false,
            None,
        );
        assert!(body.contains("<Key>file.txt</Key>"));
        assert!(body.contains("<Size>1024</Size>"));
        // v1 always emits Marker, even when empty.
        assert!(body.contains("<Marker></Marker>"));
    }
}
|
||||
@@ -1,24 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio_core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "myfsio_core"
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
pyo3 = { version = "0.28", features = ["extension-module"] }
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
md-5 = "0.10"
|
||||
hex = "0.4"
|
||||
unicode-normalization = "0.1"
|
||||
serde_json = "1"
|
||||
regex = "1"
|
||||
lru = "0.14"
|
||||
parking_lot = "0.12"
|
||||
percent-encoding = "2"
|
||||
aes-gcm = "0.10"
|
||||
hkdf = "0.12"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
@@ -1,11 +0,0 @@
|
||||
[build-system]
|
||||
requires = ["maturin>=1.0,<2.0"]
|
||||
build-backend = "maturin"
|
||||
|
||||
[project]
|
||||
name = "myfsio_core"
|
||||
version = "0.1.0"
|
||||
requires-python = ">=3.10"
|
||||
|
||||
[tool.maturin]
|
||||
features = ["pyo3/extension-module"]
|
||||
@@ -1,192 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use pyo3::exceptions::{PyIOError, PyValueError};
|
||||
use pyo3::prelude::*;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Seek, SeekFrom, Write};
|
||||
|
||||
const DEFAULT_CHUNK_SIZE: usize = 65536;
|
||||
const HEADER_SIZE: usize = 4;
|
||||
|
||||
/// Fill `buf` as far as possible from `reader`, retrying on EINTR.
///
/// Returns the number of bytes actually read; a short count (including
/// zero) means end-of-stream was reached before `buf` was full.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        let n = match reader.read(&mut buf[total..]) {
            Ok(0) => break,
            Ok(n) => n,
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        };
        total += n;
    }
    Ok(total)
}
|
||||
|
||||
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], String> {
|
||||
let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
|
||||
let mut okm = [0u8; 12];
|
||||
hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
|
||||
.map_err(|e| format!("HKDF expand failed: {}", e))?;
|
||||
Ok(okm)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (input_path, output_path, key, base_nonce, chunk_size=DEFAULT_CHUNK_SIZE))]
|
||||
pub fn encrypt_stream_chunked(
|
||||
py: Python<'_>,
|
||||
input_path: &str,
|
||||
output_path: &str,
|
||||
key: &[u8],
|
||||
base_nonce: &[u8],
|
||||
chunk_size: usize,
|
||||
) -> PyResult<u32> {
|
||||
if key.len() != 32 {
|
||||
return Err(PyValueError::new_err(format!(
|
||||
"Key must be 32 bytes, got {}",
|
||||
key.len()
|
||||
)));
|
||||
}
|
||||
if base_nonce.len() != 12 {
|
||||
return Err(PyValueError::new_err(format!(
|
||||
"Base nonce must be 12 bytes, got {}",
|
||||
base_nonce.len()
|
||||
)));
|
||||
}
|
||||
|
||||
let chunk_size = if chunk_size == 0 {
|
||||
DEFAULT_CHUNK_SIZE
|
||||
} else {
|
||||
chunk_size
|
||||
};
|
||||
|
||||
let inp = input_path.to_owned();
|
||||
let out = output_path.to_owned();
|
||||
let key_arr: [u8; 32] = key.try_into().unwrap();
|
||||
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
|
||||
|
||||
py.detach(move || {
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
|
||||
let mut infile = File::open(&inp)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
|
||||
let mut outfile = File::create(&out)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;
|
||||
|
||||
outfile
|
||||
.write_all(&[0u8; 4])
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write header: {}", e)))?;
|
||||
|
||||
let mut buf = vec![0u8; chunk_size];
|
||||
let mut chunk_index: u32 = 0;
|
||||
|
||||
loop {
|
||||
let n = read_exact_chunk(&mut infile, &mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
|
||||
.map_err(|e| PyValueError::new_err(e))?;
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let encrypted = cipher
|
||||
.encrypt(nonce, &buf[..n])
|
||||
.map_err(|e| PyValueError::new_err(format!("Encrypt failed: {}", e)))?;
|
||||
|
||||
let size = encrypted.len() as u32;
|
||||
outfile
|
||||
.write_all(&size.to_be_bytes())
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write chunk size: {}", e)))?;
|
||||
outfile
|
||||
.write_all(&encrypted)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write chunk: {}", e)))?;
|
||||
|
||||
chunk_index += 1;
|
||||
}
|
||||
|
||||
outfile
|
||||
.seek(SeekFrom::Start(0))
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to seek: {}", e)))?;
|
||||
outfile
|
||||
.write_all(&chunk_index.to_be_bytes())
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write chunk count: {}", e)))?;
|
||||
|
||||
Ok(chunk_index)
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn decrypt_stream_chunked(
|
||||
py: Python<'_>,
|
||||
input_path: &str,
|
||||
output_path: &str,
|
||||
key: &[u8],
|
||||
base_nonce: &[u8],
|
||||
) -> PyResult<u32> {
|
||||
if key.len() != 32 {
|
||||
return Err(PyValueError::new_err(format!(
|
||||
"Key must be 32 bytes, got {}",
|
||||
key.len()
|
||||
)));
|
||||
}
|
||||
if base_nonce.len() != 12 {
|
||||
return Err(PyValueError::new_err(format!(
|
||||
"Base nonce must be 12 bytes, got {}",
|
||||
base_nonce.len()
|
||||
)));
|
||||
}
|
||||
|
||||
let inp = input_path.to_owned();
|
||||
let out = output_path.to_owned();
|
||||
let key_arr: [u8; 32] = key.try_into().unwrap();
|
||||
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
|
||||
|
||||
py.detach(move || {
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
|
||||
let mut infile = File::open(&inp)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
|
||||
let mut outfile = File::create(&out)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;
|
||||
|
||||
let mut header = [0u8; HEADER_SIZE];
|
||||
infile
|
||||
.read_exact(&mut header)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read header: {}", e)))?;
|
||||
let chunk_count = u32::from_be_bytes(header);
|
||||
|
||||
let mut size_buf = [0u8; HEADER_SIZE];
|
||||
for chunk_index in 0..chunk_count {
|
||||
infile
|
||||
.read_exact(&mut size_buf)
|
||||
.map_err(|e| {
|
||||
PyIOError::new_err(format!(
|
||||
"Failed to read chunk {} size: {}",
|
||||
chunk_index, e
|
||||
))
|
||||
})?;
|
||||
let chunk_size = u32::from_be_bytes(size_buf) as usize;
|
||||
|
||||
let mut encrypted = vec![0u8; chunk_size];
|
||||
infile.read_exact(&mut encrypted).map_err(|e| {
|
||||
PyIOError::new_err(format!("Failed to read chunk {}: {}", chunk_index, e))
|
||||
})?;
|
||||
|
||||
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
|
||||
.map_err(|e| PyValueError::new_err(e))?;
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let decrypted = cipher.decrypt(nonce, encrypted.as_ref()).map_err(|e| {
|
||||
PyValueError::new_err(format!("Decrypt chunk {} failed: {}", chunk_index, e))
|
||||
})?;
|
||||
|
||||
outfile.write_all(&decrypted).map_err(|e| {
|
||||
PyIOError::new_err(format!("Failed to write chunk {}: {}", chunk_index, e))
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(chunk_count)
|
||||
})
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use pyo3::exceptions::PyIOError;
|
||||
use pyo3::prelude::*;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
|
||||
const CHUNK_SIZE: usize = 65536;
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_file(py: Python<'_>, path: &str) -> PyResult<String> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn sha256_file(py: Python<'_>, path: &str) -> PyResult<String> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_sha256_file(py: Python<'_>, path: &str) -> PyResult<(String, String)> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut md5_hasher = Md5::new();
|
||||
let mut sha_hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
md5_hasher.update(&buf[..n]);
|
||||
sha_hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok((
|
||||
format!("{:x}", md5_hasher.finalize()),
|
||||
format!("{:x}", sha_hasher.finalize()),
|
||||
))
|
||||
})
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
mod crypto;
|
||||
mod hashing;
|
||||
mod metadata;
|
||||
mod sigv4;
|
||||
mod storage;
|
||||
mod streaming;
|
||||
mod validation;
|
||||
|
||||
use pyo3::prelude::*;
|
||||
|
||||
/// Python extension module `myfsio_core`.
///
/// Registers the Rust-accelerated primitives as module-level Python
/// functions, grouped by submodule below.
#[pymodule]
mod myfsio_core {
    use super::*;

    #[pymodule_init]
    fn init(m: &Bound<'_, PyModule>) -> PyResult<()> {
        // SigV4 signing / verification helpers.
        m.add_function(wrap_pyfunction!(sigv4::verify_sigv4_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::derive_signing_key, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::compute_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::build_string_to_sign, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::constant_time_compare, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::clear_signing_key_cache, m)?)?;

        // File / bytes hashing.
        m.add_function(wrap_pyfunction!(hashing::md5_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_sha256_file, m)?)?;

        // Bucket-name / object-key validation.
        m.add_function(wrap_pyfunction!(validation::validate_object_key, m)?)?;
        m.add_function(wrap_pyfunction!(validation::validate_bucket_name, m)?)?;

        // Index metadata read side.
        m.add_function(wrap_pyfunction!(metadata::read_index_entry, m)?)?;

        // Index write side and filesystem scans.
        m.add_function(wrap_pyfunction!(storage::write_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::delete_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::check_bucket_contents, m)?)?;
        m.add_function(wrap_pyfunction!(storage::shallow_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::bucket_stats_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::search_objects_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::build_object_cache, m)?)?;

        // Streaming uploads / multipart assembly.
        m.add_function(wrap_pyfunction!(streaming::stream_to_file_with_md5, m)?)?;
        m.add_function(wrap_pyfunction!(streaming::assemble_parts_with_md5, m)?)?;

        // Chunked AES-GCM file encryption.
        m.add_function(wrap_pyfunction!(crypto::encrypt_stream_chunked, m)?)?;
        m.add_function(wrap_pyfunction!(crypto::decrypt_stream_chunked, m)?)?;

        Ok(())
    }
}
|
||||
@@ -1,71 +0,0 @@
|
||||
use pyo3::exceptions::PyValueError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyDict, PyList, PyString};
|
||||
use serde_json::Value;
|
||||
use std::fs;
|
||||
|
||||
// Maximum JSON nesting depth accepted before conversion aborts; guards the
// recursive `value_to_py` against stack overflow on hostile input.
const MAX_DEPTH: u32 = 64;

/// Recursively convert a `serde_json::Value` into the equivalent Python
/// object: null -> None, bool -> bool, number -> int or float, string -> str,
/// array -> list, object -> dict.
///
/// `depth` tracks the current nesting level; once it exceeds `MAX_DEPTH`
/// a Python `ValueError` is raised instead of recursing further.
fn value_to_py(py: Python<'_>, v: &Value, depth: u32) -> PyResult<Py<PyAny>> {
    if depth > MAX_DEPTH {
        return Err(PyValueError::new_err("JSON nesting too deep"));
    }
    match v {
        Value::Null => Ok(py.None()),
        // bools convert to a borrowed singleton in pyo3, hence the extra
        // `to_owned()` before unbinding (ints/floats convert to owned objects).
        Value::Bool(b) => Ok((*b).into_pyobject(py)?.to_owned().into_any().unbind()),
        Value::Number(n) => {
            // Prefer i64, fall back to f64; a number representable as
            // neither (e.g. huge u64) degrades to None rather than erroring.
            if let Some(i) = n.as_i64() {
                Ok(i.into_pyobject(py)?.into_any().unbind())
            } else if let Some(f) = n.as_f64() {
                Ok(f.into_pyobject(py)?.into_any().unbind())
            } else {
                Ok(py.None())
            }
        }
        Value::String(s) => Ok(PyString::new(py, s).into_any().unbind()),
        Value::Array(arr) => {
            let list = PyList::empty(py);
            for item in arr {
                list.append(value_to_py(py, item, depth + 1)?)?;
            }
            Ok(list.into_any().unbind())
        }
        Value::Object(map) => {
            let dict = PyDict::new(py);
            for (k, val) in map {
                dict.set_item(k, value_to_py(py, val, depth + 1)?)?;
            }
            Ok(dict.into_any().unbind())
        }
    }
}
|
||||
|
||||
/// Read one named entry out of a JSON index file and return it as a
/// Python object.
///
/// Returns `None` (never an error) when the file is missing or unreadable,
/// is not valid JSON, is not a JSON object, or has no key `entry_name` —
/// all of those mean "no entry".
#[pyfunction]
pub fn read_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
) -> PyResult<Option<Py<PyAny>>> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();

    // File I/O and JSON parsing run with the GIL released.
    let entry: Option<Value> = py.detach(move || -> PyResult<Option<Value>> {
        let content = match fs::read_to_string(&path_owned) {
            Ok(c) => c,
            Err(_) => return Ok(None),
        };
        let parsed: Value = match serde_json::from_str(&content) {
            Ok(v) => v,
            Err(_) => return Ok(None),
        };
        match parsed {
            // `remove` takes ownership of the entry, avoiding a clone.
            Value::Object(mut map) => Ok(map.remove(&entry_owned)),
            _ => Ok(None),
        }
    })?;

    // Conversion to Python objects needs the GIL again.
    match entry {
        Some(val) => Ok(Some(value_to_py(py, &val, 0)?)),
        None => Ok(None),
    }
}
|
||||
@@ -1,193 +0,0 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use pyo3::prelude::*;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;

// A derived SigV4 signing key plus its creation time, used for TTL expiry.
struct CacheEntry {
    key: Vec<u8>,
    created: Instant,
}

// LRU cache of derived signing keys, keyed by
// (secret_key, date_stamp, region, service). Capacity: 256 entries.
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));

// How long a cached signing key remains valid before it is re-derived.
const CACHE_TTL_SECS: u64 = 60;

// Percent-encoding set implementing AWS URI encoding: escape everything
// except the unreserved characters (alphanumerics and `-` `_` `.` `~`).
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
    .remove(b'-')
    .remove(b'_')
    .remove(b'.')
    .remove(b'~');
|
||||
|
||||
/// HMAC-SHA256 of `msg` under `key`, returned as raw digest bytes.
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
    // HMAC-SHA256 accepts keys of any length, so `new_from_slice` cannot fail.
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
    mac.update(msg);
    mac.finalize().into_bytes().to_vec()
}
|
||||
|
||||
fn sha256_hex(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
/// Percent-encode `input` using the AWS SigV4 URI encoding rules
/// (see `AWS_ENCODE_SET`: only unreserved characters pass through).
fn aws_uri_encode(input: &str) -> String {
    percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}
|
||||
|
||||
/// Derive the AWS SigV4 signing key for (secret, date, region, service),
/// consulting a TTL-bounded LRU cache first.
///
/// Derivation is the standard SigV4 chain:
///   kDate    = HMAC("AWS4" + secret, date_stamp)
///   kRegion  = HMAC(kDate, region)
///   kService = HMAC(kRegion, service)
///   kSigning = HMAC(kService, "aws4_request")
fn derive_signing_key_cached(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    let cache_key = (
        secret_key.to_owned(),
        date_stamp.to_owned(),
        region.to_owned(),
        service.to_owned(),
    );

    // Fast path: return a fresh cached key; expired entries are evicted here.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        if let Some(entry) = cache.get(&cache_key) {
            if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
                return entry.key.clone();
            }
            cache.pop(&cache_key);
        }
    }

    let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, service.as_bytes());
    let k_signing = hmac_sha256(&k_service, b"aws4_request");

    // The lock is not held across derivation, so two threads may both derive
    // the same key; the second `put` simply overwrites — harmless.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        cache.put(
            cache_key,
            CacheEntry {
                key: k_signing.clone(),
                created: Instant::now(),
            },
        );
    }

    k_signing
}
|
||||
|
||||
/// Compare two byte slices without data-dependent timing.
///
/// The length check short-circuits — slice lengths are not treated as
/// secret. For equal-length inputs, all bytes are XOR-folded so the
/// comparison time does not depend on where the first difference occurs.
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    a.iter()
        .zip(b.iter())
        .fold(0u8, |acc, (x, y)| acc | (x ^ y))
        == 0
}
|
||||
|
||||
/// Verify an AWS Signature Version 4 request signature.
///
/// Rebuilds the canonical request and string-to-sign from the caller-supplied
/// pieces, derives the signing key (via the TTL/LRU cache), computes the
/// expected signature, and compares it with `provided_signature` in constant
/// time. Returns `true` on a match.
///
/// The caller is responsible for having already extracted/normalized the
/// signed header list (`signed_headers_str`) and the header values in signing
/// order (`header_values`).
#[pyfunction]
pub fn verify_sigv4_signature(
    method: &str,
    canonical_uri: &str,
    query_params: Vec<(String, String)>,
    signed_headers_str: &str,
    header_values: Vec<(String, String)>,
    payload_hash: &str,
    amz_date: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
    secret_key: &str,
    provided_signature: &str,
) -> bool {
    // Canonical query string: params sorted by name then value, each part
    // AWS-percent-encoded, joined with '&'.
    let mut sorted_params = query_params;
    sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));

    let canonical_query_string = sorted_params
        .iter()
        .map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
        .collect::<Vec<_>>()
        .join("&");

    // Canonical headers: lowercase name, value with runs of whitespace
    // collapsed to single spaces, one "name:value\n" line per header.
    let mut canonical_headers = String::new();
    for (name, value) in &header_values {
        let lower_name = name.to_lowercase();
        let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
        // NOTE(review): presumably restores an `Expect: 100-continue` value
        // stripped by an intermediary so the canonical request matches what
        // the client signed — confirm against callers.
        let final_value = if lower_name == "expect" && normalized.is_empty() {
            "100-continue"
        } else {
            &normalized
        };
        canonical_headers.push_str(&lower_name);
        canonical_headers.push(':');
        canonical_headers.push_str(final_value);
        canonical_headers.push('\n');
    }

    let canonical_request = format!(
        "{}\n{}\n{}\n{}\n{}\n{}",
        method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str, payload_hash
    );

    let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    );

    let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
    let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let calculated_hex = hex::encode(&calculated);

    // Constant-time compare so signature bytes are not leaked via timing.
    constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
|
||||
|
||||
/// Python-facing wrapper: derive the SigV4 signing key (cached) and
/// return it as raw bytes.
#[pyfunction]
pub fn derive_signing_key(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    derive_signing_key_cached(secret_key, date_stamp, region, service)
}
|
||||
|
||||
/// Compute the final SigV4 signature: hex(HMAC-SHA256(signing_key,
/// string_to_sign)).
#[pyfunction]
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
    let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
    hex::encode(sig)
}
|
||||
|
||||
/// Build the SigV4 "string to sign":
/// `AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n{sha256(canonical_request)}`.
#[pyfunction]
pub fn build_string_to_sign(
    amz_date: &str,
    credential_scope: &str,
    canonical_request: &str,
) -> String {
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    )
}
|
||||
|
||||
/// Python-facing constant-time string comparison (UTF-8 bytes).
#[pyfunction]
pub fn constant_time_compare(a: &str, b: &str) -> bool {
    constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}
|
||||
|
||||
/// Drop every cached signing key (e.g. after credential rotation or in tests).
#[pyfunction]
pub fn clear_signing_key_cache() {
    SIGNING_KEY_CACHE.lock().clear();
}
|
||||
@@ -1,817 +0,0 @@
|
||||
use pyo3::exceptions::PyIOError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyDict, PyList, PyString, PyTuple};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::time::SystemTime;
|
||||
|
||||
// Top-level bucket folders that hold bookkeeping data, not user objects.
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

/// Seconds since the Unix epoch as `f64`; any time before the epoch
/// collapses to `0.0`.
fn system_time_to_epoch(t: SystemTime) -> f64 {
    t.duration_since(std::time::UNIX_EPOCH)
        .map_or(0.0, |d| d.as_secs_f64())
}
|
||||
|
||||
/// Pull the `__etag__` value out of raw metadata-file bytes without a full
/// JSON parse: locate the quoted `"__etag__"` key, then take the text
/// between the next pair of double quotes. Returns `None` if the key or a
/// well-formed quoted value is absent, or the value is not valid UTF-8.
fn extract_etag_from_meta_bytes(content: &[u8]) -> Option<String> {
    const MARKER: &[u8] = b"\"__etag__\"";
    let marker_pos = content
        .windows(MARKER.len())
        .position(|window| window == MARKER)?;
    let tail = &content[marker_pos + MARKER.len()..];
    // The value begins just past the next double quote (skipping the ':').
    let open = tail.iter().position(|&b| b == b'"')?;
    let value = &tail[open + 1..];
    let close = value.iter().position(|&b| b == b'"')?;
    String::from_utf8(value[..close].to_vec()).ok()
}
|
||||
|
||||
/// Return `true` if directory `root` contains at least one regular file
/// anywhere beneath it.
///
/// Unreadable entries are skipped; symlinked directories are not followed
/// (so the walk cannot loop). A missing or non-directory `root` yields
/// `false`.
fn has_any_file(root: &str) -> bool {
    let root_path = Path::new(root);
    if !root_path.is_dir() {
        return false;
    }
    // Iterative depth-first walk — avoids recursion depth limits.
    let mut stack = vec![root_path.to_path_buf()];
    while let Some(current) = stack.pop() {
        // Fix: source was corrupted to `fs::read_dir(¤t)` by an HTML
        // entity (`&curren;`); the intended argument is `&current`.
        let entries = match fs::read_dir(&current) {
            Ok(e) => e,
            Err(_) => continue,
        };
        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_file() {
                return true;
            }
            if ft.is_dir() && !ft.is_symlink() {
                stack.push(entry.path());
            }
        }
    }
    false
}
|
||||
|
||||
/// Insert or overwrite one entry in a JSON index file.
///
/// `entry_data_json` must be a JSON document; it is stored under key
/// `entry_name` in the object at `path`. The parent directory is created
/// on demand, and a missing/corrupt index file is treated as empty.
///
/// Raises `IOError` if the entry data cannot be parsed or the index cannot
/// be serialized/written. NOTE(review): the write is not atomic (no
/// temp-file + rename), so a crash mid-write can corrupt the index —
/// confirm callers tolerate that.
#[pyfunction]
pub fn write_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
    entry_data_json: &str,
) -> PyResult<()> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();
    let data_owned = entry_data_json.to_owned();

    // All parsing and file I/O happens with the GIL released.
    py.detach(move || -> PyResult<()> {
        let entry_value: Value = serde_json::from_str(&data_owned)
            .map_err(|e| PyIOError::new_err(format!("Failed to parse entry data: {}", e)))?;

        // Best-effort: a failure here surfaces as the write error below.
        if let Some(parent) = Path::new(&path_owned).parent() {
            let _ = fs::create_dir_all(parent);
        }

        let mut index_data: serde_json::Map<String, Value> = match fs::read_to_string(&path_owned)
        {
            Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
            Err(_) => serde_json::Map::new(),
        };

        index_data.insert(entry_owned, entry_value);

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(())
    })
}
|
||||
|
||||
/// Remove one entry from a JSON index file.
///
/// Returns `true` only when the entry existed and was removed. A missing
/// or unparseable index, or an absent key, returns `false`. When removal
/// empties the index, the file itself is deleted (best-effort).
#[pyfunction]
pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyResult<bool> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();

    // File I/O runs with the GIL released.
    py.detach(move || -> PyResult<bool> {
        let content = match fs::read_to_string(&path_owned) {
            Ok(c) => c,
            Err(_) => return Ok(false),
        };

        let mut index_data: serde_json::Map<String, Value> =
            match serde_json::from_str(&content) {
                Ok(v) => v,
                Err(_) => return Ok(false),
            };

        if index_data.remove(&entry_owned).is_none() {
            return Ok(false);
        }

        // Last entry removed: drop the whole file instead of writing "{}".
        if index_data.is_empty() {
            let _ = fs::remove_file(&path_owned);
            return Ok(true);
        }

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(true)
    })
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn check_bucket_contents(
|
||||
py: Python<'_>,
|
||||
bucket_path: &str,
|
||||
version_roots: Vec<String>,
|
||||
multipart_roots: Vec<String>,
|
||||
) -> PyResult<(bool, bool, bool)> {
|
||||
let bucket_owned = bucket_path.to_owned();
|
||||
|
||||
py.detach(move || -> PyResult<(bool, bool, bool)> {
|
||||
let mut has_objects = false;
|
||||
let bucket_p = Path::new(&bucket_owned);
|
||||
if bucket_p.is_dir() {
|
||||
let mut stack = vec![bucket_p.to_path_buf()];
|
||||
'obj_scan: while let Some(current) = stack.pop() {
|
||||
let is_root = current == bucket_p;
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if is_root {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if INTERNAL_FOLDERS.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if ft.is_file() && !ft.is_symlink() {
|
||||
has_objects = true;
|
||||
break 'obj_scan;
|
||||
}
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
stack.push(entry.path());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut has_versions = false;
|
||||
for root in &version_roots {
|
||||
if has_versions {
|
||||
break;
|
||||
}
|
||||
has_versions = has_any_file(root);
|
||||
}
|
||||
|
||||
let mut has_multipart = false;
|
||||
for root in &multipart_roots {
|
||||
if has_multipart {
|
||||
break;
|
||||
}
|
||||
has_multipart = has_any_file(root);
|
||||
}
|
||||
|
||||
Ok((has_objects, has_versions, has_multipart))
|
||||
})
|
||||
}
|
||||
|
||||
/// Non-recursive listing of one directory level of a bucket.
///
/// Returns a dict with:
/// - `"files"`: sorted list of `(key, size, mtime, etag_or_None)` tuples;
/// - `"dirs"`: sorted list of `"{prefix}{name}/"` strings;
/// - `"merged_keys"`: files and dirs merged in sorted order as
///   `(key, is_dir)` tuples.
///
/// `prefix` is prepended to every name to form the object key;
/// `meta_cache_json` is a JSON object mapping key -> etag (invalid JSON is
/// treated as empty). Internal folders, symlinks, and unreadable entries
/// are skipped. The filesystem scan runs with the GIL released.
#[pyfunction]
pub fn shallow_scan(
    py: Python<'_>,
    target_dir: &str,
    prefix: &str,
    meta_cache_json: &str,
) -> PyResult<Py<PyAny>> {
    let target_owned = target_dir.to_owned();
    let prefix_owned = prefix.to_owned();
    let cache_owned = meta_cache_json.to_owned();

    let result: (
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    ) = py.detach(move || -> PyResult<(
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    )> {
        let meta_cache: HashMap<String, String> =
            serde_json::from_str(&cache_owned).unwrap_or_default();

        let mut files: Vec<(String, u64, f64, Option<String>)> = Vec::new();
        let mut dirs: Vec<String> = Vec::new();

        let entries = match fs::read_dir(&target_owned) {
            Ok(e) => e,
            Err(_) => return Ok((files, dirs, Vec::new())),
        };

        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            // Non-UTF-8 names cannot become Python keys here; skip them.
            let name = match entry.file_name().into_string() {
                Ok(n) => n,
                Err(_) => continue,
            };
            if INTERNAL_FOLDERS.contains(&name.as_str()) {
                continue;
            }
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_dir() && !ft.is_symlink() {
                let cp = format!("{}{}/", prefix_owned, name);
                dirs.push(cp);
            } else if ft.is_file() && !ft.is_symlink() {
                let key = format!("{}{}", prefix_owned, name);
                let md = match entry.metadata() {
                    Ok(m) => m,
                    Err(_) => continue,
                };
                let size = md.len();
                let mtime = md
                    .modified()
                    .map(system_time_to_epoch)
                    .unwrap_or(0.0);
                let etag = meta_cache.get(&key).cloned();
                files.push((key, size, mtime, etag));
            }
        }

        files.sort_by(|a, b| a.0.cmp(&b.0));
        dirs.sort();

        // Merge the two sorted lists into one sorted (key, is_dir) stream;
        // ties go to the file (<=), keeping a stable ordering.
        let mut merged: Vec<(String, bool)> = Vec::with_capacity(files.len() + dirs.len());
        let mut fi = 0;
        let mut di = 0;
        while fi < files.len() && di < dirs.len() {
            if files[fi].0 <= dirs[di] {
                merged.push((files[fi].0.clone(), false));
                fi += 1;
            } else {
                merged.push((dirs[di].clone(), true));
                di += 1;
            }
        }
        while fi < files.len() {
            merged.push((files[fi].0.clone(), false));
            fi += 1;
        }
        while di < dirs.len() {
            merged.push((dirs[di].clone(), true));
            di += 1;
        }

        Ok((files, dirs, merged))
    })?;

    let (files, dirs, merged) = result;

    // Back on the GIL: build the Python result structures.
    let dict = PyDict::new(py);

    let files_list = PyList::empty(py);
    for (key, size, mtime, etag) in &files {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        files_list.append(tuple)?;
    }
    dict.set_item("files", files_list)?;

    let dirs_list = PyList::empty(py);
    for d in &dirs {
        dirs_list.append(PyString::new(py, d))?;
    }
    dict.set_item("dirs", dirs_list)?;

    let merged_list = PyList::empty(py);
    for (key, is_dir) in &merged {
        let bool_obj: Py<PyAny> = if *is_dir {
            true.into_pyobject(py)?.to_owned().into_any().unbind()
        } else {
            false.into_pyobject(py)?.to_owned().into_any().unbind()
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            bool_obj,
        ])?;
        merged_list.append(tuple)?;
    }
    dict.set_item("merged_keys", merged_list)?;

    Ok(dict.into_any().unbind())
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn bucket_stats_scan(
|
||||
py: Python<'_>,
|
||||
bucket_path: &str,
|
||||
versions_root: &str,
|
||||
) -> PyResult<(u64, u64, u64, u64)> {
|
||||
let bucket_owned = bucket_path.to_owned();
|
||||
let versions_owned = versions_root.to_owned();
|
||||
|
||||
py.detach(move || -> PyResult<(u64, u64, u64, u64)> {
|
||||
let mut object_count: u64 = 0;
|
||||
let mut total_bytes: u64 = 0;
|
||||
|
||||
let bucket_p = Path::new(&bucket_owned);
|
||||
if bucket_p.is_dir() {
|
||||
let mut stack = vec![bucket_p.to_path_buf()];
|
||||
while let Some(current) = stack.pop() {
|
||||
let is_root = current == bucket_p;
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if is_root {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if INTERNAL_FOLDERS.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
stack.push(entry.path());
|
||||
} else if ft.is_file() && !ft.is_symlink() {
|
||||
object_count += 1;
|
||||
if let Ok(md) = entry.metadata() {
|
||||
total_bytes += md.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut version_count: u64 = 0;
|
||||
let mut version_bytes: u64 = 0;
|
||||
|
||||
let versions_p = Path::new(&versions_owned);
|
||||
if versions_p.is_dir() {
|
||||
let mut stack = vec![versions_p.to_path_buf()];
|
||||
while let Some(current) = stack.pop() {
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
stack.push(entry.path());
|
||||
} else if ft.is_file() && !ft.is_symlink() {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if name.ends_with(".bin") {
|
||||
version_count += 1;
|
||||
if let Ok(md) = entry.metadata() {
|
||||
version_bytes += md.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((object_count, total_bytes, version_count, version_bytes))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (bucket_path, search_root, query, limit))]
|
||||
pub fn search_objects_scan(
|
||||
py: Python<'_>,
|
||||
bucket_path: &str,
|
||||
search_root: &str,
|
||||
query: &str,
|
||||
limit: usize,
|
||||
) -> PyResult<Py<PyAny>> {
|
||||
let bucket_owned = bucket_path.to_owned();
|
||||
let search_owned = search_root.to_owned();
|
||||
let query_owned = query.to_owned();
|
||||
|
||||
let result: (Vec<(String, u64, f64)>, bool) = py.detach(
|
||||
move || -> PyResult<(Vec<(String, u64, f64)>, bool)> {
|
||||
let query_lower = query_owned.to_lowercase();
|
||||
let bucket_len = bucket_owned.len() + 1;
|
||||
let scan_limit = limit * 4;
|
||||
let mut matched: usize = 0;
|
||||
let mut results: Vec<(String, u64, f64)> = Vec::new();
|
||||
|
||||
let search_p = Path::new(&search_owned);
|
||||
if !search_p.is_dir() {
|
||||
return Ok((results, false));
|
||||
}
|
||||
|
||||
let bucket_p = Path::new(&bucket_owned);
|
||||
let mut stack = vec![search_p.to_path_buf()];
|
||||
|
||||
'scan: while let Some(current) = stack.pop() {
|
||||
let is_bucket_root = current == bucket_p;
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if is_bucket_root {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if INTERNAL_FOLDERS.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
stack.push(entry.path());
|
||||
} else if ft.is_file() && !ft.is_symlink() {
|
||||
let full_path = entry.path();
|
||||
let full_str = full_path.to_string_lossy();
|
||||
if full_str.len() <= bucket_len {
|
||||
continue;
|
||||
}
|
||||
let key = full_str[bucket_len..].replace('\\', "/");
|
||||
if key.to_lowercase().contains(&query_lower) {
|
||||
if let Ok(md) = entry.metadata() {
|
||||
let size = md.len();
|
||||
let mtime = md
|
||||
.modified()
|
||||
.map(system_time_to_epoch)
|
||||
.unwrap_or(0.0);
|
||||
results.push((key, size, mtime));
|
||||
matched += 1;
|
||||
}
|
||||
}
|
||||
if matched >= scan_limit {
|
||||
break 'scan;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
let truncated = results.len() > limit;
|
||||
results.truncate(limit);
|
||||
|
||||
Ok((results, truncated))
|
||||
},
|
||||
)?;
|
||||
|
||||
let (results, truncated) = result;
|
||||
|
||||
let dict = PyDict::new(py);
|
||||
|
||||
let results_list = PyList::empty(py);
|
||||
for (key, size, mtime) in &results {
|
||||
let tuple = PyTuple::new(py, &[
|
||||
PyString::new(py, key).into_any().unbind(),
|
||||
size.into_pyobject(py)?.into_any().unbind(),
|
||||
mtime.into_pyobject(py)?.into_any().unbind(),
|
||||
])?;
|
||||
results_list.append(tuple)?;
|
||||
}
|
||||
dict.set_item("results", results_list)?;
|
||||
dict.set_item("truncated", truncated)?;
|
||||
|
||||
Ok(dict.into_any().unbind())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn build_object_cache(
|
||||
py: Python<'_>,
|
||||
bucket_path: &str,
|
||||
meta_root: &str,
|
||||
etag_index_path: &str,
|
||||
) -> PyResult<Py<PyAny>> {
|
||||
let bucket_owned = bucket_path.to_owned();
|
||||
let meta_owned = meta_root.to_owned();
|
||||
let index_path_owned = etag_index_path.to_owned();
|
||||
|
||||
let result: (HashMap<String, String>, Vec<(String, u64, f64, Option<String>)>, bool) =
|
||||
py.detach(move || -> PyResult<(
|
||||
HashMap<String, String>,
|
||||
Vec<(String, u64, f64, Option<String>)>,
|
||||
bool,
|
||||
)> {
|
||||
let mut meta_cache: HashMap<String, String> = HashMap::new();
|
||||
let mut index_mtime: f64 = 0.0;
|
||||
let mut etag_cache_changed = false;
|
||||
|
||||
let index_p = Path::new(&index_path_owned);
|
||||
if index_p.is_file() {
|
||||
if let Ok(md) = fs::metadata(&index_path_owned) {
|
||||
index_mtime = md
|
||||
.modified()
|
||||
.map(system_time_to_epoch)
|
||||
.unwrap_or(0.0);
|
||||
}
|
||||
if let Ok(content) = fs::read_to_string(&index_path_owned) {
|
||||
if let Ok(parsed) = serde_json::from_str::<HashMap<String, String>>(&content) {
|
||||
meta_cache = parsed;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let meta_p = Path::new(&meta_owned);
|
||||
let mut needs_rebuild = false;
|
||||
|
||||
if meta_p.is_dir() && index_mtime > 0.0 {
|
||||
fn check_newer(dir: &Path, index_mtime: f64) -> bool {
|
||||
let entries = match fs::read_dir(dir) {
|
||||
Ok(e) => e,
|
||||
Err(_) => return false,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
if check_newer(&entry.path(), index_mtime) {
|
||||
return true;
|
||||
}
|
||||
} else if ft.is_file() {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if name.ends_with(".meta.json") || name == "_index.json" {
|
||||
if let Ok(md) = entry.metadata() {
|
||||
let mt = md
|
||||
.modified()
|
||||
.map(system_time_to_epoch)
|
||||
.unwrap_or(0.0);
|
||||
if mt > index_mtime {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
needs_rebuild = check_newer(meta_p, index_mtime);
|
||||
} else if meta_cache.is_empty() {
|
||||
needs_rebuild = true;
|
||||
}
|
||||
|
||||
if needs_rebuild && meta_p.is_dir() {
|
||||
let meta_str = meta_owned.clone();
|
||||
let meta_len = meta_str.len() + 1;
|
||||
let mut index_files: Vec<String> = Vec::new();
|
||||
let mut legacy_meta_files: Vec<(String, String)> = Vec::new();
|
||||
|
||||
fn collect_meta(
|
||||
dir: &Path,
|
||||
meta_len: usize,
|
||||
index_files: &mut Vec<String>,
|
||||
legacy_meta_files: &mut Vec<(String, String)>,
|
||||
) {
|
||||
let entries = match fs::read_dir(dir) {
|
||||
Ok(e) => e,
|
||||
Err(_) => return,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
collect_meta(&entry.path(), meta_len, index_files, legacy_meta_files);
|
||||
} else if ft.is_file() {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
let full = entry.path().to_string_lossy().to_string();
|
||||
if name == "_index.json" {
|
||||
index_files.push(full);
|
||||
} else if name.ends_with(".meta.json") {
|
||||
if full.len() > meta_len {
|
||||
let rel = &full[meta_len..];
|
||||
let key = if rel.len() > 10 {
|
||||
rel[..rel.len() - 10].replace('\\', "/")
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
legacy_meta_files.push((key, full));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
collect_meta(
|
||||
meta_p,
|
||||
meta_len,
|
||||
&mut index_files,
|
||||
&mut legacy_meta_files,
|
||||
);
|
||||
|
||||
meta_cache.clear();
|
||||
|
||||
for idx_path in &index_files {
|
||||
if let Ok(content) = fs::read_to_string(idx_path) {
|
||||
if let Ok(idx_data) = serde_json::from_str::<HashMap<String, Value>>(&content) {
|
||||
let rel_dir = if idx_path.len() > meta_len {
|
||||
let r = &idx_path[meta_len..];
|
||||
r.replace('\\', "/")
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
let dir_prefix = if rel_dir.ends_with("/_index.json") {
|
||||
&rel_dir[..rel_dir.len() - "/_index.json".len()]
|
||||
} else {
|
||||
""
|
||||
};
|
||||
for (entry_name, entry_data) in &idx_data {
|
||||
let key = if dir_prefix.is_empty() {
|
||||
entry_name.clone()
|
||||
} else {
|
||||
format!("{}/{}", dir_prefix, entry_name)
|
||||
};
|
||||
if let Some(meta_obj) = entry_data.get("metadata") {
|
||||
if let Some(etag) = meta_obj.get("__etag__") {
|
||||
if let Some(etag_str) = etag.as_str() {
|
||||
meta_cache.insert(key, etag_str.to_owned());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (key, path) in &legacy_meta_files {
|
||||
if meta_cache.contains_key(key) {
|
||||
continue;
|
||||
}
|
||||
if let Ok(content) = fs::read(path) {
|
||||
if let Some(etag) = extract_etag_from_meta_bytes(&content) {
|
||||
meta_cache.insert(key.clone(), etag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
etag_cache_changed = true;
|
||||
}
|
||||
|
||||
let bucket_p = Path::new(&bucket_owned);
|
||||
let bucket_len = bucket_owned.len() + 1;
|
||||
let mut objects: Vec<(String, u64, f64, Option<String>)> = Vec::new();
|
||||
|
||||
if bucket_p.is_dir() {
|
||||
let mut stack = vec![bucket_p.to_path_buf()];
|
||||
while let Some(current) = stack.pop() {
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
let full = entry.path();
|
||||
let full_str = full.to_string_lossy();
|
||||
if full_str.len() > bucket_len {
|
||||
let first_part: &str = if let Some(sep_pos) =
|
||||
full_str[bucket_len..].find(|c: char| c == '\\' || c == '/')
|
||||
{
|
||||
&full_str[bucket_len..bucket_len + sep_pos]
|
||||
} else {
|
||||
&full_str[bucket_len..]
|
||||
};
|
||||
if INTERNAL_FOLDERS.contains(&first_part) {
|
||||
continue;
|
||||
}
|
||||
} else if let Some(name) = entry.file_name().to_str() {
|
||||
if INTERNAL_FOLDERS.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
stack.push(full);
|
||||
} else if ft.is_file() && !ft.is_symlink() {
|
||||
let full = entry.path();
|
||||
let full_str = full.to_string_lossy();
|
||||
if full_str.len() <= bucket_len {
|
||||
continue;
|
||||
}
|
||||
let rel = &full_str[bucket_len..];
|
||||
let first_part: &str =
|
||||
if let Some(sep_pos) = rel.find(|c: char| c == '\\' || c == '/') {
|
||||
&rel[..sep_pos]
|
||||
} else {
|
||||
rel
|
||||
};
|
||||
if INTERNAL_FOLDERS.contains(&first_part) {
|
||||
continue;
|
||||
}
|
||||
let key = rel.replace('\\', "/");
|
||||
if let Ok(md) = entry.metadata() {
|
||||
let size = md.len();
|
||||
let mtime = md
|
||||
.modified()
|
||||
.map(system_time_to_epoch)
|
||||
.unwrap_or(0.0);
|
||||
let etag = meta_cache.get(&key).cloned();
|
||||
objects.push((key, size, mtime, etag));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((meta_cache, objects, etag_cache_changed))
|
||||
})?;
|
||||
|
||||
let (meta_cache, objects, etag_cache_changed) = result;
|
||||
|
||||
let dict = PyDict::new(py);
|
||||
|
||||
let cache_dict = PyDict::new(py);
|
||||
for (k, v) in &meta_cache {
|
||||
cache_dict.set_item(k, v)?;
|
||||
}
|
||||
dict.set_item("etag_cache", cache_dict)?;
|
||||
|
||||
let objects_list = PyList::empty(py);
|
||||
for (key, size, mtime, etag) in &objects {
|
||||
let etag_py: Py<PyAny> = match etag {
|
||||
Some(e) => PyString::new(py, e).into_any().unbind(),
|
||||
None => py.None(),
|
||||
};
|
||||
let tuple = PyTuple::new(py, &[
|
||||
PyString::new(py, key).into_any().unbind(),
|
||||
size.into_pyobject(py)?.into_any().unbind(),
|
||||
mtime.into_pyobject(py)?.into_any().unbind(),
|
||||
etag_py,
|
||||
])?;
|
||||
objects_list.append(tuple)?;
|
||||
}
|
||||
dict.set_item("objects", objects_list)?;
|
||||
dict.set_item("etag_cache_changed", etag_cache_changed)?;
|
||||
|
||||
Ok(dict.into_any().unbind())
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use pyo3::exceptions::{PyIOError, PyValueError};
|
||||
use pyo3::prelude::*;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{Read, Write};
|
||||
use uuid::Uuid;
|
||||
|
||||
const DEFAULT_CHUNK_SIZE: usize = 262144;
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (stream, tmp_dir, chunk_size=DEFAULT_CHUNK_SIZE))]
|
||||
pub fn stream_to_file_with_md5(
|
||||
py: Python<'_>,
|
||||
stream: &Bound<'_, PyAny>,
|
||||
tmp_dir: &str,
|
||||
chunk_size: usize,
|
||||
) -> PyResult<(String, String, u64)> {
|
||||
let chunk_size = if chunk_size == 0 {
|
||||
DEFAULT_CHUNK_SIZE
|
||||
} else {
|
||||
chunk_size
|
||||
};
|
||||
|
||||
fs::create_dir_all(tmp_dir)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create tmp dir: {}", e)))?;
|
||||
|
||||
let tmp_name = format!("{}.tmp", Uuid::new_v4().as_hyphenated());
|
||||
let tmp_path_buf = std::path::PathBuf::from(tmp_dir).join(&tmp_name);
|
||||
let tmp_path = tmp_path_buf.to_string_lossy().into_owned();
|
||||
|
||||
let mut file = File::create(&tmp_path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create temp file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut total_bytes: u64 = 0;
|
||||
|
||||
let result: PyResult<()> = (|| {
|
||||
loop {
|
||||
let chunk: Vec<u8> = stream.call_method1("read", (chunk_size,))?.extract()?;
|
||||
if chunk.is_empty() {
|
||||
break;
|
||||
}
|
||||
hasher.update(&chunk);
|
||||
file.write_all(&chunk)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
|
||||
total_bytes += chunk.len() as u64;
|
||||
|
||||
py.check_signals()?;
|
||||
}
|
||||
file.sync_all()
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
|
||||
Ok(())
|
||||
})();
|
||||
|
||||
if let Err(e) = result {
|
||||
drop(file);
|
||||
let _ = fs::remove_file(&tmp_path);
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
drop(file);
|
||||
|
||||
let md5_hex = format!("{:x}", hasher.finalize());
|
||||
Ok((tmp_path, md5_hex, total_bytes))
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn assemble_parts_with_md5(
|
||||
py: Python<'_>,
|
||||
part_paths: Vec<String>,
|
||||
dest_path: &str,
|
||||
) -> PyResult<String> {
|
||||
if part_paths.is_empty() {
|
||||
return Err(PyValueError::new_err("No parts to assemble"));
|
||||
}
|
||||
|
||||
let dest = dest_path.to_owned();
|
||||
let parts = part_paths;
|
||||
|
||||
py.detach(move || {
|
||||
if let Some(parent) = std::path::Path::new(&dest).parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create dest dir: {}", e)))?;
|
||||
}
|
||||
|
||||
let mut target = File::create(&dest)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create dest file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; 1024 * 1024];
|
||||
|
||||
for part_path in &parts {
|
||||
let mut part = File::open(part_path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open part {}: {}", part_path, e)))?;
|
||||
loop {
|
||||
let n = part
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read part: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
target
|
||||
.write_all(&buf[..n])
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
|
||||
}
|
||||
}
|
||||
|
||||
target.sync_all()
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
|
||||
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
@@ -1,149 +0,0 @@
|
||||
use pyo3::prelude::*;
|
||||
use std::sync::LazyLock;
|
||||
use unicode_normalization::UnicodeNormalization;
|
||||
|
||||
const WINDOWS_RESERVED: &[&str] = &[
|
||||
"CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
|
||||
"COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
|
||||
"LPT9",
|
||||
];
|
||||
|
||||
const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];
|
||||
|
||||
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||
const SYSTEM_ROOT: &str = ".myfsio.sys";
|
||||
|
||||
static IP_REGEX: LazyLock<regex::Regex> =
|
||||
LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (object_key, max_length_bytes=1024, is_windows=false, reserved_prefixes=None))]
|
||||
pub fn validate_object_key(
|
||||
object_key: &str,
|
||||
max_length_bytes: usize,
|
||||
is_windows: bool,
|
||||
reserved_prefixes: Option<Vec<String>>,
|
||||
) -> PyResult<Option<String>> {
|
||||
if object_key.is_empty() {
|
||||
return Ok(Some("Object key required".to_string()));
|
||||
}
|
||||
|
||||
if object_key.contains('\0') {
|
||||
return Ok(Some("Object key contains null bytes".to_string()));
|
||||
}
|
||||
|
||||
let normalized: String = object_key.nfc().collect();
|
||||
|
||||
if normalized.len() > max_length_bytes {
|
||||
return Ok(Some(format!(
|
||||
"Object key exceeds maximum length of {} bytes",
|
||||
max_length_bytes
|
||||
)));
|
||||
}
|
||||
|
||||
if normalized.starts_with('/') || normalized.starts_with('\\') {
|
||||
return Ok(Some("Object key cannot start with a slash".to_string()));
|
||||
}
|
||||
|
||||
let parts: Vec<&str> = if cfg!(windows) || is_windows {
|
||||
normalized.split(['/', '\\']).collect()
|
||||
} else {
|
||||
normalized.split('/').collect()
|
||||
};
|
||||
|
||||
for part in &parts {
|
||||
if part.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if *part == ".." {
|
||||
return Ok(Some(
|
||||
"Object key contains parent directory references".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if *part == "." {
|
||||
return Ok(Some("Object key contains invalid segments".to_string()));
|
||||
}
|
||||
|
||||
if part.chars().any(|c| (c as u32) < 32) {
|
||||
return Ok(Some(
|
||||
"Object key contains control characters".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if is_windows {
|
||||
if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
|
||||
return Ok(Some(
|
||||
"Object key contains characters not supported on Windows filesystems"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
if part.ends_with(' ') || part.ends_with('.') {
|
||||
return Ok(Some(
|
||||
"Object key segments cannot end with spaces or periods on Windows".to_string(),
|
||||
));
|
||||
}
|
||||
let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
|
||||
if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
|
||||
return Ok(Some(format!("Invalid filename segment: {}", part)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
|
||||
if let Some(top) = non_empty_parts.first() {
|
||||
if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
|
||||
return Ok(Some("Object key uses a reserved prefix".to_string()));
|
||||
}
|
||||
|
||||
if let Some(ref prefixes) = reserved_prefixes {
|
||||
for prefix in prefixes {
|
||||
if *top == prefix.as_str() {
|
||||
return Ok(Some("Object key uses a reserved prefix".to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
|
||||
let len = bucket_name.len();
|
||||
if len < 3 || len > 63 {
|
||||
return Some("Bucket name must be between 3 and 63 characters".to_string());
|
||||
}
|
||||
|
||||
let bytes = bucket_name.as_bytes();
|
||||
if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
for &b in bytes {
|
||||
if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
|
||||
return Some(
|
||||
"Bucket name can only contain lowercase letters, digits, dots, and hyphens"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if bucket_name.contains("..") {
|
||||
return Some("Bucket name must not contain consecutive periods".to_string());
|
||||
}
|
||||
|
||||
if IP_REGEX.is_match(bucket_name) {
|
||||
return Some("Bucket name must not be formatted as an IP address".to_string());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
Flask>=3.1.3
|
||||
Flask>=3.1.2
|
||||
Flask-Limiter>=4.1.1
|
||||
Flask-Cors>=6.0.2
|
||||
Flask-WTF>=1.2.2
|
||||
@@ -6,8 +6,8 @@ python-dotenv>=1.2.1
|
||||
pytest>=9.0.2
|
||||
requests>=2.32.5
|
||||
boto3>=1.42.14
|
||||
granian>=2.7.2
|
||||
psutil>=7.2.2
|
||||
cryptography>=46.0.5
|
||||
waitress>=3.0.2
|
||||
psutil>=7.1.3
|
||||
cryptography>=46.0.3
|
||||
defusedxml>=0.7.1
|
||||
duckdb>=1.5.1
|
||||
duckdb>=1.4.4
|
||||
274
run.py
274
run.py
@@ -2,13 +2,9 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import atexit
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import warnings
|
||||
import multiprocessing
|
||||
from multiprocessing import Process
|
||||
from pathlib import Path
|
||||
|
||||
@@ -26,8 +22,6 @@ from typing import Optional
|
||||
|
||||
from app import create_api_app, create_ui_app
|
||||
from app.config import AppConfig
|
||||
from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key
|
||||
from app.version import get_version
|
||||
|
||||
|
||||
def _server_host() -> str:
|
||||
@@ -44,85 +38,24 @@ def _is_frozen() -> bool:
|
||||
return getattr(sys, 'frozen', False) or '__compiled__' in globals()
|
||||
|
||||
|
||||
def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
|
||||
from granian import Granian
|
||||
from granian.constants import Interfaces
|
||||
from granian.http import HTTP1Settings
|
||||
|
||||
kwargs: dict = {
|
||||
"target": target,
|
||||
"address": _server_host(),
|
||||
"port": port,
|
||||
"interface": Interfaces.WSGI,
|
||||
"factory": True,
|
||||
"workers": 1,
|
||||
}
|
||||
|
||||
if config:
|
||||
kwargs["blocking_threads"] = config.server_threads
|
||||
kwargs["backlog"] = config.server_backlog
|
||||
kwargs["backpressure"] = config.server_connection_limit
|
||||
kwargs["http1_settings"] = HTTP1Settings(
|
||||
header_read_timeout=config.server_channel_timeout * 1000,
|
||||
max_buffer_size=config.server_max_buffer_size,
|
||||
)
|
||||
else:
|
||||
kwargs["http1_settings"] = HTTP1Settings(
|
||||
max_buffer_size=1024 * 1024 * 128,
|
||||
)
|
||||
|
||||
server = Granian(**kwargs)
|
||||
server.serve()
|
||||
|
||||
|
||||
def _find_rust_binary() -> Optional[Path]:
|
||||
candidates = [
|
||||
Path("/usr/local/bin/myfsio-server"),
|
||||
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server.exe",
|
||||
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server",
|
||||
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server.exe",
|
||||
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server",
|
||||
]
|
||||
for p in candidates:
|
||||
if p.exists():
|
||||
return p
|
||||
return None
|
||||
|
||||
|
||||
def serve_rust_api(port: int, config: Optional[AppConfig] = None) -> None:
|
||||
binary = _find_rust_binary()
|
||||
if binary is None:
|
||||
print("ERROR: Rust engine binary not found. Build it first:")
|
||||
print(" cd myfsio-engine && cargo build --release")
|
||||
sys.exit(1)
|
||||
|
||||
env = os.environ.copy()
|
||||
env["PORT"] = str(port)
|
||||
env["HOST"] = _server_host()
|
||||
if config:
|
||||
env["STORAGE_ROOT"] = str(config.storage_root)
|
||||
env["AWS_REGION"] = config.aws_region
|
||||
if config.secret_key:
|
||||
env["SECRET_KEY"] = config.secret_key
|
||||
env.setdefault("ENCRYPTION_ENABLED", str(config.encryption_enabled).lower())
|
||||
env.setdefault("KMS_ENABLED", str(config.kms_enabled).lower())
|
||||
env.setdefault("LIFECYCLE_ENABLED", str(config.lifecycle_enabled).lower())
|
||||
env.setdefault("RUST_LOG", "info")
|
||||
|
||||
print(f"Starting Rust S3 engine: {binary}")
|
||||
proc = subprocess.Popen([str(binary)], env=env)
|
||||
try:
|
||||
proc.wait()
|
||||
except KeyboardInterrupt:
|
||||
proc.terminate()
|
||||
proc.wait(timeout=5)
|
||||
|
||||
|
||||
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
||||
app = create_api_app()
|
||||
if prod:
|
||||
_serve_granian("app:create_api_app", port, config)
|
||||
from waitress import serve
|
||||
if config:
|
||||
serve(
|
||||
app,
|
||||
host=_server_host(),
|
||||
port=port,
|
||||
ident="MyFSIO",
|
||||
threads=config.server_threads,
|
||||
connection_limit=config.server_connection_limit,
|
||||
backlog=config.server_backlog,
|
||||
channel_timeout=config.server_channel_timeout,
|
||||
)
|
||||
else:
|
||||
serve(app, host=_server_host(), port=port, ident="MyFSIO")
|
||||
else:
|
||||
app = create_api_app()
|
||||
debug = _is_debug_enabled()
|
||||
if debug:
|
||||
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
|
||||
@@ -130,158 +63,40 @@ def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None)
|
||||
|
||||
|
||||
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
||||
app = create_ui_app()
|
||||
if prod:
|
||||
_serve_granian("app:create_ui_app", port, config)
|
||||
from waitress import serve
|
||||
if config:
|
||||
serve(
|
||||
app,
|
||||
host=_server_host(),
|
||||
port=port,
|
||||
ident="MyFSIO",
|
||||
threads=config.server_threads,
|
||||
connection_limit=config.server_connection_limit,
|
||||
backlog=config.server_backlog,
|
||||
channel_timeout=config.server_channel_timeout,
|
||||
)
|
||||
else:
|
||||
serve(app, host=_server_host(), port=port, ident="MyFSIO")
|
||||
else:
|
||||
app = create_ui_app()
|
||||
debug = _is_debug_enabled()
|
||||
if debug:
|
||||
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
|
||||
app.run(host=_server_host(), port=port, debug=debug)
|
||||
|
||||
|
||||
def reset_credentials() -> None:
|
||||
import json
|
||||
import secrets
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
config = AppConfig.from_env()
|
||||
iam_path = config.iam_config_path
|
||||
encryption_key = config.secret_key
|
||||
|
||||
access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
|
||||
secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
|
||||
custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())
|
||||
|
||||
fernet = Fernet(_derive_fernet_key(encryption_key)) if encryption_key else None
|
||||
|
||||
raw_config = None
|
||||
if iam_path.exists():
|
||||
try:
|
||||
raw_bytes = iam_path.read_bytes()
|
||||
from app.iam import _IAM_ENCRYPTED_PREFIX
|
||||
if raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX):
|
||||
if fernet:
|
||||
try:
|
||||
content = fernet.decrypt(raw_bytes[len(_IAM_ENCRYPTED_PREFIX):]).decode("utf-8")
|
||||
raw_config = json.loads(content)
|
||||
except Exception:
|
||||
print("WARNING: Could not decrypt existing IAM config. Creating fresh config.")
|
||||
else:
|
||||
print("WARNING: IAM config is encrypted but no SECRET_KEY available. Creating fresh config.")
|
||||
else:
|
||||
try:
|
||||
raw_config = json.loads(raw_bytes.decode("utf-8"))
|
||||
except json.JSONDecodeError:
|
||||
print("WARNING: Existing IAM config is corrupted. Creating fresh config.")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
if raw_config and raw_config.get("users"):
|
||||
is_v2 = raw_config.get("version", 1) >= 2
|
||||
admin_user = None
|
||||
for user in raw_config["users"]:
|
||||
policies = user.get("policies", [])
|
||||
for p in policies:
|
||||
actions = p.get("actions", [])
|
||||
if "iam:*" in actions or "*" in actions:
|
||||
admin_user = user
|
||||
break
|
||||
if admin_user:
|
||||
break
|
||||
if not admin_user:
|
||||
admin_user = raw_config["users"][0]
|
||||
|
||||
if is_v2:
|
||||
admin_keys = admin_user.get("access_keys", [])
|
||||
if admin_keys:
|
||||
admin_keys[0]["access_key"] = access_key
|
||||
admin_keys[0]["secret_key"] = secret_key
|
||||
else:
|
||||
from datetime import datetime as _dt, timezone as _tz
|
||||
admin_user["access_keys"] = [{
|
||||
"access_key": access_key,
|
||||
"secret_key": secret_key,
|
||||
"status": "active",
|
||||
"created_at": _dt.now(_tz.utc).isoformat(),
|
||||
}]
|
||||
else:
|
||||
admin_user["access_key"] = access_key
|
||||
admin_user["secret_key"] = secret_key
|
||||
else:
|
||||
from datetime import datetime as _dt, timezone as _tz
|
||||
raw_config = {
|
||||
"version": 2,
|
||||
"users": [
|
||||
{
|
||||
"user_id": f"u-{secrets.token_hex(8)}",
|
||||
"display_name": "Local Admin",
|
||||
"enabled": True,
|
||||
"access_keys": [
|
||||
{
|
||||
"access_key": access_key,
|
||||
"secret_key": secret_key,
|
||||
"status": "active",
|
||||
"created_at": _dt.now(_tz.utc).isoformat(),
|
||||
}
|
||||
],
|
||||
"policies": [
|
||||
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
json_text = json.dumps(raw_config, indent=2)
|
||||
iam_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
temp_path = iam_path.with_suffix(".json.tmp")
|
||||
if fernet:
|
||||
from app.iam import _IAM_ENCRYPTED_PREFIX
|
||||
encrypted = fernet.encrypt(json_text.encode("utf-8"))
|
||||
temp_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
|
||||
else:
|
||||
temp_path.write_text(json_text, encoding="utf-8")
|
||||
temp_path.replace(iam_path)
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print("MYFSIO - ADMIN CREDENTIALS RESET")
|
||||
print(f"{'='*60}")
|
||||
if custom_keys:
|
||||
print(f"Access Key: {access_key} (from ADMIN_ACCESS_KEY)")
|
||||
print(f"Secret Key: {'(from ADMIN_SECRET_KEY)' if os.environ.get('ADMIN_SECRET_KEY', '').strip() else secret_key}")
|
||||
else:
|
||||
print(f"Access Key: {access_key}")
|
||||
print(f"Secret Key: {secret_key}")
|
||||
print(f"{'='*60}")
|
||||
if fernet:
|
||||
print("IAM config saved (encrypted).")
|
||||
else:
|
||||
print(f"IAM config saved to: {iam_path}")
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
multiprocessing.freeze_support()
|
||||
if _is_frozen():
|
||||
multiprocessing.set_start_method("spawn", force=True)
|
||||
|
||||
parser = argparse.ArgumentParser(description="Run the S3 clone services.")
|
||||
parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
|
||||
parser.add_argument("--mode", choices=["api", "ui", "both"], default="both")
|
||||
parser.add_argument("--api-port", type=int, default=5000)
|
||||
parser.add_argument("--ui-port", type=int, default=5100)
|
||||
parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
|
||||
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
|
||||
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
|
||||
parser.add_argument("--engine", choices=["python", "rust"], default=os.getenv("ENGINE", "python"), help="API engine: python (Flask) or rust (myfsio-engine)")
|
||||
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
|
||||
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
|
||||
parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
|
||||
parser.add_argument("--version", action="version", version=f"MyFSIO {get_version()}")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.reset_cred or args.mode == "reset-cred":
|
||||
reset_credentials()
|
||||
sys.exit(0)
|
||||
|
||||
if args.check_config or args.show_config:
|
||||
config = AppConfig.from_env()
|
||||
config.print_startup_summary()
|
||||
@@ -314,7 +129,7 @@ if __name__ == "__main__":
|
||||
pass
|
||||
|
||||
if prod_mode:
|
||||
print("Running in production mode (Granian)")
|
||||
print("Running in production mode (Waitress)")
|
||||
issues = config.validate_and_report()
|
||||
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
|
||||
if critical_issues:
|
||||
@@ -325,32 +140,13 @@ if __name__ == "__main__":
|
||||
else:
|
||||
print("Running in development mode (Flask dev server)")
|
||||
|
||||
use_rust = args.engine == "rust"
|
||||
|
||||
if args.mode in {"api", "both"}:
|
||||
if use_rust:
|
||||
print(f"Starting Rust API engine on port {args.api_port}...")
|
||||
else:
|
||||
print(f"Starting API server on port {args.api_port}...")
|
||||
if use_rust:
|
||||
api_proc = Process(target=serve_rust_api, args=(args.api_port, config))
|
||||
else:
|
||||
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
|
||||
print(f"Starting API server on port {args.api_port}...")
|
||||
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
|
||||
api_proc.start()
|
||||
else:
|
||||
api_proc = None
|
||||
|
||||
def _cleanup_api():
|
||||
if api_proc and api_proc.is_alive():
|
||||
api_proc.terminate()
|
||||
api_proc.join(timeout=5)
|
||||
if api_proc.is_alive():
|
||||
api_proc.kill()
|
||||
|
||||
if api_proc:
|
||||
atexit.register(_cleanup_api)
|
||||
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
|
||||
|
||||
if args.mode in {"ui", "both"}:
|
||||
print(f"Starting UI server on port {args.ui_port}...")
|
||||
serve_ui(args.ui_port, prod_mode, config)
|
||||
|
||||
@@ -192,86 +192,31 @@ cat > "$INSTALL_DIR/myfsio.env" << EOF
|
||||
# Generated by install.sh on $(date)
|
||||
# Documentation: https://go.jzwsite.com/myfsio
|
||||
|
||||
# =============================================================================
|
||||
# STORAGE PATHS
|
||||
# =============================================================================
|
||||
# Storage paths
|
||||
STORAGE_ROOT=$DATA_DIR
|
||||
LOG_DIR=$LOG_DIR
|
||||
|
||||
# =============================================================================
|
||||
# NETWORK
|
||||
# =============================================================================
|
||||
# Network
|
||||
APP_HOST=0.0.0.0
|
||||
APP_PORT=$API_PORT
|
||||
|
||||
# Public URL (set this if behind a reverse proxy for presigned URLs)
|
||||
$(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)
|
||||
|
||||
# =============================================================================
|
||||
# SECURITY
|
||||
# =============================================================================
|
||||
# Secret key for session signing (auto-generated if not set)
|
||||
# Security - CHANGE IN PRODUCTION
|
||||
SECRET_KEY=$SECRET_KEY
|
||||
|
||||
# CORS settings - restrict in production
|
||||
CORS_ORIGINS=*
|
||||
|
||||
# Brute-force protection
|
||||
AUTH_MAX_ATTEMPTS=5
|
||||
AUTH_LOCKOUT_MINUTES=15
|
||||
# Public URL (set this if behind a reverse proxy)
|
||||
$(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)
|
||||
|
||||
# Reverse proxy settings (set to number of trusted proxies in front)
|
||||
# NUM_TRUSTED_PROXIES=1
|
||||
|
||||
# Allow internal admin endpoints (only enable on trusted networks)
|
||||
# ALLOW_INTERNAL_ENDPOINTS=false
|
||||
|
||||
# Allowed hosts for redirects (comma-separated, empty = restrict all)
|
||||
# ALLOWED_REDIRECT_HOSTS=
|
||||
|
||||
# =============================================================================
|
||||
# LOGGING
|
||||
# =============================================================================
|
||||
# Logging
|
||||
LOG_LEVEL=INFO
|
||||
LOG_TO_FILE=true
|
||||
|
||||
# =============================================================================
|
||||
# RATE LIMITING
|
||||
# =============================================================================
|
||||
# Rate limiting
|
||||
RATE_LIMIT_DEFAULT=200 per minute
|
||||
# RATE_LIMIT_LIST_BUCKETS=60 per minute
|
||||
# RATE_LIMIT_BUCKET_OPS=120 per minute
|
||||
# RATE_LIMIT_OBJECT_OPS=240 per minute
|
||||
# RATE_LIMIT_ADMIN=60 per minute
|
||||
|
||||
# =============================================================================
|
||||
# SERVER TUNING (0 = auto-detect based on system resources)
|
||||
# =============================================================================
|
||||
# SERVER_THREADS=0
|
||||
# SERVER_CONNECTION_LIMIT=0
|
||||
# SERVER_BACKLOG=0
|
||||
# SERVER_CHANNEL_TIMEOUT=120
|
||||
|
||||
# =============================================================================
|
||||
# ENCRYPTION (uncomment to enable)
|
||||
# =============================================================================
|
||||
# Optional: Encryption (uncomment to enable)
|
||||
# ENCRYPTION_ENABLED=true
|
||||
# KMS_ENABLED=true
|
||||
|
||||
# =============================================================================
|
||||
# SITE SYNC / REPLICATION (for multi-site deployments)
|
||||
# =============================================================================
|
||||
# SITE_ID=site-1
|
||||
# SITE_ENDPOINT=https://s3-site1.example.com
|
||||
# SITE_REGION=us-east-1
|
||||
# SITE_SYNC_ENABLED=false
|
||||
|
||||
# =============================================================================
|
||||
# OPTIONAL FEATURES
|
||||
# =============================================================================
|
||||
# LIFECYCLE_ENABLED=false
|
||||
# METRICS_HISTORY_ENABLED=false
|
||||
# OPERATION_METRICS_ENABLED=false
|
||||
EOF
|
||||
chmod 600 "$INSTALL_DIR/myfsio.env"
|
||||
echo " [OK] Created $INSTALL_DIR/myfsio.env"
|
||||
@@ -363,7 +308,7 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
|
||||
systemctl start myfsio
|
||||
echo " [OK] Service started"
|
||||
echo ""
|
||||
|
||||
|
||||
read -p "Would you like to enable MyFSIO to start on boot? [Y/n] " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Nn]$ ]]; then
|
||||
@@ -371,33 +316,12 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
|
||||
echo " [OK] Service enabled on boot"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
echo " Waiting for service initialization..."
|
||||
sleep 3
|
||||
|
||||
|
||||
sleep 2
|
||||
echo " Service Status:"
|
||||
echo " ---------------"
|
||||
if systemctl is-active --quiet myfsio; then
|
||||
echo " [OK] MyFSIO is running"
|
||||
echo ""
|
||||
echo " ============================================"
|
||||
echo " ADMIN CREDENTIALS (save these securely!)"
|
||||
echo " ============================================"
|
||||
CRED_OUTPUT=$(journalctl -u myfsio --no-pager -n 50 2>/dev/null | grep -A 5 "FIRST RUN - ADMIN CREDENTIALS")
|
||||
ACCESS_KEY=$(echo "$CRED_OUTPUT" | grep "Access Key:" | head -1 | sed 's/.*Access Key: //' | awk '{print $1}')
|
||||
SECRET_KEY=$(echo "$CRED_OUTPUT" | grep "Secret Key:" | head -1 | sed 's/.*Secret Key: //' | awk '{print $1}')
|
||||
if [[ -n "$ACCESS_KEY" && "$ACCESS_KEY" != *"from"* && -n "$SECRET_KEY" && "$SECRET_KEY" != *"from"* ]]; then
|
||||
echo " Access Key: $ACCESS_KEY"
|
||||
echo " Secret Key: $SECRET_KEY"
|
||||
else
|
||||
echo " [!] Could not extract credentials from service logs."
|
||||
echo " Check startup output: journalctl -u myfsio --no-pager | grep -A 5 'ADMIN CREDENTIALS'"
|
||||
echo " Or reset credentials: $INSTALL_DIR/myfsio reset-cred"
|
||||
fi
|
||||
echo " ============================================"
|
||||
echo ""
|
||||
echo " NOTE: The IAM config file is encrypted at rest."
|
||||
echo " Credentials are only shown on first run or after reset."
|
||||
else
|
||||
echo " [WARNING] MyFSIO may not have started correctly"
|
||||
echo " Check logs with: journalctl -u myfsio -f"
|
||||
@@ -422,27 +346,19 @@ echo "Access Points:"
|
||||
echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$API_PORT"
|
||||
echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
|
||||
echo ""
|
||||
echo "Credentials:"
|
||||
echo " Admin credentials are shown on first service start (see above)."
|
||||
echo " The IAM config is encrypted at rest and cannot be read directly."
|
||||
echo " To reset credentials: $INSTALL_DIR/myfsio reset-cred"
|
||||
echo "Default Credentials:"
|
||||
echo " Username: localadmin"
|
||||
echo " Password: localadmin"
|
||||
echo " [!] WARNING: Change these immediately after first login!"
|
||||
echo ""
|
||||
echo "Configuration Files:"
|
||||
echo " Environment: $INSTALL_DIR/myfsio.env"
|
||||
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted)"
|
||||
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
|
||||
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
|
||||
echo " Secret Key: $DATA_DIR/.myfsio.sys/config/.secret (auto-generated)"
|
||||
echo ""
|
||||
echo "Security Notes:"
|
||||
echo " - Rate limiting is enabled by default (200 req/min)"
|
||||
echo " - Brute-force protection: 5 attempts, 15 min lockout"
|
||||
echo " - Set CORS_ORIGINS to specific domains in production"
|
||||
echo " - Set NUM_TRUSTED_PROXIES if behind a reverse proxy"
|
||||
echo ""
|
||||
echo "Useful Commands:"
|
||||
echo " Check status: sudo systemctl status myfsio"
|
||||
echo " View logs: sudo journalctl -u myfsio -f"
|
||||
echo " Validate config: $INSTALL_DIR/myfsio --check-config"
|
||||
echo " Restart: sudo systemctl restart myfsio"
|
||||
echo " Stop: sudo systemctl stop myfsio"
|
||||
echo ""
|
||||
|
||||
@@ -88,8 +88,7 @@ echo "The following items will be removed:"
|
||||
echo ""
|
||||
echo " Install directory: $INSTALL_DIR"
|
||||
if [[ "$KEEP_DATA" != true ]]; then
|
||||
echo " Data directory: $DATA_DIR"
|
||||
echo " [!] ALL DATA, IAM USERS, AND ENCRYPTION KEYS WILL BE DELETED!"
|
||||
echo " Data directory: $DATA_DIR (ALL YOUR DATA WILL BE DELETED!)"
|
||||
else
|
||||
echo " Data directory: $DATA_DIR (WILL BE KEPT)"
|
||||
fi
|
||||
@@ -228,18 +227,8 @@ echo ""
|
||||
if [[ "$KEEP_DATA" == true ]]; then
|
||||
echo "Your data has been preserved at: $DATA_DIR"
|
||||
echo ""
|
||||
echo "Preserved files include:"
|
||||
echo " - All buckets and objects"
|
||||
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted at rest)"
|
||||
echo " - Bucket policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
|
||||
echo " - Secret key: $DATA_DIR/.myfsio.sys/config/.secret"
|
||||
echo " - Encryption keys: $DATA_DIR/.myfsio.sys/keys/ (if encryption was enabled)"
|
||||
echo ""
|
||||
echo "NOTE: The IAM config is encrypted and requires the SECRET_KEY to read."
|
||||
echo " Keep the .secret file intact for reinstallation."
|
||||
echo ""
|
||||
echo "To reinstall MyFSIO with existing data:"
|
||||
echo " ./install.sh --data-dir $DATA_DIR"
|
||||
echo "To reinstall MyFSIO with existing data, run:"
|
||||
echo " curl -fsSL https://go.jzwsite.com/myfsio-install | sudo bash"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
|
||||
@@ -15,12 +15,6 @@
|
||||
--myfsio-hover-bg: rgba(59, 130, 246, 0.12);
|
||||
--myfsio-accent: #3b82f6;
|
||||
--myfsio-accent-hover: #2563eb;
|
||||
--myfsio-tag-key-bg: #e0e7ff;
|
||||
--myfsio-tag-key-text: #3730a3;
|
||||
--myfsio-tag-value-bg: #f0f1fa;
|
||||
--myfsio-tag-value-text: #4338ca;
|
||||
--myfsio-tag-border: #c7d2fe;
|
||||
--myfsio-tag-delete-hover: #ef4444;
|
||||
}
|
||||
|
||||
[data-theme='dark'] {
|
||||
@@ -40,12 +34,6 @@
|
||||
--myfsio-hover-bg: rgba(59, 130, 246, 0.2);
|
||||
--myfsio-accent: #60a5fa;
|
||||
--myfsio-accent-hover: #3b82f6;
|
||||
--myfsio-tag-key-bg: #312e81;
|
||||
--myfsio-tag-key-text: #c7d2fe;
|
||||
--myfsio-tag-value-bg: #1e1b4b;
|
||||
--myfsio-tag-value-text: #a5b4fc;
|
||||
--myfsio-tag-border: #4338ca;
|
||||
--myfsio-tag-delete-hover: #f87171;
|
||||
}
|
||||
|
||||
[data-theme='dark'] body,
|
||||
@@ -1163,104 +1151,17 @@ html.sidebar-will-collapse .sidebar-user {
|
||||
}
|
||||
|
||||
.iam-user-card {
|
||||
position: relative;
|
||||
border: 1px solid var(--myfsio-card-border) !important;
|
||||
border-radius: 1rem !important;
|
||||
overflow: visible;
|
||||
transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
|
||||
border: 1px solid var(--myfsio-card-border);
|
||||
border-radius: 0.75rem;
|
||||
transition: box-shadow 0.2s ease, transform 0.2s ease;
|
||||
}
|
||||
|
||||
.iam-user-card:hover {
|
||||
transform: translateY(-2px);
|
||||
box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.12), 0 4px 8px -4px rgba(0, 0, 0, 0.08);
|
||||
border-color: var(--myfsio-accent) !important;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
[data-theme='dark'] .iam-user-card:hover {
|
||||
box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.4), 0 4px 8px -4px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
|
||||
.iam-role-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
padding: 0.25em 0.65em;
|
||||
border-radius: 999px;
|
||||
font-size: 0.7rem;
|
||||
font-weight: 600;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.03em;
|
||||
}
|
||||
|
||||
.iam-role-admin {
|
||||
background: rgba(245, 158, 11, 0.15);
|
||||
color: #d97706;
|
||||
}
|
||||
|
||||
[data-theme='dark'] .iam-role-admin {
|
||||
background: rgba(245, 158, 11, 0.25);
|
||||
color: #fbbf24;
|
||||
}
|
||||
|
||||
.iam-role-user {
|
||||
background: rgba(59, 130, 246, 0.12);
|
||||
color: #2563eb;
|
||||
}
|
||||
|
||||
[data-theme='dark'] .iam-role-user {
|
||||
background: rgba(59, 130, 246, 0.2);
|
||||
color: #60a5fa;
|
||||
}
|
||||
|
||||
.iam-perm-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
padding: 0.3em 0.6em;
|
||||
border-radius: 999px;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 500;
|
||||
background: rgba(59, 130, 246, 0.08);
|
||||
color: var(--myfsio-text);
|
||||
border: 1px solid rgba(59, 130, 246, 0.15);
|
||||
}
|
||||
|
||||
[data-theme='dark'] .iam-perm-badge {
|
||||
background: rgba(59, 130, 246, 0.15);
|
||||
border-color: rgba(59, 130, 246, 0.25);
|
||||
}
|
||||
|
||||
.iam-copy-key {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 22px;
|
||||
height: 22px;
|
||||
padding: 0;
|
||||
border: none;
|
||||
background: transparent;
|
||||
color: var(--myfsio-muted);
|
||||
border-radius: 4px;
|
||||
cursor: pointer;
|
||||
transition: all 0.15s ease;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.iam-copy-key:hover {
|
||||
background: var(--myfsio-hover-bg);
|
||||
color: var(--myfsio-text);
|
||||
}
|
||||
|
||||
.iam-no-results {
|
||||
text-align: center;
|
||||
padding: 2rem 1rem;
|
||||
color: var(--myfsio-muted);
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.iam-user-card:hover {
|
||||
transform: none;
|
||||
}
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
.user-avatar-lg {
|
||||
@@ -1387,20 +1288,6 @@ html.sidebar-will-collapse .sidebar-user {
|
||||
padding: 2rem 1rem;
|
||||
}
|
||||
|
||||
#preview-text {
|
||||
padding: 1rem 1.125rem;
|
||||
max-height: 360px;
|
||||
overflow: auto;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
font-family: 'SFMono-Regular', 'Menlo', 'Consolas', 'Liberation Mono', monospace;
|
||||
font-size: .8rem;
|
||||
line-height: 1.6;
|
||||
tab-size: 4;
|
||||
color: var(--myfsio-text);
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
.upload-progress-stack {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
@@ -2655,7 +2542,7 @@ pre code {
|
||||
}
|
||||
|
||||
.objects-table-container {
|
||||
max-height: 60vh;
|
||||
max-height: none;
|
||||
}
|
||||
|
||||
.preview-card {
|
||||
@@ -2918,195 +2805,6 @@ body:has(.login-card) .main-wrapper {
|
||||
padding-top: 0 !important;
|
||||
}
|
||||
|
||||
.context-menu {
|
||||
position: fixed;
|
||||
z-index: 1060;
|
||||
min-width: 180px;
|
||||
background: var(--myfsio-card-bg);
|
||||
border: 1px solid var(--myfsio-card-border);
|
||||
border-radius: 0.5rem;
|
||||
box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.15), 0 8px 10px -6px rgba(0, 0, 0, 0.1);
|
||||
padding: 0.25rem 0;
|
||||
font-size: 0.875rem;
|
||||
}
|
||||
|
||||
[data-theme='dark'] .context-menu {
|
||||
box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.4), 0 8px 10px -6px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
.context-menu-item {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.625rem;
|
||||
padding: 0.5rem 0.875rem;
|
||||
color: var(--myfsio-text);
|
||||
cursor: pointer;
|
||||
transition: background-color 0.1s ease;
|
||||
border: none;
|
||||
background: none;
|
||||
width: 100%;
|
||||
text-align: left;
|
||||
font-size: inherit;
|
||||
}
|
||||
|
||||
.context-menu-item:hover {
|
||||
background-color: var(--myfsio-hover-bg);
|
||||
}
|
||||
|
||||
.context-menu-item.text-danger:hover {
|
||||
background-color: rgba(239, 68, 68, 0.1);
|
||||
}
|
||||
|
||||
.context-menu-divider {
|
||||
height: 1px;
|
||||
background: var(--myfsio-card-border);
|
||||
margin: 0.25rem 0;
|
||||
}
|
||||
|
||||
.context-menu-shortcut {
|
||||
margin-left: auto;
|
||||
font-size: 0.75rem;
|
||||
color: var(--myfsio-muted);
|
||||
}
|
||||
|
||||
.kbd-shortcuts-list {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
.kbd-shortcuts-list .shortcut-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 0.375rem 0;
|
||||
}
|
||||
|
||||
.kbd-shortcuts-list kbd {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 1.75rem;
|
||||
padding: 0.2rem 0.5rem;
|
||||
font-family: inherit;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 600;
|
||||
background: var(--myfsio-preview-bg);
|
||||
border: 1px solid var(--myfsio-card-border);
|
||||
border-radius: 0.25rem;
|
||||
box-shadow: 0 1px 0 1px rgba(0, 0, 0, 0.05);
|
||||
color: var(--myfsio-text);
|
||||
}
|
||||
|
||||
[data-theme='dark'] .kbd-shortcuts-list kbd {
|
||||
background: rgba(255, 255, 255, 0.1);
|
||||
box-shadow: 0 1px 0 1px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.sort-dropdown .dropdown-item.active,
|
||||
.sort-dropdown .dropdown-item:active {
|
||||
background-color: var(--myfsio-hover-bg);
|
||||
color: var(--myfsio-text);
|
||||
}
|
||||
|
||||
.sort-dropdown .dropdown-item {
|
||||
font-size: 0.875rem;
|
||||
padding: 0.375rem 1rem;
|
||||
}
|
||||
|
||||
.tag-pill {
|
||||
display: inline-flex;
|
||||
border-radius: 9999px;
|
||||
border: 1px solid var(--myfsio-tag-border);
|
||||
overflow: hidden;
|
||||
font-size: 0.75rem;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
.tag-pill-key {
|
||||
padding: 0.3rem 0.5rem;
|
||||
background: var(--myfsio-tag-key-bg);
|
||||
color: var(--myfsio-tag-key-text);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.tag-pill-value {
|
||||
padding: 0.3rem 0.5rem;
|
||||
background: var(--myfsio-tag-value-bg);
|
||||
color: var(--myfsio-tag-value-text);
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
.tag-editor-card {
|
||||
background: var(--myfsio-preview-bg);
|
||||
border-radius: 0.5rem;
|
||||
padding: 0.75rem;
|
||||
}
|
||||
|
||||
.tag-editor-header,
|
||||
.tag-editor-row {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr 28px;
|
||||
gap: 0.5rem;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.tag-editor-header {
|
||||
padding-bottom: 0.375rem;
|
||||
border-bottom: 1px solid var(--myfsio-card-border);
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.tag-editor-header span {
|
||||
font-size: 0.7rem;
|
||||
font-weight: 600;
|
||||
text-transform: uppercase;
|
||||
color: var(--myfsio-muted);
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
.tag-editor-row {
|
||||
margin-bottom: 0.375rem;
|
||||
}
|
||||
|
||||
.tag-editor-delete {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 28px;
|
||||
height: 28px;
|
||||
border: none;
|
||||
background: transparent;
|
||||
color: var(--myfsio-muted);
|
||||
border-radius: 0.375rem;
|
||||
cursor: pointer;
|
||||
transition: color 0.15s, background 0.15s;
|
||||
}
|
||||
|
||||
.tag-editor-delete:hover {
|
||||
color: var(--myfsio-tag-delete-hover);
|
||||
background: rgba(239, 68, 68, 0.1);
|
||||
}
|
||||
|
||||
.tag-editor-actions {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
margin-top: 0.75rem;
|
||||
padding-top: 0.5rem;
|
||||
border-top: 1px solid var(--myfsio-card-border);
|
||||
}
|
||||
|
||||
@media (prefers-reduced-motion: reduce) {
|
||||
*,
|
||||
*::before,
|
||||
*::after {
|
||||
animation-duration: 0.01ms !important;
|
||||
animation-iteration-count: 1 !important;
|
||||
transition-duration: 0.01ms !important;
|
||||
}
|
||||
}
|
||||
|
||||
@media print {
|
||||
.sidebar,
|
||||
.mobile-header {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,8 +3,6 @@ window.BucketDetailUpload = (function() {
|
||||
|
||||
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
|
||||
const CHUNK_SIZE = 8 * 1024 * 1024;
|
||||
const MAX_PART_RETRIES = 3;
|
||||
const RETRY_BASE_DELAY_MS = 1000;
|
||||
|
||||
let state = {
|
||||
isUploading: false,
|
||||
@@ -206,67 +204,6 @@ window.BucketDetailUpload = (function() {
|
||||
}
|
||||
}
|
||||
|
||||
function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
    // Upload one multipart chunk with XMLHttpRequest (fetch cannot report
    // upload progress). Resolves with the parsed JSON response body;
    // rejects on HTTP error status, unparseable JSON, network error, or abort.
    return new Promise(function(resolve, reject) {
        var xhr = new XMLHttpRequest();
        xhr.open('PUT', url, true);
        xhr.setRequestHeader('X-CSRFToken', csrfToken || '');

        // Byte-level progress: report bytes already uploaded by previous
        // parts (baseBytes) plus this part's progress, against the whole file.
        xhr.upload.addEventListener('progress', function(e) {
            if (!e.lengthComputable) return;
            updateProgressItem(progressItem, {
                status: `Part ${partNumber}/${totalParts}`,
                loaded: baseBytes + e.loaded,
                total: fileSize
            });
        });

        xhr.addEventListener('load', function() {
            var succeeded = xhr.status >= 200 && xhr.status < 300;
            if (succeeded) {
                try {
                    resolve(JSON.parse(xhr.responseText));
                } catch {
                    reject(new Error(`Part ${partNumber}: invalid response`));
                }
                return;
            }
            // Prefer the server-provided error message when the body parses.
            try {
                var data = JSON.parse(xhr.responseText);
                reject(new Error(data.error || `Part ${partNumber} failed (${xhr.status})`));
            } catch {
                reject(new Error(`Part ${partNumber} failed (${xhr.status})`));
            }
        });

        xhr.addEventListener('error', function() {
            reject(new Error(`Part ${partNumber}: network error`));
        });
        xhr.addEventListener('abort', function() {
            reject(new Error(`Part ${partNumber}: aborted`));
        });

        xhr.send(chunk);
    });
}
|
||||
|
||||
async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
    // Try the part upload up to MAX_PART_RETRIES additional times, backing
    // off exponentially (RETRY_BASE_DELAY_MS * 2^attempt) between attempts.
    // Throws the last error once all attempts are exhausted.
    var lastError;
    var attempt = 0;
    while (attempt <= MAX_PART_RETRIES) {
        try {
            return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
        } catch (err) {
            lastError = err;
        }
        if (attempt < MAX_PART_RETRIES) {
            var delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
            // Reset the visible progress back to the bytes confirmed so far
            // and announce the retry before sleeping.
            updateProgressItem(progressItem, {
                status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
                loaded: baseBytes,
                total: fileSize
            });
            await new Promise(function(r) { setTimeout(r, delay); });
        }
        attempt++;
    }
    throw lastError;
}
|
||||
|
||||
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
|
||||
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
|
||||
|
||||
@@ -296,14 +233,26 @@ window.BucketDetailUpload = (function() {
|
||||
const end = Math.min(start + CHUNK_SIZE, file.size);
|
||||
const chunk = file.slice(start, end);
|
||||
|
||||
const partData = await uploadPartWithRetry(
|
||||
`${partUrl}?partNumber=${partNumber}`,
|
||||
chunk, csrfToken, uploadedBytes, file.size,
|
||||
progressItem, partNumber, totalParts
|
||||
);
|
||||
updateProgressItem(progressItem, {
|
||||
status: `Part ${partNumber}/${totalParts}`,
|
||||
loaded: uploadedBytes,
|
||||
total: file.size
|
||||
});
|
||||
|
||||
const partResp = await fetch(`${partUrl}?partNumber=${partNumber}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'X-CSRFToken': csrfToken || '' },
|
||||
body: chunk
|
||||
});
|
||||
|
||||
if (!partResp.ok) {
|
||||
const err = await partResp.json().catch(() => ({}));
|
||||
throw new Error(err.error || `Part ${partNumber} failed`);
|
||||
}
|
||||
|
||||
const partData = await partResp.json();
|
||||
parts.push({ part_number: partNumber, etag: partData.etag });
|
||||
uploadedBytes += (end - start);
|
||||
uploadedBytes += chunk.size;
|
||||
|
||||
updateProgressItem(progressItem, {
|
||||
loaded: uploadedBytes,
|
||||
|
||||
@@ -78,7 +78,7 @@ window.ConnectionsManagement = (function() {
|
||||
|
||||
try {
|
||||
var controller = new AbortController();
|
||||
var timeoutId = setTimeout(function() { controller.abort(); }, 10000);
|
||||
var timeoutId = setTimeout(function() { controller.abort(); }, 15000);
|
||||
|
||||
var response = await fetch(endpoints.healthTemplate.replace('CONNECTION_ID', connectionId), {
|
||||
signal: controller.signal
|
||||
@@ -147,7 +147,7 @@ window.ConnectionsManagement = (function() {
|
||||
'<button type="button" class="btn btn-outline-secondary" data-bs-toggle="modal" data-bs-target="#editConnectionModal" ' +
|
||||
'data-id="' + window.UICore.escapeHtml(conn.id) + '" data-name="' + window.UICore.escapeHtml(conn.name) + '" ' +
|
||||
'data-endpoint="' + window.UICore.escapeHtml(conn.endpoint_url) + '" data-region="' + window.UICore.escapeHtml(conn.region) + '" ' +
|
||||
'data-access="' + window.UICore.escapeHtml(conn.access_key) + '" title="Edit connection">' +
|
||||
'data-access="' + window.UICore.escapeHtml(conn.access_key) + '" data-secret="' + window.UICore.escapeHtml(conn.secret_key || '') + '" title="Edit connection">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg></button>' +
|
||||
'<button type="button" class="btn btn-outline-danger" data-bs-toggle="modal" data-bs-target="#deleteConnectionModal" ' +
|
||||
@@ -185,9 +185,7 @@ window.ConnectionsManagement = (function() {
|
||||
document.getElementById('edit_endpoint_url').value = button.getAttribute('data-endpoint') || '';
|
||||
document.getElementById('edit_region').value = button.getAttribute('data-region') || '';
|
||||
document.getElementById('edit_access_key').value = button.getAttribute('data-access') || '';
|
||||
document.getElementById('edit_secret_key').value = '';
|
||||
document.getElementById('edit_secret_key').placeholder = '(unchanged — leave blank to keep current)';
|
||||
document.getElementById('edit_secret_key').required = false;
|
||||
document.getElementById('edit_secret_key').value = button.getAttribute('data-secret') || '';
|
||||
document.getElementById('editTestResult').innerHTML = '';
|
||||
|
||||
var form = document.getElementById('editConnectionForm');
|
||||
@@ -290,6 +288,9 @@ window.ConnectionsManagement = (function() {
|
||||
editBtn.setAttribute('data-endpoint', data.connection.endpoint_url);
|
||||
editBtn.setAttribute('data-region', data.connection.region);
|
||||
editBtn.setAttribute('data-access', data.connection.access_key);
|
||||
if (data.connection.secret_key) {
|
||||
editBtn.setAttribute('data-secret', data.connection.secret_key);
|
||||
}
|
||||
}
|
||||
|
||||
var deleteBtn = row.querySelector('[data-bs-target="#deleteConnectionModal"]');
|
||||
|
||||
@@ -11,53 +11,16 @@ window.IAMManagement = (function() {
|
||||
var editUserModal = null;
|
||||
var deleteUserModal = null;
|
||||
var rotateSecretModal = null;
|
||||
var expiryModal = null;
|
||||
var currentRotateKey = null;
|
||||
var currentEditKey = null;
|
||||
var currentDeleteKey = null;
|
||||
var currentExpiryKey = null;
|
||||
|
||||
var ALL_S3_ACTIONS = [
|
||||
'list', 'read', 'write', 'delete', 'share', 'policy',
|
||||
'replication', 'lifecycle', 'cors',
|
||||
'create_bucket', 'delete_bucket',
|
||||
'versioning', 'tagging', 'encryption', 'quota',
|
||||
'object_lock', 'notification', 'logging', 'website'
|
||||
];
|
||||
|
||||
var policyTemplates = {
|
||||
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
|
||||
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors', 'iam:*'] }],
|
||||
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
|
||||
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
|
||||
operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
|
||||
bucketadmin: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'versioning', 'tagging', 'encryption', 'cors', 'lifecycle', 'quota', 'object_lock', 'notification', 'logging', 'website', 'replication'] }]
|
||||
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }]
|
||||
};
|
||||
|
||||
function isAdminUser(policies) {
    // A user counts as an admin when any policy grants the IAM wildcard
    // ("iam:*") or the global wildcard ("*") action.
    if (!policies || !policies.length) {
        return false;
    }
    for (var i = 0; i < policies.length; i++) {
        var actions = policies[i].actions;
        if (actions && (actions.indexOf('iam:*') >= 0 || actions.indexOf('*') >= 0)) {
            return true;
        }
    }
    return false;
}
|
||||
|
||||
function getPermissionLevel(actions) {
    // Summarise an action list into a short human-readable label for the
    // user card. Falls back to "Custom (n)" for unrecognised combinations.
    if (!actions || !actions.length) {
        return 'Custom (0)';
    }
    if (actions.indexOf('*') >= 0) {
        return 'Full Access';
    }
    // Holding every known S3 action is also full access.
    if (actions.length >= ALL_S3_ACTIONS.length &&
        ALL_S3_ACTIONS.every(function(a) { return actions.indexOf(a) >= 0; })) {
        return 'Full Access';
    }
    var granted = function(a) { return actions.indexOf(a) >= 0; };
    if (granted('list') && granted('read')) {
        if (granted('write') && granted('delete')) {
            return 'Read + Write + Delete';
        }
        if (granted('write')) {
            return 'Read + Write';
        }
        return 'Read Only';
    }
    return 'Custom (' + actions.length + ')';
}
|
||||
|
||||
function getBucketLabel(bucket) {
    // "*" means the policy applies to every bucket; show a friendly label.
    if (bucket === '*') {
        return 'All Buckets';
    }
    return bucket;
}
|
||||
|
||||
function init(config) {
|
||||
users = config.users || [];
|
||||
currentUserKey = config.currentUserKey || null;
|
||||
@@ -75,10 +38,7 @@ window.IAMManagement = (function() {
|
||||
setupEditUserModal();
|
||||
setupDeleteUserModal();
|
||||
setupRotateSecretModal();
|
||||
setupExpiryModal();
|
||||
setupFormHandlers();
|
||||
setupSearch();
|
||||
setupCopyAccessKeyButtons();
|
||||
}
|
||||
|
||||
function initModals() {
|
||||
@@ -86,13 +46,11 @@ window.IAMManagement = (function() {
|
||||
var editModalEl = document.getElementById('editUserModal');
|
||||
var deleteModalEl = document.getElementById('deleteUserModal');
|
||||
var rotateModalEl = document.getElementById('rotateSecretModal');
|
||||
var expiryModalEl = document.getElementById('expiryModal');
|
||||
|
||||
if (policyModalEl) policyModal = new bootstrap.Modal(policyModalEl);
|
||||
if (editModalEl) editUserModal = new bootstrap.Modal(editModalEl);
|
||||
if (deleteModalEl) deleteUserModal = new bootstrap.Modal(deleteModalEl);
|
||||
if (rotateModalEl) rotateSecretModal = new bootstrap.Modal(rotateModalEl);
|
||||
if (expiryModalEl) expiryModal = new bootstrap.Modal(expiryModalEl);
|
||||
}
|
||||
|
||||
function setupJsonAutoIndent() {
|
||||
@@ -110,15 +68,6 @@ window.IAMManagement = (function() {
|
||||
});
|
||||
});
|
||||
|
||||
var accessKeyCopyButton = document.querySelector('[data-access-key-copy]');
|
||||
if (accessKeyCopyButton) {
|
||||
accessKeyCopyButton.addEventListener('click', async function() {
|
||||
var accessKeyInput = document.getElementById('disclosedAccessKeyValue');
|
||||
if (!accessKeyInput) return;
|
||||
await window.UICore.copyToClipboard(accessKeyInput.value, accessKeyCopyButton, 'Copy');
|
||||
});
|
||||
}
|
||||
|
||||
var secretCopyButton = document.querySelector('[data-secret-copy]');
|
||||
if (secretCopyButton) {
|
||||
secretCopyButton.addEventListener('click', async function() {
|
||||
@@ -165,22 +114,6 @@ window.IAMManagement = (function() {
|
||||
});
|
||||
}
|
||||
|
||||
function generateSecureHex(byteCount) {
|
||||
var arr = new Uint8Array(byteCount);
|
||||
crypto.getRandomValues(arr);
|
||||
return Array.from(arr).map(function(b) { return b.toString(16).padStart(2, '0'); }).join('');
|
||||
}
|
||||
|
||||
function generateSecureBase64(byteCount) {
|
||||
var arr = new Uint8Array(byteCount);
|
||||
crypto.getRandomValues(arr);
|
||||
var binary = '';
|
||||
for (var i = 0; i < arr.length; i++) {
|
||||
binary += String.fromCharCode(arr[i]);
|
||||
}
|
||||
return btoa(binary).replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
|
||||
}
|
||||
|
||||
function setupCreateUserModal() {
|
||||
var createUserPoliciesEl = document.getElementById('createUserPolicies');
|
||||
|
||||
@@ -189,22 +122,6 @@ window.IAMManagement = (function() {
|
||||
applyPolicyTemplate(button.dataset.createPolicyTemplate, createUserPoliciesEl);
|
||||
});
|
||||
});
|
||||
|
||||
var genAccessKeyBtn = document.getElementById('generateAccessKeyBtn');
|
||||
if (genAccessKeyBtn) {
|
||||
genAccessKeyBtn.addEventListener('click', function() {
|
||||
var input = document.getElementById('createUserAccessKey');
|
||||
if (input) input.value = generateSecureHex(8);
|
||||
});
|
||||
}
|
||||
|
||||
var genSecretKeyBtn = document.getElementById('generateSecretKeyBtn');
|
||||
if (genSecretKeyBtn) {
|
||||
genSecretKeyBtn.addEventListener('click', function() {
|
||||
var input = document.getElementById('createUserSecretKey');
|
||||
if (input) input.value = generateSecureBase64(24);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function setupEditUserModal() {
|
||||
@@ -325,101 +242,23 @@ window.IAMManagement = (function() {
|
||||
}
|
||||
}
|
||||
|
||||
function openExpiryModal(key, expiresAt) {
    // Populate and show the expiry modal for the given access key.
    // expiresAt (an ISO timestamp, may be empty) pre-fills the
    // datetime-local input after converting to the browser's local time.
    currentExpiryKey = key;
    var label = document.getElementById('expiryUserLabel');
    var input = document.getElementById('expiryDateInput');
    var form = document.getElementById('expiryForm');

    if (label) {
        label.textContent = key;
    }

    var prefill = '';
    if (expiresAt) {
        try {
            var parsed = new Date(expiresAt);
            // Subtract the offset so toISOString() renders local wall-clock
            // time in the "YYYY-MM-DDTHH:MM" shape datetime-local expects.
            var shifted = new Date(parsed.getTime() - parsed.getTimezoneOffset() * 60000);
            prefill = shifted.toISOString().slice(0, 16);
        } catch (e) {
            // Unparseable timestamp — leave the field blank.
            prefill = '';
        }
    }
    if (input) {
        input.value = prefill;
    }

    if (form) {
        form.action = endpoints.updateExpiry.replace('ACCESS_KEY', key);
    }

    var modalEl = document.getElementById('expiryModal');
    if (modalEl) {
        bootstrap.Modal.getOrCreateInstance(modalEl).show();
    }
}
|
||||
|
||||
function setupExpiryModal() {
    // Wire up the expiry feature: (1) per-user "Set Expiry" triggers,
    // (2) quick-preset buttons inside the modal, (3) AJAX submission of
    // the expiry form followed by a page reload.
    document.querySelectorAll('[data-expiry-user]').forEach(function(btn) {
        btn.addEventListener('click', function(e) {
            e.preventDefault();
            openExpiryModal(btn.dataset.expiryUser, btn.dataset.expiresAt || '');
        });
    });

    // Preset offsets in milliseconds; 'clear' empties the field instead.
    var PRESET_MS = {
        '1h': 3600000,
        '24h': 86400000,
        '7d': 7 * 86400000,
        '30d': 30 * 86400000,
        '90d': 90 * 86400000
    };
    document.querySelectorAll('[data-expiry-preset]').forEach(function(btn) {
        btn.addEventListener('click', function() {
            var input = document.getElementById('expiryDateInput');
            if (!input) return;
            var preset = btn.dataset.expiryPreset;
            if (preset === 'clear') {
                input.value = '';
                return;
            }
            var ms = PRESET_MS[preset] || 0;
            var future = new Date(Date.now() + ms);
            // Shift to local wall-clock time for the datetime-local input.
            var local = new Date(future.getTime() - future.getTimezoneOffset() * 60000);
            input.value = local.toISOString().slice(0, 16);
        });
    });

    var expiryForm = document.getElementById('expiryForm');
    if (expiryForm) {
        expiryForm.addEventListener('submit', function(e) {
            e.preventDefault();
            window.UICore.submitFormAjax(expiryForm, {
                successMessage: 'Expiry updated',
                onSuccess: function() {
                    var modalEl = document.getElementById('expiryModal');
                    if (modalEl) {
                        bootstrap.Modal.getOrCreateInstance(modalEl).hide();
                    }
                    window.location.reload();
                }
            });
        });
    }
}
|
||||
|
||||
function createUserCardHtml(accessKey, displayName, policies) {
|
||||
var admin = isAdminUser(policies);
|
||||
var cardClass = 'card h-100 iam-user-card' + (admin ? ' iam-admin-card' : '');
|
||||
var roleBadge = admin
|
||||
? '<span class="iam-role-badge iam-role-admin" data-role-badge>Admin</span>'
|
||||
: '<span class="iam-role-badge iam-role-user" data-role-badge>User</span>';
|
||||
|
||||
var policyBadges = '';
|
||||
if (policies && policies.length > 0) {
|
||||
policyBadges = policies.map(function(p) {
|
||||
var bucketLabel = getBucketLabel(p.bucket);
|
||||
var permLevel = getPermissionLevel(p.actions);
|
||||
return '<span class="iam-perm-badge">' +
|
||||
var actionText = p.actions && p.actions.includes('*') ? 'full' : (p.actions ? p.actions.length : 0);
|
||||
return '<span class="badge bg-primary bg-opacity-10 text-primary">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
|
||||
'<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
|
||||
'</svg>' + window.UICore.escapeHtml(bucketLabel) + ' · ' + window.UICore.escapeHtml(permLevel) + '</span>';
|
||||
'</svg>' + window.UICore.escapeHtml(p.bucket) +
|
||||
'<span class="opacity-75">(' + actionText + ')</span></span>';
|
||||
}).join('');
|
||||
} else {
|
||||
policyBadges = '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
|
||||
}
|
||||
|
||||
var esc = window.UICore.escapeHtml;
|
||||
return '<div class="col-md-6 col-xl-4 iam-user-item" data-display-name="' + esc(displayName.toLowerCase()) + '" data-access-key-filter="' + esc(accessKey.toLowerCase()) + '">' +
|
||||
'<div class="' + cardClass + '">' +
|
||||
return '<div class="col-md-6 col-xl-4">' +
|
||||
'<div class="card h-100 iam-user-card">' +
|
||||
'<div class="card-body">' +
|
||||
'<div class="d-flex align-items-start justify-content-between mb-3">' +
|
||||
'<div class="d-flex align-items-center gap-3 min-width-0 overflow-hidden">' +
|
||||
@@ -428,18 +267,8 @@ window.IAMManagement = (function() {
|
||||
'<path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>' +
|
||||
'</svg></div>' +
|
||||
'<div class="min-width-0">' +
|
||||
'<div class="d-flex align-items-center gap-2 mb-0">' +
|
||||
'<h6 class="fw-semibold mb-0 text-truncate" title="' + esc(displayName) + '">' + esc(displayName) + '</h6>' +
|
||||
roleBadge +
|
||||
'</div>' +
|
||||
'<div class="d-flex align-items-center gap-1">' +
|
||||
'<code class="small text-muted text-truncate" title="' + esc(accessKey) + '">' + esc(accessKey) + '</code>' +
|
||||
'<button type="button" class="iam-copy-key" title="Copy access key" data-copy-access-key="' + esc(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>' +
|
||||
'<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>' +
|
||||
'</svg></button>' +
|
||||
'</div>' +
|
||||
'<h6 class="fw-semibold mb-0 text-truncate" title="' + window.UICore.escapeHtml(displayName) + '">' + window.UICore.escapeHtml(displayName) + '</h6>' +
|
||||
'<code class="small text-muted d-block text-truncate" title="' + window.UICore.escapeHtml(accessKey) + '">' + window.UICore.escapeHtml(accessKey) + '</code>' +
|
||||
'</div></div>' +
|
||||
'<div class="dropdown flex-shrink-0">' +
|
||||
'<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">' +
|
||||
@@ -447,20 +276,18 @@ window.IAMManagement = (function() {
|
||||
'<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>' +
|
||||
'</svg></button>' +
|
||||
'<ul class="dropdown-menu dropdown-menu-end">' +
|
||||
'<li><button class="dropdown-item" type="button" data-edit-user="' + esc(accessKey) + '" data-display-name="' + esc(displayName) + '">' +
|
||||
'<li><button class="dropdown-item" type="button" data-edit-user="' + window.UICore.escapeHtml(accessKey) + '" data-display-name="' + window.UICore.escapeHtml(displayName) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg>Edit Name</button></li>' +
|
||||
'<li><button class="dropdown-item" type="button" data-expiry-user="' + esc(accessKey) + '" data-expires-at="">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/><path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/></svg>Set Expiry</button></li>' +
|
||||
'<li><button class="dropdown-item" type="button" data-rotate-user="' + esc(accessKey) + '">' +
|
||||
'<li><button class="dropdown-item" type="button" data-rotate-user="' + window.UICore.escapeHtml(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/><path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/></svg>Rotate Secret</button></li>' +
|
||||
'<li><hr class="dropdown-divider"></li>' +
|
||||
'<li><button class="dropdown-item text-danger" type="button" data-delete-user="' + esc(accessKey) + '">' +
|
||||
'<li><button class="dropdown-item text-danger" type="button" data-delete-user="' + window.UICore.escapeHtml(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/><path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/></svg>Delete User</button></li>' +
|
||||
'</ul></div></div>' +
|
||||
'<div class="mb-3">' +
|
||||
'<div class="small text-muted mb-2">Bucket Permissions</div>' +
|
||||
'<div class="d-flex flex-wrap gap-1" data-policy-badges>' + policyBadges + '</div></div>' +
|
||||
'<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-access-key="' + esc(accessKey) + '">' +
|
||||
'<div class="d-flex flex-wrap gap-1">' + policyBadges + '</div></div>' +
|
||||
'<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-access-key="' + window.UICore.escapeHtml(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/><path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/></svg>Manage Policies</button>' +
|
||||
'</div></div></div>';
|
||||
}
|
||||
@@ -506,14 +333,6 @@ window.IAMManagement = (function() {
|
||||
});
|
||||
}
|
||||
|
||||
var expiryBtn = cardElement.querySelector('[data-expiry-user]');
|
||||
if (expiryBtn) {
|
||||
expiryBtn.addEventListener('click', function(e) {
|
||||
e.preventDefault();
|
||||
openExpiryModal(accessKey, '');
|
||||
});
|
||||
}
|
||||
|
||||
var policyBtn = cardElement.querySelector('[data-policy-editor]');
|
||||
if (policyBtn) {
|
||||
policyBtn.addEventListener('click', function() {
|
||||
@@ -523,13 +342,6 @@ window.IAMManagement = (function() {
|
||||
policyModal.show();
|
||||
});
|
||||
}
|
||||
|
||||
var copyBtn = cardElement.querySelector('[data-copy-access-key]');
|
||||
if (copyBtn) {
|
||||
copyBtn.addEventListener('click', function() {
|
||||
copyAccessKey(copyBtn);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function updateUserCount() {
|
||||
@@ -563,15 +375,10 @@ window.IAMManagement = (function() {
|
||||
'</svg>' +
|
||||
'<div class="flex-grow-1">' +
|
||||
'<div class="fw-semibold">New user created: <code>' + window.UICore.escapeHtml(data.access_key) + '</code></div>' +
|
||||
'<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>' +
|
||||
'<p class="mb-2 small">This secret is only shown once. Copy it now and store it securely.</p>' +
|
||||
'</div>' +
|
||||
'<button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>' +
|
||||
'</div>' +
|
||||
'<div class="input-group mb-2">' +
|
||||
'<span class="input-group-text"><strong>Access key</strong></span>' +
|
||||
'<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.access_key) + '" readonly />' +
|
||||
'<button class="btn btn-outline-primary" type="button" id="copyNewUserAccessKey">Copy</button>' +
|
||||
'</div>' +
|
||||
'<div class="input-group">' +
|
||||
'<span class="input-group-text"><strong>Secret key</strong></span>' +
|
||||
'<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.secret_key) + '" readonly id="newUserSecret" />' +
|
||||
@@ -580,9 +387,6 @@ window.IAMManagement = (function() {
|
||||
var container = document.querySelector('.page-header');
|
||||
if (container) {
|
||||
container.insertAdjacentHTML('afterend', alertHtml);
|
||||
document.getElementById('copyNewUserAccessKey').addEventListener('click', async function() {
|
||||
await window.UICore.copyToClipboard(data.access_key, this, 'Copy');
|
||||
});
|
||||
document.getElementById('copyNewUserSecret').addEventListener('click', async function() {
|
||||
await window.UICore.copyToClipboard(data.secret_key, this, 'Copy');
|
||||
});
|
||||
@@ -638,33 +442,17 @@ window.IAMManagement = (function() {
|
||||
|
||||
var userCard = document.querySelector('[data-access-key="' + key + '"]');
|
||||
if (userCard) {
|
||||
var cardEl = userCard.closest('.iam-user-card');
|
||||
var badgeContainer = cardEl ? cardEl.querySelector('[data-policy-badges]') : null;
|
||||
var badgeContainer = userCard.closest('.iam-user-card').querySelector('.d-flex.flex-wrap.gap-1');
|
||||
if (badgeContainer && data.policies) {
|
||||
var badges = data.policies.map(function(p) {
|
||||
var bl = getBucketLabel(p.bucket);
|
||||
var pl = getPermissionLevel(p.actions);
|
||||
return '<span class="iam-perm-badge">' +
|
||||
return '<span class="badge bg-primary bg-opacity-10 text-primary">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
|
||||
'<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
|
||||
'</svg>' + window.UICore.escapeHtml(bl) + ' · ' + window.UICore.escapeHtml(pl) + '</span>';
|
||||
'</svg>' + window.UICore.escapeHtml(p.bucket) +
|
||||
'<span class="opacity-75">(' + (p.actions.includes('*') ? 'full' : p.actions.length) + ')</span></span>';
|
||||
}).join('');
|
||||
badgeContainer.innerHTML = badges || '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
|
||||
}
|
||||
if (cardEl) {
|
||||
var nowAdmin = isAdminUser(data.policies);
|
||||
cardEl.classList.toggle('iam-admin-card', nowAdmin);
|
||||
var roleBadgeEl = cardEl.querySelector('[data-role-badge]');
|
||||
if (roleBadgeEl) {
|
||||
if (nowAdmin) {
|
||||
roleBadgeEl.className = 'iam-role-badge iam-role-admin';
|
||||
roleBadgeEl.textContent = 'Admin';
|
||||
} else {
|
||||
roleBadgeEl.className = 'iam-role-badge iam-role-user';
|
||||
roleBadgeEl.textContent = 'User';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var userIndex = users.findIndex(function(u) { return u.access_key === key; });
|
||||
@@ -697,10 +485,6 @@ window.IAMManagement = (function() {
|
||||
nameEl.textContent = newName;
|
||||
nameEl.title = newName;
|
||||
}
|
||||
var itemWrapper = card.closest('.iam-user-item');
|
||||
if (itemWrapper) {
|
||||
itemWrapper.setAttribute('data-display-name', newName.toLowerCase());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -755,52 +539,6 @@ window.IAMManagement = (function() {
|
||||
}
|
||||
}
|
||||
|
||||
function setupSearch() {
|
||||
var searchInput = document.getElementById('iam-user-search');
|
||||
if (!searchInput) return;
|
||||
|
||||
searchInput.addEventListener('input', function() {
|
||||
var query = searchInput.value.toLowerCase().trim();
|
||||
var items = document.querySelectorAll('.iam-user-item');
|
||||
var noResults = document.getElementById('iam-no-results');
|
||||
var visibleCount = 0;
|
||||
|
||||
items.forEach(function(item) {
|
||||
var name = item.getAttribute('data-display-name') || '';
|
||||
var key = item.getAttribute('data-access-key-filter') || '';
|
||||
var matches = !query || name.indexOf(query) >= 0 || key.indexOf(query) >= 0;
|
||||
item.classList.toggle('d-none', !matches);
|
||||
if (matches) visibleCount++;
|
||||
});
|
||||
|
||||
if (noResults) {
|
||||
noResults.classList.toggle('d-none', visibleCount > 0);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function copyAccessKey(btn) {
|
||||
var key = btn.getAttribute('data-copy-access-key');
|
||||
if (!key) return;
|
||||
var originalHtml = btn.innerHTML;
|
||||
navigator.clipboard.writeText(key).then(function() {
|
||||
btn.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16"><path d="M13.854 3.646a.5.5 0 0 1 0 .708l-7 7a.5.5 0 0 1-.708 0l-3.5-3.5a.5.5 0 1 1 .708-.708L6.5 10.293l6.646-6.647a.5.5 0 0 1 .708 0z"/></svg>';
|
||||
btn.style.color = '#22c55e';
|
||||
setTimeout(function() {
|
||||
btn.innerHTML = originalHtml;
|
||||
btn.style.color = '';
|
||||
}, 1200);
|
||||
}).catch(function() {});
|
||||
}
|
||||
|
||||
function setupCopyAccessKeyButtons() {
|
||||
document.querySelectorAll('[data-copy-access-key]').forEach(function(btn) {
|
||||
btn.addEventListener('click', function() {
|
||||
copyAccessKey(btn);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
init: init
|
||||
};
|
||||
|
||||
@@ -191,10 +191,6 @@ window.UICore = (function() {
|
||||
}
|
||||
});
|
||||
|
||||
window.addEventListener('beforeunload', function() {
|
||||
pollingManager.stopAll();
|
||||
});
|
||||
|
||||
return {
|
||||
getCsrfToken: getCsrfToken,
|
||||
formatBytes: formatBytes,
|
||||
|
||||
@@ -101,23 +101,6 @@
|
||||
<span>Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span>Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span>System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
@@ -209,23 +192,6 @@
|
||||
<span class="sidebar-link-text">Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}" data-tooltip="Domains">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}" data-tooltip="System">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
|
||||
@@ -100,26 +100,8 @@
|
||||
</svg>
|
||||
Upload
|
||||
</button>
|
||||
<div class="dropdown sort-dropdown">
|
||||
<button class="btn btn-outline-secondary btn-sm dropdown-toggle" type="button" data-bs-toggle="dropdown" aria-expanded="false" title="Sort objects">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M3.5 2.5a.5.5 0 0 0-1 0v8.793l-1.146-1.147a.5.5 0 0 0-.708.708l2 1.999.007.007a.497.497 0 0 0 .7-.006l2-2a.5.5 0 0 0-.707-.708L3.5 11.293V2.5zm3.5 1a.5.5 0 0 1 .5-.5h7a.5.5 0 0 1 0 1h-7a.5.5 0 0 1-.5-.5zM7.5 6a.5.5 0 0 0 0 1h5a.5.5 0 0 0 0-1h-5zm0 3a.5.5 0 0 0 0 1h3a.5.5 0 0 0 0-1h-3zm0 3a.5.5 0 0 0 0 1h1a.5.5 0 0 0 0-1h-1z"/>
|
||||
</svg>
|
||||
<span id="sort-dropdown-label">Name A-Z</span>
|
||||
</button>
|
||||
<ul class="dropdown-menu dropdown-menu-end">
|
||||
<li><button class="dropdown-item active" type="button" data-sort-field="name" data-sort-dir="asc">Name A-Z</button></li>
|
||||
<li><button class="dropdown-item" type="button" data-sort-field="name" data-sort-dir="desc">Name Z-A</button></li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li><button class="dropdown-item" type="button" data-sort-field="size" data-sort-dir="desc">Size (largest)</button></li>
|
||||
<li><button class="dropdown-item" type="button" data-sort-field="size" data-sort-dir="asc">Size (smallest)</button></li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li><button class="dropdown-item" type="button" data-sort-field="date" data-sort-dir="desc">Date (newest)</button></li>
|
||||
<li><button class="dropdown-item" type="button" data-sort-field="date" data-sort-dir="asc">Date (oldest)</button></li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="position-relative search-wrapper">
|
||||
<input id="object-search" class="form-control form-control-sm" type="search" placeholder="Filter objects (press /)" style="max-width: 180px;" />
|
||||
<input id="object-search" class="form-control form-control-sm" type="search" placeholder="Filter objects" style="max-width: 180px;" />
|
||||
</div>
|
||||
<div class="bulk-actions d-none" id="bulk-actions-wrapper">
|
||||
<button class="btn btn-outline-danger btn-sm" type="button" data-bulk-delete-trigger disabled>
|
||||
@@ -171,7 +153,6 @@
|
||||
data-bulk-download-endpoint="{{ url_for('ui.bulk_download_objects', bucket_name=bucket_name) }}"
|
||||
data-folders-url="{{ folders_url }}"
|
||||
data-buckets-for-copy-url="{{ buckets_for_copy_url }}"
|
||||
data-bucket-total-objects="{{ bucket_stats.get('objects', 0) }}"
|
||||
>
|
||||
<table class="table table-hover align-middle mb-0" id="objects-table" style="table-layout: fixed;">
|
||||
<thead class="table-light">
|
||||
@@ -257,8 +238,7 @@
|
||||
Share Link
|
||||
</button>
|
||||
</div>
|
||||
<div id="preview-error-alert" class="alert alert-warning d-none py-2 px-3 mb-3 small" role="alert"></div>
|
||||
<div id="preview-details-meta" class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
|
||||
<div class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
|
||||
<dl class="row small mb-0">
|
||||
<dt class="col-5 text-muted fw-normal">Last modified</dt>
|
||||
<dd class="col-7 mb-2 fw-medium" id="preview-modified"></dd>
|
||||
@@ -293,28 +273,19 @@
|
||||
Edit
|
||||
</button>
|
||||
</div>
|
||||
<div id="preview-tags-list" class="d-flex flex-wrap gap-2"></div>
|
||||
<div id="preview-tags-list" class="d-flex flex-wrap gap-1"></div>
|
||||
<div id="preview-tags-empty" class="text-muted small p-2 bg-body-tertiary rounded">No tags</div>
|
||||
<div id="preview-tags-editor" class="d-none mt-2">
|
||||
<div class="tag-editor-card">
|
||||
<div class="tag-editor-header">
|
||||
<span>Key</span>
|
||||
<span>Value</span>
|
||||
<span></span>
|
||||
</div>
|
||||
<div id="preview-tags-inputs"></div>
|
||||
<div class="tag-editor-actions">
|
||||
<button class="btn btn-sm btn-outline-secondary" type="button" id="addTagRow">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5v3h3a.5.5 0 0 1 0 1h-3v3a.5.5 0 0 1-1 0v-3h-3a.5.5 0 0 1 0-1h3v-3A.5.5 0 0 1 8 4z"/>
|
||||
</svg>
|
||||
Add Tag
|
||||
</button>
|
||||
<div class="ms-auto d-flex gap-2">
|
||||
<button class="btn btn-sm btn-outline-secondary" type="button" id="cancelTagsButton">Cancel</button>
|
||||
<button class="btn btn-sm btn-primary" type="button" id="saveTagsButton">Save</button>
|
||||
</div>
|
||||
</div>
|
||||
<div id="preview-tags-inputs" class="mb-2"></div>
|
||||
<div class="d-flex gap-2">
|
||||
<button class="btn btn-sm btn-outline-secondary flex-grow-1" type="button" id="addTagRow">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5v3h3a.5.5 0 0 1 0 1h-3v3a.5.5 0 0 1-1 0v-3h-3a.5.5 0 0 1 0-1h3v-3A.5.5 0 0 1 8 4z"/>
|
||||
</svg>
|
||||
Add Tag
|
||||
</button>
|
||||
<button class="btn btn-sm btn-primary" type="button" id="saveTagsButton">Save</button>
|
||||
<button class="btn btn-sm btn-outline-secondary" type="button" id="cancelTagsButton">Cancel</button>
|
||||
</div>
|
||||
<div class="form-text mt-1">Maximum 10 tags. Keys and values up to 256 characters.</div>
|
||||
</div>
|
||||
@@ -350,8 +321,7 @@
|
||||
<img id="preview-image" class="img-fluid d-none w-100" alt="Object preview" style="display: block;" />
|
||||
<video id="preview-video" class="w-100 d-none" controls style="display: block;"></video>
|
||||
<audio id="preview-audio" class="w-100 d-none" controls style="display: block;"></audio>
|
||||
<pre id="preview-text" class="w-100 d-none m-0"></pre>
|
||||
<iframe id="preview-iframe" class="w-100 d-none" style="min-height: 200px;"></iframe>
|
||||
<iframe id="preview-iframe" class="w-100 d-none" loading="lazy" style="min-height: 200px;"></iframe>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -995,111 +965,6 @@
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% if website_hosting_enabled %}
|
||||
<div class="card shadow-sm mt-4" id="bucket-website-card">
|
||||
<div class="card-header d-flex align-items-center">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary me-2" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
<span class="fw-semibold">Static Website Hosting</span>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
{% if website_config %}
|
||||
<div class="alert alert-success d-flex align-items-start mb-4" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>Website hosting is enabled</strong>
|
||||
<p class="mb-0 small">
|
||||
Index: <code>{{ website_config.index_document }}</code>
|
||||
{% if website_config.error_document %}<br>Error: <code>{{ website_config.error_document }}</code>{% endif %}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="alert alert-secondary d-flex align-items-start mb-4" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>Website hosting is disabled</strong>
|
||||
<p class="mb-0 small">Enable website hosting to serve bucket contents as a static website.</p>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{% if website_domains %}
|
||||
<div class="mb-4">
|
||||
<label class="form-label fw-medium mb-2">Mapped Domains</label>
|
||||
{% for domain in website_domains %}
|
||||
<div class="d-flex align-items-center mb-1">
|
||||
<span class="badge bg-success-subtle text-success-emphasis me-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M7.21 1.293a1 1 0 0 1 1.58 0l.612.72a1 1 0 0 0 .88.332l.94-.134a1 1 0 0 1 1.118.7l.248.912a1 1 0 0 0 .59.659l.876.388a1 1 0 0 1 .435 1.505l-.546.766a1 1 0 0 0-.156.935l.306.899a1 1 0 0 1-.725 1.282l-.92.216a1 1 0 0 0-.72.555l-.41.856a1 1 0 0 1-1.396.478l-.803-.49a1 1 0 0 0-1.04 0l-.802.49a1 1 0 0 1-1.397-.478l-.41-.857a1 1 0 0 0-.72-.554l-.919-.216a1 1 0 0 1-.725-1.282l.306-.9a1 1 0 0 0-.156-.934l-.546-.766a1 1 0 0 1 .435-1.505l.877-.388a1 1 0 0 0 .589-.66l.248-.911a1 1 0 0 1 1.118-.7l.94.133a1 1 0 0 0 .88-.331l.612-.72zM11 7a.5.5 0 0 0-.5-.5h-5a.5.5 0 0 0 0 1H6v1.5a.5.5 0 0 0 1 0V7.5h1v2a.5.5 0 0 0 1 0v-2h1.5a.5.5 0 0 0 0-1H10V7z"/>
|
||||
</svg>
|
||||
connected
|
||||
</span>
|
||||
<code class="small">{{ domain }}</code>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% elif website_config %}
|
||||
<div class="mb-4">
|
||||
<label class="form-label fw-medium mb-2">Mapped Domains</label>
|
||||
<p class="text-muted small mb-0">No domains mapped to this bucket. <a href="{{ url_for('ui.website_domains_dashboard') }}">Manage domains</a></p>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{% if can_manage_website %}
|
||||
<form method="post" action="{{ url_for('ui.update_bucket_website', bucket_name=bucket_name) }}" id="websiteForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<input type="hidden" name="action" value="enable" id="websiteAction" />
|
||||
|
||||
<div class="mb-3">
|
||||
<label for="index_document" class="form-label fw-medium">Index Document</label>
|
||||
<input type="text" class="form-control" id="index_document" name="index_document"
|
||||
value="{{ website_config.index_document if website_config else 'index.html' }}"
|
||||
placeholder="index.html">
|
||||
<div class="form-text">The default page served for directory paths (e.g., index.html).</div>
|
||||
</div>
|
||||
|
||||
<div class="mb-4">
|
||||
<label for="error_document" class="form-label fw-medium">Error Document</label>
|
||||
<input type="text" class="form-control" id="error_document" name="error_document"
|
||||
value="{{ website_config.error_document if website_config else '' }}"
|
||||
placeholder="error.html">
|
||||
<div class="form-text">Optional. The page served for 404 errors.</div>
|
||||
</div>
|
||||
|
||||
<div class="d-flex gap-2 flex-wrap">
|
||||
<button class="btn {{ 'btn-primary' if website_config else 'btn-success' }}" type="submit" id="websiteSubmitBtn">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M12.736 3.97a.733.733 0 0 1 1.047 0c.286.289.29.756.01 1.05L7.88 12.01a.733.733 0 0 1-1.065.02L3.217 8.384a.757.757 0 0 1 0-1.06.733.733 0 0 1 1.047 0l3.052 3.093 5.4-6.425a.247.247 0 0 1 .02-.022Z"/>
|
||||
</svg>
|
||||
<span id="websiteSubmitLabel">{{ 'Save Website Settings' if website_config else 'Enable Website Hosting' }}</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-outline-danger" id="disableWebsiteBtn"{% if not website_config %} style="display: none;"{% endif %}>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
|
||||
</svg>
|
||||
Disable Website Hosting
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
{% else %}
|
||||
<div class="text-center py-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="text-muted mb-2" viewBox="0 0 16 16">
|
||||
<path d="M8 1a2 2 0 0 1 2 2v4H6V3a2 2 0 0 1 2-2zm3 6V3a3 3 0 0 0-6 0v4a2 2 0 0 0-2 2v5a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V9a2 2 0 0 0-2-2z"/>
|
||||
</svg>
|
||||
<p class="text-muted mb-0 small">You do not have permission to modify website hosting for this bucket.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<div class="col-lg-4">
|
||||
@@ -2058,7 +1923,7 @@
|
||||
<div class="col-12">
|
||||
<label class="form-label fw-medium">Select files</label>
|
||||
<input class="form-control" type="file" name="object" id="uploadFileInput" multiple required />
|
||||
<div class="form-text">Select one or more files from your device. Files ≥ 8 MB use multipart uploads with automatic retry.</div>
|
||||
<div class="form-text">Select one or more files from your device. Files ≥ 8 MB automatically switch to multipart uploads.</div>
|
||||
</div>
|
||||
<div class="col-12">
|
||||
<div class="upload-dropzone text-center" data-dropzone>
|
||||
@@ -2283,11 +2148,13 @@
|
||||
</div>
|
||||
<ul class="list-group mb-3" id="bulkDeleteList" style="max-height: 200px; overflow-y: auto;"></ul>
|
||||
<div class="text-muted small" id="bulkDeleteStatus"></div>
|
||||
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3 {% if not versioning_enabled %}d-none{% endif %}" id="bulkDeletePurgeWrap">
|
||||
{% if versioning_enabled %}
|
||||
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3">
|
||||
<input class="form-check-input" type="checkbox" id="bulkDeletePurge" />
|
||||
<label class="form-check-label" for="bulkDeletePurge">Also delete archived versions</label>
|
||||
<div class="form-text">Removes any archived versions stored in the archive.</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
@@ -2325,7 +2192,7 @@
|
||||
<div class="p-3 bg-body-tertiary rounded-3 mb-3">
|
||||
<code id="deleteObjectKey" class="d-block text-break"></code>
|
||||
</div>
|
||||
<div id="deleteObjectVersioningWrap" class="{% if not versioning_enabled %}d-none{% endif %}">
|
||||
{% if versioning_enabled %}
|
||||
<div class="alert alert-warning d-flex align-items-start small mb-3" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||
@@ -2337,7 +2204,7 @@
|
||||
<label class="form-check-label" for="deletePurgeVersions">Also delete all archived versions</label>
|
||||
<div class="form-text mb-0">Removes the live object and every stored version.</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
@@ -2712,63 +2579,6 @@
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="context-menu d-none" id="objectContextMenu">
|
||||
<button class="context-menu-item" data-ctx-action="download">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M.5 9.9a.5.5 0 0 1 .5.5v2.5a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1v-2.5a.5.5 0 0 1 1 0v2.5a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2v-2.5a.5.5 0 0 1 .5-.5z"/>
|
||||
<path d="M7.646 11.854a.5.5 0 0 0 .708 0l3-3a.5.5 0 0 0-.708-.708L8.5 10.293V1.5a.5.5 0 0 0-1 0v8.793L5.354 8.146a.5.5 0 1 0-.708.708l3 3z"/>
|
||||
</svg>
|
||||
Download
|
||||
</button>
|
||||
<button class="context-menu-item" data-ctx-action="copy-path">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M4 2a2 2 0 0 1 2-2h8a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V2Zm2-1a1 1 0 0 0-1 1v8a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H6ZM2 5a1 1 0 0 0-1 1v8a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1v-1h1v1a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h1v1H2Z"/>
|
||||
</svg>
|
||||
Copy S3 Path
|
||||
</button>
|
||||
<button class="context-menu-item" data-ctx-action="presign">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
Share Link
|
||||
</button>
|
||||
<div class="context-menu-divider"></div>
|
||||
<button class="context-menu-item text-danger" data-ctx-action="delete">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="keyboardShortcutsModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered modal-sm">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary me-1" viewBox="0 0 16 16">
|
||||
<path d="M14 5a1 1 0 0 1 1 1v5a1 1 0 0 1-1 1H2a1 1 0 0 1-1-1V6a1 1 0 0 1 1-1h12zM2 4a2 2 0 0 0-2 2v5a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V6a2 2 0 0 0-2-2H2z"/>
|
||||
<path d="M13 10.25a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm0-2a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm-5 0A.25.25 0 0 1 8.25 8h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 8 8.75v-.5zm2 0a.25.25 0 0 1 .25-.25h1.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-1.5a.25.25 0 0 1-.25-.25v-.5zm1 2a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm-5-2A.25.25 0 0 1 6.25 8h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 6 8.75v-.5zm-2 0A.25.25 0 0 1 4.25 8h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 4 8.75v-.5zm-2 0A.25.25 0 0 1 2.25 8h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 2 8.75v-.5zm11-2a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm-2 0a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm-2 0A.25.25 0 0 1 9.25 6h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 9 6.75v-.5zm-2 0A.25.25 0 0 1 7.25 6h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 7 6.75v-.5zm-2 0A.25.25 0 0 1 5.25 6h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5A.25.25 0 0 1 5 6.75v-.5zm-3 0A.25.25 0 0 1 2.25 6h1.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-1.5A.25.25 0 0 1 2 6.75v-.5zm0 4a.25.25 0 0 1 .25-.25h.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-.5a.25.25 0 0 1-.25-.25v-.5zm2 0a.25.25 0 0 1 .25-.25h5.5a.25.25 0 0 1 .25.25v.5a.25.25 0 0 1-.25.25h-5.5a.25.25 0 0 1-.25-.25v-.5z"/>
|
||||
</svg>
|
||||
Keyboard Shortcuts
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="modal-body pt-2">
|
||||
<div class="kbd-shortcuts-list">
|
||||
<div class="shortcut-row"><span class="text-muted">Search objects</span><kbd>/</kbd></div>
|
||||
<div class="shortcut-row"><span class="text-muted">Select all</span><span><kbd>Ctrl</kbd> + <kbd>A</kbd></span></div>
|
||||
<div class="shortcut-row"><span class="text-muted">Delete selected</span><kbd>Del</kbd></div>
|
||||
<div class="shortcut-row"><span class="text-muted">Clear search</span><kbd>Esc</kbd></div>
|
||||
<div class="shortcut-row"><span class="text-muted">Show shortcuts</span><kbd>?</kbd></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
@@ -2780,8 +2590,7 @@
|
||||
window.BucketDetailConfig = {
|
||||
endpoints: {
|
||||
versioning: "{{ url_for('ui.update_bucket_versioning', bucket_name=bucket_name) }}",
|
||||
bucketsOverview: "{{ url_for('ui.buckets_overview') }}",
|
||||
archivedObjects: "{{ url_for('ui.archived_objects', bucket_name=bucket_name) }}"
|
||||
bucketsOverview: "{{ url_for('ui.buckets_overview') }}"
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
</div>
|
||||
<div>
|
||||
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
|
||||
<small class="text-muted">Created {{ bucket.meta.creation_date | format_datetime }}</small>
|
||||
<small class="text-muted">Created {{ bucket.meta.created_at | format_datetime }}</small>
|
||||
</div>
|
||||
</div>
|
||||
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>
|
||||
@@ -89,14 +89,6 @@
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<div class="col-12 d-none" id="bucket-no-results">
|
||||
<div class="text-center py-5 text-muted">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="mb-3 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<p class="mb-0 fw-medium">No buckets match your filter.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="createBucketModal" tabindex="-1" aria-hidden="true">
|
||||
@@ -149,7 +141,7 @@
|
||||
let visibleCount = 0;
|
||||
|
||||
bucketItems.forEach(item => {
|
||||
const name = item.querySelector('.bucket-name').textContent.toLowerCase();
|
||||
const name = item.querySelector('.card-title').textContent.toLowerCase();
|
||||
if (name.includes(term)) {
|
||||
item.classList.remove('d-none');
|
||||
visibleCount++;
|
||||
@@ -157,15 +149,6 @@
|
||||
item.classList.add('d-none');
|
||||
}
|
||||
});
|
||||
|
||||
var noResults = document.getElementById('bucket-no-results');
|
||||
if (noResults) {
|
||||
if (term && visibleCount === 0) {
|
||||
noResults.classList.remove('d-none');
|
||||
} else {
|
||||
noResults.classList.add('d-none');
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user