31 Commits

SHA1  Message  Date
50fb5aa387 MyFSIO v0.3.9 Release
Reviewed-on: #32
2026-03-14 09:44:14 +00:00
cc161bf362 MyFSIO v0.3.8 Release
Reviewed-on: #31
2026-03-10 08:31:27 +00:00
2a0e77a754 MyFSIO v0.3.7 Release
Reviewed-on: #30
2026-03-09 06:25:50 +00:00
eb0e435a5a MyFSIO v0.3.6 Release
Reviewed-on: #29
2026-03-08 04:46:31 +00:00
7633007a08 MyFSIO v0.3.5 Release
Reviewed-on: #28
2026-03-07 05:53:02 +00:00
de0d869c9f Merge pull request 'MyFSIO v0.3.4 Release' (#27) from next into main
Reviewed-on: #27
2026-03-02 08:31:32 +00:00
fdd068feee MyFSIO v0.3.3 Release
Reviewed-on: #26
2026-02-27 04:49:32 +00:00
66b7677d2c MyFSIO v0.3.2 Release
Reviewed-on: #25
2026-02-26 10:10:19 +00:00
4d90ead816 Merge pull request 'Fix incorrect Upgrading & Updates section in Docs' (#24) from next into main
Reviewed-on: #24
2026-02-26 09:50:17 +00:00
b37a51ed1d MyFSIO v0.3.1 Release
Reviewed-on: #23
2026-02-26 09:42:37 +00:00
0462a7b62e MyFSIO v0.3.0 Release
Reviewed-on: #22
2026-02-22 10:22:35 +00:00
52660570c1 Merge pull request 'MyFSIO v0.2.9 Release' (#21) from next into main
Reviewed-on: #21
2026-02-15 14:24:14 +00:00
35f61313e0 MyFSIO v0.2.8 Release
Reviewed-on: #20
2026-02-10 14:16:22 +00:00
c470cfb576 MyFSIO v0.2.7 Release
Reviewed-on: #19
2026-02-09 12:22:37 +00:00
d96955deee MyFSIO v0.2.6 Release
Reviewed-on: #18
2026-02-05 16:18:03 +00:00
85181f0be6 Merge pull request 'MyFSIO v0.2.5 Release' (#17) from next into main
Reviewed-on: #17
2026-02-02 05:32:02 +00:00
d5ca7a8be1 Merge pull request 'MyFSIO v0.2.4 Release' (#16) from next into main
Reviewed-on: #16
2026-02-01 10:27:11 +00:00
476dc79e42 MyFSIO v0.2.3 Release
Reviewed-on: #15
2026-01-25 06:05:53 +00:00
bb6590fc5e Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main
Reviewed-on: #14
2026-01-19 07:12:15 +00:00
899db3421b Merge pull request 'MyFSIO v0.2.1 Release' (#13) from next into main
Reviewed-on: #13
2026-01-12 08:03:29 +00:00
caf01d6ada Merge pull request 'MyFSIO v0.2.0 Release' (#12) from next into main
Reviewed-on: #12
2026-01-05 15:48:03 +00:00
bb366cb4cd Merge pull request 'MyFSIO v0.1.9 Release' (#10) from next into main
Reviewed-on: #10
2025-12-29 06:49:48 +00:00
a2745ff2ee Merge pull request 'MyFSIO v0.1.8 Release' (#9) from next into main
Reviewed-on: #9
2025-12-23 06:01:32 +00:00
28cb656d94 Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main
Reviewed-on: #8
2025-12-22 03:10:35 +00:00
3c44152fc6 Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main
Reviewed-on: #7
2025-12-21 06:30:21 +00:00
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main
Reviewed-on: #6
2025-12-13 15:41:03 +00:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main
Reviewed-on: #5
2025-12-13 08:22:43 +00:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main
Reviewed-on: #4
2025-12-03 04:14:57 +00:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main
Reviewed-on: #3
2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main
Reviewed-on: #2
2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main
Reviewed-on: #1
2025-11-22 12:31:27 +00:00
87 changed files with 650 additions and 20876 deletions


@@ -11,5 +11,3 @@ htmlcov
logs
data
tmp
myfsio_core/target
myfsio-engine/target

.gitignore (vendored): 3 lines changed

@@ -30,9 +30,6 @@ dist/
myfsio_core/target/
myfsio_core/Cargo.lock
# Rust engine build artifacts
myfsio-engine/target/
# Local runtime artifacts
logs/
*.log


@@ -13,7 +13,6 @@ RUN apt-get update \
ENV PATH="/root/.cargo/bin:${PATH}"
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
@@ -22,18 +21,15 @@ RUN pip install --no-cache-dir maturin \
&& cd myfsio_core \
&& maturin build --release \
&& pip install target/wheels/*.whl \
&& cd ../myfsio-engine \
&& cargo build --release \
&& cp target/release/myfsio-server /usr/local/bin/myfsio-server \
&& cd .. \
&& rm -rf myfsio_core/target myfsio-engine/target \
&& rm -rf myfsio_core/target \
&& pip uninstall -y maturin \
&& rustup self uninstall -y
RUN chmod +x docker-entrypoint.sh
RUN mkdir -p /app/data \
&& useradd -m -u 1000 myfsio \
&& useradd -m -u 1000 myfsio \
&& chown -R myfsio:myfsio /app
USER myfsio
@@ -41,8 +37,7 @@ USER myfsio
EXPOSE 5000 5100
ENV APP_HOST=0.0.0.0 \
FLASK_ENV=production \
FLASK_DEBUG=0 \
ENGINE=rust
FLASK_DEBUG=0
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
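For reference, the probe the HEALTHCHECK runs is equivalent to the standalone snippet below. Note that it only fails (non-zero exit) when the request raises, e.g. connection refused or timeout; requests.get does not raise on HTTP error statuses, so a 500 from /myfsio/health would still count as healthy.

import requests

# Same call as the Dockerfile CMD; an exception here exits non-zero,
# which Docker counts as an unhealthy check.
requests.get("http://localhost:5000/myfsio/health", timeout=2)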


@@ -72,11 +72,6 @@ source .venv/bin/activate
# Install dependencies
pip install -r requirements.txt
# (Optional) Build Rust native extension for better performance
# Requires Rust toolchain: https://rustup.rs
pip install maturin
cd myfsio_core && maturin develop --release && cd ..
# Start both servers
python run.py


@@ -18,8 +18,6 @@ from flask_cors import CORS
from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix
import io
from .access_logging import AccessLoggingService
from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
@@ -46,64 +44,6 @@ from .website_domains import WebsiteDomainStore
_request_counter = itertools.count(1)
class _ChunkedTransferMiddleware:
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
return self.app(environ, start_response)
transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
content_length = environ.get("CONTENT_LENGTH")
if "chunked" in transfer_encoding.lower():
if content_length:
del environ["HTTP_TRANSFER_ENCODING"]
else:
raw = environ.get("wsgi.input")
if raw:
try:
if hasattr(raw, "seek"):
raw.seek(0)
body = raw.read()
except Exception:
body = b""
if body:
environ["wsgi.input"] = io.BytesIO(body)
environ["CONTENT_LENGTH"] = str(len(body))
del environ["HTTP_TRANSFER_ENCODING"]
content_length = environ.get("CONTENT_LENGTH")
if not content_length or content_length == "0":
sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
if ("STREAMING" in sha256.upper() or decoded_len
or "aws-chunked" in content_encoding.lower()):
raw = environ.get("wsgi.input")
if raw:
try:
if hasattr(raw, "seek"):
raw.seek(0)
body = raw.read()
except Exception:
body = b""
if body:
environ["wsgi.input"] = io.BytesIO(body)
environ["CONTENT_LENGTH"] = str(len(body))
raw = environ.get("wsgi.input")
if raw and hasattr(raw, "seek"):
try:
raw.seek(0)
except Exception:
pass
return self.app(environ, start_response)
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
"""Migrate config file from legacy locations to the active path.
@@ -167,11 +107,10 @@ def create_app(
)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)
# Enable gzip compression for responses (10-20x smaller JSON payloads)
if app.config.get("ENABLE_GZIP", True):
app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)
app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)
_configure_cors(app)
_configure_logging(app)
@@ -184,7 +123,6 @@ def create_app(
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
meta_read_cache_max=app.config.get("META_READ_CACHE_MAX", 2048),
)
if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
@@ -294,7 +232,6 @@ def create_app(
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
dry_run=app.config.get("GC_DRY_RUN", False),
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
)
gc_collector.start()
@@ -306,7 +243,6 @@ def create_app(
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
)
integrity_checker.start()
@@ -742,7 +678,6 @@ def _configure_logging(app: Flask) -> None:
},
)
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
response.headers["Server"] = "MyFSIO"
operation_metrics = app.extensions.get("operation_metrics")
if operation_metrics:
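For context on the _ChunkedTransferMiddleware removed above: a chunked PUT arrives with Transfer-Encoding: chunked and no CONTENT_LENGTH, which some WSGI servers reject unless the body is buffered first. A minimal, self-contained sketch of that buffering idea (function name hypothetical, assuming the server has already de-chunked wsgi.input):

import io

def buffer_chunked_body(environ):
    # Read the whole stream, then rewrite the environ so downstream
    # code sees an ordinary fixed-length request.
    body = environ["wsgi.input"].read()
    environ["wsgi.input"] = io.BytesIO(body)
    environ["CONTENT_LENGTH"] = str(len(body))
    environ.pop("HTTP_TRANSFER_ENCODING", None)

environ = {
    "REQUEST_METHOD": "PUT",
    "HTTP_TRANSFER_ENCODING": "chunked",
    "wsgi.input": io.BytesIO(b"hello world"),
}
buffer_chunked_body(environ)
assert environ["CONTENT_LENGTH"] == "11"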


@@ -686,107 +686,6 @@ def _storage():
return current_app.extensions["object_storage"]
def _require_iam_action(action: str):
principal, error = _require_principal()
if error:
return None, error
try:
_iam().authorize(principal, None, action)
return principal, None
except IamError:
return None, _json_error("AccessDenied", f"Requires {action} permission", 403)
@admin_api_bp.route("/iam/users", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_list_users():
principal, error = _require_iam_action("iam:list_users")
if error:
return error
return jsonify({"users": _iam().list_users()})
@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user(identifier):
principal, error = _require_iam_action("iam:get_user")
if error:
return error
try:
user_id = _iam().resolve_user_id(identifier)
return jsonify(_iam().get_user_by_id(user_id))
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user_policies(identifier):
principal, error = _require_iam_action("iam:get_policy")
if error:
return error
try:
return jsonify({"policies": _iam().get_user_policies(identifier)})
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_create_access_key(identifier):
principal, error = _require_iam_action("iam:create_key")
if error:
return error
try:
result = _iam().create_access_key(identifier)
logger.info("Access key created for %s by %s", identifier, principal.access_key)
return jsonify(result), 201
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_delete_access_key(identifier, access_key):
principal, error = _require_iam_action("iam:delete_key")
if error:
return error
try:
_iam().delete_access_key(access_key)
logger.info("Access key %s deleted by %s", access_key, principal.access_key)
return "", 204
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_disable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().disable_user(identifier)
logger.info("User %s disabled by %s", identifier, principal.access_key)
return jsonify({"status": "disabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_enable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().enable_user(identifier)
logger.info("User %s enabled by %s", identifier, principal.access_key)
return jsonify({"status": "enabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/website-domains", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_website_domains():
@@ -907,11 +806,15 @@ def gc_run_now():
if not gc:
return _json_error("InvalidRequest", "GC is not enabled", 400)
payload = request.get_json(silent=True) or {}
started = gc.run_async(dry_run=payload.get("dry_run"))
original_dry_run = gc.dry_run
if "dry_run" in payload:
gc.dry_run = bool(payload["dry_run"])
try:
result = gc.run_now()
finally:
gc.dry_run = original_dry_run
logger.info("GC manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "GC is already in progress", 409)
return jsonify({"status": "started"})
return jsonify(result.to_dict())
@admin_api_bp.route("/gc/history", methods=["GET"])
@@ -957,14 +860,12 @@ def integrity_run_now():
payload = request.get_json(silent=True) or {}
override_dry_run = payload.get("dry_run")
override_auto_heal = payload.get("auto_heal")
started = checker.run_async(
result = checker.run_now(
auto_heal=override_auto_heal if override_auto_heal is not None else None,
dry_run=override_dry_run if override_dry_run is not None else None,
)
logger.info("Integrity manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "A scan is already in progress", 409)
return jsonify({"status": "started"})
return jsonify(result.to_dict())
@admin_api_bp.route("/integrity/history", methods=["GET"])
@@ -980,5 +881,3 @@ def integrity_history():
offset = int(request.args.get("offset", 0))
records = checker.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
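The rewritten gc_run_now above replaces the async kick-off with a synchronous run, temporarily overriding the collector's dry_run flag. The save/override/restore shape in isolation (class and values illustrative):

class FakeGC:
    dry_run = False
    def run_now(self):
        return {"dry_run": self.dry_run}

gc = FakeGC()
payload = {"dry_run": True}
original_dry_run = gc.dry_run
if "dry_run" in payload:
    gc.dry_run = bool(payload["dry_run"])
try:
    result = gc.run_now()
finally:
    gc.dry_run = original_dry_run  # restored even if run_now raises

assert result == {"dry_run": True}
assert gc.dry_run is False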


@@ -25,7 +25,7 @@ def _calculate_auto_connection_limit() -> int:
def _calculate_auto_backlog(connection_limit: int) -> int:
return max(128, min(connection_limit * 2, 4096))
return max(64, min(connection_limit * 2, 4096))
def _validate_rate_limit(value: str) -> str:
@@ -115,7 +115,6 @@ class AppConfig:
server_connection_limit: int
server_backlog: int
server_channel_timeout: int
server_max_buffer_size: int
server_threads_auto: bool
server_connection_limit_auto: bool
server_backlog_auto: bool
@@ -136,7 +135,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds: float
object_key_max_length_bytes: int
object_cache_max_size: int
meta_read_cache_max: int
bucket_config_cache_ttl_seconds: float
object_tag_limit: int
encryption_chunk_size_bytes: int
@@ -158,13 +156,11 @@ class AppConfig:
gc_multipart_max_age_days: int
gc_lock_file_max_age_hours: float
gc_dry_run: bool
gc_io_throttle_ms: int
integrity_enabled: bool
integrity_interval_hours: float
integrity_batch_size: int
integrity_auto_heal: bool
integrity_dry_run: bool
integrity_io_throttle_ms: int
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -297,7 +293,6 @@ class AppConfig:
server_backlog_auto = False
server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
@@ -316,7 +311,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
meta_read_cache_max = int(_get("META_READ_CACHE_MAX", 2048))
bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
@@ -342,13 +336,11 @@ class AppConfig:
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -402,7 +394,6 @@ class AppConfig:
server_connection_limit=server_connection_limit,
server_backlog=server_backlog,
server_channel_timeout=server_channel_timeout,
server_max_buffer_size=server_max_buffer_size,
server_threads_auto=server_threads_auto,
server_connection_limit_auto=server_connection_limit_auto,
server_backlog_auto=server_backlog_auto,
@@ -423,7 +414,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
object_key_max_length_bytes=object_key_max_length_bytes,
object_cache_max_size=object_cache_max_size,
meta_read_cache_max=meta_read_cache_max,
bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
object_tag_limit=object_tag_limit,
encryption_chunk_size_bytes=encryption_chunk_size_bytes,
@@ -445,13 +435,11 @@ class AppConfig:
gc_multipart_max_age_days=gc_multipart_max_age_days,
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
gc_dry_run=gc_dry_run,
gc_io_throttle_ms=gc_io_throttle_ms,
integrity_enabled=integrity_enabled,
integrity_interval_hours=integrity_interval_hours,
integrity_batch_size=integrity_batch_size,
integrity_auto_heal=integrity_auto_heal,
integrity_dry_run=integrity_dry_run,
integrity_io_throttle_ms=integrity_io_throttle_ms)
integrity_dry_run=integrity_dry_run)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
@@ -516,12 +504,10 @@ class AppConfig:
issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
if not (10 <= self.server_connection_limit <= 1000):
issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
if not (128 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
if not (64 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
if not (10 <= self.server_channel_timeout <= 300):
issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
if self.server_max_buffer_size < 1024 * 1024:
issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")
if sys.platform != "win32":
try:
@@ -567,7 +553,6 @@ class AppConfig:
print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
print("=" * 60)
issues = self.validate_and_report()
@@ -633,7 +618,6 @@ class AppConfig:
"SERVER_CONNECTION_LIMIT": self.server_connection_limit,
"SERVER_BACKLOG": self.server_backlog,
"SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
"SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
"SITE_SYNC_ENABLED": self.site_sync_enabled,
"SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
"SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
@@ -651,7 +635,6 @@ class AppConfig:
"SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
"OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
"OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
"META_READ_CACHE_MAX": self.meta_read_cache_max,
"BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
"OBJECT_TAG_LIMIT": self.object_tag_limit,
"ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
@@ -673,11 +656,9 @@ class AppConfig:
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
"GC_DRY_RUN": self.gc_dry_run,
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
"INTEGRITY_ENABLED": self.integrity_enabled,
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
}
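A quick check of the auto-backlog change above, which lowers the floor from 128 to 64 while keeping the 4096 cap (values illustrative):

def calc_auto_backlog(connection_limit: int) -> int:
    # New formula: twice the connection limit, clamped to [64, 4096].
    return max(64, min(connection_limit * 2, 4096))

assert calc_auto_backlog(20) == 64      # previously floored at 128
assert calc_auto_backlog(100) == 200
assert calc_auto_backlog(5000) == 4096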


@@ -193,9 +193,6 @@ class EncryptedObjectStorage:
def list_objects_shallow(self, bucket_name: str, **kwargs):
return self.storage.list_objects_shallow(bucket_name, **kwargs)
def iter_objects_shallow(self, bucket_name: str, **kwargs):
return self.storage.iter_objects_shallow(bucket_name, **kwargs)
def search_objects(self, bucket_name: str, query: str, **kwargs):
return self.storage.search_objects(bucket_name, query, **kwargs)


@@ -21,10 +21,6 @@ if sys.platform != "win32":
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"encrypt_stream_chunked", "decrypt_stream_chunked",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None
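The import guard above both detects the native module and rejects stale builds that predate encrypt_stream_chunked/decrypt_stream_chunked. The same pattern in miniature (module name hypothetical):

try:
    import some_native_ext as _ext  # hypothetical stand-in for myfsio_core
    if not hasattr(_ext, "encrypt_stream_chunked"):
        # A present-but-outdated build is treated the same as no build.
        raise ImportError("native extension too old; rebuild it")
    HAS_NATIVE = True
except ImportError:
    _ext = None
    HAS_NATIVE = False  # fall back to the pure-Python code path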


@@ -175,21 +175,13 @@ def handle_app_error(error: AppError) -> Response:
def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
g.s3_error_code = "SlowDown"
if request.path.startswith("/ui") or request.path.startswith("/buckets"):
wants_json = (
request.is_json or
request.headers.get("X-Requested-With") == "XMLHttpRequest" or
"application/json" in request.accept_mimetypes.values()
)
if wants_json:
return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
error = Element("Error")
SubElement(error, "Code").text = "SlowDown"
SubElement(error, "Message").text = "Please reduce your request rate."
SubElement(error, "Resource").text = request.path
SubElement(error, "RequestId").text = getattr(g, "request_id", "")
xml_bytes = tostring(error, encoding="utf-8")
return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
return Response(xml_bytes, status=429, mimetype="application/xml")
def register_error_handlers(app):
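For reference, the ElementTree calls above yield an S3-style error body like the following (Resource and RequestId illustrative); a standalone sketch:

from xml.etree.ElementTree import Element, SubElement, tostring

error = Element("Error")
SubElement(error, "Code").text = "SlowDown"
SubElement(error, "Message").text = "Please reduce your request rate."
SubElement(error, "Resource").text = "/demo-bucket/demo-key"  # illustrative
SubElement(error, "RequestId").text = "req-0001"              # illustrative
print(tostring(error, encoding="unicode"))
# <Error><Code>SlowDown</Code><Message>Please reduce your request rate.</Message>
# <Resource>/demo-bucket/demo-key</Resource><RequestId>req-0001</RequestId></Error>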

app/gc.py: 133 lines changed

@@ -162,7 +162,6 @@ class GarbageCollector:
lock_file_max_age_hours: float = 1.0,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -173,9 +172,6 @@ class GarbageCollector:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = GCHistoryStore(storage_root, max_records=max_history)
def start(self) -> None:
@@ -216,81 +212,49 @@ class GarbageCollector:
finally:
self._schedule_next()
def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
if not self._lock.acquire(blocking=False):
raise RuntimeError("GC is already in progress")
def run_now(self) -> GCResult:
start = time.time()
result = GCResult()
effective_dry_run = dry_run if dry_run is not None else self.dry_run
self._clean_temp_files(result)
self._clean_orphaned_multipart(result)
self._clean_stale_locks(result)
self._clean_orphaned_metadata(result)
self._clean_orphaned_versions(result)
self._clean_empty_dirs(result)
try:
self._scanning = True
self._scan_start_time = time.time()
result.execution_time_seconds = time.time() - start
start = self._scan_start_time
result = GCResult()
original_dry_run = self.dry_run
self.dry_run = effective_dry_run
try:
self._clean_temp_files(result)
self._clean_orphaned_multipart(result)
self._clean_stale_locks(result)
self._clean_orphaned_metadata(result)
self._clean_orphaned_versions(result)
self._clean_empty_dirs(result)
finally:
self.dry_run = original_dry_run
result.execution_time_seconds = time.time() - start
if result.has_work or result.errors:
logger.info(
"GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
"locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
result.execution_time_seconds,
result.temp_files_deleted,
result.temp_bytes_freed / (1024 * 1024),
result.multipart_uploads_deleted,
result.multipart_bytes_freed / (1024 * 1024),
result.lock_files_deleted,
result.orphaned_metadata_deleted,
result.orphaned_versions_deleted,
result.orphaned_version_bytes_freed / (1024 * 1024),
result.empty_dirs_removed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
record = GCExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
if result.has_work or result.errors:
logger.info(
"GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
"locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
result.execution_time_seconds,
result.temp_files_deleted,
result.temp_bytes_freed / (1024 * 1024),
result.multipart_uploads_deleted,
result.multipart_bytes_freed / (1024 * 1024),
result.lock_files_deleted,
result.orphaned_metadata_deleted,
result.orphaned_versions_deleted,
result.orphaned_version_bytes_freed / (1024 * 1024),
result.empty_dirs_removed,
len(result.errors),
" (dry run)" if self.dry_run else "",
)
self.history_store.add(record)
return result
finally:
self._scanning = False
self._scan_start_time = None
self._lock.release()
record = GCExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=self.dry_run,
)
self.history_store.add(record)
def run_async(self, dry_run: Optional[bool] = None) -> bool:
if self._scanning:
return False
t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
t.start()
return True
return result
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _list_bucket_names(self) -> List[str]:
names = []
try:
@@ -307,8 +271,6 @@ class GarbageCollector:
return
try:
for entry in tmp_dir.iterdir():
if self._throttle():
return
if not entry.is_file():
continue
age = _file_age_hours(entry)
@@ -330,8 +292,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
for multipart_root in (
self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
self.storage_root / bucket_name / ".multipart",
@@ -340,8 +300,6 @@ class GarbageCollector:
continue
try:
for upload_dir in multipart_root.iterdir():
if self._throttle():
return
if not upload_dir.is_dir():
continue
self._maybe_clean_upload(upload_dir, cutoff_hours, result)
@@ -371,8 +329,6 @@ class GarbageCollector:
try:
for bucket_dir in buckets_root.iterdir():
if self._shutdown:
return
if not bucket_dir.is_dir():
continue
locks_dir = bucket_dir / "locks"
@@ -380,8 +336,6 @@ class GarbageCollector:
continue
try:
for lock_file in locks_dir.iterdir():
if self._throttle():
return
if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
continue
age = _file_age_hours(lock_file)
@@ -402,8 +356,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
legacy_meta = self.storage_root / bucket_name / ".meta"
if legacy_meta.exists():
self._clean_legacy_metadata(bucket_name, legacy_meta, result)
@@ -416,8 +368,6 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for meta_file in meta_root.rglob("*.meta.json"):
if self._throttle():
return
if not meta_file.is_file():
continue
try:
@@ -437,8 +387,6 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for index_file in meta_root.rglob("_index.json"):
if self._throttle():
return
if not index_file.is_file():
continue
try:
@@ -482,8 +430,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
bucket_path = self.storage_root / bucket_name
for versions_root in (
self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
@@ -493,8 +439,6 @@ class GarbageCollector:
continue
try:
for key_dir in versions_root.iterdir():
if self._throttle():
return
if not key_dir.is_dir():
continue
self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
@@ -545,8 +489,6 @@ class GarbageCollector:
self._remove_empty_dirs_recursive(root, root, result)
def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
if self._shutdown:
return False
if not path.is_dir():
return False
@@ -557,8 +499,6 @@ class GarbageCollector:
all_empty = True
for child in children:
if self._throttle():
return False
if child.is_dir():
if not self._remove_empty_dirs_recursive(child, stop_at, result):
all_empty = False
@@ -580,17 +520,12 @@ class GarbageCollector:
return [r.to_dict() for r in records]
def get_status(self) -> dict:
status: Dict[str, Any] = {
return {
"enabled": not self._shutdown or self._timer is not None,
"running": self._timer is not None and not self._shutdown,
"scanning": self._scanning,
"interval_hours": self.interval_seconds / 3600.0,
"temp_file_max_age_hours": self.temp_file_max_age_hours,
"multipart_max_age_days": self.multipart_max_age_days,
"lock_file_max_age_hours": self.lock_file_max_age_hours,
"dry_run": self.dry_run,
"io_throttle_ms": round(self._io_throttle * 1000),
}
if self._scanning and self._scan_start_time:
status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
return status
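The deleted run_async/_throttle machinery followed a common shape: a non-blocking lock guarantees a single scan at a time, run_async is a best-effort fire-and-forget wrapper, and a small sleep between filesystem operations caps I/O pressure. A stripped-down sketch of that pattern (names hypothetical):

import threading
import time

class Scanner:
    def __init__(self, io_throttle_ms: int = 10):
        self._lock = threading.Lock()
        self._io_throttle = io_throttle_ms / 1000.0

    def run_now(self):
        # Hard guarantee: refuse to overlap with a running scan.
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("scan already in progress")
        try:
            for _ in range(3):          # stand-in for per-file work
                time.sleep(self._io_throttle)
        finally:
            self._lock.release()

    def run_async(self) -> bool:
        # Best-effort check; run_now still enforces exclusivity.
        if self._lock.locked():
            return False
        threading.Thread(target=self.run_now, daemon=True).start()
        return True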


@@ -10,7 +10,7 @@ import secrets
import threading
import time
from collections import deque
from dataclasses import dataclass, field
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
@@ -22,37 +22,16 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails."""
S3_ACTIONS = {
"list", "read", "write", "delete", "share", "policy",
"replication", "lifecycle", "cors",
"create_bucket", "delete_bucket",
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
}
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
IAM_ACTIONS = {
"iam:list_users",
"iam:create_user",
"iam:delete_user",
"iam:rotate_key",
"iam:update_policy",
"iam:create_key",
"iam:delete_key",
"iam:get_user",
"iam:get_policy",
"iam:disable_user",
}
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
_V1_IMPLIED_ACTIONS = {
"write": {"create_bucket"},
"delete": {"delete_bucket"},
"policy": {
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
"cors", "lifecycle", "replication", "share",
},
}
ACTION_ALIASES = {
"list": "list",
"s3:listbucket": "list",
@@ -66,11 +45,14 @@ ACTION_ALIASES = {
"s3:getobjecttagging": "read",
"s3:getobjectversiontagging": "read",
"s3:getobjectacl": "read",
"s3:getbucketversioning": "read",
"s3:headobject": "read",
"s3:headbucket": "read",
"write": "write",
"s3:putobject": "write",
"s3:createbucket": "write",
"s3:putobjecttagging": "write",
"s3:putbucketversioning": "write",
"s3:createmultipartupload": "write",
"s3:uploadpart": "write",
"s3:completemultipartupload": "write",
@@ -79,11 +61,8 @@ ACTION_ALIASES = {
"delete": "delete",
"s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
"s3:deleteobjecttagging": "delete",
"create_bucket": "create_bucket",
"s3:createbucket": "create_bucket",
"delete_bucket": "delete_bucket",
"s3:deletebucket": "delete_bucket",
"share": "share",
"s3:putobjectacl": "share",
"s3:putbucketacl": "share",
@@ -109,50 +88,11 @@ ACTION_ALIASES = {
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
"versioning": "versioning",
"s3:getbucketversioning": "versioning",
"s3:putbucketversioning": "versioning",
"tagging": "tagging",
"s3:getbuckettagging": "tagging",
"s3:putbuckettagging": "tagging",
"s3:deletebuckettagging": "tagging",
"encryption": "encryption",
"s3:getencryptionconfiguration": "encryption",
"s3:putencryptionconfiguration": "encryption",
"s3:deleteencryptionconfiguration": "encryption",
"quota": "quota",
"s3:getbucketquota": "quota",
"s3:putbucketquota": "quota",
"s3:deletebucketquota": "quota",
"object_lock": "object_lock",
"s3:getobjectlockconfiguration": "object_lock",
"s3:putobjectlockconfiguration": "object_lock",
"s3:putobjectretention": "object_lock",
"s3:getobjectretention": "object_lock",
"s3:putobjectlegalhold": "object_lock",
"s3:getobjectlegalhold": "object_lock",
"notification": "notification",
"s3:getbucketnotificationconfiguration": "notification",
"s3:putbucketnotificationconfiguration": "notification",
"s3:deletebucketnotificationconfiguration": "notification",
"logging": "logging",
"s3:getbucketlogging": "logging",
"s3:putbucketlogging": "logging",
"s3:deletebucketlogging": "logging",
"website": "website",
"s3:getbucketwebsite": "website",
"s3:putbucketwebsite": "website",
"s3:deletebucketwebsite": "website",
"iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user",
"iam:rotateaccesskey": "iam:rotate_key",
"iam:putuserpolicy": "iam:update_policy",
"iam:createaccesskey": "iam:create_key",
"iam:deleteaccesskey": "iam:delete_key",
"iam:getuser": "iam:get_user",
"iam:getpolicy": "iam:get_policy",
"iam:disableuser": "iam:disable_user",
"iam:*": "iam:*",
}
@@ -161,7 +101,6 @@ ACTION_ALIASES = {
class Policy:
bucket: str
actions: Set[str]
prefix: str = "*"
@dataclass
@@ -178,16 +117,6 @@ def _derive_fernet_key(secret: str) -> bytes:
_IAM_ENCRYPTED_PREFIX = b"MYFSIO_IAM_ENC:"
_CONFIG_VERSION = 2
def _expand_v1_actions(actions: Set[str]) -> Set[str]:
expanded = set(actions)
for action, implied in _V1_IMPLIED_ACTIONS.items():
if action in expanded:
expanded.update(implied)
return expanded
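A worked example of the v1 expansion being removed: granting the coarse v1 "write" also grants the newer "create_bucket" action (mapping trimmed to two entries for brevity):

_V1_IMPLIED_ACTIONS = {
    "write": {"create_bucket"},
    "delete": {"delete_bucket"},
}

def _expand_v1_actions(actions):
    expanded = set(actions)
    for action, implied in _V1_IMPLIED_ACTIONS.items():
        if action in expanded:
            expanded.update(implied)
    return expanded

assert _expand_v1_actions({"write"}) == {"write", "create_bucket"}
assert _expand_v1_actions({"read"}) == {"read"}  # no implied actions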
class IamService:
"""Loads IAM configuration, manages users, and evaluates policies."""
@@ -202,10 +131,7 @@ class IamService:
self.config_path.parent.mkdir(parents=True, exist_ok=True)
if not self.config_path.exists():
self._write_default()
self._user_records: Dict[str, Dict[str, Any]] = {}
self._key_index: Dict[str, str] = {}
self._key_secrets: Dict[str, str] = {}
self._key_status: Dict[str, str] = {}
self._users: Dict[str, Dict[str, Any]] = {}
self._raw_config: Dict[str, Any] = {}
self._failed_attempts: Dict[str, Deque[datetime]] = {}
self._last_load_time = 0.0
@@ -220,6 +146,7 @@ class IamService:
self._load_lockout_state()
def _maybe_reload(self) -> None:
"""Reload configuration if the file has changed on disk."""
now = time.time()
if now - self._last_stat_check < self._stat_check_interval:
return
@@ -256,20 +183,11 @@ class IamService:
raise IamError(
f"Access temporarily locked. Try again in {seconds} seconds."
)
user_id = self._key_index.get(access_key)
stored_secret = self._key_secrets.get(access_key, secrets.token_urlsafe(24))
if not user_id or not hmac.compare_digest(stored_secret, secret_key):
record = self._users.get(access_key)
stored_secret = record["secret_key"] if record else secrets.token_urlsafe(24)
if not record or not hmac.compare_digest(stored_secret, secret_key):
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
key_status = self._key_status.get(access_key, "active")
if key_status != "active":
raise IamError("Access key is inactive")
record = self._user_records.get(user_id)
if not record:
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
if not record.get("enabled", True):
raise IamError("User account is disabled")
self._check_expiry(access_key, record)
self._clear_failed_attempts(access_key)
return self._build_principal(access_key, record)
@@ -297,6 +215,7 @@ class IamService:
return self.config_path.parent / "lockout_state.json"
def _load_lockout_state(self) -> None:
"""Load lockout state from disk."""
try:
if self._lockout_file().exists():
data = json.loads(self._lockout_file().read_text(encoding="utf-8"))
@@ -316,6 +235,7 @@ class IamService:
pass
def _save_lockout_state(self) -> None:
"""Persist lockout state to disk."""
data: Dict[str, Any] = {"failed_attempts": {}}
for key, attempts in self._failed_attempts.items():
data["failed_attempts"][key] = [ts.isoformat() for ts in attempts]
@@ -350,9 +270,10 @@ class IamService:
return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))
def create_session_token(self, access_key: str, duration_seconds: int = 3600) -> str:
"""Create a temporary session token for an access key."""
self._maybe_reload()
user_id = self._key_index.get(access_key)
if not user_id or user_id not in self._user_records:
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
self._cleanup_expired_sessions()
token = secrets.token_urlsafe(32)
@@ -364,6 +285,7 @@ class IamService:
return token
def validate_session_token(self, access_key: str, session_token: str) -> bool:
"""Validate a session token for an access key (thread-safe, constant-time)."""
dummy_key = secrets.token_urlsafe(16)
dummy_token = secrets.token_urlsafe(32)
with self._session_lock:
@@ -382,6 +304,7 @@ class IamService:
return True
def _cleanup_expired_sessions(self) -> None:
"""Remove expired session tokens."""
now = time.time()
expired = [token for token, data in self._sessions.items() if now > data["expires_at"]]
for token in expired:
@@ -393,20 +316,13 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
user_id = self._key_index.get(access_key)
if not user_id:
raise IamError("Unknown access key")
record = self._user_records.get(user_id)
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
self._check_expiry(access_key, record)
@@ -416,27 +332,22 @@ class IamService:
def secret_for_key(self, access_key: str) -> str:
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
secret = self._key_secrets.get(access_key)
if not secret:
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return secret
self._check_expiry(access_key, record)
return record["secret_key"]
def authorize(self, principal: Principal, bucket_name: str | None, action: str, *, object_key: str | None = None) -> None:
def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
action = self._normalize_action(action)
if action not in ALLOWED_ACTIONS:
raise IamError(f"Unknown action '{action}'")
bucket_name = bucket_name or "*"
normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
if not self._is_allowed(principal, normalized, action, object_key=object_key):
if not self._is_allowed(principal, normalized, action):
raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str], *, object_key: str | None = None) -> Dict[str, bool]:
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
self._maybe_reload()
bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
normalized_actions = {a: self._normalize_action(a) for a in actions}
@@ -445,53 +356,37 @@ class IamService:
if canonical not in ALLOWED_ACTIONS:
results[original] = False
else:
results[original] = self._is_allowed(principal, bucket_name, canonical, object_key=object_key)
results[original] = self._is_allowed(principal, bucket_name, canonical)
return results
def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]
def _is_allowed(self, principal: Principal, bucket_name: str, action: str, *, object_key: str | None = None) -> bool:
def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
bucket_name = bucket_name.lower()
for policy in principal.policies:
if policy.bucket not in {"*", bucket_name}:
continue
action_match = "*" in policy.actions or action in policy.actions
if not action_match and "iam:*" in policy.actions and action.startswith("iam:"):
action_match = True
if not action_match:
continue
if object_key is not None and policy.prefix != "*":
prefix = policy.prefix.rstrip("*")
if not object_key.startswith(prefix):
continue
return True
if "*" in policy.actions or action in policy.actions:
return True
if "iam:*" in policy.actions and action.startswith("iam:"):
return True
return False
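A condensed, self-contained sketch of the prefix-scoped check deleted above: a policy whose prefix is not "*" only matches object keys under that prefix (iam:* handling omitted):

from dataclasses import dataclass

@dataclass
class DemoPolicy:
    bucket: str
    actions: set
    prefix: str = "*"

def is_allowed(policies, bucket, action, object_key=None):
    for p in policies:
        if p.bucket not in {"*", bucket}:
            continue
        if "*" not in p.actions and action not in p.actions:
            continue
        if object_key is not None and p.prefix != "*":
            # "docs/*" matches keys that start with "docs/".
            if not object_key.startswith(p.prefix.rstrip("*")):
                continue
        return True
    return False

pols = [DemoPolicy("media", {"read"}, prefix="docs/*")]
assert is_allowed(pols, "media", "read", object_key="docs/a.txt")
assert not is_allowed(pols, "media", "read", object_key="img/a.png")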
def list_users(self) -> List[Dict[str, Any]]:
listing: List[Dict[str, Any]] = []
for user_id, record in self._user_records.items():
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
user_entry: Dict[str, Any] = {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{**{"bucket": policy.bucket, "actions": sorted(policy.actions)}, **({"prefix": policy.prefix} if policy.prefix != "*" else {})}
for policy in record["policies"]
],
}
if access_keys:
user_entry["access_key"] = access_keys[0]["access_key"]
listing.append(user_entry)
for access_key, record in self._users.items():
listing.append(
{
"access_key": access_key,
"display_name": record["display_name"],
"expires_at": record.get("expires_at"),
"policies": [
{"bucket": policy.bucket, "actions": sorted(policy.actions)}
for policy in record["policies"]
],
}
)
return listing
def create_user(
@@ -502,33 +397,20 @@ class IamService:
access_key: str | None = None,
secret_key: str | None = None,
expires_at: str | None = None,
user_id: str | None = None,
) -> Dict[str, str]:
access_key = (access_key or self._generate_access_key()).strip()
if not access_key:
raise IamError("Access key cannot be empty")
if access_key in self._key_index:
if access_key in self._users:
raise IamError("Access key already exists")
if expires_at:
self._validate_expires_at(expires_at)
secret_key = secret_key or self._generate_secret_key()
sanitized_policies = self._prepare_policy_payload(policies)
user_id = user_id or self._generate_user_id()
if user_id in self._user_records:
raise IamError("User ID already exists")
now_iso = datetime.now(timezone.utc).isoformat()
record: Dict[str, Any] = {
"user_id": user_id,
"access_key": access_key,
"secret_key": secret_key,
"display_name": display_name or access_key,
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": now_iso,
}
],
"policies": sanitized_policies,
}
if expires_at:
@@ -536,108 +418,12 @@ class IamService:
self._raw_config.setdefault("users", []).append(record)
self._save()
self._load()
return {"user_id": user_id, "access_key": access_key, "secret_key": secret_key}
def create_access_key(self, identifier: str) -> Dict[str, str]:
user_raw, _ = self._resolve_raw_user(identifier)
new_access_key = self._generate_access_key()
new_secret_key = self._generate_secret_key()
now_iso = datetime.now(timezone.utc).isoformat()
key_entry = {
"access_key": new_access_key,
"secret_key": new_secret_key,
"status": "active",
"created_at": now_iso,
}
user_raw.setdefault("access_keys", []).append(key_entry)
self._save()
self._load()
return {"access_key": new_access_key, "secret_key": new_secret_key}
def delete_access_key(self, access_key: str) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
keys = user_raw.get("access_keys", [])
if len(keys) <= 1:
raise IamError("Cannot delete the only access key for a user")
remaining = [k for k in keys if k["access_key"] != access_key]
if len(remaining) == len(keys):
raise IamError("Access key not found")
user_raw["access_keys"] = remaining
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def disable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = False
self._save()
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def enable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = True
self._save()
self._load()
def get_user_by_id(self, user_id: str) -> Dict[str, Any]:
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
return {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{"bucket": p.bucket, "actions": sorted(p.actions), "prefix": p.prefix}
for p in record["policies"]
],
}
def get_user_policies(self, identifier: str) -> List[Dict[str, Any]]:
_, user_id = self._resolve_raw_user(identifier)
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
return [
{**{"bucket": p.bucket, "actions": sorted(p.actions)}, **({"prefix": p.prefix} if p.prefix != "*" else {})}
for p in record["policies"]
]
def resolve_user_id(self, identifier: str) -> str:
if identifier in self._user_records:
return identifier
user_id = self._key_index.get(identifier)
if user_id:
return user_id
raise IamError("User not found")
return {"access_key": access_key, "secret_key": secret_key}
def rotate_secret(self, access_key: str) -> str:
user_raw, _ = self._resolve_raw_user(access_key)
user = self._get_raw_user(access_key)
new_secret = self._generate_secret_key()
for key_info in user_raw.get("access_keys", []):
if key_info["access_key"] == access_key:
key_info["secret_key"] = new_secret
break
else:
raise IamError("Access key not found")
user["secret_key"] = new_secret
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
@@ -647,8 +433,8 @@ class IamService:
return new_secret
def update_user(self, access_key: str, display_name: str) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["display_name"] = display_name
user = self._get_raw_user(access_key)
user["display_name"] = display_name
self._save()
self._load()
@@ -656,43 +442,32 @@ class IamService:
users = self._raw_config.get("users", [])
if len(users) <= 1:
raise IamError("Cannot delete the only user")
_, target_user_id = self._resolve_raw_user(access_key)
target_user_raw = None
remaining = []
for u in users:
if u.get("user_id") == target_user_id:
target_user_raw = u
else:
remaining.append(u)
if target_user_raw is None:
remaining = [user for user in users if user["access_key"] != access_key]
if len(remaining) == len(users):
raise IamError("User not found")
self._raw_config["users"] = remaining
self._save()
for key_info in target_user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def update_user_expiry(self, access_key: str, expires_at: str | None) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user = self._get_raw_user(access_key)
if expires_at:
self._validate_expires_at(expires_at)
user_raw["expires_at"] = expires_at
user["expires_at"] = expires_at
else:
user_raw.pop("expires_at", None)
user.pop("expires_at", None)
self._save()
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
self._load()
def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["policies"] = self._prepare_policy_payload(policies)
user = self._get_raw_user(access_key)
user["policies"] = self._prepare_policy_payload(policies)
self._save()
self._load()
@@ -707,52 +482,6 @@ class IamService:
raise IamError("Cannot decrypt IAM config. SECRET_KEY may have changed. Use 'python run.py reset-cred' to reset credentials.")
return raw_bytes.decode("utf-8")
def _is_v2_config(self, raw: Dict[str, Any]) -> bool:
return raw.get("version", 1) >= _CONFIG_VERSION
def _migrate_v1_to_v2(self, raw: Dict[str, Any]) -> Dict[str, Any]:
migrated_users = []
now_iso = datetime.now(timezone.utc).isoformat()
for user in raw.get("users", []):
old_policies = user.get("policies", [])
expanded_policies = []
for p in old_policies:
raw_actions = p.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
action_set: Set[str] = set()
for a in raw_actions:
canonical = self._normalize_action(a)
if canonical == "*":
action_set = set(ALLOWED_ACTIONS)
break
if canonical:
action_set.add(canonical)
action_set = _expand_v1_actions(action_set)
expanded_policies.append({
"bucket": p.get("bucket", "*"),
"actions": sorted(action_set),
"prefix": p.get("prefix", "*"),
})
migrated_user: Dict[str, Any] = {
"user_id": user["access_key"],
"display_name": user.get("display_name", user["access_key"]),
"enabled": True,
"access_keys": [
{
"access_key": user["access_key"],
"secret_key": user["secret_key"],
"status": "active",
"created_at": now_iso,
}
],
"policies": expanded_policies,
}
if user.get("expires_at"):
migrated_user["expires_at"] = user["expires_at"]
migrated_users.append(migrated_user)
return {"version": _CONFIG_VERSION, "users": migrated_users}
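The shape change the migration performs, shown as data with illustrative values: the v1 access key becomes the user_id, the secret moves into an access_keys list, and v1 actions gain their implied expansions:

v1_user = {
    "access_key": "AKIAEXAMPLE",            # illustrative
    "secret_key": "s3cr3t",                 # illustrative
    "display_name": "Local Admin",
    "policies": [{"bucket": "*", "actions": ["write"]}],
}

v2_user = {
    "user_id": "AKIAEXAMPLE",               # reuses the v1 access key
    "display_name": "Local Admin",
    "enabled": True,
    "access_keys": [
        {"access_key": "AKIAEXAMPLE", "secret_key": "s3cr3t",
         "status": "active", "created_at": "2026-01-01T00:00:00+00:00"},
    ],
    # "write" implies "create_bucket" after _expand_v1_actions.
    "policies": [{"bucket": "*", "actions": ["create_bucket", "write"],
                  "prefix": "*"}],
}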
def _load(self) -> None:
try:
self._last_load_time = self.config_path.stat().st_mtime
@@ -771,67 +500,35 @@ class IamService:
raise IamError(f"Failed to load IAM config: {e}")
was_plaintext = not raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX)
was_v1 = not self._is_v2_config(raw)
if was_v1:
raw = self._migrate_v1_to_v2(raw)
user_records: Dict[str, Dict[str, Any]] = {}
key_index: Dict[str, str] = {}
key_secrets: Dict[str, str] = {}
key_status_map: Dict[str, str] = {}
users: Dict[str, Dict[str, Any]] = {}
for user in raw.get("users", []):
user_id = user["user_id"]
policies = self._build_policy_objects(user.get("policies", []))
access_keys_raw = user.get("access_keys", [])
access_keys_info = []
for key_entry in access_keys_raw:
ak = key_entry["access_key"]
sk = key_entry["secret_key"]
status = key_entry.get("status", "active")
key_index[ak] = user_id
key_secrets[ak] = sk
key_status_map[ak] = status
access_keys_info.append({
"access_key": ak,
"secret_key": sk,
"status": status,
"created_at": key_entry.get("created_at"),
})
record: Dict[str, Any] = {
"display_name": user.get("display_name", user_id),
"enabled": user.get("enabled", True),
user_record: Dict[str, Any] = {
"secret_key": user["secret_key"],
"display_name": user.get("display_name", user["access_key"]),
"policies": policies,
"access_keys": access_keys_info,
}
if user.get("expires_at"):
record["expires_at"] = user["expires_at"]
user_records[user_id] = record
if not user_records:
user_record["expires_at"] = user["expires_at"]
users[user["access_key"]] = user_record
if not users:
raise IamError("IAM configuration contains no users")
self._user_records = user_records
self._key_index = key_index
self._key_secrets = key_secrets
self._key_status = key_status_map
self._users = users
raw_users: List[Dict[str, Any]] = []
for user in raw.get("users", []):
for entry in raw.get("users", []):
raw_entry: Dict[str, Any] = {
"user_id": user["user_id"],
"display_name": user.get("display_name", user["user_id"]),
"enabled": user.get("enabled", True),
"access_keys": user.get("access_keys", []),
"policies": user.get("policies", []),
"access_key": entry["access_key"],
"secret_key": entry["secret_key"],
"display_name": entry.get("display_name", entry["access_key"]),
"policies": entry.get("policies", []),
}
if user.get("expires_at"):
raw_entry["expires_at"] = user["expires_at"]
if entry.get("expires_at"):
raw_entry["expires_at"] = entry["expires_at"]
raw_users.append(raw_entry)
self._raw_config = {"version": _CONFIG_VERSION, "users": raw_users}
self._raw_config = {"users": raw_users}
if was_v1 or (was_plaintext and self._fernet):
if was_plaintext and self._fernet:
self._save()
def _save(self) -> None:
@@ -850,30 +547,19 @@ class IamService:
def config_summary(self) -> Dict[str, Any]:
return {
"path": str(self.config_path),
"user_count": len(self._user_records),
"user_count": len(self._users),
"allowed_actions": sorted(ALLOWED_ACTIONS),
}
def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
payload: Dict[str, Any] = {"version": _CONFIG_VERSION, "users": []}
payload: Dict[str, Any] = {"users": []}
for user in self._raw_config.get("users", []):
access_keys = []
for key_info in user.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"secret_key": "\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022" if mask_secrets else key_info["secret_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
record: Dict[str, Any] = {
"user_id": user["user_id"],
"access_key": user["access_key"],
"secret_key": "••••••••••" if mask_secrets else user["secret_key"],
"display_name": user["display_name"],
"enabled": user.get("enabled", True),
"access_keys": access_keys,
"policies": user["policies"],
}
if access_keys:
record["access_key"] = access_keys[0]["access_key"]
if user.get("expires_at"):
record["expires_at"] = user["expires_at"]
payload["users"].append(record)
@@ -883,7 +569,6 @@ class IamService:
entries: List[Policy] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -896,7 +581,7 @@ class IamService:
if canonical:
action_set.add(canonical)
if action_set:
entries.append(Policy(bucket=bucket, actions=action_set, prefix=prefix))
entries.append(Policy(bucket=bucket, actions=action_set))
return entries
def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
@@ -904,14 +589,12 @@ class IamService:
policies = (
{
"bucket": "*",
"actions": ["list", "read", "write", "delete", "share", "policy",
"create_bucket", "delete_bucket"],
"actions": ["list", "read", "write", "delete", "share", "policy"],
},
)
sanitized: List[Dict[str, Any]] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -925,10 +608,7 @@ class IamService:
action_set.add(canonical)
if not action_set:
continue
entry: Dict[str, Any] = {"bucket": bucket, "actions": sorted(action_set)}
if prefix != "*":
entry["prefix"] = prefix
sanitized.append(entry)
sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
if not sanitized:
raise IamError("At least one policy with valid actions is required")
return sanitized
@@ -953,23 +633,12 @@ class IamService:
access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())
user_id = self._generate_user_id()
now_iso = datetime.now(timezone.utc).isoformat()
default = {
"version": _CONFIG_VERSION,
"users": [
{
"user_id": user_id,
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": now_iso,
}
],
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
],
@@ -991,7 +660,6 @@ class IamService:
else:
print(f"Access Key: {access_key}")
print(f"Secret Key: {secret_key}")
print(f"User ID: {user_id}")
print(f"{'='*60}")
if self._fernet:
print("IAM config is encrypted at rest.")
@@ -1014,32 +682,11 @@ class IamService:
def _generate_secret_key(self) -> str:
return secrets.token_urlsafe(24)
def _generate_user_id(self) -> str:
return f"u-{secrets.token_hex(8)}"
def _resolve_raw_user(self, identifier: str) -> Tuple[Dict[str, Any], str]:
for user in self._raw_config.get("users", []):
if user.get("user_id") == identifier:
return user, identifier
for user in self._raw_config.get("users", []):
for key_info in user.get("access_keys", []):
if key_info["access_key"] == identifier:
return user, user["user_id"]
raise IamError("User not found")
def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
user, _ = self._resolve_raw_user(access_key)
return user
def _enforce_key_and_user_status(self, access_key: str) -> None:
key_status = self._key_status.get(access_key, "active")
if key_status != "active":
raise IamError("Access key is inactive")
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record and not record.get("enabled", True):
raise IamError("User account is disabled")
for user in self._raw_config.get("users", []):
if user["access_key"] == access_key:
return user
raise IamError("User not found")
def get_secret_key(self, access_key: str) -> str | None:
now = time.time()
@@ -1047,25 +694,18 @@ class IamService:
if cached:
secret_key, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return secret_key
self._maybe_reload()
secret = self._key_secrets.get(access_key)
if secret:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
self._secret_key_cache[access_key] = (secret, now)
return secret
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
secret_key = record["secret_key"]
self._secret_key_cache[access_key] = (secret_key, now)
return secret_key
return None
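Annotation: both lookup variants above share one caching idea — a dict of (value, inserted_at) pairs consulted before any disk reload; the real paths additionally re-check expiry and key status on every hit. A self-contained sketch of the TTL cache itself:

import time
from typing import Dict, Optional, Tuple

class TtlCache:
    def __init__(self, ttl: float = 30.0) -> None:
        self._ttl = ttl
        self._data: Dict[str, Tuple[str, float]] = {}

    def get(self, key: str) -> Optional[str]:
        hit = self._data.get(key)
        if hit is None:
            return None
        value, stored_at = hit
        if time.time() - stored_at >= self._ttl:
            del self._data[key]  # expired; the caller reloads from disk
            return None
        return value

    def put(self, key: str, value: str) -> None:
        self._data[key] = (value, time.time())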
def get_principal(self, access_key: str) -> Principal | None:
@@ -1074,22 +714,16 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
principal = self._build_principal(access_key, record)
self._principal_cache[access_key] = (principal, now)
return principal
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
principal = self._build_principal(access_key, record)
self._principal_cache[access_key] = (principal, now)
return principal
return None

View File

@@ -12,8 +12,6 @@ from typing import Any, Dict, List, Optional
try:
import myfsio_core as _rc
if not hasattr(_rc, "md5_file"):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_HAS_RUST = False
@@ -164,111 +162,6 @@ class IntegrityHistoryStore:
return self.load()[offset : offset + limit]
class IntegrityCursorStore:
def __init__(self, storage_root: Path) -> None:
self.storage_root = storage_root
self._lock = threading.Lock()
def _get_path(self) -> Path:
return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"
def load(self) -> Dict[str, Any]:
path = self._get_path()
if not path.exists():
return {"buckets": {}}
try:
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if not isinstance(data.get("buckets"), dict):
return {"buckets": {}}
return data
except (OSError, ValueError, KeyError):
return {"buckets": {}}
def save(self, data: Dict[str, Any]) -> None:
path = self._get_path()
path.parent.mkdir(parents=True, exist_ok=True)
try:
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2)
except OSError as e:
logger.error("Failed to save integrity cursor: %s", e)
def update_bucket(
self,
bucket_name: str,
timestamp: float,
last_key: Optional[str] = None,
completed: bool = False,
) -> None:
with self._lock:
data = self.load()
entry = data["buckets"].get(bucket_name, {})
if completed:
entry["last_scanned"] = timestamp
entry.pop("last_key", None)
entry["completed"] = True
else:
entry["last_scanned"] = timestamp
if last_key is not None:
entry["last_key"] = last_key
entry["completed"] = False
data["buckets"][bucket_name] = entry
self.save(data)
def clean_stale(self, existing_buckets: List[str]) -> None:
with self._lock:
data = self.load()
existing_set = set(existing_buckets)
stale_keys = [k for k in data["buckets"] if k not in existing_set]
if stale_keys:
for k in stale_keys:
del data["buckets"][k]
self.save(data)
def get_last_key(self, bucket_name: str) -> Optional[str]:
data = self.load()
entry = data.get("buckets", {}).get(bucket_name)
if entry is None:
return None
return entry.get("last_key")
def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
data = self.load()
buckets_info = data.get("buckets", {})
incomplete = []
complete = []
for name in bucket_names:
entry = buckets_info.get(name)
if entry is None:
incomplete.append((name, 0.0))
elif entry.get("last_key") is not None:
incomplete.append((name, entry.get("last_scanned", 0.0)))
else:
complete.append((name, entry.get("last_scanned", 0.0)))
incomplete.sort(key=lambda x: x[1])
complete.sort(key=lambda x: x[1])
return [n for n, _ in incomplete] + [n for n, _ in complete]
def get_info(self) -> Dict[str, Any]:
data = self.load()
buckets = data.get("buckets", {})
return {
"tracked_buckets": len(buckets),
"buckets": {
name: {
"last_scanned": info.get("last_scanned"),
"last_key": info.get("last_key"),
"completed": info.get("completed", False),
}
for name, info in buckets.items()
},
}
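Annotation: as a usage sketch (with the class above in scope), the cursor store lets a bounded scan resume where the previous batch stopped — buckets with a saved last_key sort first, oldest scan first:

from pathlib import Path
import tempfile

root = Path(tempfile.mkdtemp())
store = IntegrityCursorStore(root)

# Bucket "a" ran out of batch budget mid-scan; "b" finished cleanly.
store.update_bucket("a", timestamp=100.0, last_key="photos/0042.jpg")
store.update_bucket("b", timestamp=200.0, completed=True)

store.get_bucket_order(["a", "b", "c"])  # -> ["c", "a", "b"]
store.get_last_key("a")                  # -> "photos/0042.jpg"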
MAX_ISSUES = 500
@@ -287,7 +180,6 @@ class IntegrityChecker:
auto_heal: bool = False,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -297,11 +189,7 @@ class IntegrityChecker:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
self.cursor_store = IntegrityCursorStore(self.storage_root)
def start(self) -> None:
if self._timer is not None:
@@ -341,79 +229,52 @@ class IntegrityChecker:
self._schedule_next()
def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
if not self._lock.acquire(blocking=False):
raise RuntimeError("Integrity scan is already in progress")
effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
effective_dry_run = dry_run if dry_run is not None else self.dry_run
try:
self._scanning = True
self._scan_start_time = time.time()
start = time.time()
result = IntegrityResult()
effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
effective_dry_run = dry_run if dry_run is not None else self.dry_run
bucket_names = self._list_bucket_names()
start = self._scan_start_time
result = IntegrityResult()
for bucket_name in bucket_names:
if result.objects_scanned >= self.batch_size:
break
result.buckets_scanned += 1
self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
bucket_names = self._list_bucket_names()
self.cursor_store.clean_stale(bucket_names)
ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)
result.execution_time_seconds = time.time() - start
for bucket_name in ordered_buckets:
if self._batch_exhausted(result):
break
result.buckets_scanned += 1
cursor_key = self.cursor_store.get_last_key(bucket_name)
key_corrupted = self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
key_orphaned = self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
key_phantom = self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
returned_keys = [k for k in (key_corrupted, key_orphaned, key_phantom) if k is not None]
bucket_exhausted = self._batch_exhausted(result)
if bucket_exhausted and returned_keys:
self.cursor_store.update_bucket(bucket_name, time.time(), last_key=min(returned_keys))
else:
self.cursor_store.update_bucket(bucket_name, time.time(), completed=True)
result.execution_time_seconds = time.time() - start
if result.has_issues or result.errors:
logger.info(
"Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
"stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
result.execution_time_seconds,
result.corrupted_objects,
result.orphaned_objects,
result.phantom_metadata,
result.stale_versions,
result.etag_cache_inconsistencies,
result.legacy_metadata_drifts,
result.issues_healed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
record = IntegrityExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
auto_heal=effective_auto_heal,
if result.has_issues or result.errors:
logger.info(
"Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
"stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
result.execution_time_seconds,
result.corrupted_objects,
result.orphaned_objects,
result.phantom_metadata,
result.stale_versions,
result.etag_cache_inconsistencies,
result.legacy_metadata_drifts,
result.issues_healed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
self.history_store.add(record)
return result
finally:
self._scanning = False
self._scan_start_time = None
self._lock.release()
record = IntegrityExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
auto_heal=effective_auto_heal,
)
self.history_store.add(record)
def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
if self._scanning:
return False
t = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
t.start()
return True
return result
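Annotation: the concurrency guard in run_now/run_async reduces to a simple pattern — a non-blocking lock rejects overlapping scans, and run_async fires the same entry point on a daemon thread. Distilled:

import threading
import time

class Scanner:
    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._scanning = False

    def run_now(self) -> str:
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("scan already in progress")
        try:
            self._scanning = True
            time.sleep(0.1)  # stand-in for the real scan work
            return "done"
        finally:
            self._scanning = False
            self._lock.release()

    def run_async(self) -> bool:
        if self._scanning:
            return False  # cheap early refusal; the lock is the real gate
        threading.Thread(target=self.run_now, daemon=True).start()
        return True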
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
@@ -428,186 +289,105 @@ class IntegrityChecker:
pass
return names
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _batch_exhausted(self, result: IntegrityResult) -> bool:
return self._shutdown or result.objects_scanned >= self.batch_size
def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
if len(result.issues) < MAX_ISSUES:
result.issues.append(issue)
def _collect_index_keys(
self, meta_root: Path, cursor_key: Optional[str] = None,
) -> Dict[str, Dict[str, Any]]:
all_keys: Dict[str, Dict[str, Any]] = {}
def _check_corrupted_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return all_keys
return
try:
for index_file in meta_root.rglob("_index.json"):
if result.objects_scanned >= self.batch_size:
return
if not index_file.is_file():
continue
rel_dir = index_file.parent.relative_to(meta_root)
dir_prefix = "" if rel_dir == Path(".") else rel_dir.as_posix()
if cursor_key is not None and dir_prefix:
full_prefix = dir_prefix + "/"
if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
continue
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
continue
for key_name, entry in index_data.items():
full_key = (dir_prefix + "/" + key_name) if dir_prefix else key_name
if cursor_key is not None and full_key <= cursor_key:
for key_name, entry in list(index_data.items()):
if result.objects_scanned >= self.batch_size:
return
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key_name
else:
full_key = rel_dir.as_posix() + "/" + key_name
object_path = bucket_path / full_key
if not object_path.exists():
continue
all_keys[full_key] = {
"entry": entry,
"index_file": index_file,
"key_name": key_name,
}
except OSError:
pass
return all_keys
def _walk_bucket_files_sorted(
self, bucket_path: Path, cursor_key: Optional[str] = None,
):
def _walk(dir_path: Path, prefix: str):
try:
entries = list(os.scandir(dir_path))
except OSError:
return
result.objects_scanned += 1
def _sort_key(e):
if e.is_dir(follow_symlinks=False):
return e.name + "/"
return e.name
entries.sort(key=_sort_key)
for entry in entries:
if entry.is_dir(follow_symlinks=False):
if not prefix and entry.name in self.INTERNAL_FOLDERS:
meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
stored_etag = meta.get("__etag__")
if not stored_etag:
continue
new_prefix = (prefix + "/" + entry.name) if prefix else entry.name
if cursor_key is not None:
full_prefix = new_prefix + "/"
if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
continue
yield from _walk(Path(entry.path), new_prefix)
elif entry.is_file(follow_symlinks=False):
full_key = (prefix + "/" + entry.name) if prefix else entry.name
if cursor_key is not None and full_key <= cursor_key:
try:
actual_etag = _compute_etag(object_path)
except OSError:
continue
yield full_key
yield from _walk(bucket_path, "")
if actual_etag != stored_etag:
result.corrupted_objects += 1
issue = IntegrityIssue(
issue_type="corrupted_object",
bucket=bucket_name,
key=full_key,
detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
)
def _check_corrupted_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return None
last_key = None
try:
all_keys = self._collect_index_keys(meta_root, cursor_key)
sorted_keys = sorted(all_keys.keys())
for full_key in sorted_keys:
if self._throttle():
return last_key
if self._batch_exhausted(result):
return last_key
info = all_keys[full_key]
entry = info["entry"]
index_file = info["index_file"]
key_name = info["key_name"]
object_path = bucket_path / full_key
if not object_path.exists():
continue
result.objects_scanned += 1
last_key = full_key
meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
stored_etag = meta.get("__etag__")
if not stored_etag:
continue
try:
actual_etag = _compute_etag(object_path)
except OSError:
continue
if actual_etag != stored_etag:
result.corrupted_objects += 1
issue = IntegrityIssue(
issue_type="corrupted_object",
bucket=bucket_name,
key=full_key,
detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
)
if auto_heal and not dry_run:
try:
stat = object_path.stat()
meta["__etag__"] = actual_etag
meta["__size__"] = str(stat.st_size)
meta["__last_modified__"] = str(stat.st_mtime)
if auto_heal and not dry_run:
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
index_data = {}
index_data[key_name] = {"metadata": meta}
self._atomic_write_index(index_file, index_data)
issue.healed = True
issue.heal_action = "updated etag in index"
result.issues_healed += 1
except OSError as e:
result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")
stat = object_path.stat()
meta["__etag__"] = actual_etag
meta["__size__"] = str(stat.st_size)
meta["__last_modified__"] = str(stat.st_mtime)
index_data[key_name] = {"metadata": meta}
self._atomic_write_index(index_file, index_data)
issue.healed = True
issue.heal_action = "updated etag in index"
result.issues_healed += 1
except OSError as e:
result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")
self._add_issue(result, issue)
self._add_issue(result, issue)
except OSError as e:
result.errors.append(f"check corrupted {bucket_name}: {e}")
return last_key
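Annotation: the corruption check hinges on recomputing an object's ETag and comparing it to the stored `__etag__`. Assuming the plain (non-multipart) case where the ETag is an MD5 hex digest — consistent with the `md5_file` binding mentioned earlier — a chunked recompute looks like:

import hashlib
from pathlib import Path

def compute_etag(path: Path, chunk_size: int = 1024 * 1024) -> str:
    # Stream in 1 MiB chunks so large objects never load fully into memory.
    digest = hashlib.md5()
    with path.open("rb") as handle:
        while chunk := handle.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()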
def _check_orphaned_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
last_key = None
try:
for full_key in self._walk_bucket_files_sorted(bucket_path, cursor_key):
if self._throttle():
return last_key
if self._batch_exhausted(result):
return last_key
for entry in bucket_path.rglob("*"):
if result.objects_scanned >= self.batch_size:
return
if not entry.is_file():
continue
try:
rel = entry.relative_to(bucket_path)
except ValueError:
continue
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
continue
result.objects_scanned += 1
last_key = full_key
key_path = Path(full_key)
key_name = key_path.name
parent = key_path.parent
full_key = rel.as_posix()
key_name = rel.name
parent = rel.parent
if parent == Path("."):
index_path = meta_root / "_index.json"
@@ -633,9 +413,8 @@ class IntegrityChecker:
if auto_heal and not dry_run:
try:
object_path = bucket_path / full_key
etag = _compute_etag(object_path)
stat = object_path.stat()
etag = _compute_etag(entry)
stat = entry.stat()
meta = {
"__etag__": etag,
"__size__": str(stat.st_size),
@@ -658,56 +437,51 @@ class IntegrityChecker:
self._add_issue(result, issue)
except OSError as e:
result.errors.append(f"check orphaned {bucket_name}: {e}")
return last_key
def _check_phantom_metadata(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return None
return
last_key = None
try:
all_keys = self._collect_index_keys(meta_root, cursor_key)
sorted_keys = sorted(all_keys.keys())
for index_file in meta_root.rglob("_index.json"):
if not index_file.is_file():
continue
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
continue
heal_by_index: Dict[Path, List[str]] = {}
keys_to_remove = []
for key_name in list(index_data.keys()):
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key_name
else:
full_key = rel_dir.as_posix() + "/" + key_name
for full_key in sorted_keys:
if self._batch_exhausted(result):
break
object_path = bucket_path / full_key
if not object_path.exists():
result.phantom_metadata += 1
issue = IntegrityIssue(
issue_type="phantom_metadata",
bucket=bucket_name,
key=full_key,
detail="metadata entry without file on disk",
)
if auto_heal and not dry_run:
keys_to_remove.append(key_name)
issue.healed = True
issue.heal_action = "removed stale index entry"
result.issues_healed += 1
self._add_issue(result, issue)
result.objects_scanned += 1
last_key = full_key
object_path = bucket_path / full_key
if not object_path.exists():
result.phantom_metadata += 1
info = all_keys[full_key]
issue = IntegrityIssue(
issue_type="phantom_metadata",
bucket=bucket_name,
key=full_key,
detail="metadata entry without file on disk",
)
if auto_heal and not dry_run:
index_file = info["index_file"]
heal_by_index.setdefault(index_file, []).append(info["key_name"])
issue.healed = True
issue.heal_action = "removed stale index entry"
result.issues_healed += 1
self._add_issue(result, issue)
if heal_by_index and auto_heal and not dry_run:
for index_file, keys_to_remove in heal_by_index.items():
if keys_to_remove and auto_heal and not dry_run:
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
for k in keys_to_remove:
index_data.pop(k, None)
if index_data:
@@ -718,13 +492,10 @@ class IntegrityChecker:
result.errors.append(f"heal phantom {bucket_name}: {e}")
except OSError as e:
result.errors.append(f"check phantom {bucket_name}: {e}")
return last_key
def _check_stale_versions(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR
if not versions_root.exists():
@@ -732,10 +503,6 @@ class IntegrityChecker:
try:
for key_dir in versions_root.rglob("*"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not key_dir.is_dir():
continue
@@ -743,9 +510,6 @@ class IntegrityChecker:
json_files = {f.stem: f for f in key_dir.glob("*.json")}
for stem, bin_file in bin_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in json_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -765,9 +529,6 @@ class IntegrityChecker:
self._add_issue(result, issue)
for stem, json_file in json_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in bin_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -791,8 +552,6 @@ class IntegrityChecker:
def _check_etag_cache(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"
if not etag_index_path.exists():
@@ -810,9 +569,6 @@ class IntegrityChecker:
found_mismatch = False
for full_key, cached_etag in etag_cache.items():
if self._batch_exhausted(result):
break
result.objects_scanned += 1
key_path = Path(full_key)
key_name = key_path.name
parent = key_path.parent
@@ -862,8 +618,6 @@ class IntegrityChecker:
def _check_legacy_metadata(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
legacy_meta_root = self.storage_root / bucket_name / ".meta"
if not legacy_meta_root.exists():
return
@@ -872,14 +626,9 @@ class IntegrityChecker:
try:
for meta_file in legacy_meta_root.rglob("*.meta.json"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not meta_file.is_file():
continue
result.objects_scanned += 1
try:
rel = meta_file.relative_to(legacy_meta_root)
except ValueError:
@@ -979,17 +728,11 @@ class IntegrityChecker:
return [r.to_dict() for r in records]
def get_status(self) -> dict:
status: Dict[str, Any] = {
return {
"enabled": not self._shutdown or self._timer is not None,
"running": self._timer is not None and not self._shutdown,
"scanning": self._scanning,
"interval_hours": self.interval_seconds / 3600.0,
"batch_size": self.batch_size,
"auto_heal": self.auto_heal,
"dry_run": self.dry_run,
"io_throttle_ms": round(self._io_throttle * 1000),
}
if self._scanning and self._scan_start_time is not None:
status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
status["cursor"] = self.cursor_store.get_info()
return status

View File

@@ -19,10 +19,6 @@ from defusedxml.ElementTree import fromstring
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"verify_sigv4_signature", "derive_signing_key", "clear_signing_key_cache",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None
@@ -205,11 +201,6 @@ _SIGNING_KEY_CACHE_LOCK = threading.Lock()
_SIGNING_KEY_CACHE_TTL = 60.0
_SIGNING_KEY_CACHE_MAX_SIZE = 256
_SIGV4_HEADER_RE = re.compile(
r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)"
)
_SIGV4_REQUIRED_HEADERS = frozenset({'host', 'x-amz-date'})
def clear_signing_key_cache() -> None:
if _HAS_RUST:
@@ -268,7 +259,10 @@ def _get_canonical_uri(req: Any) -> str:
def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
match = _SIGV4_HEADER_RE.match(auth_header)
match = re.match(
r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)",
auth_header,
)
if not match:
return None
@@ -292,9 +286,14 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if time_diff > tolerance:
raise IamError("Request timestamp too old or too far in the future")
required_headers = {'host', 'x-amz-date'}
signed_headers_set = set(signed_headers_str.split(';'))
if not _SIGV4_REQUIRED_HEADERS.issubset(signed_headers_set):
if not ({'host', 'date'}.issubset(signed_headers_set)):
if not required_headers.issubset(signed_headers_set):
if 'date' in signed_headers_set:
required_headers.remove('x-amz-date')
required_headers.add('date')
if not required_headers.issubset(signed_headers_set):
raise IamError("Required headers not signed")
canonical_uri = _get_canonical_uri(req)
@@ -302,12 +301,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if _HAS_RUST:
query_params = list(req.args.items(multi=True))
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, payload_hash, amz_date, date_stamp, region,
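Annotation: whether the Rust path or the Python fallback verifies the request, the signing key is the standard SigV4 HMAC chain over the credential scope. For reference:

import hashlib
import hmac

def _hmac(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

def derive_signing_key(secret_key: str, date_stamp: str, region: str, service: str) -> bytes:
    k_date = _hmac(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = _hmac(k_date, region)
    k_service = _hmac(k_region, service)
    return _hmac(k_service, "aws4_request")

# signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()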
@@ -396,12 +390,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
if _HAS_RUST:
query_params = [(k, v) for k, v in req.args.items(multi=True) if k != "X-Amz-Signature"]
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, "UNSIGNED-PAYLOAD", amz_date, date_stamp, region,
@@ -499,7 +488,7 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
iam_error: IamError | None = None
if principal is not None:
try:
_iam().authorize(principal, bucket_name, action, object_key=object_key)
_iam().authorize(principal, bucket_name, action)
iam_allowed = True
except IamError as exc:
iam_error = exc
@@ -534,6 +523,21 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
raise iam_error or IamError("Access denied")
def _enforce_bucket_policy(principal: Principal | None, bucket_name: str | None, object_key: str | None, action: str) -> None:
if not bucket_name:
return
policy_context = _build_policy_context()
decision = _bucket_policies().evaluate(
principal.access_key if principal else None,
bucket_name,
object_key,
action,
policy_context,
)
if decision == "deny":
raise IamError("Access denied by bucket policy")
def _object_principal(action: str, bucket_name: str, object_key: str):
principal, error = _require_principal()
try:
@@ -542,7 +546,121 @@ def _object_principal(action: str, bucket_name: str, object_key: str):
except IamError as exc:
if not error:
return None, _error_response("AccessDenied", str(exc), 403)
if not _has_presign_params():
return None, error
try:
principal = _validate_presigned_request(action, bucket_name, object_key)
_enforce_bucket_policy(principal, bucket_name, object_key, action)
return principal, None
except IamError as exc:
return None, _error_response("AccessDenied", str(exc), 403)
def _has_presign_params() -> bool:
return bool(request.args.get("X-Amz-Algorithm"))
def _validate_presigned_request(action: str, bucket_name: str, object_key: str) -> Principal:
algorithm = request.args.get("X-Amz-Algorithm")
credential = request.args.get("X-Amz-Credential")
amz_date = request.args.get("X-Amz-Date")
signed_headers = request.args.get("X-Amz-SignedHeaders")
expires = request.args.get("X-Amz-Expires")
signature = request.args.get("X-Amz-Signature")
if not all([algorithm, credential, amz_date, signed_headers, expires, signature]):
raise IamError("Malformed presigned URL")
if algorithm != "AWS4-HMAC-SHA256":
raise IamError("Unsupported signing algorithm")
parts = credential.split("/")
if len(parts) != 5:
raise IamError("Invalid credential scope")
access_key, date_stamp, region, service, terminal = parts
if terminal != "aws4_request":
raise IamError("Invalid credential scope")
config_region = current_app.config["AWS_REGION"]
config_service = current_app.config["AWS_SERVICE"]
if region != config_region or service != config_service:
raise IamError("Credential scope mismatch")
try:
expiry = int(expires)
except ValueError as exc:
raise IamError("Invalid expiration") from exc
min_expiry = current_app.config.get("PRESIGNED_URL_MIN_EXPIRY_SECONDS", 1)
max_expiry = current_app.config.get("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604800)
if expiry < min_expiry or expiry > max_expiry:
raise IamError(f"Expiration must be between {min_expiry} second(s) and {max_expiry} seconds")
try:
request_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
except ValueError as exc:
raise IamError("Invalid X-Amz-Date") from exc
now = datetime.now(timezone.utc)
tolerance = timedelta(seconds=current_app.config.get("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900))
if request_time > now + tolerance:
raise IamError("Request date is too far in the future")
if now > request_time + timedelta(seconds=expiry):
raise IamError("Presigned URL expired")
signed_headers_list = [header.strip().lower() for header in signed_headers.split(";") if header]
signed_headers_list.sort()
canonical_headers = _canonical_headers_from_request(signed_headers_list)
canonical_query = _canonical_query_from_request()
payload_hash = request.args.get("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
canonical_request = "\n".join(
[
request.method,
_canonical_uri(bucket_name, object_key),
canonical_query,
canonical_headers,
";".join(signed_headers_list),
payload_hash,
]
)
hashed_request = hashlib.sha256(canonical_request.encode()).hexdigest()
scope = f"{date_stamp}/{region}/{service}/aws4_request"
string_to_sign = "\n".join([
"AWS4-HMAC-SHA256",
amz_date,
scope,
hashed_request,
])
secret = _iam().secret_for_key(access_key)
signing_key = _derive_signing_key(secret, date_stamp, region, service)
expected = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
if not hmac.compare_digest(expected, signature):
raise IamError("Signature mismatch")
return _iam().principal_for_key(access_key)
def _canonical_query_from_request() -> str:
parts = []
for key in sorted(request.args.keys()):
if key == "X-Amz-Signature":
continue
values = request.args.getlist(key)
encoded_key = quote(str(key), safe="-_.~")
for value in sorted(values):
encoded_value = quote(str(value), safe="-_.~")
parts.append(f"{encoded_key}={encoded_value}")
return "&".join(parts)
def _canonical_headers_from_request(headers: list[str]) -> str:
lines = []
for header in headers:
if header == "host":
api_base = current_app.config.get("API_BASE_URL")
if api_base:
value = urlparse(api_base).netloc
else:
value = request.host
else:
value = request.headers.get(header, "")
canonical_value = " ".join(value.strip().split()) if value else ""
lines.append(f"{header}:{canonical_value}")
return "\n".join(lines) + "\n"
def _canonical_uri(bucket_name: str, object_key: str | None) -> str:
@@ -608,8 +726,8 @@ def _generate_presigned_url(
host = parsed.netloc
scheme = parsed.scheme
else:
host = request.host
scheme = request.scheme or "http"
host = request.headers.get("X-Forwarded-Host", request.host)
scheme = request.headers.get("X-Forwarded-Proto", request.scheme or "http")
canonical_headers = f"host:{host}\n"
canonical_request = "\n".join(
@@ -882,7 +1000,7 @@ def _render_encryption_document(config: dict[str, Any]) -> Element:
return root
def _stream_file(path, chunk_size: int = 1024 * 1024):
def _stream_file(path, chunk_size: int = 256 * 1024):
with path.open("rb") as handle:
while True:
chunk = handle.read(chunk_size)
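Annotation: the hunk cuts this generator short; the tail is the standard read-until-EOF pattern, which presumably ends:

            if not chunk:
                break
            yield chunk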
@@ -1017,7 +1135,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "versioning")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1064,7 +1182,7 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "tagging")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1229,7 +1347,7 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "cors")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1282,7 +1400,7 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "encryption")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1357,7 +1475,7 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "share")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1600,12 +1718,12 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
"""Handle bucket lifecycle configuration (GET/PUT/DELETE /<bucket>?lifecycle)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "lifecycle")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1764,12 +1882,12 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
"""Handle bucket quota configuration (GET/PUT/DELETE /<bucket>?quota)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "quota")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1846,7 +1964,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1892,7 +2010,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "notification")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1988,7 +2106,7 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "logging")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2130,7 +2248,7 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2206,7 +2324,7 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2539,7 +2657,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "create_bucket")
_authorize_action(principal, bucket_name, "write")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2556,7 +2674,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "delete_bucket")
_authorize_action(principal, bucket_name, "delete")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2833,12 +2951,9 @@ def object_handler(bucket_name: str, object_key: str):
is_encrypted = "x-amz-server-side-encryption" in metadata
cond_etag = metadata.get("__etag__")
_etag_was_healed = False
if not cond_etag and not is_encrypted:
try:
cond_etag = storage._compute_etag(path)
_etag_was_healed = True
storage.heal_missing_etag(bucket_name, object_key, cond_etag)
except OSError:
cond_etag = None
if cond_etag:
@@ -2884,7 +2999,7 @@ def object_handler(bucket_name: str, object_key: str):
try:
stat = path.stat()
file_size = stat.st_size
etag = cond_etag or storage._compute_etag(path)
etag = metadata.get("__etag__") or storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
@@ -2932,7 +3047,7 @@ def object_handler(bucket_name: str, object_key: str):
try:
stat = path.stat()
response = Response(status=200)
etag = cond_etag or storage._compute_etag(path)
etag = metadata.get("__etag__") or storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
@@ -3114,7 +3229,7 @@ def _bucket_replication_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "replication")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3197,7 +3312,7 @@ def _bucket_website_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "website")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3317,13 +3432,9 @@ def head_object(bucket_name: str, object_key: str) -> Response:
return error
try:
_authorize_action(principal, bucket_name, "read", object_key=object_key)
storage = _storage()
path = storage.get_object_path(bucket_name, object_key)
metadata = storage.get_object_metadata(bucket_name, object_key)
etag = metadata.get("__etag__")
if not etag:
etag = storage._compute_etag(path)
storage.heal_missing_etag(bucket_name, object_key, etag)
path = _storage().get_object_path(bucket_name, object_key)
metadata = _storage().get_object_metadata(bucket_name, object_key)
etag = metadata.get("__etag__") or _storage()._compute_etag(path)
head_mtime = float(metadata["__last_modified__"]) if "__last_modified__" in metadata else None
if head_mtime is None:

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
import hashlib
import json
import logging
import os
import re
import shutil
@@ -21,21 +20,12 @@ from typing import Any, BinaryIO, Dict, Generator, List, Optional
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"validate_bucket_name", "validate_object_key", "md5_file",
"shallow_scan", "bucket_stats_scan", "search_objects_scan",
"stream_to_file_with_md5", "assemble_parts_with_md5",
"build_object_cache", "read_index_entry", "write_index_entry",
"delete_index_entry", "check_bucket_contents",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None
_HAS_RUST = False
logger = logging.getLogger(__name__)
# Platform-specific file locking
if os.name == "nt":
import msvcrt
@@ -200,7 +190,6 @@ class ObjectStorage:
object_cache_max_size: int = 100,
bucket_config_cache_ttl: float = 30.0,
object_key_max_length_bytes: int = 1024,
meta_read_cache_max: int = 2048,
) -> None:
self.root = Path(root)
self.root.mkdir(parents=True, exist_ok=True)
@@ -219,7 +208,7 @@ class ObjectStorage:
self._sorted_key_cache: Dict[str, tuple[list[str], int]] = {}
self._meta_index_locks: Dict[str, threading.Lock] = {}
self._meta_read_cache: OrderedDict[tuple, Optional[Dict[str, Any]]] = OrderedDict()
self._meta_read_cache_max = meta_read_cache_max
self._meta_read_cache_max = 2048
self._cleanup_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="ParentCleanup")
self._stats_mem: Dict[str, Dict[str, int]] = {}
self._stats_serial: Dict[str, int] = {}
@@ -229,7 +218,6 @@ class ObjectStorage:
self._stats_flush_timer: Optional[threading.Timer] = None
self._etag_index_dirty: set[str] = set()
self._etag_index_flush_timer: Optional[threading.Timer] = None
self._etag_index_mem: Dict[str, tuple[Dict[str, str], float]] = {}
def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
with self._registry_lock:
@@ -418,10 +406,6 @@ class ObjectStorage:
self._stats_serial[bucket_id] = self._stats_serial.get(bucket_id, 0) + 1
self._stats_mem_time[bucket_id] = time.monotonic()
self._stats_dirty.add(bucket_id)
needs_immediate = data["objects"] == 0 and objects_delta < 0
if needs_immediate:
self._flush_stats()
else:
self._schedule_stats_flush()
def _schedule_stats_flush(self) -> None:
@@ -439,7 +423,7 @@ class ObjectStorage:
cache_path = self._system_bucket_root(bucket_id) / "stats.json"
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
self._atomic_write_json(cache_path, data, sync=False)
self._atomic_write_json(cache_path, data)
except OSError:
pass
@@ -614,7 +598,14 @@ class ObjectStorage:
is_truncated=False, next_continuation_token=None,
)
meta_cache: Dict[str, str] = self._get_etag_index(bucket_id)
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
if etag_index_path.exists():
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
pass
entries_files: list[tuple[str, int, float, Optional[str]]] = []
entries_dirs: list[str] = []
@@ -719,73 +710,6 @@ class ObjectStorage:
next_continuation_token=next_token,
)
def iter_objects_shallow(
self,
bucket_name: str,
*,
prefix: str = "",
delimiter: str = "/",
) -> Generator[tuple[str, ObjectMeta | str], None, None]:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
raise BucketNotFoundError("Bucket does not exist")
bucket_id = bucket_path.name
target_dir = bucket_path
if prefix:
safe_prefix_path = Path(prefix.rstrip("/"))
if ".." in safe_prefix_path.parts:
return
target_dir = bucket_path / safe_prefix_path
try:
resolved = target_dir.resolve()
bucket_resolved = bucket_path.resolve()
if not str(resolved).startswith(str(bucket_resolved) + os.sep) and resolved != bucket_resolved:
return
except (OSError, ValueError):
return
if not target_dir.exists() or not target_dir.is_dir():
return
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
if etag_index_path.exists():
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
pass
try:
with os.scandir(str(target_dir)) as it:
for entry in it:
name = entry.name
if name in self.INTERNAL_FOLDERS:
continue
if entry.is_dir(follow_symlinks=False):
yield ("folder", prefix + name + delimiter)
elif entry.is_file(follow_symlinks=False):
key = prefix + name
try:
st = entry.stat()
etag = meta_cache.get(key)
if etag is None:
safe_key = PurePosixPath(key)
meta = self._read_metadata(bucket_id, Path(safe_key))
etag = meta.get("__etag__") if meta else None
yield ("object", ObjectMeta(
key=key,
size=st.st_size,
last_modified=datetime.fromtimestamp(st.st_mtime, timezone.utc),
etag=etag,
metadata=None,
))
except OSError:
pass
except OSError:
return
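Annotation: the removed iter_objects_shallow boils down to a single non-recursive os.scandir pass that classifies entries as folders or objects. A stripped-down sketch of that listing shape (internal folder names here are illustrative):

import os
from pathlib import Path

INTERNAL = {".myfsio.sys", ".meta"}  # illustrative internal folder names

def shallow_list(bucket_path: Path, prefix: str = "", delimiter: str = "/"):
    with os.scandir(bucket_path) as it:
        for entry in sorted(it, key=lambda e: e.name):
            if entry.name in INTERNAL:
                continue
            if entry.is_dir(follow_symlinks=False):
                yield ("folder", prefix + entry.name + delimiter)
            elif entry.is_file(follow_symlinks=False):
                st = entry.stat()
                yield ("object", {"key": prefix + entry.name, "size": st.st_size})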
def _shallow_via_full_scan(
self,
bucket_name: str,
@@ -1084,30 +1008,6 @@ class ObjectStorage:
safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
return self._read_metadata(bucket_path.name, safe_key) or {}
def heal_missing_etag(self, bucket_name: str, object_key: str, etag: str) -> None:
"""Persist a computed ETag back to metadata (self-heal on read)."""
try:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
return
bucket_id = bucket_path.name
safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
existing = self._read_metadata(bucket_id, safe_key) or {}
if existing.get("__etag__"):
return
existing["__etag__"] = etag
self._write_metadata(bucket_id, safe_key, existing)
with self._obj_cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
obj = cached[0].get(safe_key.as_posix())
if obj and not obj.etag:
obj.etag = etag
self._etag_index_dirty.add(bucket_id)
self._schedule_etag_index_flush()
except Exception:
logger.warning("Failed to heal missing ETag for %s/%s", bucket_name, object_key)
def _cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
"""Remove empty parent directories in a background thread.
@@ -2117,7 +2017,6 @@ class ObjectStorage:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(raw["etag_cache"], f)
self._etag_index_mem[bucket_id] = (dict(raw["etag_cache"]), etag_index_path.stat().st_mtime)
except OSError:
pass
for key, size, mtime, etag in raw["objects"]:
@@ -2241,7 +2140,6 @@ class ObjectStorage:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(meta_cache, f)
self._etag_index_mem[bucket_id] = (dict(meta_cache), etag_index_path.stat().st_mtime)
except OSError:
pass
@@ -2355,25 +2253,6 @@ class ObjectStorage:
self._etag_index_dirty.add(bucket_id)
self._schedule_etag_index_flush()
def _get_etag_index(self, bucket_id: str) -> Dict[str, str]:
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
try:
current_mtime = etag_index_path.stat().st_mtime
except OSError:
return {}
cached = self._etag_index_mem.get(bucket_id)
if cached:
cache_dict, cached_mtime = cached
if current_mtime == cached_mtime:
return cache_dict
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
data = json.load(f)
self._etag_index_mem[bucket_id] = (data, current_mtime)
return data
except (OSError, json.JSONDecodeError):
return {}
def _schedule_etag_index_flush(self) -> None:
if self._etag_index_flush_timer is None or not self._etag_index_flush_timer.is_alive():
self._etag_index_flush_timer = threading.Timer(5.0, self._flush_etag_indexes)
@@ -2392,10 +2271,11 @@ class ObjectStorage:
index = {k: v.etag for k, v in objects.items() if v.etag}
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
try:
self._atomic_write_json(etag_index_path, index, sync=False)
self._etag_index_mem[bucket_id] = (index, etag_index_path.stat().st_mtime)
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(index, f)
except OSError:
logger.warning("Failed to flush etag index for bucket %s", bucket_id)
pass
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
"""Pre-warm the object cache for specified buckets or all buckets.
@@ -2437,15 +2317,14 @@ class ObjectStorage:
path.mkdir(parents=True, exist_ok=True)
@staticmethod
def _atomic_write_json(path: Path, data: Any, *, sync: bool = True) -> None:
def _atomic_write_json(path: Path, data: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
tmp_path = path.with_suffix(".tmp")
try:
with tmp_path.open("w", encoding="utf-8") as f:
json.dump(data, f)
if sync:
f.flush()
os.fsync(f.fileno())
f.flush()
os.fsync(f.fileno())
tmp_path.replace(path)
except BaseException:
try:

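Annotation: the tail of _atomic_write_json is cut off above. The full write-temp-then-replace pattern, including the optional fsync that one side of this hunk supports, is roughly:

import json
import os
from pathlib import Path
from typing import Any

def atomic_write_json(path: Path, data: Any, *, sync: bool = True) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    tmp_path = path.with_suffix(".tmp")
    try:
        with tmp_path.open("w", encoding="utf-8") as f:
            json.dump(data, f)
            if sync:
                f.flush()
                os.fsync(f.fileno())  # durable before the rename is visible
        tmp_path.replace(path)  # atomic swap on the same filesystem
    except BaseException:
        tmp_path.unlink(missing_ok=True)  # never leave a stale .tmp behind
        raise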
290 app/ui.py
View File

@@ -618,77 +618,20 @@ def stream_bucket_objects(bucket_name: str):
prefix = request.args.get("prefix") or None
delimiter = request.args.get("delimiter") or None
storage = _storage()
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
client = get_session_s3_client()
except (PermissionError, RuntimeError) as exc:
return jsonify({"error": str(exc)}), 403
versioning_enabled = get_versioning_via_s3(client, bucket_name)
url_templates = build_url_templates(bucket_name)
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
def generate():
yield json.dumps({
"type": "meta",
"versioning_enabled": versioning_enabled,
"url_templates": url_templates,
}) + "\n"
yield json.dumps({"type": "count", "total_count": 0}) + "\n"
running_count = 0
try:
if delimiter:
for item_type, item in storage.iter_objects_shallow(
bucket_name, prefix=prefix or "", delimiter=delimiter,
):
if item_type == "folder":
yield json.dumps({"type": "folder", "prefix": item}) + "\n"
else:
last_mod = item.last_modified
yield json.dumps({
"type": "object",
"key": item.key,
"size": item.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": item.etag or "",
}) + "\n"
running_count += 1
if running_count % 1000 == 0:
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
else:
continuation_token = None
while True:
result = storage.list_objects(
bucket_name,
max_keys=1000,
continuation_token=continuation_token,
prefix=prefix,
)
for obj in result.objects:
last_mod = obj.last_modified
yield json.dumps({
"type": "object",
"key": obj.key,
"size": obj.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": obj.etag or "",
}) + "\n"
running_count += len(result.objects)
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
if not result.is_truncated:
break
continuation_token = result.next_continuation_token
except StorageError as exc:
yield json.dumps({"type": "error", "error": str(exc)}) + "\n"
return
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
yield json.dumps({"type": "done"}) + "\n"
return Response(
generate(),
stream_objects_ndjson(
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
delimiter=delimiter,
),
mimetype='application/x-ndjson',
headers={
'Cache-Control': 'no-cache',
@@ -1063,27 +1006,6 @@ def bulk_delete_objects(bucket_name: str):
return _respond(False, f"A maximum of {MAX_KEYS} objects can be deleted per request", status_code=400)
unique_keys = list(dict.fromkeys(cleaned))
folder_prefixes = [k for k in unique_keys if k.endswith("/")]
if folder_prefixes:
try:
client = get_session_s3_client()
for prefix in folder_prefixes:
unique_keys.remove(prefix)
paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
for obj in page.get("Contents", []):
if obj["Key"] not in unique_keys:
unique_keys.append(obj["Key"])
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
if isinstance(exc, ClientError):
err, status = handle_client_error(exc)
return _respond(False, err["error"], status_code=status)
return _respond(False, "S3 API server is unreachable", status_code=502)
if not unique_keys:
return _respond(False, "No objects found under the selected folders", status_code=400)
try:
_authorize_ui(principal, bucket_name, "delete")
except IamError as exc:
@@ -1114,17 +1036,13 @@ def bulk_delete_objects(bucket_name: str):
else:
try:
client = get_session_s3_client()
deleted = []
errors = []
for i in range(0, len(unique_keys), 1000):
batch = unique_keys[i:i + 1000]
objects_to_delete = [{"Key": k} for k in batch]
resp = client.delete_objects(
Bucket=bucket_name,
Delete={"Objects": objects_to_delete, "Quiet": False},
)
deleted.extend(d["Key"] for d in resp.get("Deleted", []))
errors.extend({"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", []))
objects_to_delete = [{"Key": k} for k in unique_keys]
resp = client.delete_objects(
Bucket=bucket_name,
Delete={"Objects": objects_to_delete, "Quiet": False},
)
deleted = [d["Key"] for d in resp.get("Deleted", [])]
errors = [{"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", [])]
for key in deleted:
_replication_manager().trigger_replication(bucket_name, key, action="delete")
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
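Annotation: S3's DeleteObjects call accepts at most 1,000 keys per request, which is what the chunked loop in this hunk accounts for. The batching skeleton, assuming a boto3-style client:

def delete_in_batches(client, bucket: str, keys: list[str]):
    deleted, errors = [], []
    for i in range(0, len(keys), 1000):  # 1,000 keys is the S3 API ceiling
        batch = [{"Key": k} for k in keys[i:i + 1000]]
        resp = client.delete_objects(
            Bucket=bucket,
            Delete={"Objects": batch, "Quiet": False},
        )
        deleted.extend(d["Key"] for d in resp.get("Deleted", []))
        errors.extend(
            {"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))}
            for e in resp.get("Errors", [])
        )
    return deleted, errors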
@@ -4123,182 +4041,6 @@ def get_peer_sync_stats(site_id: str):
return jsonify(stats)
@ui_bp.get("/system")
def system_dashboard():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
flash("Access denied: System page requires admin permissions", "danger")
return redirect(url_for("ui.buckets_overview"))
import platform as _platform
import sys
from app.version import APP_VERSION
try:
import myfsio_core as _rc
has_rust = True
except ImportError:
has_rust = False
gc = current_app.extensions.get("gc")
gc_status = gc.get_status() if gc else {"enabled": False}
gc_history_records = []
if gc:
raw = gc.get_history(limit=10, offset=0)
for rec in raw:
r = rec.get("result", {})
total_freed = r.get("temp_bytes_freed", 0) + r.get("multipart_bytes_freed", 0) + r.get("orphaned_version_bytes_freed", 0)
rec["bytes_freed_display"] = _format_bytes(total_freed)
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
gc_history_records.append(rec)
checker = current_app.extensions.get("integrity")
integrity_status = checker.get_status() if checker else {"enabled": False}
integrity_history_records = []
if checker:
raw = checker.get_history(limit=10, offset=0)
for rec in raw:
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
integrity_history_records.append(rec)
features = [
{"label": "Encryption (SSE-S3)", "enabled": current_app.config.get("ENCRYPTION_ENABLED", False)},
{"label": "KMS", "enabled": current_app.config.get("KMS_ENABLED", False)},
{"label": "Versioning Lifecycle", "enabled": current_app.config.get("LIFECYCLE_ENABLED", False)},
{"label": "Metrics History", "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False)},
{"label": "Operation Metrics", "enabled": current_app.config.get("OPERATION_METRICS_ENABLED", False)},
{"label": "Site Sync", "enabled": current_app.config.get("SITE_SYNC_ENABLED", False)},
{"label": "Website Hosting", "enabled": current_app.config.get("WEBSITE_HOSTING_ENABLED", False)},
{"label": "Garbage Collection", "enabled": current_app.config.get("GC_ENABLED", False)},
{"label": "Integrity Scanner", "enabled": current_app.config.get("INTEGRITY_ENABLED", False)},
]
return render_template(
"system.html",
principal=principal,
app_version=APP_VERSION,
storage_root=current_app.config.get("STORAGE_ROOT", "./data"),
platform=_platform.platform(),
python_version=sys.version.split()[0],
has_rust=has_rust,
features=features,
gc_status=gc_status,
gc_history=gc_history_records,
integrity_status=integrity_status,
integrity_history=integrity_history_records,
display_timezone=current_app.config.get("DISPLAY_TIMEZONE", "UTC"),
)
@ui_bp.post("/system/gc/run")
def system_gc_run():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"error": "GC is not enabled"}), 400
payload = request.get_json(silent=True) or {}
started = gc.run_async(dry_run=payload.get("dry_run"))
if not started:
return jsonify({"error": "GC is already in progress"}), 409
return jsonify({"status": "started"})
@ui_bp.get("/system/gc/status")
def system_gc_status():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"error": "GC is not enabled"}), 400
return jsonify(gc.get_status())
@ui_bp.get("/system/gc/history")
def system_gc_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 10)), 200)
offset = int(request.args.get("offset", 0))
records = gc.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
@ui_bp.post("/system/integrity/run")
def system_integrity_run():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"error": "Integrity checker is not enabled"}), 400
payload = request.get_json(silent=True) or {}
started = checker.run_async(
auto_heal=payload.get("auto_heal"),
dry_run=payload.get("dry_run"),
)
if not started:
return jsonify({"error": "A scan is already in progress"}), 409
return jsonify({"status": "started"})
@ui_bp.get("/system/integrity/status")
def system_integrity_status():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"error": "Integrity checker is not enabled"}), 400
return jsonify(checker.get_status())
@ui_bp.get("/system/integrity/history")
def system_integrity_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 10)), 200)
offset = int(request.args.get("offset", 0))
records = checker.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
@ui_bp.app_errorhandler(404)
def ui_not_found(error): # type: ignore[override]
prefix = ui_bp.url_prefix or ""



@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.4.3"
APP_VERSION = "0.3.9"
def get_version() -> str:


@@ -1,6 +1,5 @@
#!/bin/sh
set -e
ENGINE="${ENGINE:-rust}"
exec python run.py --prod --engine "$ENGINE"
# Run both services using the python runner in production mode
exec python run.py --prod

docs.md

@@ -180,9 +180,9 @@ All configuration is done via environment variables. The table below lists every
| Variable | Default | Notes |
| --- | --- | --- |
| `SERVER_THREADS` | `0` (auto) | Granian blocking threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent requests per worker (10-1000). Set to `0` for auto-calculation based on available RAM. |
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (128-4096). Set to `0` for auto-calculation (connection_limit × 2). |
| `SERVER_THREADS` | `0` (auto) | Waitress worker threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent connections (10-1000). Set to `0` for auto-calculation based on available RAM. |
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (64-4096). Set to `0` for auto-calculation (connection_limit × 2). |
| `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |
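Both revisions of this table document the same auto-calculation rules (threads from CPU cores ×2, backlog from connection_limit ×2). A hedged sketch of those rules; the RAM-based connection-limit heuristic is an assumption, since the table only says it is derived from available RAM:

```python
import os

def auto_server_settings(ram_bytes: int) -> dict:
    """Sketch of the documented auto-tuning rules; the RAM heuristic is assumed."""
    threads = min(max((os.cpu_count() or 1) * 2, 1), 64)   # CPU cores x 2, clamped to 1-64
    conn_limit = min(max(ram_bytes // (64 * 1024 * 1024), 10), 1000)  # assumption: ~64 MiB per connection
    backlog = min(max(conn_limit * 2, 128), 4096)          # connection_limit x 2, clamped
    return {"SERVER_THREADS": threads,
            "SERVER_CONNECTION_LIMIT": conn_limit,
            "SERVER_BACKLOG": backlog}

print(auto_server_settings(8 * 1024**3))  # e.g. an 8 GiB host
```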
### Logging
@@ -339,7 +339,7 @@ Before deploying to production, ensure you:
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
7. **Use `--prod` flag** - Runs with Granian instead of Flask dev server
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
8. **Set credential expiry** - Assign `expires_at` to non-admin users for time-limited access
### Proxy Configuration
@@ -758,7 +758,7 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
- **Create user**: supply a display name, optional JSON inline policy array, and optional credential expiry date.
- **Set expiry**: assign an expiration date to any user's credentials. Expired credentials are rejected at authentication time. The UI shows expiry badges and preset durations (1h, 24h, 7d, 30d, 90d).
- **Rotate secret**: generates a new secret key; the UI surfaces it once.
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. An optional `"prefix"` field restricts object-level actions to a key prefix (e.g., `"uploads/"`). Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.
> **Breaking Change (v0.2.0+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.
@@ -797,23 +797,13 @@ Both layers are evaluated for each request. A user must have permission in their
| --- | --- | --- |
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, manage object tags | `s3:PutObject`, `s3:PutObjectTagging`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects and versions | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteObjectTagging` |
| `create_bucket` | Create new buckets | `s3:CreateBucket` |
| `delete_bucket` | Delete buckets | `s3:DeleteBucket` |
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
| `versioning` | Manage bucket versioning configuration | `s3:GetBucketVersioning`, `s3:PutBucketVersioning` |
| `tagging` | Manage bucket-level tags | `s3:GetBucketTagging`, `s3:PutBucketTagging`, `s3:DeleteBucketTagging` |
| `encryption` | Manage bucket encryption configuration | `s3:GetEncryptionConfiguration`, `s3:PutEncryptionConfiguration`, `s3:DeleteEncryptionConfiguration` |
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
| `quota` | Manage bucket storage quotas | `s3:GetBucketQuota`, `s3:PutBucketQuota`, `s3:DeleteBucketQuota` |
| `object_lock` | Manage object lock, retention, and legal holds | `s3:GetObjectLockConfiguration`, `s3:PutObjectLockConfiguration`, `s3:PutObjectRetention`, `s3:GetObjectRetention`, `s3:PutObjectLegalHold`, `s3:GetObjectLegalHold` |
| `notification` | Manage bucket event notifications | `s3:GetBucketNotificationConfiguration`, `s3:PutBucketNotificationConfiguration`, `s3:DeleteBucketNotificationConfiguration` |
| `logging` | Manage bucket access logging | `s3:GetBucketLogging`, `s3:PutBucketLogging`, `s3:DeleteBucketLogging` |
| `website` | Manage static website hosting configuration | `s3:GetBucketWebsite`, `s3:PutBucketWebsite`, `s3:DeleteBucketWebsite` |
#### IAM Actions (User Management)
@@ -824,31 +814,25 @@ Both layers are evaluated for each request. A user must have permission in their
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
| `iam:create_key` | Create additional access keys for a user | `iam:CreateAccessKey` |
| `iam:delete_key` | Delete an access key from a user | `iam:DeleteAccessKey` |
| `iam:get_user` | View user details and access keys | `iam:GetUser` |
| `iam:get_policy` | View user policy configuration | `iam:GetPolicy` |
| `iam:disable_user` | Temporarily disable/enable a user account | `iam:DisableUser` |
| `iam:*` | **Admin wildcard** grants all IAM actions | — |
#### Wildcards
| Wildcard | Scope | Description |
| --- | --- | --- |
| `*` (in actions) | All S3 actions | Grants all 19 S3 actions including `list`, `read`, `write`, `delete`, `create_bucket`, `delete_bucket`, `share`, `policy`, `versioning`, `tagging`, `encryption`, `lifecycle`, `cors`, `replication`, `quota`, `object_lock`, `notification`, `logging`, `website` |
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
| `*` (in bucket) | All buckets | Policy applies to every bucket |
### IAM Policy Structure
User policies are stored as a JSON array of policy objects. Each object specifies a bucket, the allowed actions, and an optional prefix for object-level scoping:
User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:
```json
[
{
"bucket": "<bucket-name-or-wildcard>",
"actions": ["<action1>", "<action2>", ...],
"prefix": "<optional-key-prefix>"
"actions": ["<action1>", "<action2>", ...]
}
]
```
@@ -856,13 +840,12 @@ User policies are stored as a JSON array of policy objects. Each object specifie
**Fields:**
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
- `actions`: Array of action strings (simple names or AWS aliases)
- `prefix`: *(optional)* Restrict object-level actions to keys starting with this prefix. Defaults to `*` (all objects). Example: `"uploads/"` restricts to keys under `uploads/`
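For reference, the `prefix` field that differs between these two revisions of the docs is a plain string-prefix test on the object key (a trailing `*` is tolerated), matching the engine's `prefix_matches` helper that appears later in this diff. A minimal sketch:

```python
def prefix_matches(policy_prefix: str, object_key: str) -> bool:
    """True when the policy prefix permits the object key."""
    p = policy_prefix.strip()
    if not p or p == "*":
        return True                      # default: all objects
    return object_key.startswith(p.rstrip("*"))

assert prefix_matches("uploads/", "uploads/report.csv")
assert not prefix_matches("uploads/", "private/report.csv")
```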
### Example User Policies
**Full Administrator (complete system access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "create_bucket", "delete_bucket", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website", "iam:*"]}]
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
```
**Read-Only User (browse and download only):**
@@ -875,11 +858,6 @@ User policies are stored as a JSON array of policy objects. Each object specifie
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
```
**Operator (data operations + bucket management, no config):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket"]}]
```
**Multiple Bucket Access (different permissions per bucket):**
```json
[
@@ -889,14 +867,9 @@ User policies are stored as a JSON array of policy objects. Each object specifie
]
```
**Prefix-Scoped Access (restrict to a folder inside a shared bucket):**
```json
[{"bucket": "shared-data", "actions": ["list", "read", "write", "delete"], "prefix": "team-a/"}]
```
**IAM Manager (manage users but no data access):**
```json
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy", "iam:create_key", "iam:delete_key", "iam:get_user", "iam:get_policy", "iam:disable_user"]}]
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
```
**Replication Operator (manage replication only):**
@@ -916,10 +889,10 @@ User policies are stored as a JSON array of policy objects. Each object specifie
**Bucket Administrator (full bucket config, no IAM access):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket", "share", "policy", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website"]}]
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
```
**Upload-Only User (write but cannot create/delete buckets):**
**Upload-Only User (write but cannot read back):**
```json
[{"bucket": "drop-box", "actions": ["write"]}]
```
@@ -994,30 +967,6 @@ curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Get user details (requires iam:get_user) — via Admin API
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key> \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Get user policies (requires iam:get_policy) — via Admin API
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key>/policies \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Create additional access key for a user (requires iam:create_key)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/keys \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Delete an access key (requires iam:delete_key)
curl -X DELETE http://localhost:5000/admin/iam/users/<user-id>/keys/<access-key> \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Disable a user account (requires iam:disable_user)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/disable \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Re-enable a user account (requires iam:disable_user)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/enable \
-H "Authorization: AWS4-HMAC-SHA256 ..."
```
### Permission Precedence

myfsio-engine/Cargo.lock generated

File diff suppressed because it is too large.


@@ -1,45 +0,0 @@
[workspace]
resolver = "2"
members = [
"crates/myfsio-common",
"crates/myfsio-auth",
"crates/myfsio-crypto",
"crates/myfsio-storage",
"crates/myfsio-xml",
"crates/myfsio-server",
]
[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
axum = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.6", features = ["cors", "trace"] }
hyper = { version = "1" }
bytes = "1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
quick-xml = { version = "0.37", features = ["serialize"] }
hmac = "0.12"
sha2 = "0.10"
md-5 = "0.10"
hex = "0.4"
aes = "0.8"
aes-gcm = "0.10"
cbc = { version = "0.1", features = ["alloc"] }
hkdf = "0.12"
uuid = { version = "1", features = ["v4"] }
parking_lot = "0.12"
lru = "0.14"
percent-encoding = "2"
regex = "1"
unicode-normalization = "0.1"
tracing = "0.1"
tracing-subscriber = "0.3"
thiserror = "2"
chrono = { version = "0.4", features = ["serde"] }
base64 = "0.22"
tokio-util = { version = "0.7", features = ["io"] }
futures = "0.3"
dashmap = "6"
crc32fast = "1"
duckdb = { version = "1", features = ["bundled"] }


@@ -1,26 +0,0 @@
[package]
name = "myfsio-auth"
version = "0.1.0"
edition = "2021"
[dependencies]
myfsio-common = { path = "../myfsio-common" }
hmac = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
aes = { workspace = true }
cbc = { workspace = true }
base64 = { workspace = true }
pbkdf2 = "0.12"
lru = { workspace = true }
parking_lot = { workspace = true }
percent-encoding = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
chrono = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
[dev-dependencies]
tempfile = "3"


@@ -1,80 +0,0 @@
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, KeyIvInit};
use base64::{engine::general_purpose::URL_SAFE, Engine};
use hmac::{Hmac, Mac};
use sha2::Sha256;
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
type HmacSha256 = Hmac<Sha256>;
pub fn derive_fernet_key(secret: &str) -> String {
let mut derived = [0u8; 32];
pbkdf2::pbkdf2_hmac::<Sha256>(
secret.as_bytes(),
b"myfsio-iam-encryption",
100_000,
&mut derived,
);
URL_SAFE.encode(derived)
}
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
let key_bytes = URL_SAFE
.decode(key_b64)
.map_err(|_| "invalid fernet key base64")?;
if key_bytes.len() != 32 {
return Err("fernet key must be 32 bytes");
}
let signing_key = &key_bytes[..16];
let encryption_key = &key_bytes[16..];
let token_bytes = URL_SAFE
.decode(token)
.map_err(|_| "invalid fernet token base64")?;
if token_bytes.len() < 57 {
return Err("fernet token too short");
}
if token_bytes[0] != 0x80 {
return Err("invalid fernet version");
}
let hmac_offset = token_bytes.len() - 32;
let payload = &token_bytes[..hmac_offset];
let expected_hmac = &token_bytes[hmac_offset..];
let mut mac =
HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
mac.update(payload);
mac.verify_slice(expected_hmac)
.map_err(|_| "HMAC verification failed")?;
let iv = &token_bytes[9..25];
let ciphertext = &token_bytes[25..hmac_offset];
let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
.decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
.map_err(|_| "AES-CBC decryption failed")?;
Ok(plaintext)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_derive_fernet_key_format() {
let key = derive_fernet_key("test-secret");
let decoded = URL_SAFE.decode(&key).unwrap();
assert_eq!(decoded.len(), 32);
}
#[test]
fn test_roundtrip_with_python_compat() {
let key = derive_fernet_key("dev-secret-key");
let decoded = URL_SAFE.decode(&key).unwrap();
assert_eq!(decoded.len(), 32);
}
}
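This decryptor mirrors Python's `cryptography` Fernet: the key is PBKDF2-HMAC-SHA256 over the fixed salt `myfsio-iam-encryption` at 100,000 iterations, urlsafe-base64 encoded. A sketch of the Python side that produces tokens this code accepts (the secret is a placeholder):

```python
import base64
import hashlib
from cryptography.fernet import Fernet

def derive_fernet_key(secret: str) -> bytes:
    derived = hashlib.pbkdf2_hmac(
        "sha256", secret.encode(), b"myfsio-iam-encryption", 100_000
    )  # 32 bytes, matching the Rust derive_fernet_key
    return base64.urlsafe_b64encode(derived)

key = derive_fernet_key("dev-secret-key")      # placeholder secret
token = Fernet(key).encrypt(b'{"users": []}')
# On disk the token sits behind the "MYFSIO_IAM_ENC:" prefix,
# which the Rust reload() strips before calling decrypt().
```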


@@ -1,812 +0,0 @@
use chrono::{DateTime, Utc};
use myfsio_common::types::Principal;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Instant, SystemTime};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IamConfig {
#[serde(default = "default_version")]
pub version: u32,
#[serde(default)]
pub users: Vec<IamUser>,
}
fn default_version() -> u32 {
2
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IamUser {
pub user_id: String,
pub display_name: String,
#[serde(default = "default_enabled")]
pub enabled: bool,
#[serde(default)]
pub expires_at: Option<String>,
#[serde(default)]
pub access_keys: Vec<AccessKey>,
#[serde(default)]
pub policies: Vec<IamPolicy>,
}
#[derive(Debug, Clone, Deserialize)]
struct RawIamConfig {
#[serde(default)]
pub users: Vec<RawIamUser>,
}
#[derive(Debug, Clone, Deserialize)]
struct RawIamUser {
pub user_id: Option<String>,
pub display_name: Option<String>,
#[serde(default = "default_enabled")]
pub enabled: bool,
#[serde(default)]
pub expires_at: Option<String>,
pub access_key: Option<String>,
pub secret_key: Option<String>,
#[serde(default)]
pub access_keys: Vec<AccessKey>,
#[serde(default)]
pub policies: Vec<IamPolicy>,
}
impl RawIamUser {
fn normalize(self) -> IamUser {
let mut access_keys = self.access_keys;
if access_keys.is_empty() {
if let (Some(ak), Some(sk)) = (self.access_key, self.secret_key) {
access_keys.push(AccessKey {
access_key: ak,
secret_key: sk,
status: "active".to_string(),
created_at: None,
});
}
}
let display_name = self.display_name.unwrap_or_else(|| {
access_keys.first().map(|k| k.access_key.clone()).unwrap_or_else(|| "unknown".to_string())
});
let user_id = self.user_id.unwrap_or_else(|| {
format!("u-{}", display_name.to_ascii_lowercase().replace(' ', "-"))
});
IamUser {
user_id,
display_name,
enabled: self.enabled,
expires_at: self.expires_at,
access_keys,
policies: self.policies,
}
}
}
fn default_enabled() -> bool {
true
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccessKey {
pub access_key: String,
pub secret_key: String,
#[serde(default = "default_status")]
pub status: String,
#[serde(default)]
pub created_at: Option<String>,
}
fn default_status() -> String {
"active".to_string()
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IamPolicy {
pub bucket: String,
pub actions: Vec<String>,
#[serde(default = "default_prefix")]
pub prefix: String,
}
fn default_prefix() -> String {
"*".to_string()
}
struct IamState {
key_secrets: HashMap<String, String>,
key_index: HashMap<String, String>,
key_status: HashMap<String, String>,
user_records: HashMap<String, IamUser>,
file_mtime: Option<SystemTime>,
last_check: Instant,
}
pub struct IamService {
config_path: PathBuf,
state: Arc<RwLock<IamState>>,
check_interval: std::time::Duration,
fernet_key: Option<String>,
}
impl IamService {
pub fn new(config_path: PathBuf) -> Self {
Self::new_with_secret(config_path, None)
}
pub fn new_with_secret(config_path: PathBuf, secret_key: Option<String>) -> Self {
let fernet_key = secret_key.map(|s| crate::fernet::derive_fernet_key(&s));
let service = Self {
config_path,
state: Arc::new(RwLock::new(IamState {
key_secrets: HashMap::new(),
key_index: HashMap::new(),
key_status: HashMap::new(),
user_records: HashMap::new(),
file_mtime: None,
last_check: Instant::now(),
})),
check_interval: std::time::Duration::from_secs(2),
fernet_key,
};
service.reload();
service
}
fn reload_if_needed(&self) {
{
let state = self.state.read();
if state.last_check.elapsed() < self.check_interval {
return;
}
}
let current_mtime = std::fs::metadata(&self.config_path)
.and_then(|m| m.modified())
.ok();
let needs_reload = {
let state = self.state.read();
match (&state.file_mtime, &current_mtime) {
(None, Some(_)) => true,
(Some(old), Some(new)) => old != new,
(Some(_), None) => true,
(None, None) => state.key_secrets.is_empty(),
}
};
if needs_reload {
self.reload();
}
self.state.write().last_check = Instant::now();
}
fn reload(&self) {
let content = match std::fs::read_to_string(&self.config_path) {
Ok(c) => c,
Err(e) => {
tracing::warn!("Failed to read IAM config {}: {}", self.config_path.display(), e);
return;
}
};
let raw = if content.starts_with("MYFSIO_IAM_ENC:") {
let encrypted_token = &content["MYFSIO_IAM_ENC:".len()..];
match &self.fernet_key {
Some(key) => match crate::fernet::decrypt(key, encrypted_token.trim()) {
Ok(plaintext) => match String::from_utf8(plaintext) {
Ok(s) => s,
Err(e) => {
tracing::error!("Decrypted IAM config is not valid UTF-8: {}", e);
return;
}
},
Err(e) => {
tracing::error!("Failed to decrypt IAM config: {}. SECRET_KEY may have changed.", e);
return;
}
},
None => {
tracing::error!("IAM config is encrypted but no SECRET_KEY configured");
return;
}
}
} else {
content
};
let raw_config: RawIamConfig = match serde_json::from_str(&raw) {
Ok(c) => c,
Err(e) => {
tracing::error!("Failed to parse IAM config: {}", e);
return;
}
};
let users: Vec<IamUser> = raw_config.users.into_iter().map(|u| u.normalize()).collect();
let mut key_secrets = HashMap::new();
let mut key_index = HashMap::new();
let mut key_status = HashMap::new();
let mut user_records = HashMap::new();
for user in &users {
user_records.insert(user.user_id.clone(), user.clone());
for ak in &user.access_keys {
key_secrets.insert(ak.access_key.clone(), ak.secret_key.clone());
key_index.insert(ak.access_key.clone(), user.user_id.clone());
key_status.insert(ak.access_key.clone(), ak.status.clone());
}
}
let file_mtime = std::fs::metadata(&self.config_path)
.and_then(|m| m.modified())
.ok();
let mut state = self.state.write();
state.key_secrets = key_secrets;
state.key_index = key_index;
state.key_status = key_status;
state.user_records = user_records;
state.file_mtime = file_mtime;
state.last_check = Instant::now();
tracing::info!("IAM config reloaded: {} users, {} keys",
users.len(),
state.key_secrets.len());
}
pub fn get_secret_key(&self, access_key: &str) -> Option<String> {
self.reload_if_needed();
let state = self.state.read();
let status = state.key_status.get(access_key)?;
if status != "active" {
return None;
}
let user_id = state.key_index.get(access_key)?;
let user = state.user_records.get(user_id)?;
if !user.enabled {
return None;
}
if let Some(ref expires_at) = user.expires_at {
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
if Utc::now() > exp {
return None;
}
}
}
state.key_secrets.get(access_key).cloned()
}
pub fn get_principal(&self, access_key: &str) -> Option<Principal> {
self.reload_if_needed();
let state = self.state.read();
let status = state.key_status.get(access_key)?;
if status != "active" {
return None;
}
let user_id = state.key_index.get(access_key)?;
let user = state.user_records.get(user_id)?;
if !user.enabled {
return None;
}
if let Some(ref expires_at) = user.expires_at {
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
if Utc::now() > exp {
return None;
}
}
}
let is_admin = user.policies.iter().any(|p| {
p.bucket == "*" && p.actions.iter().any(|a| a == "*")
});
Some(Principal::new(
access_key.to_string(),
user.user_id.clone(),
user.display_name.clone(),
is_admin,
))
}
pub fn authenticate(&self, access_key: &str, secret_key: &str) -> Option<Principal> {
let stored_secret = self.get_secret_key(access_key)?;
if !crate::sigv4::constant_time_compare(&stored_secret, secret_key) {
return None;
}
self.get_principal(access_key)
}
pub fn authorize(
&self,
principal: &Principal,
bucket_name: Option<&str>,
action: &str,
object_key: Option<&str>,
) -> bool {
self.reload_if_needed();
if principal.is_admin {
return true;
}
let normalized_bucket = bucket_name
.unwrap_or("*")
.trim()
.to_ascii_lowercase();
let normalized_action = action.trim().to_ascii_lowercase();
let state = self.state.read();
let user = match state.user_records.get(&principal.user_id) {
Some(u) => u,
None => return false,
};
if !user.enabled {
return false;
}
if let Some(ref expires_at) = user.expires_at {
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
if Utc::now() > exp {
return false;
}
}
}
for policy in &user.policies {
if !bucket_matches(&policy.bucket, &normalized_bucket) {
continue;
}
if !action_matches(&policy.actions, &normalized_action) {
continue;
}
if let Some(key) = object_key {
if !prefix_matches(&policy.prefix, key) {
continue;
}
}
return true;
}
false
}
pub async fn list_users(&self) -> Vec<serde_json::Value> {
self.reload_if_needed();
let state = self.state.read();
state
.user_records
.values()
.map(|u| {
serde_json::json!({
"user_id": u.user_id,
"display_name": u.display_name,
"enabled": u.enabled,
"access_keys": u.access_keys.iter().map(|k| {
serde_json::json!({
"access_key": k.access_key,
"status": k.status,
"created_at": k.created_at,
})
}).collect::<Vec<_>>(),
"policy_count": u.policies.len(),
})
})
.collect()
}
pub async fn get_user(&self, identifier: &str) -> Option<serde_json::Value> {
self.reload_if_needed();
let state = self.state.read();
let user = state
.user_records
.get(identifier)
.or_else(|| {
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
})?;
Some(serde_json::json!({
"user_id": user.user_id,
"display_name": user.display_name,
"enabled": user.enabled,
"expires_at": user.expires_at,
"access_keys": user.access_keys.iter().map(|k| {
serde_json::json!({
"access_key": k.access_key,
"status": k.status,
"created_at": k.created_at,
})
}).collect::<Vec<_>>(),
"policies": user.policies,
}))
}
pub async fn set_user_enabled(&self, identifier: &str, enabled: bool) -> Result<(), String> {
let content = std::fs::read_to_string(&self.config_path)
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
let raw: RawIamConfig = serde_json::from_str(&content)
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
let mut config = IamConfig {
version: 2,
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
};
let user = config
.users
.iter_mut()
.find(|u| {
u.user_id == identifier
|| u.access_keys.iter().any(|k| k.access_key == identifier)
})
.ok_or_else(|| "User not found".to_string())?;
user.enabled = enabled;
let json = serde_json::to_string_pretty(&config)
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
std::fs::write(&self.config_path, json)
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
self.reload();
Ok(())
}
pub fn get_user_policies(&self, identifier: &str) -> Option<Vec<serde_json::Value>> {
self.reload_if_needed();
let state = self.state.read();
let user = state
.user_records
.get(identifier)
.or_else(|| {
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
})?;
Some(
user.policies
.iter()
.map(|p| serde_json::to_value(p).unwrap_or_default())
.collect(),
)
}
pub fn create_access_key(&self, identifier: &str) -> Result<serde_json::Value, String> {
let content = std::fs::read_to_string(&self.config_path)
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
let raw: RawIamConfig = serde_json::from_str(&content)
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
let mut config = IamConfig {
version: 2,
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
};
let user = config
.users
.iter_mut()
.find(|u| {
u.user_id == identifier
|| u.access_keys.iter().any(|k| k.access_key == identifier)
})
.ok_or_else(|| format!("User '{}' not found", identifier))?;
let new_ak = format!("AK{}", uuid::Uuid::new_v4().simple());
let new_sk = format!("SK{}", uuid::Uuid::new_v4().simple());
let key = AccessKey {
access_key: new_ak.clone(),
secret_key: new_sk.clone(),
status: "active".to_string(),
created_at: Some(chrono::Utc::now().to_rfc3339()),
};
user.access_keys.push(key);
let json = serde_json::to_string_pretty(&config)
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
std::fs::write(&self.config_path, json)
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
self.reload();
Ok(serde_json::json!({
"access_key": new_ak,
"secret_key": new_sk,
}))
}
pub fn delete_access_key(&self, access_key: &str) -> Result<(), String> {
let content = std::fs::read_to_string(&self.config_path)
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
let raw: RawIamConfig = serde_json::from_str(&content)
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
let mut config = IamConfig {
version: 2,
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
};
let mut found = false;
for user in &mut config.users {
if user.access_keys.iter().any(|k| k.access_key == access_key) {
if user.access_keys.len() <= 1 {
return Err("Cannot delete the last access key".to_string());
}
user.access_keys.retain(|k| k.access_key != access_key);
found = true;
break;
}
}
if !found {
return Err(format!("Access key '{}' not found", access_key));
}
let json = serde_json::to_string_pretty(&config)
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
std::fs::write(&self.config_path, json)
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
self.reload();
Ok(())
}
}
fn bucket_matches(policy_bucket: &str, bucket: &str) -> bool {
let pb = policy_bucket.trim().to_ascii_lowercase();
pb == "*" || pb == bucket
}
fn action_matches(policy_actions: &[String], action: &str) -> bool {
for policy_action in policy_actions {
let pa = policy_action.trim().to_ascii_lowercase();
if pa == "*" || pa == action {
return true;
}
if pa == "iam:*" && action.starts_with("iam:") {
return true;
}
}
false
}
fn prefix_matches(policy_prefix: &str, object_key: &str) -> bool {
let p = policy_prefix.trim();
if p.is_empty() || p == "*" {
return true;
}
let base = p.trim_end_matches('*');
object_key.starts_with(base)
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
fn test_iam_json() -> String {
serde_json::json!({
"version": 2,
"users": [{
"user_id": "u-test1234",
"display_name": "admin",
"enabled": true,
"access_keys": [{
"access_key": "AKIAIOSFODNN7EXAMPLE",
"secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
"status": "active",
"created_at": "2024-01-01T00:00:00Z"
}],
"policies": [{
"bucket": "*",
"actions": ["*"],
"prefix": "*"
}]
}]
})
.to_string()
}
#[test]
fn test_load_and_lookup() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(test_iam_json().as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
let secret = svc.get_secret_key("AKIAIOSFODNN7EXAMPLE");
assert_eq!(
secret.unwrap(),
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
);
}
#[test]
fn test_get_principal() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(test_iam_json().as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
let principal = svc.get_principal("AKIAIOSFODNN7EXAMPLE").unwrap();
assert_eq!(principal.display_name, "admin");
assert_eq!(principal.user_id, "u-test1234");
assert!(principal.is_admin);
}
#[test]
fn test_authenticate_success() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(test_iam_json().as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
let principal = svc
.authenticate(
"AKIAIOSFODNN7EXAMPLE",
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
)
.unwrap();
assert_eq!(principal.display_name, "admin");
}
#[test]
fn test_authenticate_wrong_secret() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(test_iam_json().as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
assert!(svc.authenticate("AKIAIOSFODNN7EXAMPLE", "wrongsecret").is_none());
}
#[test]
fn test_unknown_key_returns_none() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(test_iam_json().as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
assert!(svc.get_secret_key("NONEXISTENTKEY").is_none());
assert!(svc.get_principal("NONEXISTENTKEY").is_none());
}
#[test]
fn test_disabled_user() {
let json = serde_json::json!({
"version": 2,
"users": [{
"user_id": "u-disabled",
"display_name": "disabled-user",
"enabled": false,
"access_keys": [{
"access_key": "DISABLED_KEY",
"secret_key": "secret123",
"status": "active"
}],
"policies": []
}]
})
.to_string();
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(json.as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
assert!(svc.get_secret_key("DISABLED_KEY").is_none());
}
#[test]
fn test_inactive_key() {
let json = serde_json::json!({
"version": 2,
"users": [{
"user_id": "u-test",
"display_name": "test",
"enabled": true,
"access_keys": [{
"access_key": "INACTIVE_KEY",
"secret_key": "secret123",
"status": "inactive"
}],
"policies": []
}]
})
.to_string();
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(json.as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
assert!(svc.get_secret_key("INACTIVE_KEY").is_none());
}
#[test]
fn test_v1_flat_format() {
let json = serde_json::json!({
"users": [{
"access_key": "test",
"secret_key": "secret",
"display_name": "Test User",
"policies": [{"bucket": "*", "actions": ["*"], "prefix": "*"}]
}]
})
.to_string();
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(json.as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
let secret = svc.get_secret_key("test");
assert_eq!(secret.unwrap(), "secret");
let principal = svc.get_principal("test").unwrap();
assert_eq!(principal.display_name, "Test User");
assert!(principal.is_admin);
}
#[test]
fn test_authorize_allows_matching_policy() {
let json = serde_json::json!({
"version": 2,
"users": [{
"user_id": "u-reader",
"display_name": "reader",
"enabled": true,
"access_keys": [{
"access_key": "READER_KEY",
"secret_key": "reader-secret",
"status": "active"
}],
"policies": [{
"bucket": "docs",
"actions": ["read"],
"prefix": "reports/"
}]
}]
})
.to_string();
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(json.as_bytes()).unwrap();
tmp.flush().unwrap();
let svc = IamService::new(tmp.path().to_path_buf());
let principal = svc.get_principal("READER_KEY").unwrap();
assert!(svc.authorize(
&principal,
Some("docs"),
"read",
Some("reports/2026.csv"),
));
assert!(!svc.authorize(
&principal,
Some("docs"),
"write",
Some("reports/2026.csv"),
));
assert!(!svc.authorize(
&principal,
Some("docs"),
"read",
Some("private/2026.csv"),
));
assert!(!svc.authorize(
&principal,
Some("other"),
"read",
Some("reports/2026.csv"),
));
}
}
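The tests pin down the on-disk IAM format: a `version: 2` document whose users carry `access_keys` arrays, with a v1 flat fallback (`access_key`/`secret_key` directly on the user). A sketch that writes a minimal v2 file, reusing the AWS documentation example keys from the tests (the output path is a placeholder):

```python
import json

iam_config = {
    "version": 2,
    "users": [{
        "user_id": "u-test1234",
        "display_name": "admin",
        "enabled": True,
        "access_keys": [{
            "access_key": "AKIAIOSFODNN7EXAMPLE",
            "secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "status": "active",
            "created_at": "2024-01-01T00:00:00Z",
        }],
        "policies": [{"bucket": "*", "actions": ["*"], "prefix": "*"}],
    }],
}

with open("iam.json", "w") as fh:   # path is a placeholder
    json.dump(iam_config, fh, indent=2)
```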


@@ -1,4 +0,0 @@
pub mod sigv4;
pub mod principal;
pub mod iam;
mod fernet;


@@ -1 +0,0 @@
pub use myfsio_common::types::Principal;


@@ -1,258 +0,0 @@
use hmac::{Hmac, Mac};
use lru::LruCache;
use parking_lot::Mutex;
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
use sha2::{Digest, Sha256};
use std::num::NonZeroUsize;
use std::sync::LazyLock;
use std::time::Instant;
type HmacSha256 = Hmac<Sha256>;
struct CacheEntry {
key: Vec<u8>,
created: Instant,
}
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
const CACHE_TTL_SECS: u64 = 60;
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
.remove(b'-')
.remove(b'_')
.remove(b'.')
.remove(b'~');
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
mac.update(msg);
mac.finalize().into_bytes().to_vec()
}
fn sha256_hex(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
hex::encode(hasher.finalize())
}
fn aws_uri_encode(input: &str) -> String {
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}
pub fn derive_signing_key_cached(
secret_key: &str,
date_stamp: &str,
region: &str,
service: &str,
) -> Vec<u8> {
let cache_key = (
secret_key.to_owned(),
date_stamp.to_owned(),
region.to_owned(),
service.to_owned(),
);
{
let mut cache = SIGNING_KEY_CACHE.lock();
if let Some(entry) = cache.get(&cache_key) {
if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
return entry.key.clone();
}
cache.pop(&cache_key);
}
}
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
let k_region = hmac_sha256(&k_date, region.as_bytes());
let k_service = hmac_sha256(&k_region, service.as_bytes());
let k_signing = hmac_sha256(&k_service, b"aws4_request");
{
let mut cache = SIGNING_KEY_CACHE.lock();
cache.put(
cache_key,
CacheEntry {
key: k_signing.clone(),
created: Instant::now(),
},
);
}
k_signing
}
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
if a.len() != b.len() {
return false;
}
let mut result: u8 = 0;
for (x, y) in a.iter().zip(b.iter()) {
result |= x ^ y;
}
result == 0
}
pub fn verify_sigv4_signature(
method: &str,
canonical_uri: &str,
query_params: &[(String, String)],
signed_headers_str: &str,
header_values: &[(String, String)],
payload_hash: &str,
amz_date: &str,
date_stamp: &str,
region: &str,
service: &str,
secret_key: &str,
provided_signature: &str,
) -> bool {
let mut sorted_params = query_params.to_vec();
sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
let canonical_query_string = sorted_params
.iter()
.map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
.collect::<Vec<_>>()
.join("&");
let mut canonical_headers = String::new();
for (name, value) in header_values {
let lower_name = name.to_lowercase();
let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
let final_value = if lower_name == "expect" && normalized.is_empty() {
"100-continue"
} else {
&normalized
};
canonical_headers.push_str(&lower_name);
canonical_headers.push(':');
canonical_headers.push_str(final_value);
canonical_headers.push('\n');
}
let canonical_request = format!(
"{}\n{}\n{}\n{}\n{}\n{}",
method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str,
payload_hash
);
let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
let cr_hash = sha256_hex(canonical_request.as_bytes());
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date, credential_scope, cr_hash
);
let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
let calculated_hex = hex::encode(&calculated);
constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
pub fn derive_signing_key(
secret_key: &str,
date_stamp: &str,
region: &str,
service: &str,
) -> Vec<u8> {
derive_signing_key_cached(secret_key, date_stamp, region, service)
}
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
hex::encode(sig)
}
pub fn build_string_to_sign(
amz_date: &str,
credential_scope: &str,
canonical_request: &str,
) -> String {
let cr_hash = sha256_hex(canonical_request.as_bytes());
format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date, credential_scope, cr_hash
)
}
pub fn constant_time_compare(a: &str, b: &str) -> bool {
constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}
pub fn clear_signing_key_cache() {
SIGNING_KEY_CACHE.lock().clear();
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_derive_signing_key() {
let key = derive_signing_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", "20130524", "us-east-1", "s3");
assert_eq!(key.len(), 32);
}
#[test]
fn test_derive_signing_key_cached() {
let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
assert_eq!(key1, key2);
}
#[test]
fn test_constant_time_compare() {
assert!(constant_time_compare("abc", "abc"));
assert!(!constant_time_compare("abc", "abd"));
assert!(!constant_time_compare("abc", "abcd"));
}
#[test]
fn test_build_string_to_sign() {
let result = build_string_to_sign("20130524T000000Z", "20130524/us-east-1/s3/aws4_request", "GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD");
assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
assert!(result.contains("20130524T000000Z"));
}
#[test]
fn test_aws_uri_encode() {
assert_eq!(aws_uri_encode("hello world"), "hello%20world");
assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
}
#[test]
fn test_verify_sigv4_roundtrip() {
let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
let date_stamp = "20130524";
let region = "us-east-1";
let service = "s3";
let amz_date = "20130524T000000Z";
let signing_key = derive_signing_key(secret, date_stamp, region, service);
let canonical_request = "GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
let string_to_sign = build_string_to_sign(amz_date, &format!("{}/{}/{}/aws4_request", date_stamp, region, service), canonical_request);
let signature = compute_signature(&signing_key, &string_to_sign);
let result = verify_sigv4_signature(
"GET",
"/",
&[],
"host",
&[("host".to_string(), "examplebucket.s3.amazonaws.com".to_string())],
"UNSIGNED-PAYLOAD",
amz_date,
date_stamp,
region,
service,
secret,
&signature,
);
assert!(result);
}
}
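The signing-key derivation is the standard SigV4 HMAC chain, so a Python implementation agrees with this Rust code by construction. A reference sketch:

```python
import hashlib
import hmac

def hmac_sha256(key: bytes, msg: bytes) -> bytes:
    return hmac.new(key, msg, hashlib.sha256).digest()

def derive_signing_key(secret_key: str, date_stamp: str, region: str, service: str) -> bytes:
    k_date = hmac_sha256(b"AWS4" + secret_key.encode(), date_stamp.encode())
    k_region = hmac_sha256(k_date, region.encode())
    k_service = hmac_sha256(k_region, service.encode())
    return hmac_sha256(k_service, b"aws4_request")

def sign(signing_key: bytes, string_to_sign: str) -> str:
    return hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()

key = derive_signing_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
                         "20130524", "us-east-1", "s3")
assert len(key) == 32
```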


@@ -1,11 +0,0 @@
[package]
name = "myfsio-common"
version = "0.1.0"
edition = "2021"
[dependencies]
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }


@@ -1,20 +0,0 @@
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
pub const BUCKET_META_DIR: &str = "meta";
pub const BUCKET_VERSIONS_DIR: &str = "versions";
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
pub const STATS_FILE: &str = "stats.json";
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
pub const INDEX_FILE: &str = "_index.json";
pub const MANIFEST_FILE: &str = "manifest.json";
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
pub const DEFAULT_REGION: &str = "us-east-1";
pub const AWS_SERVICE: &str = "s3";
pub const DEFAULT_MAX_KEYS: usize = 1000;
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;


@@ -1,221 +0,0 @@
use std::fmt;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S3ErrorCode {
AccessDenied,
BucketAlreadyExists,
BucketNotEmpty,
EntityTooLarge,
InternalError,
InvalidAccessKeyId,
InvalidArgument,
InvalidBucketName,
InvalidKey,
InvalidRange,
InvalidRequest,
MalformedXML,
MethodNotAllowed,
NoSuchBucket,
NoSuchKey,
NoSuchUpload,
NoSuchVersion,
NoSuchTagSet,
PreconditionFailed,
NotModified,
QuotaExceeded,
SignatureDoesNotMatch,
SlowDown,
}
impl S3ErrorCode {
pub fn http_status(&self) -> u16 {
match self {
Self::AccessDenied => 403,
Self::BucketAlreadyExists => 409,
Self::BucketNotEmpty => 409,
Self::EntityTooLarge => 413,
Self::InternalError => 500,
Self::InvalidAccessKeyId => 403,
Self::InvalidArgument => 400,
Self::InvalidBucketName => 400,
Self::InvalidKey => 400,
Self::InvalidRange => 416,
Self::InvalidRequest => 400,
Self::MalformedXML => 400,
Self::MethodNotAllowed => 405,
Self::NoSuchBucket => 404,
Self::NoSuchKey => 404,
Self::NoSuchUpload => 404,
Self::NoSuchVersion => 404,
Self::NoSuchTagSet => 404,
Self::PreconditionFailed => 412,
Self::NotModified => 304,
Self::QuotaExceeded => 403,
Self::SignatureDoesNotMatch => 403,
Self::SlowDown => 429,
}
}
pub fn as_str(&self) -> &'static str {
match self {
Self::AccessDenied => "AccessDenied",
Self::BucketAlreadyExists => "BucketAlreadyExists",
Self::BucketNotEmpty => "BucketNotEmpty",
Self::EntityTooLarge => "EntityTooLarge",
Self::InternalError => "InternalError",
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
Self::InvalidArgument => "InvalidArgument",
Self::InvalidBucketName => "InvalidBucketName",
Self::InvalidKey => "InvalidKey",
Self::InvalidRange => "InvalidRange",
Self::InvalidRequest => "InvalidRequest",
Self::MalformedXML => "MalformedXML",
Self::MethodNotAllowed => "MethodNotAllowed",
Self::NoSuchBucket => "NoSuchBucket",
Self::NoSuchKey => "NoSuchKey",
Self::NoSuchUpload => "NoSuchUpload",
Self::NoSuchVersion => "NoSuchVersion",
Self::NoSuchTagSet => "NoSuchTagSet",
Self::PreconditionFailed => "PreconditionFailed",
Self::NotModified => "NotModified",
Self::QuotaExceeded => "QuotaExceeded",
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
Self::SlowDown => "SlowDown",
}
}
pub fn default_message(&self) -> &'static str {
match self {
Self::AccessDenied => "Access Denied",
Self::BucketAlreadyExists => "The requested bucket name is not available",
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
Self::InternalError => "We encountered an internal error. Please try again.",
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
Self::InvalidArgument => "Invalid argument",
Self::InvalidBucketName => "The specified bucket is not valid",
Self::InvalidKey => "The specified key is not valid",
Self::InvalidRange => "The requested range is not satisfiable",
Self::InvalidRequest => "Invalid request",
Self::MalformedXML => "The XML you provided was not well-formed",
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
Self::NoSuchBucket => "The specified bucket does not exist",
Self::NoSuchKey => "The specified key does not exist",
Self::NoSuchUpload => "The specified multipart upload does not exist",
Self::NoSuchVersion => "The specified version does not exist",
Self::NoSuchTagSet => "The TagSet does not exist",
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
Self::NotModified => "Not Modified",
Self::QuotaExceeded => "The bucket quota has been exceeded",
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
Self::SlowDown => "Please reduce your request rate",
}
}
}
impl fmt::Display for S3ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
#[derive(Debug, Clone)]
pub struct S3Error {
pub code: S3ErrorCode,
pub message: String,
pub resource: String,
pub request_id: String,
}
impl S3Error {
pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
Self {
code,
message: message.into(),
resource: String::new(),
request_id: String::new(),
}
}
pub fn from_code(code: S3ErrorCode) -> Self {
Self::new(code, code.default_message())
}
pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
self.resource = resource.into();
self
}
pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
self.request_id = request_id.into();
self
}
pub fn http_status(&self) -> u16 {
self.code.http_status()
}
pub fn to_xml(&self) -> String {
format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<Error>\
<Code>{}</Code>\
<Message>{}</Message>\
<Resource>{}</Resource>\
<RequestId>{}</RequestId>\
</Error>",
self.code.as_str(),
xml_escape(&self.message),
xml_escape(&self.resource),
xml_escape(&self.request_id),
)
}
}
impl fmt::Display for S3Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}: {}", self.code, self.message)
}
}
impl std::error::Error for S3Error {}
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_error_codes() {
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
}
#[test]
fn test_error_to_xml() {
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
.with_resource("/test-bucket/test-key")
.with_request_id("abc123");
let xml = err.to_xml();
assert!(xml.contains("<Code>NoSuchKey</Code>"));
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
assert!(xml.contains("<RequestId>abc123</RequestId>"));
}
#[test]
fn test_xml_escape() {
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
.with_resource("/bucket/key&amp");
let xml = err.to_xml();
assert!(xml.contains("&lt;test&gt;"));
assert!(xml.contains("&amp;"));
}
}
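`to_xml` emits the standard S3 error envelope. A sketch that renders the same shape in Python, escaping roughly as in `xml_escape`:

```python
from xml.sax.saxutils import escape

def s3_error_xml(code: str, message: str, resource: str = "", request_id: str = "") -> str:
    q = lambda s: escape(s, {'"': "&quot;", "'": "&apos;"})
    return (
        '<?xml version="1.0" encoding="UTF-8"?>'
        f"<Error><Code>{q(code)}</Code><Message>{q(message)}</Message>"
        f"<Resource>{q(resource)}</Resource><RequestId>{q(request_id)}</RequestId></Error>"
    )

print(s3_error_xml("NoSuchKey", "The specified key does not exist",
                   "/test-bucket/test-key", "abc123"))
```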


@@ -1,3 +0,0 @@
pub mod constants;
pub mod error;
pub mod types;


@@ -1,176 +0,0 @@
use std::collections::HashMap;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub etag: Option<String>,
pub content_type: Option<String>,
pub storage_class: Option<String>,
pub metadata: HashMap<String, String>,
}
impl ObjectMeta {
pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
Self {
key,
size,
last_modified,
etag: None,
content_type: None,
storage_class: Some("STANDARD".to_string()),
metadata: HashMap::new(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
pub name: String,
pub creation_date: DateTime<Utc>,
}
#[derive(Debug, Clone, Default)]
pub struct BucketStats {
pub objects: u64,
pub bytes: u64,
pub version_count: u64,
pub version_bytes: u64,
}
impl BucketStats {
pub fn total_objects(&self) -> u64 {
self.objects + self.version_count
}
pub fn total_bytes(&self) -> u64 {
self.bytes + self.version_bytes
}
}
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
pub objects: Vec<ObjectMeta>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ShallowListResult {
pub objects: Vec<ObjectMeta>,
pub common_prefixes: Vec<String>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
}
#[derive(Debug, Clone, Default)]
pub struct ListParams {
pub max_keys: usize,
pub continuation_token: Option<String>,
pub prefix: Option<String>,
pub start_after: Option<String>,
}
#[derive(Debug, Clone, Default)]
pub struct ShallowListParams {
pub prefix: String,
pub delimiter: String,
pub max_keys: usize,
pub continuation_token: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PartMeta {
pub part_number: u32,
pub etag: String,
pub size: u64,
pub last_modified: Option<DateTime<Utc>>,
}
#[derive(Debug, Clone)]
pub struct PartInfo {
pub part_number: u32,
pub etag: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultipartUploadInfo {
pub upload_id: String,
pub key: String,
pub initiated: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
pub version_id: String,
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub etag: Option<String>,
pub is_latest: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
pub key: String,
pub value: String,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
#[serde(default)]
pub versioning_enabled: bool,
#[serde(default)]
pub tags: Vec<Tag>,
#[serde(default)]
pub cors: Option<serde_json::Value>,
#[serde(default)]
pub encryption: Option<serde_json::Value>,
#[serde(default)]
pub lifecycle: Option<serde_json::Value>,
#[serde(default)]
pub website: Option<serde_json::Value>,
#[serde(default)]
pub quota: Option<QuotaConfig>,
#[serde(default)]
pub acl: Option<serde_json::Value>,
#[serde(default)]
pub notification: Option<serde_json::Value>,
#[serde(default)]
pub logging: Option<serde_json::Value>,
#[serde(default)]
pub object_lock: Option<serde_json::Value>,
#[serde(default)]
pub policy: Option<serde_json::Value>,
#[serde(default)]
pub replication: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
pub max_bytes: Option<u64>,
pub max_objects: Option<u64>,
}
#[derive(Debug, Clone)]
pub struct Principal {
pub access_key: String,
pub user_id: String,
pub display_name: String,
pub is_admin: bool,
}
impl Principal {
pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
Self {
access_key,
user_id,
display_name,
is_admin,
}
}
}


@@ -1,24 +0,0 @@
[package]
name = "myfsio-crypto"
version = "0.1.0"
edition = "2021"
[dependencies]
myfsio-common = { path = "../myfsio-common" }
md-5 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
aes-gcm = { workspace = true }
hkdf = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
base64 = { workspace = true }
rand = "0.8"
[dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tempfile = "3"

View File

@@ -1,238 +0,0 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use hkdf::Hkdf;
use sha2::Sha256;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use thiserror::Error;
const DEFAULT_CHUNK_SIZE: usize = 65536; // 64 KiB of plaintext per AES-GCM chunk
const HEADER_SIZE: usize = 4; // big-endian u32: chunk count (file header) / ciphertext length (per chunk)
#[derive(Debug, Error)]
pub enum CryptoError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Invalid key size: expected 32 bytes, got {0}")]
InvalidKeySize(usize),
#[error("Invalid nonce size: expected 12 bytes, got {0}")]
InvalidNonceSize(usize),
#[error("Encryption failed: {0}")]
EncryptionFailed(String),
#[error("Decryption failed at chunk {0}")]
DecryptionFailed(u32),
#[error("HKDF expand failed: {0}")]
HkdfFailed(String),
}
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
let mut filled = 0;
while filled < buf.len() {
match reader.read(&mut buf[filled..]) {
Ok(0) => break,
Ok(n) => filled += n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
Ok(filled)
}
// Derive a distinct 96-bit nonce for each chunk via HKDF-SHA256, using the
// base nonce as salt and the chunk index as the info input, so no
// (key, nonce) pair is ever reused across chunks.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
let mut okm = [0u8; 12];
hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
.map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
Ok(okm)
}
pub fn encrypt_stream_chunked(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
if key.len() != 32 {
return Err(CryptoError::InvalidKeySize(key.len()));
}
if base_nonce.len() != 12 {
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
}
let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
let key_arr: [u8; 32] = key.try_into().unwrap();
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut infile = File::open(input_path)?;
let mut outfile = File::create(output_path)?;
outfile.write_all(&[0u8; HEADER_SIZE])?; // placeholder header, back-patched with the chunk count below
let mut buf = vec![0u8; chunk_size];
let mut chunk_index: u32 = 0;
loop {
let n = read_exact_chunk(&mut infile, &mut buf)?;
if n == 0 {
break;
}
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, &buf[..n])
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let size = encrypted.len() as u32;
outfile.write_all(&size.to_be_bytes())?;
outfile.write_all(&encrypted)?;
chunk_index += 1;
}
// Back-patch the 4-byte header with the total number of chunks written.
outfile.seek(SeekFrom::Start(0))?;
outfile.write_all(&chunk_index.to_be_bytes())?;
Ok(chunk_index)
}
pub fn decrypt_stream_chunked(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
) -> Result<u32, CryptoError> {
if key.len() != 32 {
return Err(CryptoError::InvalidKeySize(key.len()));
}
if base_nonce.len() != 12 {
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
}
let key_arr: [u8; 32] = key.try_into().unwrap();
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut infile = File::open(input_path)?;
let mut outfile = File::create(output_path)?;
let mut header = [0u8; HEADER_SIZE];
infile.read_exact(&mut header)?;
let chunk_count = u32::from_be_bytes(header);
let mut size_buf = [0u8; HEADER_SIZE];
for chunk_index in 0..chunk_count {
infile.read_exact(&mut size_buf)?;
let chunk_size = u32::from_be_bytes(size_buf) as usize;
let mut encrypted = vec![0u8; chunk_size];
infile.read_exact(&mut encrypted)?;
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
let nonce = Nonce::from_slice(&nonce_bytes);
let decrypted = cipher
.decrypt(nonce, encrypted.as_ref())
.map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;
outfile.write_all(&decrypted)?;
}
Ok(chunk_count)
}
pub async fn encrypt_stream_chunked_async(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
let input_path = input_path.to_owned();
let output_path = output_path.to_owned();
let key = key.to_vec();
let base_nonce = base_nonce.to_vec();
tokio::task::spawn_blocking(move || {
encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
pub async fn decrypt_stream_chunked_async(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
) -> Result<u32, CryptoError> {
let input_path = input_path.to_owned();
let output_path = output_path.to_owned();
let key = key.to_vec();
let base_nonce = base_nonce.to_vec();
tokio::task::spawn_blocking(move || {
decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write as IoWrite;
#[test]
fn test_encrypt_decrypt_roundtrip() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
let encrypted = dir.path().join("encrypted.bin");
let decrypted = dir.path().join("decrypted.bin");
let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
std::fs::File::create(&input).unwrap().write_all(data).unwrap();
let key = [0x42u8; 32];
let nonce = [0x01u8; 12];
let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
assert!(chunks > 0);
let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
assert_eq!(chunks, chunks2);
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[test]
fn test_invalid_key_size() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
std::fs::File::create(&input).unwrap().write_all(b"test").unwrap();
let result = encrypt_stream_chunked(&input, &dir.path().join("out"), &[0u8; 16], &[0u8; 12], None);
assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
}
#[test]
fn test_wrong_key_fails_decrypt() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
let encrypted = dir.path().join("encrypted.bin");
let decrypted = dir.path().join("decrypted.bin");
std::fs::File::create(&input).unwrap().write_all(b"secret data").unwrap();
let key = [0x42u8; 32];
let nonce = [0x01u8; 12];
encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();
let wrong_key = [0x43u8; 32];
let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
}
}
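For reference, a minimal round-trip of the chunked API above (a sketch only; the paths and key material are placeholders):
use std::path::Path;
fn roundtrip_sketch() -> Result<(), CryptoError> {
let key = [0u8; 32]; // placeholder AES-256 key
let base_nonce = [0u8; 12]; // placeholder base nonce
// Output layout: [u32 chunk count][u32 len || ciphertext] repeated per chunk.
let written = encrypt_stream_chunked(
Path::new("plain.bin"), Path::new("cipher.bin"), &key, &base_nonce, None)?;
let read_back = decrypt_stream_chunked(
Path::new("cipher.bin"), Path::new("plain.out"), &key, &base_nonce)?;
assert_eq!(written, read_back);
Ok(())
}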

View File

@@ -1,375 +0,0 @@
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use rand::RngCore;
use std::collections::HashMap;
use std::path::Path;
use crate::aes_gcm::{
encrypt_stream_chunked, decrypt_stream_chunked, CryptoError,
};
use crate::kms::KmsService;
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
Aes256,
AwsKms,
CustomerProvided,
}
impl SseAlgorithm {
pub fn as_str(&self) -> &'static str {
match self {
SseAlgorithm::Aes256 => "AES256",
SseAlgorithm::AwsKms => "aws:kms",
// SSE-C is surfaced as "AES256", matching the S3 customer-algorithm header value.
SseAlgorithm::CustomerProvided => "AES256",
}
}
}
#[derive(Debug, Clone)]
pub struct EncryptionContext {
pub algorithm: SseAlgorithm,
pub kms_key_id: Option<String>,
pub customer_key: Option<Vec<u8>>,
}
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
pub algorithm: String,
pub nonce: String,
pub encrypted_data_key: Option<String>,
pub kms_key_id: Option<String>,
}
impl EncryptionMetadata {
pub fn to_metadata_map(&self) -> HashMap<String, String> {
let mut map = HashMap::new();
map.insert(
"x-amz-server-side-encryption".to_string(),
self.algorithm.clone(),
);
map.insert("x-amz-encryption-nonce".to_string(), self.nonce.clone());
if let Some(ref dk) = self.encrypted_data_key {
map.insert("x-amz-encrypted-data-key".to_string(), dk.clone());
}
if let Some(ref kid) = self.kms_key_id {
map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
}
map
}
pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
let algorithm = meta.get("x-amz-server-side-encryption")?;
let nonce = meta.get("x-amz-encryption-nonce")?;
Some(Self {
algorithm: algorithm.clone(),
nonce: nonce.clone(),
encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
})
}
pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
meta.contains_key("x-amz-server-side-encryption")
}
pub fn clean_metadata(meta: &mut HashMap<String, String>) {
meta.remove("x-amz-server-side-encryption");
meta.remove("x-amz-encryption-nonce");
meta.remove("x-amz-encrypted-data-key");
meta.remove("x-amz-encryption-key-id");
}
}
pub struct EncryptionService {
master_key: [u8; 32],
kms: Option<std::sync::Arc<KmsService>>,
}
impl EncryptionService {
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
Self { master_key, kms }
}
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
let mut data_key = [0u8; 32];
let mut nonce = [0u8; 12];
rand::thread_rng().fill_bytes(&mut data_key);
rand::thread_rng().fill_bytes(&mut nonce);
(data_key, nonce)
}
/// Wrap a data key under the master key; the output is base64(nonce || ciphertext).
pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
let cipher = Aes256Gcm::new((&self.master_key).into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, data_key.as_slice())
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut combined = Vec::with_capacity(12 + encrypted.len());
combined.extend_from_slice(&nonce_bytes);
combined.extend_from_slice(&encrypted);
Ok(B64.encode(&combined))
}
pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
let combined = B64.decode(wrapped_b64).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
})?;
if combined.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Wrapped key too short".to_string(),
));
}
let (nonce_bytes, ciphertext) = combined.split_at(12);
let cipher = Aes256Gcm::new((&self.master_key).into());
let nonce = Nonce::from_slice(nonce_bytes);
let plaintext = cipher
.decrypt(nonce, ciphertext)
.map_err(|_| CryptoError::DecryptionFailed(0))?;
if plaintext.len() != 32 {
return Err(CryptoError::InvalidKeySize(plaintext.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&plaintext);
Ok(key)
}
pub async fn encrypt_object(
&self,
input_path: &Path,
output_path: &Path,
ctx: &EncryptionContext,
) -> Result<EncryptionMetadata, CryptoError> {
let (data_key, nonce) = self.generate_data_key();
let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
SseAlgorithm::Aes256 => {
let wrapped = self.wrap_data_key(&data_key)?;
(Some(wrapped), None)
}
SseAlgorithm::AwsKms => {
let kms = self
.kms
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
let kid = ctx
.kms_key_id
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
let ciphertext = kms.encrypt_data(kid, &data_key).await?;
(Some(B64.encode(&ciphertext)), Some(kid.clone()))
}
SseAlgorithm::CustomerProvided => {
(None, None)
}
};
let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
let ck = ctx.customer_key.as_ref().ok_or_else(|| {
CryptoError::EncryptionFailed("No customer key provided".into())
})?;
if ck.len() != 32 {
return Err(CryptoError::InvalidKeySize(ck.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(ck);
k
} else {
data_key
};
let ip = input_path.to_owned();
let op = output_path.to_owned();
let ak = actual_key;
let n = nonce;
tokio::task::spawn_blocking(move || {
encrypt_stream_chunked(&ip, &op, &ak, &n, None)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
Ok(EncryptionMetadata {
algorithm: ctx.algorithm.as_str().to_string(),
nonce: B64.encode(nonce),
encrypted_data_key,
kms_key_id,
})
}
pub async fn decrypt_object(
&self,
input_path: &Path,
output_path: &Path,
enc_meta: &EncryptionMetadata,
customer_key: Option<&[u8]>,
) -> Result<(), CryptoError> {
let nonce_bytes = B64.decode(&enc_meta.nonce).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e))
})?;
if nonce_bytes.len() != 12 {
return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
}
let data_key: [u8; 32] = if let Some(ck) = customer_key {
if ck.len() != 32 {
return Err(CryptoError::InvalidKeySize(ck.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(ck);
k
} else if enc_meta.algorithm == "aws:kms" {
let kms = self
.kms
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
let kid = enc_meta
.kms_key_id
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
})?;
let ct = B64.decode(encrypted_dk).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
})?;
let dk = kms.decrypt_data(kid, &ct).await?;
if dk.len() != 32 {
return Err(CryptoError::InvalidKeySize(dk.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(&dk);
k
} else {
let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
})?;
self.unwrap_data_key(wrapped)?
};
let ip = input_path.to_owned();
let op = output_path.to_owned();
let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
tokio::task::spawn_blocking(move || {
decrypt_stream_chunked(&ip, &op, &data_key, &nb)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
fn test_master_key() -> [u8; 32] {
[0x42u8; 32]
}
#[test]
fn test_wrap_unwrap_data_key() {
let svc = EncryptionService::new(test_master_key(), None);
let dk = [0xAAu8; 32];
let wrapped = svc.wrap_data_key(&dk).unwrap();
let unwrapped = svc.unwrap_data_key(&wrapped).unwrap();
assert_eq!(dk, unwrapped);
}
#[tokio::test]
async fn test_encrypt_decrypt_object_sse_s3() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("plain.bin");
let encrypted = dir.path().join("enc.bin");
let decrypted = dir.path().join("dec.bin");
let data = b"SSE-S3 encrypted content for testing!";
std::fs::File::create(&input).unwrap().write_all(data).unwrap();
let svc = EncryptionService::new(test_master_key(), None);
let ctx = EncryptionContext {
algorithm: SseAlgorithm::Aes256,
kms_key_id: None,
customer_key: None,
};
let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
assert_eq!(meta.algorithm, "AES256");
assert!(meta.encrypted_data_key.is_some());
svc.decrypt_object(&encrypted, &decrypted, &meta, None)
.await
.unwrap();
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[tokio::test]
async fn test_encrypt_decrypt_object_sse_c() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("plain.bin");
let encrypted = dir.path().join("enc.bin");
let decrypted = dir.path().join("dec.bin");
let data = b"SSE-C encrypted content!";
std::fs::File::create(&input).unwrap().write_all(data).unwrap();
let customer_key = [0xBBu8; 32];
let svc = EncryptionService::new(test_master_key(), None);
let ctx = EncryptionContext {
algorithm: SseAlgorithm::CustomerProvided,
kms_key_id: None,
customer_key: Some(customer_key.to_vec()),
};
let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
assert!(meta.encrypted_data_key.is_none());
svc.decrypt_object(&encrypted, &decrypted, &meta, Some(&customer_key))
.await
.unwrap();
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[test]
fn test_encryption_metadata_roundtrip() {
let meta = EncryptionMetadata {
algorithm: "AES256".to_string(),
nonce: "dGVzdG5vbmNlMTI=".to_string(),
encrypted_data_key: Some("c29tZWtleQ==".to_string()),
kms_key_id: None,
};
let map = meta.to_metadata_map();
let restored = EncryptionMetadata::from_metadata(&map).unwrap();
assert_eq!(restored.algorithm, "AES256");
assert_eq!(restored.nonce, meta.nonce);
assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
}
#[test]
fn test_is_encrypted() {
let mut meta = HashMap::new();
assert!(!EncryptionMetadata::is_encrypted(&meta));
meta.insert("x-amz-server-side-encryption".to_string(), "AES256".to_string());
assert!(EncryptionMetadata::is_encrypted(&meta));
}
}
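A minimal sketch of the SSE-S3 envelope flow using the service above (illustrative; it assumes the file's existing imports and uses placeholder paths and a placeholder master key):
async fn sse_s3_sketch() -> Result<(), CryptoError> {
let svc = EncryptionService::new([0u8; 32], None); // placeholder master key, no KMS
let ctx = EncryptionContext {
algorithm: SseAlgorithm::Aes256,
kms_key_id: None,
customer_key: None,
};
// Envelope encryption: a fresh data key encrypts the object; the wrapped
// data key and nonce travel in EncryptionMetadata next to the object.
let meta = svc.encrypt_object(Path::new("in.bin"), Path::new("enc.bin"), &ctx).await?;
let headers = meta.to_metadata_map(); // persisted alongside the object
let restored = EncryptionMetadata::from_metadata(&headers).expect("metadata present");
svc.decrypt_object(Path::new("enc.bin"), Path::new("out.bin"), &restored, None).await?;
Ok(())
}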

View File

@@ -1,132 +0,0 @@
use md5::{Digest, Md5};
use sha2::Sha256;
use std::io::Read;
use std::path::Path;
const CHUNK_SIZE: usize = 65536;
pub fn md5_file(path: &Path) -> std::io::Result<String> {
let mut file = std::fs::File::open(path)?;
let mut hasher = Md5::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
pub fn md5_bytes(data: &[u8]) -> String {
let mut hasher = Md5::new();
hasher.update(data);
format!("{:x}", hasher.finalize())
}
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
let mut file = std::fs::File::open(path)?;
let mut hasher = Sha256::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
pub fn sha256_bytes(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
format!("{:x}", hasher.finalize())
}
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
let mut file = std::fs::File::open(path)?;
let mut md5_hasher = Md5::new();
let mut sha_hasher = Sha256::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
md5_hasher.update(&buf[..n]);
sha_hasher.update(&buf[..n]);
}
Ok((
format!("{:x}", md5_hasher.finalize()),
format!("{:x}", sha_hasher.finalize()),
))
}
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || md5_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || sha256_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || md5_sha256_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
#[test]
fn test_md5_bytes() {
assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
assert_eq!(md5_bytes(b"hello"), "5d41402abc4b2a76b9719d911017c592");
}
#[test]
fn test_sha256_bytes() {
let hash = sha256_bytes(b"hello");
assert_eq!(hash, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824");
}
#[test]
fn test_md5_file() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let hash = md5_file(tmp.path()).unwrap();
assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
}
#[test]
fn test_md5_sha256_file() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let (md5, sha) = md5_sha256_file(tmp.path()).unwrap();
assert_eq!(md5, "5d41402abc4b2a76b9719d911017c592");
assert_eq!(sha, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824");
}
#[tokio::test]
async fn test_md5_file_async() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let hash = md5_file_async(tmp.path()).await.unwrap();
assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
}
}
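For example, computing an S3-style ETag and a SHA-256 checksum in a single pass over the file (a sketch; the caller and path are hypothetical):
fn etag_sketch(path: &Path) -> std::io::Result<()> {
let (md5_hex, sha256_hex) = md5_sha256_file(path)?; // one read, both digests
println!("etag=\"{}\" sha256={}", md5_hex, sha256_hex);
Ok(())
}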

View File

@@ -1,453 +0,0 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use chrono::{DateTime, Utc};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
use crate::aes_gcm::CryptoError;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KmsKey {
#[serde(rename = "KeyId")]
pub key_id: String,
#[serde(rename = "Arn")]
pub arn: String,
#[serde(rename = "Description")]
pub description: String,
#[serde(rename = "CreationDate")]
pub creation_date: DateTime<Utc>,
#[serde(rename = "Enabled")]
pub enabled: bool,
#[serde(rename = "KeyState")]
pub key_state: String,
#[serde(rename = "KeyUsage")]
pub key_usage: String,
#[serde(rename = "KeySpec")]
pub key_spec: String,
#[serde(rename = "EncryptedKeyMaterial")]
pub encrypted_key_material: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KmsStore {
keys: Vec<KmsKey>,
}
pub struct KmsService {
keys_path: PathBuf,
master_key: Arc<RwLock<[u8; 32]>>,
keys: Arc<RwLock<Vec<KmsKey>>>,
}
impl KmsService {
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
let keys_path = keys_dir.join("kms_keys.json");
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
let keys = if keys_path.exists() {
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
let store: KmsStore = serde_json::from_str(&data)
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
store.keys
} else {
Vec::new()
};
Ok(Self {
keys_path,
master_key: Arc::new(RwLock::new(master_key)),
keys: Arc::new(RwLock::new(keys)),
})
}
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
if path.exists() {
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
let decoded = B64.decode(encoded.trim()).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
})?;
if decoded.len() != 32 {
return Err(CryptoError::InvalidKeySize(decoded.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&decoded);
Ok(key)
} else {
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
let encoded = B64.encode(key);
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
Ok(key)
}
}
// Encrypt key material under the master key; stored as base64(nonce || ciphertext).
fn encrypt_key_material(
master_key: &[u8; 32],
plaintext_key: &[u8],
) -> Result<String, CryptoError> {
let cipher = Aes256Gcm::new(master_key.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let ciphertext = cipher
.encrypt(nonce, plaintext_key)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut combined = Vec::with_capacity(12 + ciphertext.len());
combined.extend_from_slice(&nonce_bytes);
combined.extend_from_slice(&ciphertext);
Ok(B64.encode(&combined))
}
fn decrypt_key_material(
master_key: &[u8; 32],
encrypted_b64: &str,
) -> Result<Vec<u8>, CryptoError> {
let combined = B64.decode(encrypted_b64).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
})?;
if combined.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Encrypted key material too short".to_string(),
));
}
let (nonce_bytes, ciphertext) = combined.split_at(12);
let cipher = Aes256Gcm::new(master_key.into());
let nonce = Nonce::from_slice(nonce_bytes);
cipher
.decrypt(nonce, ciphertext)
.map_err(|_| CryptoError::DecryptionFailed(0))
}
async fn save(&self) -> Result<(), CryptoError> {
let keys = self.keys.read().await;
let store = KmsStore {
keys: keys.clone(),
};
let json = serde_json::to_string_pretty(&store)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
Ok(())
}
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
let key_id = uuid::Uuid::new_v4().to_string();
let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);
let mut plaintext_key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut plaintext_key);
let master = self.master_key.read().await;
let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;
let kms_key = KmsKey {
key_id: key_id.clone(),
arn,
description: description.to_string(),
creation_date: Utc::now(),
enabled: true,
key_state: "Enabled".to_string(),
key_usage: "ENCRYPT_DECRYPT".to_string(),
key_spec: "SYMMETRIC_DEFAULT".to_string(),
encrypted_key_material: encrypted,
};
self.keys.write().await.push(kms_key.clone());
self.save().await?;
Ok(kms_key)
}
pub async fn list_keys(&self) -> Vec<KmsKey> {
self.keys.read().await.clone()
}
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
let keys = self.keys.read().await;
keys.iter()
.find(|k| k.key_id == key_id || k.arn == key_id)
.cloned()
}
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
let len_before = keys.len();
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
let removed = keys.len() < len_before;
drop(keys);
if removed {
self.save().await?;
}
Ok(removed)
}
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
key.enabled = true;
key.key_state = "Enabled".to_string();
drop(keys);
self.save().await?;
Ok(true)
} else {
Ok(false)
}
}
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
key.enabled = false;
key.key_state = "Disabled".to_string();
drop(keys);
self.save().await?;
Ok(true)
} else {
Ok(false)
}
}
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
let keys = self.keys.read().await;
let key = keys
.iter()
.find(|k| k.key_id == key_id || k.arn == key_id)
.ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;
if !key.enabled {
return Err(CryptoError::EncryptionFailed(
"KMS key is disabled".to_string(),
));
}
let master = self.master_key.read().await;
Self::decrypt_key_material(&master, &key.encrypted_key_material)
}
pub async fn encrypt_data(
&self,
key_id: &str,
plaintext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
let data_key = self.decrypt_data_key(key_id).await?;
if data_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(data_key.len()));
}
let key_arr: [u8; 32] = data_key.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let ciphertext = cipher
.encrypt(nonce, plaintext)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut result = Vec::with_capacity(12 + ciphertext.len());
result.extend_from_slice(&nonce_bytes);
result.extend_from_slice(&ciphertext);
Ok(result)
}
pub async fn decrypt_data(
&self,
key_id: &str,
ciphertext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
if ciphertext.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Ciphertext too short".to_string(),
));
}
let data_key = self.decrypt_data_key(key_id).await?;
if data_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(data_key.len()));
}
let key_arr: [u8; 32] = data_key.try_into().unwrap();
let (nonce_bytes, ct) = ciphertext.split_at(12);
let cipher = Aes256Gcm::new(&key_arr.into());
let nonce = Nonce::from_slice(nonce_bytes);
cipher
.decrypt(nonce, ct)
.map_err(|_| CryptoError::DecryptionFailed(0))
}
pub async fn generate_data_key(
&self,
key_id: &str,
num_bytes: usize,
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
let kms_key = self.decrypt_data_key(key_id).await?;
if kms_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(kms_key.len()));
}
let mut plaintext_key = vec![0u8; num_bytes];
rand::thread_rng().fill_bytes(&mut plaintext_key);
let key_arr: [u8; 32] = kms_key.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, plaintext_key.as_slice())
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut wrapped = Vec::with_capacity(12 + encrypted.len());
wrapped.extend_from_slice(&nonce_bytes);
wrapped.extend_from_slice(&encrypted);
Ok((plaintext_key, wrapped))
}
}
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
let path = keys_dir.join("master.key");
if path.exists() {
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
let decoded = B64.decode(encoded.trim()).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
})?;
if decoded.len() != 32 {
return Err(CryptoError::InvalidKeySize(decoded.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&decoded);
Ok(key)
} else {
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
let encoded = B64.encode(key);
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
Ok(key)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_create_and_list_keys() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("test key").await.unwrap();
assert!(key.enabled);
assert_eq!(key.description, "test key");
assert!(!key.key_id.is_empty());
let keys = kms.list_keys().await;
assert_eq!(keys.len(), 1);
assert_eq!(keys[0].key_id, key.key_id);
}
#[tokio::test]
async fn test_enable_disable_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("toggle").await.unwrap();
assert!(key.enabled);
kms.disable_key(&key.key_id).await.unwrap();
let k = kms.get_key(&key.key_id).await.unwrap();
assert!(!k.enabled);
kms.enable_key(&key.key_id).await.unwrap();
let k = kms.get_key(&key.key_id).await.unwrap();
assert!(k.enabled);
}
#[tokio::test]
async fn test_delete_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("doomed").await.unwrap();
assert!(kms.delete_key(&key.key_id).await.unwrap());
assert!(kms.get_key(&key.key_id).await.is_none());
assert_eq!(kms.list_keys().await.len(), 0);
}
#[tokio::test]
async fn test_encrypt_decrypt_data() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("enc-key").await.unwrap();
let plaintext = b"Hello, KMS!";
let ciphertext = kms.encrypt_data(&key.key_id, plaintext).await.unwrap();
assert_ne!(&ciphertext, plaintext);
let decrypted = kms.decrypt_data(&key.key_id, &ciphertext).await.unwrap();
assert_eq!(decrypted, plaintext);
}
#[tokio::test]
async fn test_generate_data_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("data-key-gen").await.unwrap();
let (plaintext, wrapped) = kms.generate_data_key(&key.key_id, 32).await.unwrap();
assert_eq!(plaintext.len(), 32);
assert!(wrapped.len() > 32);
}
#[tokio::test]
async fn test_disabled_key_cannot_encrypt() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("disabled").await.unwrap();
kms.disable_key(&key.key_id).await.unwrap();
let result = kms.encrypt_data(&key.key_id, b"test").await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_persistence_across_reload() {
let dir = tempfile::tempdir().unwrap();
let key_id = {
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("persistent").await.unwrap();
key.key_id
};
let kms2 = KmsService::new(dir.path()).await.unwrap();
let key = kms2.get_key(&key_id).await;
assert!(key.is_some());
assert_eq!(key.unwrap().description, "persistent");
}
#[tokio::test]
async fn test_master_key_roundtrip() {
let dir = tempfile::tempdir().unwrap();
let key1 = load_or_create_master_key(dir.path()).await.unwrap();
let key2 = load_or_create_master_key(dir.path()).await.unwrap();
assert_eq!(key1, key2);
}
}
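A minimal sketch of the data-key flow against the service above (illustrative; the directory is a placeholder):
async fn kms_sketch() -> Result<(), CryptoError> {
let kms = KmsService::new(Path::new("./data/.keys")).await?; // placeholder keys dir
let key = kms.create_key("bucket encryption").await?;
// generate_data_key returns (plaintext key, wrapped key); only the
// wrapped form should ever be persisted with the object.
let (plaintext_dk, wrapped_dk) = kms.generate_data_key(&key.key_id, 32).await?;
assert_eq!(plaintext_dk.len(), 32);
assert!(wrapped_dk.len() > 32); // nonce + ciphertext + tag overhead
Ok(())
}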

View File

@@ -1,4 +0,0 @@
pub mod hashing;
pub mod aes_gcm;
pub mod kms;
pub mod encryption;

View File

@@ -1,39 +0,0 @@
[package]
name = "myfsio-server"
version = "0.1.0"
edition = "2021"
[dependencies]
myfsio-common = { path = "../myfsio-common" }
myfsio-auth = { path = "../myfsio-auth" }
myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
base64 = { workspace = true }
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
hyper = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
http-body-util = "0.1"
percent-encoding = { workspace = true }
quick-xml = { workspace = true }
mime_guess = "2"
crc32fast = { workspace = true }
duckdb = { workspace = true }
roxmltree = "0.20"
parking_lot = { workspace = true }
regex = "1"
[dev-dependencies]
tempfile = "3"
tower = { workspace = true, features = ["util"] }

View File

@@ -1,117 +0,0 @@
use std::net::SocketAddr;
use std::path::PathBuf;
#[derive(Debug, Clone)]
pub struct ServerConfig {
pub bind_addr: SocketAddr,
pub storage_root: PathBuf,
pub region: String,
pub iam_config_path: PathBuf,
pub sigv4_timestamp_tolerance_secs: u64,
pub presigned_url_min_expiry: u64,
pub presigned_url_max_expiry: u64,
pub secret_key: Option<String>,
pub encryption_enabled: bool,
pub kms_enabled: bool,
pub gc_enabled: bool,
pub integrity_enabled: bool,
pub metrics_enabled: bool,
pub lifecycle_enabled: bool,
pub website_hosting_enabled: bool,
}
impl ServerConfig {
pub fn from_env() -> Self {
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
let port: u16 = std::env::var("PORT")
.unwrap_or_else(|_| "5000".to_string())
.parse()
.unwrap_or(5000);
let storage_root = std::env::var("STORAGE_ROOT")
.unwrap_or_else(|_| "./data".to_string());
let region = std::env::var("AWS_REGION")
.unwrap_or_else(|_| "us-east-1".to_string());
let storage_path = PathBuf::from(&storage_root);
let iam_config_path = std::env::var("IAM_CONFIG")
.map(PathBuf::from)
.unwrap_or_else(|_| {
storage_path.join(".myfsio.sys").join("config").join("iam.json")
});
let sigv4_timestamp_tolerance_secs: u64 = std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
.unwrap_or_else(|_| "900".to_string())
.parse()
.unwrap_or(900);
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
.unwrap_or_else(|_| "1".to_string())
.parse()
.unwrap_or(1);
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
.unwrap_or_else(|_| "604800".to_string())
.parse()
.unwrap_or(604800);
// Prefer SECRET_KEY from the environment (ignoring empty values and the
// well-known dev default); otherwise fall back to the on-disk .secret file.
let secret_key = {
let env_key = std::env::var("SECRET_KEY").ok();
match env_key {
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
_ => {
let secret_file = storage_path
.join(".myfsio.sys")
.join("config")
.join(".secret");
std::fs::read_to_string(&secret_file).ok().map(|s| s.trim().to_string())
}
}
};
let encryption_enabled = std::env::var("ENCRYPTION_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let kms_enabled = std::env::var("KMS_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let gc_enabled = std::env::var("GC_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let integrity_enabled = std::env::var("INTEGRITY_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let metrics_enabled = std::env::var("OPERATION_METRICS_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let lifecycle_enabled = std::env::var("LIFECYCLE_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
let website_hosting_enabled = std::env::var("WEBSITE_HOSTING_ENABLED")
.unwrap_or_else(|_| "false".to_string())
.to_lowercase() == "true";
Self {
bind_addr: SocketAddr::new(host.parse().unwrap(), port),
storage_root: storage_path,
region,
iam_config_path,
sigv4_timestamp_tolerance_secs,
presigned_url_min_expiry,
presigned_url_max_expiry,
secret_key,
encryption_enabled,
kms_enabled,
gc_enabled,
integrity_enabled,
metrics_enabled,
lifecycle_enabled,
website_hosting_enabled,
}
}
}
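A short sketch of configuring the server through the environment variables read by from_env above (illustrative values only):
fn config_sketch() {
std::env::set_var("HOST", "0.0.0.0");
std::env::set_var("PORT", "5000");
std::env::set_var("ENCRYPTION_ENABLED", "true");
let cfg = ServerConfig::from_env();
assert_eq!(cfg.bind_addr.port(), 5000);
assert!(cfg.encryption_enabled);
}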

View File

@@ -1,704 +0,0 @@
use axum::body::Body;
use axum::extract::{Path, State};
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use axum::Extension;
use myfsio_common::types::Principal;
use myfsio_storage::traits::StorageEngine;
use crate::services::site_registry::{PeerSite, SiteInfo};
use crate::services::website_domains::{is_valid_domain, normalize_domain};
use crate::state::AppState;
fn json_response(status: StatusCode, value: serde_json::Value) -> Response {
(
status,
[("content-type", "application/json")],
value.to_string(),
)
.into_response()
}
fn json_error(code: &str, message: &str, status: StatusCode) -> Response {
json_response(
status,
serde_json::json!({"error": {"code": code, "message": message}}),
)
}
fn require_admin(principal: &Principal) -> Option<Response> {
if !principal.is_admin {
return Some(json_error("AccessDenied", "Admin access required", StatusCode::FORBIDDEN));
}
None
}
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
let bytes = http_body_util::BodyExt::collect(body).await.ok()?.to_bytes();
serde_json::from_slice(&bytes).ok()
}
fn validate_site_id(site_id: &str) -> Option<String> {
if site_id.is_empty() || site_id.len() > 63 {
return Some("site_id must be 1-63 characters".to_string());
}
let first = site_id.chars().next().unwrap();
if !first.is_ascii_alphanumeric() {
return Some("site_id must start with alphanumeric".to_string());
}
if !site_id.chars().all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_') {
return Some("site_id must contain only alphanumeric, hyphens, underscores".to_string());
}
None
}
fn validate_endpoint(endpoint: &str) -> Option<String> {
if !endpoint.starts_with("http://") && !endpoint.starts_with("https://") {
return Some("Endpoint must be http or https URL".to_string());
}
None
}
fn validate_region(region: &str) -> Option<String> {
let re = regex::Regex::new(r"^[a-z]{2,}-[a-z]+-\d+$").unwrap();
if !re.is_match(region) {
return Some("Region must match format like us-east-1".to_string());
}
None
}
fn validate_priority(priority: i64) -> Option<String> {
if priority < 0 || priority > 1000 {
return Some("Priority must be between 0 and 1000".to_string());
}
None
}
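// Illustrative expectations for the validators above (not part of the original file):
//   validate_site_id("edge-01")       -> None (valid)
//   validate_site_id("-bad")          -> Some(...) (must start alphanumeric)
//   validate_endpoint("https://a.io") -> None (valid)
//   validate_region("us-east-1")      -> None (valid)
//   validate_priority(1001)           -> Some(...) (0..=1000 only)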
pub async fn get_local_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
if let Some(ref registry) = state.site_registry {
if let Some(local) = registry.get_local_site() {
return json_response(StatusCode::OK, serde_json::to_value(&local).unwrap());
}
}
json_error("NotFound", "Local site not configured", StatusCode::NOT_FOUND)
}
pub async fn update_local_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
};
let payload = match read_json_body(body).await {
Some(v) => v,
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
};
let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
Some(s) => s.to_string(),
None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
};
if let Some(err) = validate_site_id(&site_id) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
let endpoint = payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or("").to_string();
if !endpoint.is_empty() {
if let Some(err) = validate_endpoint(&endpoint) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
if let Some(err) = validate_priority(p) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
if let Some(err) = validate_region(r) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
let existing = registry.get_local_site();
let site = SiteInfo {
site_id: site_id.clone(),
endpoint,
region: payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string(),
priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100) as i32,
display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
created_at: existing.and_then(|e| e.created_at),
};
registry.set_local_site(site.clone());
json_response(StatusCode::OK, serde_json::to_value(&site).unwrap())
}
pub async fn list_all_sites(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_response(StatusCode::OK, serde_json::json!({"local": null, "peers": [], "total_peers": 0})),
};
let local = registry.get_local_site();
let peers = registry.list_peers();
json_response(StatusCode::OK, serde_json::json!({
"local": local,
"peers": peers,
"total_peers": peers.len(),
}))
}
pub async fn register_peer_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
};
let payload = match read_json_body(body).await {
Some(v) => v,
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
};
let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
Some(s) => s.to_string(),
None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
};
if let Some(err) = validate_site_id(&site_id) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
let endpoint = match payload.get("endpoint").and_then(|v| v.as_str()) {
Some(e) => e.to_string(),
None => return json_error("ValidationError", "endpoint is required", StatusCode::BAD_REQUEST),
};
if let Some(err) = validate_endpoint(&endpoint) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
let region = payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string();
if let Some(err) = validate_region(&region) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
let priority = payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100);
if let Some(err) = validate_priority(priority) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
if registry.get_peer(&site_id).is_some() {
return json_error("AlreadyExists", &format!("Peer site '{}' already exists", site_id), StatusCode::CONFLICT);
}
let peer = PeerSite {
site_id: site_id.clone(),
endpoint,
region,
priority: priority as i32,
display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()),
created_at: Some(chrono::Utc::now().to_rfc3339()),
is_healthy: false,
last_health_check: None,
};
registry.add_peer(peer.clone());
json_response(StatusCode::CREATED, serde_json::to_value(&peer).unwrap())
}
pub async fn get_peer_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(site_id): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
};
match registry.get_peer(&site_id) {
Some(peer) => json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap()),
None => json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
}
}
pub async fn update_peer_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(site_id): Path<String>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
};
let existing = match registry.get_peer(&site_id) {
Some(p) => p,
None => return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
};
let payload = match read_json_body(body).await {
Some(v) => v,
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
};
if let Some(ep) = payload.get("endpoint").and_then(|v| v.as_str()) {
if let Some(err) = validate_endpoint(ep) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
if let Some(err) = validate_priority(p) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
if let Some(err) = validate_region(r) {
return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
}
}
let peer = PeerSite {
site_id: site_id.clone(),
endpoint: payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or(&existing.endpoint).to_string(),
region: payload.get("region").and_then(|v| v.as_str()).unwrap_or(&existing.region).to_string(),
priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(existing.priority as i64) as i32,
display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&existing.display_name).to_string(),
connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()).or(existing.connection_id),
created_at: existing.created_at,
is_healthy: existing.is_healthy,
last_health_check: existing.last_health_check,
};
registry.update_peer(peer.clone());
json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap())
}
pub async fn delete_peer_site(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(site_id): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
};
if !registry.delete_peer(&site_id) {
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
}
StatusCode::NO_CONTENT.into_response()
}
pub async fn check_peer_health(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(site_id): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
};
if registry.get_peer(&site_id).is_none() {
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
}
json_response(StatusCode::OK, serde_json::json!({
"site_id": site_id,
"is_healthy": false,
"error": "Health check not implemented in standalone mode",
"checked_at": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
}))
}
pub async fn get_topology(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_response(StatusCode::OK, serde_json::json!({"sites": [], "total": 0, "healthy_count": 0})),
};
let local = registry.get_local_site();
let peers = registry.list_peers();
let mut sites: Vec<serde_json::Value> = Vec::new();
if let Some(l) = local {
let mut v = serde_json::to_value(&l).unwrap();
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(true));
v.as_object_mut().unwrap().insert("is_healthy".to_string(), serde_json::json!(true));
sites.push(v);
}
for p in &peers {
let mut v = serde_json::to_value(p).unwrap();
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(false));
sites.push(v);
}
sites.sort_by_key(|s| s.get("priority").and_then(|v| v.as_i64()).unwrap_or(100));
let healthy_count = sites.iter().filter(|s| s.get("is_healthy").and_then(|v| v.as_bool()).unwrap_or(false)).count();
json_response(StatusCode::OK, serde_json::json!({
"sites": sites,
"total": sites.len(),
"healthy_count": healthy_count,
}))
}
pub async fn check_bidirectional_status(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(site_id): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let registry = match &state.site_registry {
Some(r) => r,
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
};
if registry.get_peer(&site_id).is_none() {
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
}
let local = registry.get_local_site();
json_response(StatusCode::OK, serde_json::json!({
"site_id": site_id,
"local_site_id": local.as_ref().map(|l| &l.site_id),
"local_endpoint": local.as_ref().map(|l| &l.endpoint),
"local_bidirectional_rules": [],
"local_site_sync_enabled": false,
"remote_status": null,
"issues": [{"code": "NOT_IMPLEMENTED", "message": "Bidirectional status check not implemented in standalone mode", "severity": "warning"}],
"is_fully_configured": false,
}))
}
pub async fn iam_list_users(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let users = state.iam.list_users().await;
json_response(StatusCode::OK, serde_json::json!({"users": users}))
}
pub async fn iam_get_user(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.get_user(&identifier).await {
Some(user) => json_response(StatusCode::OK, user),
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
}
}
pub async fn iam_get_user_policies(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.get_user_policies(&identifier) {
Some(policies) => json_response(StatusCode::OK, serde_json::json!({"policies": policies})),
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
}
}
pub async fn iam_create_access_key(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.create_access_key(&identifier) {
Ok(result) => json_response(StatusCode::CREATED, result),
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
}
}
pub async fn iam_delete_access_key(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path((_identifier, access_key)): Path<(String, String)>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.delete_access_key(&access_key) {
Ok(()) => StatusCode::NO_CONTENT.into_response(),
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
}
}
pub async fn iam_disable_user(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.set_user_enabled(&identifier, false).await {
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "disabled"})),
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
}
}
pub async fn iam_enable_user(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match state.iam.set_user_enabled(&identifier, true).await {
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "enabled"})),
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
}
}
pub async fn list_website_domains(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let store = match &state.website_domains {
Some(s) => s,
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
};
json_response(StatusCode::OK, serde_json::json!(store.list_all()))
}
pub async fn create_website_domain(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let store = match &state.website_domains {
Some(s) => s,
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
};
let payload = match read_json_body(body).await {
Some(v) => v,
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
};
let domain = normalize_domain(payload.get("domain").and_then(|v| v.as_str()).unwrap_or(""));
if domain.is_empty() {
return json_error("ValidationError", "domain is required", StatusCode::BAD_REQUEST);
}
if !is_valid_domain(&domain) {
return json_error("ValidationError", &format!("Invalid domain: '{}'", domain), StatusCode::BAD_REQUEST);
}
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
if bucket.is_empty() {
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
}
match state.storage.bucket_exists(&bucket).await {
Ok(true) => {}
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
}
if store.get_bucket(&domain).is_some() {
return json_error("Conflict", &format!("Domain '{}' is already mapped", domain), StatusCode::CONFLICT);
}
store.set_mapping(&domain, &bucket);
json_response(StatusCode::CREATED, serde_json::json!({"domain": domain, "bucket": bucket}))
}
pub async fn get_website_domain(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(domain): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let store = match &state.website_domains {
Some(s) => s,
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
};
let domain = normalize_domain(&domain);
match store.get_bucket(&domain) {
Some(bucket) => json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket})),
None => json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND),
}
}
pub async fn update_website_domain(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(domain): Path<String>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let store = match &state.website_domains {
Some(s) => s,
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
};
let domain = normalize_domain(&domain);
let payload = match read_json_body(body).await {
Some(v) => v,
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
};
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
if bucket.is_empty() {
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
}
match state.storage.bucket_exists(&bucket).await {
Ok(true) => {}
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
}
if store.get_bucket(&domain).is_none() {
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
}
store.set_mapping(&domain, &bucket);
json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket}))
}
pub async fn delete_website_domain(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
Path(domain): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let store = match &state.website_domains {
Some(s) => s,
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
};
let domain = normalize_domain(&domain);
if !store.delete_mapping(&domain) {
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
}
StatusCode::NO_CONTENT.into_response()
}
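Both create_website_domain above and update_website_domain below read a small JSON body: create requires "domain" and "bucket", while update takes only "bucket" (the domain comes from the path). A sketch of the create payload, with hypothetical values; the domain is normalized and validated server-side, and the bucket must already exist:

let payload = serde_json::json!({
    "domain": "static.example.com", // hypothetical domain
    "bucket": "website-assets",     // hypothetical; must name an existing bucket
});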
#[derive(serde::Deserialize, Default)]
pub struct PaginationQuery {
pub limit: Option<usize>,
pub offset: Option<usize>,
}
pub async fn gc_status(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match &state.gc {
Some(gc) => json_response(StatusCode::OK, gc.status().await),
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})),
}
}
pub async fn gc_run(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let gc = match &state.gc {
Some(gc) => gc,
None => return json_error("InvalidRequest", "GC is not enabled", StatusCode::BAD_REQUEST),
};
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
match gc.run_now(dry_run).await {
Ok(result) => json_response(StatusCode::OK, result),
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
}
}
pub async fn gc_history(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match &state.gc {
Some(gc) => json_response(StatusCode::OK, serde_json::json!({"executions": gc.history().await})),
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
}
}
pub async fn integrity_status(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match &state.integrity {
Some(checker) => json_response(StatusCode::OK, checker.status().await),
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})),
}
}
pub async fn integrity_run(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
body: Body,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
let checker = match &state.integrity {
Some(c) => c,
None => return json_error("InvalidRequest", "Integrity checker is not enabled", StatusCode::BAD_REQUEST),
};
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
let auto_heal = payload.get("auto_heal").and_then(|v| v.as_bool()).unwrap_or(false);
match checker.run_now(dry_run, auto_heal).await {
Ok(result) => json_response(StatusCode::OK, result),
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
}
}
pub async fn integrity_history(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) { return err; }
match &state.integrity {
Some(checker) => json_response(StatusCode::OK, serde_json::json!({"executions": checker.history().await})),
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
}
}
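gc_run and integrity_run both tolerate an empty body and default every flag to false, so the JSON payload is purely opt-in. A sketch of the two request bodies:

let gc_request = serde_json::json!({ "dry_run": true });
let integrity_request = serde_json::json!({ "dry_run": true, "auto_heal": false });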

File diff suppressed because it is too large


@@ -1,278 +0,0 @@
use axum::body::Body;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use serde_json::json;
use crate::state::AppState;
fn json_ok(value: serde_json::Value) -> Response {
(
StatusCode::OK,
[("content-type", "application/json")],
value.to_string(),
)
.into_response()
}
fn json_err(status: StatusCode, msg: &str) -> Response {
(
status,
[("content-type", "application/json")],
json!({"error": msg}).to_string(),
)
.into_response()
}
pub async fn list_keys(State(state): State<AppState>) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
let keys = kms.list_keys().await;
let keys_json: Vec<serde_json::Value> = keys
.iter()
.map(|k| {
json!({
"KeyId": k.key_id,
"Arn": k.arn,
"Description": k.description,
"CreationDate": k.creation_date.to_rfc3339(),
"Enabled": k.enabled,
"KeyState": k.key_state,
"KeyUsage": k.key_usage,
"KeySpec": k.key_spec,
})
})
.collect();
json_ok(json!({"keys": keys_json}))
}
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(c) => c.to_bytes(),
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
};
let description = if body_bytes.is_empty() {
String::new()
} else {
match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
Ok(v) => v
.get("Description")
.or_else(|| v.get("description"))
.and_then(|d| d.as_str())
.unwrap_or("")
.to_string(),
Err(_) => String::new(),
}
};
match kms.create_key(&description).await {
Ok(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn get_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
match kms.get_key(&key_id).await {
Some(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
"KeyUsage": key.key_usage,
"KeySpec": key.key_spec,
})),
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
}
}
pub async fn delete_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
match kms.delete_key(&key_id).await {
Ok(true) => StatusCode::NO_CONTENT.into_response(),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn enable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
match kms.enable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "enabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn disable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
match kms.disable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "disabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(c) => c.to_bytes(),
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
};
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
Ok(v) => v,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
};
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
Some(k) => k,
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
};
let plaintext_b64 = match req.get("Plaintext").and_then(|v| v.as_str()) {
Some(p) => p,
None => return json_err(StatusCode::BAD_REQUEST, "Missing Plaintext"),
};
let plaintext = match B64.decode(plaintext_b64) {
Ok(p) => p,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64 Plaintext"),
};
match kms.encrypt_data(key_id, &plaintext).await {
Ok(ct) => json_ok(json!({
"KeyId": key_id,
"CiphertextBlob": B64.encode(&ct),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(c) => c.to_bytes(),
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
};
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
Ok(v) => v,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
};
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
Some(k) => k,
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
};
let ct_b64 = match req.get("CiphertextBlob").and_then(|v| v.as_str()) {
Some(c) => c,
None => return json_err(StatusCode::BAD_REQUEST, "Missing CiphertextBlob"),
};
let ciphertext = match B64.decode(ct_b64) {
Ok(c) => c,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64"),
};
match kms.decrypt_data(key_id, &ciphertext).await {
Ok(pt) => json_ok(json!({
"KeyId": key_id,
"Plaintext": B64.encode(&pt),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
let kms = match &state.kms {
Some(k) => k,
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
};
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(c) => c.to_bytes(),
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
};
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
Ok(v) => v,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
};
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
Some(k) => k,
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
};
let num_bytes = req
.get("NumberOfBytes")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if num_bytes < 1 || num_bytes > 1024 {
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
}
match kms.generate_data_key(key_id, num_bytes).await {
Ok((plaintext, wrapped)) => json_ok(json!({
"KeyId": key_id,
"Plaintext": B64.encode(&plaintext),
"CiphertextBlob": B64.encode(&wrapped),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
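The Encrypt, Decrypt, and GenerateDataKey handlers exchange binary data as base64: Plaintext in and CiphertextBlob out for encryption, the reverse for decryption. A minimal sketch of the client-side encoding contract, reusing this module's B64 engine import; the key id is hypothetical:

fn example_encrypt_request() -> serde_json::Value {
    let plaintext = b"hello kms";
    json!({
        "KeyId": "11111111-2222-3333-4444-555555555555", // hypothetical key id
        "Plaintext": B64.encode(plaintext), // clients must base64-encode before sending
    })
}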

File diff suppressed because it is too large


@@ -1,552 +0,0 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use axum::body::Body;
use axum::http::{HeaderMap, HeaderName, StatusCode};
use axum::response::{IntoResponse, Response};
use base64::Engine;
use bytes::Bytes;
use crc32fast::Hasher;
use duckdb::types::ValueRef;
use duckdb::Connection;
use futures::stream;
use http_body_util::BodyExt;
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
// Force-link the Windows Restart Manager library (Rstrtmgr.lib); the empty
// extern block exists only to carry the link directive.
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}
const CHUNK_SIZE: usize = 65_536;
pub async fn post_select_object_content(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
body: Body,
) -> Response {
if let Some(resp) = require_xml_content_type(headers) {
return resp;
}
let body_bytes = match body.collect().await {
Ok(collected) => collected.to_bytes(),
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::MalformedXML,
"Unable to parse XML document",
));
}
};
let request = match parse_select_request(&body_bytes) {
Ok(r) => r,
Err(err) => return s3_error_response(err),
};
let object_path = match state.storage.get_object_path(bucket, key).await {
Ok(path) => path,
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::NoSuchKey,
"Object not found",
));
}
};
let join_res = tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
let chunks = match join_res {
Ok(Ok(chunks)) => chunks,
Ok(Err(message)) => {
return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
}
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::InternalError,
"SelectObjectContent execution failed",
));
}
};
let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
for chunk in chunks {
events.push(Bytes::from(encode_select_event("Records", &chunk)));
}
let stats_payload = build_stats_xml(0, bytes_returned);
events.push(Bytes::from(encode_select_event("Stats", stats_payload.as_bytes())));
events.push(Bytes::from(encode_select_event("End", b"")));
let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
let body = Body::from_stream(stream);
let mut response = (StatusCode::OK, body).into_response();
response.headers_mut().insert(
HeaderName::from_static("content-type"),
"application/octet-stream".parse().unwrap(),
);
response.headers_mut().insert(
HeaderName::from_static("x-amz-request-charged"),
"requester".parse().unwrap(),
);
response
}
#[derive(Clone)]
struct SelectRequest {
expression: String,
input_format: InputFormat,
output_format: OutputFormat,
}
#[derive(Clone)]
enum InputFormat {
Csv(CsvInputConfig),
Json(JsonInputConfig),
Parquet,
}
#[derive(Clone)]
struct CsvInputConfig {
file_header_info: String,
field_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonInputConfig {
json_type: String,
}
#[derive(Clone)]
enum OutputFormat {
Csv(CsvOutputConfig),
Json(JsonOutputConfig),
}
#[derive(Clone)]
struct CsvOutputConfig {
field_delimiter: String,
record_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonOutputConfig {
record_delimiter: String,
}
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
let xml = String::from_utf8_lossy(payload);
let doc = roxmltree::Document::parse(&xml)
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
let root = doc.root_element();
if root.tag_name().name() != "SelectObjectContentRequest" {
return Err(S3Error::new(
S3ErrorCode::MalformedXML,
"Root element must be SelectObjectContentRequest",
));
}
let expression = child_text(&root, "Expression")
.filter(|v| !v.is_empty())
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
if !expression_type.eq_ignore_ascii_case("SQL") {
return Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"Only SQL expression type is supported",
));
}
let input_node = child(&root, "InputSerialization")
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "InputSerialization is required"))?;
let output_node = child(&root, "OutputSerialization")
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "OutputSerialization is required"))?;
let input_format = parse_input_format(&input_node)?;
let output_format = parse_output_format(&output_node)?;
Ok(SelectRequest {
expression,
input_format,
output_format,
})
}
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(InputFormat::Csv(CsvInputConfig {
file_header_info: child_text(&csv_node, "FileHeaderInfo")
.unwrap_or_else(|| "NONE".to_string())
.to_ascii_uppercase(),
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(InputFormat::Json(JsonInputConfig {
json_type: child_text(&json_node, "Type")
.unwrap_or_else(|| "DOCUMENT".to_string())
.to_ascii_uppercase(),
}));
}
if child(node, "Parquet").is_some() {
return Ok(InputFormat::Parquet);
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"InputSerialization must specify CSV, JSON, or Parquet",
))
}
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(OutputFormat::Csv(CsvOutputConfig {
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
record_delimiter: child_text(&csv_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(OutputFormat::Json(JsonOutputConfig {
record_delimiter: child_text(&json_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
}));
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"OutputSerialization must specify CSV or JSON",
))
}
fn child<'a, 'input>(node: &'a roxmltree::Node<'a, 'input>, name: &str) -> Option<roxmltree::Node<'a, 'input>> {
node.children()
.find(|n| n.is_element() && n.tag_name().name() == name)
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
child(node, name)
.and_then(|n| n.text())
.map(|s| s.to_string())
}
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
let conn = Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;
load_input_table(&conn, &path, &request.input_format)?;
let expression = request
.expression
.replace("s3object", "data")
.replace("S3Object", "data");
let mut stmt = conn
.prepare(&expression)
.map_err(|e| format!("SQL execution error: {}", e))?;
let mut rows = stmt
.query([])
.map_err(|e| format!("SQL execution error: {}", e))?;
let stmt_ref = rows
.as_ref()
.ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
let col_count = stmt_ref.column_count();
let mut columns: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let name = stmt_ref
.column_name(i)
.map(|s| s.to_string())
.unwrap_or_else(|_| format!("_{}", i));
columns.push(name);
}
match request.output_format {
OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
}
}
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
let path_str = path.to_string_lossy().replace('\\', "/");
match input {
        InputFormat::Csv(cfg) => {
            // S3's USE and IGNORE both imply a header row is present; DuckDB's
            // read_csv(header=true) skips it (and, unlike IGNORE, also takes
            // column names from it).
            let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
let quote = normalize_single_char(&cfg.quote_character, '"');
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
sql_escape(&path_str),
if header { "true" } else { "false" },
sql_escape(&delimiter),
sql_escape(&quote)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
}
InputFormat::Json(cfg) => {
let format = if cfg.json_type == "LINES" {
"newline_delimited"
} else {
"array"
};
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
sql_escape(&path_str),
format
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
}
InputFormat::Parquet => {
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
sql_escape(&path_str)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
}
}
Ok(())
}
fn sql_escape(value: &str) -> String {
value.replace('\'', "''")
}
fn normalize_single_char(value: &str, default_char: char) -> String {
value.chars().next().unwrap_or(default_char).to_string()
}
fn collect_csv_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
cfg: CsvOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let delimiter = cfg.field_delimiter;
let record_delimiter = cfg.record_delimiter;
let quote = cfg.quote_character;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
let mut fields: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
if matches!(value, ValueRef::Null) {
fields.push(String::new());
continue;
}
let mut text = value_ref_to_string(value);
if text.contains(&delimiter) || text.contains(&quote) || text.contains(&record_delimiter) {
text = text.replace(&quote, &(quote.clone() + &quote));
text = format!("{}{}{}", quote, text, quote);
}
fields.push(text);
}
buffer.push_str(&fields.join(&delimiter));
buffer.push_str(&record_delimiter);
        while buffer.len() >= CHUNK_SIZE {
            // Split on a char boundary so split_off cannot panic mid-UTF-8.
            let mut split_at = CHUNK_SIZE;
            while !buffer.is_char_boundary(split_at) {
                split_at += 1;
            }
            let rest = buffer.split_off(split_at);
            chunks.push(buffer.into_bytes());
            buffer = rest;
        }
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn collect_json_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
columns: &[String],
cfg: JsonOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let record_delimiter = cfg.record_delimiter;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
let key = columns
.get(i)
.cloned()
.unwrap_or_else(|| format!("_{}", i));
record.insert(key, value_ref_to_json(value));
}
let line = serde_json::to_string(&record)
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
buffer.push_str(&line);
buffer.push_str(&record_delimiter);
        while buffer.len() >= CHUNK_SIZE {
            // Split on a char boundary so split_off cannot panic mid-UTF-8.
            let mut split_at = CHUNK_SIZE;
            while !buffer.is_char_boundary(split_at) {
                split_at += 1;
            }
            let rest = buffer.split_off(split_at);
            chunks.push(buffer.into_bytes());
            buffer = rest;
        }
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn value_ref_to_string(value: ValueRef<'_>) -> String {
match value {
ValueRef::Null => String::new(),
ValueRef::Boolean(v) => v.to_string(),
ValueRef::TinyInt(v) => v.to_string(),
ValueRef::SmallInt(v) => v.to_string(),
ValueRef::Int(v) => v.to_string(),
ValueRef::BigInt(v) => v.to_string(),
ValueRef::UTinyInt(v) => v.to_string(),
ValueRef::USmallInt(v) => v.to_string(),
ValueRef::UInt(v) => v.to_string(),
ValueRef::UBigInt(v) => v.to_string(),
ValueRef::Float(v) => v.to_string(),
ValueRef::Double(v) => v.to_string(),
ValueRef::Decimal(v) => v.to_string(),
ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
_ => format!("{:?}", value),
}
}
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
match value {
ValueRef::Null => serde_json::Value::Null,
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
ValueRef::TinyInt(v) => serde_json::json!(v),
ValueRef::SmallInt(v) => serde_json::json!(v),
ValueRef::Int(v) => serde_json::json!(v),
ValueRef::BigInt(v) => serde_json::json!(v),
ValueRef::UTinyInt(v) => serde_json::json!(v),
ValueRef::USmallInt(v) => serde_json::json!(v),
ValueRef::UInt(v) => serde_json::json!(v),
ValueRef::UBigInt(v) => serde_json::json!(v),
ValueRef::Float(v) => serde_json::json!(v),
ValueRef::Double(v) => serde_json::json!(v),
ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
ValueRef::Blob(v) => serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v)),
_ => serde_json::Value::String(format!("{:?}", value)),
}
}
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
let value = headers
.get("content-type")
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.trim();
if value.is_empty() {
return None;
}
let lowered = value.to_ascii_lowercase();
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
return None;
}
Some(s3_error_response(S3Error::new(
S3ErrorCode::InvalidRequest,
"Content-Type must be application/xml or text/xml",
)))
}
fn s3_error_response(err: S3Error) -> Response {
let status = StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let resource = if err.resource.is_empty() {
"/".to_string()
} else {
err.resource.clone()
};
let body = err
.with_resource(resource)
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
.to_xml();
(
status,
[("content-type", "application/xml")],
body,
)
.into_response()
}
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
format!(
"<Stats><BytesScanned>{}</BytesScanned><BytesProcessed>{}</BytesProcessed><BytesReturned>{}</BytesReturned></Stats>",
bytes_scanned,
bytes_scanned,
bytes_returned
)
}
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
let mut headers = Vec::new();
headers.extend(encode_select_header(":event-type", event_type));
if event_type == "Records" {
headers.extend(encode_select_header(":content-type", "application/octet-stream"));
} else if event_type == "Stats" {
headers.extend(encode_select_header(":content-type", "text/xml"));
}
headers.extend(encode_select_header(":message-type", "event"));
let headers_len = headers.len() as u32;
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
let mut message = Vec::with_capacity(total_len);
let mut prelude = Vec::with_capacity(8);
prelude.extend((total_len as u32).to_be_bytes());
prelude.extend(headers_len.to_be_bytes());
let prelude_crc = crc32(&prelude);
message.extend(prelude);
message.extend(prelude_crc.to_be_bytes());
message.extend(headers);
message.extend(payload);
let msg_crc = crc32(&message);
message.extend(msg_crc.to_be_bytes());
message
}
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
let name_bytes = name.as_bytes();
let value_bytes = value.as_bytes();
let mut header = Vec::with_capacity(1 + name_bytes.len() + 1 + 2 + value_bytes.len());
header.push(name_bytes.len() as u8);
header.extend(name_bytes);
header.push(7);
header.extend((value_bytes.len() as u16).to_be_bytes());
header.extend(value_bytes);
header
}
fn crc32(data: &[u8]) -> u32 {
let mut hasher = Hasher::new();
hasher.update(data);
hasher.finalize()
}
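encode_select_event emits AWS-style event-stream frames: an 8-byte prelude (total length, headers length), a 4-byte prelude CRC, the headers, the payload, and a trailing 4-byte message CRC. A sketch of an in-module check pinning down that layout, assuming it sits alongside the functions above:

#[cfg(test)]
mod framing_tests {
    use super::*;

    #[test]
    fn frame_lengths_are_consistent() {
        let msg = encode_select_event("End", b"");
        let total_len = u32::from_be_bytes(msg[0..4].try_into().unwrap()) as usize;
        let headers_len = u32::from_be_bytes(msg[4..8].try_into().unwrap()) as usize;
        // prelude (8) + prelude CRC (4) + headers + empty payload + message CRC (4)
        assert_eq!(total_len, msg.len());
        assert_eq!(total_len, 16 + headers_len);
    }
}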


@@ -1,73 +0,0 @@
pub mod config;
pub mod handlers;
pub mod middleware;
pub mod services;
pub mod state;
use axum::Router;
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
pub fn create_router(state: state::AppState) -> Router {
let mut router = Router::new()
.route("/", axum::routing::get(handlers::list_buckets))
.route(
"/{bucket}",
axum::routing::put(handlers::create_bucket)
.get(handlers::get_bucket)
.delete(handlers::delete_bucket)
.head(handlers::head_bucket)
.post(handlers::post_bucket),
)
.route(
"/{bucket}/{*key}",
axum::routing::put(handlers::put_object)
.get(handlers::get_object)
.delete(handlers::delete_object)
.head(handlers::head_object)
.post(handlers::post_object),
);
if state.config.kms_enabled {
router = router
.route("/kms/keys", axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key))
.route("/kms/keys/{key_id}", axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key))
.route("/kms/keys/{key_id}/enable", axum::routing::post(handlers::kms::enable_key))
.route("/kms/keys/{key_id}/disable", axum::routing::post(handlers::kms::disable_key))
.route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
.route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
.route("/kms/generate-data-key", axum::routing::post(handlers::kms::generate_data_key));
}
router = router
.route("/admin/site/local", axum::routing::get(handlers::admin::get_local_site).put(handlers::admin::update_local_site))
.route("/admin/site/all", axum::routing::get(handlers::admin::list_all_sites))
.route("/admin/site/peers", axum::routing::post(handlers::admin::register_peer_site))
.route("/admin/site/peers/{site_id}", axum::routing::get(handlers::admin::get_peer_site).put(handlers::admin::update_peer_site).delete(handlers::admin::delete_peer_site))
.route("/admin/site/peers/{site_id}/health", axum::routing::post(handlers::admin::check_peer_health))
.route("/admin/site/topology", axum::routing::get(handlers::admin::get_topology))
.route("/admin/site/peers/{site_id}/bidirectional-status", axum::routing::get(handlers::admin::check_bidirectional_status))
.route("/admin/iam/users", axum::routing::get(handlers::admin::iam_list_users))
.route("/admin/iam/users/{identifier}", axum::routing::get(handlers::admin::iam_get_user))
.route("/admin/iam/users/{identifier}/policies", axum::routing::get(handlers::admin::iam_get_user_policies))
.route("/admin/iam/users/{identifier}/access-keys", axum::routing::post(handlers::admin::iam_create_access_key))
.route("/admin/iam/users/{identifier}/access-keys/{access_key}", axum::routing::delete(handlers::admin::iam_delete_access_key))
.route("/admin/iam/users/{identifier}/disable", axum::routing::post(handlers::admin::iam_disable_user))
.route("/admin/iam/users/{identifier}/enable", axum::routing::post(handlers::admin::iam_enable_user))
.route("/admin/website-domains", axum::routing::get(handlers::admin::list_website_domains).post(handlers::admin::create_website_domain))
.route("/admin/website-domains/{domain}", axum::routing::get(handlers::admin::get_website_domain).put(handlers::admin::update_website_domain).delete(handlers::admin::delete_website_domain))
.route("/admin/gc/status", axum::routing::get(handlers::admin::gc_status))
.route("/admin/gc/run", axum::routing::post(handlers::admin::gc_run))
.route("/admin/gc/history", axum::routing::get(handlers::admin::gc_history))
.route("/admin/integrity/status", axum::routing::get(handlers::admin::integrity_status))
.route("/admin/integrity/run", axum::routing::post(handlers::admin::integrity_run))
.route("/admin/integrity/history", axum::routing::get(handlers::admin::integrity_history));
router
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn(middleware::server_header))
.with_state(state)
}


@@ -1,97 +0,0 @@
use myfsio_server::config::ServerConfig;
use myfsio_server::state::AppState;
#[tokio::main]
async fn main() {
tracing_subscriber::fmt::init();
let config = ServerConfig::from_env();
let bind_addr = config.bind_addr;
tracing::info!("MyFSIO Rust Engine starting on {}", bind_addr);
tracing::info!("Storage root: {}", config.storage_root.display());
tracing::info!("Region: {}", config.region);
tracing::info!(
"Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics: {}",
config.encryption_enabled,
config.kms_enabled,
config.gc_enabled,
config.lifecycle_enabled,
config.integrity_enabled,
config.metrics_enabled
);
let state = if config.encryption_enabled || config.kms_enabled {
AppState::new_with_encryption(config.clone()).await
} else {
AppState::new(config.clone())
};
let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();
if let Some(ref gc) = state.gc {
bg_handles.push(gc.clone().start_background());
tracing::info!("GC background service started");
}
if let Some(ref integrity) = state.integrity {
bg_handles.push(integrity.clone().start_background());
tracing::info!("Integrity checker background service started");
}
if let Some(ref metrics) = state.metrics {
bg_handles.push(metrics.clone().start_background());
tracing::info!("Metrics collector background service started");
}
if config.lifecycle_enabled {
let lifecycle = std::sync::Arc::new(
myfsio_server::services::lifecycle::LifecycleService::new(
state.storage.clone(),
myfsio_server::services::lifecycle::LifecycleConfig::default(),
),
);
bg_handles.push(lifecycle.start_background());
tracing::info!("Lifecycle manager background service started");
}
let app = myfsio_server::create_router(state);
let listener = match tokio::net::TcpListener::bind(bind_addr).await {
Ok(listener) => listener,
Err(err) => {
if err.kind() == std::io::ErrorKind::AddrInUse {
tracing::error!("Port already in use: {}", bind_addr);
} else {
tracing::error!("Failed to bind {}: {}", bind_addr, err);
}
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
};
tracing::info!("Listening on {}", bind_addr);
if let Err(err) = axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await
{
tracing::error!("Server exited with error: {}", err);
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
for handle in bg_handles {
handle.abort();
}
}
async fn shutdown_signal() {
tokio::signal::ctrl_c()
.await
.expect("Failed to listen for Ctrl+C");
tracing::info!("Shutdown signal received");
}
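shutdown_signal only listens for Ctrl+C. A hedged, Unix-only sketch of a variant that also honors SIGTERM (which container runtimes typically send); this is an illustration, not part of the shipped binary:

#[cfg(unix)]
async fn shutdown_signal_unix() {
    use tokio::signal::unix::{signal, SignalKind};
    let mut term = signal(SignalKind::terminate()).expect("failed to install SIGTERM handler");
    tokio::select! {
        _ = tokio::signal::ctrl_c() => {},
        _ = term.recv() => {},
    }
    tracing::info!("Shutdown signal received");
}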


@@ -1,569 +0,0 @@
use axum::extract::{Request, State};
use axum::http::{Method, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use chrono::{NaiveDateTime, Utc};
use myfsio_auth::sigv4;
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_common::types::Principal;
use crate::state::AppState;
pub async fn auth_layer(
State(state): State<AppState>,
mut req: Request,
next: Next,
) -> Response {
let uri = req.uri().clone();
let path = uri.path().to_string();
if path == "/" && req.method() == axum::http::Method::GET {
match try_auth(&state, &req) {
AuthResult::Ok(principal) => {
if let Err(err) = authorize_request(&state, &principal, &req) {
return error_response(err, &path);
}
req.extensions_mut().insert(principal);
}
AuthResult::Denied(err) => return error_response(err, &path),
AuthResult::NoAuth => {
return error_response(
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
&path,
);
}
}
return next.run(req).await;
}
match try_auth(&state, &req) {
AuthResult::Ok(principal) => {
if let Err(err) = authorize_request(&state, &principal, &req) {
return error_response(err, &path);
}
req.extensions_mut().insert(principal);
next.run(req).await
}
AuthResult::Denied(err) => error_response(err, &path),
AuthResult::NoAuth => {
error_response(
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
&path,
)
}
}
}
enum AuthResult {
Ok(Principal),
Denied(S3Error),
NoAuth,
}
fn authorize_request(state: &AppState, principal: &Principal, req: &Request) -> Result<(), S3Error> {
let path = req.uri().path();
if path == "/" {
if state.iam.authorize(principal, None, "list", None) {
return Ok(());
}
return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
}
if path.starts_with("/admin/") || path.starts_with("/kms/") {
return Ok(());
}
let mut segments = path.trim_start_matches('/').split('/').filter(|s| !s.is_empty());
let bucket = match segments.next() {
Some(b) => b,
None => {
return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
}
};
let remaining: Vec<&str> = segments.collect();
let query = req.uri().query().unwrap_or("");
if remaining.is_empty() {
let action = resolve_bucket_action(req.method(), query);
if state.iam.authorize(principal, Some(bucket), action, None) {
return Ok(());
}
return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
}
let object_key = remaining.join("/");
if req.method() == Method::PUT {
if let Some(copy_source) = req
.headers()
.get("x-amz-copy-source")
.and_then(|v| v.to_str().ok())
{
let source = copy_source.strip_prefix('/').unwrap_or(copy_source);
if let Some((src_bucket, src_key)) = source.split_once('/') {
let source_allowed =
state.iam.authorize(principal, Some(src_bucket), "read", Some(src_key));
let dest_allowed =
state.iam.authorize(principal, Some(bucket), "write", Some(&object_key));
if source_allowed && dest_allowed {
return Ok(());
}
return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
}
}
}
let action = resolve_object_action(req.method(), query);
if state
.iam
.authorize(principal, Some(bucket), action, Some(&object_key))
{
return Ok(());
}
Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
}
fn resolve_bucket_action(method: &Method, query: &str) -> &'static str {
if has_query_key(query, "versioning") {
return "versioning";
}
if has_query_key(query, "tagging") {
return "tagging";
}
if has_query_key(query, "cors") {
return "cors";
}
if has_query_key(query, "location") {
return "list";
}
if has_query_key(query, "encryption") {
return "encryption";
}
if has_query_key(query, "lifecycle") {
return "lifecycle";
}
if has_query_key(query, "acl") {
return "share";
}
if has_query_key(query, "policy") || has_query_key(query, "policyStatus") {
return "policy";
}
if has_query_key(query, "replication") {
return "replication";
}
if has_query_key(query, "quota") {
return "quota";
}
if has_query_key(query, "website") {
return "website";
}
if has_query_key(query, "object-lock") {
return "object_lock";
}
if has_query_key(query, "notification") {
return "notification";
}
if has_query_key(query, "logging") {
return "logging";
}
if has_query_key(query, "versions") || has_query_key(query, "uploads") {
return "list";
}
if has_query_key(query, "delete") {
return "delete";
}
match *method {
Method::GET => "list",
Method::HEAD => "read",
Method::PUT => "create_bucket",
Method::DELETE => "delete_bucket",
Method::POST => "write",
_ => "list",
}
}
fn resolve_object_action(method: &Method, query: &str) -> &'static str {
if has_query_key(query, "tagging") {
return if *method == Method::GET { "read" } else { "write" };
}
if has_query_key(query, "acl") {
return if *method == Method::GET { "read" } else { "write" };
}
if has_query_key(query, "retention") || has_query_key(query, "legal-hold") {
return "object_lock";
}
if has_query_key(query, "attributes") {
return "read";
}
if has_query_key(query, "uploads") || has_query_key(query, "uploadId") {
return match *method {
Method::GET => "read",
_ => "write",
};
}
if has_query_key(query, "select") {
return "read";
}
match *method {
Method::GET | Method::HEAD => "read",
Method::PUT => "write",
Method::DELETE => "delete",
Method::POST => "write",
_ => "read",
}
}
fn has_query_key(query: &str, key: &str) -> bool {
if query.is_empty() {
return false;
}
query
.split('&')
.filter(|part| !part.is_empty())
.any(|part| part == key || part.starts_with(&format!("{}=", key)))
}
fn try_auth(state: &AppState, req: &Request) -> AuthResult {
if let Some(auth_header) = req.headers().get("authorization") {
if let Ok(auth_str) = auth_header.to_str() {
if auth_str.starts_with("AWS4-HMAC-SHA256 ") {
return verify_sigv4_header(state, req, auth_str);
}
}
}
let query = req.uri().query().unwrap_or("");
if query.contains("X-Amz-Algorithm=AWS4-HMAC-SHA256") {
return verify_sigv4_query(state, req);
}
if let (Some(ak), Some(sk)) = (
req.headers().get("x-access-key").and_then(|v| v.to_str().ok()),
req.headers().get("x-secret-key").and_then(|v| v.to_str().ok()),
) {
return match state.iam.authenticate(ak, sk) {
Some(principal) => AuthResult::Ok(principal),
None => AuthResult::Denied(
S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
),
};
}
AuthResult::NoAuth
}
fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthResult {
let parts: Vec<&str> = auth_str
.strip_prefix("AWS4-HMAC-SHA256 ")
.unwrap()
.split(", ")
.collect();
if parts.len() != 3 {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Malformed Authorization header"),
);
}
let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");
let cred_parts: Vec<&str> = credential.split('/').collect();
if cred_parts.len() != 5 {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
);
}
let access_key = cred_parts[0];
let date_stamp = cred_parts[1];
let region = cred_parts[2];
let service = cred_parts[3];
let amz_date = req
.headers()
.get("x-amz-date")
.or_else(|| req.headers().get("date"))
.and_then(|v| v.to_str().ok())
.unwrap_or("");
if amz_date.is_empty() {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::AccessDenied, "Missing Date header"),
);
}
if let Some(err) = check_timestamp_freshness(amz_date, state.config.sigv4_timestamp_tolerance_secs) {
return AuthResult::Denied(err);
}
let secret_key = match state.iam.get_secret_key(access_key) {
Some(sk) => sk,
None => {
return AuthResult::Denied(
S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
);
}
};
let method = req.method().as_str();
let canonical_uri = req.uri().path();
let query_params = parse_query_params(req.uri().query().unwrap_or(""));
let payload_hash = req
.headers()
.get("x-amz-content-sha256")
.and_then(|v| v.to_str().ok())
.unwrap_or("UNSIGNED-PAYLOAD");
let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
let header_values: Vec<(String, String)> = signed_headers
.iter()
.map(|&name| {
let value = req
.headers()
.get(name)
.and_then(|v| v.to_str().ok())
.unwrap_or("");
(name.to_string(), value.to_string())
})
.collect();
let verified = sigv4::verify_sigv4_signature(
method,
canonical_uri,
&query_params,
signed_headers_str,
&header_values,
payload_hash,
amz_date,
date_stamp,
region,
service,
&secret_key,
provided_signature,
);
if !verified {
return AuthResult::Denied(
S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
);
}
match state.iam.get_principal(access_key) {
Some(p) => AuthResult::Ok(p),
None => AuthResult::Denied(
S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
),
}
}
fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
let query = req.uri().query().unwrap_or("");
let params = parse_query_params(query);
let param_map: std::collections::HashMap<&str, &str> = params
.iter()
.map(|(k, v)| (k.as_str(), v.as_str()))
.collect();
let credential = match param_map.get("X-Amz-Credential") {
Some(c) => *c,
None => {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Credential"),
);
}
};
let signed_headers_str = param_map
.get("X-Amz-SignedHeaders")
.copied()
.unwrap_or("host");
let provided_signature = match param_map.get("X-Amz-Signature") {
Some(s) => *s,
None => {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Signature"),
);
}
};
let amz_date = match param_map.get("X-Amz-Date") {
Some(d) => *d,
None => {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Date"),
);
}
};
let expires_str = match param_map.get("X-Amz-Expires") {
Some(e) => *e,
None => {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Expires"),
);
}
};
let cred_parts: Vec<&str> = credential.split('/').collect();
if cred_parts.len() != 5 {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
);
}
let access_key = cred_parts[0];
let date_stamp = cred_parts[1];
let region = cred_parts[2];
let service = cred_parts[3];
let expires: u64 = match expires_str.parse() {
Ok(e) => e,
Err(_) => {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "Invalid X-Amz-Expires"),
);
}
};
if expires < state.config.presigned_url_min_expiry
|| expires > state.config.presigned_url_max_expiry
{
return AuthResult::Denied(
S3Error::new(S3ErrorCode::InvalidArgument, "X-Amz-Expires out of range"),
);
}
if let Ok(request_time) =
NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
{
let request_utc = request_time.and_utc();
let now = Utc::now();
let elapsed = (now - request_utc).num_seconds();
if elapsed > expires as i64 {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::AccessDenied, "Request has expired"),
);
}
if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
return AuthResult::Denied(
S3Error::new(S3ErrorCode::AccessDenied, "Request is too far in the future"),
);
}
}
let secret_key = match state.iam.get_secret_key(access_key) {
Some(sk) => sk,
None => {
return AuthResult::Denied(
S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
);
}
};
let method = req.method().as_str();
let canonical_uri = req.uri().path();
let query_params_no_sig: Vec<(String, String)> = params
.iter()
.filter(|(k, _)| k != "X-Amz-Signature")
.cloned()
.collect();
let payload_hash = "UNSIGNED-PAYLOAD";
let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
let header_values: Vec<(String, String)> = signed_headers
.iter()
.map(|&name| {
let value = req
.headers()
.get(name)
.and_then(|v| v.to_str().ok())
.unwrap_or("");
(name.to_string(), value.to_string())
})
.collect();
let verified = sigv4::verify_sigv4_signature(
method,
canonical_uri,
&query_params_no_sig,
signed_headers_str,
&header_values,
payload_hash,
amz_date,
date_stamp,
region,
service,
&secret_key,
provided_signature,
);
if !verified {
return AuthResult::Denied(
S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
);
}
match state.iam.get_principal(access_key) {
Some(p) => AuthResult::Ok(p),
None => AuthResult::Denied(
S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
),
}
}
fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Error> {
    // Timestamps that do not parse in ISO-8601 basic form (e.g. an RFC 1123 Date
    // header) skip the freshness check; the signature still binds the value.
    let request_time = NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ").ok()?;
let request_utc = request_time.and_utc();
let now = Utc::now();
let diff = (now - request_utc).num_seconds().unsigned_abs();
if diff > tolerance_secs {
return Some(S3Error::new(
S3ErrorCode::AccessDenied,
"Request timestamp too old or too far in the future",
));
}
None
}
fn parse_query_params(query: &str) -> Vec<(String, String)> {
if query.is_empty() {
return Vec::new();
}
query
.split('&')
.filter_map(|pair| {
let mut parts = pair.splitn(2, '=');
let key = parts.next()?;
let value = parts.next().unwrap_or("");
Some((
urlencoding_decode(key),
urlencoding_decode(value),
))
})
.collect()
}
fn urlencoding_decode(s: &str) -> String {
percent_encoding::percent_decode_str(s)
.decode_utf8_lossy()
.into_owned()
}
fn error_response(err: S3Error, resource: &str) -> Response {
let status =
StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let request_id = uuid::Uuid::new_v4().simple().to_string();
let body = err
.with_resource(resource.to_string())
.with_request_id(request_id)
.to_xml();
(status, [("content-type", "application/xml")], body).into_response()
}
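resolve_bucket_action, resolve_object_action, and has_query_key are pure functions of the method and raw query string, which makes the action mapping easy to pin down with in-module assertions. A test sketch, assuming it lives in this module:

#[cfg(test)]
mod action_mapping_tests {
    use super::*;
    use axum::http::Method;

    #[test]
    fn query_keys_drive_object_actions() {
        // Bare keys and key=value forms both match.
        assert!(has_query_key("tagging", "tagging"));
        assert!(has_query_key("uploadId=abc&partNumber=1", "uploadId"));
        // GET ?tagging reads, PUT ?tagging writes; retention maps to object_lock.
        assert_eq!(resolve_object_action(&Method::GET, "tagging"), "read");
        assert_eq!(resolve_object_action(&Method::PUT, "tagging"), "write");
        assert_eq!(resolve_object_action(&Method::PUT, "retention"), "object_lock");
    }
}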


@@ -1,16 +0,0 @@
mod auth;
pub use auth::auth_layer;
use axum::extract::Request;
use axum::middleware::Next;
use axum::response::Response;
pub async fn server_header(req: Request, next: Next) -> Response {
let mut resp = next.run(req).await;
resp.headers_mut().insert(
"server",
crate::SERVER_HEADER.parse().unwrap(),
);
resp
}


@@ -1,263 +0,0 @@
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
pub struct GcConfig {
pub interval_hours: f64,
pub temp_file_max_age_hours: f64,
pub multipart_max_age_days: u64,
pub lock_file_max_age_hours: f64,
pub dry_run: bool,
}
impl Default for GcConfig {
fn default() -> Self {
Self {
interval_hours: 6.0,
temp_file_max_age_hours: 24.0,
multipart_max_age_days: 7,
lock_file_max_age_hours: 1.0,
dry_run: false,
}
}
}
pub struct GcService {
storage_root: PathBuf,
config: GcConfig,
running: Arc<RwLock<bool>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
impl GcService {
pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
let history_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("gc_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage_root,
config,
running: Arc::new(RwLock::new(false)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
json!({
"enabled": true,
"running": running,
"interval_hours": self.config.interval_hours,
"temp_file_max_age_hours": self.config.temp_file_max_age_hours,
"multipart_max_age_days": self.config.multipart_max_age_days,
"lock_file_max_age_hours": self.config.lock_file_max_age_hours,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
json!({ "executions": *history })
}
pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("GC already running".to_string());
}
*running = true;
}
let start = Instant::now();
let result = self.execute_gc(dry_run || self.config.dry_run).await;
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
let mut result_json = result.clone();
if let Some(obj) = result_json.as_object_mut() {
obj.insert("execution_time_seconds".to_string(), json!(elapsed));
}
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run || self.config.dry_run,
"result": result_json,
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result)
}
async fn execute_gc(&self, dry_run: bool) -> Value {
let mut temp_files_deleted = 0u64;
let mut temp_bytes_freed = 0u64;
let mut multipart_uploads_deleted = 0u64;
let mut lock_files_deleted = 0u64;
let mut empty_dirs_removed = 0u64;
let mut errors: Vec<String> = Vec::new();
let now = std::time::SystemTime::now();
let temp_max_age = std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
let multipart_max_age = std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
let lock_max_age = std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);
let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
if tmp_dir.exists() {
match std::fs::read_dir(&tmp_dir) {
Ok(entries) => {
for entry in entries.flatten() {
if let Ok(metadata) = entry.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > temp_max_age {
let size = metadata.len();
if !dry_run {
if let Err(e) = std::fs::remove_file(entry.path()) {
errors.push(format!("Failed to remove temp file: {}", e));
continue;
}
}
temp_files_deleted += 1;
temp_bytes_freed += size;
}
}
}
}
}
}
Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
}
}
let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
if multipart_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
for bucket_entry in bucket_dirs.flatten() {
if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
for upload in uploads.flatten() {
if let Ok(metadata) = upload.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > multipart_max_age {
if !dry_run {
let _ = std::fs::remove_dir_all(upload.path());
}
multipart_uploads_deleted += 1;
}
}
}
}
}
}
}
}
}
let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
if buckets_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
for bucket_entry in bucket_dirs.flatten() {
let locks_dir = bucket_entry.path().join("locks");
if locks_dir.exists() {
if let Ok(locks) = std::fs::read_dir(&locks_dir) {
for lock in locks.flatten() {
if let Ok(metadata) = lock.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > lock_max_age {
if !dry_run {
let _ = std::fs::remove_file(lock.path());
}
lock_files_deleted += 1;
}
}
}
}
}
}
}
}
}
}
if !dry_run {
for dir in [&tmp_dir, &multipart_dir] {
if dir.exists() {
if let Ok(entries) = std::fs::read_dir(dir) {
for entry in entries.flatten() {
if entry.path().is_dir() {
if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
if contents.next().is_none() {
let _ = std::fs::remove_dir(entry.path());
empty_dirs_removed += 1;
}
}
}
}
}
}
}
}
json!({
"temp_files_deleted": temp_files_deleted,
"temp_bytes_freed": temp_bytes_freed,
"multipart_uploads_deleted": multipart_uploads_deleted,
"lock_files_deleted": lock_files_deleted,
"empty_dirs_removed": empty_dirs_removed,
"errors": errors,
})
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(&self.history_path, serde_json::to_string_pretty(&data).unwrap_or_default());
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
            // The first tick completes immediately; consume it so the first GC cycle waits a full interval.
            timer.tick().await;
loop {
timer.tick().await;
tracing::info!("GC cycle starting");
match self.run_now(false).await {
Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
Err(e) => tracing::warn!("GC cycle failed: {}", e),
}
}
})
}
}
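Wiring the service takes two calls: construct it with a config, wrap it in an Arc, and start the background loop (main.rs does the same through AppState). A sketch from an async context, with a hypothetical storage root:

let gc = std::sync::Arc::new(GcService::new(
    std::path::PathBuf::from("/srv/myfsio-data"), // hypothetical storage root
    GcConfig { dry_run: true, ..GcConfig::default() },
));
let handle = gc.clone().start_background();
// One-off manual dry run; Err(_) means a cycle is already in flight.
if let Ok(report) = gc.run_now(true).await {
    tracing::info!("gc report: {}", report);
}
handle.abort();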


@@ -1,204 +0,0 @@
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
pub struct IntegrityConfig {
pub interval_hours: f64,
pub batch_size: usize,
pub auto_heal: bool,
pub dry_run: bool,
}
impl Default for IntegrityConfig {
fn default() -> Self {
Self {
interval_hours: 24.0,
batch_size: 1000,
auto_heal: false,
dry_run: false,
}
}
}
pub struct IntegrityService {
storage: Arc<FsStorageBackend>,
config: IntegrityConfig,
running: Arc<RwLock<bool>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
impl IntegrityService {
pub fn new(
storage: Arc<FsStorageBackend>,
storage_root: &std::path::Path,
config: IntegrityConfig,
) -> Self {
let history_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("integrity_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage,
config,
running: Arc::new(RwLock::new(false)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
json!({
"enabled": true,
"running": running,
"interval_hours": self.config.interval_hours,
"batch_size": self.config.batch_size,
"auto_heal": self.config.auto_heal,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
json!({ "executions": *history })
}
pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Integrity check already running".to_string());
}
*running = true;
}
let start = Instant::now();
let result = self.check_integrity(dry_run, auto_heal).await;
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
let mut result_json = result.clone();
if let Some(obj) = result_json.as_object_mut() {
obj.insert("execution_time_seconds".to_string(), json!(elapsed));
}
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run,
"auto_heal": auto_heal,
"result": result_json,
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result)
}
async fn check_integrity(&self, _dry_run: bool, _auto_heal: bool) -> Value {
let buckets = match self.storage.list_buckets().await {
Ok(b) => b,
Err(e) => return json!({"error": e.to_string()}),
};
let mut objects_scanned = 0u64;
let mut corrupted = 0u64;
let mut phantom_metadata = 0u64;
let mut errors: Vec<String> = Vec::new();
for bucket in &buckets {
let params = myfsio_common::types::ListParams {
max_keys: self.config.batch_size,
..Default::default()
};
let objects = match self.storage.list_objects(&bucket.name, &params).await {
Ok(r) => r.objects,
Err(e) => {
errors.push(format!("{}: {}", bucket.name, e));
continue;
}
};
for obj in &objects {
objects_scanned += 1;
match self.storage.get_object_path(&bucket.name, &obj.key).await {
Ok(path) => {
if !path.exists() {
phantom_metadata += 1;
} else if let Some(ref expected_etag) = obj.etag {
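// assumption: stored ETags are plain MD5 hex digests of the object bytes;
// multipart-style ETags ("<md5>-<parts>") would never match and would be flagged as corrupted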
match myfsio_crypto::hashing::md5_file(&path) {
Ok(actual_etag) => {
if &actual_etag != expected_etag {
corrupted += 1;
}
}
Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
}
}
}
Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
}
}
}
json!({
"objects_scanned": objects_scanned,
"buckets_scanned": buckets.len(),
"corrupted_objects": corrupted,
"phantom_metadata": phantom_metadata,
"errors": errors,
})
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Integrity check starting");
match self.run_now(false, false).await {
Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
Err(e) => tracing::warn!("Integrity check failed: {}", e),
}
}
})
}
}

View File

@@ -1,153 +0,0 @@
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde_json::{json, Value};
use std::sync::Arc;
use tokio::sync::RwLock;
pub struct LifecycleConfig {
pub interval_seconds: u64,
}
impl Default for LifecycleConfig {
fn default() -> Self {
Self {
interval_seconds: 3600,
}
}
}
pub struct LifecycleService {
storage: Arc<FsStorageBackend>,
config: LifecycleConfig,
running: Arc<RwLock<bool>>,
}
impl LifecycleService {
pub fn new(storage: Arc<FsStorageBackend>, config: LifecycleConfig) -> Self {
Self {
storage,
config,
running: Arc::new(RwLock::new(false)),
}
}
pub async fn run_cycle(&self) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Lifecycle already running".to_string());
}
*running = true;
}
let result = self.evaluate_rules().await;
*self.running.write().await = false;
Ok(result)
}
async fn evaluate_rules(&self) -> Value {
let buckets = match self.storage.list_buckets().await {
Ok(b) => b,
Err(e) => return json!({"error": e.to_string()}),
};
let mut total_expired = 0u64;
let mut total_multipart_aborted = 0u64;
let mut errors: Vec<String> = Vec::new();
for bucket in &buckets {
let config = match self.storage.get_bucket_config(&bucket.name).await {
Ok(c) => c,
Err(_) => continue,
};
let lifecycle = match &config.lifecycle {
Some(lc) => lc,
None => continue,
};
let rules = match lifecycle.as_str().and_then(|s| serde_json::from_str::<Value>(s).ok()) {
Some(v) => v,
None => continue,
};
let rules_arr = match rules.get("Rules").and_then(|r| r.as_array()) {
Some(a) => a.clone(),
None => continue,
};
for rule in &rules_arr {
if rule.get("Status").and_then(|s| s.as_str()) != Some("Enabled") {
continue;
}
let prefix = rule
.get("Filter")
.and_then(|f| f.get("Prefix"))
.and_then(|p| p.as_str())
.or_else(|| rule.get("Prefix").and_then(|p| p.as_str()))
.unwrap_or("");
if let Some(exp) = rule.get("Expiration") {
if let Some(days) = exp.get("Days").and_then(|d| d.as_u64()) {
let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
let params = myfsio_common::types::ListParams {
max_keys: 1000,
prefix: if prefix.is_empty() { None } else { Some(prefix.to_string()) },
..Default::default()
};
if let Ok(result) = self.storage.list_objects(&bucket.name, &params).await {
for obj in &result.objects {
if obj.last_modified < cutoff {
match self.storage.delete_object(&bucket.name, &obj.key).await {
Ok(()) => total_expired += 1,
Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
}
}
}
}
}
}
if let Some(abort) = rule.get("AbortIncompleteMultipartUpload") {
if let Some(days) = abort.get("DaysAfterInitiation").and_then(|d| d.as_u64()) {
let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
if let Ok(uploads) = self.storage.list_multipart_uploads(&bucket.name).await {
for upload in &uploads {
if upload.initiated < cutoff {
match self.storage.abort_multipart(&bucket.name, &upload.upload_id).await {
Ok(()) => total_multipart_aborted += 1,
Err(e) => errors.push(format!("abort {}: {}", upload.upload_id, e)),
}
}
}
}
}
}
}
}
json!({
"objects_expired": total_expired,
"multipart_aborted": total_multipart_aborted,
"buckets_evaluated": buckets.len(),
"errors": errors,
})
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_seconds);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Lifecycle evaluation starting");
match self.run_cycle().await {
Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
Err(e) => tracing::warn!("Lifecycle cycle failed: {}", e),
}
}
})
}
}
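// shape of the bucket lifecycle JSON this evaluator understands (hypothetical values):
// {"Rules":[{"Status":"Enabled","Filter":{"Prefix":"logs/"},
//           "Expiration":{"Days":30},
//           "AbortIncompleteMultipartUpload":{"DaysAfterInitiation":7}}]}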

View File

@@ -1,219 +0,0 @@
use serde_json::{json, Value};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
pub struct MetricsConfig {
pub interval_minutes: u64,
pub retention_hours: u64,
}
impl Default for MetricsConfig {
fn default() -> Self {
Self {
interval_minutes: 5,
retention_hours: 24,
}
}
}
struct MethodStats {
count: u64,
success_count: u64,
error_count: u64,
bytes_in: u64,
bytes_out: u64,
latencies: Vec<f64>,
}
impl MethodStats {
fn new() -> Self {
Self {
count: 0,
success_count: 0,
error_count: 0,
bytes_in: 0,
bytes_out: 0,
latencies: Vec::new(),
}
}
fn to_json(&self) -> Value {
let (min, max, avg, p50, p95, p99) = if self.latencies.is_empty() {
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
} else {
let mut sorted = self.latencies.clone();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
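// nearest-rank percentiles over the sorted samples; indices are clamped so p95/p99 stay in bounds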
let len = sorted.len();
let sum: f64 = sorted.iter().sum();
(
sorted[0],
sorted[len - 1],
sum / len as f64,
sorted[len / 2],
sorted[((len as f64 * 0.95) as usize).min(len - 1)],
sorted[((len as f64 * 0.99) as usize).min(len - 1)],
)
};
json!({
"count": self.count,
"success_count": self.success_count,
"error_count": self.error_count,
"bytes_in": self.bytes_in,
"bytes_out": self.bytes_out,
"latency_min_ms": min,
"latency_max_ms": max,
"latency_avg_ms": avg,
"latency_p50_ms": p50,
"latency_p95_ms": p95,
"latency_p99_ms": p99,
})
}
}
struct CurrentWindow {
by_method: HashMap<String, MethodStats>,
by_status_class: HashMap<String, u64>,
start_time: Instant,
}
impl CurrentWindow {
fn new() -> Self {
Self {
by_method: HashMap::new(),
by_status_class: HashMap::new(),
start_time: Instant::now(),
}
}
fn reset(&mut self) {
self.by_method.clear();
self.by_status_class.clear();
self.start_time = Instant::now();
}
}
pub struct MetricsService {
config: MetricsConfig,
current: Arc<RwLock<CurrentWindow>>,
snapshots: Arc<RwLock<Vec<Value>>>,
snapshots_path: PathBuf,
}
impl MetricsService {
pub fn new(storage_root: &std::path::Path, config: MetricsConfig) -> Self {
let snapshots_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("operation_metrics.json");
let snapshots = if snapshots_path.exists() {
std::fs::read_to_string(&snapshots_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("snapshots").and_then(|s| s.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
config,
current: Arc::new(RwLock::new(CurrentWindow::new())),
snapshots: Arc::new(RwLock::new(snapshots)),
snapshots_path,
}
}
pub async fn record(&self, method: &str, status: u16, latency_ms: f64, bytes_in: u64, bytes_out: u64) {
let mut window = self.current.write().await;
let stats = window.by_method.entry(method.to_string()).or_insert_with(MethodStats::new);
stats.count += 1;
if status < 400 {
stats.success_count += 1;
} else {
stats.error_count += 1;
}
stats.bytes_in += bytes_in;
stats.bytes_out += bytes_out;
stats.latencies.push(latency_ms);
let class = format!("{}xx", status / 100);
*window.by_status_class.entry(class).or_insert(0) += 1;
}
pub async fn snapshot(&self) -> Value {
let window = self.current.read().await;
let mut by_method = serde_json::Map::new();
for (method, stats) in &window.by_method {
by_method.insert(method.clone(), stats.to_json());
}
let snapshots = self.snapshots.read().await;
json!({
"enabled": true,
"current_window": {
"by_method": by_method,
"by_status_class": window.by_status_class,
"window_start_elapsed_secs": window.start_time.elapsed().as_secs_f64(),
},
"snapshots": *snapshots,
})
}
async fn flush_window(&self) {
let snap = {
let mut window = self.current.write().await;
let mut by_method = serde_json::Map::new();
for (method, stats) in &window.by_method {
by_method.insert(method.clone(), stats.to_json());
}
let snap = json!({
"timestamp": chrono::Utc::now().to_rfc3339(),
"window_seconds": self.config.interval_minutes * 60,
"by_method": by_method,
"by_status_class": window.by_status_class,
});
window.reset();
snap
};
let max_snapshots = (self.config.retention_hours * 60 / self.config.interval_minutes) as usize;
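// keep retention_hours worth of windows, e.g. with the defaults: 24h * 60 / 5min = 288 snapshots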
{
let mut snapshots = self.snapshots.write().await;
snapshots.push(snap);
if snapshots.len() > max_snapshots {
let excess = snapshots.len() - max_snapshots;
snapshots.drain(..excess);
}
}
self.save_snapshots().await;
}
async fn save_snapshots(&self) {
let snapshots = self.snapshots.read().await;
let data = json!({ "snapshots": *snapshots });
if let Some(parent) = self.snapshots_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.snapshots_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
self.flush_window().await;
}
})
}
}
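// usage sketch (assumes an Arc-wrapped service on a tokio runtime):
// let metrics = Arc::new(MetricsService::new(&storage_root, MetricsConfig::default()));
// metrics.record("GET", 200, 12.5, 0, 4096).await; // method, status, latency_ms, bytes_in, bytes_out
// let _flusher = metrics.clone().start_background();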

View File

@@ -1,6 +0,0 @@
pub mod gc;
pub mod lifecycle;
pub mod integrity;
pub mod metrics;
pub mod site_registry;
pub mod website_domains;

View File

@@ -1,143 +0,0 @@
use chrono::Utc;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SiteInfo {
pub site_id: String,
pub endpoint: String,
#[serde(default = "default_region")]
pub region: String,
#[serde(default = "default_priority")]
pub priority: i32,
#[serde(default)]
pub display_name: String,
#[serde(default)]
pub created_at: Option<String>,
}
fn default_region() -> String {
"us-east-1".to_string()
}
fn default_priority() -> i32 {
100
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerSite {
pub site_id: String,
pub endpoint: String,
#[serde(default = "default_region")]
pub region: String,
#[serde(default = "default_priority")]
pub priority: i32,
#[serde(default)]
pub display_name: String,
#[serde(default)]
pub connection_id: Option<String>,
#[serde(default)]
pub created_at: Option<String>,
#[serde(default)]
pub is_healthy: bool,
#[serde(default)]
pub last_health_check: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct RegistryData {
#[serde(default)]
local: Option<SiteInfo>,
#[serde(default)]
peers: Vec<PeerSite>,
}
pub struct SiteRegistry {
path: PathBuf,
data: Arc<RwLock<RegistryData>>,
}
impl SiteRegistry {
pub fn new(storage_root: &std::path::Path) -> Self {
let path = storage_root
.join(".myfsio.sys")
.join("config")
.join("site_registry.json");
let data = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str(&s).ok())
.unwrap_or_default()
} else {
RegistryData::default()
};
Self {
path,
data: Arc::new(RwLock::new(data)),
}
}
fn save(&self) {
let data = self.data.read();
if let Some(parent) = self.path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(json) = serde_json::to_string_pretty(&*data) {
let _ = std::fs::write(&self.path, json);
}
}
pub fn get_local_site(&self) -> Option<SiteInfo> {
self.data.read().local.clone()
}
pub fn set_local_site(&self, site: SiteInfo) {
self.data.write().local = Some(site);
self.save();
}
pub fn list_peers(&self) -> Vec<PeerSite> {
self.data.read().peers.clone()
}
pub fn get_peer(&self, site_id: &str) -> Option<PeerSite> {
self.data.read().peers.iter().find(|p| p.site_id == site_id).cloned()
}
pub fn add_peer(&self, peer: PeerSite) {
self.data.write().peers.push(peer);
self.save();
}
pub fn update_peer(&self, peer: PeerSite) {
let mut data = self.data.write();
if let Some(existing) = data.peers.iter_mut().find(|p| p.site_id == peer.site_id) {
*existing = peer;
}
drop(data);
self.save();
}
pub fn delete_peer(&self, site_id: &str) -> bool {
let mut data = self.data.write();
let len_before = data.peers.len();
data.peers.retain(|p| p.site_id != site_id);
let removed = data.peers.len() < len_before;
drop(data);
if removed {
self.save();
}
removed
}
pub fn update_health(&self, site_id: &str, is_healthy: bool) {
let mut data = self.data.write();
if let Some(peer) = data.peers.iter_mut().find(|p| p.site_id == site_id) {
peer.is_healthy = is_healthy;
peer.last_health_check = Some(Utc::now().to_rfc3339());
}
drop(data);
self.save();
}
}

View File

@@ -1,104 +0,0 @@
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct DomainData {
#[serde(default)]
mappings: HashMap<String, String>,
}
pub struct WebsiteDomainStore {
path: PathBuf,
data: Arc<RwLock<DomainData>>,
}
impl WebsiteDomainStore {
pub fn new(storage_root: &std::path::Path) -> Self {
let path = storage_root
.join(".myfsio.sys")
.join("config")
.join("website_domains.json");
let data = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str(&s).ok())
.unwrap_or_default()
} else {
DomainData::default()
};
Self {
path,
data: Arc::new(RwLock::new(data)),
}
}
fn save(&self) {
let data = self.data.read();
if let Some(parent) = self.path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(json) = serde_json::to_string_pretty(&*data) {
let _ = std::fs::write(&self.path, json);
}
}
pub fn list_all(&self) -> Vec<serde_json::Value> {
self.data
.read()
.mappings
.iter()
.map(|(domain, bucket)| {
serde_json::json!({
"domain": domain,
"bucket": bucket,
})
})
.collect()
}
pub fn get_bucket(&self, domain: &str) -> Option<String> {
self.data.read().mappings.get(domain).cloned()
}
pub fn set_mapping(&self, domain: &str, bucket: &str) {
self.data.write().mappings.insert(domain.to_string(), bucket.to_string());
self.save();
}
pub fn delete_mapping(&self, domain: &str) -> bool {
let removed = self.data.write().mappings.remove(domain).is_some();
if removed {
self.save();
}
removed
}
}
pub fn normalize_domain(domain: &str) -> String {
domain.trim().to_ascii_lowercase()
}
pub fn is_valid_domain(domain: &str) -> bool {
if domain.is_empty() || domain.len() > 253 {
return false;
}
let labels: Vec<&str> = domain.split('.').collect();
if labels.len() < 2 {
return false;
}
for label in &labels {
if label.is_empty() || label.len() > 63 {
return false;
}
if !label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') {
return false;
}
if label.starts_with('-') || label.ends_with('-') {
return false;
}
}
true
}
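// minimal usage sketch (hypothetical inputs):
// assert!(is_valid_domain(&normalize_domain(" Example.COM "))); // -> "example.com"
// assert!(!is_valid_domain("-bad-.example"));                   // labels cannot start or end with '-'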

View File

@@ -1,121 +0,0 @@
use std::sync::Arc;
use crate::config::ServerConfig;
use crate::services::gc::GcService;
use crate::services::integrity::IntegrityService;
use crate::services::metrics::MetricsService;
use crate::services::site_registry::SiteRegistry;
use crate::services::website_domains::WebsiteDomainStore;
use myfsio_auth::iam::IamService;
use myfsio_crypto::encryption::EncryptionService;
use myfsio_crypto::kms::KmsService;
use myfsio_storage::fs_backend::FsStorageBackend;
#[derive(Clone)]
pub struct AppState {
pub config: ServerConfig,
pub storage: Arc<FsStorageBackend>,
pub iam: Arc<IamService>,
pub encryption: Option<Arc<EncryptionService>>,
pub kms: Option<Arc<KmsService>>,
pub gc: Option<Arc<GcService>>,
pub integrity: Option<Arc<IntegrityService>>,
pub metrics: Option<Arc<MetricsService>>,
pub site_registry: Option<Arc<SiteRegistry>>,
pub website_domains: Option<Arc<WebsiteDomainStore>>,
}
impl AppState {
pub fn new(config: ServerConfig) -> Self {
let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
let iam = Arc::new(IamService::new_with_secret(
config.iam_config_path.clone(),
config.secret_key.clone(),
));
let gc = if config.gc_enabled {
Some(Arc::new(GcService::new(
config.storage_root.clone(),
crate::services::gc::GcConfig::default(),
)))
} else {
None
};
let integrity = if config.integrity_enabled {
Some(Arc::new(IntegrityService::new(
storage.clone(),
&config.storage_root,
crate::services::integrity::IntegrityConfig::default(),
)))
} else {
None
};
let metrics = if config.metrics_enabled {
Some(Arc::new(MetricsService::new(
&config.storage_root,
crate::services::metrics::MetricsConfig::default(),
)))
} else {
None
};
let site_registry = Some(Arc::new(SiteRegistry::new(&config.storage_root)));
let website_domains = if config.website_hosting_enabled {
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
} else {
None
};
Self {
config,
storage,
iam,
encryption: None,
kms: None,
gc,
integrity,
metrics,
site_registry,
website_domains,
}
}
pub async fn new_with_encryption(config: ServerConfig) -> Self {
let mut state = Self::new(config.clone());
let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");
let kms = if config.kms_enabled {
match KmsService::new(&keys_dir).await {
Ok(k) => Some(Arc::new(k)),
Err(e) => {
tracing::error!("Failed to initialize KMS: {}", e);
None
}
}
} else {
None
};
let encryption = if config.encryption_enabled {
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
Ok(master_key) => {
Some(Arc::new(EncryptionService::new(master_key, kms.clone())))
}
Err(e) => {
tracing::error!("Failed to initialize encryption: {}", e);
None
}
}
} else {
None
};
state.encryption = encryption;
state.kms = kms;
state
}
}
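// startup sketch (assumed wiring, not part of this file):
// let state = AppState::new_with_encryption(config).await;
// if let Some(gc) = state.gc.clone() { gc.start_background(); }
// if let Some(integrity) = state.integrity.clone() { integrity.start_background(); }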

File diff suppressed because it is too large

View File

@@ -1,26 +0,0 @@
[package]
name = "myfsio-storage"
version = "0.1.0"
edition = "2021"
[dependencies]
myfsio-common = { path = "../myfsio-common" }
myfsio-crypto = { path = "../myfsio-crypto" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
dashmap = { workspace = true }
parking_lot = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
regex = { workspace = true }
unicode-normalization = { workspace = true }
md-5 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tempfile = "3"

View File

@@ -1,59 +0,0 @@
use myfsio_common::error::{S3Error, S3ErrorCode};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum StorageError {
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Bucket already exists: {0}")]
BucketAlreadyExists(String),
#[error("Bucket not empty: {0}")]
BucketNotEmpty(String),
#[error("Object not found: {bucket}/{key}")]
ObjectNotFound { bucket: String, key: String },
#[error("Invalid bucket name: {0}")]
InvalidBucketName(String),
#[error("Invalid object key: {0}")]
InvalidObjectKey(String),
#[error("Upload not found: {0}")]
UploadNotFound(String),
#[error("Quota exceeded: {0}")]
QuotaExceeded(String),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("JSON error: {0}")]
Json(#[from] serde_json::Error),
#[error("Internal error: {0}")]
Internal(String),
}
impl From<StorageError> for S3Error {
fn from(err: StorageError) -> Self {
match err {
StorageError::BucketNotFound(name) => {
S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
}
StorageError::BucketAlreadyExists(name) => {
S3Error::from_code(S3ErrorCode::BucketAlreadyExists)
.with_resource(format!("/{}", name))
}
StorageError::BucketNotEmpty(name) => {
S3Error::from_code(S3ErrorCode::BucketNotEmpty)
.with_resource(format!("/{}", name))
}
StorageError::ObjectNotFound { bucket, key } => {
S3Error::from_code(S3ErrorCode::NoSuchKey)
.with_resource(format!("/{}/{}", bucket, key))
}
StorageError::InvalidBucketName(msg) => S3Error::new(S3ErrorCode::InvalidBucketName, msg),
StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
StorageError::UploadNotFound(id) => {
S3Error::new(S3ErrorCode::NoSuchUpload, format!("Upload {} not found", id))
}
StorageError::QuotaExceeded(msg) => S3Error::new(S3ErrorCode::QuotaExceeded, msg),
StorageError::Io(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
StorageError::Json(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
StorageError::Internal(msg) => S3Error::new(S3ErrorCode::InternalError, msg),
}
}
}

File diff suppressed because it is too large

View File

@@ -1,4 +0,0 @@
pub mod validation;
pub mod traits;
pub mod error;
pub mod fs_backend;

View File

@@ -1,125 +0,0 @@
use crate::error::StorageError;
use myfsio_common::types::*;
use std::collections::HashMap;
use std::path::PathBuf;
use std::pin::Pin;
use tokio::io::AsyncRead;
pub type StorageResult<T> = Result<T, StorageError>;
pub type AsyncReadStream = Pin<Box<dyn AsyncRead + Send>>;
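// async fns in traits are not dyn-compatible, so callers hold the concrete
// backend (e.g. Arc<FsStorageBackend> in AppState) rather than Arc<dyn StorageEngine>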
#[allow(async_fn_in_trait)]
pub trait StorageEngine: Send + Sync {
async fn list_buckets(&self) -> StorageResult<Vec<BucketMeta>>;
async fn create_bucket(&self, name: &str) -> StorageResult<()>;
async fn delete_bucket(&self, name: &str) -> StorageResult<()>;
async fn bucket_exists(&self, name: &str) -> StorageResult<bool>;
async fn bucket_stats(&self, name: &str) -> StorageResult<BucketStats>;
async fn put_object(
&self,
bucket: &str,
key: &str,
stream: AsyncReadStream,
metadata: Option<HashMap<String, String>>,
) -> StorageResult<ObjectMeta>;
async fn get_object(&self, bucket: &str, key: &str) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
async fn get_object_path(&self, bucket: &str, key: &str) -> StorageResult<PathBuf>;
async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<()>;
async fn copy_object(
&self,
src_bucket: &str,
src_key: &str,
dst_bucket: &str,
dst_key: &str,
) -> StorageResult<ObjectMeta>;
async fn get_object_metadata(
&self,
bucket: &str,
key: &str,
) -> StorageResult<HashMap<String, String>>;
async fn put_object_metadata(
&self,
bucket: &str,
key: &str,
metadata: &HashMap<String, String>,
) -> StorageResult<()>;
async fn list_objects(&self, bucket: &str, params: &ListParams) -> StorageResult<ListObjectsResult>;
async fn list_objects_shallow(
&self,
bucket: &str,
params: &ShallowListParams,
) -> StorageResult<ShallowListResult>;
async fn initiate_multipart(
&self,
bucket: &str,
key: &str,
metadata: Option<HashMap<String, String>>,
) -> StorageResult<String>;
async fn upload_part(
&self,
bucket: &str,
upload_id: &str,
part_number: u32,
stream: AsyncReadStream,
) -> StorageResult<String>;
async fn complete_multipart(
&self,
bucket: &str,
upload_id: &str,
parts: &[PartInfo],
) -> StorageResult<ObjectMeta>;
async fn abort_multipart(&self, bucket: &str, upload_id: &str) -> StorageResult<()>;
async fn list_parts(&self, bucket: &str, upload_id: &str) -> StorageResult<Vec<PartMeta>>;
async fn list_multipart_uploads(
&self,
bucket: &str,
) -> StorageResult<Vec<MultipartUploadInfo>>;
async fn get_bucket_config(&self, bucket: &str) -> StorageResult<BucketConfig>;
async fn set_bucket_config(&self, bucket: &str, config: &BucketConfig) -> StorageResult<()>;
async fn is_versioning_enabled(&self, bucket: &str) -> StorageResult<bool>;
async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()>;
async fn list_object_versions(
&self,
bucket: &str,
key: &str,
) -> StorageResult<Vec<VersionInfo>>;
async fn get_object_tags(
&self,
bucket: &str,
key: &str,
) -> StorageResult<Vec<Tag>>;
async fn set_object_tags(
&self,
bucket: &str,
key: &str,
tags: &[Tag],
) -> StorageResult<()>;
async fn delete_object_tags(
&self,
bucket: &str,
key: &str,
) -> StorageResult<()>;
}

View File

@@ -1,194 +0,0 @@
use std::sync::LazyLock;
use unicode_normalization::UnicodeNormalization;
const WINDOWS_RESERVED: &[&str] = &[
"CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
"COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
"LPT9",
];
const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
const SYSTEM_ROOT: &str = ".myfsio.sys";
static IP_REGEX: LazyLock<regex::Regex> =
LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());
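// shape-only match (octets are not range-checked): anything that merely looks
// like an IPv4 address is rejected as a bucket name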
pub fn validate_object_key(
object_key: &str,
max_length_bytes: usize,
is_windows: bool,
reserved_prefixes: Option<&[&str]>,
) -> Option<String> {
if object_key.is_empty() {
return Some("Object key required".to_string());
}
if object_key.contains('\0') {
return Some("Object key contains null bytes".to_string());
}
let normalized: String = object_key.nfc().collect();
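// all checks below run on the NFC form so decomposed Unicode cannot slip past
// the byte-length or path-segment rules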
if normalized.len() > max_length_bytes {
return Some(format!(
"Object key exceeds maximum length of {} bytes",
max_length_bytes
));
}
if normalized.starts_with('/') || normalized.starts_with('\\') {
return Some("Object key cannot start with a slash".to_string());
}
let parts: Vec<&str> = if cfg!(windows) || is_windows {
normalized.split(['/', '\\']).collect()
} else {
normalized.split('/').collect()
};
for part in &parts {
if part.is_empty() {
continue;
}
if *part == ".." {
return Some("Object key contains parent directory references".to_string());
}
if *part == "." {
return Some("Object key contains invalid segments".to_string());
}
if part.chars().any(|c| (c as u32) < 32) {
return Some("Object key contains control characters".to_string());
}
if is_windows {
if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
return Some(
"Object key contains characters not supported on Windows filesystems"
.to_string(),
);
}
if part.ends_with(' ') || part.ends_with('.') {
return Some(
"Object key segments cannot end with spaces or periods on Windows".to_string(),
);
}
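// Windows resolves names like "CON." or "CON " to the reserved device, so
// strip trailing dots and spaces before checking the reserved list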
let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
return Some(format!("Invalid filename segment: {}", part));
}
}
}
let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
if let Some(top) = non_empty_parts.first() {
if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
return Some("Object key uses a reserved prefix".to_string());
}
if let Some(prefixes) = reserved_prefixes {
for prefix in prefixes {
if *top == *prefix {
return Some("Object key uses a reserved prefix".to_string());
}
}
}
}
None
}
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
let len = bucket_name.len();
if len < 3 || len > 63 {
return Some("Bucket name must be between 3 and 63 characters".to_string());
}
let bytes = bucket_name.as_bytes();
if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
return Some(
"Bucket name must start and end with a lowercase letter or digit".to_string(),
);
}
if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
return Some(
"Bucket name must start and end with a lowercase letter or digit".to_string(),
);
}
for &b in bytes {
if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
return Some(
"Bucket name can only contain lowercase letters, digits, dots, and hyphens"
.to_string(),
);
}
}
if bucket_name.contains("..") {
return Some("Bucket name must not contain consecutive periods".to_string());
}
if IP_REGEX.is_match(bucket_name) {
return Some("Bucket name must not be formatted as an IP address".to_string());
}
None
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_valid_bucket_names() {
assert!(validate_bucket_name("my-bucket").is_none());
assert!(validate_bucket_name("test123").is_none());
assert!(validate_bucket_name("my.bucket.name").is_none());
}
#[test]
fn test_invalid_bucket_names() {
assert!(validate_bucket_name("ab").is_some());
assert!(validate_bucket_name("My-Bucket").is_some());
assert!(validate_bucket_name("-bucket").is_some());
assert!(validate_bucket_name("bucket-").is_some());
assert!(validate_bucket_name("my..bucket").is_some());
assert!(validate_bucket_name("192.168.1.1").is_some());
}
#[test]
fn test_valid_object_keys() {
assert!(validate_object_key("file.txt", 1024, false, None).is_none());
assert!(validate_object_key("path/to/file.txt", 1024, false, None).is_none());
assert!(validate_object_key("a", 1024, false, None).is_none());
}
#[test]
fn test_invalid_object_keys() {
assert!(validate_object_key("", 1024, false, None).is_some());
assert!(validate_object_key("/leading-slash", 1024, false, None).is_some());
assert!(validate_object_key("path/../escape", 1024, false, None).is_some());
assert!(validate_object_key(".myfsio.sys/secret", 1024, false, None).is_some());
assert!(validate_object_key(".meta/data", 1024, false, None).is_some());
}
#[test]
fn test_object_key_max_length() {
let long_key = "a".repeat(1025);
assert!(validate_object_key(&long_key, 1024, false, None).is_some());
let ok_key = "a".repeat(1024);
assert!(validate_object_key(&ok_key, 1024, false, None).is_none());
}
#[test]
fn test_windows_validation() {
assert!(validate_object_key("CON", 1024, true, None).is_some());
assert!(validate_object_key("file<name", 1024, true, None).is_some());
assert!(validate_object_key("file.txt ", 1024, true, None).is_some());
}
}

View File

@@ -1,10 +0,0 @@
[package]
name = "myfsio-xml"
version = "0.1.0"
edition = "2021"
[dependencies]
myfsio-common = { path = "../myfsio-common" }
quick-xml = { workspace = true }
serde = { workspace = true }
chrono = { workspace = true }

View File

@@ -1,14 +0,0 @@
pub mod response;
pub mod request;
use quick_xml::Writer;
use std::io::Cursor;
pub fn write_xml_element(tag: &str, text: &str) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer
.create_element(tag)
.write_text_content(quick_xml::events::BytesText::new(text))
.unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}

View File

@@ -1,159 +0,0 @@
use quick_xml::events::Event;
use quick_xml::Reader;
#[derive(Debug, Default)]
pub struct DeleteObjectsRequest {
pub objects: Vec<ObjectIdentifier>,
pub quiet: bool,
}
#[derive(Debug)]
pub struct ObjectIdentifier {
pub key: String,
pub version_id: Option<String>,
}
#[derive(Debug, Default)]
pub struct CompleteMultipartUpload {
pub parts: Vec<CompletedPart>,
}
#[derive(Debug)]
pub struct CompletedPart {
pub part_number: u32,
pub etag: String,
}
pub fn parse_complete_multipart_upload(xml: &str) -> Result<CompleteMultipartUpload, String> {
let mut reader = Reader::from_str(xml);
let mut result = CompleteMultipartUpload::default();
let mut buf = Vec::new();
let mut current_tag = String::new();
let mut part_number: Option<u32> = None;
let mut etag: Option<String> = None;
let mut in_part = false;
loop {
match reader.read_event_into(&mut buf) {
Ok(Event::Start(ref e)) => {
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
current_tag = name.clone();
if name == "Part" {
in_part = true;
part_number = None;
etag = None;
}
}
Ok(Event::Text(ref e)) => {
if in_part {
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
match current_tag.as_str() {
"PartNumber" => {
part_number = Some(text.trim().parse().map_err(|e: std::num::ParseIntError| e.to_string())?);
}
"ETag" => {
etag = Some(text.trim().trim_matches('"').to_string());
}
_ => {}
}
}
}
Ok(Event::End(ref e)) => {
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
if name == "Part" && in_part {
if let (Some(pn), Some(et)) = (part_number.take(), etag.take()) {
result.parts.push(CompletedPart {
part_number: pn,
etag: et,
});
}
in_part = false;
}
}
Ok(Event::Eof) => break,
Err(e) => return Err(format!("XML parse error: {}", e)),
_ => {}
}
buf.clear();
}
result.parts.sort_by_key(|p| p.part_number);
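// clients may send parts in any order; S3 completes by ascending PartNumber
// (see the out-of-order test below)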
Ok(result)
}
pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
let mut reader = Reader::from_str(xml);
let mut result = DeleteObjectsRequest::default();
let mut buf = Vec::new();
let mut current_tag = String::new();
let mut current_key: Option<String> = None;
let mut current_version_id: Option<String> = None;
let mut in_object = false;
loop {
match reader.read_event_into(&mut buf) {
Ok(Event::Start(ref e)) => {
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
current_tag = name.clone();
if name == "Object" {
in_object = true;
current_key = None;
current_version_id = None;
}
}
Ok(Event::Text(ref e)) => {
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
match current_tag.as_str() {
"Key" if in_object => {
current_key = Some(text.trim().to_string());
}
"VersionId" if in_object => {
current_version_id = Some(text.trim().to_string());
}
"Quiet" => {
result.quiet = text.trim() == "true";
}
_ => {}
}
}
Ok(Event::End(ref e)) => {
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
if name == "Object" && in_object {
if let Some(key) = current_key.take() {
result.objects.push(ObjectIdentifier {
key,
version_id: current_version_id.take(),
});
}
in_object = false;
}
}
Ok(Event::Eof) => break,
Err(e) => return Err(format!("XML parse error: {}", e)),
_ => {}
}
buf.clear();
}
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_complete_multipart() {
let xml = r#"<CompleteMultipartUpload>
<Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
<Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
</CompleteMultipartUpload>"#;
let result = parse_complete_multipart_upload(xml).unwrap();
assert_eq!(result.parts.len(), 2);
assert_eq!(result.parts[0].part_number, 1);
assert_eq!(result.parts[0].etag, "etag1");
assert_eq!(result.parts[1].part_number, 2);
assert_eq!(result.parts[1].etag, "etag2");
}
}

View File

@@ -1,363 +0,0 @@
use chrono::{DateTime, Utc};
use myfsio_common::types::{BucketMeta, ObjectMeta};
use quick_xml::events::{BytesDecl, BytesEnd, BytesStart, BytesText, Event};
use quick_xml::Writer;
use std::io::Cursor;
pub fn format_s3_datetime(dt: &DateTime<Utc>) -> String {
dt.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
}
pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("ListAllMyBucketsResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
writer.write_event(Event::Start(BytesStart::new("Owner"))).unwrap();
write_text_element(&mut writer, "ID", owner_id);
write_text_element(&mut writer, "DisplayName", owner_name);
writer.write_event(Event::End(BytesEnd::new("Owner"))).unwrap();
writer.write_event(Event::Start(BytesStart::new("Buckets"))).unwrap();
for bucket in buckets {
writer.write_event(Event::Start(BytesStart::new("Bucket"))).unwrap();
write_text_element(&mut writer, "Name", &bucket.name);
write_text_element(&mut writer, "CreationDate", &format_s3_datetime(&bucket.creation_date));
writer.write_event(Event::End(BytesEnd::new("Bucket"))).unwrap();
}
writer.write_event(Event::End(BytesEnd::new("Buckets"))).unwrap();
writer.write_event(Event::End(BytesEnd::new("ListAllMyBucketsResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn list_objects_v2_xml(
bucket_name: &str,
prefix: &str,
delimiter: &str,
max_keys: usize,
objects: &[ObjectMeta],
common_prefixes: &[String],
is_truncated: bool,
continuation_token: Option<&str>,
next_continuation_token: Option<&str>,
key_count: usize,
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("ListBucketResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Name", bucket_name);
write_text_element(&mut writer, "Prefix", prefix);
if !delimiter.is_empty() {
write_text_element(&mut writer, "Delimiter", delimiter);
}
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
write_text_element(&mut writer, "KeyCount", &key_count.to_string());
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
if let Some(token) = continuation_token {
write_text_element(&mut writer, "ContinuationToken", token);
}
if let Some(token) = next_continuation_token {
write_text_element(&mut writer, "NextContinuationToken", token);
}
for obj in objects {
writer.write_event(Event::Start(BytesStart::new("Contents"))).unwrap();
write_text_element(&mut writer, "Key", &obj.key);
write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
if let Some(ref etag) = obj.etag {
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
}
write_text_element(&mut writer, "Size", &obj.size.to_string());
write_text_element(&mut writer, "StorageClass", obj.storage_class.as_deref().unwrap_or("STANDARD"));
writer.write_event(Event::End(BytesEnd::new("Contents"))).unwrap();
}
for prefix in common_prefixes {
writer.write_event(Event::Start(BytesStart::new("CommonPrefixes"))).unwrap();
write_text_element(&mut writer, "Prefix", prefix);
writer.write_event(Event::End(BytesEnd::new("CommonPrefixes"))).unwrap();
}
writer.write_event(Event::End(BytesEnd::new("ListBucketResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn list_objects_v1_xml(
bucket_name: &str,
prefix: &str,
marker: &str,
delimiter: &str,
max_keys: usize,
objects: &[ObjectMeta],
common_prefixes: &[String],
is_truncated: bool,
next_marker: Option<&str>,
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer
.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None)))
.unwrap();
let start = BytesStart::new("ListBucketResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Name", bucket_name);
write_text_element(&mut writer, "Prefix", prefix);
write_text_element(&mut writer, "Marker", marker);
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
if !delimiter.is_empty() {
write_text_element(&mut writer, "Delimiter", delimiter);
}
if !delimiter.is_empty() && is_truncated {
if let Some(nm) = next_marker {
if !nm.is_empty() {
write_text_element(&mut writer, "NextMarker", nm);
}
}
}
for obj in objects {
writer
.write_event(Event::Start(BytesStart::new("Contents")))
.unwrap();
write_text_element(&mut writer, "Key", &obj.key);
write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
if let Some(ref etag) = obj.etag {
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
}
write_text_element(&mut writer, "Size", &obj.size.to_string());
writer
.write_event(Event::End(BytesEnd::new("Contents")))
.unwrap();
}
for cp in common_prefixes {
writer
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
.unwrap();
write_text_element(&mut writer, "Prefix", cp);
writer
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
.unwrap();
}
writer
.write_event(Event::End(BytesEnd::new("ListBucketResult")))
.unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
fn write_text_element(writer: &mut Writer<Cursor<Vec<u8>>>, tag: &str, text: &str) {
writer.write_event(Event::Start(BytesStart::new(tag))).unwrap();
writer.write_event(Event::Text(BytesText::new(text))).unwrap();
writer.write_event(Event::End(BytesEnd::new(tag))).unwrap();
}
pub fn initiate_multipart_upload_xml(bucket: &str, key: &str, upload_id: &str) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("InitiateMultipartUploadResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Bucket", bucket);
write_text_element(&mut writer, "Key", key);
write_text_element(&mut writer, "UploadId", upload_id);
writer.write_event(Event::End(BytesEnd::new("InitiateMultipartUploadResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn complete_multipart_upload_xml(
bucket: &str,
key: &str,
etag: &str,
location: &str,
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("CompleteMultipartUploadResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Location", location);
write_text_element(&mut writer, "Bucket", bucket);
write_text_element(&mut writer, "Key", key);
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
writer.write_event(Event::End(BytesEnd::new("CompleteMultipartUploadResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("CopyObjectResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
write_text_element(&mut writer, "LastModified", last_modified);
writer.write_event(Event::End(BytesEnd::new("CopyObjectResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn delete_result_xml(
deleted: &[(String, Option<String>)],
errors: &[(String, String, String)],
quiet: bool,
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("DeleteResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
if !quiet {
for (key, version_id) in deleted {
writer.write_event(Event::Start(BytesStart::new("Deleted"))).unwrap();
write_text_element(&mut writer, "Key", key);
if let Some(vid) = version_id {
write_text_element(&mut writer, "VersionId", vid);
}
writer.write_event(Event::End(BytesEnd::new("Deleted"))).unwrap();
}
}
for (key, code, message) in errors {
writer.write_event(Event::Start(BytesStart::new("Error"))).unwrap();
write_text_element(&mut writer, "Key", key);
write_text_element(&mut writer, "Code", code);
write_text_element(&mut writer, "Message", message);
writer.write_event(Event::End(BytesEnd::new("Error"))).unwrap();
}
writer.write_event(Event::End(BytesEnd::new("DeleteResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn list_multipart_uploads_xml(
bucket: &str,
uploads: &[myfsio_common::types::MultipartUploadInfo],
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("ListMultipartUploadsResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Bucket", bucket);
for upload in uploads {
writer.write_event(Event::Start(BytesStart::new("Upload"))).unwrap();
write_text_element(&mut writer, "Key", &upload.key);
write_text_element(&mut writer, "UploadId", &upload.upload_id);
write_text_element(&mut writer, "Initiated", &format_s3_datetime(&upload.initiated));
writer.write_event(Event::End(BytesEnd::new("Upload"))).unwrap();
}
writer.write_event(Event::End(BytesEnd::new("ListMultipartUploadsResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
pub fn list_parts_xml(
bucket: &str,
key: &str,
upload_id: &str,
parts: &[myfsio_common::types::PartMeta],
) -> String {
let mut writer = Writer::new(Cursor::new(Vec::new()));
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
let start = BytesStart::new("ListPartsResult")
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
writer.write_event(Event::Start(start)).unwrap();
write_text_element(&mut writer, "Bucket", bucket);
write_text_element(&mut writer, "Key", key);
write_text_element(&mut writer, "UploadId", upload_id);
for part in parts {
writer.write_event(Event::Start(BytesStart::new("Part"))).unwrap();
write_text_element(&mut writer, "PartNumber", &part.part_number.to_string());
write_text_element(&mut writer, "ETag", &format!("\"{}\"", part.etag));
write_text_element(&mut writer, "Size", &part.size.to_string());
if let Some(ref lm) = part.last_modified {
write_text_element(&mut writer, "LastModified", &format_s3_datetime(lm));
}
writer.write_event(Event::End(BytesEnd::new("Part"))).unwrap();
}
writer.write_event(Event::End(BytesEnd::new("ListPartsResult"))).unwrap();
String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Utc;
#[test]
fn test_list_buckets_xml() {
let buckets = vec![BucketMeta {
name: "test-bucket".to_string(),
creation_date: Utc::now(),
}];
let xml = list_buckets_xml("owner-id", "owner-name", &buckets);
assert!(xml.contains("<Name>test-bucket</Name>"));
assert!(xml.contains("<ID>owner-id</ID>"));
assert!(xml.contains("ListAllMyBucketsResult"));
}
#[test]
fn test_list_objects_v2_xml() {
let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
let xml = list_objects_v2_xml(
"my-bucket", "", "/", 1000, &objects, &[], false, None, None, 1,
);
assert!(xml.contains("<Key>file.txt</Key>"));
assert!(xml.contains("<Size>1024</Size>"));
assert!(xml.contains("<IsTruncated>false</IsTruncated>"));
}
#[test]
fn test_list_objects_v1_xml() {
let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
let xml = list_objects_v1_xml(
"my-bucket",
"",
"",
"/",
1000,
&objects,
&[],
false,
None,
);
assert!(xml.contains("<Key>file.txt</Key>"));
assert!(xml.contains("<Size>1024</Size>"));
assert!(xml.contains("<Marker></Marker>"));
}
}

View File

@@ -125,7 +125,7 @@ pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyRes
fs::write(&path_owned, serialized)
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
Ok(true)
Ok(false)
})
}

View File

@@ -1,4 +1,4 @@
Flask>=3.1.3
Flask>=3.1.2
Flask-Limiter>=4.1.1
Flask-Cors>=6.0.2
Flask-WTF>=1.2.2
@@ -6,8 +6,8 @@ python-dotenv>=1.2.1
pytest>=9.0.2
requests>=2.32.5
boto3>=1.42.14
granian>=2.7.2
psutil>=7.2.2
cryptography>=46.0.5
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3
defusedxml>=0.7.1
duckdb>=1.5.1
duckdb>=1.4.4

run.py
View File

@@ -2,10 +2,7 @@
from __future__ import annotations
import argparse
import atexit
import os
import signal
import subprocess
import sys
import warnings
import multiprocessing
@@ -27,7 +24,6 @@ from typing import Optional
from app import create_api_app, create_ui_app
from app.config import AppConfig
from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key
from app.version import get_version
def _server_host() -> str:
@@ -44,85 +40,24 @@ def _is_frozen() -> bool:
return getattr(sys, 'frozen', False) or '__compiled__' in globals()
def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
from granian import Granian
from granian.constants import Interfaces
from granian.http import HTTP1Settings
kwargs: dict = {
"target": target,
"address": _server_host(),
"port": port,
"interface": Interfaces.WSGI,
"factory": True,
"workers": 1,
}
if config:
kwargs["blocking_threads"] = config.server_threads
kwargs["backlog"] = config.server_backlog
kwargs["backpressure"] = config.server_connection_limit
kwargs["http1_settings"] = HTTP1Settings(
header_read_timeout=config.server_channel_timeout * 1000,
max_buffer_size=config.server_max_buffer_size,
)
else:
kwargs["http1_settings"] = HTTP1Settings(
max_buffer_size=1024 * 1024 * 128,
)
server = Granian(**kwargs)
server.serve()
def _find_rust_binary() -> Optional[Path]:
candidates = [
Path("/usr/local/bin/myfsio-server"),
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server.exe",
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server",
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server.exe",
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server",
]
for p in candidates:
if p.exists():
return p
return None
def serve_rust_api(port: int, config: Optional[AppConfig] = None) -> None:
binary = _find_rust_binary()
if binary is None:
print("ERROR: Rust engine binary not found. Build it first:")
print(" cd myfsio-engine && cargo build --release")
sys.exit(1)
env = os.environ.copy()
env["PORT"] = str(port)
env["HOST"] = _server_host()
if config:
env["STORAGE_ROOT"] = str(config.storage_root)
env["AWS_REGION"] = config.aws_region
if config.secret_key:
env["SECRET_KEY"] = config.secret_key
env.setdefault("ENCRYPTION_ENABLED", str(config.encryption_enabled).lower())
env.setdefault("KMS_ENABLED", str(config.kms_enabled).lower())
env.setdefault("LIFECYCLE_ENABLED", str(config.lifecycle_enabled).lower())
env.setdefault("RUST_LOG", "info")
print(f"Starting Rust S3 engine: {binary}")
proc = subprocess.Popen([str(binary)], env=env)
try:
proc.wait()
except KeyboardInterrupt:
proc.terminate()
proc.wait(timeout=5)
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
app = create_api_app()
if prod:
_serve_granian("app:create_api_app", port, config)
from waitress import serve
if config:
serve(
app,
host=_server_host(),
port=port,
ident="MyFSIO",
threads=config.server_threads,
connection_limit=config.server_connection_limit,
backlog=config.server_backlog,
channel_timeout=config.server_channel_timeout,
)
else:
serve(app, host=_server_host(), port=port, ident="MyFSIO")
else:
app = create_api_app()
debug = _is_debug_enabled()
if debug:
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -130,10 +65,23 @@ def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None)
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
app = create_ui_app()
if prod:
_serve_granian("app:create_ui_app", port, config)
from waitress import serve
if config:
serve(
app,
host=_server_host(),
port=port,
ident="MyFSIO",
threads=config.server_threads,
connection_limit=config.server_connection_limit,
backlog=config.server_backlog,
channel_timeout=config.server_channel_timeout,
)
else:
serve(app, host=_server_host(), port=port, ident="MyFSIO")
else:
app = create_ui_app()
debug = _is_debug_enabled()
if debug:
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -178,7 +126,6 @@ def reset_credentials() -> None:
pass
if raw_config and raw_config.get("users"):
is_v2 = raw_config.get("version", 1) >= 2
admin_user = None
for user in raw_config["users"]:
policies = user.get("policies", [])
@@ -192,39 +139,15 @@ def reset_credentials() -> None:
if not admin_user:
admin_user = raw_config["users"][0]
if is_v2:
admin_keys = admin_user.get("access_keys", [])
if admin_keys:
admin_keys[0]["access_key"] = access_key
admin_keys[0]["secret_key"] = secret_key
else:
from datetime import datetime as _dt, timezone as _tz
admin_user["access_keys"] = [{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": _dt.now(_tz.utc).isoformat(),
}]
else:
admin_user["access_key"] = access_key
admin_user["secret_key"] = secret_key
admin_user["access_key"] = access_key
admin_user["secret_key"] = secret_key
else:
from datetime import datetime as _dt, timezone as _tz
raw_config = {
"version": 2,
"users": [
{
"user_id": f"u-{secrets.token_hex(8)}",
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": _dt.now(_tz.utc).isoformat(),
}
],
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
],
@@ -269,13 +192,11 @@ if __name__ == "__main__":
parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
parser.add_argument("--api-port", type=int, default=5000)
parser.add_argument("--ui-port", type=int, default=5100)
parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
parser.add_argument("--engine", choices=["python", "rust"], default=os.getenv("ENGINE", "python"), help="API engine: python (Flask) or rust (myfsio-engine)")
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
parser.add_argument("--version", action="version", version=f"MyFSIO {get_version()}")
args = parser.parse_args()
if args.reset_cred or args.mode == "reset-cred":
@@ -314,7 +235,7 @@ if __name__ == "__main__":
pass
if prod_mode:
print("Running in production mode (Granian)")
print("Running in production mode (Waitress)")
issues = config.validate_and_report()
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
if critical_issues:
@@ -325,32 +246,13 @@ if __name__ == "__main__":
else:
print("Running in development mode (Flask dev server)")
use_rust = args.engine == "rust"
if args.mode in {"api", "both"}:
if use_rust:
print(f"Starting Rust API engine on port {args.api_port}...")
else:
print(f"Starting API server on port {args.api_port}...")
if use_rust:
api_proc = Process(target=serve_rust_api, args=(args.api_port, config))
else:
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
print(f"Starting API server on port {args.api_port}...")
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
api_proc.start()
else:
api_proc = None
def _cleanup_api():
if api_proc and api_proc.is_alive():
api_proc.terminate()
api_proc.join(timeout=5)
if api_proc.is_alive():
api_proc.kill()
if api_proc:
atexit.register(_cleanup_api)
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
if args.mode in {"ui", "both"}:
print(f"Starting UI server on port {args.ui_port}...")
serve_ui(args.ui_port, prod_mode, config)

View File

@@ -379,25 +379,29 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
echo " ---------------"
if systemctl is-active --quiet myfsio; then
echo " [OK] MyFSIO is running"
echo ""
echo " ============================================"
echo " ADMIN CREDENTIALS (save these securely!)"
echo " ============================================"
CRED_OUTPUT=$(journalctl -u myfsio --no-pager -n 50 2>/dev/null | grep -A 5 "FIRST RUN - ADMIN CREDENTIALS")
ACCESS_KEY=$(echo "$CRED_OUTPUT" | grep "Access Key:" | head -1 | sed 's/.*Access Key: //' | awk '{print $1}')
SECRET_KEY=$(echo "$CRED_OUTPUT" | grep "Secret Key:" | head -1 | sed 's/.*Secret Key: //' | awk '{print $1}')
if [[ -n "$ACCESS_KEY" && "$ACCESS_KEY" != *"from"* && -n "$SECRET_KEY" && "$SECRET_KEY" != *"from"* ]]; then
echo " Access Key: $ACCESS_KEY"
echo " Secret Key: $SECRET_KEY"
else
echo " [!] Could not extract credentials from service logs."
echo " Check startup output: journalctl -u myfsio --no-pager | grep -A 5 'ADMIN CREDENTIALS'"
echo " Or reset credentials: $INSTALL_DIR/myfsio reset-cred"
IAM_FILE="$DATA_DIR/.myfsio.sys/config/iam.json"
if [[ -f "$IAM_FILE" ]]; then
echo ""
echo " ============================================"
echo " ADMIN CREDENTIALS (save these securely!)"
echo " ============================================"
if command -v jq &>/dev/null; then
ACCESS_KEY=$(jq -r '.users[0].access_key' "$IAM_FILE" 2>/dev/null)
SECRET_KEY=$(jq -r '.users[0].secret_key' "$IAM_FILE" 2>/dev/null)
else
ACCESS_KEY=$(grep -o '"access_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
SECRET_KEY=$(grep -o '"secret_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
fi
if [[ -n "$ACCESS_KEY" && -n "$SECRET_KEY" ]]; then
echo " Access Key: $ACCESS_KEY"
echo " Secret Key: $SECRET_KEY"
else
echo " [!] Could not parse credentials from $IAM_FILE"
echo " Check the file manually or view service logs."
fi
echo " ============================================"
fi
echo " ============================================"
echo ""
echo " NOTE: The IAM config file is encrypted at rest."
echo " Credentials are only shown on first run or after reset."
else
echo " [WARNING] MyFSIO may not have started correctly"
echo " Check logs with: journalctl -u myfsio -f"
@@ -423,13 +427,12 @@ echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "local
echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
echo ""
echo "Credentials:"
echo " Admin credentials are shown on first service start (see above)."
echo " The IAM config is encrypted at rest and cannot be read directly."
echo " To reset credentials: $INSTALL_DIR/myfsio reset-cred"
echo " Admin credentials were shown above (if service was started)."
echo " You can also find them in: $DATA_DIR/.myfsio.sys/config/iam.json"
echo ""
echo "Configuration Files:"
echo " Environment: $INSTALL_DIR/myfsio.env"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted)"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " Secret Key: $DATA_DIR/.myfsio.sys/config/.secret (auto-generated)"
echo ""

View File

@@ -230,14 +230,11 @@ if [[ "$KEEP_DATA" == true ]]; then
echo ""
echo "Preserved files include:"
echo " - All buckets and objects"
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted at rest)"
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " - Bucket policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " - Secret key: $DATA_DIR/.myfsio.sys/config/.secret"
echo " - Encryption keys: $DATA_DIR/.myfsio.sys/keys/ (if encryption was enabled)"
echo ""
echo "NOTE: The IAM config is encrypted and requires the SECRET_KEY to read."
echo " Keep the .secret file intact for reinstallation."
echo ""
echo "To reinstall MyFSIO with existing data:"
echo " ./install.sh --data-dir $DATA_DIR"
echo ""

View File

@@ -2655,7 +2655,7 @@ pre code {
}
.objects-table-container {
max-height: 60vh;
max-height: none;
}
.preview-card {

View File

@@ -98,9 +98,6 @@
const previewMetadata = document.getElementById('preview-metadata');
const previewMetadataList = document.getElementById('preview-metadata-list');
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewPlaceholderDefault = previewPlaceholder ? previewPlaceholder.innerHTML : '';
const previewErrorAlert = document.getElementById('preview-error-alert');
const previewDetailsMeta = document.getElementById('preview-details-meta');
const previewImage = document.getElementById('preview-image');
const previewVideo = document.getElementById('preview-video');
const previewAudio = document.getElementById('preview-audio');
@@ -245,12 +242,12 @@
</svg>
</a>
<div class="dropdown d-inline-block">
<button class="btn btn-outline-secondary btn-icon dropdown-toggle" type="button" data-bs-toggle="dropdown" data-bs-auto-close="true" data-bs-config='{"popperConfig":{"strategy":"fixed"}}' aria-expanded="false" title="More actions">
<button class="btn btn-outline-secondary btn-icon dropdown-toggle" type="button" data-bs-toggle="dropdown" data-bs-auto-close="true" aria-expanded="false" title="More actions">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
</svg>
</button>
<ul class="dropdown-menu dropdown-menu-end">
<ul class="dropdown-menu dropdown-menu-end" style="position: fixed;">
<li><button class="dropdown-item" type="button" onclick="openCopyMoveModal('copy', '${escapeHtml(obj.key)}')">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path fill-rule="evenodd" d="M4 2a2 2 0 0 1 2-2h8a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V2Zm2-1a1 1 0 0 0-1 1v8a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H6ZM2 5a1 1 0 0 0-1 1v8a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1v-1h1v1a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h1v1H2Z"/></svg>
Copy
@@ -852,11 +849,6 @@
selectCheckbox.checked = true;
row.classList.add('table-active');
}
if (activeRow && activeRow.dataset.key === row.dataset.key) {
row.classList.add('table-active');
activeRow = row;
}
});
const folderRows = document.querySelectorAll('.folder-row');
@@ -869,11 +861,6 @@
const checkbox = row.querySelector('[data-folder-select]');
checkbox?.addEventListener('change', (e) => {
e.stopPropagation();
if (checkbox.checked) {
selectedRows.set(folderPath, { key: folderPath, isFolder: true });
} else {
selectedRows.delete(folderPath);
}
const folderObjects = allObjects.filter(obj => obj.key.startsWith(folderPath));
folderObjects.forEach(obj => {
if (checkbox.checked) {
@@ -948,7 +935,7 @@
const row = e.target.closest('[data-object-row]');
if (!row) return;
if (e.target.closest('[data-delete-object]') || e.target.closest('[data-object-select]') || e.target.closest('a') || e.target.closest('.dropdown')) {
if (e.target.closest('[data-delete-object]') || e.target.closest('[data-object-select]') || e.target.closest('a')) {
return;
}
@@ -1358,11 +1345,8 @@
}
if (selectAllCheckbox) {
const filesInView = visibleItems.filter(item => item.type === 'file');
const foldersInView = visibleItems.filter(item => item.type === 'folder');
const total = filesInView.length + foldersInView.length;
const fileSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
const folderSelectedCount = foldersInView.filter(item => selectedRows.has(item.path)).length;
const visibleSelectedCount = fileSelectedCount + folderSelectedCount;
const total = filesInView.length;
const visibleSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
selectAllCheckbox.disabled = total === 0;
selectAllCheckbox.checked = visibleSelectedCount > 0 && visibleSelectedCount === total && total > 0;
selectAllCheckbox.indeterminate = visibleSelectedCount > 0 && visibleSelectedCount < total;
@@ -1384,12 +1368,8 @@
const keys = Array.from(selectedRows.keys());
bulkDeleteList.innerHTML = '';
if (bulkDeleteCount) {
const folderCount = keys.filter(k => k.endsWith('/')).length;
const objectCount = keys.length - folderCount;
const parts = [];
if (folderCount) parts.push(`${folderCount} folder${folderCount !== 1 ? 's' : ''}`);
if (objectCount) parts.push(`${objectCount} object${objectCount !== 1 ? 's' : ''}`);
bulkDeleteCount.textContent = `${parts.join(' and ')} selected`;
const label = keys.length === 1 ? 'object' : 'objects';
bulkDeleteCount.textContent = `${keys.length} ${label} selected`;
}
if (!keys.length) {
const empty = document.createElement('li');
@@ -1528,7 +1508,7 @@
};
const response = await fetch(endpoint, {
method: 'POST',
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': window.getCsrfToken ? window.getCsrfToken() : '' },
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload),
});
const data = await response.json();
@@ -1972,10 +1952,6 @@
[previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
if (!el) return;
el.classList.add('d-none');
if (el.tagName === 'IMG') {
el.removeAttribute('src');
el.onload = null;
}
if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
el.pause();
el.removeAttribute('src');
@@ -1988,38 +1964,9 @@
previewText.classList.add('d-none');
previewText.textContent = '';
}
previewPlaceholder.innerHTML = previewPlaceholderDefault;
previewPlaceholder.classList.remove('d-none');
};
let previewFailed = false;
const handlePreviewError = () => {
previewFailed = true;
if (downloadButton) {
downloadButton.classList.add('disabled');
downloadButton.removeAttribute('href');
}
if (presignButton) presignButton.disabled = true;
if (generatePresignButton) generatePresignButton.disabled = true;
if (previewDetailsMeta) previewDetailsMeta.classList.add('d-none');
if (previewMetadata) previewMetadata.classList.add('d-none');
const tagsPanel = document.getElementById('preview-tags');
if (tagsPanel) tagsPanel.classList.add('d-none');
const versionPanel = document.getElementById('version-panel');
if (versionPanel) versionPanel.classList.add('d-none');
if (previewErrorAlert) {
previewErrorAlert.textContent = 'Unable to load object \u2014 it may have been deleted, or the server returned an error.';
previewErrorAlert.classList.remove('d-none');
}
};
const clearPreviewError = () => {
previewFailed = false;
if (previewErrorAlert) previewErrorAlert.classList.add('d-none');
if (previewDetailsMeta) previewDetailsMeta.classList.remove('d-none');
};
async function fetchMetadata(metadataUrl) {
if (!metadataUrl) return null;
try {
@@ -2041,7 +1988,6 @@
previewPanel.classList.remove('d-none');
activeRow = row;
renderMetadata(null);
clearPreviewError();
previewKey.textContent = row.dataset.key;
previewSize.textContent = formatBytes(Number(row.dataset.size));
@@ -2065,71 +2011,18 @@
const previewUrl = row.dataset.previewUrl;
const lower = row.dataset.key.toLowerCase();
if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
previewPlaceholder.innerHTML = '<div class="spinner-border spinner-border-sm text-secondary" role="status"></div><div class="small mt-2">Loading preview\u2026</div>';
const currentRow = row;
fetch(previewUrl)
.then((r) => {
if (activeRow !== currentRow) return;
if (!r.ok) {
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
return;
}
return r.blob();
})
.then((blob) => {
if (!blob || activeRow !== currentRow) return;
const url = URL.createObjectURL(blob);
previewImage.onload = () => {
if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
previewImage.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
};
previewImage.onerror = () => {
if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
URL.revokeObjectURL(url);
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
};
previewImage.src = url;
})
.catch(() => {
if (activeRow !== currentRow) return;
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
});
previewImage.src = previewUrl;
previewImage.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
const currentRow = row;
previewVideo.onerror = () => {
if (activeRow !== currentRow) return;
previewVideo.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewVideo.src = previewUrl;
previewVideo.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
const currentRow = row;
previewAudio.onerror = () => {
if (activeRow !== currentRow) return;
previewAudio.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewAudio.src = previewUrl;
previewAudio.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(pdf)$/)) {
const currentRow = row;
previewIframe.onerror = () => {
if (activeRow !== currentRow) return;
previewIframe.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewIframe.src = previewUrl;
previewIframe.style.minHeight = '500px';
previewIframe.classList.remove('d-none');
@@ -2154,17 +2047,14 @@
})
.catch(() => {
if (activeRow !== currentRow) return;
previewText.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
previewText.textContent = 'Failed to load preview';
});
}
const metadataUrl = row.dataset.metadataUrl;
if (metadataUrl) {
const metadata = await fetchMetadata(metadataUrl);
if (activeRow === row && !previewFailed) {
if (activeRow === row) {
renderMetadata(metadata);
}
}
@@ -3262,15 +3152,6 @@
}
});
const foldersInView = visibleItems.filter(item => item.type === 'folder');
foldersInView.forEach(item => {
if (shouldSelect) {
selectedRows.set(item.path, { key: item.path, isFolder: true });
} else {
selectedRows.delete(item.path);
}
});
document.querySelectorAll('[data-folder-select]').forEach(cb => {
cb.checked = shouldSelect;
});
@@ -4071,10 +3952,6 @@
const loadObjectTags = async (row) => {
if (!row || !previewTagsPanel) return;
if (previewFailed) {
previewTagsPanel.classList.add('d-none');
return;
}
const tagsUrl = row.dataset.tagsUrl;
if (!tagsUrl) {
previewTagsPanel.classList.add('d-none');
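
With folder selection removed, the select-all checkbox state reduces to counting visible files. The same logic restated in Python for clarity (select_all_state is illustrative, not app code):

def select_all_state(visible_files: list[str], selected: set[str]) -> dict:
    # Mirrors the checkbox logic above: checked only when every
    # visible file is selected, indeterminate on a partial selection.
    total = len(visible_files)
    picked = sum(1 for key in visible_files if key in selected)
    return {
        "disabled": total == 0,
        "checked": total > 0 and picked == total,
        "indeterminate": 0 < picked < total,
    }

assert select_all_state(["a.txt", "b.txt"], {"a.txt"})["indeterminate"]
assert select_all_state(["a.txt"], {"a.txt"})["checked"]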

View File

@@ -3,8 +3,6 @@ window.BucketDetailUpload = (function() {
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
const CHUNK_SIZE = 8 * 1024 * 1024;
const MAX_PART_RETRIES = 3;
const RETRY_BASE_DELAY_MS = 1000;
let state = {
isUploading: false,
@@ -206,67 +204,6 @@ window.BucketDetailUpload = (function() {
}
}
function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
return new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.open('PUT', url, true);
xhr.setRequestHeader('X-CSRFToken', csrfToken || '');
xhr.upload.addEventListener('progress', (e) => {
if (e.lengthComputable) {
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts}`,
loaded: baseBytes + e.loaded,
total: fileSize
});
}
});
xhr.addEventListener('load', () => {
if (xhr.status >= 200 && xhr.status < 300) {
try {
resolve(JSON.parse(xhr.responseText));
} catch {
reject(new Error(`Part ${partNumber}: invalid response`));
}
} else {
try {
const data = JSON.parse(xhr.responseText);
reject(new Error(data.error || `Part ${partNumber} failed (${xhr.status})`));
} catch {
reject(new Error(`Part ${partNumber} failed (${xhr.status})`));
}
}
});
xhr.addEventListener('error', () => reject(new Error(`Part ${partNumber}: network error`)));
xhr.addEventListener('abort', () => reject(new Error(`Part ${partNumber}: aborted`)));
xhr.send(chunk);
});
}
async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
let lastError;
for (let attempt = 0; attempt <= MAX_PART_RETRIES; attempt++) {
try {
return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
} catch (err) {
lastError = err;
if (attempt < MAX_PART_RETRIES) {
const delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
loaded: baseBytes,
total: fileSize
});
await new Promise(r => setTimeout(r, delay));
}
}
}
throw lastError;
}
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
@@ -296,14 +233,26 @@ window.BucketDetailUpload = (function() {
const end = Math.min(start + CHUNK_SIZE, file.size);
const chunk = file.slice(start, end);
const partData = await uploadPartWithRetry(
`${partUrl}?partNumber=${partNumber}`,
chunk, csrfToken, uploadedBytes, file.size,
progressItem, partNumber, totalParts
);
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts}`,
loaded: uploadedBytes,
total: file.size
});
const partResp = await fetch(`${partUrl}?partNumber=${partNumber}`, {
method: 'PUT',
headers: { 'X-CSRFToken': csrfToken || '' },
body: chunk
});
if (!partResp.ok) {
const err = await partResp.json().catch(() => ({}));
throw new Error(err.error || `Part ${partNumber} failed`);
}
const partData = await partResp.json();
parts.push({ part_number: partNumber, etag: partData.etag });
uploadedBytes += (end - start);
uploadedBytes += chunk.size;
updateProgressItem(progressItem, {
loaded: uploadedBytes,
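
The removed wrapper retried each part with exponential backoff (1 s, 2 s, 4 s) before the plain fetch call took over. Its schedule as a small Python helper, with upload_part standing in for the actual transfer:

import time
from typing import Callable, TypeVar

T = TypeVar("T")
MAX_PART_RETRIES = 3       # same constant the hunk deletes
RETRY_BASE_DELAY_S = 1.0   # doubles per attempt: 1 s, 2 s, 4 s

def upload_with_retry(upload_part: Callable[[], T]) -> T:
    last_error: Exception = RuntimeError("no attempts made")
    for attempt in range(MAX_PART_RETRIES + 1):
        try:
            return upload_part()
        except Exception as err:  # stand-in for network/HTTP failures
            last_error = err
            if attempt < MAX_PART_RETRIES:
                time.sleep(RETRY_BASE_DELAY_S * 2 ** attempt)
    raise last_error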

View File

@@ -17,20 +17,12 @@ window.IAMManagement = (function() {
var currentDeleteKey = null;
var currentExpiryKey = null;
var ALL_S3_ACTIONS = [
'list', 'read', 'write', 'delete', 'share', 'policy',
'replication', 'lifecycle', 'cors',
'create_bucket', 'delete_bucket',
'versioning', 'tagging', 'encryption', 'quota',
'object_lock', 'notification', 'logging', 'website'
];
var ALL_S3_ACTIONS = ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors'];
var policyTemplates = {
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors', 'iam:*'] }],
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
bucketadmin: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'versioning', 'tagging', 'encryption', 'cors', 'lifecycle', 'quota', 'object_lock', 'notification', 'logging', 'website', 'replication'] }]
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }]
};
function isAdminUser(policies) {
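
The templates above only shape the policy JSON; the server-side evaluator is not part of this hunk. A rough sketch of how such a {bucket, actions} list can be checked, assuming '*' matches any bucket or action as the Full Control template suggests (allows is illustrative):

def allows(policies: list[dict], bucket: str, action: str) -> bool:
    # Illustrative only; not the application's actual checker.
    for policy in policies:
        if policy["bucket"] not in ("*", bucket):
            continue
        if "*" in policy["actions"] or action in policy["actions"]:
            return True
    return False

readonly = [{"bucket": "*", "actions": ["list", "read"]}]
assert allows(readonly, "photos", "read")
assert not allows(readonly, "photos", "write")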

View File

@@ -110,14 +110,6 @@
<span>Domains</span>
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span>System</span>
</a>
{% endif %}
</div>
<div class="nav-section">
<span class="nav-section-title">Resources</span>
@@ -218,14 +210,6 @@
<span class="sidebar-link-text">Domains</span>
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}" data-tooltip="System">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="sidebar-link-text">System</span>
</a>
{% endif %}
</div>
<div class="nav-section">
<span class="nav-section-title">Resources</span>

View File

@@ -257,8 +257,7 @@
Share Link
</button>
</div>
<div id="preview-error-alert" class="alert alert-warning d-none py-2 px-3 mb-3 small" role="alert"></div>
<div id="preview-details-meta" class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<div class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<dl class="row small mb-0">
<dt class="col-5 text-muted fw-normal">Last modified</dt>
<dd class="col-7 mb-2 fw-medium" id="preview-modified"></dd>
@@ -2058,7 +2057,7 @@
<div class="col-12">
<label class="form-label fw-medium">Select files</label>
<input class="form-control" type="file" name="object" id="uploadFileInput" multiple required />
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB use multipart uploads with automatic retry.</div>
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB automatically switch to multipart uploads.</div>
</div>
<div class="col-12">
<div class="upload-dropzone text-center" data-dropzone>

View File

@@ -84,7 +84,7 @@ pip install -r requirements.txt
# Run both API and UI (Development)
python run.py
# Run in Production (Granian server)
# Run in Production (Waitress server)
python run.py --prod
# Or run individually
@@ -220,7 +220,7 @@ python run.py --mode ui
<tr>
<td><code>SERVER_THREADS</code></td>
<td><code>0</code> (auto)</td>
<td>Granian blocking threads (1-64). 0 = auto (CPU cores × 2).</td>
<td>Waitress worker threads (1-64). 0 = auto (CPU cores × 2).</td>
</tr>
<tr>
<td><code>SERVER_CONNECTION_LIMIT</code></td>
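
Per the row above, SERVER_THREADS defaults to 0, meaning auto (CPU cores x 2) within a 1-64 range. Whether out-of-range values are clamped or rejected is not shown here; this sketch clamps (resolve_threads is illustrative):

import os

def resolve_threads(configured: int = 0) -> int:
    if configured == 0:                      # 0 = auto
        configured = (os.cpu_count() or 1) * 2
    return max(1, min(64, configured))       # keep inside 1-64

print(resolve_threads())     # auto for this machine
print(resolve_threads(128))  # clamped to 64 under this assumption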

View File

@@ -235,7 +235,7 @@
{% set bucket_label = 'All Buckets' if policy.bucket == '*' else policy.bucket %}
{% if '*' in policy.actions %}
{% set perm_label = 'Full Access' %}
{% elif policy.actions|length >= 19 %}
{% elif policy.actions|length >= 9 %}
{% set perm_label = 'Full Access' %}
{% elif 'list' in policy.actions and 'read' in policy.actions and 'write' in policy.actions and 'delete' in policy.actions %}
{% set perm_label = 'Read + Write + Delete' %}
@@ -354,8 +354,6 @@
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="full">Full Control</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="readonly">Read-Only</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="writer">Read + Write</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="operator">Operator</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="bucketadmin">Bucket Admin</button>
</div>
</div>
<div class="modal-footer">
@@ -406,8 +404,6 @@
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="full">Full Control</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="readonly">Read-Only</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="writer">Read + Write</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="operator">Operator</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="bucketadmin">Bucket Admin</button>
</div>
</form>
</div>
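
The cutoff change from 19 to 9 tracks ALL_S3_ACTIONS shrinking to nine entries, so a policy carrying the full trimmed action list still reads as Full Access. The label heuristic restated in Python (the final fallback branch is assumed; it is not in this hunk):

def perm_label(actions: list[str]) -> str:
    if "*" in actions or len(actions) >= 9:
        return "Full Access"
    if {"list", "read", "write", "delete"} <= set(actions):
        return "Read + Write + Delete"
    # Assumed fallback; the template's remaining branches are not shown.
    return ", ".join(actions) if actions else "No Access"

assert perm_label(["*"]) == "Full Access"
assert perm_label(["list", "read"]) == "list, read"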

View File

@@ -210,6 +210,9 @@
<div class="fw-bold" data-metric="health_uptime">{{ app.uptime_days }}d</div>
<small class="opacity-75" style="font-size: 0.7rem;">Uptime</small>
</div>
<div class="text-center">
<span class="badge bg-white bg-opacity-25 fw-semibold px-2 py-1">v{{ app.version }}</span>
</div>
</div>
</div>
</div>

View File

@@ -1,750 +0,0 @@
{% extends "base.html" %}
{% block title %}System - MyFSIO Console{% endblock %}
{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<p class="text-uppercase text-muted small mb-1">Administration</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
System
</h1>
<p class="text-muted mb-0 mt-1">Server information, feature flags, and maintenance tools.</p>
</div>
<div class="d-none d-md-block">
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">v{{ app_version }}</span>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5 0a.5.5 0 0 1 .5.5V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2A2.5 2.5 0 0 1 14 4.5h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14a2.5 2.5 0 0 1-2.5 2.5v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14A2.5 2.5 0 0 1 2 11.5H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2A2.5 2.5 0 0 1 4.5 2V.5A.5.5 0 0 1 5 0zm-.5 3A1.5 1.5 0 0 0 3 4.5v7A1.5 1.5 0 0 0 4.5 13h7a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 11.5 3h-7zM5 6.5A1.5 1.5 0 0 1 6.5 5h3A1.5 1.5 0 0 1 11 6.5v3A1.5 1.5 0 0 1 9.5 11h-3A1.5 1.5 0 0 1 5 9.5v-3zM6.5 6a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
</svg>
Server Information
</h5>
<p class="text-muted small mb-0">Runtime environment and configuration</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
<tr><td class="text-muted" style="width:40%">Version</td><td class="fw-medium">{{ app_version }}</td></tr>
<tr><td class="text-muted">Storage Root</td><td><code>{{ storage_root }}</code></td></tr>
<tr><td class="text-muted">Platform</td><td>{{ platform }}</td></tr>
<tr><td class="text-muted">Python</td><td>{{ python_version }}</td></tr>
<tr><td class="text-muted">Rust Extension</td><td>
{% if has_rust %}
<span class="badge bg-success bg-opacity-10 text-success">Loaded</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Not loaded</span>
{% endif %}
</td></tr>
</tbody>
</table>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M11.5 2a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM9.05 3a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0V3h9.05zM4.5 7a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM2.05 8a2.5 2.5 0 0 1 4.9 0H16v1H6.95a2.5 2.5 0 0 1-4.9 0H0V8h2.05zm9.45 4a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zm-2.45 1a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0v-1h9.05z"/>
</svg>
Feature Flags
</h5>
<p class="text-muted small mb-0">Features configured via environment variables</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
{% for feat in features %}
<tr>
<td class="text-muted" style="width:55%">{{ feat.label }}</td>
<td class="text-end">
{% if feat.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Enabled</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
Garbage Collection
</h5>
<p class="text-muted small mb-0">Clean up temporary files, orphaned uploads, and stale locks</p>
</div>
<div>
{% if gc_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if gc_status.enabled %}
<div class="d-flex gap-2 mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="gcRunBtn" onclick="runGC(false)">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Run Now
</button>
<button class="btn btn-outline-secondary btn-sm" id="gcDryRunBtn" onclick="runGC(true)">
Dry Run
</button>
</div>
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>GC in progress<span id="gcScanElapsed"></span></span>
</div>
</div>
<div id="gcResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="gcResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="gcResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('gcResult').classList.add('d-none')"></button>
</div>
<div id="gcResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ gc_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if gc_status.dry_run else "No" }}</div>
<div class="col-6 mb-1"><span class="text-muted">Temp max age:</span> {{ gc_status.temp_file_max_age_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Lock max age:</span> {{ gc_status.lock_file_max_age_hours }}h</div>
<div class="col-6"><span class="text-muted">Multipart max age:</span> {{ gc_status.multipart_max_age_days }}d</div>
</div>
</div>
<div id="gcHistoryContainer">
{% if gc_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Executions
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Cleaned</th>
<th class="text-center">Freed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in gc_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">
{% set r = exec.result %}
{{ (r.temp_files_deleted|d(0)) + (r.multipart_uploads_deleted|d(0)) + (r.lock_files_deleted|d(0)) + (r.orphaned_metadata_deleted|d(0)) + (r.orphaned_versions_deleted|d(0)) + (r.empty_dirs_removed|d(0)) }}
</td>
<td class="text-center">{{ exec.bytes_freed_display }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No executions recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
<p class="text-muted mb-1">Garbage collection is not enabled.</p>
<p class="text-muted small mb-0">Set <code>GC_ENABLED=true</code> to enable automatic cleanup.</p>
</div>
{% endif %}
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
Integrity Scanner
</h5>
<p class="text-muted small mb-0">Detect and heal corrupted objects, orphaned files, and metadata drift</p>
</div>
<div>
{% if integrity_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if integrity_status.enabled %}
<div class="d-flex gap-2 flex-wrap mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Scan Now
</button>
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
Scan &amp; Heal
</button>
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
Dry Run
</button>
</div>
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
</div>
</div>
<div id="integrityResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="integrityResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="integrityResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('integrityResult').classList.add('d-none')"></button>
</div>
<div id="integrityResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ integrity_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if integrity_status.dry_run else "No" }}</div>
<div class="col-6"><span class="text-muted">Batch size:</span> {{ integrity_status.batch_size }}</div>
<div class="col-6"><span class="text-muted">Auto-heal:</span> {{ "Yes" if integrity_status.auto_heal else "No" }}</div>
</div>
</div>
<div id="integrityHistoryContainer">
{% if integrity_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Scans
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Scanned</th>
<th class="text-center">Issues</th>
<th class="text-center">Healed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in integrity_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">{{ exec.result.objects_scanned|d(0) }}</td>
<td class="text-center">
{% set total_issues = (exec.result.corrupted_objects|d(0)) + (exec.result.orphaned_objects|d(0)) + (exec.result.phantom_metadata|d(0)) + (exec.result.stale_versions|d(0)) + (exec.result.etag_cache_inconsistencies|d(0)) + (exec.result.legacy_metadata_drifts|d(0)) %}
{% if total_issues > 0 %}
<span class="text-danger fw-medium">{{ total_issues }}</span>
{% else %}
<span class="text-success">0</span>
{% endif %}
</td>
<td class="text-center">{{ exec.result.issues_healed|d(0) }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>
{% elif exec.auto_heal %}
<span class="badge bg-success bg-opacity-10 text-success">Heal</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No scans recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
<p class="text-muted mb-1">Integrity scanner is not enabled.</p>
<p class="text-muted small mb-0">Set <code>INTEGRITY_ENABLED=true</code> to enable automatic scanning.</p>
</div>
{% endif %}
</div>
</div>
</div>
</div>
{% endblock %}
{% block extra_scripts %}
<script>
(function () {
var csrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
function setLoading(btnId, loading, spinnerOnly) {
var btn = document.getElementById(btnId);
if (!btn) return;
btn.disabled = loading;
if (loading && !spinnerOnly) {
btn.dataset.originalHtml = btn.innerHTML;
btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status"></span>Running...';
} else if (!loading && btn.dataset.originalHtml) {
btn.innerHTML = btn.dataset.originalHtml;
}
}
function formatBytes(bytes) {
if (!bytes || bytes === 0) return '0 B';
var units = ['B', 'KB', 'MB', 'GB'];
var i = 0;
var b = bytes;
while (b >= 1024 && i < units.length - 1) { b /= 1024; i++; }
return (i === 0 ? b : b.toFixed(1)) + ' ' + units[i];
}
var _displayTimezone = {{ display_timezone|tojson }};
function formatTimestamp(ts) {
var d = new Date(ts * 1000);
try {
var opts = {year: 'numeric', month: 'short', day: '2-digit', hour: '2-digit', minute: '2-digit', hour12: false, timeZone: _displayTimezone, timeZoneName: 'short'};
return d.toLocaleString('en-US', opts);
} catch (e) {
var pad = function (n) { return n < 10 ? '0' + n : '' + n; };
return d.getUTCFullYear() + '-' + pad(d.getUTCMonth() + 1) + '-' + pad(d.getUTCDate()) +
' ' + pad(d.getUTCHours()) + ':' + pad(d.getUTCMinutes()) + ' UTC';
}
}
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
function _gcRefreshHistory() {
fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('gcHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Executions</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
'<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
(r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
(r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
(r.orphaned_version_bytes_freed || 0);
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + cleaned + '</td>' +
'<td class="text-center">' + formatBytes(freed) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
function _integrityRefreshHistory() {
fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('integrityHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Scans</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
'<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
'<th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
(r.phantom_metadata || 0) + (r.stale_versions || 0) +
(r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
var issueHtml = issues > 0
? '<span class="text-danger fw-medium">' + issues + '</span>'
: '<span class="text-success">0</span>';
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
: (exec.auto_heal
? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
'<td class="text-center">' + issueHtml + '</td>' +
'<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
var _gcPollTimer = null;
var _gcLastDryRun = false;
function _gcSetScanning(scanning) {
var banner = document.getElementById('gcScanningBanner');
var btns = ['gcRunBtn', 'gcDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('gcScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
function _gcShowResult(data, dryRun) {
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
(data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
(data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
(data.orphaned_version_bytes_freed || 0);
alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
var lines = [];
if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
if (totalItems === 0) lines.push('Nothing to clean up.');
if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
function _gcPoll() {
fetch('{{ url_for("ui.system_gc_status") }}', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (status) {
if (status.scanning) {
var elapsed = status.scan_elapsed_seconds || 0;
document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
_gcPollTimer = setTimeout(_gcPoll, 2000);
} else {
_gcSetScanning(false);
_gcRefreshHistory();
fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
if (hist.executions && hist.executions.length > 0) {
var latest = hist.executions[0];
_gcShowResult(latest.result, latest.dry_run);
}
})
.catch(function () {});
}
})
.catch(function () {
_gcPollTimer = setTimeout(_gcPoll, 3000);
});
}
window.runGC = function (dryRun) {
_gcLastDryRun = dryRun;
document.getElementById('gcResult').classList.add('d-none');
_gcSetScanning(true);
fetch('{{ url_for("ui.system_gc_run") }}', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
body: JSON.stringify({dry_run: dryRun})
})
.then(function (r) { return r.json(); })
.then(function (data) {
if (data.error) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = data.error;
return;
}
_gcPollTimer = setTimeout(_gcPoll, 2000);
})
.catch(function (err) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = err.message;
});
};
{% if gc_status.scanning %}
_gcSetScanning(true);
_gcPollTimer = setTimeout(_gcPoll, 2000);
{% endif %}
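// Integrity scan UI: mirrors the GC flow above (run, poll, render result).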
var _integrityPollTimer = null;
var _integrityLastMode = {dryRun: false, autoHeal: false};
function _integritySetScanning(scanning) {
var banner = document.getElementById('integrityScanningBanner');
var btns = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('integrityScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
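// Render an integrity result: warning style when issues were found,
// success style when the scan came back clean.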
function _integrityShowResult(data, dryRun, autoHeal) {
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
var body = document.getElementById('integrityResultBody');
container.classList.remove('d-none');
var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
(data.phantom_metadata || 0) + (data.stale_versions || 0) +
(data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);
var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
alert.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
var lines = [];
lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
if (totalIssues === 0) {
lines.push('No issues found.');
} else {
if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
}
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
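// Poll the integrity status endpoint; on completion, pull the latest
// execution from history and display it.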
function _integrityPoll() {
fetch('{{ url_for("ui.system_integrity_status") }}', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (status) {
if (status.scanning) {
var elapsed = status.scan_elapsed_seconds || 0;
document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
} else {
_integritySetScanning(false);
_integrityRefreshHistory();
fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
if (hist.executions && hist.executions.length > 0) {
var latest = hist.executions[0];
_integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
}
})
.catch(function () {});
}
})
.catch(function () {
_integrityPollTimer = setTimeout(_integrityPoll, 3000);
});
}
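// Kick off an integrity scan with the chosen dry-run/auto-heal flags.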
window.runIntegrity = function (dryRun, autoHeal) {
_integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
document.getElementById('integrityResult').classList.add('d-none');
_integritySetScanning(true);
fetch('{{ url_for("ui.system_integrity_run") }}', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
body: JSON.stringify({dry_run: dryRun, auto_heal: autoHeal})
})
.then(function (r) { return r.json(); })
.then(function (data) {
if (data.error) {
_integritySetScanning(false);
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
var body = document.getElementById('integrityResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = data.error;
return;
}
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
})
.catch(function (err) {
_integritySetScanning(false);
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
var body = document.getElementById('integrityResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = err.message;
});
};
{% if integrity_status.scanning %}
_integritySetScanning(true);
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
{% endif %}
})();
</script>
{% endblock %}

View File

@@ -27,10 +27,7 @@ def app(tmp_path: Path):
"access_key": "test",
"secret_key": "secret",
"display_name": "Test User",
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy",
"create_bucket", "delete_bucket", "share", "versioning", "tagging",
"encryption", "cors", "lifecycle", "replication", "quota",
"object_lock", "notification", "logging", "website"]}],
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy"]}],
}
]
}

View File

@@ -1,56 +1,3 @@
import hashlib
import hmac
from datetime import datetime, timezone
from urllib.parse import quote
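# Build an AWS SigV4 presigned query string (UNSIGNED-PAYLOAD, host-only
# signed headers) for a GET against the local test server.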
def _build_presigned_query(path: str, *, access_key: str = "test", secret_key: str = "secret", expires: int = 60) -> str:
now = datetime.now(timezone.utc)
amz_date = now.strftime("%Y%m%dT%H%M%SZ")
date_stamp = now.strftime("%Y%m%d")
region = "us-east-1"
service = "s3"
credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"
query_items = [
("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD"),
("X-Amz-Credential", f"{access_key}/{credential_scope}"),
("X-Amz-Date", amz_date),
("X-Amz-Expires", str(expires)),
("X-Amz-SignedHeaders", "host"),
]
canonical_query = "&".join(
f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}" for k, v in sorted(query_items)
)
canonical_request = "\n".join([
"GET",
quote(path, safe="/-_.~"),
canonical_query,
"host:localhost\n",
"host",
"UNSIGNED-PAYLOAD",
])
hashed_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
string_to_sign = "\n".join([
"AWS4-HMAC-SHA256",
amz_date,
credential_scope,
hashed_request,
])
def _sign(key: bytes, msg: str) -> bytes:
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
k_date = _sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
k_region = _sign(k_date, region)
k_service = _sign(k_region, service)
signing_key = _sign(k_service, "aws4_request")
signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
return canonical_query + f"&X-Amz-Signature={signature}"
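# Illustrative use (bucket/key names here are hypothetical):
#   query = _build_presigned_query("/photos/cat.png", expires=300)
#   client.get(f"/photos/cat.png?{query}", headers={"Host": "localhost"})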
def test_bucket_and_object_lifecycle(client, signer):
headers = signer("PUT", "/photos")
response = client.put("/photos", headers=headers)
@@ -167,45 +114,6 @@ def test_missing_credentials_denied(client):
assert response.status_code == 403
def test_presigned_url_denied_for_disabled_user(client, signer):
headers = signer("PUT", "/secure")
assert client.put("/secure", headers=headers).status_code == 200
payload = b"hello"
headers = signer("PUT", "/secure/file.txt", body=payload)
assert client.put("/secure/file.txt", headers=headers, data=payload).status_code == 200
iam = client.application.extensions["iam"]
iam.disable_user("test")
query = _build_presigned_query("/secure/file.txt")
response = client.get(f"/secure/file.txt?{query}", headers={"Host": "localhost"})
assert response.status_code == 403
assert b"User account is disabled" in response.data
def test_presigned_url_denied_for_inactive_key(client, signer):
headers = signer("PUT", "/secure2")
assert client.put("/secure2", headers=headers).status_code == 200
payload = b"hello"
headers = signer("PUT", "/secure2/file.txt", body=payload)
assert client.put("/secure2/file.txt", headers=headers, data=payload).status_code == 200
iam = client.application.extensions["iam"]
for user in iam._raw_config.get("users", []):
for key_info in user.get("access_keys", []):
if key_info.get("access_key") == "test":
key_info["status"] = "inactive"
iam._save()
iam._load()
query = _build_presigned_query("/secure2/file.txt")
response = client.get(f"/secure2/file.txt?{query}", headers={"Host": "localhost"})
assert response.status_code == 403
assert b"Access key is inactive" in response.data
def test_bucket_policies_deny_reads(client, signer):
import json

View File

@@ -317,7 +317,7 @@ class TestAdminAPI:
)
assert resp.status_code == 200
data = resp.get_json()
assert data["status"] == "started"
assert "temp_files_deleted" in data
def test_gc_dry_run(self, gc_app):
client = gc_app.test_client()
@@ -329,17 +329,11 @@ class TestAdminAPI:
)
assert resp.status_code == 200
data = resp.get_json()
assert data["status"] == "started"
assert "temp_files_deleted" in data
def test_gc_history(self, gc_app):
import time
client = gc_app.test_client()
client.post("/admin/gc/run", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
for _ in range(50):
time.sleep(0.1)
status = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}).get_json()
if not status.get("scanning"):
break
resp = client.get("/admin/gc/history", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
assert resp.status_code == 200
data = resp.get_json()

View File

@@ -2,25 +2,13 @@ import hashlib
import json
import os
import sys
import time
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
from app.integrity import IntegrityChecker, IntegrityCursorStore, IntegrityResult
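# The admin run endpoints are asynchronous, so tests poll the status
# endpoint until the background scan reports it is no longer scanning.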
def _wait_scan_done(client, headers, timeout=10):
deadline = time.time() + timeout
while time.time() < deadline:
resp = client.get("/admin/integrity/status", headers=headers)
data = resp.get_json()
if not data.get("scanning"):
return
time.sleep(0.1)
raise TimeoutError("scan did not complete")
from app.integrity import IntegrityChecker, IntegrityResult
def _md5(data: bytes) -> str:
@@ -118,7 +106,7 @@ class TestCorruptedObjects:
result = checker.run_now()
assert result.corrupted_objects == 0
assert result.objects_scanned >= 1
assert result.objects_scanned == 1
def test_corrupted_nested_key(self, storage_root, checker):
_setup_bucket(storage_root, "mybucket", {"sub/dir/file.txt": b"nested content"})
@@ -425,13 +413,8 @@ class TestAdminAPI:
resp = client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
assert resp.status_code == 200
data = resp.get_json()
assert data["status"] == "started"
_wait_scan_done(client, AUTH_HEADERS)
resp = client.get("/admin/integrity/history?limit=1", headers=AUTH_HEADERS)
hist = resp.get_json()
assert len(hist["executions"]) >= 1
assert "corrupted_objects" in hist["executions"][0]["result"]
assert "objects_scanned" in hist["executions"][0]["result"]
assert "corrupted_objects" in data
assert "objects_scanned" in data
def test_run_with_overrides(self, integrity_app):
client = integrity_app.test_client()
@@ -441,12 +424,10 @@ class TestAdminAPI:
json={"dry_run": True, "auto_heal": True},
)
assert resp.status_code == 200
_wait_scan_done(client, AUTH_HEADERS)
def test_history_endpoint(self, integrity_app):
client = integrity_app.test_client()
client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
_wait_scan_done(client, AUTH_HEADERS)
resp = client.get("/admin/integrity/history", headers=AUTH_HEADERS)
assert resp.status_code == 200
data = resp.get_json()
@@ -503,7 +484,7 @@ class TestMultipleBuckets:
result = checker.run_now()
assert result.buckets_scanned == 2
assert result.objects_scanned >= 2
assert result.objects_scanned == 2
assert result.corrupted_objects == 0
@@ -516,273 +497,3 @@ class TestGetStatus:
assert "batch_size" in status
assert "auto_heal" in status
assert "dry_run" in status
def test_status_includes_cursor(self, storage_root, checker):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker.run_now()
status = checker.get_status()
assert "cursor" in status
assert status["cursor"]["tracked_buckets"] == 1
assert "mybucket" in status["cursor"]["buckets"]
class TestUnifiedBatchCounter:
def test_orphaned_objects_count_toward_batch(self, storage_root):
_setup_bucket(storage_root, "mybucket", {})
for i in range(10):
(storage_root / "mybucket" / f"orphan{i}.txt").write_bytes(f"data{i}".encode())
checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
result = checker.run_now()
assert result.objects_scanned <= 3
def test_phantom_metadata_counts_toward_batch(self, storage_root):
objects = {f"file{i}.txt": f"data{i}".encode() for i in range(10)}
_setup_bucket(storage_root, "mybucket", objects)
for i in range(10):
(storage_root / "mybucket" / f"file{i}.txt").unlink()
checker = IntegrityChecker(storage_root=storage_root, batch_size=5)
result = checker.run_now()
assert result.objects_scanned <= 5
def test_all_check_types_contribute(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"valid.txt": b"hello"})
(storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan")
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
result = checker.run_now()
assert result.objects_scanned > 2
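# Bucket rotation: never-scanned buckets are visited first, then buckets
# in ascending last-scanned order, so every bucket is eventually covered.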
class TestCursorRotation:
def test_oldest_bucket_scanned_first(self, storage_root):
_setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
_setup_bucket(storage_root, "bucket-c", {"c.txt": b"ccc"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=5)
checker.cursor_store.update_bucket("bucket-a", 1000.0)
checker.cursor_store.update_bucket("bucket-b", 3000.0)
checker.cursor_store.update_bucket("bucket-c", 2000.0)
ordered = checker.cursor_store.get_bucket_order(["bucket-a", "bucket-b", "bucket-c"])
assert ordered[0] == "bucket-a"
assert ordered[1] == "bucket-c"
assert ordered[2] == "bucket-b"
def test_never_scanned_buckets_first(self, storage_root):
_setup_bucket(storage_root, "bucket-old", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-new", {"b.txt": b"bbb"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker.cursor_store.update_bucket("bucket-old", time.time())
ordered = checker.cursor_store.get_bucket_order(["bucket-old", "bucket-new"])
assert ordered[0] == "bucket-new"
def test_rotation_covers_all_buckets(self, storage_root):
for name in ["bucket-a", "bucket-b", "bucket-c"]:
_setup_bucket(storage_root, name, {f"{name}.txt": name.encode()})
checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
result1 = checker.run_now()
assert result1.buckets_scanned >= 1
result2 = checker.run_now()
result3 = checker.run_now()
cursor_info = checker.cursor_store.get_info()
assert cursor_info["tracked_buckets"] == 3
def test_cursor_persistence(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker1 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker1.run_now()
cursor1 = checker1.cursor_store.get_info()
assert cursor1["tracked_buckets"] == 1
assert "mybucket" in cursor1["buckets"]
checker2 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
cursor2 = checker2.cursor_store.get_info()
assert cursor2["tracked_buckets"] == 1
assert "mybucket" in cursor2["buckets"]
def test_stale_cursor_cleanup(self, storage_root):
_setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker.run_now()
import shutil
shutil.rmtree(storage_root / "bucket-b")
meta_b = storage_root / ".myfsio.sys" / "buckets" / "bucket-b"
if meta_b.exists():
shutil.rmtree(meta_b)
checker.run_now()
cursor_info = checker.cursor_store.get_info()
assert "bucket-b" not in cursor_info["buckets"]
assert "bucket-a" in cursor_info["buckets"]
def test_cursor_updates_after_scan(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
before = time.time()
checker.run_now()
after = time.time()
cursor_info = checker.cursor_store.get_info()
entry = cursor_info["buckets"]["mybucket"]
assert before <= entry["last_scanned"] <= after
assert entry["completed"] is True
class TestIntraBucketCursor:
def test_resumes_from_cursor_key(self, storage_root):
objects = {f"file_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(10)}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
result1 = checker.run_now()
assert result1.objects_scanned == 3
cursor_info = checker.cursor_store.get_info()
entry = cursor_info["buckets"]["mybucket"]
assert entry["last_key"] is not None
assert entry["completed"] is False
result2 = checker.run_now()
assert result2.objects_scanned == 3
cursor_after = checker.cursor_store.get_info()["buckets"]["mybucket"]
assert cursor_after["last_key"] > entry["last_key"]
def test_cursor_resets_after_full_pass(self, storage_root):
objects = {f"file_{i}.txt": f"data{i}".encode() for i in range(3)}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=100)
checker.run_now()
cursor_info = checker.cursor_store.get_info()
entry = cursor_info["buckets"]["mybucket"]
assert entry["last_key"] is None
assert entry["completed"] is True
def test_full_coverage_across_cycles(self, storage_root):
objects = {f"obj_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(10)}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
all_scanned = 0
for _ in range(10):
result = checker.run_now()
all_scanned += result.objects_scanned
if checker.cursor_store.get_info()["buckets"]["mybucket"]["completed"]:
break
assert all_scanned >= 10
def test_deleted_cursor_key_skips_gracefully(self, storage_root):
objects = {f"file_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(6)}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
checker.run_now()
cursor_info = checker.cursor_store.get_info()
cursor_key = cursor_info["buckets"]["mybucket"]["last_key"]
assert cursor_key is not None
obj_path = storage_root / "mybucket" / cursor_key
meta_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta"
key_path = Path(cursor_key)
if key_path.parent == Path("."):
index_path = meta_root / "_index.json"
else:
index_path = meta_root / key_path.parent / "_index.json"
if obj_path.exists():
obj_path.unlink()
if index_path.exists():
index_data = json.loads(index_path.read_text())
index_data.pop(key_path.name, None)
index_path.write_text(json.dumps(index_data))
result2 = checker.run_now()
assert result2.objects_scanned > 0
def test_incomplete_buckets_prioritized(self, storage_root):
_setup_bucket(storage_root, "bucket-a", {f"a{i}.txt": b"a" for i in range(5)})
_setup_bucket(storage_root, "bucket-b", {f"b{i}.txt": b"b" for i in range(5)})
checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
checker.run_now()
cursor_info = checker.cursor_store.get_info()
incomplete = [
name for name, info in cursor_info["buckets"].items()
if info.get("last_key") is not None
]
assert len(incomplete) >= 1
result2 = checker.run_now()
assert result2.objects_scanned > 0
def test_cursor_skips_nested_directories(self, storage_root):
objects = {
"aaa/file1.txt": b"a1",
"aaa/file2.txt": b"a2",
"bbb/file1.txt": b"b1",
"bbb/file2.txt": b"b2",
"ccc/file1.txt": b"c1",
"ccc/file2.txt": b"c2",
}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
result1 = checker.run_now()
assert result1.objects_scanned == 4
cursor_info = checker.cursor_store.get_info()
cursor_key = cursor_info["buckets"]["mybucket"]["last_key"]
assert cursor_key is not None
assert cursor_key.startswith("aaa/") or cursor_key.startswith("bbb/")
result2 = checker.run_now()
assert result2.objects_scanned >= 2
all_scanned = result1.objects_scanned + result2.objects_scanned
for _ in range(10):
if checker.cursor_store.get_info()["buckets"]["mybucket"]["completed"]:
break
r = checker.run_now()
all_scanned += r.objects_scanned
assert all_scanned >= 6
def test_sorted_walk_order(self, storage_root):
objects = {
"bar.txt": b"bar",
"bar/inner.txt": b"inner",
"abc.txt": b"abc",
"zzz/deep.txt": b"deep",
}
_setup_bucket(storage_root, "mybucket", objects)
checker = IntegrityChecker(storage_root=storage_root, batch_size=100)
result = checker.run_now()
assert result.objects_scanned >= 4
assert result.total_issues == 0