31 Commits

| SHA1 | Message | Date |
| --- | --- | --- |
| 50fb5aa387 | MyFSIO v0.3.9 Release (Reviewed-on: #32) | 2026-03-14 09:44:14 +00:00 |
| cc161bf362 | MyFSIO v0.3.8 Release (Reviewed-on: #31) | 2026-03-10 08:31:27 +00:00 |
| 2a0e77a754 | MyFSIO v0.3.7 Release (Reviewed-on: #30) | 2026-03-09 06:25:50 +00:00 |
| eb0e435a5a | MyFSIO v0.3.6 Release (Reviewed-on: #29) | 2026-03-08 04:46:31 +00:00 |
| 7633007a08 | MyFSIO v0.3.5 Release (Reviewed-on: #28) | 2026-03-07 05:53:02 +00:00 |
| de0d869c9f | Merge pull request 'MyFSIO v0.3.4 Release' (#27) from next into main (Reviewed-on: #27) | 2026-03-02 08:31:32 +00:00 |
| fdd068feee | MyFSIO v0.3.3 Release (Reviewed-on: #26) | 2026-02-27 04:49:32 +00:00 |
| 66b7677d2c | MyFSIO v0.3.2 Release (Reviewed-on: #25) | 2026-02-26 10:10:19 +00:00 |
| 4d90ead816 | Merge pull request 'Fix incorrect Upgrading & Updates section in Docs' (#24) from next into main (Reviewed-on: #24) | 2026-02-26 09:50:17 +00:00 |
| b37a51ed1d | MyFSIO v0.3.1 Release (Reviewed-on: #23) | 2026-02-26 09:42:37 +00:00 |
| 0462a7b62e | MyFSIO v0.3.0 Release (Reviewed-on: #22) | 2026-02-22 10:22:35 +00:00 |
| 52660570c1 | Merge pull request 'MyFSIO v0.2.9 Release' (#21) from next into main (Reviewed-on: #21) | 2026-02-15 14:24:14 +00:00 |
| 35f61313e0 | MyFSIO v0.2.8 Release (Reviewed-on: #20) | 2026-02-10 14:16:22 +00:00 |
| c470cfb576 | MyFSIO v0.2.7 Release (Reviewed-on: #19) | 2026-02-09 12:22:37 +00:00 |
| d96955deee | MyFSIO v0.2.6 Release (Reviewed-on: #18) | 2026-02-05 16:18:03 +00:00 |
| 85181f0be6 | Merge pull request 'MyFSIO v0.2.5 Release' (#17) from next into main (Reviewed-on: #17) | 2026-02-02 05:32:02 +00:00 |
| d5ca7a8be1 | Merge pull request 'MyFSIO v0.2.4 Release' (#16) from next into main (Reviewed-on: #16) | 2026-02-01 10:27:11 +00:00 |
| 476dc79e42 | MyFSIO v0.2.3 Release (Reviewed-on: #15) | 2026-01-25 06:05:53 +00:00 |
| bb6590fc5e | Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main (Reviewed-on: #14) | 2026-01-19 07:12:15 +00:00 |
| 899db3421b | Merge pull request 'MyFSIO v0.2.1 Release' (#13) from next into main (Reviewed-on: #13) | 2026-01-12 08:03:29 +00:00 |
| caf01d6ada | Merge pull request 'MyFSIO v0.2.0 Release' (#12) from next into main (Reviewed-on: #12) | 2026-01-05 15:48:03 +00:00 |
| bb366cb4cd | Merge pull request 'MyFSIO v0.1.9 Release' (#10) from next into main (Reviewed-on: #10) | 2025-12-29 06:49:48 +00:00 |
| a2745ff2ee | Merge pull request 'MyFSIO v0.1.8 Release' (#9) from next into main (Reviewed-on: #9) | 2025-12-23 06:01:32 +00:00 |
| 28cb656d94 | Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main (Reviewed-on: #8) | 2025-12-22 03:10:35 +00:00 |
| 3c44152fc6 | Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main (Reviewed-on: #7) | 2025-12-21 06:30:21 +00:00 |
| 397515edce | Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main (Reviewed-on: #6) | 2025-12-13 15:41:03 +00:00 |
| 980fced7e4 | Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main (Reviewed-on: #5) | 2025-12-13 08:22:43 +00:00 |
| bae5009ec4 | Merge pull request 'Release v0.1.3' (#4) from next into main (Reviewed-on: #4) | 2025-12-03 04:14:57 +00:00 |
| 233780617f | Merge pull request 'Release V0.1.2' (#3) from next into main (Reviewed-on: #3) | 2025-11-26 04:59:15 +00:00 |
| fd8fb21517 | Merge pull request 'Prepare for binary release' (#2) from next into main (Reviewed-on: #2) | 2025-11-22 12:33:38 +00:00 |
| c6cbe822e1 | Merge pull request 'Release v0.1.1' (#1) from next into main (Reviewed-on: #1) | 2025-11-22 12:31:27 +00:00 |
206 changed files with 3290 additions and 64645 deletions

View File

@@ -11,7 +11,3 @@ htmlcov
logs
data
tmp
tests
myfsio_core/target
Dockerfile
.dockerignore

7
.gitignore vendored
View File

@@ -27,11 +27,8 @@ dist/
.eggs/
# Rust / maturin build artifacts
python/myfsio_core/target/
python/myfsio_core/Cargo.lock
# Rust engine build artifacts
rust/myfsio-engine/target/
myfsio_core/target/
myfsio_core/Cargo.lock
# Local runtime artifacts
logs/

View File

@@ -1,9 +1,9 @@
FROM python:3.14.3-slim AS builder
FROM python:3.14.3-slim
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1
WORKDIR /build
WORKDIR /app
RUN apt-get update \
&& apt-get install -y --no-install-recommends build-essential curl \
@@ -12,35 +12,24 @@ RUN apt-get update \
ENV PATH="/root/.cargo/bin:${PATH}"
RUN pip install --no-cache-dir maturin
COPY myfsio_core ./myfsio_core
RUN cd myfsio_core \
&& maturin build --release --out /wheels
FROM python:3.14.3-slim
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1
WORKDIR /app
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY --from=builder /wheels/*.whl /tmp/
RUN pip install --no-cache-dir /tmp/*.whl && rm /tmp/*.whl
COPY . .
COPY app ./app
COPY templates ./templates
COPY static ./static
COPY run.py ./
COPY docker-entrypoint.sh ./
RUN pip install --no-cache-dir maturin \
&& cd myfsio_core \
&& maturin build --release \
&& pip install target/wheels/*.whl \
&& cd .. \
&& rm -rf myfsio_core/target \
&& pip uninstall -y maturin \
&& rustup self uninstall -y
RUN chmod +x docker-entrypoint.sh \
&& mkdir -p /app/data \
&& useradd -m -u 1000 myfsio \
RUN chmod +x docker-entrypoint.sh
RUN mkdir -p /app/data \
&& useradd -m -u 1000 myfsio \
&& chown -R myfsio:myfsio /app
USER myfsio

390
README.md
View File

@@ -1,212 +1,250 @@
# MyFSIO
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The active server lives under `rust/myfsio-engine` and serves both the S3 API and the built-in web UI from a single process.
The `python/` implementation is deprecated as of 2026-04-21. It remains in the repository for migration reference and legacy tests, but new development and supported runtime usage should target the Rust server.
A lightweight, S3-compatible object storage system built with Flask. MyFSIO implements core AWS S3 REST API operations with filesystem-backed storage, making it ideal for local development, testing, and self-hosted storage scenarios.
## Features
- S3-compatible REST API with Signature Version 4 authentication
- Browser UI for buckets, objects, IAM users, policies, replication, metrics, and site administration
- Filesystem-backed storage rooted at `data/`
- Bucket versioning, multipart uploads, presigned URLs, CORS, object and bucket tagging
- Server-side encryption and built-in KMS support
- Optional background services for lifecycle, garbage collection, integrity scanning, operation metrics, and system metrics history
- Replication, site sync, and static website hosting support
**Core Storage**
- S3-compatible REST API with AWS Signature Version 4 authentication
- Bucket and object CRUD operations
- Object versioning with version history
- Multipart uploads for large files
- Presigned URLs (1 second to 7 days validity)
## Runtime Model
**Security & Access Control**
- IAM users with access key management and rotation
- Bucket policies (AWS Policy Version 2012-10-17)
- Server-side encryption (SSE-S3 and SSE-KMS)
- Built-in Key Management Service (KMS)
- Rate limiting per endpoint
MyFSIO now runs as one Rust process:
**Advanced Features**
- Cross-bucket replication to remote S3-compatible endpoints
- Hot-reload for bucket policies (no restart required)
- CORS configuration per bucket
- API listener on `HOST` + `PORT` (default `127.0.0.1:5000`)
- UI listener on `HOST` + `UI_PORT` (default `127.0.0.1:5100`)
- Shared state for storage, IAM, policies, sessions, metrics, and background workers
**Management UI**
- Web console for bucket and object management
- IAM dashboard for user administration
- Inline JSON policy editor with presets
- Object browser with folder navigation and bulk operations
- Dark mode support
If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only" runtime anymore.
## Architecture
```
+------------------+ +------------------+
| API Server | | UI Server |
| (port 5000) | | (port 5100) |
| | | |
| - S3 REST API |<------->| - Web Console |
| - SigV4 Auth | | - IAM Dashboard |
| - Presign URLs | | - Bucket Editor |
+--------+---------+ +------------------+
|
v
+------------------+ +------------------+
| Object Storage | | System Metadata |
| (filesystem) | | (.myfsio.sys/) |
| | | |
| data/<bucket>/ | | - IAM config |
| <objects> | | - Bucket policies|
| | | - Encryption keys|
+------------------+ +------------------+
```
## Quick Start
From the repository root:
```bash
cd rust/myfsio-engine
cargo run -p myfsio-server --
# Clone and setup
git clone https://gitea.jzwsite.com/kqjy/MyFSIO
cd s3
python -m venv .venv
# Activate virtual environment
# Windows PowerShell:
.\.venv\Scripts\Activate.ps1
# Windows CMD:
.venv\Scripts\activate.bat
# Linux/macOS:
source .venv/bin/activate
# Install dependencies
pip install -r requirements.txt
# Start both servers
python run.py
# Or start individually
python run.py --mode api # API only (port 5000)
python run.py --mode ui # UI only (port 5100)
```
Useful URLs:
**Credentials:** Generated automatically on first run and printed to the console. If you miss them, check the IAM config file at `<STORAGE_ROOT>/.myfsio.sys/config/iam.json`.
- UI: `http://127.0.0.1:5100/ui`
- API: `http://127.0.0.1:5000/`
- Health: `http://127.0.0.1:5000/myfsio/health`
On first boot, MyFSIO creates `data/.myfsio.sys/config/iam.json` and prints the generated admin access key and secret key to the console.
### Common CLI commands
```bash
# Show resolved configuration
cargo run -p myfsio-server -- --show-config
# Validate configuration and exit non-zero on critical issues
cargo run -p myfsio-server -- --check-config
# Reset admin credentials
cargo run -p myfsio-server -- --reset-cred
# API only
UI_ENABLED=false cargo run -p myfsio-server --
```
## Building a Binary
```bash
cd rust/myfsio-engine
cargo build --release -p myfsio-server
```
Binary locations:
- Linux/macOS: `rust/myfsio-engine/target/release/myfsio-server`
- Windows: `rust/myfsio-engine/target/release/myfsio-server.exe`
Run the built binary directly:
```bash
./target/release/myfsio-server
```
- **Web Console:** http://127.0.0.1:5100/ui
- **API Endpoint:** http://127.0.0.1:5000
## Configuration
The server reads configuration from the process environment and, when present, also loads these files:
- `/opt/myfsio/myfsio.env`
- `.env`
- `myfsio.env`
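
A minimal `myfsio.env` sketch using variables documented in the tables below; the values here are illustrative placeholders, not recommendations:

```bash
# Example myfsio.env — adjust paths and endpoints for your deployment
HOST=0.0.0.0
PORT=5000
UI_PORT=5100
UI_ENABLED=true
STORAGE_ROOT=/var/lib/myfsio/data
API_BASE_URL=https://s3.example.com
AWS_REGION=us-east-1
ENCRYPTION_ENABLED=false
```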
Core settings:
| Variable | Default | Description |
| --- | --- | --- |
| `HOST` | `127.0.0.1` | Bind address for API and UI listeners |
| `PORT` | `5000` | API port |
| `UI_PORT` | `5100` | UI port |
| `UI_ENABLED` | `true` | Disable to run API-only |
| `STORAGE_ROOT` | `./data` | Root directory for buckets and system metadata |
| `IAM_CONFIG` | `<STORAGE_ROOT>/.myfsio.sys/config/iam.json` | IAM config path |
| `API_BASE_URL` | unset | Public API base used by the UI and presigned URL generation |
| `AWS_REGION` | `us-east-1` | Region used in SigV4 scope |
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Allowed request time skew |
| `PRESIGNED_URL_MIN_EXPIRY_SECONDS` | `1` | Minimum presigned URL expiry |
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Maximum presigned URL expiry |
| `SECRET_KEY` | loaded from `.myfsio.sys/config/.secret` if present | Session signing key and IAM-at-rest encryption key |
| `ADMIN_ACCESS_KEY` | unset | Optional first-run or reset access key |
| `ADMIN_SECRET_KEY` | unset | Optional first-run or reset secret key |
Feature toggles:
| Variable | Default |
| --- | --- |
| `ENCRYPTION_ENABLED` | `false` |
| `KMS_ENABLED` | `false` |
| `GC_ENABLED` | `false` |
| `INTEGRITY_ENABLED` | `false` |
| `LIFECYCLE_ENABLED` | `false` |
| `METRICS_HISTORY_ENABLED` | `false` |
| `OPERATION_METRICS_ENABLED` | `false` |
| `WEBSITE_HOSTING_ENABLED` | `false` |
| `SITE_SYNC_ENABLED` | `false` |
Metrics and replication tuning:
| Variable | Default |
| --- | --- |
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` |
| `OPERATION_METRICS_RETENTION_HOURS` | `24` |
| `METRICS_HISTORY_INTERVAL_MINUTES` | `5` |
| `METRICS_HISTORY_RETENTION_HOURS` | `24` |
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` |
| `REPLICATION_READ_TIMEOUT_SECONDS` | `30` |
| `REPLICATION_MAX_RETRIES` | `2` |
| `REPLICATION_STREAMING_THRESHOLD_BYTES` | `10485760` |
| `REPLICATION_MAX_FAILURES_PER_BUCKET` | `50` |
| `SITE_SYNC_INTERVAL_SECONDS` | `60` |
| `SITE_SYNC_BATCH_SIZE` | `100` |
| `SITE_SYNC_CONNECT_TIMEOUT_SECONDS` | `10` |
| `SITE_SYNC_READ_TIMEOUT_SECONDS` | `120` |
| `SITE_SYNC_MAX_RETRIES` | `2` |
| `SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS` | `1.0` |
UI asset overrides:
| Variable | Default |
| --- | --- |
| `TEMPLATES_DIR` | built-in crate templates directory |
| `STATIC_DIR` | built-in crate static directory |
See [docs.md](./docs.md) for the full Rust-side operations guide.
|----------|---------|-------------|
| `STORAGE_ROOT` | `./data` | Filesystem root for bucket storage |
| `IAM_CONFIG` | `.myfsio.sys/config/iam.json` | IAM user and policy store |
| `BUCKET_POLICY_PATH` | `.myfsio.sys/config/bucket_policies.json` | Bucket policy store |
| `API_BASE_URL` | `http://127.0.0.1:5000` | API endpoint for UI calls |
| `MAX_UPLOAD_SIZE` | `1073741824` | Maximum upload size in bytes (1 GB) |
| `MULTIPART_MIN_PART_SIZE` | `5242880` | Minimum multipart part size (5 MB) |
| `UI_PAGE_SIZE` | `100` | Default page size for listings |
| `SECRET_KEY` | `dev-secret-key` | Flask session secret |
| `AWS_REGION` | `us-east-1` | Region for SigV4 signing |
| `AWS_SERVICE` | `s3` | Service name for SigV4 signing |
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption |
| `KMS_ENABLED` | `false` | Enable Key Management Service |
| `LOG_LEVEL` | `INFO` | Logging verbosity |
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Max time skew for SigV4 requests |
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Max presigned URL expiry (7 days) |
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` | Replication connection timeout |
| `SITE_SYNC_ENABLED` | `false` | Enable bi-directional site sync |
| `OBJECT_TAG_LIMIT` | `50` | Maximum tags per object |
## Data Layout
```text
data/
<bucket>/
.myfsio.sys/
config/
iam.json
bucket_policies.json
connections.json
operation_metrics.json
metrics_history.json
buckets/<bucket>/
meta/
versions/
multipart/
keys/
```
data/
├── <bucket>/ # User buckets with objects
└── .myfsio.sys/ # System metadata
├── config/
│ ├── iam.json # IAM users and policies
│ ├── bucket_policies.json # Bucket policies
│ ├── replication_rules.json
│ └── connections.json # Remote S3 connections
├── buckets/<bucket>/
│ ├── meta/ # Object metadata (.meta.json)
│ ├── versions/ # Archived object versions
│ └── .bucket.json # Bucket config (versioning, CORS)
├── multipart/ # Active multipart uploads
└── keys/ # Encryption keys (SSE-S3/KMS)
```
## API Reference
All endpoints require AWS Signature Version 4 authentication unless they are accessed through presigned URLs or allowed by a public bucket policy.
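
Any SigV4-capable client can drive the endpoints listed below. A minimal sketch, assuming the `boto3` SDK and the default local port; the credentials and bucket name are placeholders:

```python
import boto3

# boto3 signs every request with Signature Version 4 automatically.
s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
    region_name="us-east-1",
)

s3.create_bucket(Bucket="demo")                             # PUT /demo
s3.put_object(Bucket="demo", Key="hello.txt", Body=b"hi")   # PUT /demo/hello.txt
print(s3.list_buckets()["Buckets"])                         # GET /

# Presigned GET URL; expiry is bounded by the PRESIGNED_URL_*_EXPIRY settings.
url = s3.generate_presigned_url(
    "get_object", Params={"Bucket": "demo", "Key": "hello.txt"}, ExpiresIn=3600
)
```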
### Bucket Operations
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/` | List all buckets |
| `PUT` | `/<bucket>` | Create bucket |
| `DELETE` | `/<bucket>` | Delete bucket (must be empty) |
| `HEAD` | `/<bucket>` | Check bucket exists |
### Object Operations
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/<bucket>` | List objects (supports `list-type=2`) |
| `PUT` | `/<bucket>/<key>` | Upload object |
| `GET` | `/<bucket>/<key>` | Download object |
| `DELETE` | `/<bucket>/<key>` | Delete object |
| `HEAD` | `/<bucket>/<key>` | Get object metadata |
| `POST` | `/<bucket>/<key>?uploads` | Initiate multipart upload |
| `PUT` | `/<bucket>/<key>?partNumber=N&uploadId=X` | Upload part |
| `POST` | `/<bucket>/<key>?uploadId=X` | Complete multipart upload |
| `DELETE` | `/<bucket>/<key>?uploadId=X` | Abort multipart upload |
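
The multipart endpoints above compose as initiate, upload parts, complete. A hedged sketch reusing the `s3` client from the earlier example; all parts except the last must meet the 5 MB minimum part size:

```python
# Multipart upload flow: initiate -> upload parts -> complete.
mpu = s3.create_multipart_upload(Bucket="demo", Key="big.bin")
upload_id = mpu["UploadId"]

parts = []
for number, chunk in enumerate([b"a" * 5 * 1024 * 1024, b"tail"], start=1):
    resp = s3.upload_part(
        Bucket="demo", Key="big.bin",
        PartNumber=number, UploadId=upload_id, Body=chunk,
    )
    parts.append({"ETag": resp["ETag"], "PartNumber": number})

s3.complete_multipart_upload(
    Bucket="demo", Key="big.bin", UploadId=upload_id,
    MultipartUpload={"Parts": parts},
)
```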
### Bucket Policies (S3-compatible)
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/<bucket>?policy` | Get bucket policy |
| `PUT` | `/<bucket>?policy` | Set bucket policy |
| `DELETE` | `/<bucket>?policy` | Delete bucket policy |
### Versioning
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/<bucket>/<key>?versionId=X` | Get specific version |
| `DELETE` | `/<bucket>/<key>?versionId=X` | Delete specific version |
| `GET` | `/<bucket>?versions` | List object versions |
### Health Check
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/myfsio/health` | Health check endpoint |
## IAM & Access Control
### Users and Access Keys
On first run, MyFSIO creates a default admin user (`localadmin`/`localadmin`). Use the IAM dashboard to:
- Create and delete users
- Generate and rotate access keys
- Attach inline policies to users
- Control IAM management permissions
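
The same credentials work from any S3-compatible tool. For example, a minimal AWS CLI session against a local instance, assuming the default admin credentials and port mentioned above:

```bash
export AWS_ACCESS_KEY_ID=localadmin
export AWS_SECRET_ACCESS_KEY=localadmin
aws --endpoint-url http://127.0.0.1:5000 s3api list-buckets
```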
### Bucket Policies
Bucket policies follow AWS policy grammar (Version `2012-10-17`) with support for:
- Principal-based access (`*` for anonymous, specific users)
- Action-based permissions (`s3:GetObject`, `s3:PutObject`, etc.)
- Resource patterns (`arn:aws:s3:::bucket/*`)
- Condition keys
**Policy Presets:**
- **Public:** Grants anonymous read access (`s3:GetObject`, `s3:ListBucket`)
- **Private:** Removes bucket policy (IAM-only access)
- **Custom:** Manual policy editing with draft preservation
Policies hot-reload when the JSON file changes.
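
For instance, a policy equivalent to the **Public** preset might look like this (the bucket name is a placeholder):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::my-bucket",
        "arn:aws:s3:::my-bucket/*"
      ]
    }
  ]
}
```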
## Server-Side Encryption
MyFSIO supports two encryption modes:
- **SSE-S3:** Server-managed keys with automatic key rotation
- **SSE-KMS:** Customer-managed keys via built-in KMS
Enable encryption with:
```bash
ENCRYPTION_ENABLED=true python run.py
```
## Cross-Bucket Replication
Replicate objects to remote S3-compatible endpoints:
1. Configure remote connections in the UI
2. Create replication rules specifying source/destination
3. Objects are automatically replicated on upload
## Docker
Build the Rust image from the `rust/` directory:
```bash
docker build -t myfsio ./rust
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
docker build -t myfsio .
docker run -p 5000:5000 -p 5100:5100 -v ./data:/app/data myfsio
```
If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3 endpoint.
## Linux Installation
The repository includes `scripts/install.sh` for systemd-based Linux installs. Build the Rust binary first, then pass it to the installer:
```bash
cd rust/myfsio-engine
cargo build --release -p myfsio-server
cd ../..
sudo ./scripts/install.sh --binary ./rust/myfsio-engine/target/release/myfsio-server
```
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
## Testing
Run the Rust test suite from the workspace:
```bash
cd rust/myfsio-engine
cargo test
# Run all tests
pytest tests/ -v
# Run specific test file
pytest tests/test_api.py -v
# Run with coverage
pytest tests/ --cov=app --cov-report=html
```
## Health Check
## References
`GET /myfsio/health` returns:
```json
{
"status": "ok",
"version": "0.5.0"
}
```
The `version` field comes from the Rust crate version in `rust/myfsio-engine/crates/myfsio-server/Cargo.toml`.
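
A quick probe against a local instance, assuming the default API port:

```bash
curl http://127.0.0.1:5000/myfsio/health
```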
- [Amazon S3 Documentation](https://docs.aws.amazon.com/s3/)
- [AWS Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
- [S3 Bucket Policy Examples](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)

View File

@@ -1,5 +1,6 @@
from __future__ import annotations
import html as html_module
import itertools
import logging
import mimetypes
@@ -17,8 +18,6 @@ from flask_cors import CORS
from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix
import io
from .access_logging import AccessLoggingService
from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
@@ -45,64 +44,6 @@ from .website_domains import WebsiteDomainStore
_request_counter = itertools.count(1)
class _ChunkedTransferMiddleware:
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
            return self.app(environ, start_response)
        transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
        content_length = environ.get("CONTENT_LENGTH")
        if "chunked" in transfer_encoding.lower():
            if content_length:
                del environ["HTTP_TRANSFER_ENCODING"]
            else:
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))
                del environ["HTTP_TRANSFER_ENCODING"]
        content_length = environ.get("CONTENT_LENGTH")
        if not content_length or content_length == "0":
            sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
            decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
            content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
            if ("STREAMING" in sha256.upper() or decoded_len
                    or "aws-chunked" in content_encoding.lower()):
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))
        raw = environ.get("wsgi.input")
        if raw and hasattr(raw, "seek"):
            try:
                raw.seek(0)
            except Exception:
                pass
        return self.app(environ, start_response)
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
"""Migrate config file from legacy locations to the active path.
@@ -166,11 +107,10 @@ def create_app(
)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)
# Enable gzip compression for responses (10-20x smaller JSON payloads)
if app.config.get("ENABLE_GZIP", True):
app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)
app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)
_configure_cors(app)
_configure_logging(app)
@@ -183,7 +123,6 @@ def create_app(
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
meta_read_cache_max=app.config.get("META_READ_CACHE_MAX", 2048),
)
if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
@@ -293,7 +232,6 @@ def create_app(
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
dry_run=app.config.get("GC_DRY_RUN", False),
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
)
gc_collector.start()
@@ -305,7 +243,6 @@ def create_app(
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
)
integrity_checker.start()
@@ -719,10 +656,9 @@ def _configure_logging(app: Flask) -> None:
return _website_error_response(status_code, "Not Found")
def _website_error_response(status_code, message):
if status_code == 404:
body = "<h1>404 page not found</h1>"
else:
body = f"{status_code} {message}"
safe_msg = html_module.escape(str(message))
safe_code = html_module.escape(str(status_code))
body = f"<html><head><title>{safe_code} {safe_msg}</title></head><body><h1>{safe_code} {safe_msg}</h1></body></html>"
return Response(body, status=status_code, mimetype="text/html")
@app.after_request
@@ -742,7 +678,6 @@ def _configure_logging(app: Flask) -> None:
},
)
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
response.headers["Server"] = "MyFSIO"
operation_metrics = app.extensions.get("operation_metrics")
if operation_metrics:

View File

@@ -686,107 +686,6 @@ def _storage():
return current_app.extensions["object_storage"]
def _require_iam_action(action: str):
principal, error = _require_principal()
if error:
return None, error
try:
_iam().authorize(principal, None, action)
return principal, None
except IamError:
return None, _json_error("AccessDenied", f"Requires {action} permission", 403)
@admin_api_bp.route("/iam/users", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_list_users():
principal, error = _require_iam_action("iam:list_users")
if error:
return error
return jsonify({"users": _iam().list_users()})
@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user(identifier):
principal, error = _require_iam_action("iam:get_user")
if error:
return error
try:
user_id = _iam().resolve_user_id(identifier)
return jsonify(_iam().get_user_by_id(user_id))
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user_policies(identifier):
principal, error = _require_iam_action("iam:get_policy")
if error:
return error
try:
return jsonify({"policies": _iam().get_user_policies(identifier)})
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_create_access_key(identifier):
principal, error = _require_iam_action("iam:create_key")
if error:
return error
try:
result = _iam().create_access_key(identifier)
logger.info("Access key created for %s by %s", identifier, principal.access_key)
return jsonify(result), 201
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_delete_access_key(identifier, access_key):
principal, error = _require_iam_action("iam:delete_key")
if error:
return error
try:
_iam().delete_access_key(access_key)
logger.info("Access key %s deleted by %s", access_key, principal.access_key)
return "", 204
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_disable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().disable_user(identifier)
logger.info("User %s disabled by %s", identifier, principal.access_key)
return jsonify({"status": "disabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_enable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().enable_user(identifier)
logger.info("User %s enabled by %s", identifier, principal.access_key)
return jsonify({"status": "enabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/website-domains", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_website_domains():
@@ -907,11 +806,15 @@ def gc_run_now():
if not gc:
return _json_error("InvalidRequest", "GC is not enabled", 400)
payload = request.get_json(silent=True) or {}
started = gc.run_async(dry_run=payload.get("dry_run"))
original_dry_run = gc.dry_run
if "dry_run" in payload:
gc.dry_run = bool(payload["dry_run"])
try:
result = gc.run_now()
finally:
gc.dry_run = original_dry_run
logger.info("GC manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "GC is already in progress", 409)
return jsonify({"status": "started"})
return jsonify(result.to_dict())
@admin_api_bp.route("/gc/history", methods=["GET"])
@@ -957,14 +860,12 @@ def integrity_run_now():
payload = request.get_json(silent=True) or {}
override_dry_run = payload.get("dry_run")
override_auto_heal = payload.get("auto_heal")
started = checker.run_async(
result = checker.run_now(
auto_heal=override_auto_heal if override_auto_heal is not None else None,
dry_run=override_dry_run if override_dry_run is not None else None,
)
logger.info("Integrity manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "A scan is already in progress", 409)
return jsonify({"status": "started"})
return jsonify(result.to_dict())
@admin_api_bp.route("/integrity/history", methods=["GET"])
@@ -980,5 +881,3 @@ def integrity_history():
offset = int(request.args.get("offset", 0))
records = checker.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})

View File

@@ -25,7 +25,7 @@ def _calculate_auto_connection_limit() -> int:
def _calculate_auto_backlog(connection_limit: int) -> int:
return max(128, min(connection_limit * 2, 4096))
return max(64, min(connection_limit * 2, 4096))
def _validate_rate_limit(value: str) -> str:
@@ -115,7 +115,6 @@ class AppConfig:
server_connection_limit: int
server_backlog: int
server_channel_timeout: int
server_max_buffer_size: int
server_threads_auto: bool
server_connection_limit_auto: bool
server_backlog_auto: bool
@@ -136,7 +135,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds: float
object_key_max_length_bytes: int
object_cache_max_size: int
meta_read_cache_max: int
bucket_config_cache_ttl_seconds: float
object_tag_limit: int
encryption_chunk_size_bytes: int
@@ -158,13 +156,11 @@ class AppConfig:
gc_multipart_max_age_days: int
gc_lock_file_max_age_hours: float
gc_dry_run: bool
gc_io_throttle_ms: int
integrity_enabled: bool
integrity_interval_hours: float
integrity_batch_size: int
integrity_auto_heal: bool
integrity_dry_run: bool
integrity_io_throttle_ms: int
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -297,7 +293,6 @@ class AppConfig:
server_backlog_auto = False
server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
@@ -316,7 +311,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
meta_read_cache_max = int(_get("META_READ_CACHE_MAX", 2048))
bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
@@ -342,13 +336,11 @@ class AppConfig:
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -402,7 +394,6 @@ class AppConfig:
server_connection_limit=server_connection_limit,
server_backlog=server_backlog,
server_channel_timeout=server_channel_timeout,
server_max_buffer_size=server_max_buffer_size,
server_threads_auto=server_threads_auto,
server_connection_limit_auto=server_connection_limit_auto,
server_backlog_auto=server_backlog_auto,
@@ -423,7 +414,6 @@ class AppConfig:
site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
object_key_max_length_bytes=object_key_max_length_bytes,
object_cache_max_size=object_cache_max_size,
meta_read_cache_max=meta_read_cache_max,
bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
object_tag_limit=object_tag_limit,
encryption_chunk_size_bytes=encryption_chunk_size_bytes,
@@ -445,13 +435,11 @@ class AppConfig:
gc_multipart_max_age_days=gc_multipart_max_age_days,
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
gc_dry_run=gc_dry_run,
gc_io_throttle_ms=gc_io_throttle_ms,
integrity_enabled=integrity_enabled,
integrity_interval_hours=integrity_interval_hours,
integrity_batch_size=integrity_batch_size,
integrity_auto_heal=integrity_auto_heal,
integrity_dry_run=integrity_dry_run,
integrity_io_throttle_ms=integrity_io_throttle_ms)
integrity_dry_run=integrity_dry_run)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
@@ -516,12 +504,10 @@ class AppConfig:
issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
if not (10 <= self.server_connection_limit <= 1000):
issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
if not (128 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
if not (64 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
if not (10 <= self.server_channel_timeout <= 300):
issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
if self.server_max_buffer_size < 1024 * 1024:
issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")
if sys.platform != "win32":
try:
@@ -567,7 +553,6 @@ class AppConfig:
print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
print("=" * 60)
issues = self.validate_and_report()
@@ -633,7 +618,6 @@ class AppConfig:
"SERVER_CONNECTION_LIMIT": self.server_connection_limit,
"SERVER_BACKLOG": self.server_backlog,
"SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
"SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
"SITE_SYNC_ENABLED": self.site_sync_enabled,
"SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
"SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
@@ -651,7 +635,6 @@ class AppConfig:
"SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
"OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
"OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
"META_READ_CACHE_MAX": self.meta_read_cache_max,
"BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
"OBJECT_TAG_LIMIT": self.object_tag_limit,
"ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
@@ -673,11 +656,9 @@ class AppConfig:
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
"GC_DRY_RUN": self.gc_dry_run,
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
"INTEGRITY_ENABLED": self.integrity_enabled,
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
}

View File

@@ -193,9 +193,6 @@ class EncryptedObjectStorage:
def list_objects_shallow(self, bucket_name: str, **kwargs):
return self.storage.list_objects_shallow(bucket_name, **kwargs)
def iter_objects_shallow(self, bucket_name: str, **kwargs):
return self.storage.iter_objects_shallow(bucket_name, **kwargs)
def search_objects(self, bucket_name: str, query: str, **kwargs):
return self.storage.search_objects(bucket_name, query, **kwargs)

View File

@@ -21,10 +21,6 @@ if sys.platform != "win32":
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"encrypt_stream_chunked", "decrypt_stream_chunked",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None

View File

@@ -175,21 +175,13 @@ def handle_app_error(error: AppError) -> Response:
def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
g.s3_error_code = "SlowDown"
if request.path.startswith("/ui") or request.path.startswith("/buckets"):
wants_json = (
request.is_json or
request.headers.get("X-Requested-With") == "XMLHttpRequest" or
"application/json" in request.accept_mimetypes.values()
)
if wants_json:
return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
error = Element("Error")
SubElement(error, "Code").text = "SlowDown"
SubElement(error, "Message").text = "Please reduce your request rate."
SubElement(error, "Resource").text = request.path
SubElement(error, "RequestId").text = getattr(g, "request_id", "")
xml_bytes = tostring(error, encoding="utf-8")
return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
return Response(xml_bytes, status=429, mimetype="application/xml")
def register_error_handlers(app):

View File

@@ -162,7 +162,6 @@ class GarbageCollector:
lock_file_max_age_hours: float = 1.0,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -173,9 +172,6 @@ class GarbageCollector:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = GCHistoryStore(storage_root, max_records=max_history)
def start(self) -> None:
@@ -216,81 +212,49 @@ class GarbageCollector:
finally:
self._schedule_next()
def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
if not self._lock.acquire(blocking=False):
raise RuntimeError("GC is already in progress")
def run_now(self) -> GCResult:
start = time.time()
result = GCResult()
effective_dry_run = dry_run if dry_run is not None else self.dry_run
self._clean_temp_files(result)
self._clean_orphaned_multipart(result)
self._clean_stale_locks(result)
self._clean_orphaned_metadata(result)
self._clean_orphaned_versions(result)
self._clean_empty_dirs(result)
try:
self._scanning = True
self._scan_start_time = time.time()
result.execution_time_seconds = time.time() - start
start = self._scan_start_time
result = GCResult()
original_dry_run = self.dry_run
self.dry_run = effective_dry_run
try:
self._clean_temp_files(result)
self._clean_orphaned_multipart(result)
self._clean_stale_locks(result)
self._clean_orphaned_metadata(result)
self._clean_orphaned_versions(result)
self._clean_empty_dirs(result)
finally:
self.dry_run = original_dry_run
result.execution_time_seconds = time.time() - start
if result.has_work or result.errors:
logger.info(
"GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
"locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
result.execution_time_seconds,
result.temp_files_deleted,
result.temp_bytes_freed / (1024 * 1024),
result.multipart_uploads_deleted,
result.multipart_bytes_freed / (1024 * 1024),
result.lock_files_deleted,
result.orphaned_metadata_deleted,
result.orphaned_versions_deleted,
result.orphaned_version_bytes_freed / (1024 * 1024),
result.empty_dirs_removed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
record = GCExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
if result.has_work or result.errors:
logger.info(
"GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
"locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
result.execution_time_seconds,
result.temp_files_deleted,
result.temp_bytes_freed / (1024 * 1024),
result.multipart_uploads_deleted,
result.multipart_bytes_freed / (1024 * 1024),
result.lock_files_deleted,
result.orphaned_metadata_deleted,
result.orphaned_versions_deleted,
result.orphaned_version_bytes_freed / (1024 * 1024),
result.empty_dirs_removed,
len(result.errors),
" (dry run)" if self.dry_run else "",
)
self.history_store.add(record)
return result
finally:
self._scanning = False
self._scan_start_time = None
self._lock.release()
record = GCExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=self.dry_run,
)
self.history_store.add(record)
def run_async(self, dry_run: Optional[bool] = None) -> bool:
if self._scanning:
return False
t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
t.start()
return True
return result
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _list_bucket_names(self) -> List[str]:
names = []
try:
@@ -307,8 +271,6 @@ class GarbageCollector:
return
try:
for entry in tmp_dir.iterdir():
if self._throttle():
return
if not entry.is_file():
continue
age = _file_age_hours(entry)
@@ -330,8 +292,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
for multipart_root in (
self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
self.storage_root / bucket_name / ".multipart",
@@ -340,8 +300,6 @@ class GarbageCollector:
continue
try:
for upload_dir in multipart_root.iterdir():
if self._throttle():
return
if not upload_dir.is_dir():
continue
self._maybe_clean_upload(upload_dir, cutoff_hours, result)
@@ -371,8 +329,6 @@ class GarbageCollector:
try:
for bucket_dir in buckets_root.iterdir():
if self._shutdown:
return
if not bucket_dir.is_dir():
continue
locks_dir = bucket_dir / "locks"
@@ -380,8 +336,6 @@ class GarbageCollector:
continue
try:
for lock_file in locks_dir.iterdir():
if self._throttle():
return
if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
continue
age = _file_age_hours(lock_file)
@@ -402,8 +356,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
legacy_meta = self.storage_root / bucket_name / ".meta"
if legacy_meta.exists():
self._clean_legacy_metadata(bucket_name, legacy_meta, result)
@@ -416,8 +368,6 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for meta_file in meta_root.rglob("*.meta.json"):
if self._throttle():
return
if not meta_file.is_file():
continue
try:
@@ -437,8 +387,6 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for index_file in meta_root.rglob("_index.json"):
if self._throttle():
return
if not index_file.is_file():
continue
try:
@@ -482,8 +430,6 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
bucket_path = self.storage_root / bucket_name
for versions_root in (
self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
@@ -493,8 +439,6 @@ class GarbageCollector:
continue
try:
for key_dir in versions_root.iterdir():
if self._throttle():
return
if not key_dir.is_dir():
continue
self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
@@ -545,8 +489,6 @@ class GarbageCollector:
self._remove_empty_dirs_recursive(root, root, result)
def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
if self._shutdown:
return False
if not path.is_dir():
return False
@@ -557,8 +499,6 @@ class GarbageCollector:
all_empty = True
for child in children:
if self._throttle():
return False
if child.is_dir():
if not self._remove_empty_dirs_recursive(child, stop_at, result):
all_empty = False
@@ -580,17 +520,12 @@ class GarbageCollector:
return [r.to_dict() for r in records]
def get_status(self) -> dict:
status: Dict[str, Any] = {
return {
"enabled": not self._shutdown or self._timer is not None,
"running": self._timer is not None and not self._shutdown,
"scanning": self._scanning,
"interval_hours": self.interval_seconds / 3600.0,
"temp_file_max_age_hours": self.temp_file_max_age_hours,
"multipart_max_age_days": self.multipart_max_age_days,
"lock_file_max_age_hours": self.lock_file_max_age_hours,
"dry_run": self.dry_run,
"io_throttle_ms": round(self._io_throttle * 1000),
}
if self._scanning and self._scan_start_time:
status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
return status

View File

@@ -10,7 +10,7 @@ import secrets
import threading
import time
from collections import deque
from dataclasses import dataclass, field
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
@@ -22,37 +22,16 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails."""
S3_ACTIONS = {
"list", "read", "write", "delete", "share", "policy",
"replication", "lifecycle", "cors",
"create_bucket", "delete_bucket",
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
}
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
IAM_ACTIONS = {
"iam:list_users",
"iam:create_user",
"iam:delete_user",
"iam:rotate_key",
"iam:update_policy",
"iam:create_key",
"iam:delete_key",
"iam:get_user",
"iam:get_policy",
"iam:disable_user",
}
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
_V1_IMPLIED_ACTIONS = {
"write": {"create_bucket"},
"delete": {"delete_bucket"},
"policy": {
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
"cors", "lifecycle", "replication", "share",
},
}
ACTION_ALIASES = {
"list": "list",
"s3:listbucket": "list",
@@ -66,11 +45,14 @@ ACTION_ALIASES = {
"s3:getobjecttagging": "read",
"s3:getobjectversiontagging": "read",
"s3:getobjectacl": "read",
"s3:getbucketversioning": "read",
"s3:headobject": "read",
"s3:headbucket": "read",
"write": "write",
"s3:putobject": "write",
"s3:createbucket": "write",
"s3:putobjecttagging": "write",
"s3:putbucketversioning": "write",
"s3:createmultipartupload": "write",
"s3:uploadpart": "write",
"s3:completemultipartupload": "write",
@@ -79,11 +61,8 @@ ACTION_ALIASES = {
"delete": "delete",
"s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
"s3:deleteobjecttagging": "delete",
"create_bucket": "create_bucket",
"s3:createbucket": "create_bucket",
"delete_bucket": "delete_bucket",
"s3:deletebucket": "delete_bucket",
"share": "share",
"s3:putobjectacl": "share",
"s3:putbucketacl": "share",
@@ -109,50 +88,11 @@ ACTION_ALIASES = {
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
"versioning": "versioning",
"s3:getbucketversioning": "versioning",
"s3:putbucketversioning": "versioning",
"tagging": "tagging",
"s3:getbuckettagging": "tagging",
"s3:putbuckettagging": "tagging",
"s3:deletebuckettagging": "tagging",
"encryption": "encryption",
"s3:getencryptionconfiguration": "encryption",
"s3:putencryptionconfiguration": "encryption",
"s3:deleteencryptionconfiguration": "encryption",
"quota": "quota",
"s3:getbucketquota": "quota",
"s3:putbucketquota": "quota",
"s3:deletebucketquota": "quota",
"object_lock": "object_lock",
"s3:getobjectlockconfiguration": "object_lock",
"s3:putobjectlockconfiguration": "object_lock",
"s3:putobjectretention": "object_lock",
"s3:getobjectretention": "object_lock",
"s3:putobjectlegalhold": "object_lock",
"s3:getobjectlegalhold": "object_lock",
"notification": "notification",
"s3:getbucketnotificationconfiguration": "notification",
"s3:putbucketnotificationconfiguration": "notification",
"s3:deletebucketnotificationconfiguration": "notification",
"logging": "logging",
"s3:getbucketlogging": "logging",
"s3:putbucketlogging": "logging",
"s3:deletebucketlogging": "logging",
"website": "website",
"s3:getbucketwebsite": "website",
"s3:putbucketwebsite": "website",
"s3:deletebucketwebsite": "website",
"iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user",
"iam:rotateaccesskey": "iam:rotate_key",
"iam:putuserpolicy": "iam:update_policy",
"iam:createaccesskey": "iam:create_key",
"iam:deleteaccesskey": "iam:delete_key",
"iam:getuser": "iam:get_user",
"iam:getpolicy": "iam:get_policy",
"iam:disableuser": "iam:disable_user",
"iam:*": "iam:*",
}
@@ -161,7 +101,6 @@ ACTION_ALIASES = {
class Policy:
bucket: str
actions: Set[str]
prefix: str = "*"
@dataclass
@@ -178,16 +117,6 @@ def _derive_fernet_key(secret: str) -> bytes:
_IAM_ENCRYPTED_PREFIX = b"MYFSIO_IAM_ENC:"
_CONFIG_VERSION = 2
def _expand_v1_actions(actions: Set[str]) -> Set[str]:
expanded = set(actions)
for action, implied in _V1_IMPLIED_ACTIONS.items():
if action in expanded:
expanded.update(implied)
return expanded
class IamService:
"""Loads IAM configuration, manages users, and evaluates policies."""
@@ -202,10 +131,7 @@ class IamService:
self.config_path.parent.mkdir(parents=True, exist_ok=True)
if not self.config_path.exists():
self._write_default()
self._user_records: Dict[str, Dict[str, Any]] = {}
self._key_index: Dict[str, str] = {}
self._key_secrets: Dict[str, str] = {}
self._key_status: Dict[str, str] = {}
self._users: Dict[str, Dict[str, Any]] = {}
self._raw_config: Dict[str, Any] = {}
self._failed_attempts: Dict[str, Deque[datetime]] = {}
self._last_load_time = 0.0
@@ -220,6 +146,7 @@ class IamService:
self._load_lockout_state()
def _maybe_reload(self) -> None:
"""Reload configuration if the file has changed on disk."""
now = time.time()
if now - self._last_stat_check < self._stat_check_interval:
return
@@ -256,20 +183,11 @@ class IamService:
raise IamError(
f"Access temporarily locked. Try again in {seconds} seconds."
)
user_id = self._key_index.get(access_key)
stored_secret = self._key_secrets.get(access_key, secrets.token_urlsafe(24))
if not user_id or not hmac.compare_digest(stored_secret, secret_key):
record = self._users.get(access_key)
stored_secret = record["secret_key"] if record else secrets.token_urlsafe(24)
if not record or not hmac.compare_digest(stored_secret, secret_key):
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
key_status = self._key_status.get(access_key, "active")
if key_status != "active":
raise IamError("Access key is inactive")
record = self._user_records.get(user_id)
if not record:
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
if not record.get("enabled", True):
raise IamError("User account is disabled")
self._check_expiry(access_key, record)
self._clear_failed_attempts(access_key)
return self._build_principal(access_key, record)
@@ -297,6 +215,7 @@ class IamService:
return self.config_path.parent / "lockout_state.json"
def _load_lockout_state(self) -> None:
"""Load lockout state from disk."""
try:
if self._lockout_file().exists():
data = json.loads(self._lockout_file().read_text(encoding="utf-8"))
@@ -316,6 +235,7 @@ class IamService:
pass
def _save_lockout_state(self) -> None:
"""Persist lockout state to disk."""
data: Dict[str, Any] = {"failed_attempts": {}}
for key, attempts in self._failed_attempts.items():
data["failed_attempts"][key] = [ts.isoformat() for ts in attempts]
@@ -350,9 +270,10 @@ class IamService:
return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))
def create_session_token(self, access_key: str, duration_seconds: int = 3600) -> str:
"""Create a temporary session token for an access key."""
self._maybe_reload()
user_id = self._key_index.get(access_key)
if not user_id or user_id not in self._user_records:
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
self._cleanup_expired_sessions()
token = secrets.token_urlsafe(32)
@@ -364,6 +285,7 @@ class IamService:
return token
def validate_session_token(self, access_key: str, session_token: str) -> bool:
"""Validate a session token for an access key (thread-safe, constant-time)."""
dummy_key = secrets.token_urlsafe(16)
dummy_token = secrets.token_urlsafe(32)
with self._session_lock:
@@ -382,6 +304,7 @@ class IamService:
return True
def _cleanup_expired_sessions(self) -> None:
"""Remove expired session tokens."""
now = time.time()
expired = [token for token, data in self._sessions.items() if now > data["expires_at"]]
for token in expired:
@@ -393,20 +316,13 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
user_id = self._key_index.get(access_key)
if not user_id:
raise IamError("Unknown access key")
record = self._user_records.get(user_id)
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
self._check_expiry(access_key, record)
@@ -416,27 +332,22 @@ class IamService:
def secret_for_key(self, access_key: str) -> str:
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
secret = self._key_secrets.get(access_key)
if not secret:
record = self._users.get(access_key)
if not record:
raise IamError("Unknown access key")
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return secret
self._check_expiry(access_key, record)
return record["secret_key"]
def authorize(self, principal: Principal, bucket_name: str | None, action: str, *, object_key: str | None = None) -> None:
def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
action = self._normalize_action(action)
if action not in ALLOWED_ACTIONS:
raise IamError(f"Unknown action '{action}'")
bucket_name = bucket_name or "*"
normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
if not self._is_allowed(principal, normalized, action, object_key=object_key):
if not self._is_allowed(principal, normalized, action):
raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str], *, object_key: str | None = None) -> Dict[str, bool]:
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
self._maybe_reload()
bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
normalized_actions = {a: self._normalize_action(a) for a in actions}
@@ -445,53 +356,37 @@ class IamService:
if canonical not in ALLOWED_ACTIONS:
results[original] = False
else:
results[original] = self._is_allowed(principal, bucket_name, canonical, object_key=object_key)
results[original] = self._is_allowed(principal, bucket_name, canonical)
return results
def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]
def _is_allowed(self, principal: Principal, bucket_name: str, action: str, *, object_key: str | None = None) -> bool:
def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
bucket_name = bucket_name.lower()
for policy in principal.policies:
if policy.bucket not in {"*", bucket_name}:
continue
action_match = "*" in policy.actions or action in policy.actions
if not action_match and "iam:*" in policy.actions and action.startswith("iam:"):
action_match = True
if not action_match:
continue
if object_key is not None and policy.prefix != "*":
prefix = policy.prefix.rstrip("*")
if not object_key.startswith(prefix):
continue
return True
if "*" in policy.actions or action in policy.actions:
return True
if "iam:*" in policy.actions and action.startswith("iam:"):
return True
return False
def list_users(self) -> List[Dict[str, Any]]:
listing: List[Dict[str, Any]] = []
for user_id, record in self._user_records.items():
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
user_entry: Dict[str, Any] = {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{**{"bucket": policy.bucket, "actions": sorted(policy.actions)}, **({"prefix": policy.prefix} if policy.prefix != "*" else {})}
for policy in record["policies"]
],
}
if access_keys:
user_entry["access_key"] = access_keys[0]["access_key"]
listing.append(user_entry)
for access_key, record in self._users.items():
listing.append(
{
"access_key": access_key,
"display_name": record["display_name"],
"expires_at": record.get("expires_at"),
"policies": [
{"bucket": policy.bucket, "actions": sorted(policy.actions)}
for policy in record["policies"]
],
}
)
return listing
def create_user(
@@ -502,33 +397,20 @@ class IamService:
access_key: str | None = None,
secret_key: str | None = None,
expires_at: str | None = None,
user_id: str | None = None,
) -> Dict[str, str]:
access_key = (access_key or self._generate_access_key()).strip()
if not access_key:
raise IamError("Access key cannot be empty")
if access_key in self._key_index:
if access_key in self._users:
raise IamError("Access key already exists")
if expires_at:
self._validate_expires_at(expires_at)
secret_key = secret_key or self._generate_secret_key()
sanitized_policies = self._prepare_policy_payload(policies)
user_id = user_id or self._generate_user_id()
if user_id in self._user_records:
raise IamError("User ID already exists")
now_iso = datetime.now(timezone.utc).isoformat()
record: Dict[str, Any] = {
"user_id": user_id,
"access_key": access_key,
"secret_key": secret_key,
"display_name": display_name or access_key,
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": now_iso,
}
],
"policies": sanitized_policies,
}
if expires_at:
@@ -536,108 +418,12 @@ class IamService:
self._raw_config.setdefault("users", []).append(record)
self._save()
self._load()
return {"user_id": user_id, "access_key": access_key, "secret_key": secret_key}
def create_access_key(self, identifier: str) -> Dict[str, str]:
user_raw, _ = self._resolve_raw_user(identifier)
new_access_key = self._generate_access_key()
new_secret_key = self._generate_secret_key()
now_iso = datetime.now(timezone.utc).isoformat()
key_entry = {
"access_key": new_access_key,
"secret_key": new_secret_key,
"status": "active",
"created_at": now_iso,
}
user_raw.setdefault("access_keys", []).append(key_entry)
self._save()
self._load()
return {"access_key": new_access_key, "secret_key": new_secret_key}
def delete_access_key(self, access_key: str) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
keys = user_raw.get("access_keys", [])
if len(keys) <= 1:
raise IamError("Cannot delete the only access key for a user")
remaining = [k for k in keys if k["access_key"] != access_key]
if len(remaining) == len(keys):
raise IamError("Access key not found")
user_raw["access_keys"] = remaining
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def disable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = False
self._save()
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def enable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = True
self._save()
self._load()
def get_user_by_id(self, user_id: str) -> Dict[str, Any]:
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
return {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{"bucket": p.bucket, "actions": sorted(p.actions), "prefix": p.prefix}
for p in record["policies"]
],
}
def get_user_policies(self, identifier: str) -> List[Dict[str, Any]]:
_, user_id = self._resolve_raw_user(identifier)
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
return [
{**{"bucket": p.bucket, "actions": sorted(p.actions)}, **({"prefix": p.prefix} if p.prefix != "*" else {})}
for p in record["policies"]
]
def resolve_user_id(self, identifier: str) -> str:
if identifier in self._user_records:
return identifier
user_id = self._key_index.get(identifier)
if user_id:
return user_id
raise IamError("User not found")
return {"access_key": access_key, "secret_key": secret_key}
def rotate_secret(self, access_key: str) -> str:
user_raw, _ = self._resolve_raw_user(access_key)
user = self._get_raw_user(access_key)
new_secret = self._generate_secret_key()
for key_info in user_raw.get("access_keys", []):
if key_info["access_key"] == access_key:
key_info["secret_key"] = new_secret
break
else:
raise IamError("Access key not found")
user["secret_key"] = new_secret
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
@@ -647,8 +433,8 @@ class IamService:
return new_secret
def update_user(self, access_key: str, display_name: str) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["display_name"] = display_name
user = self._get_raw_user(access_key)
user["display_name"] = display_name
self._save()
self._load()
@@ -656,43 +442,32 @@ class IamService:
users = self._raw_config.get("users", [])
if len(users) <= 1:
raise IamError("Cannot delete the only user")
_, target_user_id = self._resolve_raw_user(access_key)
target_user_raw = None
remaining = []
for u in users:
if u.get("user_id") == target_user_id:
target_user_raw = u
else:
remaining.append(u)
if target_user_raw is None:
remaining = [user for user in users if user["access_key"] != access_key]
if len(remaining) == len(users):
raise IamError("User not found")
self._raw_config["users"] = remaining
self._save()
for key_info in target_user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def update_user_expiry(self, access_key: str, expires_at: str | None) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user = self._get_raw_user(access_key)
if expires_at:
self._validate_expires_at(expires_at)
user_raw["expires_at"] = expires_at
user["expires_at"] = expires_at
else:
user_raw.pop("expires_at", None)
user.pop("expires_at", None)
self._save()
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
self._load()
def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["policies"] = self._prepare_policy_payload(policies)
user = self._get_raw_user(access_key)
user["policies"] = self._prepare_policy_payload(policies)
self._save()
self._load()
@@ -707,52 +482,6 @@ class IamService:
raise IamError("Cannot decrypt IAM config. SECRET_KEY may have changed. Use 'python run.py reset-cred' to reset credentials.")
return raw_bytes.decode("utf-8")
def _is_v2_config(self, raw: Dict[str, Any]) -> bool:
return raw.get("version", 1) >= _CONFIG_VERSION
def _migrate_v1_to_v2(self, raw: Dict[str, Any]) -> Dict[str, Any]:
migrated_users = []
now_iso = datetime.now(timezone.utc).isoformat()
for user in raw.get("users", []):
old_policies = user.get("policies", [])
expanded_policies = []
for p in old_policies:
raw_actions = p.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
action_set: Set[str] = set()
for a in raw_actions:
canonical = self._normalize_action(a)
if canonical == "*":
action_set = set(ALLOWED_ACTIONS)
break
if canonical:
action_set.add(canonical)
action_set = _expand_v1_actions(action_set)
expanded_policies.append({
"bucket": p.get("bucket", "*"),
"actions": sorted(action_set),
"prefix": p.get("prefix", "*"),
})
migrated_user: Dict[str, Any] = {
"user_id": user["access_key"],
"display_name": user.get("display_name", user["access_key"]),
"enabled": True,
"access_keys": [
{
"access_key": user["access_key"],
"secret_key": user["secret_key"],
"status": "active",
"created_at": now_iso,
}
],
"policies": expanded_policies,
}
if user.get("expires_at"):
migrated_user["expires_at"] = user["expires_at"]
migrated_users.append(migrated_user)
return {"version": _CONFIG_VERSION, "users": migrated_users}
def _load(self) -> None:
try:
self._last_load_time = self.config_path.stat().st_mtime
@@ -771,67 +500,35 @@ class IamService:
raise IamError(f"Failed to load IAM config: {e}")
was_plaintext = not raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX)
was_v1 = not self._is_v2_config(raw)
if was_v1:
raw = self._migrate_v1_to_v2(raw)
user_records: Dict[str, Dict[str, Any]] = {}
key_index: Dict[str, str] = {}
key_secrets: Dict[str, str] = {}
key_status_map: Dict[str, str] = {}
users: Dict[str, Dict[str, Any]] = {}
for user in raw.get("users", []):
user_id = user["user_id"]
policies = self._build_policy_objects(user.get("policies", []))
access_keys_raw = user.get("access_keys", [])
access_keys_info = []
for key_entry in access_keys_raw:
ak = key_entry["access_key"]
sk = key_entry["secret_key"]
status = key_entry.get("status", "active")
key_index[ak] = user_id
key_secrets[ak] = sk
key_status_map[ak] = status
access_keys_info.append({
"access_key": ak,
"secret_key": sk,
"status": status,
"created_at": key_entry.get("created_at"),
})
record: Dict[str, Any] = {
"display_name": user.get("display_name", user_id),
"enabled": user.get("enabled", True),
user_record: Dict[str, Any] = {
"secret_key": user["secret_key"],
"display_name": user.get("display_name", user["access_key"]),
"policies": policies,
"access_keys": access_keys_info,
}
if user.get("expires_at"):
record["expires_at"] = user["expires_at"]
user_records[user_id] = record
if not user_records:
user_record["expires_at"] = user["expires_at"]
users[user["access_key"]] = user_record
if not users:
raise IamError("IAM configuration contains no users")
self._user_records = user_records
self._key_index = key_index
self._key_secrets = key_secrets
self._key_status = key_status_map
self._users = users
raw_users: List[Dict[str, Any]] = []
for user in raw.get("users", []):
for entry in raw.get("users", []):
raw_entry: Dict[str, Any] = {
"user_id": user["user_id"],
"display_name": user.get("display_name", user["user_id"]),
"enabled": user.get("enabled", True),
"access_keys": user.get("access_keys", []),
"policies": user.get("policies", []),
"access_key": entry["access_key"],
"secret_key": entry["secret_key"],
"display_name": entry.get("display_name", entry["access_key"]),
"policies": entry.get("policies", []),
}
if user.get("expires_at"):
raw_entry["expires_at"] = user["expires_at"]
if entry.get("expires_at"):
raw_entry["expires_at"] = entry["expires_at"]
raw_users.append(raw_entry)
self._raw_config = {"version": _CONFIG_VERSION, "users": raw_users}
self._raw_config = {"users": raw_users}
if was_v1 or (was_plaintext and self._fernet):
if was_plaintext and self._fernet:
self._save()
def _save(self) -> None:
@@ -850,30 +547,19 @@ class IamService:
def config_summary(self) -> Dict[str, Any]:
return {
"path": str(self.config_path),
"user_count": len(self._user_records),
"user_count": len(self._users),
"allowed_actions": sorted(ALLOWED_ACTIONS),
}
def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
payload: Dict[str, Any] = {"version": _CONFIG_VERSION, "users": []}
payload: Dict[str, Any] = {"users": []}
for user in self._raw_config.get("users", []):
access_keys = []
for key_info in user.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"secret_key": "\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022" if mask_secrets else key_info["secret_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
record: Dict[str, Any] = {
"user_id": user["user_id"],
"access_key": user["access_key"],
"secret_key": "••••••••••" if mask_secrets else user["secret_key"],
"display_name": user["display_name"],
"enabled": user.get("enabled", True),
"access_keys": access_keys,
"policies": user["policies"],
}
if access_keys:
record["access_key"] = access_keys[0]["access_key"]
if user.get("expires_at"):
record["expires_at"] = user["expires_at"]
payload["users"].append(record)
@@ -883,7 +569,6 @@ class IamService:
entries: List[Policy] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -896,7 +581,7 @@ class IamService:
if canonical:
action_set.add(canonical)
if action_set:
entries.append(Policy(bucket=bucket, actions=action_set, prefix=prefix))
entries.append(Policy(bucket=bucket, actions=action_set))
return entries
def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
@@ -904,14 +589,12 @@ class IamService:
policies = (
{
"bucket": "*",
"actions": ["list", "read", "write", "delete", "share", "policy",
"create_bucket", "delete_bucket"],
"actions": ["list", "read", "write", "delete", "share", "policy"],
},
)
sanitized: List[Dict[str, Any]] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -925,10 +608,7 @@ class IamService:
action_set.add(canonical)
if not action_set:
continue
entry: Dict[str, Any] = {"bucket": bucket, "actions": sorted(action_set)}
if prefix != "*":
entry["prefix"] = prefix
sanitized.append(entry)
sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
if not sanitized:
raise IamError("At least one policy with valid actions is required")
return sanitized
@@ -953,23 +633,12 @@ class IamService:
access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())
user_id = self._generate_user_id()
now_iso = datetime.now(timezone.utc).isoformat()
default = {
"version": _CONFIG_VERSION,
"users": [
{
"user_id": user_id,
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": now_iso,
}
],
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
],
@@ -991,7 +660,6 @@ class IamService:
else:
print(f"Access Key: {access_key}")
print(f"Secret Key: {secret_key}")
print(f"User ID: {user_id}")
print(f"{'='*60}")
if self._fernet:
print("IAM config is encrypted at rest.")
@@ -1014,32 +682,11 @@ class IamService:
def _generate_secret_key(self) -> str:
return secrets.token_urlsafe(24)
def _generate_user_id(self) -> str:
return f"u-{secrets.token_hex(8)}"
def _resolve_raw_user(self, identifier: str) -> Tuple[Dict[str, Any], str]:
for user in self._raw_config.get("users", []):
if user.get("user_id") == identifier:
return user, identifier
for user in self._raw_config.get("users", []):
for key_info in user.get("access_keys", []):
if key_info["access_key"] == identifier:
return user, user["user_id"]
raise IamError("User not found")
def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
user, _ = self._resolve_raw_user(access_key)
return user
def _enforce_key_and_user_status(self, access_key: str) -> None:
key_status = self._key_status.get(access_key, "active")
if key_status != "active":
raise IamError("Access key is inactive")
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record and not record.get("enabled", True):
raise IamError("User account is disabled")
for user in self._raw_config.get("users", []):
if user["access_key"] == access_key:
return user
raise IamError("User not found")
def get_secret_key(self, access_key: str) -> str | None:
now = time.time()
@@ -1047,25 +694,18 @@ class IamService:
if cached:
secret_key, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return secret_key
self._maybe_reload()
secret = self._key_secrets.get(access_key)
if secret:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
self._secret_key_cache[access_key] = (secret, now)
return secret
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
secret_key = record["secret_key"]
self._secret_key_cache[access_key] = (secret_key, now)
return secret_key
return None
def get_principal(self, access_key: str) -> Principal | None:
@@ -1074,22 +714,16 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
self._enforce_key_and_user_status(access_key)
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
self._enforce_key_and_user_status(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
principal = self._build_principal(access_key, record)
self._principal_cache[access_key] = (principal, now)
return principal
record = self._users.get(access_key)
if record:
self._check_expiry(access_key, record)
principal = self._build_principal(access_key, record)
self._principal_cache[access_key] = (principal, now)
return principal
return None
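
Both get_secret_key and get_principal above follow the same caching shape: store a (value, inserted_at) pair, honor a TTL, and revalidate expiry and key/user status on every cache hit instead of trusting the cached copy. A generic sketch of that pattern (not this module's API):

import time
from typing import Any, Callable, Dict, Optional, Tuple

class TTLCache:
    # Value + insertion-time pairs; entries are revalidated on every hit,
    # mirroring how the principal/secret caches re-check expiry and status.
    def __init__(self, ttl: float = 60.0) -> None:
        self._ttl = ttl
        self._entries: Dict[str, Tuple[Any, float]] = {}

    def get(self, key: str, validate: Callable[[], None]) -> Optional[Any]:
        entry = self._entries.get(key)
        if entry is None:
            return None
        value, stored_at = entry
        if time.time() - stored_at >= self._ttl:
            self._entries.pop(key, None)  # stale: caller falls back to a reload
            return None
        validate()  # may raise, e.g. if the key was disabled meanwhile
        return value

    def put(self, key: str, value: Any) -> None:
        self._entries[key] = (value, time.time())
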

View File

@@ -12,8 +12,6 @@ from typing import Any, Dict, List, Optional
try:
import myfsio_core as _rc
if not hasattr(_rc, "md5_file"):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_HAS_RUST = False
@@ -164,111 +162,6 @@ class IntegrityHistoryStore:
return self.load()[offset : offset + limit]
class IntegrityCursorStore:
def __init__(self, storage_root: Path) -> None:
self.storage_root = storage_root
self._lock = threading.Lock()
def _get_path(self) -> Path:
return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"
def load(self) -> Dict[str, Any]:
path = self._get_path()
if not path.exists():
return {"buckets": {}}
try:
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if not isinstance(data.get("buckets"), dict):
return {"buckets": {}}
return data
except (OSError, ValueError, KeyError):
return {"buckets": {}}
def save(self, data: Dict[str, Any]) -> None:
path = self._get_path()
path.parent.mkdir(parents=True, exist_ok=True)
try:
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2)
except OSError as e:
logger.error("Failed to save integrity cursor: %s", e)
def update_bucket(
self,
bucket_name: str,
timestamp: float,
last_key: Optional[str] = None,
completed: bool = False,
) -> None:
with self._lock:
data = self.load()
entry = data["buckets"].get(bucket_name, {})
if completed:
entry["last_scanned"] = timestamp
entry.pop("last_key", None)
entry["completed"] = True
else:
entry["last_scanned"] = timestamp
if last_key is not None:
entry["last_key"] = last_key
entry["completed"] = False
data["buckets"][bucket_name] = entry
self.save(data)
def clean_stale(self, existing_buckets: List[str]) -> None:
with self._lock:
data = self.load()
existing_set = set(existing_buckets)
stale_keys = [k for k in data["buckets"] if k not in existing_set]
if stale_keys:
for k in stale_keys:
del data["buckets"][k]
self.save(data)
def get_last_key(self, bucket_name: str) -> Optional[str]:
data = self.load()
entry = data.get("buckets", {}).get(bucket_name)
if entry is None:
return None
return entry.get("last_key")
def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
data = self.load()
buckets_info = data.get("buckets", {})
incomplete = []
complete = []
for name in bucket_names:
entry = buckets_info.get(name)
if entry is None:
incomplete.append((name, 0.0))
elif entry.get("last_key") is not None:
incomplete.append((name, entry.get("last_scanned", 0.0)))
else:
complete.append((name, entry.get("last_scanned", 0.0)))
incomplete.sort(key=lambda x: x[1])
complete.sort(key=lambda x: x[1])
return [n for n, _ in incomplete] + [n for n, _ in complete]
def get_info(self) -> Dict[str, Any]:
data = self.load()
buckets = data.get("buckets", {})
return {
"tracked_buckets": len(buckets),
"buckets": {
name: {
"last_scanned": info.get("last_scanned"),
"last_key": info.get("last_key"),
"completed": info.get("completed", False),
}
for name, info in buckets.items()
},
}
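
The ordering rule in get_bucket_order is worth restating: buckets never scanned or interrupted mid-scan (they still carry a last_key) are visited first, oldest scan first, and fully completed buckets come last. An equivalent one-function sketch, assuming the same cursor layout:

def order_buckets(cursor: dict, names: list[str]) -> list[str]:
    def rank(name: str):
        entry = cursor.get(name)
        if entry is None:                       # never scanned
            return (0, 0.0)
        if entry.get("last_key") is not None:   # interrupted mid-bucket
            return (0, entry.get("last_scanned", 0.0))
        return (1, entry.get("last_scanned", 0.0))  # completed buckets last
    return sorted(names, key=rank)
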
MAX_ISSUES = 500
@@ -287,7 +180,6 @@ class IntegrityChecker:
auto_heal: bool = False,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -297,11 +189,7 @@ class IntegrityChecker:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
self.cursor_store = IntegrityCursorStore(self.storage_root)
def start(self) -> None:
if self._timer is not None:
@@ -341,79 +229,52 @@ class IntegrityChecker:
self._schedule_next()
def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
if not self._lock.acquire(blocking=False):
raise RuntimeError("Integrity scan is already in progress")
effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
effective_dry_run = dry_run if dry_run is not None else self.dry_run
try:
self._scanning = True
self._scan_start_time = time.time()
start = time.time()
result = IntegrityResult()
effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
effective_dry_run = dry_run if dry_run is not None else self.dry_run
bucket_names = self._list_bucket_names()
start = self._scan_start_time
result = IntegrityResult()
for bucket_name in bucket_names:
if result.objects_scanned >= self.batch_size:
break
result.buckets_scanned += 1
self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
bucket_names = self._list_bucket_names()
self.cursor_store.clean_stale(bucket_names)
ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)
result.execution_time_seconds = time.time() - start
for bucket_name in ordered_buckets:
if self._batch_exhausted(result):
break
result.buckets_scanned += 1
cursor_key = self.cursor_store.get_last_key(bucket_name)
key_corrupted = self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
key_orphaned = self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
key_phantom = self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
returned_keys = [k for k in (key_corrupted, key_orphaned, key_phantom) if k is not None]
bucket_exhausted = self._batch_exhausted(result)
if bucket_exhausted and returned_keys:
self.cursor_store.update_bucket(bucket_name, time.time(), last_key=min(returned_keys))
else:
self.cursor_store.update_bucket(bucket_name, time.time(), completed=True)
result.execution_time_seconds = time.time() - start
if result.has_issues or result.errors:
logger.info(
"Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
"stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
result.execution_time_seconds,
result.corrupted_objects,
result.orphaned_objects,
result.phantom_metadata,
result.stale_versions,
result.etag_cache_inconsistencies,
result.legacy_metadata_drifts,
result.issues_healed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
record = IntegrityExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
auto_heal=effective_auto_heal,
if result.has_issues or result.errors:
logger.info(
"Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
"stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
result.execution_time_seconds,
result.corrupted_objects,
result.orphaned_objects,
result.phantom_metadata,
result.stale_versions,
result.etag_cache_inconsistencies,
result.legacy_metadata_drifts,
result.issues_healed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
self.history_store.add(record)
return result
finally:
self._scanning = False
self._scan_start_time = None
self._lock.release()
record = IntegrityExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
auto_heal=effective_auto_heal,
)
self.history_store.add(record)
def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
if self._scanning:
return False
t = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
t.start()
return True
return result
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
@@ -428,186 +289,105 @@ class IntegrityChecker:
pass
return names
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _batch_exhausted(self, result: IntegrityResult) -> bool:
return self._shutdown or result.objects_scanned >= self.batch_size
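
These two helpers define the scan loop's contract: every iteration pays a small sleep (io_throttle_ms) so a background scan cannot saturate disk I/O, and both the shutdown flag and the per-run object budget are re-checked at each step. A runnable toy version of that contract (names hypothetical):

import time

def scan(keys, budget: int, throttle_s: float = 0.01, stop=lambda: False):
    # Every iteration pays the throttle and re-checks the stop flag and the
    # batch budget, so a shutdown or an exhausted budget ends the walk promptly.
    scanned = []
    for key in keys:
        if stop() or len(scanned) >= budget:
            break
        time.sleep(throttle_s)  # ~io_throttle_ms between filesystem probes
        scanned.append(key)
    return scanned

print(scan(["a", "b", "c", "d"], budget=2))  # ['a', 'b']
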
def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
if len(result.issues) < MAX_ISSUES:
result.issues.append(issue)
def _collect_index_keys(
self, meta_root: Path, cursor_key: Optional[str] = None,
) -> Dict[str, Dict[str, Any]]:
all_keys: Dict[str, Dict[str, Any]] = {}
def _check_corrupted_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return all_keys
return
try:
for index_file in meta_root.rglob("_index.json"):
if result.objects_scanned >= self.batch_size:
return
if not index_file.is_file():
continue
rel_dir = index_file.parent.relative_to(meta_root)
dir_prefix = "" if rel_dir == Path(".") else rel_dir.as_posix()
if cursor_key is not None and dir_prefix:
full_prefix = dir_prefix + "/"
if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
continue
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
continue
for key_name, entry in index_data.items():
full_key = (dir_prefix + "/" + key_name) if dir_prefix else key_name
if cursor_key is not None and full_key <= cursor_key:
for key_name, entry in list(index_data.items()):
if result.objects_scanned >= self.batch_size:
return
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key_name
else:
full_key = rel_dir.as_posix() + "/" + key_name
object_path = bucket_path / full_key
if not object_path.exists():
continue
all_keys[full_key] = {
"entry": entry,
"index_file": index_file,
"key_name": key_name,
}
except OSError:
pass
return all_keys
def _walk_bucket_files_sorted(
self, bucket_path: Path, cursor_key: Optional[str] = None,
):
def _walk(dir_path: Path, prefix: str):
try:
entries = list(os.scandir(dir_path))
except OSError:
return
result.objects_scanned += 1
def _sort_key(e):
if e.is_dir(follow_symlinks=False):
return e.name + "/"
return e.name
entries.sort(key=_sort_key)
for entry in entries:
if entry.is_dir(follow_symlinks=False):
if not prefix and entry.name in self.INTERNAL_FOLDERS:
meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
stored_etag = meta.get("__etag__")
if not stored_etag:
continue
new_prefix = (prefix + "/" + entry.name) if prefix else entry.name
if cursor_key is not None:
full_prefix = new_prefix + "/"
if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
continue
yield from _walk(Path(entry.path), new_prefix)
elif entry.is_file(follow_symlinks=False):
full_key = (prefix + "/" + entry.name) if prefix else entry.name
if cursor_key is not None and full_key <= cursor_key:
try:
actual_etag = _compute_etag(object_path)
except OSError:
continue
yield full_key
yield from _walk(bucket_path, "")
if actual_etag != stored_etag:
result.corrupted_objects += 1
issue = IntegrityIssue(
issue_type="corrupted_object",
bucket=bucket_name,
key=full_key,
detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
)
def _check_corrupted_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return None
last_key = None
try:
all_keys = self._collect_index_keys(meta_root, cursor_key)
sorted_keys = sorted(all_keys.keys())
for full_key in sorted_keys:
if self._throttle():
return last_key
if self._batch_exhausted(result):
return last_key
info = all_keys[full_key]
entry = info["entry"]
index_file = info["index_file"]
key_name = info["key_name"]
object_path = bucket_path / full_key
if not object_path.exists():
continue
result.objects_scanned += 1
last_key = full_key
meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
stored_etag = meta.get("__etag__")
if not stored_etag:
continue
try:
actual_etag = _compute_etag(object_path)
except OSError:
continue
if actual_etag != stored_etag:
result.corrupted_objects += 1
issue = IntegrityIssue(
issue_type="corrupted_object",
bucket=bucket_name,
key=full_key,
detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
)
if auto_heal and not dry_run:
try:
stat = object_path.stat()
meta["__etag__"] = actual_etag
meta["__size__"] = str(stat.st_size)
meta["__last_modified__"] = str(stat.st_mtime)
if auto_heal and not dry_run:
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
index_data = {}
index_data[key_name] = {"metadata": meta}
self._atomic_write_index(index_file, index_data)
issue.healed = True
issue.heal_action = "updated etag in index"
result.issues_healed += 1
except OSError as e:
result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")
stat = object_path.stat()
meta["__etag__"] = actual_etag
meta["__size__"] = str(stat.st_size)
meta["__last_modified__"] = str(stat.st_mtime)
index_data[key_name] = {"metadata": meta}
self._atomic_write_index(index_file, index_data)
issue.healed = True
issue.heal_action = "updated etag in index"
result.issues_healed += 1
except OSError as e:
result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")
self._add_issue(result, issue)
self._add_issue(result, issue)
except OSError as e:
result.errors.append(f"check corrupted {bucket_name}: {e}")
return last_key
def _check_orphaned_objects(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
last_key = None
try:
for full_key in self._walk_bucket_files_sorted(bucket_path, cursor_key):
if self._throttle():
return last_key
if self._batch_exhausted(result):
return last_key
for entry in bucket_path.rglob("*"):
if result.objects_scanned >= self.batch_size:
return
if not entry.is_file():
continue
try:
rel = entry.relative_to(bucket_path)
except ValueError:
continue
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
continue
result.objects_scanned += 1
last_key = full_key
key_path = Path(full_key)
key_name = key_path.name
parent = key_path.parent
full_key = rel.as_posix()
key_name = rel.name
parent = rel.parent
if parent == Path("."):
index_path = meta_root / "_index.json"
@@ -633,9 +413,8 @@ class IntegrityChecker:
if auto_heal and not dry_run:
try:
object_path = bucket_path / full_key
etag = _compute_etag(object_path)
stat = object_path.stat()
etag = _compute_etag(entry)
stat = entry.stat()
meta = {
"__etag__": etag,
"__size__": str(stat.st_size),
@@ -658,56 +437,51 @@ class IntegrityChecker:
self._add_issue(result, issue)
except OSError as e:
result.errors.append(f"check orphaned {bucket_name}: {e}")
return last_key
def _check_phantom_metadata(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
cursor_key: Optional[str] = None,
) -> Optional[str]:
if self._batch_exhausted(result):
return None
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
bucket_path = self.storage_root / bucket_name
meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if not meta_root.exists():
return None
return
last_key = None
try:
all_keys = self._collect_index_keys(meta_root, cursor_key)
sorted_keys = sorted(all_keys.keys())
for index_file in meta_root.rglob("_index.json"):
if not index_file.is_file():
continue
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
continue
heal_by_index: Dict[Path, List[str]] = {}
keys_to_remove = []
for key_name in list(index_data.keys()):
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key_name
else:
full_key = rel_dir.as_posix() + "/" + key_name
for full_key in sorted_keys:
if self._batch_exhausted(result):
break
object_path = bucket_path / full_key
if not object_path.exists():
result.phantom_metadata += 1
issue = IntegrityIssue(
issue_type="phantom_metadata",
bucket=bucket_name,
key=full_key,
detail="metadata entry without file on disk",
)
if auto_heal and not dry_run:
keys_to_remove.append(key_name)
issue.healed = True
issue.heal_action = "removed stale index entry"
result.issues_healed += 1
self._add_issue(result, issue)
result.objects_scanned += 1
last_key = full_key
object_path = bucket_path / full_key
if not object_path.exists():
result.phantom_metadata += 1
info = all_keys[full_key]
issue = IntegrityIssue(
issue_type="phantom_metadata",
bucket=bucket_name,
key=full_key,
detail="metadata entry without file on disk",
)
if auto_heal and not dry_run:
index_file = info["index_file"]
heal_by_index.setdefault(index_file, []).append(info["key_name"])
issue.healed = True
issue.heal_action = "removed stale index entry"
result.issues_healed += 1
self._add_issue(result, issue)
if heal_by_index and auto_heal and not dry_run:
for index_file, keys_to_remove in heal_by_index.items():
if keys_to_remove and auto_heal and not dry_run:
try:
index_data = json.loads(index_file.read_text(encoding="utf-8"))
for k in keys_to_remove:
index_data.pop(k, None)
if index_data:
@@ -718,13 +492,10 @@ class IntegrityChecker:
result.errors.append(f"heal phantom {bucket_name}: {e}")
except OSError as e:
result.errors.append(f"check phantom {bucket_name}: {e}")
return last_key
def _check_stale_versions(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR
if not versions_root.exists():
@@ -732,10 +503,6 @@ class IntegrityChecker:
try:
for key_dir in versions_root.rglob("*"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not key_dir.is_dir():
continue
@@ -743,9 +510,6 @@ class IntegrityChecker:
json_files = {f.stem: f for f in key_dir.glob("*.json")}
for stem, bin_file in bin_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in json_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -765,9 +529,6 @@ class IntegrityChecker:
self._add_issue(result, issue)
for stem, json_file in json_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in bin_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -791,8 +552,6 @@ class IntegrityChecker:
def _check_etag_cache(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"
if not etag_index_path.exists():
@@ -810,9 +569,6 @@ class IntegrityChecker:
found_mismatch = False
for full_key, cached_etag in etag_cache.items():
if self._batch_exhausted(result):
break
result.objects_scanned += 1
key_path = Path(full_key)
key_name = key_path.name
parent = key_path.parent
@@ -862,8 +618,6 @@ class IntegrityChecker:
def _check_legacy_metadata(
self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
if self._batch_exhausted(result):
return
legacy_meta_root = self.storage_root / bucket_name / ".meta"
if not legacy_meta_root.exists():
return
@@ -872,14 +626,9 @@ class IntegrityChecker:
try:
for meta_file in legacy_meta_root.rglob("*.meta.json"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not meta_file.is_file():
continue
result.objects_scanned += 1
try:
rel = meta_file.relative_to(legacy_meta_root)
except ValueError:
@@ -979,17 +728,11 @@ class IntegrityChecker:
return [r.to_dict() for r in records]
def get_status(self) -> dict:
status: Dict[str, Any] = {
return {
"enabled": not self._shutdown or self._timer is not None,
"running": self._timer is not None and not self._shutdown,
"scanning": self._scanning,
"interval_hours": self.interval_seconds / 3600.0,
"batch_size": self.batch_size,
"auto_heal": self.auto_heal,
"dry_run": self.dry_run,
"io_throttle_ms": round(self._io_throttle * 1000),
}
if self._scanning and self._scan_start_time is not None:
status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
status["cursor"] = self.cursor_store.get_info()
return status

View File

@@ -19,10 +19,6 @@ from defusedxml.ElementTree import fromstring
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"verify_sigv4_signature", "derive_signing_key", "clear_signing_key_cache",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None
@@ -205,11 +201,6 @@ _SIGNING_KEY_CACHE_LOCK = threading.Lock()
_SIGNING_KEY_CACHE_TTL = 60.0
_SIGNING_KEY_CACHE_MAX_SIZE = 256
_SIGV4_HEADER_RE = re.compile(
r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)"
)
_SIGV4_REQUIRED_HEADERS = frozenset({'host', 'x-amz-date'})
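
Hoisting the Authorization-header regex to module scope compiles it once per process instead of per request; its six groups carry the credential scope and the signature. A quick illustration against a synthetic header (values hypothetical):

import re

SIGV4_RE = re.compile(
    r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, "
    r"SignedHeaders=([^,]+), Signature=(.+)"
)

header = ("AWS4-HMAC-SHA256 Credential=AKEXAMPLE/20260314/us-east-1/s3/aws4_request, "
          "SignedHeaders=host;x-amz-date, Signature=deadbeef")
access_key, date_stamp, region, service, signed, signature = SIGV4_RE.match(header).groups()
print(access_key, date_stamp, region, service)  # AKEXAMPLE 20260314 us-east-1 s3
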
def clear_signing_key_cache() -> None:
if _HAS_RUST:
@@ -268,7 +259,10 @@ def _get_canonical_uri(req: Any) -> str:
def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
match = _SIGV4_HEADER_RE.match(auth_header)
match = re.match(
r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)",
auth_header,
)
if not match:
return None
@@ -292,9 +286,14 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if time_diff > tolerance:
raise IamError("Request timestamp too old or too far in the future")
required_headers = {'host', 'x-amz-date'}
signed_headers_set = set(signed_headers_str.split(';'))
if not _SIGV4_REQUIRED_HEADERS.issubset(signed_headers_set):
if not ({'host', 'date'}.issubset(signed_headers_set)):
if not required_headers.issubset(signed_headers_set):
if 'date' in signed_headers_set:
required_headers.remove('x-amz-date')
required_headers.add('date')
if not required_headers.issubset(signed_headers_set):
raise IamError("Required headers not signed")
canonical_uri = _get_canonical_uri(req)
@@ -302,12 +301,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if _HAS_RUST:
query_params = list(req.args.items(multi=True))
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, payload_hash, amz_date, date_stamp, region,
@@ -396,12 +390,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
if _HAS_RUST:
query_params = [(k, v) for k, v in req.args.items(multi=True) if k != "X-Amz-Signature"]
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, "UNSIGNED-PAYLOAD", amz_date, date_stamp, region,
@@ -499,7 +488,7 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
iam_error: IamError | None = None
if principal is not None:
try:
_iam().authorize(principal, bucket_name, action, object_key=object_key)
_iam().authorize(principal, bucket_name, action)
iam_allowed = True
except IamError as exc:
iam_error = exc
@@ -534,6 +523,21 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
raise iam_error or IamError("Access denied")
def _enforce_bucket_policy(principal: Principal | None, bucket_name: str | None, object_key: str | None, action: str) -> None:
if not bucket_name:
return
policy_context = _build_policy_context()
decision = _bucket_policies().evaluate(
principal.access_key if principal else None,
bucket_name,
object_key,
action,
policy_context,
)
if decision == "deny":
raise IamError("Access denied by bucket policy")
def _object_principal(action: str, bucket_name: str, object_key: str):
principal, error = _require_principal()
try:
@@ -542,7 +546,121 @@ def _object_principal(action: str, bucket_name: str, object_key: str):
except IamError as exc:
if not error:
return None, _error_response("AccessDenied", str(exc), 403)
if not _has_presign_params():
return None, error
try:
principal = _validate_presigned_request(action, bucket_name, object_key)
_enforce_bucket_policy(principal, bucket_name, object_key, action)
return principal, None
except IamError as exc:
return None, _error_response("AccessDenied", str(exc), 403)
def _has_presign_params() -> bool:
return bool(request.args.get("X-Amz-Algorithm"))
def _validate_presigned_request(action: str, bucket_name: str, object_key: str) -> Principal:
algorithm = request.args.get("X-Amz-Algorithm")
credential = request.args.get("X-Amz-Credential")
amz_date = request.args.get("X-Amz-Date")
signed_headers = request.args.get("X-Amz-SignedHeaders")
expires = request.args.get("X-Amz-Expires")
signature = request.args.get("X-Amz-Signature")
if not all([algorithm, credential, amz_date, signed_headers, expires, signature]):
raise IamError("Malformed presigned URL")
if algorithm != "AWS4-HMAC-SHA256":
raise IamError("Unsupported signing algorithm")
parts = credential.split("/")
if len(parts) != 5:
raise IamError("Invalid credential scope")
access_key, date_stamp, region, service, terminal = parts
if terminal != "aws4_request":
raise IamError("Invalid credential scope")
config_region = current_app.config["AWS_REGION"]
config_service = current_app.config["AWS_SERVICE"]
if region != config_region or service != config_service:
raise IamError("Credential scope mismatch")
try:
expiry = int(expires)
except ValueError as exc:
raise IamError("Invalid expiration") from exc
min_expiry = current_app.config.get("PRESIGNED_URL_MIN_EXPIRY_SECONDS", 1)
max_expiry = current_app.config.get("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604800)
if expiry < min_expiry or expiry > max_expiry:
raise IamError(f"Expiration must be between {min_expiry} second(s) and {max_expiry} seconds")
try:
request_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
except ValueError as exc:
raise IamError("Invalid X-Amz-Date") from exc
now = datetime.now(timezone.utc)
tolerance = timedelta(seconds=current_app.config.get("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900))
if request_time > now + tolerance:
raise IamError("Request date is too far in the future")
if now > request_time + timedelta(seconds=expiry):
raise IamError("Presigned URL expired")
signed_headers_list = [header.strip().lower() for header in signed_headers.split(";") if header]
signed_headers_list.sort()
canonical_headers = _canonical_headers_from_request(signed_headers_list)
canonical_query = _canonical_query_from_request()
payload_hash = request.args.get("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
canonical_request = "\n".join(
[
request.method,
_canonical_uri(bucket_name, object_key),
canonical_query,
canonical_headers,
";".join(signed_headers_list),
payload_hash,
]
)
hashed_request = hashlib.sha256(canonical_request.encode()).hexdigest()
scope = f"{date_stamp}/{region}/{service}/aws4_request"
string_to_sign = "\n".join([
"AWS4-HMAC-SHA256",
amz_date,
scope,
hashed_request,
])
secret = _iam().secret_for_key(access_key)
signing_key = _derive_signing_key(secret, date_stamp, region, service)
expected = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
if not hmac.compare_digest(expected, signature):
raise IamError("Signature mismatch")
return _iam().principal_for_key(access_key)
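
The verification above is standard SigV4: rebuild the canonical request, hash it into the string-to-sign, derive a signing key by chained HMACs over the date/region/service scope, and compare digests in constant time. A sketch of what _derive_signing_key conventionally does (this is the AWS-documented derivation, not necessarily this codebase's exact helper):

import hashlib
import hmac

def derive_signing_key(secret: str, date_stamp: str, region: str, service: str) -> bytes:
    # Chained HMACs per the SigV4 spec: kDate -> kRegion -> kService -> kSigning.
    key = ("AWS4" + secret).encode()
    for part in (date_stamp, region, service, "aws4_request"):
        key = hmac.new(key, part.encode(), hashlib.sha256).digest()
    return key

def signature_ok(secret: str, date_stamp: str, region: str, service: str,
                 string_to_sign: str, presented: str) -> bool:
    key = derive_signing_key(secret, date_stamp, region, service)
    expected = hmac.new(key, string_to_sign.encode(), hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, presented)  # constant-time compare
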
def _canonical_query_from_request() -> str:
parts = []
for key in sorted(request.args.keys()):
if key == "X-Amz-Signature":
continue
values = request.args.getlist(key)
encoded_key = quote(str(key), safe="-_.~")
for value in sorted(values):
encoded_value = quote(str(value), safe="-_.~")
parts.append(f"{encoded_key}={encoded_value}")
return "&".join(parts)
def _canonical_headers_from_request(headers: list[str]) -> str:
lines = []
for header in headers:
if header == "host":
api_base = current_app.config.get("API_BASE_URL")
if api_base:
value = urlparse(api_base).netloc
else:
value = request.host
else:
value = request.headers.get(header, "")
canonical_value = " ".join(value.strip().split()) if value else ""
lines.append(f"{header}:{canonical_value}")
return "\n".join(lines) + "\n"
def _canonical_uri(bucket_name: str, object_key: str | None) -> str:
@@ -608,8 +726,8 @@ def _generate_presigned_url(
host = parsed.netloc
scheme = parsed.scheme
else:
host = request.host
scheme = request.scheme or "http"
host = request.headers.get("X-Forwarded-Host", request.host)
scheme = request.headers.get("X-Forwarded-Proto", request.scheme or "http")
canonical_headers = f"host:{host}\n"
canonical_request = "\n".join(
@@ -882,7 +1000,7 @@ def _render_encryption_document(config: dict[str, Any]) -> Element:
return root
def _stream_file(path, chunk_size: int = 1024 * 1024):
def _stream_file(path, chunk_size: int = 256 * 1024):
with path.open("rb") as handle:
while True:
chunk = handle.read(chunk_size)
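
This hunk only changes the streaming default chunk size (the paired def lines show 1 MiB and 256 KiB); the rest of the generator is elided by the diff. For reference, a complete version of such a generator, assuming the elided tail simply yields until EOF:

from pathlib import Path
from typing import Iterator

def stream_file(path: Path, chunk_size: int = 1024 * 1024) -> Iterator[bytes]:
    # Yield fixed-size chunks so a response never holds the whole object in memory.
    with path.open("rb") as handle:
        while True:
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            yield chunk
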
@@ -1017,7 +1135,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "versioning")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1064,7 +1182,7 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "tagging")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1229,7 +1347,7 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "cors")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1282,7 +1400,7 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "encryption")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1357,7 +1475,7 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "share")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1600,12 +1718,12 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
"""Handle bucket lifecycle configuration (GET/PUT/DELETE /<bucket>?lifecycle)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "lifecycle")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1764,12 +1882,12 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
"""Handle bucket quota configuration (GET/PUT/DELETE /<bucket>?quota)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "quota")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1846,7 +1964,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1892,7 +2010,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "notification")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1988,7 +2106,7 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "logging")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2130,7 +2248,7 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2206,7 +2324,7 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2539,7 +2657,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "create_bucket")
_authorize_action(principal, bucket_name, "write")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2556,7 +2674,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "delete_bucket")
_authorize_action(principal, bucket_name, "delete")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2833,12 +2951,9 @@ def object_handler(bucket_name: str, object_key: str):
is_encrypted = "x-amz-server-side-encryption" in metadata
cond_etag = metadata.get("__etag__")
_etag_was_healed = False
if not cond_etag and not is_encrypted:
try:
cond_etag = storage._compute_etag(path)
_etag_was_healed = True
storage.heal_missing_etag(bucket_name, object_key, cond_etag)
except OSError:
cond_etag = None
if cond_etag:
@@ -2884,7 +2999,7 @@ def object_handler(bucket_name: str, object_key: str):
try:
stat = path.stat()
file_size = stat.st_size
etag = cond_etag or storage._compute_etag(path)
etag = metadata.get("__etag__") or storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
@@ -2932,7 +3047,7 @@ def object_handler(bucket_name: str, object_key: str):
try:
stat = path.stat()
response = Response(status=200)
etag = cond_etag or storage._compute_etag(path)
etag = metadata.get("__etag__") or storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
@@ -3114,7 +3229,7 @@ def _bucket_replication_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "replication")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3197,7 +3312,7 @@ def _bucket_website_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "website")
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3317,13 +3432,9 @@ def head_object(bucket_name: str, object_key: str) -> Response:
return error
try:
_authorize_action(principal, bucket_name, "read", object_key=object_key)
storage = _storage()
path = storage.get_object_path(bucket_name, object_key)
metadata = storage.get_object_metadata(bucket_name, object_key)
etag = metadata.get("__etag__")
if not etag:
etag = storage._compute_etag(path)
storage.heal_missing_etag(bucket_name, object_key, etag)
path = _storage().get_object_path(bucket_name, object_key)
metadata = _storage().get_object_metadata(bucket_name, object_key)
etag = metadata.get("__etag__") or _storage()._compute_etag(path)
head_mtime = float(metadata["__last_modified__"]) if "__last_modified__" in metadata else None
if head_mtime is None:

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
import hashlib
import json
import logging
import os
import re
import shutil
@@ -21,21 +20,12 @@ from typing import Any, BinaryIO, Dict, Generator, List, Optional
try:
import myfsio_core as _rc
if not all(hasattr(_rc, f) for f in (
"validate_bucket_name", "validate_object_key", "md5_file",
"shallow_scan", "bucket_stats_scan", "search_objects_scan",
"stream_to_file_with_md5", "assemble_parts_with_md5",
"build_object_cache", "read_index_entry", "write_index_entry",
"delete_index_entry", "check_bucket_contents",
)):
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
_HAS_RUST = True
except ImportError:
_rc = None
_HAS_RUST = False
logger = logging.getLogger(__name__)
# Platform-specific file locking
if os.name == "nt":
import msvcrt
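The hunk above guards the optional Rust extension with a capability check, so a stale build degrades to the pure-Python paths instead of failing at call time. A condensed sketch of that guard (the `REQUIRED` tuple is shortened for illustration):

```python
# Accept the compiled module only if it exposes everything this build expects;
# otherwise fall back to the pure-Python code paths.
REQUIRED = ("validate_bucket_name", "md5_file", "shallow_scan")  # shortened

try:
    import myfsio_core as _rc
    if not all(hasattr(_rc, fn) for fn in REQUIRED):
        raise ImportError("myfsio_core is outdated, rebuild it")
    _HAS_RUST = True
except ImportError:
    _rc = None
    _HAS_RUST = False
```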
@@ -200,7 +190,6 @@ class ObjectStorage:
object_cache_max_size: int = 100,
bucket_config_cache_ttl: float = 30.0,
object_key_max_length_bytes: int = 1024,
meta_read_cache_max: int = 2048,
) -> None:
self.root = Path(root)
self.root.mkdir(parents=True, exist_ok=True)
@@ -219,7 +208,7 @@ class ObjectStorage:
self._sorted_key_cache: Dict[str, tuple[list[str], int]] = {}
self._meta_index_locks: Dict[str, threading.Lock] = {}
self._meta_read_cache: OrderedDict[tuple, Optional[Dict[str, Any]]] = OrderedDict()
self._meta_read_cache_max = meta_read_cache_max
self._meta_read_cache_max = 2048
self._cleanup_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="ParentCleanup")
self._stats_mem: Dict[str, Dict[str, int]] = {}
self._stats_serial: Dict[str, int] = {}
@@ -229,7 +218,6 @@ class ObjectStorage:
self._stats_flush_timer: Optional[threading.Timer] = None
self._etag_index_dirty: set[str] = set()
self._etag_index_flush_timer: Optional[threading.Timer] = None
self._etag_index_mem: Dict[str, tuple[Dict[str, str], float]] = {}
def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
with self._registry_lock:
@@ -418,10 +406,6 @@ class ObjectStorage:
self._stats_serial[bucket_id] = self._stats_serial.get(bucket_id, 0) + 1
self._stats_mem_time[bucket_id] = time.monotonic()
self._stats_dirty.add(bucket_id)
needs_immediate = data["objects"] == 0 and objects_delta < 0
if needs_immediate:
self._flush_stats()
else:
self._schedule_stats_flush()
def _schedule_stats_flush(self) -> None:
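The removed branch flushed immediately when a bucket's object count dropped to zero and otherwise deferred to a scheduled flush. A generic sketch of that debounce pattern with `threading.Timer` (class and names hypothetical):

```python
import threading
from typing import Callable, Optional

class DebouncedFlusher:
    # Coalesce bursts of updates into a single flush after a quiet delay.
    def __init__(self, flush_fn: Callable[[], None], delay: float = 5.0) -> None:
        self._flush_fn = flush_fn
        self._delay = delay
        self._timer: Optional[threading.Timer] = None
        self._lock = threading.Lock()

    def schedule(self) -> None:
        with self._lock:
            # Only arm a new timer if none is pending, so N updates -> 1 flush.
            if self._timer is None or not self._timer.is_alive():
                self._timer = threading.Timer(self._delay, self._flush_fn)
                self._timer.daemon = True
                self._timer.start()
```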
@@ -439,7 +423,7 @@ class ObjectStorage:
cache_path = self._system_bucket_root(bucket_id) / "stats.json"
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
self._atomic_write_json(cache_path, data, sync=False)
self._atomic_write_json(cache_path, data)
except OSError:
pass
@@ -614,7 +598,14 @@ class ObjectStorage:
is_truncated=False, next_continuation_token=None,
)
meta_cache: Dict[str, str] = self._get_etag_index(bucket_id)
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
if etag_index_path.exists():
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
pass
entries_files: list[tuple[str, int, float, Optional[str]]] = []
entries_dirs: list[str] = []
@@ -719,73 +710,6 @@ class ObjectStorage:
next_continuation_token=next_token,
)
def iter_objects_shallow(
self,
bucket_name: str,
*,
prefix: str = "",
delimiter: str = "/",
) -> Generator[tuple[str, ObjectMeta | str], None, None]:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
raise BucketNotFoundError("Bucket does not exist")
bucket_id = bucket_path.name
target_dir = bucket_path
if prefix:
safe_prefix_path = Path(prefix.rstrip("/"))
if ".." in safe_prefix_path.parts:
return
target_dir = bucket_path / safe_prefix_path
try:
resolved = target_dir.resolve()
bucket_resolved = bucket_path.resolve()
if not str(resolved).startswith(str(bucket_resolved) + os.sep) and resolved != bucket_resolved:
return
except (OSError, ValueError):
return
if not target_dir.exists() or not target_dir.is_dir():
return
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
if etag_index_path.exists():
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
pass
try:
with os.scandir(str(target_dir)) as it:
for entry in it:
name = entry.name
if name in self.INTERNAL_FOLDERS:
continue
if entry.is_dir(follow_symlinks=False):
yield ("folder", prefix + name + delimiter)
elif entry.is_file(follow_symlinks=False):
key = prefix + name
try:
st = entry.stat()
etag = meta_cache.get(key)
if etag is None:
safe_key = PurePosixPath(key)
meta = self._read_metadata(bucket_id, Path(safe_key))
etag = meta.get("__etag__") if meta else None
yield ("object", ObjectMeta(
key=key,
size=st.st_size,
last_modified=datetime.fromtimestamp(st.st_mtime, timezone.utc),
etag=etag,
metadata=None,
))
except OSError:
pass
except OSError:
return
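For reference, the essence of the removed `iter_objects_shallow`: list a single directory level with `os.scandir`, yielding pseudo-folders for subdirectories and object keys for files. This sketch omits the prefix-traversal validation and ETag-index lookup the full method performs:

```python
import os
from pathlib import Path

def iter_shallow(bucket_path: Path, prefix: str = "", delimiter: str = "/"):
    # Yield ("folder", <prefix>) for subdirectories and ("object", <key>)
    # for files, one directory level deep.
    target = (bucket_path / prefix.rstrip("/")) if prefix else bucket_path
    if not target.is_dir():
        return
    with os.scandir(target) as it:
        for entry in it:
            if entry.is_dir(follow_symlinks=False):
                yield ("folder", prefix + entry.name + delimiter)
            elif entry.is_file(follow_symlinks=False):
                yield ("object", prefix + entry.name)
```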
def _shallow_via_full_scan(
self,
bucket_name: str,
@@ -1084,30 +1008,6 @@ class ObjectStorage:
safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
return self._read_metadata(bucket_path.name, safe_key) or {}
def heal_missing_etag(self, bucket_name: str, object_key: str, etag: str) -> None:
"""Persist a computed ETag back to metadata (self-heal on read)."""
try:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
return
bucket_id = bucket_path.name
safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
existing = self._read_metadata(bucket_id, safe_key) or {}
if existing.get("__etag__"):
return
existing["__etag__"] = etag
self._write_metadata(bucket_id, safe_key, existing)
with self._obj_cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
obj = cached[0].get(safe_key.as_posix())
if obj and not obj.etag:
obj.etag = etag
self._etag_index_dirty.add(bucket_id)
self._schedule_etag_index_flush()
except Exception:
logger.warning("Failed to heal missing ETag for %s/%s", bucket_name, object_key)
def _cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
"""Remove empty parent directories in a background thread.
@@ -2117,7 +2017,6 @@ class ObjectStorage:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(raw["etag_cache"], f)
self._etag_index_mem[bucket_id] = (dict(raw["etag_cache"]), etag_index_path.stat().st_mtime)
except OSError:
pass
for key, size, mtime, etag in raw["objects"]:
@@ -2241,7 +2140,6 @@ class ObjectStorage:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(meta_cache, f)
self._etag_index_mem[bucket_id] = (dict(meta_cache), etag_index_path.stat().st_mtime)
except OSError:
pass
@@ -2355,25 +2253,6 @@ class ObjectStorage:
self._etag_index_dirty.add(bucket_id)
self._schedule_etag_index_flush()
def _get_etag_index(self, bucket_id: str) -> Dict[str, str]:
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
try:
current_mtime = etag_index_path.stat().st_mtime
except OSError:
return {}
cached = self._etag_index_mem.get(bucket_id)
if cached:
cache_dict, cached_mtime = cached
if current_mtime == cached_mtime:
return cache_dict
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
data = json.load(f)
self._etag_index_mem[bucket_id] = (data, current_mtime)
return data
except (OSError, json.JSONDecodeError):
return {}
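The removed `_get_etag_index` is an mtime-validated read cache: the JSON index is reloaded from disk only when the file's mtime differs from the cached one. A standalone sketch of that pattern (module-level cache for brevity):

```python
import json
from pathlib import Path
from typing import Dict, Tuple

_index_cache: Dict[str, Tuple[dict, float]] = {}

def load_index(path: Path, cache_key: str) -> dict:
    # Reload the JSON index from disk only when the file's mtime has changed.
    try:
        mtime = path.stat().st_mtime
    except OSError:
        return {}
    hit = _index_cache.get(cache_key)
    if hit and hit[1] == mtime:
        return hit[0]
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return {}
    _index_cache[cache_key] = (data, mtime)
    return data
```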
def _schedule_etag_index_flush(self) -> None:
if self._etag_index_flush_timer is None or not self._etag_index_flush_timer.is_alive():
self._etag_index_flush_timer = threading.Timer(5.0, self._flush_etag_indexes)
@@ -2392,10 +2271,11 @@ class ObjectStorage:
index = {k: v.etag for k, v in objects.items() if v.etag}
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
try:
self._atomic_write_json(etag_index_path, index, sync=False)
self._etag_index_mem[bucket_id] = (index, etag_index_path.stat().st_mtime)
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(index, f)
except OSError:
logger.warning("Failed to flush etag index for bucket %s", bucket_id)
pass
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
"""Pre-warm the object cache for specified buckets or all buckets.
@@ -2437,15 +2317,14 @@ class ObjectStorage:
path.mkdir(parents=True, exist_ok=True)
@staticmethod
def _atomic_write_json(path: Path, data: Any, *, sync: bool = True) -> None:
def _atomic_write_json(path: Path, data: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
tmp_path = path.with_suffix(".tmp")
try:
with tmp_path.open("w", encoding="utf-8") as f:
json.dump(data, f)
if sync:
f.flush()
os.fsync(f.fileno())
f.flush()
os.fsync(f.fileno())
tmp_path.replace(path)
except BaseException:
try:

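The `_atomic_write_json` hunk above is the classic write-temp-then-rename pattern; one side adds a `sync` flag so hot, loss-tolerant paths can skip the `fsync`. A self-contained sketch of the flagged variant (the cleanup tail is illustrative, since the hunk is truncated):

```python
import json
import os
from pathlib import Path
from typing import Any

def atomic_write_json(path: Path, data: Any, *, sync: bool = True) -> None:
    # Write a sibling temp file, optionally fsync it, then rename over the
    # target so readers never observe a half-written document.
    path.parent.mkdir(parents=True, exist_ok=True)
    tmp = path.with_suffix(".tmp")
    try:
        with tmp.open("w", encoding="utf-8") as fh:
            json.dump(data, fh)
            if sync:  # skip for hot paths like cache files
                fh.flush()
                os.fsync(fh.fileno())
        tmp.replace(path)  # atomic rename on POSIX
    except BaseException:
        tmp.unlink(missing_ok=True)
        raise
```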
View File

@@ -225,10 +225,10 @@ def _policy_allows_public_read(policy: dict[str, Any]) -> bool:
def _bucket_access_descriptor(policy: dict[str, Any] | None) -> tuple[str, str]:
if not policy:
return ("IAM only", "bg-secondary-subtle text-secondary-emphasis")
return ("IAM only", "text-bg-secondary")
if _policy_allows_public_read(policy):
return ("Public read", "bg-warning-subtle text-warning-emphasis")
return ("Custom policy", "bg-info-subtle text-info-emphasis")
return ("Public read", "text-bg-warning")
return ("Custom policy", "text-bg-info")
def _current_principal():
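`_bucket_access_descriptor` maps a bucket policy to a UI badge via `_policy_allows_public_read`. A rough sketch of what such a check typically tests, assuming S3-style policy documents (the project's exact rules may differ):

```python
from typing import Any

def policy_allows_public_read(policy: dict[str, Any]) -> bool:
    # True if any Allow statement grants GET to everyone.
    for stmt in policy.get("Statement", []):
        if stmt.get("Effect") != "Allow":
            continue
        actions = stmt.get("Action", [])
        if isinstance(actions, str):
            actions = [actions]
        principal = stmt.get("Principal")
        if principal in ("*", {"AWS": "*"}) and any(
            a in ("s3:GetObject", "s3:*", "*") for a in actions
        ):
            return True
    return False
```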
@@ -618,77 +618,20 @@ def stream_bucket_objects(bucket_name: str):
prefix = request.args.get("prefix") or None
delimiter = request.args.get("delimiter") or None
storage = _storage()
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
client = get_session_s3_client()
except (PermissionError, RuntimeError) as exc:
return jsonify({"error": str(exc)}), 403
versioning_enabled = get_versioning_via_s3(client, bucket_name)
url_templates = build_url_templates(bucket_name)
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
def generate():
yield json.dumps({
"type": "meta",
"versioning_enabled": versioning_enabled,
"url_templates": url_templates,
}) + "\n"
yield json.dumps({"type": "count", "total_count": 0}) + "\n"
running_count = 0
try:
if delimiter:
for item_type, item in storage.iter_objects_shallow(
bucket_name, prefix=prefix or "", delimiter=delimiter,
):
if item_type == "folder":
yield json.dumps({"type": "folder", "prefix": item}) + "\n"
else:
last_mod = item.last_modified
yield json.dumps({
"type": "object",
"key": item.key,
"size": item.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": item.etag or "",
}) + "\n"
running_count += 1
if running_count % 1000 == 0:
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
else:
continuation_token = None
while True:
result = storage.list_objects(
bucket_name,
max_keys=1000,
continuation_token=continuation_token,
prefix=prefix,
)
for obj in result.objects:
last_mod = obj.last_modified
yield json.dumps({
"type": "object",
"key": obj.key,
"size": obj.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": obj.etag or "",
}) + "\n"
running_count += len(result.objects)
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
if not result.is_truncated:
break
continuation_token = result.next_continuation_token
except StorageError as exc:
yield json.dumps({"type": "error", "error": str(exc)}) + "\n"
return
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
yield json.dumps({"type": "done"}) + "\n"
return Response(
generate(),
stream_objects_ndjson(
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
delimiter=delimiter,
),
mimetype='application/x-ndjson',
headers={
'Cache-Control': 'no-cache',
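The replaced route streams newline-delimited JSON: a `meta` record first, then one record per object or folder, then a `done` sentinel, all behind `Cache-Control: no-cache`. A minimal runnable sketch of that NDJSON shape in Flask (the route and payload are stand-ins):

```python
import json
from flask import Flask, Response

app = Flask(__name__)

@app.get("/buckets/<bucket>/stream")
def stream(bucket: str):
    def generate():
        yield json.dumps({"type": "meta", "versioning_enabled": False}) + "\n"
        for key in ("a.txt", "b.txt"):  # stand-in for the real listing
            yield json.dumps({"type": "object", "key": key}) + "\n"
        yield json.dumps({"type": "done"}) + "\n"
    return Response(
        generate(),
        mimetype="application/x-ndjson",
        headers={"Cache-Control": "no-cache"},
    )
```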
@@ -1063,27 +1006,6 @@ def bulk_delete_objects(bucket_name: str):
return _respond(False, f"A maximum of {MAX_KEYS} objects can be deleted per request", status_code=400)
unique_keys = list(dict.fromkeys(cleaned))
folder_prefixes = [k for k in unique_keys if k.endswith("/")]
if folder_prefixes:
try:
client = get_session_s3_client()
for prefix in folder_prefixes:
unique_keys.remove(prefix)
paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
for obj in page.get("Contents", []):
if obj["Key"] not in unique_keys:
unique_keys.append(obj["Key"])
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
if isinstance(exc, ClientError):
err, status = handle_client_error(exc)
return _respond(False, err["error"], status_code=status)
return _respond(False, "S3 API server is unreachable", status_code=502)
if not unique_keys:
return _respond(False, "No objects found under the selected folders", status_code=400)
try:
_authorize_ui(principal, bucket_name, "delete")
except IamError as exc:
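The removed branch expanded trailing-slash "folder" keys into the concrete objects beneath them before the delete. A sketch of that expansion with the boto3 `list_objects_v2` paginator (error handling omitted; the function name is hypothetical):

```python
def expand_folder_prefixes(client, bucket: str, keys: list[str]) -> list[str]:
    # Replace trailing-slash "folder" keys with every object stored under them.
    expanded = [k for k in keys if not k.endswith("/")]
    paginator = client.get_paginator("list_objects_v2")
    for prefix in (k for k in keys if k.endswith("/")):
        for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
            for obj in page.get("Contents", []):
                if obj["Key"] not in expanded:
                    expanded.append(obj["Key"])
    return expanded
```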
@@ -1114,17 +1036,13 @@ def bulk_delete_objects(bucket_name: str):
else:
try:
client = get_session_s3_client()
deleted = []
errors = []
for i in range(0, len(unique_keys), 1000):
batch = unique_keys[i:i + 1000]
objects_to_delete = [{"Key": k} for k in batch]
resp = client.delete_objects(
Bucket=bucket_name,
Delete={"Objects": objects_to_delete, "Quiet": False},
)
deleted.extend(d["Key"] for d in resp.get("Deleted", []))
errors.extend({"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", []))
objects_to_delete = [{"Key": k} for k in unique_keys]
resp = client.delete_objects(
Bucket=bucket_name,
Delete={"Objects": objects_to_delete, "Quiet": False},
)
deleted = [d["Key"] for d in resp.get("Deleted", [])]
errors = [{"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", [])]
for key in deleted:
_replication_manager().trigger_replication(bucket_name, key, action="delete")
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
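The newer side batches deletions because the S3 `DeleteObjects` call accepts at most 1000 keys per request. A compact sketch of that batching:

```python
def delete_in_batches(client, bucket: str, keys: list[str]):
    # DeleteObjects caps each request at 1000 keys.
    deleted, errors = [], []
    for i in range(0, len(keys), 1000):
        batch = keys[i:i + 1000]
        resp = client.delete_objects(
            Bucket=bucket,
            Delete={"Objects": [{"Key": k} for k in batch], "Quiet": False},
        )
        deleted += [d["Key"] for d in resp.get("Deleted", [])]
        errors += resp.get("Errors", [])
    return deleted, errors
```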
@@ -4123,182 +4041,6 @@ def get_peer_sync_stats(site_id: str):
return jsonify(stats)
@ui_bp.get("/system")
def system_dashboard():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
flash("Access denied: System page requires admin permissions", "danger")
return redirect(url_for("ui.buckets_overview"))
import platform as _platform
import sys
from app.version import APP_VERSION
try:
import myfsio_core as _rc
has_rust = True
except ImportError:
has_rust = False
gc = current_app.extensions.get("gc")
gc_status = gc.get_status() if gc else {"enabled": False}
gc_history_records = []
if gc:
raw = gc.get_history(limit=10, offset=0)
for rec in raw:
r = rec.get("result", {})
total_freed = r.get("temp_bytes_freed", 0) + r.get("multipart_bytes_freed", 0) + r.get("orphaned_version_bytes_freed", 0)
rec["bytes_freed_display"] = _format_bytes(total_freed)
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
gc_history_records.append(rec)
checker = current_app.extensions.get("integrity")
integrity_status = checker.get_status() if checker else {"enabled": False}
integrity_history_records = []
if checker:
raw = checker.get_history(limit=10, offset=0)
for rec in raw:
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
integrity_history_records.append(rec)
features = [
{"label": "Encryption (SSE-S3)", "enabled": current_app.config.get("ENCRYPTION_ENABLED", False)},
{"label": "KMS", "enabled": current_app.config.get("KMS_ENABLED", False)},
{"label": "Versioning Lifecycle", "enabled": current_app.config.get("LIFECYCLE_ENABLED", False)},
{"label": "Metrics History", "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False)},
{"label": "Operation Metrics", "enabled": current_app.config.get("OPERATION_METRICS_ENABLED", False)},
{"label": "Site Sync", "enabled": current_app.config.get("SITE_SYNC_ENABLED", False)},
{"label": "Website Hosting", "enabled": current_app.config.get("WEBSITE_HOSTING_ENABLED", False)},
{"label": "Garbage Collection", "enabled": current_app.config.get("GC_ENABLED", False)},
{"label": "Integrity Scanner", "enabled": current_app.config.get("INTEGRITY_ENABLED", False)},
]
return render_template(
"system.html",
principal=principal,
app_version=APP_VERSION,
storage_root=current_app.config.get("STORAGE_ROOT", "./data"),
platform=_platform.platform(),
python_version=sys.version.split()[0],
has_rust=has_rust,
features=features,
gc_status=gc_status,
gc_history=gc_history_records,
integrity_status=integrity_status,
integrity_history=integrity_history_records,
display_timezone=current_app.config.get("DISPLAY_TIMEZONE", "UTC"),
)
@ui_bp.post("/system/gc/run")
def system_gc_run():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"error": "GC is not enabled"}), 400
payload = request.get_json(silent=True) or {}
started = gc.run_async(dry_run=payload.get("dry_run"))
if not started:
return jsonify({"error": "GC is already in progress"}), 409
return jsonify({"status": "started"})
@ui_bp.get("/system/gc/status")
def system_gc_status():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"error": "GC is not enabled"}), 400
return jsonify(gc.get_status())
@ui_bp.get("/system/gc/history")
def system_gc_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
gc = current_app.extensions.get("gc")
if not gc:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 10)), 200)
offset = int(request.args.get("offset", 0))
records = gc.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
@ui_bp.post("/system/integrity/run")
def system_integrity_run():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"error": "Integrity checker is not enabled"}), 400
payload = request.get_json(silent=True) or {}
started = checker.run_async(
auto_heal=payload.get("auto_heal"),
dry_run=payload.get("dry_run"),
)
if not started:
return jsonify({"error": "A scan is already in progress"}), 409
return jsonify({"status": "started"})
@ui_bp.get("/system/integrity/status")
def system_integrity_status():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"error": "Integrity checker is not enabled"}), 400
return jsonify(checker.get_status())
@ui_bp.get("/system/integrity/history")
def system_integrity_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:*")
except IamError:
return jsonify({"error": "Access denied"}), 403
checker = current_app.extensions.get("integrity")
if not checker:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 10)), 200)
offset = int(request.args.get("offset", 0))
records = checker.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
@ui_bp.app_errorhandler(404)
def ui_not_found(error): # type: ignore[override]
prefix = ui_bp.url_prefix or ""
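Every removed `/system` route repeats the same authorize-or-403 preamble. A sketch of that gate factored into a decorator factory (the `iam`, `current_principal`, and `IamError` parameters stand in for the app's own objects; the factory itself is hypothetical):

```python
from functools import wraps
from flask import jsonify

def admin_required(iam, current_principal, IamError):
    # Factory form of the authorize-or-403 preamble repeated above.
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            try:
                iam.authorize(current_principal(), None, "iam:*")
            except IamError:
                return jsonify({"error": "Access denied"}), 403
            return view(*args, **kwargs)
        return wrapper
    return decorator
```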

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.4.3"
APP_VERSION = "0.3.9"
def get_version() -> str:

docker-entrypoint.sh Normal file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
# Run both services using the python runner in production mode
exec python run.py --prod

docs.md

File diff suppressed because it is too large

View File

@@ -125,7 +125,7 @@ pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyRes
fs::write(&path_owned, serialized)
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
Ok(true)
Ok(false)
})
}

View File

@@ -1,14 +0,0 @@
# Deprecated Python Implementation
The Python implementation of MyFSIO is deprecated as of 2026-04-21.
The supported server runtime now lives in `../rust/myfsio-engine`; its Rust `myfsio-server` binary serves both the S3 API and the web UI. Keep this tree only for migration reference, compatibility checks, and legacy tests.
For normal development and operations, run:
```bash
cd ../rust/myfsio-engine
cargo run -p myfsio-server --
```
Do not add new product features to the Python implementation unless they are needed to unblock a migration or to compare behavior with the Rust server.

View File

@@ -1,4 +0,0 @@
#!/bin/sh
set -e
exec python run.py --prod

File diff suppressed because it is too large

View File

@@ -1,750 +0,0 @@
{% extends "base.html" %}
{% block title %}System - MyFSIO Console{% endblock %}
{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<p class="text-uppercase text-muted small mb-1">Administration</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
System
</h1>
<p class="text-muted mb-0 mt-1">Server information, feature flags, and maintenance tools.</p>
</div>
<div class="d-none d-md-block">
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">v{{ app_version }}</span>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5 0a.5.5 0 0 1 .5.5V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2A2.5 2.5 0 0 1 14 4.5h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14a2.5 2.5 0 0 1-2.5 2.5v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14A2.5 2.5 0 0 1 2 11.5H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2A2.5 2.5 0 0 1 4.5 2V.5A.5.5 0 0 1 5 0zm-.5 3A1.5 1.5 0 0 0 3 4.5v7A1.5 1.5 0 0 0 4.5 13h7a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 11.5 3h-7zM5 6.5A1.5 1.5 0 0 1 6.5 5h3A1.5 1.5 0 0 1 11 6.5v3A1.5 1.5 0 0 1 9.5 11h-3A1.5 1.5 0 0 1 5 9.5v-3zM6.5 6a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
</svg>
Server Information
</h5>
<p class="text-muted small mb-0">Runtime environment and configuration</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
<tr><td class="text-muted" style="width:40%">Version</td><td class="fw-medium">{{ app_version }}</td></tr>
<tr><td class="text-muted">Storage Root</td><td><code>{{ storage_root }}</code></td></tr>
<tr><td class="text-muted">Platform</td><td>{{ platform }}</td></tr>
<tr><td class="text-muted">Python</td><td>{{ python_version }}</td></tr>
<tr><td class="text-muted">Rust Extension</td><td>
{% if has_rust %}
<span class="badge bg-success bg-opacity-10 text-success">Loaded</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Not loaded</span>
{% endif %}
</td></tr>
</tbody>
</table>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M11.5 2a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM9.05 3a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0V3h9.05zM4.5 7a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM2.05 8a2.5 2.5 0 0 1 4.9 0H16v1H6.95a2.5 2.5 0 0 1-4.9 0H0V8h2.05zm9.45 4a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zm-2.45 1a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0v-1h9.05z"/>
</svg>
Feature Flags
</h5>
<p class="text-muted small mb-0">Features configured via environment variables</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
{% for feat in features %}
<tr>
<td class="text-muted" style="width:55%">{{ feat.label }}</td>
<td class="text-end">
{% if feat.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Enabled</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
Garbage Collection
</h5>
<p class="text-muted small mb-0">Clean up temporary files, orphaned uploads, and stale locks</p>
</div>
<div>
{% if gc_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if gc_status.enabled %}
<div class="d-flex gap-2 mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="gcRunBtn" onclick="runGC(false)">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Run Now
</button>
<button class="btn btn-outline-secondary btn-sm" id="gcDryRunBtn" onclick="runGC(true)">
Dry Run
</button>
</div>
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>GC in progress<span id="gcScanElapsed"></span></span>
</div>
</div>
<div id="gcResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="gcResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="gcResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('gcResult').classList.add('d-none')"></button>
</div>
<div id="gcResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ gc_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if gc_status.dry_run else "No" }}</div>
<div class="col-6 mb-1"><span class="text-muted">Temp max age:</span> {{ gc_status.temp_file_max_age_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Lock max age:</span> {{ gc_status.lock_file_max_age_hours }}h</div>
<div class="col-6"><span class="text-muted">Multipart max age:</span> {{ gc_status.multipart_max_age_days }}d</div>
</div>
</div>
<div id="gcHistoryContainer">
{% if gc_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Executions
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Cleaned</th>
<th class="text-center">Freed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in gc_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">
{% set r = exec.result %}
{{ (r.temp_files_deleted|d(0)) + (r.multipart_uploads_deleted|d(0)) + (r.lock_files_deleted|d(0)) + (r.orphaned_metadata_deleted|d(0)) + (r.orphaned_versions_deleted|d(0)) + (r.empty_dirs_removed|d(0)) }}
</td>
<td class="text-center">{{ exec.bytes_freed_display }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No executions recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
<p class="text-muted mb-1">Garbage collection is not enabled.</p>
<p class="text-muted small mb-0">Set <code>GC_ENABLED=true</code> to enable automatic cleanup.</p>
</div>
{% endif %}
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
Integrity Scanner
</h5>
<p class="text-muted small mb-0">Detect and heal corrupted objects, orphaned files, and metadata drift</p>
</div>
<div>
{% if integrity_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if integrity_status.enabled %}
<div class="d-flex gap-2 flex-wrap mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Scan Now
</button>
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
Scan &amp; Heal
</button>
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
Dry Run
</button>
</div>
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
</div>
</div>
<div id="integrityResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="integrityResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="integrityResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('integrityResult').classList.add('d-none')"></button>
</div>
<div id="integrityResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ integrity_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if integrity_status.dry_run else "No" }}</div>
<div class="col-6"><span class="text-muted">Batch size:</span> {{ integrity_status.batch_size }}</div>
<div class="col-6"><span class="text-muted">Auto-heal:</span> {{ "Yes" if integrity_status.auto_heal else "No" }}</div>
</div>
</div>
<div id="integrityHistoryContainer">
{% if integrity_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Scans
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Scanned</th>
<th class="text-center">Issues</th>
<th class="text-center">Healed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in integrity_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">{{ exec.result.objects_scanned|d(0) }}</td>
<td class="text-center">
{% set total_issues = (exec.result.corrupted_objects|d(0)) + (exec.result.orphaned_objects|d(0)) + (exec.result.phantom_metadata|d(0)) + (exec.result.stale_versions|d(0)) + (exec.result.etag_cache_inconsistencies|d(0)) + (exec.result.legacy_metadata_drifts|d(0)) %}
{% if total_issues > 0 %}
<span class="text-danger fw-medium">{{ total_issues }}</span>
{% else %}
<span class="text-success">0</span>
{% endif %}
</td>
<td class="text-center">{{ exec.result.issues_healed|d(0) }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>
{% elif exec.auto_heal %}
<span class="badge bg-success bg-opacity-10 text-success">Heal</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No scans recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
<p class="text-muted mb-1">Integrity scanner is not enabled.</p>
<p class="text-muted small mb-0">Set <code>INTEGRITY_ENABLED=true</code> to enable automatic scanning.</p>
</div>
{% endif %}
</div>
</div>
</div>
</div>
{% endblock %}
{% block extra_scripts %}
<script>
(function () {
var csrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
function setLoading(btnId, loading, spinnerOnly) {
var btn = document.getElementById(btnId);
if (!btn) return;
btn.disabled = loading;
if (loading && !spinnerOnly) {
btn.dataset.originalHtml = btn.innerHTML;
btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status"></span>Running...';
} else if (!loading && btn.dataset.originalHtml) {
btn.innerHTML = btn.dataset.originalHtml;
}
}
function formatBytes(bytes) {
if (!bytes || bytes === 0) return '0 B';
var units = ['B', 'KB', 'MB', 'GB'];
var i = 0;
var b = bytes;
while (b >= 1024 && i < units.length - 1) { b /= 1024; i++; }
return (i === 0 ? b : b.toFixed(1)) + ' ' + units[i];
}
var _displayTimezone = {{ display_timezone|tojson }};
function formatTimestamp(ts) {
var d = new Date(ts * 1000);
try {
var opts = {year: 'numeric', month: 'short', day: '2-digit', hour: '2-digit', minute: '2-digit', hour12: false, timeZone: _displayTimezone, timeZoneName: 'short'};
return d.toLocaleString('en-US', opts);
} catch (e) {
var pad = function (n) { return n < 10 ? '0' + n : '' + n; };
return d.getUTCFullYear() + '-' + pad(d.getUTCMonth() + 1) + '-' + pad(d.getUTCDate()) +
' ' + pad(d.getUTCHours()) + ':' + pad(d.getUTCMinutes()) + ' UTC';
}
}
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
function _gcRefreshHistory() {
fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('gcHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Executions</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
'<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
(r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
(r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
(r.orphaned_version_bytes_freed || 0);
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + cleaned + '</td>' +
'<td class="text-center">' + formatBytes(freed) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
function _integrityRefreshHistory() {
fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('integrityHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Scans</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
'<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
'<th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
(r.phantom_metadata || 0) + (r.stale_versions || 0) +
(r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
var issueHtml = issues > 0
? '<span class="text-danger fw-medium">' + issues + '</span>'
: '<span class="text-success">0</span>';
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
: (exec.auto_heal
? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
'<td class="text-center">' + issueHtml + '</td>' +
'<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
var _gcPollTimer = null;
var _gcLastDryRun = false;
function _gcSetScanning(scanning) {
var banner = document.getElementById('gcScanningBanner');
var btns = ['gcRunBtn', 'gcDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('gcScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
function _gcShowResult(data, dryRun) {
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
(data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
(data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
(data.orphaned_version_bytes_freed || 0);
alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
var lines = [];
if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
if (totalItems === 0) lines.push('Nothing to clean up.');
if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
function _gcPoll() {
fetch('{{ url_for("ui.system_gc_status") }}', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (status) {
if (status.scanning) {
var elapsed = status.scan_elapsed_seconds || 0;
document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
_gcPollTimer = setTimeout(_gcPoll, 2000);
} else {
_gcSetScanning(false);
_gcRefreshHistory();
fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
if (hist.executions && hist.executions.length > 0) {
var latest = hist.executions[0];
_gcShowResult(latest.result, latest.dry_run);
}
})
.catch(function () {});
}
})
.catch(function () {
_gcPollTimer = setTimeout(_gcPoll, 3000);
});
}
window.runGC = function (dryRun) {
_gcLastDryRun = dryRun;
document.getElementById('gcResult').classList.add('d-none');
_gcSetScanning(true);
fetch('{{ url_for("ui.system_gc_run") }}', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
body: JSON.stringify({dry_run: dryRun})
})
.then(function (r) { return r.json(); })
.then(function (data) {
if (data.error) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = data.error;
return;
}
_gcPollTimer = setTimeout(_gcPoll, 2000);
})
.catch(function (err) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = err.message;
});
};
{% if gc_status.scanning %}
_gcSetScanning(true);
_gcPollTimer = setTimeout(_gcPoll, 2000);
{% endif %}
var _integrityPollTimer = null;
var _integrityLastMode = {dryRun: false, autoHeal: false};
function _integritySetScanning(scanning) {
var banner = document.getElementById('integrityScanningBanner');
var btns = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('integrityScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
function _integrityShowResult(data, dryRun, autoHeal) {
    var container = document.getElementById('integrityResult');
    var alertEl = document.getElementById('integrityResultAlert');
    var title = document.getElementById('integrityResultTitle');
    var body = document.getElementById('integrityResultBody');
    container.classList.remove('d-none');
    var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
        (data.phantom_metadata || 0) + (data.stale_versions || 0) +
        (data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);
    var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
    alertEl.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
    title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
    var lines = [];
    lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
    if (totalIssues === 0) {
      lines.push('No issues found.');
    } else {
      if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
      if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
      if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
      if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
      if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
      if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
      if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
    }
    if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
    body.innerHTML = lines.join('<br>');
  }
  function _integrityShowError(message) {
    _integritySetScanning(false);
    var container = document.getElementById('integrityResult');
    document.getElementById('integrityResultAlert').className = 'alert alert-danger mb-0 small';
    document.getElementById('integrityResultTitle').textContent = 'Error';
    document.getElementById('integrityResultBody').textContent = message;
    container.classList.remove('d-none');
  }
  function _integrityPoll() {
    fetch('{{ url_for("ui.system_integrity_status") }}', {
      headers: {'X-CSRFToken': csrfToken}
    })
      .then(function (r) { return r.json(); })
      .then(function (status) {
        if (status.scanning) {
          var elapsed = status.scan_elapsed_seconds || 0;
          document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
          _integrityPollTimer = setTimeout(_integrityPoll, 2000);
        } else {
          _integritySetScanning(false);
          _integrityRefreshHistory();
          fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
            headers: {'X-CSRFToken': csrfToken}
          })
            .then(function (r) { return r.json(); })
            .then(function (hist) {
              if (hist.executions && hist.executions.length > 0) {
                var latest = hist.executions[0];
                _integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
              }
            })
            .catch(function () {});
        }
      })
      .catch(function () {
        _integrityPollTimer = setTimeout(_integrityPoll, 3000);
      });
  }
  window.runIntegrity = function (dryRun, autoHeal) {
    _integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
    document.getElementById('integrityResult').classList.add('d-none');
    _integritySetScanning(true);
    fetch('{{ url_for("ui.system_integrity_run") }}', {
      method: 'POST',
      headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
      body: JSON.stringify({dry_run: dryRun, auto_heal: autoHeal})
    })
      .then(function (r) { return r.json(); })
      .then(function (data) {
        if (data.error) {
          _integrityShowError(data.error);
          return;
        }
        _integrityPollTimer = setTimeout(_integrityPoll, 2000);
      })
      .catch(function (err) {
        _integrityShowError(err.message);
      });
  };
{% if integrity_status.scanning %}
  _integritySetScanning(true);
  _integrityPollTimer = setTimeout(_integrityPoll, 2000);
{% endif %}
})();
</script>
{% endblock %}
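
For reference, the status payload the poll loop above consumes looks roughly like the following; this is a hedged mirror of only the fields the script reads (field names come from the JavaScript, the types are assumptions):

// Assumed shape of the integrity-status JSON polled above; only the two
// fields the script actually reads are modeled here.
#[derive(serde::Deserialize)]
struct IntegrityStatus {
    scanning: bool,
    #[serde(default)]
    scan_elapsed_seconds: f64,
}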

View File

@@ -1,4 +1,4 @@
-Flask>=3.1.3
+Flask>=3.1.2
 Flask-Limiter>=4.1.1
 Flask-Cors>=6.0.2
 Flask-WTF>=1.2.2
@@ -6,8 +6,8 @@ python-dotenv>=1.2.1
 pytest>=9.0.2
 requests>=2.32.5
 boto3>=1.42.14
-granian>=2.7.2
-psutil>=7.2.2
-cryptography>=46.0.5
+waitress>=3.0.2
+psutil>=7.1.3
+cryptography>=46.0.3
 defusedxml>=0.7.1
-duckdb>=1.5.1
+duckdb>=1.4.4

View File

@@ -2,9 +2,7 @@
 from __future__ import annotations
 import argparse
-import atexit
 import os
-import signal
 import sys
 import warnings
 import multiprocessing
@@ -26,12 +24,6 @@ from typing import Optional
 from app import create_api_app, create_ui_app
 from app.config import AppConfig
 from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key
-from app.version import get_version
-PYTHON_DEPRECATION_MESSAGE = (
-    "The Python MyFSIO runtime is deprecated as of 2026-04-21. "
-    "Use the Rust server in rust/myfsio-engine for supported development and production usage."
-)
 def _server_host() -> str:
@@ -48,42 +40,24 @@ def _is_frozen() -> bool:
     return getattr(sys, 'frozen', False) or '__compiled__' in globals()
-def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
-    from granian import Granian
-    from granian.constants import Interfaces
-    from granian.http import HTTP1Settings
-    kwargs: dict = {
-        "target": target,
-        "address": _server_host(),
-        "port": port,
-        "interface": Interfaces.WSGI,
-        "factory": True,
-        "workers": 1,
-    }
-    if config:
-        kwargs["blocking_threads"] = config.server_threads
-        kwargs["backlog"] = config.server_backlog
-        kwargs["backpressure"] = config.server_connection_limit
-        kwargs["http1_settings"] = HTTP1Settings(
-            header_read_timeout=config.server_channel_timeout * 1000,
-            max_buffer_size=config.server_max_buffer_size,
-        )
-    else:
-        kwargs["http1_settings"] = HTTP1Settings(
-            max_buffer_size=1024 * 1024 * 128,
-        )
-    server = Granian(**kwargs)
-    server.serve()
 def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
+    app = create_api_app()
     if prod:
-        _serve_granian("app:create_api_app", port, config)
+        from waitress import serve
+        if config:
+            serve(
+                app,
+                host=_server_host(),
+                port=port,
+                ident="MyFSIO",
+                threads=config.server_threads,
+                connection_limit=config.server_connection_limit,
+                backlog=config.server_backlog,
+                channel_timeout=config.server_channel_timeout,
+            )
+        else:
+            serve(app, host=_server_host(), port=port, ident="MyFSIO")
     else:
-        app = create_api_app()
         debug = _is_debug_enabled()
         if debug:
             warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -91,10 +65,23 @@ def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None)
 def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
+    app = create_ui_app()
     if prod:
-        _serve_granian("app:create_ui_app", port, config)
+        from waitress import serve
+        if config:
+            serve(
+                app,
+                host=_server_host(),
+                port=port,
+                ident="MyFSIO",
+                threads=config.server_threads,
+                connection_limit=config.server_connection_limit,
+                backlog=config.server_backlog,
+                channel_timeout=config.server_channel_timeout,
+            )
+        else:
+            serve(app, host=_server_host(), port=port, ident="MyFSIO")
     else:
-        app = create_ui_app()
         debug = _is_debug_enabled()
         if debug:
             warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -139,7 +126,6 @@ def reset_credentials() -> None:
         pass
     if raw_config and raw_config.get("users"):
-        is_v2 = raw_config.get("version", 1) >= 2
         admin_user = None
         for user in raw_config["users"]:
             policies = user.get("policies", [])
@@ -153,39 +139,15 @@
         if not admin_user:
             admin_user = raw_config["users"][0]
-        if is_v2:
-            admin_keys = admin_user.get("access_keys", [])
-            if admin_keys:
-                admin_keys[0]["access_key"] = access_key
-                admin_keys[0]["secret_key"] = secret_key
-            else:
-                from datetime import datetime as _dt, timezone as _tz
-                admin_user["access_keys"] = [{
-                    "access_key": access_key,
-                    "secret_key": secret_key,
-                    "status": "active",
-                    "created_at": _dt.now(_tz.utc).isoformat(),
-                }]
-        else:
-            admin_user["access_key"] = access_key
-            admin_user["secret_key"] = secret_key
+        admin_user["access_key"] = access_key
+        admin_user["secret_key"] = secret_key
     else:
-        from datetime import datetime as _dt, timezone as _tz
         raw_config = {
-            "version": 2,
             "users": [
                 {
-                    "user_id": f"u-{secrets.token_hex(8)}",
                     "access_key": access_key,
                     "secret_key": secret_key,
                     "display_name": "Local Admin",
                     "enabled": True,
-                    "access_keys": [
-                        {
-                            "access_key": access_key,
-                            "secret_key": secret_key,
-                            "status": "active",
-                            "created_at": _dt.now(_tz.utc).isoformat(),
-                        }
-                    ],
                     "policies": [
                         {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                     ],
@@ -230,16 +192,13 @@ if __name__ == "__main__":
     parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
     parser.add_argument("--api-port", type=int, default=5000)
     parser.add_argument("--ui-port", type=int, default=5100)
-    parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
+    parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
     parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
     parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
    parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
     parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
-    parser.add_argument("--version", action="version", version=f"MyFSIO {get_version()}")
     args = parser.parse_args()
-    warnings.warn(PYTHON_DEPRECATION_MESSAGE, DeprecationWarning, stacklevel=1)
     if args.reset_cred or args.mode == "reset-cred":
         reset_credentials()
         sys.exit(0)
@@ -276,7 +235,7 @@
         pass
     if prod_mode:
-        print("Running in production mode (Granian)")
+        print("Running in production mode (Waitress)")
     issues = config.validate_and_report()
     critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
     if critical_issues:
@@ -289,22 +248,11 @@
     if args.mode in {"api", "both"}:
         print(f"Starting API server on port {args.api_port}...")
-        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
+        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
        api_proc.start()
     else:
         api_proc = None
-    def _cleanup_api():
-        if api_proc and api_proc.is_alive():
-            api_proc.terminate()
-            api_proc.join(timeout=5)
-            if api_proc.is_alive():
-                api_proc.kill()
-    if api_proc:
-        atexit.register(_cleanup_api)
-    signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
     if args.mode in {"ui", "both"}:
         print(f"Starting UI server on port {args.ui_port}...")
         serve_ui(args.ui_port, prod_mode, config)

View File

@@ -1,9 +0,0 @@
.git
.gitignore
logs
data
tmp
myfsio-engine/target
myfsio-engine/tests
Dockerfile
.dockerignore

View File

@@ -1,50 +0,0 @@
FROM rust:1-slim-bookworm AS builder
WORKDIR /build
RUN apt-get update \
&& apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev \
&& rm -rf /var/lib/apt/lists/*
COPY myfsio-engine ./myfsio-engine
RUN cd myfsio-engine \
&& cargo build --release --bin myfsio-server \
&& strip target/release/myfsio-server
FROM debian:bookworm-slim
WORKDIR /app
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates curl \
&& rm -rf /var/lib/apt/lists/* \
&& mkdir -p /app/data \
&& useradd -m -u 1000 myfsio \
&& chown -R myfsio:myfsio /app
COPY --from=builder /build/myfsio-engine/target/release/myfsio-server /usr/local/bin/myfsio-server
COPY --from=builder /build/myfsio-engine/crates/myfsio-server/templates /app/templates
COPY --from=builder /build/myfsio-engine/crates/myfsio-server/static /app/static
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
RUN chmod +x /app/docker-entrypoint.sh \
&& chown -R myfsio:myfsio /app
USER myfsio
EXPOSE 5000
EXPOSE 5100
ENV HOST=0.0.0.0 \
PORT=5000 \
UI_PORT=5100 \
STORAGE_ROOT=/app/data \
TEMPLATES_DIR=/app/templates \
STATIC_DIR=/app/static \
RUST_LOG=info
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -fsS "http://localhost:${PORT}/myfsio/health" || exit 1
CMD ["/app/docker-entrypoint.sh"]

View File

@@ -1,4 +0,0 @@
#!/bin/sh
set -e
exec /usr/local/bin/myfsio-server

File diff suppressed because it is too large

View File

@@ -1,61 +0,0 @@
[workspace]
resolver = "2"
members = [
"crates/myfsio-common",
"crates/myfsio-auth",
"crates/myfsio-crypto",
"crates/myfsio-storage",
"crates/myfsio-xml",
"crates/myfsio-server",
]
[workspace.package]
version = "0.4.3"
edition = "2021"
[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
axum = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip"] }
hyper = { version = "1" }
bytes = "1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
quick-xml = { version = "0.37", features = ["serialize"] }
hmac = "0.12"
sha2 = "0.10"
md-5 = "0.10"
hex = "0.4"
aes = "0.8"
aes-gcm = "0.10"
cbc = { version = "0.1", features = ["alloc"] }
hkdf = "0.12"
uuid = { version = "1", features = ["v4"] }
parking_lot = "0.12"
lru = "0.14"
percent-encoding = "2"
regex = "1"
unicode-normalization = "0.1"
tracing = "0.1"
tracing-subscriber = "0.3"
thiserror = "2"
chrono = { version = "0.4", features = ["serde"] }
base64 = "0.22"
tokio-util = { version = "0.7", features = ["io"] }
futures = "0.3"
dashmap = "6"
crc32fast = "1"
duckdb = { version = "1", features = ["bundled"] }
reqwest = { version = "0.12", default-features = false, features = ["stream", "rustls-tls", "json"] }
aws-sdk-s3 = { version = "1", features = ["behavior-version-latest", "rt-tokio"] }
aws-config = { version = "1", features = ["behavior-version-latest"] }
aws-credential-types = "1"
aws-smithy-runtime-api = "1"
aws-smithy-types = "1"
async-trait = "0.1"
tera = "1"
cookie = "0.18"
subtle = "2"
clap = { version = "4", features = ["derive"] }
dotenvy = "0.15"

View File

@@ -1,27 +0,0 @@
[package]
name = "myfsio-auth"
version.workspace = true
edition.workspace = true
[dependencies]
myfsio-common = { path = "../myfsio-common" }
hmac = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
aes = { workspace = true }
cbc = { workspace = true }
base64 = { workspace = true }
pbkdf2 = "0.12"
rand = "0.8"
lru = { workspace = true }
parking_lot = { workspace = true }
percent-encoding = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
chrono = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }
[dev-dependencies]
tempfile = "3"

View File

@@ -1,118 +0,0 @@
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, BlockEncryptMut, KeyIvInit};
use base64::{engine::general_purpose::URL_SAFE, Engine};
use hmac::{Hmac, Mac};
use rand::RngCore;
use sha2::Sha256;
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;
type HmacSha256 = Hmac<Sha256>;
pub fn derive_fernet_key(secret: &str) -> String {
let mut derived = [0u8; 32];
pbkdf2::pbkdf2_hmac::<Sha256>(
secret.as_bytes(),
b"myfsio-iam-encryption",
100_000,
&mut derived,
);
URL_SAFE.encode(derived)
}
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
let key_bytes = URL_SAFE
.decode(key_b64)
.map_err(|_| "invalid fernet key base64")?;
if key_bytes.len() != 32 {
return Err("fernet key must be 32 bytes");
}
let signing_key = &key_bytes[..16];
let encryption_key = &key_bytes[16..];
let token_bytes = URL_SAFE
.decode(token)
.map_err(|_| "invalid fernet token base64")?;
if token_bytes.len() < 57 {
return Err("fernet token too short");
}
if token_bytes[0] != 0x80 {
return Err("invalid fernet version");
}
let hmac_offset = token_bytes.len() - 32;
let payload = &token_bytes[..hmac_offset];
let expected_hmac = &token_bytes[hmac_offset..];
let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
mac.update(payload);
mac.verify_slice(expected_hmac)
.map_err(|_| "HMAC verification failed")?;
let iv = &token_bytes[9..25];
let ciphertext = &token_bytes[25..hmac_offset];
let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
.decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
.map_err(|_| "AES-CBC decryption failed")?;
Ok(plaintext)
}
pub fn encrypt(key_b64: &str, plaintext: &[u8]) -> Result<String, &'static str> {
let key_bytes = URL_SAFE
.decode(key_b64)
.map_err(|_| "invalid fernet key base64")?;
if key_bytes.len() != 32 {
return Err("fernet key must be 32 bytes");
}
let signing_key = &key_bytes[..16];
let encryption_key = &key_bytes[16..];
let mut iv = [0u8; 16];
rand::thread_rng().fill_bytes(&mut iv);
let timestamp = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map_err(|_| "system time error")?
.as_secs();
let ciphertext = Aes128CbcEnc::new(encryption_key.into(), (&iv).into())
.encrypt_padded_vec_mut::<Pkcs7>(plaintext);
let mut payload = Vec::with_capacity(1 + 8 + 16 + ciphertext.len());
payload.push(0x80);
payload.extend_from_slice(&timestamp.to_be_bytes());
payload.extend_from_slice(&iv);
payload.extend_from_slice(&ciphertext);
let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
mac.update(&payload);
let tag = mac.finalize().into_bytes();
let mut token_bytes = payload;
token_bytes.extend_from_slice(&tag);
Ok(URL_SAFE.encode(&token_bytes))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_derive_fernet_key_format() {
let key = derive_fernet_key("test-secret");
let decoded = URL_SAFE.decode(&key).unwrap();
assert_eq!(decoded.len(), 32);
}
#[test]
fn test_roundtrip_with_python_compat() {
let key = derive_fernet_key("dev-secret-key");
let decoded = URL_SAFE.decode(&key).unwrap();
assert_eq!(decoded.len(), 32);
}
}
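
The tests above only check the derived key's length; for completeness, a minimal sketch of the intended round trip through this module's encrypt/decrypt pair (hypothetical usage, not part of the original file):

// Hypothetical usage sketch: round-trips bytes through the Fernet-compatible
// pair defined above. Not part of the original module.
fn fernet_roundtrip_demo() -> Result<(), &'static str> {
    let key = derive_fernet_key("dev-secret-key");
    let token = encrypt(&key, b"iam-config-payload")?;
    let plain = decrypt(&key, &token)?;
    assert_eq!(plain, b"iam-config-payload");
    Ok(())
}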

File diff suppressed because it is too large

View File

@@ -1,4 +0,0 @@
mod fernet;
pub mod iam;
pub mod principal;
pub mod sigv4;

View File

@@ -1 +0,0 @@
pub use myfsio_common::types::Principal;

View File

@@ -1,287 +0,0 @@
use hmac::{Hmac, Mac};
use lru::LruCache;
use parking_lot::Mutex;
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
use sha2::{Digest, Sha256};
use std::num::NonZeroUsize;
use std::sync::LazyLock;
use std::time::Instant;
type HmacSha256 = Hmac<Sha256>;
struct CacheEntry {
key: Vec<u8>,
created: Instant,
}
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
const CACHE_TTL_SECS: u64 = 60;
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
.remove(b'-')
.remove(b'_')
.remove(b'.')
.remove(b'~');
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
mac.update(msg);
mac.finalize().into_bytes().to_vec()
}
fn sha256_hex(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
hex::encode(hasher.finalize())
}
fn aws_uri_encode(input: &str) -> String {
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}
pub fn derive_signing_key_cached(
secret_key: &str,
date_stamp: &str,
region: &str,
service: &str,
) -> Vec<u8> {
let cache_key = (
secret_key.to_owned(),
date_stamp.to_owned(),
region.to_owned(),
service.to_owned(),
);
{
let mut cache = SIGNING_KEY_CACHE.lock();
if let Some(entry) = cache.get(&cache_key) {
if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
return entry.key.clone();
}
cache.pop(&cache_key);
}
}
let k_date = hmac_sha256(
format!("AWS4{}", secret_key).as_bytes(),
date_stamp.as_bytes(),
);
let k_region = hmac_sha256(&k_date, region.as_bytes());
let k_service = hmac_sha256(&k_region, service.as_bytes());
let k_signing = hmac_sha256(&k_service, b"aws4_request");
{
let mut cache = SIGNING_KEY_CACHE.lock();
cache.put(
cache_key,
CacheEntry {
key: k_signing.clone(),
created: Instant::now(),
},
);
}
k_signing
}
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
if a.len() != b.len() {
return false;
}
let mut result: u8 = 0;
for (x, y) in a.iter().zip(b.iter()) {
result |= x ^ y;
}
result == 0
}
pub fn verify_sigv4_signature(
method: &str,
canonical_uri: &str,
query_params: &[(String, String)],
signed_headers_str: &str,
header_values: &[(String, String)],
payload_hash: &str,
amz_date: &str,
date_stamp: &str,
region: &str,
service: &str,
secret_key: &str,
provided_signature: &str,
) -> bool {
let mut sorted_params = query_params.to_vec();
sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
let canonical_query_string = sorted_params
.iter()
.map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
.collect::<Vec<_>>()
.join("&");
let mut canonical_headers = String::new();
for (name, value) in header_values {
let lower_name = name.to_lowercase();
let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
let final_value = if lower_name == "expect" && normalized.is_empty() {
"100-continue"
} else {
&normalized
};
canonical_headers.push_str(&lower_name);
canonical_headers.push(':');
canonical_headers.push_str(final_value);
canonical_headers.push('\n');
}
let canonical_request = format!(
"{}\n{}\n{}\n{}\n{}\n{}",
method,
canonical_uri,
canonical_query_string,
canonical_headers,
signed_headers_str,
payload_hash
);
let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
let cr_hash = sha256_hex(canonical_request.as_bytes());
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date, credential_scope, cr_hash
);
let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
let calculated_hex = hex::encode(&calculated);
constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
pub fn derive_signing_key(
secret_key: &str,
date_stamp: &str,
region: &str,
service: &str,
) -> Vec<u8> {
derive_signing_key_cached(secret_key, date_stamp, region, service)
}
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
hex::encode(sig)
}
pub fn compute_post_policy_signature(signing_key: &[u8], policy_b64: &str) -> String {
let sig = hmac_sha256(signing_key, policy_b64.as_bytes());
hex::encode(sig)
}
pub fn build_string_to_sign(
amz_date: &str,
credential_scope: &str,
canonical_request: &str,
) -> String {
let cr_hash = sha256_hex(canonical_request.as_bytes());
format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date, credential_scope, cr_hash
)
}
pub fn constant_time_compare(a: &str, b: &str) -> bool {
constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}
pub fn clear_signing_key_cache() {
SIGNING_KEY_CACHE.lock().clear();
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_derive_signing_key() {
let key = derive_signing_key(
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
"20130524",
"us-east-1",
"s3",
);
assert_eq!(key.len(), 32);
}
#[test]
fn test_derive_signing_key_cached() {
let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
assert_eq!(key1, key2);
}
#[test]
fn test_constant_time_compare() {
assert!(constant_time_compare("abc", "abc"));
assert!(!constant_time_compare("abc", "abd"));
assert!(!constant_time_compare("abc", "abcd"));
}
#[test]
fn test_build_string_to_sign() {
let result = build_string_to_sign(
"20130524T000000Z",
"20130524/us-east-1/s3/aws4_request",
"GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD",
);
assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
assert!(result.contains("20130524T000000Z"));
}
#[test]
fn test_aws_uri_encode() {
assert_eq!(aws_uri_encode("hello world"), "hello%20world");
assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
}
#[test]
fn test_verify_sigv4_roundtrip() {
let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
let date_stamp = "20130524";
let region = "us-east-1";
let service = "s3";
let amz_date = "20130524T000000Z";
let signing_key = derive_signing_key(secret, date_stamp, region, service);
let canonical_request =
"GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
let string_to_sign = build_string_to_sign(
amz_date,
&format!("{}/{}/{}/aws4_request", date_stamp, region, service),
canonical_request,
);
let signature = compute_signature(&signing_key, &string_to_sign);
let result = verify_sigv4_signature(
"GET",
"/",
&[],
"host",
&[(
"host".to_string(),
"examplebucket.s3.amazonaws.com".to_string(),
)],
"UNSIGNED-PAYLOAD",
amz_date,
date_stamp,
region,
service,
secret,
&signature,
);
assert!(result);
}
}
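
verify_sigv4_signature expects its inputs already split apart; a hedged glue sketch (the struct and function names here are assumptions, the crate's real server-side parsing is not shown in this diff) of pulling them out of an Authorization header:

// Hedged sketch, not part of this crate: extracts the inputs that
// verify_sigv4_signature needs from a header of the form
//   AWS4-HMAC-SHA256 Credential=AKID/20130524/us-east-1/s3/aws4_request,
//   SignedHeaders=host;x-amz-date, Signature=<hex>
struct ParsedAuth {
    access_key: String,
    date_stamp: String,
    region: String,
    service: String,
    signed_headers: String,
    signature: String,
}

fn parse_authorization(header: &str) -> Option<ParsedAuth> {
    let rest = header.strip_prefix("AWS4-HMAC-SHA256 ")?;
    let (mut credential, mut signed_headers, mut signature) = (None, None, None);
    for part in rest.split(',').map(str::trim) {
        if let Some(v) = part.strip_prefix("Credential=") {
            credential = Some(v);
        } else if let Some(v) = part.strip_prefix("SignedHeaders=") {
            signed_headers = Some(v);
        } else if let Some(v) = part.strip_prefix("Signature=") {
            signature = Some(v);
        }
    }
    // Credential scope is <access-key>/<date>/<region>/<service>/aws4_request.
    let mut scope = credential?.split('/');
    Some(ParsedAuth {
        access_key: scope.next()?.to_string(),
        date_stamp: scope.next()?.to_string(),
        region: scope.next()?.to_string(),
        service: scope.next()?.to_string(),
        signed_headers: signed_headers?.to_string(),
        signature: signature?.to_string(),
    })
}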

View File

@@ -1,11 +0,0 @@
[package]
name = "myfsio-common"
version.workspace = true
edition.workspace = true
[dependencies]
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }

View File

@@ -1,20 +0,0 @@
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
pub const BUCKET_META_DIR: &str = "meta";
pub const BUCKET_VERSIONS_DIR: &str = "versions";
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
pub const STATS_FILE: &str = "stats.json";
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
pub const INDEX_FILE: &str = "_index.json";
pub const MANIFEST_FILE: &str = "manifest.json";
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
pub const DEFAULT_REGION: &str = "us-east-1";
pub const AWS_SERVICE: &str = "s3";
pub const DEFAULT_MAX_KEYS: usize = 1000;
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;

View File

@@ -1,233 +0,0 @@
use std::fmt;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S3ErrorCode {
AccessDenied,
BadDigest,
BucketAlreadyExists,
BucketNotEmpty,
EntityTooLarge,
InternalError,
InvalidAccessKeyId,
InvalidArgument,
InvalidBucketName,
InvalidKey,
InvalidPolicyDocument,
InvalidRange,
InvalidRequest,
InvalidTag,
MalformedXML,
MethodNotAllowed,
NoSuchBucket,
NoSuchKey,
NoSuchUpload,
NoSuchVersion,
NoSuchTagSet,
PreconditionFailed,
NotModified,
QuotaExceeded,
SignatureDoesNotMatch,
SlowDown,
}
impl S3ErrorCode {
pub fn http_status(&self) -> u16 {
match self {
Self::AccessDenied => 403,
Self::BadDigest => 400,
Self::BucketAlreadyExists => 409,
Self::BucketNotEmpty => 409,
Self::EntityTooLarge => 413,
Self::InternalError => 500,
Self::InvalidAccessKeyId => 403,
Self::InvalidArgument => 400,
Self::InvalidBucketName => 400,
Self::InvalidKey => 400,
Self::InvalidPolicyDocument => 400,
Self::InvalidRange => 416,
Self::InvalidRequest => 400,
Self::InvalidTag => 400,
Self::MalformedXML => 400,
Self::MethodNotAllowed => 405,
Self::NoSuchBucket => 404,
Self::NoSuchKey => 404,
Self::NoSuchUpload => 404,
Self::NoSuchVersion => 404,
Self::NoSuchTagSet => 404,
Self::PreconditionFailed => 412,
Self::NotModified => 304,
Self::QuotaExceeded => 403,
Self::SignatureDoesNotMatch => 403,
Self::SlowDown => 429,
}
}
pub fn as_str(&self) -> &'static str {
match self {
Self::AccessDenied => "AccessDenied",
Self::BadDigest => "BadDigest",
Self::BucketAlreadyExists => "BucketAlreadyExists",
Self::BucketNotEmpty => "BucketNotEmpty",
Self::EntityTooLarge => "EntityTooLarge",
Self::InternalError => "InternalError",
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
Self::InvalidArgument => "InvalidArgument",
Self::InvalidBucketName => "InvalidBucketName",
Self::InvalidKey => "InvalidKey",
Self::InvalidPolicyDocument => "InvalidPolicyDocument",
Self::InvalidRange => "InvalidRange",
Self::InvalidRequest => "InvalidRequest",
Self::InvalidTag => "InvalidTag",
Self::MalformedXML => "MalformedXML",
Self::MethodNotAllowed => "MethodNotAllowed",
Self::NoSuchBucket => "NoSuchBucket",
Self::NoSuchKey => "NoSuchKey",
Self::NoSuchUpload => "NoSuchUpload",
Self::NoSuchVersion => "NoSuchVersion",
Self::NoSuchTagSet => "NoSuchTagSet",
Self::PreconditionFailed => "PreconditionFailed",
Self::NotModified => "NotModified",
Self::QuotaExceeded => "QuotaExceeded",
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
Self::SlowDown => "SlowDown",
}
}
pub fn default_message(&self) -> &'static str {
match self {
Self::AccessDenied => "Access Denied",
Self::BadDigest => "The Content-MD5 or checksum value you specified did not match what we received",
Self::BucketAlreadyExists => "The requested bucket name is not available",
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
Self::InternalError => "We encountered an internal error. Please try again.",
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
Self::InvalidArgument => "Invalid argument",
Self::InvalidBucketName => "The specified bucket is not valid",
Self::InvalidKey => "The specified key is not valid",
Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
Self::InvalidRange => "The requested range is not satisfiable",
Self::InvalidRequest => "Invalid request",
Self::InvalidTag => "The Tagging header is invalid",
Self::MalformedXML => "The XML you provided was not well-formed",
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
Self::NoSuchBucket => "The specified bucket does not exist",
Self::NoSuchKey => "The specified key does not exist",
Self::NoSuchUpload => "The specified multipart upload does not exist",
Self::NoSuchVersion => "The specified version does not exist",
Self::NoSuchTagSet => "The TagSet does not exist",
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
Self::NotModified => "Not Modified",
Self::QuotaExceeded => "The bucket quota has been exceeded",
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
Self::SlowDown => "Please reduce your request rate",
}
}
}
impl fmt::Display for S3ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
#[derive(Debug, Clone)]
pub struct S3Error {
pub code: S3ErrorCode,
pub message: String,
pub resource: String,
pub request_id: String,
}
impl S3Error {
pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
Self {
code,
message: message.into(),
resource: String::new(),
request_id: String::new(),
}
}
pub fn from_code(code: S3ErrorCode) -> Self {
Self::new(code, code.default_message())
}
pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
self.resource = resource.into();
self
}
pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
self.request_id = request_id.into();
self
}
pub fn http_status(&self) -> u16 {
self.code.http_status()
}
pub fn to_xml(&self) -> String {
format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<Error>\
<Code>{}</Code>\
<Message>{}</Message>\
<Resource>{}</Resource>\
<RequestId>{}</RequestId>\
</Error>",
self.code.as_str(),
xml_escape(&self.message),
xml_escape(&self.resource),
xml_escape(&self.request_id),
)
}
}
impl fmt::Display for S3Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}: {}", self.code, self.message)
}
}
impl std::error::Error for S3Error {}
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_error_codes() {
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
}
#[test]
fn test_error_to_xml() {
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
.with_resource("/test-bucket/test-key")
.with_request_id("abc123");
let xml = err.to_xml();
assert!(xml.contains("<Code>NoSuchKey</Code>"));
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
assert!(xml.contains("<RequestId>abc123</RequestId>"));
}
#[test]
fn test_xml_escape() {
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
.with_resource("/bucket/key&amp");
let xml = err.to_xml();
assert!(xml.contains("&lt;test&gt;"));
assert!(xml.contains("&amp;"));
}
}
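
How a handler layer surfaces these errors is outside this file; a minimal sketch (the framework wiring is assumed, not shown in this diff):

// Hedged sketch: mapping an S3Error onto a bare (status, headers, body)
// triple; the real server adapts this to its HTTP framework's response type.
fn to_http_response(err: &S3Error) -> (u16, Vec<(&'static str, String)>, String) {
    let headers = vec![("Content-Type", "application/xml".to_string())];
    (err.http_status(), headers, err.to_xml())
}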

View File

@@ -1,3 +0,0 @@
pub mod constants;
pub mod error;
pub mod types;

View File

@@ -1,178 +0,0 @@
use std::collections::HashMap;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub etag: Option<String>,
pub content_type: Option<String>,
pub storage_class: Option<String>,
pub metadata: HashMap<String, String>,
}
impl ObjectMeta {
pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
Self {
key,
size,
last_modified,
etag: None,
content_type: None,
storage_class: Some("STANDARD".to_string()),
metadata: HashMap::new(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
pub name: String,
pub creation_date: DateTime<Utc>,
}
#[derive(Debug, Clone, Default)]
pub struct BucketStats {
pub objects: u64,
pub bytes: u64,
pub version_count: u64,
pub version_bytes: u64,
}
impl BucketStats {
pub fn total_objects(&self) -> u64 {
self.objects + self.version_count
}
pub fn total_bytes(&self) -> u64 {
self.bytes + self.version_bytes
}
}
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
pub objects: Vec<ObjectMeta>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ShallowListResult {
pub objects: Vec<ObjectMeta>,
pub common_prefixes: Vec<String>,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
}
#[derive(Debug, Clone, Default)]
pub struct ListParams {
pub max_keys: usize,
pub continuation_token: Option<String>,
pub prefix: Option<String>,
pub start_after: Option<String>,
}
#[derive(Debug, Clone, Default)]
pub struct ShallowListParams {
pub prefix: String,
pub delimiter: String,
pub max_keys: usize,
pub continuation_token: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PartMeta {
pub part_number: u32,
pub etag: String,
pub size: u64,
pub last_modified: Option<DateTime<Utc>>,
}
#[derive(Debug, Clone)]
pub struct PartInfo {
pub part_number: u32,
pub etag: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultipartUploadInfo {
pub upload_id: String,
pub key: String,
pub initiated: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
pub version_id: String,
pub key: String,
pub size: u64,
pub last_modified: DateTime<Utc>,
pub etag: Option<String>,
pub is_latest: bool,
#[serde(default)]
pub is_delete_marker: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
pub key: String,
pub value: String,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
#[serde(default)]
pub versioning_enabled: bool,
#[serde(default)]
pub tags: Vec<Tag>,
#[serde(default)]
pub cors: Option<serde_json::Value>,
#[serde(default)]
pub encryption: Option<serde_json::Value>,
#[serde(default)]
pub lifecycle: Option<serde_json::Value>,
#[serde(default)]
pub website: Option<serde_json::Value>,
#[serde(default)]
pub quota: Option<QuotaConfig>,
#[serde(default)]
pub acl: Option<serde_json::Value>,
#[serde(default)]
pub notification: Option<serde_json::Value>,
#[serde(default)]
pub logging: Option<serde_json::Value>,
#[serde(default)]
pub object_lock: Option<serde_json::Value>,
#[serde(default)]
pub policy: Option<serde_json::Value>,
#[serde(default)]
pub replication: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
pub max_bytes: Option<u64>,
pub max_objects: Option<u64>,
}
#[derive(Debug, Clone)]
pub struct Principal {
pub access_key: String,
pub user_id: String,
pub display_name: String,
pub is_admin: bool,
}
impl Principal {
pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
Self {
access_key,
user_id,
display_name,
is_admin,
}
}
}
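
Every optional section of BucketConfig carries #[serde(default)], so a sparse .bucket.json loads without error; an illustrative sketch (the function itself is not part of the original file):

// Illustrative only: omitted sections fall back to their defaults when a
// partial .bucket.json is deserialized.
fn parse_partial_bucket_config() -> BucketConfig {
    let raw = r#"{"versioning_enabled": true}"#;
    let cfg: BucketConfig = serde_json::from_str(raw).expect("valid JSON");
    assert!(cfg.versioning_enabled);
    assert!(cfg.tags.is_empty());
    assert!(cfg.quota.is_none());
    cfg
}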

View File

@@ -1,24 +0,0 @@
[package]
name = "myfsio-crypto"
version.workspace = true
edition.workspace = true
[dependencies]
myfsio-common = { path = "../myfsio-common" }
md-5 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
aes-gcm = { workspace = true }
hkdf = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
base64 = { workspace = true }
rand = "0.8"
[dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tempfile = "3"

View File

@@ -1,253 +0,0 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use hkdf::Hkdf;
use sha2::Sha256;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use thiserror::Error;
const DEFAULT_CHUNK_SIZE: usize = 65536;
const HEADER_SIZE: usize = 4;
#[derive(Debug, Error)]
pub enum CryptoError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Invalid key size: expected 32 bytes, got {0}")]
InvalidKeySize(usize),
#[error("Invalid nonce size: expected 12 bytes, got {0}")]
InvalidNonceSize(usize),
#[error("Encryption failed: {0}")]
EncryptionFailed(String),
#[error("Decryption failed at chunk {0}")]
DecryptionFailed(u32),
#[error("HKDF expand failed: {0}")]
HkdfFailed(String),
}
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
let mut filled = 0;
while filled < buf.len() {
match reader.read(&mut buf[filled..]) {
Ok(0) => break,
Ok(n) => filled += n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
Ok(filled)
}
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
let mut okm = [0u8; 12];
hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
.map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
Ok(okm)
}
pub fn encrypt_stream_chunked(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
if key.len() != 32 {
return Err(CryptoError::InvalidKeySize(key.len()));
}
if base_nonce.len() != 12 {
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
}
let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
let key_arr: [u8; 32] = key.try_into().unwrap();
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut infile = File::open(input_path)?;
let mut outfile = File::create(output_path)?;
outfile.write_all(&[0u8; 4])?;
let mut buf = vec![0u8; chunk_size];
let mut chunk_index: u32 = 0;
loop {
let n = read_exact_chunk(&mut infile, &mut buf)?;
if n == 0 {
break;
}
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, &buf[..n])
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let size = encrypted.len() as u32;
outfile.write_all(&size.to_be_bytes())?;
outfile.write_all(&encrypted)?;
chunk_index += 1;
}
outfile.seek(SeekFrom::Start(0))?;
outfile.write_all(&chunk_index.to_be_bytes())?;
Ok(chunk_index)
}
pub fn decrypt_stream_chunked(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
) -> Result<u32, CryptoError> {
if key.len() != 32 {
return Err(CryptoError::InvalidKeySize(key.len()));
}
if base_nonce.len() != 12 {
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
}
let key_arr: [u8; 32] = key.try_into().unwrap();
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut infile = File::open(input_path)?;
let mut outfile = File::create(output_path)?;
let mut header = [0u8; HEADER_SIZE];
infile.read_exact(&mut header)?;
let chunk_count = u32::from_be_bytes(header);
let mut size_buf = [0u8; HEADER_SIZE];
for chunk_index in 0..chunk_count {
infile.read_exact(&mut size_buf)?;
let chunk_size = u32::from_be_bytes(size_buf) as usize;
let mut encrypted = vec![0u8; chunk_size];
infile.read_exact(&mut encrypted)?;
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
let nonce = Nonce::from_slice(&nonce_bytes);
let decrypted = cipher
.decrypt(nonce, encrypted.as_ref())
.map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;
outfile.write_all(&decrypted)?;
}
Ok(chunk_count)
}
pub async fn encrypt_stream_chunked_async(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
let input_path = input_path.to_owned();
let output_path = output_path.to_owned();
let key = key.to_vec();
let base_nonce = base_nonce.to_vec();
tokio::task::spawn_blocking(move || {
encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
pub async fn decrypt_stream_chunked_async(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
) -> Result<u32, CryptoError> {
let input_path = input_path.to_owned();
let output_path = output_path.to_owned();
let key = key.to_vec();
let base_nonce = base_nonce.to_vec();
tokio::task::spawn_blocking(move || {
decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write as IoWrite;
#[test]
fn test_encrypt_decrypt_roundtrip() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
let encrypted = dir.path().join("encrypted.bin");
let decrypted = dir.path().join("decrypted.bin");
let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
std::fs::File::create(&input)
.unwrap()
.write_all(data)
.unwrap();
let key = [0x42u8; 32];
let nonce = [0x01u8; 12];
let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
assert!(chunks > 0);
let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
assert_eq!(chunks, chunks2);
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[test]
fn test_invalid_key_size() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
std::fs::File::create(&input)
.unwrap()
.write_all(b"test")
.unwrap();
let result = encrypt_stream_chunked(
&input,
&dir.path().join("out"),
&[0u8; 16],
&[0u8; 12],
None,
);
assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
}
#[test]
fn test_wrong_key_fails_decrypt() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("input.bin");
let encrypted = dir.path().join("encrypted.bin");
let decrypted = dir.path().join("decrypted.bin");
std::fs::File::create(&input)
.unwrap()
.write_all(b"secret data")
.unwrap();
let key = [0x42u8; 32];
let nonce = [0x01u8; 12];
encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();
let wrong_key = [0x43u8; 32];
let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
}
}
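
The container format written above is a 4-byte big-endian chunk count, then one record per chunk: a 4-byte ciphertext length followed by the AES-256-GCM output (the chunk's bytes plus a 16-byte tag). The size arithmetic, derived from the code above (this helper is not in the original module):

// Expected encrypted container size for a given plaintext length:
// 4-byte count header, plus per chunk a 4-byte length prefix and a
// 16-byte GCM tag, plus the plaintext bytes themselves.
fn expected_container_size(plaintext_len: u64, chunk_size: u64) -> u64 {
    let chunks = plaintext_len / chunk_size + u64::from(plaintext_len % chunk_size != 0);
    4 + chunks * (4 + 16) + plaintext_len
}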

View File

@@ -1,377 +0,0 @@
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use rand::RngCore;
use std::collections::HashMap;
use std::path::Path;
use crate::aes_gcm::{decrypt_stream_chunked, encrypt_stream_chunked, CryptoError};
use crate::kms::KmsService;
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
Aes256,
AwsKms,
CustomerProvided,
}
impl SseAlgorithm {
pub fn as_str(&self) -> &'static str {
match self {
SseAlgorithm::Aes256 => "AES256",
SseAlgorithm::AwsKms => "aws:kms",
SseAlgorithm::CustomerProvided => "AES256",
}
}
}
#[derive(Debug, Clone)]
pub struct EncryptionContext {
pub algorithm: SseAlgorithm,
pub kms_key_id: Option<String>,
pub customer_key: Option<Vec<u8>>,
}
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
pub algorithm: String,
pub nonce: String,
pub encrypted_data_key: Option<String>,
pub kms_key_id: Option<String>,
}
impl EncryptionMetadata {
pub fn to_metadata_map(&self) -> HashMap<String, String> {
let mut map = HashMap::new();
map.insert(
"x-amz-server-side-encryption".to_string(),
self.algorithm.clone(),
);
map.insert("x-amz-encryption-nonce".to_string(), self.nonce.clone());
if let Some(ref dk) = self.encrypted_data_key {
map.insert("x-amz-encrypted-data-key".to_string(), dk.clone());
}
if let Some(ref kid) = self.kms_key_id {
map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
}
map
}
pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
let algorithm = meta.get("x-amz-server-side-encryption")?;
let nonce = meta.get("x-amz-encryption-nonce")?;
Some(Self {
algorithm: algorithm.clone(),
nonce: nonce.clone(),
encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
})
}
pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
meta.contains_key("x-amz-server-side-encryption")
}
pub fn clean_metadata(meta: &mut HashMap<String, String>) {
meta.remove("x-amz-server-side-encryption");
meta.remove("x-amz-encryption-nonce");
meta.remove("x-amz-encrypted-data-key");
meta.remove("x-amz-encryption-key-id");
}
}
pub struct EncryptionService {
master_key: [u8; 32],
kms: Option<std::sync::Arc<KmsService>>,
}
impl EncryptionService {
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
Self { master_key, kms }
}
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
let mut data_key = [0u8; 32];
let mut nonce = [0u8; 12];
rand::thread_rng().fill_bytes(&mut data_key);
rand::thread_rng().fill_bytes(&mut nonce);
(data_key, nonce)
}
pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
let cipher = Aes256Gcm::new((&self.master_key).into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, data_key.as_slice())
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut combined = Vec::with_capacity(12 + encrypted.len());
combined.extend_from_slice(&nonce_bytes);
combined.extend_from_slice(&encrypted);
Ok(B64.encode(&combined))
}
pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
let combined = B64.decode(wrapped_b64).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
})?;
if combined.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Wrapped key too short".to_string(),
));
}
let (nonce_bytes, ciphertext) = combined.split_at(12);
let cipher = Aes256Gcm::new((&self.master_key).into());
let nonce = Nonce::from_slice(nonce_bytes);
let plaintext = cipher
.decrypt(nonce, ciphertext)
.map_err(|_| CryptoError::DecryptionFailed(0))?;
if plaintext.len() != 32 {
return Err(CryptoError::InvalidKeySize(plaintext.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&plaintext);
Ok(key)
}
pub async fn encrypt_object(
&self,
input_path: &Path,
output_path: &Path,
ctx: &EncryptionContext,
) -> Result<EncryptionMetadata, CryptoError> {
let (data_key, nonce) = self.generate_data_key();
let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
SseAlgorithm::Aes256 => {
let wrapped = self.wrap_data_key(&data_key)?;
(Some(wrapped), None)
}
SseAlgorithm::AwsKms => {
let kms = self
.kms
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
let kid = ctx
.kms_key_id
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
let ciphertext = kms.encrypt_data(kid, &data_key).await?;
(Some(B64.encode(&ciphertext)), Some(kid.clone()))
}
SseAlgorithm::CustomerProvided => (None, None),
};
let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
let ck = ctx
.customer_key
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("No customer key provided".into()))?;
if ck.len() != 32 {
return Err(CryptoError::InvalidKeySize(ck.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(ck);
k
} else {
data_key
};
let ip = input_path.to_owned();
let op = output_path.to_owned();
let ak = actual_key;
let n = nonce;
tokio::task::spawn_blocking(move || encrypt_stream_chunked(&ip, &op, &ak, &n, None))
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
Ok(EncryptionMetadata {
algorithm: ctx.algorithm.as_str().to_string(),
nonce: B64.encode(nonce),
encrypted_data_key,
kms_key_id,
})
}
pub async fn decrypt_object(
&self,
input_path: &Path,
output_path: &Path,
enc_meta: &EncryptionMetadata,
customer_key: Option<&[u8]>,
) -> Result<(), CryptoError> {
let nonce_bytes = B64
.decode(&enc_meta.nonce)
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e)))?;
if nonce_bytes.len() != 12 {
return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
}
let data_key: [u8; 32] = if let Some(ck) = customer_key {
if ck.len() != 32 {
return Err(CryptoError::InvalidKeySize(ck.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(ck);
k
} else if enc_meta.algorithm == "aws:kms" {
let kms = self
.kms
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
let kid = enc_meta
.kms_key_id
.as_ref()
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
})?;
let ct = B64.decode(encrypted_dk).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
})?;
let dk = kms.decrypt_data(kid, &ct).await?;
if dk.len() != 32 {
return Err(CryptoError::InvalidKeySize(dk.len()));
}
let mut k = [0u8; 32];
k.copy_from_slice(&dk);
k
} else {
let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
})?;
self.unwrap_data_key(wrapped)?
};
let ip = input_path.to_owned();
let op = output_path.to_owned();
let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nb))
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
fn test_master_key() -> [u8; 32] {
[0x42u8; 32]
}
#[test]
fn test_wrap_unwrap_data_key() {
let svc = EncryptionService::new(test_master_key(), None);
let dk = [0xAAu8; 32];
let wrapped = svc.wrap_data_key(&dk).unwrap();
let unwrapped = svc.unwrap_data_key(&wrapped).unwrap();
assert_eq!(dk, unwrapped);
}
#[tokio::test]
async fn test_encrypt_decrypt_object_sse_s3() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("plain.bin");
let encrypted = dir.path().join("enc.bin");
let decrypted = dir.path().join("dec.bin");
let data = b"SSE-S3 encrypted content for testing!";
std::fs::File::create(&input)
.unwrap()
.write_all(data)
.unwrap();
let svc = EncryptionService::new(test_master_key(), None);
let ctx = EncryptionContext {
algorithm: SseAlgorithm::Aes256,
kms_key_id: None,
customer_key: None,
};
let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
assert_eq!(meta.algorithm, "AES256");
assert!(meta.encrypted_data_key.is_some());
svc.decrypt_object(&encrypted, &decrypted, &meta, None)
.await
.unwrap();
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[tokio::test]
async fn test_encrypt_decrypt_object_sse_c() {
let dir = tempfile::tempdir().unwrap();
let input = dir.path().join("plain.bin");
let encrypted = dir.path().join("enc.bin");
let decrypted = dir.path().join("dec.bin");
let data = b"SSE-C encrypted content!";
std::fs::File::create(&input)
.unwrap()
.write_all(data)
.unwrap();
let customer_key = [0xBBu8; 32];
let svc = EncryptionService::new(test_master_key(), None);
let ctx = EncryptionContext {
algorithm: SseAlgorithm::CustomerProvided,
kms_key_id: None,
customer_key: Some(customer_key.to_vec()),
};
let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
assert!(meta.encrypted_data_key.is_none());
svc.decrypt_object(&encrypted, &decrypted, &meta, Some(&customer_key))
.await
.unwrap();
let result = std::fs::read(&decrypted).unwrap();
assert_eq!(result, data);
}
#[test]
fn test_encryption_metadata_roundtrip() {
let meta = EncryptionMetadata {
algorithm: "AES256".to_string(),
nonce: "dGVzdG5vbmNlMTI=".to_string(),
encrypted_data_key: Some("c29tZWtleQ==".to_string()),
kms_key_id: None,
};
let map = meta.to_metadata_map();
let restored = EncryptionMetadata::from_metadata(&map).unwrap();
assert_eq!(restored.algorithm, "AES256");
assert_eq!(restored.nonce, meta.nonce);
assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
}
#[test]
fn test_is_encrypted() {
let mut meta = HashMap::new();
assert!(!EncryptionMetadata::is_encrypted(&meta));
meta.insert(
"x-amz-server-side-encryption".to_string(),
"AES256".to_string(),
);
assert!(EncryptionMetadata::is_encrypted(&meta));
}
}
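
Taken together this is classic envelope encryption: a fresh per-object data key encrypts the bytes, and only a wrapped copy of that key travels in the object's metadata. A hedged end-to-end sketch of how a storage layer presumably wires it up (the function is illustrative, not from this crate):

// Hedged sketch: the plaintext data key never touches disk; only the
// wrapped copy inside the object's metadata map does.
async fn sse_s3_roundtrip(
    svc: &EncryptionService,
    plain: &std::path::Path,
    enc: &std::path::Path,
    out: &std::path::Path,
) -> Result<(), CryptoError> {
    let ctx = EncryptionContext {
        algorithm: SseAlgorithm::Aes256,
        kms_key_id: None,
        customer_key: None,
    };
    let meta = svc.encrypt_object(plain, enc, &ctx).await?;
    // Persist alongside the object metadata...
    let map = meta.to_metadata_map();
    // ...and rebuild it before serving a GET.
    let restored = EncryptionMetadata::from_metadata(&map)
        .ok_or_else(|| CryptoError::EncryptionFailed("missing SSE metadata".into()))?;
    svc.decrypt_object(enc, out, &restored, None).await
}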

View File

@@ -1,138 +0,0 @@
use md5::{Digest, Md5};
use sha2::Sha256;
use std::io::Read;
use std::path::Path;
const CHUNK_SIZE: usize = 65536;
pub fn md5_file(path: &Path) -> std::io::Result<String> {
let mut file = std::fs::File::open(path)?;
let mut hasher = Md5::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
pub fn md5_bytes(data: &[u8]) -> String {
let mut hasher = Md5::new();
hasher.update(data);
format!("{:x}", hasher.finalize())
}
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
let mut file = std::fs::File::open(path)?;
let mut hasher = Sha256::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
Ok(format!("{:x}", hasher.finalize()))
}
pub fn sha256_bytes(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
format!("{:x}", hasher.finalize())
}
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
let mut file = std::fs::File::open(path)?;
let mut md5_hasher = Md5::new();
let mut sha_hasher = Sha256::new();
let mut buf = vec![0u8; CHUNK_SIZE];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
md5_hasher.update(&buf[..n]);
sha_hasher.update(&buf[..n]);
}
Ok((
format!("{:x}", md5_hasher.finalize()),
format!("{:x}", sha_hasher.finalize()),
))
}
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || md5_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || sha256_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
let path = path.to_owned();
tokio::task::spawn_blocking(move || md5_sha256_file(&path))
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Write;
#[test]
fn test_md5_bytes() {
assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
assert_eq!(md5_bytes(b"hello"), "5d41402abc4b2a76b9719d911017c592");
}
#[test]
fn test_sha256_bytes() {
let hash = sha256_bytes(b"hello");
assert_eq!(
hash,
"2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
);
}
#[test]
fn test_md5_file() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let hash = md5_file(tmp.path()).unwrap();
assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
}
#[test]
fn test_md5_sha256_file() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let (md5, sha) = md5_sha256_file(tmp.path()).unwrap();
assert_eq!(md5, "5d41402abc4b2a76b9719d911017c592");
assert_eq!(
sha,
"2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
);
}
#[tokio::test]
async fn test_md5_file_async() {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(b"hello").unwrap();
tmp.flush().unwrap();
let hash = md5_file_async(tmp.path()).await.unwrap();
assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
}
}
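
md5_sha256_file exists so a PUT needs only one pass over the object: the MD5 feeds the ETag while the SHA-256 is checked against the SigV4 payload hash. A small usage sketch (the quoted-ETag convention is standard S3 behavior; the helper itself is assumed, not part of the original file):

// One read pass yields both digests a PUT handler needs.
async fn etag_and_payload_hash(path: &std::path::Path) -> std::io::Result<(String, String)> {
    let (md5_hex, sha256_hex) = md5_sha256_file_async(path).await?;
    Ok((format!("\"{}\"", md5_hex), sha256_hex))
}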

View File

@@ -1,451 +0,0 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use chrono::{DateTime, Utc};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
use crate::aes_gcm::CryptoError;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KmsKey {
#[serde(rename = "KeyId")]
pub key_id: String,
#[serde(rename = "Arn")]
pub arn: String,
#[serde(rename = "Description")]
pub description: String,
#[serde(rename = "CreationDate")]
pub creation_date: DateTime<Utc>,
#[serde(rename = "Enabled")]
pub enabled: bool,
#[serde(rename = "KeyState")]
pub key_state: String,
#[serde(rename = "KeyUsage")]
pub key_usage: String,
#[serde(rename = "KeySpec")]
pub key_spec: String,
#[serde(rename = "EncryptedKeyMaterial")]
pub encrypted_key_material: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KmsStore {
keys: Vec<KmsKey>,
}
pub struct KmsService {
keys_path: PathBuf,
master_key: Arc<RwLock<[u8; 32]>>,
keys: Arc<RwLock<Vec<KmsKey>>>,
}
impl KmsService {
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
let keys_path = keys_dir.join("kms_keys.json");
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
let keys = if keys_path.exists() {
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
let store: KmsStore = serde_json::from_str(&data)
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
store.keys
} else {
Vec::new()
};
Ok(Self {
keys_path,
master_key: Arc::new(RwLock::new(master_key)),
keys: Arc::new(RwLock::new(keys)),
})
}
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
if path.exists() {
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
let decoded = B64.decode(encoded.trim()).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
})?;
if decoded.len() != 32 {
return Err(CryptoError::InvalidKeySize(decoded.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&decoded);
Ok(key)
} else {
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
let encoded = B64.encode(key);
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
Ok(key)
}
}
fn encrypt_key_material(
master_key: &[u8; 32],
plaintext_key: &[u8],
) -> Result<String, CryptoError> {
let cipher = Aes256Gcm::new(master_key.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let ciphertext = cipher
.encrypt(nonce, plaintext_key)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut combined = Vec::with_capacity(12 + ciphertext.len());
combined.extend_from_slice(&nonce_bytes);
combined.extend_from_slice(&ciphertext);
Ok(B64.encode(&combined))
}
fn decrypt_key_material(
master_key: &[u8; 32],
encrypted_b64: &str,
) -> Result<Vec<u8>, CryptoError> {
let combined = B64.decode(encrypted_b64).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
})?;
if combined.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Encrypted key material too short".to_string(),
));
}
let (nonce_bytes, ciphertext) = combined.split_at(12);
let cipher = Aes256Gcm::new(master_key.into());
let nonce = Nonce::from_slice(nonce_bytes);
cipher
.decrypt(nonce, ciphertext)
.map_err(|_| CryptoError::DecryptionFailed(0))
}
async fn save(&self) -> Result<(), CryptoError> {
let keys = self.keys.read().await;
let store = KmsStore { keys: keys.clone() };
let json = serde_json::to_string_pretty(&store)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
Ok(())
}
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
let key_id = uuid::Uuid::new_v4().to_string();
let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);
let mut plaintext_key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut plaintext_key);
let master = self.master_key.read().await;
let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;
let kms_key = KmsKey {
key_id: key_id.clone(),
arn,
description: description.to_string(),
creation_date: Utc::now(),
enabled: true,
key_state: "Enabled".to_string(),
key_usage: "ENCRYPT_DECRYPT".to_string(),
key_spec: "SYMMETRIC_DEFAULT".to_string(),
encrypted_key_material: encrypted,
};
self.keys.write().await.push(kms_key.clone());
self.save().await?;
Ok(kms_key)
}
pub async fn list_keys(&self) -> Vec<KmsKey> {
self.keys.read().await.clone()
}
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
let keys = self.keys.read().await;
keys.iter()
.find(|k| k.key_id == key_id || k.arn == key_id)
.cloned()
}
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
let len_before = keys.len();
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
let removed = keys.len() < len_before;
drop(keys);
if removed {
self.save().await?;
}
Ok(removed)
}
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id || k.arn == key_id) {
key.enabled = true;
key.key_state = "Enabled".to_string();
drop(keys);
self.save().await?;
Ok(true)
} else {
Ok(false)
}
}
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
let mut keys = self.keys.write().await;
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id || k.arn == key_id) {
key.enabled = false;
key.key_state = "Disabled".to_string();
drop(keys);
self.save().await?;
Ok(true)
} else {
Ok(false)
}
}
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
let keys = self.keys.read().await;
let key = keys
.iter()
.find(|k| k.key_id == key_id || k.arn == key_id)
.ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;
if !key.enabled {
return Err(CryptoError::EncryptionFailed(
"KMS key is disabled".to_string(),
));
}
let master = self.master_key.read().await;
Self::decrypt_key_material(&master, &key.encrypted_key_material)
}
pub async fn encrypt_data(
&self,
key_id: &str,
plaintext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
let data_key = self.decrypt_data_key(key_id).await?;
if data_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(data_key.len()));
}
let key_arr: [u8; 32] = data_key.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let ciphertext = cipher
.encrypt(nonce, plaintext)
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut result = Vec::with_capacity(12 + ciphertext.len());
result.extend_from_slice(&nonce_bytes);
result.extend_from_slice(&ciphertext);
Ok(result)
}
pub async fn decrypt_data(
&self,
key_id: &str,
ciphertext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
if ciphertext.len() < 12 {
return Err(CryptoError::EncryptionFailed(
"Ciphertext too short".to_string(),
));
}
let data_key = self.decrypt_data_key(key_id).await?;
if data_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(data_key.len()));
}
let key_arr: [u8; 32] = data_key.try_into().unwrap();
let (nonce_bytes, ct) = ciphertext.split_at(12);
let cipher = Aes256Gcm::new(&key_arr.into());
let nonce = Nonce::from_slice(nonce_bytes);
cipher
.decrypt(nonce, ct)
.map_err(|_| CryptoError::DecryptionFailed(0))
}
pub async fn generate_data_key(
&self,
key_id: &str,
num_bytes: usize,
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
let kms_key = self.decrypt_data_key(key_id).await?;
if kms_key.len() != 32 {
return Err(CryptoError::InvalidKeySize(kms_key.len()));
}
let mut plaintext_key = vec![0u8; num_bytes];
rand::thread_rng().fill_bytes(&mut plaintext_key);
let key_arr: [u8; 32] = kms_key.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, plaintext_key.as_slice())
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let mut wrapped = Vec::with_capacity(12 + encrypted.len());
wrapped.extend_from_slice(&nonce_bytes);
wrapped.extend_from_slice(&encrypted);
Ok((plaintext_key, wrapped))
}
}
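// A minimal usage sketch (directory and descriptions here are illustrative):
//
//     let kms = KmsService::new(Path::new("./keys")).await?;
//     let key = kms.create_key("backup encryption").await?;
//     let ct = kms.encrypt_data(&key.key_id, b"secret").await?;
//     assert_eq!(kms.decrypt_data(&key.key_id, &ct).await?, b"secret");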
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
let path = keys_dir.join("master.key");
if path.exists() {
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
let decoded = B64.decode(encoded.trim()).map_err(|e| {
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
})?;
if decoded.len() != 32 {
return Err(CryptoError::InvalidKeySize(decoded.len()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&decoded);
Ok(key)
} else {
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
let encoded = B64.encode(key);
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
Ok(key)
}
}
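// Note: this free function manages `master.key`, while `KmsService` keeps a
// separate `kms_master.key`; the two key files are distinct stores.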
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_create_and_list_keys() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("test key").await.unwrap();
assert!(key.enabled);
assert_eq!(key.description, "test key");
assert!(!key.key_id.is_empty());
let keys = kms.list_keys().await;
assert_eq!(keys.len(), 1);
assert_eq!(keys[0].key_id, key.key_id);
}
#[tokio::test]
async fn test_enable_disable_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("toggle").await.unwrap();
assert!(key.enabled);
kms.disable_key(&key.key_id).await.unwrap();
let k = kms.get_key(&key.key_id).await.unwrap();
assert!(!k.enabled);
kms.enable_key(&key.key_id).await.unwrap();
let k = kms.get_key(&key.key_id).await.unwrap();
assert!(k.enabled);
}
#[tokio::test]
async fn test_delete_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("doomed").await.unwrap();
assert!(kms.delete_key(&key.key_id).await.unwrap());
assert!(kms.get_key(&key.key_id).await.is_none());
assert_eq!(kms.list_keys().await.len(), 0);
}
#[tokio::test]
async fn test_encrypt_decrypt_data() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("enc-key").await.unwrap();
let plaintext = b"Hello, KMS!";
let ciphertext = kms.encrypt_data(&key.key_id, plaintext).await.unwrap();
assert_ne!(&ciphertext, plaintext);
let decrypted = kms.decrypt_data(&key.key_id, &ciphertext).await.unwrap();
assert_eq!(decrypted, plaintext);
}
#[tokio::test]
async fn test_generate_data_key() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("data-key-gen").await.unwrap();
let (plaintext, wrapped) = kms.generate_data_key(&key.key_id, 32).await.unwrap();
assert_eq!(plaintext.len(), 32);
assert!(wrapped.len() > 32);
}
#[tokio::test]
async fn test_disabled_key_cannot_encrypt() {
let dir = tempfile::tempdir().unwrap();
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("disabled").await.unwrap();
kms.disable_key(&key.key_id).await.unwrap();
let result = kms.encrypt_data(&key.key_id, b"test").await;
assert!(result.is_err());
}
#[tokio::test]
async fn test_persistence_across_reload() {
let dir = tempfile::tempdir().unwrap();
let key_id = {
let kms = KmsService::new(dir.path()).await.unwrap();
let key = kms.create_key("persistent").await.unwrap();
key.key_id
};
let kms2 = KmsService::new(dir.path()).await.unwrap();
let key = kms2.get_key(&key_id).await;
assert!(key.is_some());
assert_eq!(key.unwrap().description, "persistent");
}
#[tokio::test]
async fn test_master_key_roundtrip() {
let dir = tempfile::tempdir().unwrap();
let key1 = load_or_create_master_key(dir.path()).await.unwrap();
let key2 = load_or_create_master_key(dir.path()).await.unwrap();
assert_eq!(key1, key2);
}
}

View File

@@ -1,4 +0,0 @@
pub mod aes_gcm;
pub mod encryption;
pub mod hashing;
pub mod kms;

View File

@@ -1,56 +0,0 @@
[package]
name = "myfsio-server"
version.workspace = true
edition.workspace = true
[dependencies]
myfsio-common = { path = "../myfsio-common" }
myfsio-auth = { path = "../myfsio-auth" }
myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
base64 = { workspace = true }
md-5 = { workspace = true }
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
hyper = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
http-body-util = "0.1"
percent-encoding = { workspace = true }
quick-xml = { workspace = true }
mime_guess = "2"
crc32fast = { workspace = true }
sha2 = { workspace = true }
duckdb = { workspace = true }
roxmltree = "0.20"
parking_lot = { workspace = true }
regex = "1"
multer = "3"
reqwest = { workspace = true }
aws-sdk-s3 = { workspace = true }
aws-config = { workspace = true }
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }
async-trait = { workspace = true }
rand = "0.8"
tera = { workspace = true }
cookie = { workspace = true }
subtle = { workspace = true }
clap = { workspace = true }
dotenvy = { workspace = true }
sysinfo = "0.32"
aes-gcm = { workspace = true }
[dev-dependencies]
tempfile = "3"
tower = { workspace = true, features = ["util"] }

View File

@@ -1,227 +0,0 @@
use std::net::SocketAddr;
use std::path::PathBuf;
#[derive(Debug, Clone)]
pub struct ServerConfig {
pub bind_addr: SocketAddr,
pub ui_bind_addr: SocketAddr,
pub storage_root: PathBuf,
pub region: String,
pub iam_config_path: PathBuf,
pub sigv4_timestamp_tolerance_secs: u64,
pub presigned_url_min_expiry: u64,
pub presigned_url_max_expiry: u64,
pub secret_key: Option<String>,
pub encryption_enabled: bool,
pub kms_enabled: bool,
pub gc_enabled: bool,
pub integrity_enabled: bool,
pub metrics_enabled: bool,
pub metrics_history_enabled: bool,
pub metrics_interval_minutes: u64,
pub metrics_retention_hours: u64,
pub metrics_history_interval_minutes: u64,
pub metrics_history_retention_hours: u64,
pub lifecycle_enabled: bool,
pub website_hosting_enabled: bool,
pub replication_connect_timeout_secs: u64,
pub replication_read_timeout_secs: u64,
pub replication_max_retries: u32,
pub replication_streaming_threshold_bytes: u64,
pub replication_max_failures_per_bucket: usize,
pub site_sync_enabled: bool,
pub site_sync_interval_secs: u64,
pub site_sync_batch_size: usize,
pub site_sync_connect_timeout_secs: u64,
pub site_sync_read_timeout_secs: u64,
pub site_sync_max_retries: u32,
pub site_sync_clock_skew_tolerance: f64,
pub ui_enabled: bool,
pub templates_dir: PathBuf,
pub static_dir: PathBuf,
}
impl ServerConfig {
pub fn from_env() -> Self {
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
let port: u16 = std::env::var("PORT")
.unwrap_or_else(|_| "5000".to_string())
.parse()
.unwrap_or(5000);
let ui_port: u16 = std::env::var("UI_PORT")
.unwrap_or_else(|_| "5100".to_string())
.parse()
.unwrap_or(5100);
let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
let storage_path = PathBuf::from(&storage_root);
let iam_config_path = std::env::var("IAM_CONFIG")
.map(PathBuf::from)
.unwrap_or_else(|_| {
storage_path
.join(".myfsio.sys")
.join("config")
.join("iam.json")
});
let sigv4_timestamp_tolerance_secs: u64 =
std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
.unwrap_or_else(|_| "900".to_string())
.parse()
.unwrap_or(900);
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
.unwrap_or_else(|_| "1".to_string())
.parse()
.unwrap_or(1);
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
.unwrap_or_else(|_| "604800".to_string())
.parse()
.unwrap_or(604800);
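// Prefer SECRET_KEY from the environment unless it is unset, empty, or the
// "dev-secret-key" placeholder; otherwise fall back to the persisted file.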
let secret_key = {
let env_key = std::env::var("SECRET_KEY").ok();
match env_key {
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
_ => {
let secret_file = storage_path
.join(".myfsio.sys")
.join("config")
.join(".secret");
std::fs::read_to_string(&secret_file)
.ok()
.map(|s| s.trim().to_string())
}
}
};
let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
let kms_enabled = parse_bool_env("KMS_ENABLED", false);
let gc_enabled = parse_bool_env("GC_ENABLED", false);
let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
let replication_connect_timeout_secs =
parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
let replication_streaming_threshold_bytes =
parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
let replication_max_failures_per_bucket =
parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
let site_sync_clock_skew_tolerance: f64 =
std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(1.0);
let ui_enabled = parse_bool_env("UI_ENABLED", true);
let templates_dir = std::env::var("TEMPLATES_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| default_templates_dir());
let static_dir = std::env::var("STATIC_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| default_static_dir());
// Fall back to loopback rather than panicking on a malformed HOST value.
let host_ip: std::net::IpAddr = host
.parse()
.unwrap_or_else(|_| std::net::IpAddr::from([127, 0, 0, 1]));
Self {
bind_addr: SocketAddr::new(host_ip, port),
ui_bind_addr: SocketAddr::new(host_ip, ui_port),
storage_root: storage_path,
region,
iam_config_path,
sigv4_timestamp_tolerance_secs,
presigned_url_min_expiry,
presigned_url_max_expiry,
secret_key,
encryption_enabled,
kms_enabled,
gc_enabled,
integrity_enabled,
metrics_enabled,
metrics_history_enabled,
metrics_interval_minutes,
metrics_retention_hours,
metrics_history_interval_minutes,
metrics_history_retention_hours,
lifecycle_enabled,
website_hosting_enabled,
replication_connect_timeout_secs,
replication_read_timeout_secs,
replication_max_retries,
replication_streaming_threshold_bytes,
replication_max_failures_per_bucket,
site_sync_enabled,
site_sync_interval_secs,
site_sync_batch_size,
site_sync_connect_timeout_secs,
site_sync_read_timeout_secs,
site_sync_max_retries,
site_sync_clock_skew_tolerance,
ui_enabled,
templates_dir,
static_dir,
}
}
}
fn default_templates_dir() -> PathBuf {
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
manifest_dir.join("templates")
}
fn default_static_dir() -> PathBuf {
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
for candidate in [
manifest_dir.join("static"),
manifest_dir.join("..").join("..").join("..").join("static"),
] {
if candidate.exists() {
return candidate;
}
}
manifest_dir.join("static")
}
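// Env helpers: both fall back to the given default on missing or unparsable
// values; `parse_bool_env` accepts "1", "true", "yes", or "on" (any case).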
fn parse_u64_env(key: &str, default: u64) -> u64 {
std::env::var(key)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(default)
}
fn parse_bool_env(key: &str, default: bool) -> bool {
std::env::var(key)
.ok()
.map(|value| {
matches!(
value.trim().to_ascii_lowercase().as_str(),
"1" | "true" | "yes" | "on"
)
})
.unwrap_or(default)
}

File diff suppressed because it is too large

View File

@@ -1,184 +0,0 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::{Buf, BytesMut};
use tokio::io::{AsyncRead, ReadBuf};
enum State {
ReadSize,
ReadData(u64),
ReadTrailer,
Finished,
}
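// The aws-chunked framing this decoder strips looks like:
//
//     <hex-size>[;chunk-signature=...]\r\n<data>\r\n ... 0\r\n[<trailers>\r\n]\r\n
//
// Size lines, chunk signatures, and trailers are consumed; only payload bytes
// are surfaced to the reader.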
pub struct AwsChunkedStream<S> {
inner: S,
buffer: BytesMut,
state: State,
pending: BytesMut,
eof: bool,
}
impl<S> AwsChunkedStream<S> {
pub fn new(inner: S) -> Self {
Self {
inner,
buffer: BytesMut::with_capacity(8192),
state: State::ReadSize,
pending: BytesMut::new(),
eof: false,
}
}
fn find_crlf(&self) -> Option<usize> {
for i in 0..self.buffer.len().saturating_sub(1) {
if self.buffer[i] == b'\r' && self.buffer[i + 1] == b'\n' {
return Some(i);
}
}
None
}
fn parse_chunk_size(line: &[u8]) -> std::io::Result<u64> {
let text = std::str::from_utf8(line).map_err(|_| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
"invalid chunk size encoding",
)
})?;
let head = text.split(';').next().unwrap_or("").trim();
u64::from_str_radix(head, 16).map_err(|_| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("invalid chunk size: {}", head),
)
})
}
fn try_advance(&mut self, out: &mut ReadBuf<'_>) -> std::io::Result<bool> {
loop {
if out.remaining() == 0 {
return Ok(true);
}
if !self.pending.is_empty() {
let take = std::cmp::min(self.pending.len(), out.remaining());
out.put_slice(&self.pending[..take]);
self.pending.advance(take);
continue;
}
match self.state {
State::Finished => return Ok(true),
State::ReadSize => {
let idx = match self.find_crlf() {
Some(i) => i,
None => return Ok(false),
};
let line = self.buffer.split_to(idx);
self.buffer.advance(2);
let size = Self::parse_chunk_size(&line)?;
if size == 0 {
self.state = State::ReadTrailer;
} else {
self.state = State::ReadData(size);
}
}
State::ReadData(remaining) => {
if self.buffer.is_empty() {
return Ok(false);
}
let avail = std::cmp::min(self.buffer.len() as u64, remaining) as usize;
let take = std::cmp::min(avail, out.remaining());
out.put_slice(&self.buffer[..take]);
self.buffer.advance(take);
let new_remaining = remaining - take as u64;
if new_remaining == 0 {
if self.buffer.len() < 2 {
self.state = State::ReadData(0);
return Ok(false);
}
if &self.buffer[..2] != b"\r\n" {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"malformed chunk terminator",
));
}
self.buffer.advance(2);
self.state = State::ReadSize;
} else {
self.state = State::ReadData(new_remaining);
}
}
State::ReadTrailer => {
let idx = match self.find_crlf() {
Some(i) => i,
None => return Ok(false),
};
if idx == 0 {
self.buffer.advance(2);
self.state = State::Finished;
} else {
self.buffer.advance(idx + 2);
}
}
}
}
}
}
impl<S> AsyncRead for AwsChunkedStream<S>
where
S: AsyncRead + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
loop {
let before = buf.filled().len();
let done = match self.try_advance(buf) {
Ok(v) => v,
Err(e) => return Poll::Ready(Err(e)),
};
if buf.filled().len() > before {
return Poll::Ready(Ok(()));
}
if done {
return Poll::Ready(Ok(()));
}
if self.eof {
return Poll::Ready(Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
"unexpected EOF in aws-chunked stream",
)));
}
let mut tmp = [0u8; 8192];
let mut rb = ReadBuf::new(&mut tmp);
match Pin::new(&mut self.inner).poll_read(cx, &mut rb) {
Poll::Ready(Ok(())) => {
let n = rb.filled().len();
if n == 0 {
self.eof = true;
continue;
}
self.buffer.extend_from_slice(rb.filled());
}
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Pending => return Poll::Pending,
}
}
}
}
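// Adapts an axum `Body` into an `AsyncRead` and wraps it in the decoder.
// Non-data frames (e.g. trailers) are flattened to empty chunks via
// `into_data().unwrap_or_default()`.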
pub fn decode_body(body: axum::body::Body) -> impl AsyncRead + Send + Unpin {
use futures::TryStreamExt;
let stream = tokio_util::io::StreamReader::new(
http_body_util::BodyStream::new(body)
.map_ok(|frame| frame.into_data().unwrap_or_default())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
);
AwsChunkedStream::new(stream)
}

File diff suppressed because it is too large

View File

@@ -1,541 +0,0 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use axum::body::Body;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use rand::RngCore;
use serde_json::{json, Value};
use crate::state::AppState;
fn json_ok(value: Value) -> Response {
(
StatusCode::OK,
[("content-type", "application/json")],
value.to_string(),
)
.into_response()
}
fn json_err(status: StatusCode, msg: &str) -> Response {
(
status,
[("content-type", "application/json")],
json!({"error": msg}).to_string(),
)
.into_response()
}
async fn read_json(body: Body) -> Result<Value, Response> {
let body_bytes = http_body_util::BodyExt::collect(body)
.await
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid request body"))?
.to_bytes();
if body_bytes.is_empty() {
Ok(json!({}))
} else {
serde_json::from_slice(&body_bytes)
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid JSON"))
}
}
fn require_kms(
state: &AppState,
) -> Result<&std::sync::Arc<myfsio_crypto::kms::KmsService>, Response> {
state
.kms
.as_ref()
.ok_or_else(|| json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"))
}
fn decode_b64(value: &str, field: &str) -> Result<Vec<u8>, Response> {
B64.decode(value).map_err(|_| {
json_err(
StatusCode::BAD_REQUEST,
&format!("Invalid base64 {}", field),
)
})
}
fn require_str<'a>(value: &'a Value, names: &[&str], message: &str) -> Result<&'a str, Response> {
for name in names {
if let Some(found) = value.get(*name).and_then(|v| v.as_str()) {
return Ok(found);
}
}
Err(json_err(StatusCode::BAD_REQUEST, message))
}
pub async fn list_keys(State(state): State<AppState>) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let keys = kms.list_keys().await;
let keys_json: Vec<Value> = keys
.iter()
.map(|k| {
json!({
"KeyId": k.key_id,
"Arn": k.arn,
"Description": k.description,
"CreationDate": k.creation_date.to_rfc3339(),
"Enabled": k.enabled,
"KeyState": k.key_state,
"KeyUsage": k.key_usage,
"KeySpec": k.key_spec,
})
})
.collect();
json_ok(json!({"keys": keys_json}))
}
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let description = req
.get("Description")
.or_else(|| req.get("description"))
.and_then(|d| d.as_str())
.unwrap_or("");
match kms.create_key(description).await {
Ok(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
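// A request sketch (the mount path below is assumed; adjust to the router):
//
//     curl -X POST http://127.0.0.1:5100/api/kms/keys \
//          -H 'content-type: application/json' \
//          -d '{"Description": "backups"}'
//
// On success the response carries the key's KeyId, Arn, CreationDate, and state.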
pub async fn get_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.get_key(&key_id).await {
Some(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
"KeyUsage": key.key_usage,
"KeySpec": key.key_spec,
})),
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
}
}
pub async fn delete_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.delete_key(&key_id).await {
Ok(true) => StatusCode::NO_CONTENT.into_response(),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn enable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.enable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "enabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn disable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.disable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "disabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext_b64 = match require_str(&req, &["Plaintext", "plaintext"], "Missing Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
match kms.encrypt_data(key_id, &plaintext).await {
Ok(ct) => json_ok(json!({
"KeyId": key_id,
"CiphertextBlob": B64.encode(&ct),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["CiphertextBlob", "ciphertext_blob"],
"Missing CiphertextBlob",
) {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
Ok(value) => value,
Err(response) => return response,
};
match kms.decrypt_data(key_id, &ciphertext).await {
Ok(pt) => json_ok(json!({
"KeyId": key_id,
"Plaintext": B64.encode(&pt),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
generate_data_key_inner(state, body, true).await
}
pub async fn generate_data_key_without_plaintext(
State(state): State<AppState>,
body: Body,
) -> Response {
generate_data_key_inner(state, body, false).await
}
async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext: bool) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let num_bytes = req
.get("NumberOfBytes")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if !(1..=1024).contains(&num_bytes) {
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
}
match kms.generate_data_key(key_id, num_bytes).await {
Ok((plaintext, wrapped)) => {
let mut value = json!({
"KeyId": key_id,
"CiphertextBlob": B64.encode(&wrapped),
});
if include_plaintext {
value["Plaintext"] = json!(B64.encode(&plaintext));
}
json_ok(value)
}
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn re_encrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["CiphertextBlob", "ciphertext_blob"],
"CiphertextBlob is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let destination_key_id = match require_str(
&req,
&["DestinationKeyId", "destination_key_id"],
"DestinationKeyId is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
Ok(value) => value,
Err(response) => return response,
};
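// The ciphertext does not embed its source key id, so probe every enabled
// key; AES-GCM authentication guarantees decryption under a wrong key fails.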
let keys = kms.list_keys().await;
let mut source_key_id: Option<String> = None;
let mut plaintext: Option<Vec<u8>> = None;
for key in keys {
if !key.enabled {
continue;
}
if let Ok(value) = kms.decrypt_data(&key.key_id, &ciphertext).await {
source_key_id = Some(key.key_id);
plaintext = Some(value);
break;
}
}
let Some(source_key_id) = source_key_id else {
return json_err(
StatusCode::BAD_REQUEST,
"Could not determine source key for CiphertextBlob",
);
};
let plaintext = plaintext.unwrap_or_default();
match kms.encrypt_data(destination_key_id, &plaintext).await {
Ok(new_ciphertext) => json_ok(json!({
"CiphertextBlob": B64.encode(&new_ciphertext),
"SourceKeyId": source_key_id,
"KeyId": destination_key_id,
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn generate_random(State(state): State<AppState>, body: Body) -> Response {
if let Err(response) = require_kms(&state) {
return response;
}
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let num_bytes = req
.get("NumberOfBytes")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if !(1..=1024).contains(&num_bytes) {
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
}
let mut bytes = vec![0u8; num_bytes];
rand::thread_rng().fill_bytes(&mut bytes);
json_ok(json!({
"Plaintext": B64.encode(bytes),
}))
}
pub async fn client_generate_key(State(state): State<AppState>) -> Response {
let _ = state;
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
json_ok(json!({
"Key": B64.encode(key),
"Algorithm": "AES-256-GCM",
"KeySize": 32,
}))
}
pub async fn client_encrypt(State(state): State<AppState>, body: Body) -> Response {
let _ = state;
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let plaintext_b64 =
match require_str(&req, &["Plaintext", "plaintext"], "Plaintext is required") {
Ok(value) => value,
Err(response) => return response,
};
let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
let key_bytes = match decode_b64(key_b64, "Key") {
Ok(value) => value,
Err(response) => return response,
};
if key_bytes.len() != 32 {
return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
}
let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
Ok(cipher) => cipher,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
};
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
match cipher.encrypt(nonce, plaintext.as_ref()) {
Ok(ciphertext) => json_ok(json!({
"Ciphertext": B64.encode(ciphertext),
"Nonce": B64.encode(nonce_bytes),
"Algorithm": "AES-256-GCM",
})),
Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
}
}
pub async fn client_decrypt(State(state): State<AppState>, body: Body) -> Response {
let _ = state;
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["Ciphertext", "ciphertext"],
"Ciphertext is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let nonce_b64 = match require_str(&req, &["Nonce", "nonce"], "Nonce is required") {
Ok(value) => value,
Err(response) => return response,
};
let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "Ciphertext") {
Ok(value) => value,
Err(response) => return response,
};
let nonce_bytes = match decode_b64(nonce_b64, "Nonce") {
Ok(value) => value,
Err(response) => return response,
};
let key_bytes = match decode_b64(key_b64, "Key") {
Ok(value) => value,
Err(response) => return response,
};
if key_bytes.len() != 32 {
return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
}
if nonce_bytes.len() != 12 {
return json_err(StatusCode::BAD_REQUEST, "Nonce must decode to 12 bytes");
}
let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
Ok(cipher) => cipher,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
};
let nonce = Nonce::from_slice(&nonce_bytes);
match cipher.decrypt(nonce, ciphertext.as_ref()) {
Ok(plaintext) => json_ok(json!({
"Plaintext": B64.encode(plaintext),
})),
Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
}
}
pub async fn materials(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
body: Body,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let _ = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
match kms.generate_data_key(&key_id, 32).await {
Ok((plaintext, wrapped)) => json_ok(json!({
"PlaintextKey": B64.encode(plaintext),
"EncryptedKey": B64.encode(wrapped),
"KeyId": key_id,
"Algorithm": "AES-256-GCM",
"KeyWrapAlgorithm": "kms",
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}

File diff suppressed because it is too large

View File

@@ -1,578 +0,0 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use axum::body::Body;
use axum::http::{HeaderMap, HeaderName, StatusCode};
use axum::response::{IntoResponse, Response};
use base64::Engine;
use bytes::Bytes;
use crc32fast::Hasher;
use duckdb::types::ValueRef;
use duckdb::Connection;
use futures::stream;
use http_body_util::BodyExt;
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
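// Empty extern block to force-link the Windows Restart Manager library, which
// the bundled DuckDB build appears to require at link time.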
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}
const CHUNK_SIZE: usize = 65_536;
pub async fn post_select_object_content(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
body: Body,
) -> Response {
if let Some(resp) = require_xml_content_type(headers) {
return resp;
}
let body_bytes = match body.collect().await {
Ok(collected) => collected.to_bytes(),
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::MalformedXML,
"Unable to parse XML document",
));
}
};
let request = match parse_select_request(&body_bytes) {
Ok(r) => r,
Err(err) => return s3_error_response(err),
};
let object_path = match state.storage.get_object_path(bucket, key).await {
Ok(path) => path,
Err(_) => {
return s3_error_response(S3Error::new(S3ErrorCode::NoSuchKey, "Object not found"));
}
};
let join_res =
tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
let chunks = match join_res {
Ok(Ok(chunks)) => chunks,
Ok(Err(message)) => {
return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
}
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::InternalError,
"SelectObjectContent execution failed",
));
}
};
let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
for chunk in chunks {
events.push(Bytes::from(encode_select_event("Records", &chunk)));
}
let stats_payload = build_stats_xml(0, bytes_returned);
events.push(Bytes::from(encode_select_event(
"Stats",
stats_payload.as_bytes(),
)));
events.push(Bytes::from(encode_select_event("End", b"")));
let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
let body = Body::from_stream(stream);
let mut response = (StatusCode::OK, body).into_response();
response.headers_mut().insert(
HeaderName::from_static("content-type"),
"application/octet-stream".parse().unwrap(),
);
response.headers_mut().insert(
HeaderName::from_static("x-amz-request-charged"),
"requester".parse().unwrap(),
);
response
}
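// A minimal request body this handler accepts (sketch):
//
//     <SelectObjectContentRequest>
//         <Expression>SELECT * FROM s3object LIMIT 10</Expression>
//         <ExpressionType>SQL</ExpressionType>
//         <InputSerialization><CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV></InputSerialization>
//         <OutputSerialization><JSON/></OutputSerialization>
//     </SelectObjectContentRequest>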
#[derive(Clone)]
struct SelectRequest {
expression: String,
input_format: InputFormat,
output_format: OutputFormat,
}
#[derive(Clone)]
enum InputFormat {
Csv(CsvInputConfig),
Json(JsonInputConfig),
Parquet,
}
#[derive(Clone)]
struct CsvInputConfig {
file_header_info: String,
field_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonInputConfig {
json_type: String,
}
#[derive(Clone)]
enum OutputFormat {
Csv(CsvOutputConfig),
Json(JsonOutputConfig),
}
#[derive(Clone)]
struct CsvOutputConfig {
field_delimiter: String,
record_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonOutputConfig {
record_delimiter: String,
}
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
let xml = String::from_utf8_lossy(payload);
let doc = roxmltree::Document::parse(&xml)
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
let root = doc.root_element();
if root.tag_name().name() != "SelectObjectContentRequest" {
return Err(S3Error::new(
S3ErrorCode::MalformedXML,
"Root element must be SelectObjectContentRequest",
));
}
let expression = child_text(&root, "Expression")
.filter(|v| !v.is_empty())
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
if !expression_type.eq_ignore_ascii_case("SQL") {
return Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"Only SQL expression type is supported",
));
}
let input_node = child(&root, "InputSerialization").ok_or_else(|| {
S3Error::new(
S3ErrorCode::InvalidRequest,
"InputSerialization is required",
)
})?;
let output_node = child(&root, "OutputSerialization").ok_or_else(|| {
S3Error::new(
S3ErrorCode::InvalidRequest,
"OutputSerialization is required",
)
})?;
let input_format = parse_input_format(&input_node)?;
let output_format = parse_output_format(&output_node)?;
Ok(SelectRequest {
expression,
input_format,
output_format,
})
}
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(InputFormat::Csv(CsvInputConfig {
file_header_info: child_text(&csv_node, "FileHeaderInfo")
.unwrap_or_else(|| "NONE".to_string())
.to_ascii_uppercase(),
field_delimiter: child_text(&csv_node, "FieldDelimiter")
.unwrap_or_else(|| ",".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter")
.unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(InputFormat::Json(JsonInputConfig {
json_type: child_text(&json_node, "Type")
.unwrap_or_else(|| "DOCUMENT".to_string())
.to_ascii_uppercase(),
}));
}
if child(node, "Parquet").is_some() {
return Ok(InputFormat::Parquet);
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"InputSerialization must specify CSV, JSON, or Parquet",
))
}
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(OutputFormat::Csv(CsvOutputConfig {
field_delimiter: child_text(&csv_node, "FieldDelimiter")
.unwrap_or_else(|| ",".to_string()),
record_delimiter: child_text(&csv_node, "RecordDelimiter")
.unwrap_or_else(|| "\n".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter")
.unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(OutputFormat::Json(JsonOutputConfig {
record_delimiter: child_text(&json_node, "RecordDelimiter")
.unwrap_or_else(|| "\n".to_string()),
}));
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"OutputSerialization must specify CSV or JSON",
))
}
fn child<'a, 'input>(
node: &'a roxmltree::Node<'a, 'input>,
name: &str,
) -> Option<roxmltree::Node<'a, 'input>> {
node.children()
.find(|n| n.is_element() && n.tag_name().name() == name)
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
child(node, name)
.and_then(|n| n.text())
.map(|s| s.to_string())
}
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
let conn =
Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;
load_input_table(&conn, &path, &request.input_format)?;
let expression = request
.expression
.replace("s3object", "data")
.replace("S3Object", "data");
let mut stmt = conn
.prepare(&expression)
.map_err(|e| format!("SQL execution error: {}", e))?;
let mut rows = stmt
.query([])
.map_err(|e| format!("SQL execution error: {}", e))?;
let stmt_ref = rows
.as_ref()
.ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
let col_count = stmt_ref.column_count();
let mut columns: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let name = stmt_ref
.column_name(i)
.map(|s| s.to_string())
.unwrap_or_else(|_| format!("_{}", i));
columns.push(name);
}
match request.output_format {
OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
}
}
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
let path_str = path.to_string_lossy().replace('\\', "/");
match input {
InputFormat::Csv(cfg) => {
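// S3 semantics: USE consumes the header row and exposes its names, while
// IGNORE skips the row without naming columns. Mapping both to header=true is
// an approximation that also names columns in the IGNORE case.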
let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
let quote = normalize_single_char(&cfg.quote_character, '"');
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
sql_escape(&path_str),
if header { "true" } else { "false" },
sql_escape(&delimiter),
sql_escape(&quote)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
}
InputFormat::Json(cfg) => {
let format = if cfg.json_type == "LINES" {
"newline_delimited"
} else {
"array"
};
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
sql_escape(&path_str),
format
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
}
InputFormat::Parquet => {
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
sql_escape(&path_str)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
}
}
Ok(())
}
fn sql_escape(value: &str) -> String {
value.replace('\'', "''")
}
fn normalize_single_char(value: &str, default_char: char) -> String {
value.chars().next().unwrap_or(default_char).to_string()
}
fn collect_csv_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
cfg: CsvOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let delimiter = cfg.field_delimiter;
let record_delimiter = cfg.record_delimiter;
let quote = cfg.quote_character;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows
.next()
.map_err(|e| format!("SQL execution error: {}", e))?
{
let mut fields: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
if matches!(value, ValueRef::Null) {
fields.push(String::new());
continue;
}
let mut text = value_ref_to_string(value);
if text.contains(&delimiter)
|| text.contains(&quote)
|| text.contains(&record_delimiter)
{
text = text.replace(&quote, &(quote.clone() + &quote));
text = format!("{}{}{}", quote, text, quote);
}
fields.push(text);
}
buffer.push_str(&fields.join(&delimiter));
buffer.push_str(&record_delimiter);
while buffer.len() >= CHUNK_SIZE {
// `split_off` panics off a char boundary, so back up to the nearest one.
let mut split_at = CHUNK_SIZE;
while !buffer.is_char_boundary(split_at) {
split_at -= 1;
}
let rest = buffer.split_off(split_at);
chunks.push(buffer.into_bytes());
buffer = rest;
}
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn collect_json_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
columns: &[String],
cfg: JsonOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let record_delimiter = cfg.record_delimiter;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows
.next()
.map_err(|e| format!("SQL execution error: {}", e))?
{
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
let key = columns.get(i).cloned().unwrap_or_else(|| format!("_{}", i));
record.insert(key, value_ref_to_json(value));
}
let line = serde_json::to_string(&record)
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
buffer.push_str(&line);
buffer.push_str(&record_delimiter);
while buffer.len() >= CHUNK_SIZE {
// `split_off` panics off a char boundary, so back up to the nearest one.
let mut split_at = CHUNK_SIZE;
while !buffer.is_char_boundary(split_at) {
split_at -= 1;
}
let rest = buffer.split_off(split_at);
chunks.push(buffer.into_bytes());
buffer = rest;
}
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn value_ref_to_string(value: ValueRef<'_>) -> String {
match value {
ValueRef::Null => String::new(),
ValueRef::Boolean(v) => v.to_string(),
ValueRef::TinyInt(v) => v.to_string(),
ValueRef::SmallInt(v) => v.to_string(),
ValueRef::Int(v) => v.to_string(),
ValueRef::BigInt(v) => v.to_string(),
ValueRef::UTinyInt(v) => v.to_string(),
ValueRef::USmallInt(v) => v.to_string(),
ValueRef::UInt(v) => v.to_string(),
ValueRef::UBigInt(v) => v.to_string(),
ValueRef::Float(v) => v.to_string(),
ValueRef::Double(v) => v.to_string(),
ValueRef::Decimal(v) => v.to_string(),
ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
_ => format!("{:?}", value),
}
}
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
match value {
ValueRef::Null => serde_json::Value::Null,
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
ValueRef::TinyInt(v) => serde_json::json!(v),
ValueRef::SmallInt(v) => serde_json::json!(v),
ValueRef::Int(v) => serde_json::json!(v),
ValueRef::BigInt(v) => serde_json::json!(v),
ValueRef::UTinyInt(v) => serde_json::json!(v),
ValueRef::USmallInt(v) => serde_json::json!(v),
ValueRef::UInt(v) => serde_json::json!(v),
ValueRef::UBigInt(v) => serde_json::json!(v),
ValueRef::Float(v) => serde_json::json!(v),
ValueRef::Double(v) => serde_json::json!(v),
ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
ValueRef::Blob(v) => {
serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v))
}
_ => serde_json::Value::String(format!("{:?}", value)),
}
}
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
let value = headers
.get("content-type")
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.trim();
if value.is_empty() {
return None;
}
let lowered = value.to_ascii_lowercase();
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
return None;
}
Some(s3_error_response(S3Error::new(
S3ErrorCode::InvalidRequest,
"Content-Type must be application/xml or text/xml",
)))
}
fn s3_error_response(err: S3Error) -> Response {
let status =
StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let resource = if err.resource.is_empty() {
"/".to_string()
} else {
err.resource.clone()
};
let body = err
.with_resource(resource)
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
.to_xml();
(status, [("content-type", "application/xml")], body).into_response()
}
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
format!(
"<Stats><BytesScanned>{}</BytesScanned><BytesProcessed>{}</BytesProcessed><BytesReturned>{}</BytesReturned></Stats>",
bytes_scanned,
bytes_scanned,
bytes_returned
)
}
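// AWS event-stream framing: big-endian [total_len u32][headers_len u32]
// [prelude CRC32][headers][payload][message CRC32]; each header is encoded as
// name-length, name, type 7 (string), value-length, value.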
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
let mut headers = Vec::new();
headers.extend(encode_select_header(":event-type", event_type));
if event_type == "Records" {
headers.extend(encode_select_header(
":content-type",
"application/octet-stream",
));
} else if event_type == "Stats" {
headers.extend(encode_select_header(":content-type", "text/xml"));
}
headers.extend(encode_select_header(":message-type", "event"));
let headers_len = headers.len() as u32;
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
let mut message = Vec::with_capacity(total_len);
let mut prelude = Vec::with_capacity(8);
prelude.extend((total_len as u32).to_be_bytes());
prelude.extend(headers_len.to_be_bytes());
let prelude_crc = crc32(&prelude);
message.extend(prelude);
message.extend(prelude_crc.to_be_bytes());
message.extend(headers);
message.extend(payload);
let msg_crc = crc32(&message);
message.extend(msg_crc.to_be_bytes());
message
}
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
let name_bytes = name.as_bytes();
let value_bytes = value.as_bytes();
let mut header = Vec::with_capacity(1 + name_bytes.len() + 1 + 2 + value_bytes.len());
header.push(name_bytes.len() as u8);
header.extend(name_bytes);
header.push(7);
header.extend((value_bytes.len() as u16).to_be_bytes());
header.extend(value_bytes);
header
}
fn crc32(data: &[u8]) -> u32 {
let mut hasher = Hasher::new();
hasher.update(data);
hasher.finalize()
}

View File

@@ -1,210 +0,0 @@
use std::collections::HashMap;
use std::error::Error as StdError;
use axum::extract::{Extension, Form, State};
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::{IntoResponse, Redirect, Response};
use tera::Context;
use crate::middleware::session::SessionHandle;
use crate::session::FlashMessage;
use crate::state::AppState;
pub async fn login_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
if session.read(|s| s.is_authenticated()) {
return Redirect::to("/ui/buckets").into_response();
}
let mut ctx = base_context(&session, None);
let flashed = session.write(|s| s.take_flash());
inject_flash(&mut ctx, flashed);
render(&state, "login.html", &ctx)
}
#[derive(serde::Deserialize)]
pub struct LoginForm {
pub access_key: String,
pub secret_key: String,
#[serde(default)]
pub csrf_token: String,
#[serde(default)]
pub next: Option<String>,
}
pub async fn login_submit(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
Form(form): Form<LoginForm>,
) -> Response {
let access_key = form.access_key.trim();
let secret_key = form.secret_key.trim();
match state.iam.get_secret_key(access_key) {
Some(expected) if constant_time_eq_str(&expected, secret_key) => {
let display = state
.iam
.get_user(access_key)
.await
.and_then(|v| {
v.get("display_name")
.and_then(|d| d.as_str())
.map(|s| s.to_string())
})
.unwrap_or_else(|| access_key.to_string());
session.write(|s| {
s.user_id = Some(access_key.to_string());
s.display_name = Some(display);
s.rotate_csrf();
s.push_flash("success", "Signed in successfully.");
});
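// Restrict post-login redirects to in-app paths to avoid open redirects.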
let next = form
.next
.as_deref()
.filter(|n| n.starts_with("/ui/") || *n == "/ui")
.unwrap_or("/ui/buckets")
.to_string();
Redirect::to(&next).into_response()
}
_ => {
session.write(|s| {
s.push_flash("danger", "Invalid access key or secret key.");
});
Redirect::to("/login").into_response()
}
}
}
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
session.write(|s| {
s.user_id = None;
s.display_name = None;
s.flash.clear();
s.rotate_csrf();
s.push_flash("info", "Signed out.");
});
Redirect::to("/login").into_response()
}
pub async fn csrf_error_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
let ctx = base_context(&session, None);
let mut resp = render(&state, "csrf_error.html", &ctx);
*resp.status_mut() = StatusCode::FORBIDDEN;
resp
}
pub async fn root_redirect() -> Response {
Redirect::to("/ui/buckets").into_response()
}
pub async fn not_found_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
let ctx = base_context(&session, None);
let mut resp = render(&state, "404.html", &ctx);
*resp.status_mut() = StatusCode::NOT_FOUND;
resp
}
pub async fn require_login(
Extension(session): Extension<SessionHandle>,
req: axum::extract::Request,
next: axum::middleware::Next,
) -> Response {
if session.read(|s| s.is_authenticated()) {
return next.run(req).await;
}
let path = req.uri().path().to_string();
let query = req
.uri()
.query()
.map(|q| format!("?{}", q))
.unwrap_or_default();
let next_url = format!("{}{}", path, query);
let encoded =
percent_encoding::utf8_percent_encode(&next_url, percent_encoding::NON_ALPHANUMERIC)
.to_string();
let target = format!("/login?next={}", encoded);
Redirect::to(&target).into_response()
}
pub fn render(state: &AppState, template: &str, ctx: &Context) -> Response {
let engine = match &state.templates {
Some(e) => e,
None => {
return (
StatusCode::INTERNAL_SERVER_ERROR,
"Templates not configured",
)
.into_response();
}
};
match engine.render(template, ctx) {
Ok(html) => {
let mut headers = HeaderMap::new();
headers.insert(
header::CONTENT_TYPE,
"text/html; charset=utf-8".parse().unwrap(),
);
(StatusCode::OK, headers, html).into_response()
}
Err(e) => {
let mut detail = format!("{}", e);
let mut src = StdError::source(&e);
while let Some(s) = src {
detail.push_str(" | ");
detail.push_str(&s.to_string());
src = s.source();
}
tracing::error!("Template render failed ({}): {}", template, detail);
let fallback_ctx = Context::new();
let body = if template != "500.html" {
engine
.render("500.html", &fallback_ctx)
.unwrap_or_else(|_| "Internal Server Error".to_string())
} else {
"Internal Server Error".to_string()
};
let mut headers = HeaderMap::new();
headers.insert(
header::CONTENT_TYPE,
"text/html; charset=utf-8".parse().unwrap(),
);
(StatusCode::INTERNAL_SERVER_ERROR, headers, body).into_response()
}
}
}
pub fn base_context(session: &SessionHandle, endpoint: Option<&str>) -> Context {
let mut ctx = Context::new();
let snapshot = session.snapshot();
ctx.insert("csrf_token_value", &snapshot.csrf_token);
ctx.insert("is_authenticated", &snapshot.user_id.is_some());
ctx.insert("current_user", &snapshot.user_id);
ctx.insert("current_user_display_name", &snapshot.display_name);
ctx.insert("current_endpoint", &endpoint.unwrap_or(""));
ctx.insert("request_args", &HashMap::<String, String>::new());
ctx.insert("null", &serde_json::Value::Null);
ctx.insert("none", &serde_json::Value::Null);
ctx
}
pub fn inject_flash(ctx: &mut Context, flashed: Vec<FlashMessage>) {
ctx.insert("flashed_messages", &flashed);
}
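// The early length check leaks only the length; the byte comparison itself is
// constant-time via `subtle`.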
fn constant_time_eq_str(a: &str, b: &str) -> bool {
if a.len() != b.len() {
return false;
}
subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}

File diff suppressed because it is too large

View File

@@ -1,559 +0,0 @@
pub mod config;
pub mod handlers;
pub mod middleware;
pub mod services;
pub mod session;
pub mod state;
pub mod stores;
pub mod templates;
use axum::Router;
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
pub fn create_ui_router(state: state::AppState) -> Router {
use axum::routing::{delete, get, post, put};
use handlers::ui;
use handlers::ui_api;
use handlers::ui_pages;
let protected = Router::new()
.route("/", get(ui::root_redirect))
.route("/ui", get(ui::root_redirect))
.route("/ui/", get(ui::root_redirect))
.route(
"/ui/buckets",
get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
)
.route("/ui/buckets/create", post(ui_pages::create_bucket))
.route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
.route(
"/ui/buckets/{bucket_name}/delete",
post(ui_pages::delete_bucket),
)
.route(
"/ui/buckets/{bucket_name}/versioning",
post(ui_pages::update_bucket_versioning),
)
.route(
"/ui/buckets/{bucket_name}/quota",
post(ui_pages::update_bucket_quota),
)
.route(
"/ui/buckets/{bucket_name}/encryption",
post(ui_pages::update_bucket_encryption),
)
.route(
"/ui/buckets/{bucket_name}/policy",
post(ui_pages::update_bucket_policy),
)
.route(
"/ui/buckets/{bucket_name}/replication",
post(ui_pages::update_bucket_replication),
)
.route(
"/ui/buckets/{bucket_name}/website",
post(ui_pages::update_bucket_website),
)
.route(
"/ui/buckets/{bucket_name}/upload",
post(ui_api::upload_object),
)
.route(
"/ui/buckets/{bucket_name}/multipart/initiate",
post(ui_api::initiate_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
post(ui_api::complete_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/objects",
get(ui_api::list_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/stream",
get(ui_api::stream_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/folders",
get(ui_api::list_bucket_folders),
)
.route(
"/ui/buckets/{bucket_name}/copy-targets",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/list-for-copy",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-delete",
post(ui_api::bulk_delete_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-download",
post(ui_api::bulk_download_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/{*rest}",
get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
)
.route(
"/ui/buckets/{bucket_name}/acl",
get(ui_api::bucket_acl).post(ui_api::update_bucket_acl),
)
.route(
"/ui/buckets/{bucket_name}/cors",
get(ui_api::bucket_cors).post(ui_api::update_bucket_cors),
)
.route(
"/ui/buckets/{bucket_name}/lifecycle",
get(ui_api::bucket_lifecycle).post(ui_api::update_bucket_lifecycle),
)
.route(
"/ui/buckets/{bucket_name}/lifecycle/history",
get(ui_api::lifecycle_history),
)
.route(
"/ui/buckets/{bucket_name}/replication/status",
get(ui_api::replication_status),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures",
get(ui_api::replication_failures).delete(ui_api::clear_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/retry",
post(ui_api::retry_replication_failure),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/retry-all",
post(ui_api::retry_all_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/dismiss",
delete(ui_api::dismiss_replication_failure),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/clear",
delete(ui_api::clear_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/{*rest}",
post(ui_api::retry_replication_failure_path)
.delete(ui_api::dismiss_replication_failure_path),
)
.route(
"/ui/buckets/{bucket_name}/bulk-delete",
post(ui_api::bulk_delete_objects),
)
.route(
"/ui/buckets/{bucket_name}/bulk-download",
post(ui_api::bulk_download_objects),
)
.route(
"/ui/buckets/{bucket_name}/archived",
get(ui_api::archived_objects),
)
.route(
"/ui/buckets/{bucket_name}/archived/{*rest}",
post(ui_api::archived_post_dispatch),
)
.route("/ui/iam", get(ui_pages::iam_dashboard))
.route("/ui/iam/users", post(ui_pages::create_iam_user))
.route("/ui/iam/users/{user_id}", post(ui_pages::update_iam_user))
.route(
"/ui/iam/users/{user_id}/delete",
post(ui_pages::delete_iam_user),
)
.route(
"/ui/iam/users/{user_id}/update",
post(ui_pages::update_iam_user),
)
.route(
"/ui/iam/users/{user_id}/policies",
post(ui_pages::update_iam_policies),
)
.route(
"/ui/iam/users/{user_id}/expiry",
post(ui_pages::update_iam_expiry),
)
.route(
"/ui/iam/users/{user_id}/rotate-secret",
post(ui_pages::rotate_iam_secret),
)
.route(
"/ui/iam/users/{user_id}/rotate",
post(ui_pages::rotate_iam_secret),
)
.route("/ui/connections/create", post(ui_pages::create_connection))
.route("/ui/connections/test", post(ui_api::test_connection))
.route(
"/ui/connections/{connection_id}",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/update",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/delete",
post(ui_pages::delete_connection),
)
.route(
"/ui/connections/{connection_id}/health",
get(ui_api::connection_health),
)
.route("/ui/sites", get(ui_pages::sites_dashboard))
.route("/ui/sites/local", post(ui_pages::update_local_site))
.route("/ui/sites/peers", post(ui_pages::add_peer_site))
.route(
"/ui/sites/peers/{site_id}/update",
post(ui_pages::update_peer_site),
)
.route(
"/ui/sites/peers/{site_id}/delete",
post(ui_pages::delete_peer_site),
)
.route("/ui/sites/peers/{site_id}/health", get(ui_api::peer_health))
.route(
"/ui/sites/peers/{site_id}/sync-stats",
get(ui_api::peer_sync_stats),
)
.route(
"/ui/sites/peers/{site_id}/bidirectional-status",
get(ui_api::peer_bidirectional_status),
)
.route(
"/ui/connections",
get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
)
.route("/ui/metrics", get(ui_pages::metrics_dashboard))
.route(
"/ui/metrics/settings",
get(ui_api::metrics_settings).put(ui_api::update_metrics_settings),
)
.route("/ui/metrics/api", get(ui_api::metrics_api))
.route("/ui/metrics/history", get(ui_api::metrics_history))
.route("/ui/metrics/operations", get(ui_api::metrics_operations))
.route(
"/ui/metrics/operations/history",
get(ui_api::metrics_operations_history),
)
.route("/ui/system", get(ui_pages::system_dashboard))
.route("/ui/system/gc/status", get(ui_api::gc_status_ui))
.route("/ui/system/gc/run", post(ui_api::gc_run_ui))
.route("/ui/system/gc/history", get(ui_api::gc_history_ui))
.route(
"/ui/system/integrity/status",
get(ui_api::integrity_status_ui),
)
.route("/ui/system/integrity/run", post(ui_api::integrity_run_ui))
.route(
"/ui/system/integrity/history",
get(ui_api::integrity_history_ui),
)
.route(
"/ui/website-domains",
get(ui_pages::website_domains_dashboard),
)
.route(
"/ui/website-domains/create",
post(ui_pages::create_website_domain),
)
.route(
"/ui/website-domains/{domain}",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/update",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/delete",
post(ui_pages::delete_website_domain),
)
.route("/ui/replication/new", get(ui_pages::replication_wizard))
.route(
"/ui/replication/create",
post(ui_pages::create_peer_replication_rules_from_query),
)
.route(
"/ui/sites/peers/{site_id}/replication-rules",
post(ui_pages::create_peer_replication_rules),
)
.route("/ui/docs", get(ui_pages::docs_page))
.layer(axum::middleware::from_fn(ui::require_login));
let public = Router::new()
.route("/login", get(ui::login_page).post(ui::login_submit))
.route("/logout", post(ui::logout).get(ui::logout))
.route("/csrf-error", get(ui::csrf_error_page));
let session_state = middleware::SessionLayerState {
store: state.sessions.clone(),
secure: false,
};
let static_service = tower_http::services::ServeDir::new(&state.config.static_dir);
protected
.merge(public)
.fallback(ui::not_found_page)
.layer(axum::middleware::from_fn(middleware::csrf_layer))
.layer(axum::middleware::from_fn_with_state(
session_state,
middleware::session_layer,
))
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::ui_metrics_layer,
))
.with_state(state)
.nest_service("/static", static_service)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(tower_http::compression::CompressionLayer::new())
}
pub fn create_router(state: state::AppState) -> Router {
let mut router = Router::new()
.route("/myfsio/health", axum::routing::get(handlers::health_check))
.route("/", axum::routing::get(handlers::list_buckets))
.route(
"/{bucket}",
axum::routing::put(handlers::create_bucket)
.get(handlers::get_bucket)
.delete(handlers::delete_bucket)
.head(handlers::head_bucket)
.post(handlers::post_bucket),
)
.route(
"/{bucket}/",
axum::routing::put(handlers::create_bucket)
.get(handlers::get_bucket)
.delete(handlers::delete_bucket)
.head(handlers::head_bucket)
.post(handlers::post_bucket),
)
.route(
"/{bucket}/{*key}",
axum::routing::put(handlers::put_object)
.get(handlers::get_object)
.delete(handlers::delete_object)
.head(handlers::head_object)
.post(handlers::post_object),
);
if state.config.kms_enabled {
router = router
.route(
"/kms/keys",
axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
)
.route(
"/kms/keys/{key_id}",
axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key),
)
.route(
"/kms/keys/{key_id}/enable",
axum::routing::post(handlers::kms::enable_key),
)
.route(
"/kms/keys/{key_id}/disable",
axum::routing::post(handlers::kms::disable_key),
)
.route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
.route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
.route(
"/kms/generate-data-key",
axum::routing::post(handlers::kms::generate_data_key),
)
.route(
"/kms/generate-data-key-without-plaintext",
axum::routing::post(handlers::kms::generate_data_key_without_plaintext),
)
.route(
"/kms/re-encrypt",
axum::routing::post(handlers::kms::re_encrypt),
)
.route(
"/kms/generate-random",
axum::routing::post(handlers::kms::generate_random),
)
.route(
"/kms/client/generate-key",
axum::routing::post(handlers::kms::client_generate_key),
)
.route(
"/kms/client/encrypt",
axum::routing::post(handlers::kms::client_encrypt),
)
.route(
"/kms/client/decrypt",
axum::routing::post(handlers::kms::client_decrypt),
)
.route(
"/kms/materials/{key_id}",
axum::routing::post(handlers::kms::materials),
);
}
router = router
.route(
"/admin/site",
axum::routing::get(handlers::admin::get_local_site)
.put(handlers::admin::update_local_site),
)
.route(
"/admin/sites",
axum::routing::get(handlers::admin::list_all_sites)
.post(handlers::admin::register_peer_site),
)
.route(
"/admin/sites/{site_id}",
axum::routing::get(handlers::admin::get_peer_site)
.put(handlers::admin::update_peer_site)
.delete(handlers::admin::delete_peer_site),
)
.route(
"/admin/sites/{site_id}/health",
axum::routing::get(handlers::admin::check_peer_health)
.post(handlers::admin::check_peer_health),
)
.route(
"/admin/sites/{site_id}/bidirectional-status",
axum::routing::get(handlers::admin::check_bidirectional_status),
)
.route(
"/admin/topology",
axum::routing::get(handlers::admin::get_topology),
)
.route(
"/admin/site/local",
axum::routing::get(handlers::admin::get_local_site)
.put(handlers::admin::update_local_site),
)
.route(
"/admin/site/all",
axum::routing::get(handlers::admin::list_all_sites),
)
.route(
"/admin/site/peers",
axum::routing::post(handlers::admin::register_peer_site),
)
.route(
"/admin/site/peers/{site_id}",
axum::routing::get(handlers::admin::get_peer_site)
.put(handlers::admin::update_peer_site)
.delete(handlers::admin::delete_peer_site),
)
.route(
"/admin/site/peers/{site_id}/health",
axum::routing::post(handlers::admin::check_peer_health),
)
.route(
"/admin/site/topology",
axum::routing::get(handlers::admin::get_topology),
)
.route(
"/admin/site/peers/{site_id}/bidirectional-status",
axum::routing::get(handlers::admin::check_bidirectional_status),
)
.route(
"/admin/iam/users",
axum::routing::get(handlers::admin::iam_list_users),
)
.route(
"/admin/iam/users/{identifier}",
axum::routing::get(handlers::admin::iam_get_user),
)
.route(
"/admin/iam/users/{identifier}/policies",
axum::routing::get(handlers::admin::iam_get_user_policies),
)
.route(
"/admin/iam/users/{identifier}/access-keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/access-keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/disable",
axum::routing::post(handlers::admin::iam_disable_user),
)
.route(
"/admin/iam/users/{identifier}/enable",
axum::routing::post(handlers::admin::iam_enable_user),
)
.route(
"/admin/website-domains",
axum::routing::get(handlers::admin::list_website_domains)
.post(handlers::admin::create_website_domain),
)
.route(
"/admin/website-domains/{domain}",
axum::routing::get(handlers::admin::get_website_domain)
.put(handlers::admin::update_website_domain)
.delete(handlers::admin::delete_website_domain),
)
.route(
"/admin/gc/status",
axum::routing::get(handlers::admin::gc_status),
)
.route(
"/admin/gc/run",
axum::routing::post(handlers::admin::gc_run),
)
.route(
"/admin/gc/history",
axum::routing::get(handlers::admin::gc_history),
)
.route(
"/admin/integrity/status",
axum::routing::get(handlers::admin::integrity_status),
)
.route(
"/admin/integrity/run",
axum::routing::post(handlers::admin::integrity_run),
)
.route(
"/admin/integrity/history",
axum::routing::get(handlers::admin::integrity_history),
);
router
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(tower_http::compression::CompressionLayer::new())
.with_state(state)
}
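A minimal in-process sketch of exercising the API router. It assumes the `tower` crate's ServiceExt (for `oneshot`) is available as a dev-dependency and that the health endpoint is reachable without credentials; neither assumption comes from this diff.
#[cfg(test)]
mod router_sketch {
    use super::*;
    use axum::body::Body;
    use axum::http::{Request, StatusCode};
    use tower::ServiceExt; // assumed dev-dependency; provides `oneshot`

    #[tokio::test]
    async fn health_endpoint_responds() {
        // AppState::new / ServerConfig::from_env as used in main.rs.
        let state = state::AppState::new(config::ServerConfig::from_env());
        let app = create_router(state);
        let resp = app
            .oneshot(
                Request::builder()
                    .uri("/myfsio/health")
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(resp.status(), StatusCode::OK);
    }
}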

View File

@@ -1,426 +0,0 @@
use clap::{Parser, Subcommand};
use myfsio_server::config::ServerConfig;
use myfsio_server::state::AppState;
#[derive(Parser)]
#[command(
name = "myfsio",
version,
about = "MyFSIO S3-compatible storage engine"
)]
struct Cli {
#[arg(long, help = "Validate configuration and exit")]
check_config: bool,
#[arg(long, help = "Show configuration summary and exit")]
show_config: bool,
#[arg(long, help = "Reset admin credentials and exit")]
reset_cred: bool,
#[command(subcommand)]
command: Option<Command>,
}
#[derive(Subcommand)]
enum Command {
Serve,
Version,
}
#[tokio::main]
async fn main() {
load_env_files();
tracing_subscriber::fmt::init();
let cli = Cli::parse();
let config = ServerConfig::from_env();
if cli.reset_cred {
reset_admin_credentials(&config);
return;
}
if cli.check_config || cli.show_config {
print_config_summary(&config);
if cli.check_config {
let issues = validate_config(&config);
for issue in &issues {
println!("{issue}");
}
if issues.iter().any(|issue| issue.starts_with("CRITICAL:")) {
std::process::exit(1);
}
}
return;
}
match cli.command.unwrap_or(Command::Serve) {
Command::Version => {
println!("myfsio {}", env!("CARGO_PKG_VERSION"));
return;
}
Command::Serve => {}
}
ensure_iam_bootstrap(&config);
let bind_addr = config.bind_addr;
let ui_bind_addr = config.ui_bind_addr;
tracing::info!("MyFSIO Rust Engine starting — API on {}", bind_addr);
if config.ui_enabled {
tracing::info!("UI will bind on {}", ui_bind_addr);
}
tracing::info!("Storage root: {}", config.storage_root.display());
tracing::info!("Region: {}", config.region);
tracing::info!(
"Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics History: {}, Operation Metrics: {}, UI: {}",
config.encryption_enabled,
config.kms_enabled,
config.gc_enabled,
config.lifecycle_enabled,
config.integrity_enabled,
config.metrics_history_enabled,
config.metrics_enabled,
config.ui_enabled
);
let state = if config.encryption_enabled || config.kms_enabled {
AppState::new_with_encryption(config.clone()).await
} else {
AppState::new(config.clone())
};
let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();
if let Some(ref gc) = state.gc {
bg_handles.push(gc.clone().start_background());
tracing::info!("GC background service started");
}
if let Some(ref integrity) = state.integrity {
bg_handles.push(integrity.clone().start_background());
tracing::info!("Integrity checker background service started");
}
if let Some(ref metrics) = state.metrics {
bg_handles.push(metrics.clone().start_background());
tracing::info!("Metrics collector background service started");
}
if let Some(ref system_metrics) = state.system_metrics {
bg_handles.push(system_metrics.clone().start_background());
tracing::info!("System metrics history collector started");
}
if config.lifecycle_enabled {
let lifecycle =
std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
state.storage.clone(),
config.storage_root.clone(),
myfsio_server::services::lifecycle::LifecycleConfig::default(),
));
bg_handles.push(lifecycle.start_background());
tracing::info!("Lifecycle manager background service started");
}
if let Some(ref site_sync) = state.site_sync {
let worker = site_sync.clone();
bg_handles.push(tokio::spawn(async move {
worker.run().await;
}));
tracing::info!("Site sync worker started");
}
let ui_enabled = config.ui_enabled;
let api_app = myfsio_server::create_router(state.clone());
let ui_app = if ui_enabled {
Some(myfsio_server::create_ui_router(state.clone()))
} else {
None
};
let api_listener = match tokio::net::TcpListener::bind(bind_addr).await {
Ok(listener) => listener,
Err(err) => {
if err.kind() == std::io::ErrorKind::AddrInUse {
tracing::error!("API port already in use: {}", bind_addr);
} else {
tracing::error!("Failed to bind API {}: {}", bind_addr, err);
}
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
};
tracing::info!("API listening on {}", bind_addr);
    let ui_listener = if ui_app.is_some() {
        match tokio::net::TcpListener::bind(ui_bind_addr).await {
Ok(listener) => {
tracing::info!("UI listening on {}", ui_bind_addr);
Some(listener)
}
Err(err) => {
if err.kind() == std::io::ErrorKind::AddrInUse {
tracing::error!("UI port already in use: {}", ui_bind_addr);
} else {
tracing::error!("Failed to bind UI {}: {}", ui_bind_addr, err);
}
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
}
} else {
None
};
let shutdown = shutdown_signal_shared();
let api_shutdown = shutdown.clone();
let api_task = tokio::spawn(async move {
axum::serve(api_listener, api_app)
.with_graceful_shutdown(async move {
api_shutdown.notified().await;
})
.await
});
let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
let ui_shutdown = shutdown.clone();
Some(tokio::spawn(async move {
axum::serve(listener, app)
.with_graceful_shutdown(async move {
ui_shutdown.notified().await;
})
.await
}))
} else {
None
};
tokio::signal::ctrl_c()
.await
.expect("Failed to listen for Ctrl+C");
tracing::info!("Shutdown signal received");
shutdown.notify_waiters();
if let Err(err) = api_task.await.unwrap_or(Ok(())) {
tracing::error!("API server exited with error: {}", err);
}
if let Some(task) = ui_task {
if let Err(err) = task.await.unwrap_or(Ok(())) {
tracing::error!("UI server exited with error: {}", err);
}
}
for handle in bg_handles {
handle.abort();
}
}
fn print_config_summary(config: &ServerConfig) {
println!("MyFSIO Rust Configuration");
println!("Version: {}", env!("CARGO_PKG_VERSION"));
println!("API bind: {}", config.bind_addr);
println!("UI bind: {}", config.ui_bind_addr);
println!("UI enabled: {}", config.ui_enabled);
println!("Storage root: {}", config.storage_root.display());
println!("IAM config: {}", config.iam_config_path.display());
println!("Region: {}", config.region);
println!("Encryption enabled: {}", config.encryption_enabled);
println!("KMS enabled: {}", config.kms_enabled);
println!("GC enabled: {}", config.gc_enabled);
println!("Integrity enabled: {}", config.integrity_enabled);
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
println!(
"Website hosting enabled: {}",
config.website_hosting_enabled
);
println!("Site sync enabled: {}", config.site_sync_enabled);
println!(
"Metrics history enabled: {}",
config.metrics_history_enabled
);
println!("Operation metrics enabled: {}", config.metrics_enabled);
}
fn validate_config(config: &ServerConfig) -> Vec<String> {
let mut issues = Vec::new();
if config.ui_enabled && config.bind_addr == config.ui_bind_addr {
issues.push(
"CRITICAL: API and UI bind addresses cannot be identical when UI is enabled."
.to_string(),
);
}
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
}
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
issues.push(format!(
"CRITICAL: Cannot create storage root {}: {}",
config.storage_root.display(),
err
));
}
if let Some(parent) = config.iam_config_path.parent() {
if let Err(err) = std::fs::create_dir_all(parent) {
issues.push(format!(
"CRITICAL: Cannot create IAM config directory {}: {}",
parent.display(),
err
));
}
}
if config.encryption_enabled && config.secret_key.is_none() {
issues.push(
"WARNING: ENCRYPTION_ENABLED=true but SECRET_KEY is not configured; secure-at-rest config encryption is unavailable.".to_string(),
);
}
    if config.site_sync_enabled && !config.website_hosting_enabled {
        issues.push(
            "INFO: SITE_SYNC_ENABLED=true while WEBSITE_HOSTING_ENABLED=false; this is valid, as the two features are independent.".to_string(),
        );
    }
issues
}
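/// Shared notifier used to fan out the Ctrl+C shutdown signal to both the API
/// and UI serve tasks.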
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
std::sync::Arc::new(tokio::sync::Notify::new())
}
fn load_env_files() {
let cwd = std::env::current_dir().ok();
let mut candidates: Vec<std::path::PathBuf> = Vec::new();
candidates.push(std::path::PathBuf::from("/opt/myfsio/myfsio.env"));
if let Some(ref dir) = cwd {
candidates.push(dir.join(".env"));
candidates.push(dir.join("myfsio.env"));
for ancestor in dir.ancestors().skip(1).take(4) {
candidates.push(ancestor.join(".env"));
candidates.push(ancestor.join("myfsio.env"));
}
}
let mut seen = std::collections::HashSet::new();
for path in candidates {
if !seen.insert(path.clone()) {
continue;
}
if path.is_file() {
match dotenvy::from_path_override(&path) {
Ok(()) => eprintln!("Loaded env file: {}", path.display()),
Err(e) => eprintln!("Failed to load env file {}: {}", path.display(), e),
}
}
}
}
fn ensure_iam_bootstrap(config: &ServerConfig) {
let iam_path = &config.iam_config_path;
if iam_path.exists() {
return;
}
let access_key = std::env::var("ADMIN_ACCESS_KEY")
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.unwrap_or_else(|| format!("AK{}", uuid::Uuid::new_v4().simple()));
let secret_key = std::env::var("ADMIN_SECRET_KEY")
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.unwrap_or_else(|| format!("SK{}", uuid::Uuid::new_v4().simple()));
let user_id = format!("u-{}", &uuid::Uuid::new_v4().simple().to_string()[..16]);
let created_at = chrono::Utc::now().to_rfc3339();
let body = serde_json::json!({
"version": 2,
"users": [{
"user_id": user_id,
"display_name": "Local Admin",
"enabled": true,
"access_keys": [{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": created_at,
}],
"policies": [{
"bucket": "*",
"actions": ["*"],
"prefix": "*",
}]
}]
});
let json = match serde_json::to_string_pretty(&body) {
Ok(s) => s,
Err(e) => {
tracing::error!("Failed to serialize IAM bootstrap config: {}", e);
return;
}
};
if let Some(parent) = iam_path.parent() {
if let Err(e) = std::fs::create_dir_all(parent) {
tracing::error!(
"Failed to create IAM config dir {}: {}",
parent.display(),
e
);
return;
}
}
if let Err(e) = std::fs::write(iam_path, json) {
tracing::error!(
"Failed to write IAM bootstrap config {}: {}",
iam_path.display(),
e
);
return;
}
tracing::info!("============================================================");
tracing::info!("MYFSIO - ADMIN CREDENTIALS INITIALIZED");
tracing::info!("============================================================");
tracing::info!("Access Key: {}", access_key);
tracing::info!("Secret Key: {}", secret_key);
tracing::info!("Saved to: {}", iam_path.display());
tracing::info!("============================================================");
}
fn reset_admin_credentials(config: &ServerConfig) {
if let Some(parent) = config.iam_config_path.parent() {
if let Err(err) = std::fs::create_dir_all(parent) {
eprintln!(
"Failed to create IAM config directory {}: {}",
parent.display(),
err
);
std::process::exit(1);
}
}
if config.iam_config_path.exists() {
let backup = config
.iam_config_path
.with_extension(format!("bak-{}", chrono::Utc::now().timestamp()));
if let Err(err) = std::fs::rename(&config.iam_config_path, &backup) {
eprintln!(
"Failed to back up existing IAM config {}: {}",
config.iam_config_path.display(),
err
);
std::process::exit(1);
}
println!("Backed up existing IAM config to {}", backup.display());
}
ensure_iam_bootstrap(config);
println!("Admin credentials reset.");
}
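A hedged sketch of the bootstrap flow above: with ADMIN_ACCESS_KEY and ADMIN_SECRET_KEY unset, ensure_iam_bootstrap generates an AK/SK pair and writes the version-2 users file shown earlier. The path below is illustrative only.
fn bootstrap_sketch() {
    let mut config = ServerConfig::from_env();
    // Illustrative path; in production this comes from the environment.
    config.iam_config_path = std::path::PathBuf::from("/tmp/myfsio-demo/iam.json");
    ensure_iam_bootstrap(&config); // no-op when the file already exists
    assert!(config.iam_config_path.exists());
}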

File diff suppressed because it is too large

View File

@@ -1,85 +0,0 @@
mod auth;
pub mod session;
pub use auth::auth_layer;
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
use axum::extract::{Request, State};
use axum::middleware::Next;
use axum::response::Response;
use std::time::Instant;
use crate::state::AppState;
pub async fn server_header(req: Request, next: Next) -> Response {
let mut resp = next.run(req).await;
resp.headers_mut()
.insert("server", crate::SERVER_HEADER.parse().unwrap());
resp
}
pub async fn ui_metrics_layer(State(state): State<AppState>, req: Request, next: Next) -> Response {
let metrics = match state.metrics.clone() {
Some(m) => m,
None => return next.run(req).await,
};
let start = Instant::now();
let method = req.method().clone();
let path = req.uri().path().to_string();
let endpoint_type = classify_ui_endpoint(&path);
let bytes_in = req
.headers()
.get(axum::http::header::CONTENT_LENGTH)
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(0);
let response = next.run(req).await;
let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
let status = response.status().as_u16();
let bytes_out = response
.headers()
.get(axum::http::header::CONTENT_LENGTH)
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(0);
let error_code = if status >= 400 { Some("UIError") } else { None };
metrics.record_request(
method.as_str(),
endpoint_type,
status,
latency_ms,
bytes_in,
bytes_out,
error_code,
);
response
}
fn classify_ui_endpoint(path: &str) -> &'static str {
if path.contains("/upload") {
"ui_upload"
} else if path.starts_with("/ui/buckets/") {
"ui_bucket"
} else if path.starts_with("/ui/iam") {
"ui_iam"
} else if path.starts_with("/ui/sites") {
"ui_sites"
} else if path.starts_with("/ui/connections") {
"ui_connections"
} else if path.starts_with("/ui/metrics") {
"ui_metrics"
} else if path.starts_with("/ui/system") {
"ui_system"
} else if path.starts_with("/ui/website-domains") {
"ui_website_domains"
} else if path.starts_with("/ui/replication") {
"ui_replication"
} else if path.starts_with("/login") || path.starts_with("/logout") {
"ui_auth"
} else {
"ui_other"
}
}
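A brief test sketch, not from the source, pinning down the classifier's precedence: the substring check for /upload wins before the prefix checks.
#[cfg(test)]
mod classify_sketch {
    use super::*;

    #[test]
    fn classifies_by_first_matching_rule() {
        assert_eq!(classify_ui_endpoint("/ui/buckets/media/upload"), "ui_upload");
        assert_eq!(classify_ui_endpoint("/ui/buckets/media"), "ui_bucket");
        assert_eq!(classify_ui_endpoint("/login"), "ui_auth");
        assert_eq!(classify_ui_endpoint("/ui/docs"), "ui_other");
    }
}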

View File

@@ -1,228 +0,0 @@
use std::sync::Arc;
use axum::extract::{Request, State};
use axum::http::{header, HeaderValue, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use cookie::{Cookie, SameSite};
use parking_lot::Mutex;
use crate::session::{
csrf_tokens_match, SessionData, SessionStore, CSRF_FIELD_NAME, CSRF_HEADER_NAME,
SESSION_COOKIE_NAME,
};
#[derive(Clone)]
pub struct SessionLayerState {
pub store: Arc<SessionStore>,
pub secure: bool,
}
#[derive(Clone)]
pub struct SessionHandle {
pub id: String,
inner: Arc<Mutex<SessionData>>,
dirty: Arc<Mutex<bool>>,
}
impl SessionHandle {
pub fn new(id: String, data: SessionData) -> Self {
Self {
id,
inner: Arc::new(Mutex::new(data)),
dirty: Arc::new(Mutex::new(false)),
}
}
pub fn read<R>(&self, f: impl FnOnce(&SessionData) -> R) -> R {
let guard = self.inner.lock();
f(&guard)
}
pub fn write<R>(&self, f: impl FnOnce(&mut SessionData) -> R) -> R {
let mut guard = self.inner.lock();
let out = f(&mut guard);
*self.dirty.lock() = true;
out
}
pub fn snapshot(&self) -> SessionData {
self.inner.lock().clone()
}
pub fn is_dirty(&self) -> bool {
*self.dirty.lock()
}
}
pub async fn session_layer(
State(state): State<SessionLayerState>,
mut req: Request,
next: Next,
) -> Response {
let cookie_id = extract_session_cookie(&req);
let (session_id, session_data, is_new) =
match cookie_id.and_then(|id| state.store.get(&id).map(|data| (id.clone(), data))) {
Some((id, data)) => (id, data, false),
None => {
let (id, data) = state.store.create();
(id, data, true)
}
};
let handle = SessionHandle::new(session_id.clone(), session_data);
req.extensions_mut().insert(handle.clone());
let mut resp = next.run(req).await;
if handle.is_dirty() {
state.store.save(&handle.id, handle.snapshot());
}
if is_new {
let cookie = build_session_cookie(&session_id, state.secure);
if let Ok(value) = HeaderValue::from_str(&cookie.to_string()) {
resp.headers_mut().append(header::SET_COOKIE, value);
}
}
resp
}
pub async fn csrf_layer(req: Request, next: Next) -> Response {
const CSRF_HEADER_ALIAS: &str = "x-csrftoken";
let method = req.method().clone();
let needs_check = matches!(
method,
axum::http::Method::POST
| axum::http::Method::PUT
| axum::http::Method::PATCH
| axum::http::Method::DELETE
);
if !needs_check {
return next.run(req).await;
}
let is_ui = req.uri().path().starts_with("/ui/")
|| req.uri().path() == "/ui"
|| req.uri().path() == "/login"
|| req.uri().path() == "/logout";
if !is_ui {
return next.run(req).await;
}
let handle = match req.extensions().get::<SessionHandle>() {
Some(h) => h.clone(),
None => return (StatusCode::FORBIDDEN, "Missing session").into_response(),
};
let expected = handle.read(|s| s.csrf_token.clone());
let header_token = req
.headers()
.get(CSRF_HEADER_NAME)
.or_else(|| req.headers().get(CSRF_HEADER_ALIAS))
.and_then(|v| v.to_str().ok())
.map(|s| s.to_string());
if let Some(token) = header_token.as_deref() {
if csrf_tokens_match(&expected, token) {
return next.run(req).await;
}
}
let content_type = req
.headers()
.get(header::CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.to_string();
let (parts, body) = req.into_parts();
let bytes = match axum::body::to_bytes(body, usize::MAX).await {
Ok(b) => b,
Err(_) => return (StatusCode::BAD_REQUEST, "Body read failed").into_response(),
};
let form_token = if content_type.starts_with("application/x-www-form-urlencoded") {
extract_form_token(&bytes)
} else if content_type.starts_with("multipart/form-data") {
extract_multipart_token(&content_type, &bytes)
} else {
None
};
if let Some(token) = form_token {
if csrf_tokens_match(&expected, &token) {
let req = Request::from_parts(parts, axum::body::Body::from(bytes));
return next.run(req).await;
}
}
tracing::warn!(
path = %parts.uri.path(),
content_type = %content_type,
expected_len = expected.len(),
header_present = header_token.is_some(),
"CSRF token mismatch"
);
(StatusCode::FORBIDDEN, "Invalid CSRF token").into_response()
}
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
    let boundary = multer::parse_boundary(content_type).ok()?;
    let text = std::str::from_utf8(body).ok()?;
    // Locate the csrf_token part, skip its headers, and read the value up to
    // the next boundary marker.
    let needle = "name=\"csrf_token\"";
    let idx = text.find(needle)?;
    let after = &text[idx + needle.len()..];
    let body_start = after.find("\r\n\r\n")? + 4;
    let tail = &after[body_start..];
    let end = tail
        .find(&format!("\r\n--{}", boundary))
        .or_else(|| tail.find("\r\n--"))
        .unwrap_or(tail.len());
    Some(tail[..end].trim().to_string())
}
fn extract_session_cookie(req: &Request) -> Option<String> {
let raw = req.headers().get(header::COOKIE)?.to_str().ok()?;
for pair in raw.split(';') {
if let Ok(cookie) = Cookie::parse(pair.trim().to_string()) {
if cookie.name() == SESSION_COOKIE_NAME {
return Some(cookie.value().to_string());
}
}
}
None
}
fn build_session_cookie(id: &str, secure: bool) -> Cookie<'static> {
let mut cookie = Cookie::new(SESSION_COOKIE_NAME, id.to_string());
cookie.set_http_only(true);
cookie.set_same_site(SameSite::Lax);
cookie.set_secure(secure);
cookie.set_path("/");
cookie
}
fn extract_form_token(body: &[u8]) -> Option<String> {
let text = std::str::from_utf8(body).ok()?;
let prefix = format!("{}=", CSRF_FIELD_NAME);
for pair in text.split('&') {
if let Some(rest) = pair.strip_prefix(&prefix) {
return urldecode(rest);
}
}
None
}
fn urldecode(s: &str) -> Option<String> {
percent_encoding::percent_decode_str(&s.replace('+', " "))
.decode_utf8()
.ok()
.map(|c| c.into_owned())
}
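Two small test sketches for the decoding helpers above. The second assumes CSRF_FIELD_NAME is "csrf_token", matching the field name hardcoded by the multipart extractor.
#[cfg(test)]
mod form_decoding_sketch {
    use super::*;

    #[test]
    fn decodes_plus_and_percent_escapes() {
        assert_eq!(urldecode("a%20b+c"), Some("a b c".to_string()));
    }

    #[test]
    fn finds_token_in_urlencoded_body() {
        // Assumption: CSRF_FIELD_NAME == "csrf_token".
        let body = b"other=1&csrf_token=tok%2D123";
        assert_eq!(extract_form_token(body), Some("tok-123".to_string()));
    }
}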

View File

@@ -1,105 +0,0 @@
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfiguration {
pub target_bucket: String,
#[serde(default)]
pub target_prefix: String,
#[serde(default = "default_enabled")]
pub enabled: bool,
}
fn default_enabled() -> bool {
true
}
#[derive(Serialize, Deserialize)]
struct StoredLoggingFile {
#[serde(rename = "LoggingEnabled")]
logging_enabled: Option<StoredLoggingEnabled>,
}
#[derive(Serialize, Deserialize)]
struct StoredLoggingEnabled {
#[serde(rename = "TargetBucket")]
target_bucket: String,
#[serde(rename = "TargetPrefix", default)]
target_prefix: String,
}
pub struct AccessLoggingService {
storage_root: PathBuf,
cache: RwLock<HashMap<String, Option<LoggingConfiguration>>>,
}
impl AccessLoggingService {
pub fn new(storage_root: &Path) -> Self {
Self {
storage_root: storage_root.to_path_buf(),
cache: RwLock::new(HashMap::new()),
}
}
fn config_path(&self, bucket: &str) -> PathBuf {
self.storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket)
.join("logging.json")
}
pub fn get(&self, bucket: &str) -> Option<LoggingConfiguration> {
if let Some(cached) = self.cache.read().get(bucket).cloned() {
return cached;
}
let path = self.config_path(bucket);
let config = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str::<StoredLoggingFile>(&s).ok())
.and_then(|f| f.logging_enabled)
.map(|e| LoggingConfiguration {
target_bucket: e.target_bucket,
target_prefix: e.target_prefix,
enabled: true,
})
} else {
None
};
self.cache
.write()
.insert(bucket.to_string(), config.clone());
config
}
pub fn set(&self, bucket: &str, config: LoggingConfiguration) -> std::io::Result<()> {
let path = self.config_path(bucket);
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)?;
}
let stored = StoredLoggingFile {
logging_enabled: Some(StoredLoggingEnabled {
target_bucket: config.target_bucket.clone(),
target_prefix: config.target_prefix.clone(),
}),
};
let json = serde_json::to_string_pretty(&stored)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
std::fs::write(&path, json)?;
self.cache.write().insert(bucket.to_string(), Some(config));
Ok(())
}
pub fn delete(&self, bucket: &str) {
let path = self.config_path(bucket);
if path.exists() {
let _ = std::fs::remove_file(&path);
}
self.cache.write().insert(bucket.to_string(), None);
}
}
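A hedged usage sketch of AccessLoggingService: the stored JSON mirrors the TargetBucket/TargetPrefix shape of S3's BucketLoggingStatus, and the paths below are illustrative.
fn logging_sketch() -> std::io::Result<()> {
    let svc = AccessLoggingService::new(std::path::Path::new("/data/storage"));
    svc.set(
        "media",
        LoggingConfiguration {
            target_bucket: "logs".to_string(),
            target_prefix: "media/".to_string(),
            enabled: true,
        },
    )?;
    assert!(svc.get("media").is_some()); // served from the in-memory cache
    svc.delete("media"); // removes logging.json and negatively caches the bucket
    Ok(())
}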

View File

@@ -1,276 +0,0 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
pub const ACL_METADATA_KEY: &str = "__acl__";
pub const GRANTEE_ALL_USERS: &str = "*";
pub const GRANTEE_AUTHENTICATED_USERS: &str = "authenticated";
const ACL_PERMISSION_FULL_CONTROL: &str = "FULL_CONTROL";
const ACL_PERMISSION_WRITE: &str = "WRITE";
const ACL_PERMISSION_WRITE_ACP: &str = "WRITE_ACP";
const ACL_PERMISSION_READ: &str = "READ";
const ACL_PERMISSION_READ_ACP: &str = "READ_ACP";
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AclGrant {
pub grantee: String,
pub permission: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Acl {
pub owner: String,
#[serde(default)]
pub grants: Vec<AclGrant>,
}
impl Acl {
pub fn allowed_actions(
&self,
principal_id: Option<&str>,
is_authenticated: bool,
) -> HashSet<&'static str> {
let mut actions = HashSet::new();
if let Some(principal_id) = principal_id {
if principal_id == self.owner {
actions.extend(permission_to_actions(ACL_PERMISSION_FULL_CONTROL));
}
}
for grant in &self.grants {
if grant.grantee == GRANTEE_ALL_USERS {
actions.extend(permission_to_actions(&grant.permission));
} else if grant.grantee == GRANTEE_AUTHENTICATED_USERS && is_authenticated {
actions.extend(permission_to_actions(&grant.permission));
} else if let Some(principal_id) = principal_id {
if grant.grantee == principal_id {
actions.extend(permission_to_actions(&grant.permission));
}
}
}
actions
}
}
pub fn create_canned_acl(canned_acl: &str, owner: &str) -> Acl {
let owner_grant = AclGrant {
grantee: owner.to_string(),
permission: ACL_PERMISSION_FULL_CONTROL.to_string(),
};
match canned_acl {
"public-read" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
],
},
"public-read-write" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_WRITE.to_string(),
},
],
},
"authenticated-read" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_AUTHENTICATED_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
],
},
"bucket-owner-read" | "bucket-owner-full-control" | "private" | _ => Acl {
owner: owner.to_string(),
grants: vec![owner_grant],
},
}
}
pub fn acl_to_xml(acl: &Acl) -> String {
let mut xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Owner><ID>{}</ID><DisplayName>{}</DisplayName></Owner>\
<AccessControlList>",
xml_escape(&acl.owner),
xml_escape(&acl.owner),
);
for grant in &acl.grants {
xml.push_str("<Grant>");
match grant.grantee.as_str() {
GRANTEE_ALL_USERS => {
xml.push_str(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
<URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>\
</Grantee>",
);
}
GRANTEE_AUTHENTICATED_USERS => {
xml.push_str(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
<URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>\
</Grantee>",
);
}
other => {
xml.push_str(&format!(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
<ID>{}</ID><DisplayName>{}</DisplayName>\
</Grantee>",
xml_escape(other),
xml_escape(other),
));
}
}
xml.push_str(&format!(
"<Permission>{}</Permission></Grant>",
xml_escape(&grant.permission)
));
}
xml.push_str("</AccessControlList></AccessControlPolicy>");
xml
}
pub fn acl_from_bucket_config(value: &Value) -> Option<Acl> {
match value {
Value::String(raw) => acl_from_xml(raw).or_else(|| serde_json::from_str(raw).ok()),
Value::Object(_) => serde_json::from_value(value.clone()).ok(),
_ => None,
}
}
pub fn acl_from_object_metadata(metadata: &HashMap<String, String>) -> Option<Acl> {
metadata
.get(ACL_METADATA_KEY)
.and_then(|raw| serde_json::from_str::<Acl>(raw).ok())
}
pub fn store_object_acl(metadata: &mut HashMap<String, String>, acl: &Acl) {
if let Ok(serialized) = serde_json::to_string(acl) {
metadata.insert(ACL_METADATA_KEY.to_string(), serialized);
}
}
fn acl_from_xml(xml: &str) -> Option<Acl> {
let doc = roxmltree::Document::parse(xml).ok()?;
let owner = doc
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Owner")
.and_then(|node| {
node.children()
.find(|child| child.is_element() && child.tag_name().name() == "ID")
.and_then(|child| child.text())
})
.unwrap_or("myfsio")
.trim()
.to_string();
let mut grants = Vec::new();
for grant in doc
.descendants()
.filter(|node| node.is_element() && node.tag_name().name() == "Grant")
{
let permission = grant
.children()
.find(|child| child.is_element() && child.tag_name().name() == "Permission")
.and_then(|child| child.text())
.unwrap_or_default()
.trim()
.to_string();
if permission.is_empty() {
continue;
}
let grantee_node = grant
.children()
.find(|child| child.is_element() && child.tag_name().name() == "Grantee");
let grantee = grantee_node
.and_then(|node| {
let uri = node
.children()
.find(|child| child.is_element() && child.tag_name().name() == "URI")
.and_then(|child| child.text())
.map(|text| text.trim().to_string());
match uri.as_deref() {
Some("http://acs.amazonaws.com/groups/global/AllUsers") => {
Some(GRANTEE_ALL_USERS.to_string())
}
Some("http://acs.amazonaws.com/groups/global/AuthenticatedUsers") => {
Some(GRANTEE_AUTHENTICATED_USERS.to_string())
}
_ => node
.children()
.find(|child| child.is_element() && child.tag_name().name() == "ID")
.and_then(|child| child.text())
.map(|text| text.trim().to_string()),
}
})
.unwrap_or_default();
if grantee.is_empty() {
continue;
}
grants.push(AclGrant {
grantee,
permission,
});
}
Some(Acl { owner, grants })
}
fn permission_to_actions(permission: &str) -> &'static [&'static str] {
match permission {
ACL_PERMISSION_FULL_CONTROL => &["read", "write", "delete", "list", "share"],
ACL_PERMISSION_WRITE => &["write", "delete"],
ACL_PERMISSION_WRITE_ACP => &["share"],
ACL_PERMISSION_READ => &["read", "list"],
ACL_PERMISSION_READ_ACP => &["share"],
_ => &[],
}
}
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn canned_acl_grants_public_read() {
let acl = create_canned_acl("public-read", "owner");
let actions = acl.allowed_actions(None, false);
assert!(actions.contains("read"));
assert!(actions.contains("list"));
assert!(!actions.contains("write"));
}
#[test]
fn xml_round_trip_preserves_grants() {
let acl = create_canned_acl("authenticated-read", "owner");
let parsed = acl_from_bucket_config(&Value::String(acl_to_xml(&acl))).unwrap();
assert_eq!(parsed.owner, "owner");
assert_eq!(parsed.grants.len(), 2);
assert!(parsed
.grants
.iter()
.any(|grant| grant.grantee == GRANTEE_AUTHENTICATED_USERS));
}
}
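A further evaluation sketch, not from the source: under authenticated-read, an authenticated non-owner gains read and list, while an anonymous caller gets nothing.
fn acl_evaluation_sketch() {
    let acl = create_canned_acl("authenticated-read", "owner-id");
    let authed = acl.allowed_actions(Some("someone-else"), true);
    assert!(authed.contains("read") && authed.contains("list"));
    let anon = acl.allowed_actions(None, false);
    assert!(anon.is_empty());
}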

View File

@@ -1,286 +0,0 @@
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
pub struct GcConfig {
pub interval_hours: f64,
pub temp_file_max_age_hours: f64,
pub multipart_max_age_days: u64,
pub lock_file_max_age_hours: f64,
pub dry_run: bool,
}
impl Default for GcConfig {
fn default() -> Self {
Self {
interval_hours: 6.0,
temp_file_max_age_hours: 24.0,
multipart_max_age_days: 7,
lock_file_max_age_hours: 1.0,
dry_run: false,
}
}
}
pub struct GcService {
storage_root: PathBuf,
config: GcConfig,
running: Arc<RwLock<bool>>,
started_at: Arc<RwLock<Option<Instant>>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
impl GcService {
pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
let history_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("gc_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage_root,
config,
running: Arc::new(RwLock::new(false)),
started_at: Arc::new(RwLock::new(None)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
let scan_elapsed_seconds = self
.started_at
.read()
.await
.as_ref()
.map(|started| started.elapsed().as_secs_f64());
json!({
"enabled": true,
"running": running,
"scanning": running,
"scan_elapsed_seconds": scan_elapsed_seconds,
"interval_hours": self.config.interval_hours,
"temp_file_max_age_hours": self.config.temp_file_max_age_hours,
"multipart_max_age_days": self.config.multipart_max_age_days,
"lock_file_max_age_hours": self.config.lock_file_max_age_hours,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
let mut executions: Vec<Value> = history.iter().cloned().collect();
executions.reverse();
json!({ "executions": executions })
}
pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("GC already running".to_string());
}
*running = true;
}
*self.started_at.write().await = Some(Instant::now());
let start = Instant::now();
let result = self.execute_gc(dry_run || self.config.dry_run).await;
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
*self.started_at.write().await = None;
let mut result_json = result.clone();
if let Some(obj) = result_json.as_object_mut() {
obj.insert("execution_time_seconds".to_string(), json!(elapsed));
}
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run || self.config.dry_run,
"result": result_json,
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result)
}
async fn execute_gc(&self, dry_run: bool) -> Value {
let mut temp_files_deleted = 0u64;
let mut temp_bytes_freed = 0u64;
let mut multipart_uploads_deleted = 0u64;
let mut lock_files_deleted = 0u64;
let mut empty_dirs_removed = 0u64;
let mut errors: Vec<String> = Vec::new();
let now = std::time::SystemTime::now();
let temp_max_age =
std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
let multipart_max_age =
std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
let lock_max_age =
std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);
let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
if tmp_dir.exists() {
match std::fs::read_dir(&tmp_dir) {
Ok(entries) => {
for entry in entries.flatten() {
if let Ok(metadata) = entry.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > temp_max_age {
let size = metadata.len();
if !dry_run {
if let Err(e) = std::fs::remove_file(entry.path()) {
errors.push(format!(
"Failed to remove temp file: {}",
e
));
continue;
}
}
temp_files_deleted += 1;
temp_bytes_freed += size;
}
}
}
}
}
}
Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
}
}
let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
if multipart_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
for bucket_entry in bucket_dirs.flatten() {
if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
for upload in uploads.flatten() {
if let Ok(metadata) = upload.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > multipart_max_age {
if !dry_run {
let _ = std::fs::remove_dir_all(upload.path());
}
multipart_uploads_deleted += 1;
}
}
}
}
}
}
}
}
}
let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
if buckets_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
for bucket_entry in bucket_dirs.flatten() {
let locks_dir = bucket_entry.path().join("locks");
if locks_dir.exists() {
if let Ok(locks) = std::fs::read_dir(&locks_dir) {
for lock in locks.flatten() {
if let Ok(metadata) = lock.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > lock_max_age {
if !dry_run {
let _ = std::fs::remove_file(lock.path());
}
lock_files_deleted += 1;
}
}
}
}
}
}
}
}
}
}
if !dry_run {
for dir in [&tmp_dir, &multipart_dir] {
if dir.exists() {
if let Ok(entries) = std::fs::read_dir(dir) {
for entry in entries.flatten() {
if entry.path().is_dir() {
if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
if contents.next().is_none() {
let _ = std::fs::remove_dir(entry.path());
empty_dirs_removed += 1;
}
}
}
}
}
}
}
}
json!({
"temp_files_deleted": temp_files_deleted,
"temp_bytes_freed": temp_bytes_freed,
"multipart_uploads_deleted": multipart_uploads_deleted,
"lock_files_deleted": lock_files_deleted,
"empty_dirs_removed": empty_dirs_removed,
"errors": errors,
})
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
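            // interval()'s first tick completes immediately; consume it so the
            // first GC cycle runs a full interval after startup.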
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("GC cycle starting");
match self.run_now(false).await {
Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
Err(e) => tracing::warn!("GC cycle failed: {}", e),
}
}
})
}
}
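A minimal sketch, with illustrative paths, of driving the GC service by hand; run_now(true) forces a dry run that counts candidates without deleting them.
async fn gc_sketch() {
    let gc = std::sync::Arc::new(GcService::new(
        std::path::PathBuf::from("/data/storage"),
        GcConfig::default(),
    ));
    match gc.run_now(true).await {
        Ok(report) => println!("dry-run report: {report}"),
        Err(busy) => eprintln!("{busy}"), // "GC already running"
    }
    // Background mode ticks every interval_hours, skipping the immediate tick.
    let _handle = gc.clone().start_background();
}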

View File

@@ -1,732 +0,0 @@
use myfsio_common::constants::{
BUCKET_META_DIR, BUCKET_VERSIONS_DIR, INDEX_FILE, SYSTEM_BUCKETS_DIR, SYSTEM_ROOT,
};
use myfsio_storage::fs_backend::FsStorageBackend;
use serde_json::{json, Map, Value};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
const MAX_ISSUES: usize = 500;
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
pub struct IntegrityConfig {
pub interval_hours: f64,
pub batch_size: usize,
pub auto_heal: bool,
pub dry_run: bool,
}
impl Default for IntegrityConfig {
fn default() -> Self {
Self {
interval_hours: 24.0,
batch_size: 10_000,
auto_heal: false,
dry_run: false,
}
}
}
pub struct IntegrityService {
#[allow(dead_code)]
storage: Arc<FsStorageBackend>,
storage_root: PathBuf,
config: IntegrityConfig,
running: Arc<RwLock<bool>>,
started_at: Arc<RwLock<Option<Instant>>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
#[derive(Default)]
struct ScanState {
objects_scanned: u64,
buckets_scanned: u64,
corrupted_objects: u64,
orphaned_objects: u64,
phantom_metadata: u64,
stale_versions: u64,
etag_cache_inconsistencies: u64,
issues: Vec<Value>,
errors: Vec<String>,
}
impl ScanState {
fn batch_exhausted(&self, batch_size: usize) -> bool {
self.objects_scanned >= batch_size as u64
}
fn push_issue(&mut self, issue_type: &str, bucket: &str, key: &str, detail: String) {
if self.issues.len() < MAX_ISSUES {
self.issues.push(json!({
"issue_type": issue_type,
"bucket": bucket,
"key": key,
"detail": detail,
}));
}
}
fn into_json(self, elapsed: f64) -> Value {
json!({
"objects_scanned": self.objects_scanned,
"buckets_scanned": self.buckets_scanned,
"corrupted_objects": self.corrupted_objects,
"orphaned_objects": self.orphaned_objects,
"phantom_metadata": self.phantom_metadata,
"stale_versions": self.stale_versions,
"etag_cache_inconsistencies": self.etag_cache_inconsistencies,
"issues_healed": 0,
"issues": self.issues,
"errors": self.errors,
"execution_time_seconds": elapsed,
})
}
}
impl IntegrityService {
pub fn new(
storage: Arc<FsStorageBackend>,
storage_root: &Path,
config: IntegrityConfig,
) -> Self {
let history_path = storage_root
.join(SYSTEM_ROOT)
.join("config")
.join("integrity_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage,
storage_root: storage_root.to_path_buf(),
config,
running: Arc::new(RwLock::new(false)),
started_at: Arc::new(RwLock::new(None)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
let scan_elapsed_seconds = self
.started_at
.read()
.await
.as_ref()
.map(|started| started.elapsed().as_secs_f64());
json!({
"enabled": true,
"running": running,
"scanning": running,
"scan_elapsed_seconds": scan_elapsed_seconds,
"interval_hours": self.config.interval_hours,
"batch_size": self.config.batch_size,
"auto_heal": self.config.auto_heal,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
let mut executions: Vec<Value> = history.iter().cloned().collect();
executions.reverse();
json!({ "executions": executions })
}
pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Integrity check already running".to_string());
}
*running = true;
}
*self.started_at.write().await = Some(Instant::now());
let start = Instant::now();
let storage_root = self.storage_root.clone();
let batch_size = self.config.batch_size;
let result =
tokio::task::spawn_blocking(move || scan_all_buckets(&storage_root, batch_size))
.await
.unwrap_or_else(|e| {
let mut st = ScanState::default();
st.errors.push(format!("scan task failed: {}", e));
st
});
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
*self.started_at.write().await = None;
let result_json = result.into_json(elapsed);
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run,
"auto_heal": auto_heal,
"result": result_json.clone(),
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result_json)
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
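            // Consume the immediate first tick so the first integrity scan
            // waits one full interval after startup.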
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Integrity check starting");
match self.run_now(false, false).await {
Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
Err(e) => tracing::warn!("Integrity check failed: {}", e),
}
}
})
}
}
fn scan_all_buckets(storage_root: &Path, batch_size: usize) -> ScanState {
let mut state = ScanState::default();
let buckets = match list_bucket_names(storage_root) {
Ok(b) => b,
Err(e) => {
state.errors.push(format!("list buckets: {}", e));
return state;
}
};
for bucket in &buckets {
if state.batch_exhausted(batch_size) {
break;
}
state.buckets_scanned += 1;
let bucket_path = storage_root.join(bucket);
let meta_root = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_META_DIR);
let index_entries = collect_index_entries(&meta_root);
check_corrupted(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_phantom(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_orphaned(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_stale_versions(&mut state, storage_root, bucket, batch_size);
check_etag_cache(&mut state, storage_root, bucket, &index_entries, batch_size);
}
state
}
fn list_bucket_names(storage_root: &Path) -> std::io::Result<Vec<String>> {
let mut names = Vec::new();
if !storage_root.exists() {
return Ok(names);
}
for entry in std::fs::read_dir(storage_root)? {
let entry = entry?;
let name = entry.file_name().to_string_lossy().to_string();
if name == SYSTEM_ROOT {
continue;
}
if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
names.push(name);
}
}
Ok(names)
}
#[allow(dead_code)]
struct IndexEntryInfo {
entry: Value,
index_file: PathBuf,
key_name: String,
}
fn collect_index_entries(meta_root: &Path) -> HashMap<String, IndexEntryInfo> {
let mut out: HashMap<String, IndexEntryInfo> = HashMap::new();
if !meta_root.exists() {
return out;
}
let mut stack: Vec<PathBuf> = vec![meta_root.to_path_buf()];
while let Some(dir) = stack.pop() {
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
for entry in rd.flatten() {
let path = entry.path();
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
if ft.is_dir() {
stack.push(path);
continue;
}
if entry.file_name().to_string_lossy() != INDEX_FILE {
continue;
}
let rel_dir = match path.parent().and_then(|p| p.strip_prefix(meta_root).ok()) {
Some(p) => p.to_path_buf(),
None => continue,
};
let dir_prefix = if rel_dir.as_os_str().is_empty() {
String::new()
} else {
rel_dir
.components()
.map(|c| c.as_os_str().to_string_lossy().to_string())
.collect::<Vec<_>>()
.join("/")
};
let content = match std::fs::read_to_string(&path) {
Ok(c) => c,
Err(_) => continue,
};
let index_data: Map<String, Value> = match serde_json::from_str(&content) {
Ok(Value::Object(m)) => m,
_ => continue,
};
for (key_name, entry_val) in index_data {
let full_key = if dir_prefix.is_empty() {
key_name.clone()
} else {
format!("{}/{}", dir_prefix, key_name)
};
out.insert(
full_key,
IndexEntryInfo {
entry: entry_val,
index_file: path.clone(),
key_name,
},
);
}
}
}
out
}
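/// Extracts the ETag recorded under `metadata.__etag__` in an index entry.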
fn stored_etag(entry: &Value) -> Option<String> {
entry
.get("metadata")
.and_then(|m| m.get("__etag__"))
.and_then(|v| v.as_str())
.map(|s| s.to_string())
}
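/// Re-hashes each on-disk object and flags entries whose current MD5 no
/// longer matches the ETag stored in the index.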
fn check_corrupted(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let mut keys: Vec<&String> = entries.keys().collect();
keys.sort();
for full_key in keys {
if state.batch_exhausted(batch_size) {
return;
}
let info = &entries[full_key];
let object_path = bucket_path.join(full_key);
if !object_path.exists() {
continue;
}
state.objects_scanned += 1;
let Some(stored) = stored_etag(&info.entry) else {
continue;
};
match myfsio_crypto::hashing::md5_file(&object_path) {
Ok(actual) => {
if actual != stored {
state.corrupted_objects += 1;
state.push_issue(
"corrupted_object",
bucket,
full_key,
format!("stored_etag={} actual_etag={}", stored, actual),
);
}
}
Err(e) => state
.errors
.push(format!("hash {}/{}: {}", bucket, full_key, e)),
}
}
}
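/// Flags index entries whose backing file no longer exists on disk.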
fn check_phantom(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let mut keys: Vec<&String> = entries.keys().collect();
keys.sort();
for full_key in keys {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
let object_path = bucket_path.join(full_key);
if !object_path.exists() {
state.phantom_metadata += 1;
state.push_issue(
"phantom_metadata",
bucket,
full_key,
"metadata entry without file on disk".to_string(),
);
}
}
}
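/// Walks the bucket directory and flags files that have no index entry,
/// skipping internal folders at the bucket root.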
fn check_orphaned(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let indexed: HashSet<&String> = entries.keys().collect();
let mut stack: Vec<(PathBuf, String)> = vec![(bucket_path.to_path_buf(), String::new())];
while let Some((dir, prefix)) = stack.pop() {
if state.batch_exhausted(batch_size) {
return;
}
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
for entry in rd.flatten() {
if state.batch_exhausted(batch_size) {
return;
}
let name = entry.file_name().to_string_lossy().to_string();
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
if ft.is_dir() {
if prefix.is_empty() && INTERNAL_FOLDERS.contains(&name.as_str()) {
continue;
}
let new_prefix = if prefix.is_empty() {
name
} else {
format!("{}/{}", prefix, name)
};
stack.push((entry.path(), new_prefix));
} else if ft.is_file() {
let full_key = if prefix.is_empty() {
name
} else {
format!("{}/{}", prefix, name)
};
state.objects_scanned += 1;
if !indexed.contains(&full_key) {
state.orphaned_objects += 1;
state.push_issue(
"orphaned_object",
bucket,
&full_key,
"file exists without metadata entry".to_string(),
);
}
}
}
}
}
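/// Flags archived versions whose `.bin` data and `.json` manifest do not pair
/// up: data without a manifest, or a manifest without data.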
fn check_stale_versions(
state: &mut ScanState,
storage_root: &Path,
bucket: &str,
batch_size: usize,
) {
let versions_root = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_VERSIONS_DIR);
if !versions_root.exists() {
return;
}
let mut stack: Vec<PathBuf> = vec![versions_root.clone()];
while let Some(dir) = stack.pop() {
if state.batch_exhausted(batch_size) {
return;
}
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
let mut bin_stems: HashMap<String, PathBuf> = HashMap::new();
let mut json_stems: HashMap<String, PathBuf> = HashMap::new();
let mut subdirs: Vec<PathBuf> = Vec::new();
for entry in rd.flatten() {
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
let path = entry.path();
if ft.is_dir() {
subdirs.push(path);
continue;
}
let name = entry.file_name().to_string_lossy().to_string();
if let Some(stem) = name.strip_suffix(".bin") {
bin_stems.insert(stem.to_string(), path);
} else if let Some(stem) = name.strip_suffix(".json") {
json_stems.insert(stem.to_string(), path);
}
}
for (stem, path) in &bin_stems {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
if !json_stems.contains_key(stem) {
state.stale_versions += 1;
let key = path
.strip_prefix(&versions_root)
.map(|p| p.to_string_lossy().replace('\\', "/"))
.unwrap_or_else(|_| path.display().to_string());
state.push_issue(
"stale_version",
bucket,
&key,
"version data without manifest".to_string(),
);
}
}
for (stem, path) in &json_stems {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
if !bin_stems.contains_key(stem) {
state.stale_versions += 1;
let key = path
.strip_prefix(&versions_root)
.map(|p| p.to_string_lossy().replace('\\', "/"))
.unwrap_or_else(|_| path.display().to_string());
state.push_issue(
"stale_version",
bucket,
&key,
"version manifest without data".to_string(),
);
}
}
stack.extend(subdirs);
}
}
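/// Compares the bucket's `etag_index.json` cache against the index entries
/// and flags keys whose cached ETag disagrees with the indexed one.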
fn check_etag_cache(
state: &mut ScanState,
storage_root: &Path,
bucket: &str,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let etag_index_path = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join("etag_index.json");
if !etag_index_path.exists() {
return;
}
let cache: HashMap<String, Value> = match std::fs::read_to_string(&etag_index_path)
.ok()
.and_then(|s| serde_json::from_str(&s).ok())
{
Some(Value::Object(m)) => m.into_iter().collect(),
_ => return,
};
for (full_key, cached_val) in cache {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
let Some(cached_etag) = cached_val.as_str() else {
continue;
};
let Some(info) = entries.get(&full_key) else {
continue;
};
let Some(stored) = stored_etag(&info.entry) else {
continue;
};
if cached_etag != stored {
state.etag_cache_inconsistencies += 1;
state.push_issue(
"etag_cache_inconsistency",
bucket,
&full_key,
format!("cached_etag={} index_etag={}", cached_etag, stored),
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
fn md5_hex(bytes: &[u8]) -> String {
myfsio_crypto::hashing::md5_bytes(bytes)
}
fn write_index(meta_dir: &Path, entries: &[(&str, &str)]) {
fs::create_dir_all(meta_dir).unwrap();
let mut map = Map::new();
for (name, etag) in entries {
map.insert(
name.to_string(),
json!({ "metadata": { "__etag__": etag } }),
);
}
fs::write(
meta_dir.join(INDEX_FILE),
serde_json::to_string(&Value::Object(map)).unwrap(),
)
.unwrap();
}
#[test]
fn scan_detects_each_issue_type() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
let bucket = "testbucket";
let bucket_path = root.join(bucket);
let meta_root = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_META_DIR);
fs::create_dir_all(&bucket_path).unwrap();
let clean_bytes = b"clean file contents";
let clean_etag = md5_hex(clean_bytes);
fs::write(bucket_path.join("clean.txt"), clean_bytes).unwrap();
let corrupted_bytes = b"actual content";
fs::write(bucket_path.join("corrupted.txt"), corrupted_bytes).unwrap();
fs::write(bucket_path.join("orphan.txt"), b"no metadata").unwrap();
write_index(
&meta_root,
&[
("clean.txt", &clean_etag),
("corrupted.txt", "00000000000000000000000000000000"),
("phantom.txt", "deadbeefdeadbeefdeadbeefdeadbeef"),
],
);
let versions_root = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_VERSIONS_DIR)
.join("someobject");
fs::create_dir_all(&versions_root).unwrap();
fs::write(versions_root.join("v1.bin"), b"orphan bin").unwrap();
fs::write(versions_root.join("v2.json"), b"{}").unwrap();
let etag_index = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join("etag_index.json");
fs::write(
&etag_index,
serde_json::to_string(&json!({ "clean.txt": "stale-cached-etag" })).unwrap(),
)
.unwrap();
let state = scan_all_buckets(root, 10_000);
assert_eq!(state.corrupted_objects, 1, "corrupted");
assert_eq!(state.phantom_metadata, 1, "phantom");
assert_eq!(state.orphaned_objects, 1, "orphaned");
assert_eq!(state.stale_versions, 2, "stale versions");
assert_eq!(state.etag_cache_inconsistencies, 1, "etag cache");
assert_eq!(state.buckets_scanned, 1);
assert!(
state.errors.is_empty(),
"unexpected errors: {:?}",
state.errors
);
}
#[test]
fn skips_system_root_as_bucket() {
let tmp = tempfile::tempdir().unwrap();
fs::create_dir_all(tmp.path().join(SYSTEM_ROOT).join("config")).unwrap();
let state = scan_all_buckets(tmp.path(), 100);
assert_eq!(state.buckets_scanned, 0);
}
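    // Editor's sketch (not in the original file): illustrates how
    // `collect_index_entries` flattens nested index files into full
    // "dir/key" keys. Uses only helpers already defined in this module.
    #[test]
    fn collects_nested_index_entries() {
        let tmp = tempfile::tempdir().unwrap();
        let meta_root = tmp.path().join("meta");
        write_index(&meta_root, &[("root.txt", "etag-a")]);
        write_index(&meta_root.join("logs"), &[("app.log", "etag-b")]);
        let entries = collect_index_entries(&meta_root);
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key("root.txt"));
        assert_eq!(entries["logs/app.log"].key_name, "app.log");
    }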
}


@@ -1,637 +0,0 @@
use chrono::{DateTime, Duration, Utc};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
pub struct LifecycleConfig {
pub interval_seconds: u64,
pub max_history_per_bucket: usize,
}
impl Default for LifecycleConfig {
fn default() -> Self {
Self {
interval_seconds: 3600,
max_history_per_bucket: 50,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleExecutionRecord {
pub timestamp: f64,
pub bucket_name: String,
pub objects_deleted: u64,
pub versions_deleted: u64,
pub uploads_aborted: u64,
#[serde(default)]
pub errors: Vec<String>,
pub execution_time_seconds: f64,
}
#[derive(Debug, Clone, Default)]
struct BucketLifecycleResult {
bucket_name: String,
objects_deleted: u64,
versions_deleted: u64,
uploads_aborted: u64,
errors: Vec<String>,
execution_time_seconds: f64,
}
#[derive(Debug, Clone, Default)]
struct ParsedLifecycleRule {
status: String,
prefix: String,
expiration_days: Option<u64>,
expiration_date: Option<DateTime<Utc>>,
noncurrent_days: Option<u64>,
abort_incomplete_multipart_days: Option<u64>,
}
pub struct LifecycleService {
storage: Arc<FsStorageBackend>,
storage_root: PathBuf,
config: LifecycleConfig,
running: Arc<RwLock<bool>>,
}
impl LifecycleService {
pub fn new(
storage: Arc<FsStorageBackend>,
storage_root: impl Into<PathBuf>,
config: LifecycleConfig,
) -> Self {
Self {
storage,
storage_root: storage_root.into(),
config,
running: Arc::new(RwLock::new(false)),
}
}
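    /// Runs a single lifecycle pass, using the `running` flag to reject
    /// concurrent executions.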
pub async fn run_cycle(&self) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Lifecycle already running".to_string());
}
*running = true;
}
let result = self.evaluate_rules().await;
*self.running.write().await = false;
Ok(result)
}
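    /// Evaluates every bucket's lifecycle rules, applying expiration,
    /// noncurrent-version expiration, and incomplete-multipart abort rules.
    /// Per-bucket history is recorded only for buckets with activity or
    /// errors.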
async fn evaluate_rules(&self) -> Value {
let buckets = match self.storage.list_buckets().await {
Ok(buckets) => buckets,
Err(err) => return json!({ "error": err.to_string() }),
};
let mut bucket_results = Vec::new();
let mut total_objects_deleted = 0u64;
let mut total_versions_deleted = 0u64;
let mut total_uploads_aborted = 0u64;
let mut errors = Vec::new();
for bucket in &buckets {
let started_at = std::time::Instant::now();
let mut result = BucketLifecycleResult {
bucket_name: bucket.name.clone(),
..Default::default()
};
let config = match self.storage.get_bucket_config(&bucket.name).await {
Ok(config) => config,
Err(err) => {
result.errors.push(err.to_string());
result.execution_time_seconds = started_at.elapsed().as_secs_f64();
self.append_history(&result);
errors.extend(result.errors.clone());
bucket_results.push(result);
continue;
}
};
let Some(lifecycle) = config.lifecycle.as_ref() else {
continue;
};
let rules = parse_lifecycle_rules(lifecycle);
if rules.is_empty() {
continue;
}
for rule in &rules {
if rule.status != "Enabled" {
continue;
}
if let Some(err) = self
.apply_expiration_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
if let Some(err) = self
.apply_noncurrent_expiration_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
if let Some(err) = self
.apply_abort_incomplete_multipart_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
}
result.execution_time_seconds = started_at.elapsed().as_secs_f64();
if result.objects_deleted > 0
|| result.versions_deleted > 0
|| result.uploads_aborted > 0
|| !result.errors.is_empty()
{
total_objects_deleted += result.objects_deleted;
total_versions_deleted += result.versions_deleted;
total_uploads_aborted += result.uploads_aborted;
errors.extend(result.errors.clone());
self.append_history(&result);
bucket_results.push(result);
}
}
json!({
"objects_deleted": total_objects_deleted,
"versions_deleted": total_versions_deleted,
"multipart_aborted": total_uploads_aborted,
"buckets_evaluated": buckets.len(),
"results": bucket_results.iter().map(result_to_json).collect::<Vec<_>>(),
"errors": errors,
})
}
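    /// Deletes current objects older than the rule's cutoff (relative `Days`
    /// or absolute `Date`), restricted to the rule's prefix.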
async fn apply_expiration_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let cutoff = if let Some(days) = rule.expiration_days {
Some(Utc::now() - Duration::days(days as i64))
} else {
rule.expiration_date
};
let Some(cutoff) = cutoff else {
return None;
};
let params = myfsio_common::types::ListParams {
max_keys: 10_000,
prefix: if rule.prefix.is_empty() {
None
} else {
Some(rule.prefix.clone())
},
..Default::default()
};
match self.storage.list_objects(bucket, &params).await {
Ok(objects) => {
for object in &objects.objects {
if object.last_modified < cutoff {
if let Err(err) = self.storage.delete_object(bucket, &object.key).await {
result
.errors
.push(format!("{}:{}: {}", bucket, object.key, err));
} else {
result.objects_deleted += 1;
}
}
}
None
}
Err(err) => Some(format!("Failed to list objects for {}: {}", bucket, err)),
}
}
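    /// Walks the bucket's version store and deletes noncurrent versions whose
    /// `archived_at` timestamp predates the cutoff, removing both the
    /// manifest and its data file.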
async fn apply_noncurrent_expiration_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let Some(days) = rule.noncurrent_days else {
return None;
};
let cutoff = Utc::now() - Duration::days(days as i64);
let versions_root = version_root_for_bucket(&self.storage_root, bucket);
if !versions_root.exists() {
return None;
}
let mut stack = VecDeque::from([versions_root]);
while let Some(current) = stack.pop_front() {
let entries = match std::fs::read_dir(&current) {
Ok(entries) => entries,
Err(err) => return Some(err.to_string()),
};
for entry in entries.flatten() {
let file_type = match entry.file_type() {
Ok(file_type) => file_type,
Err(_) => continue,
};
if file_type.is_dir() {
stack.push_back(entry.path());
continue;
}
if entry.path().extension().and_then(|ext| ext.to_str()) != Some("json") {
continue;
}
let contents = match std::fs::read_to_string(entry.path()) {
Ok(contents) => contents,
Err(_) => continue,
};
let Ok(manifest) = serde_json::from_str::<Value>(&contents) else {
continue;
};
let key = manifest
.get("key")
.and_then(|value| value.as_str())
.unwrap_or_default()
.to_string();
if !rule.prefix.is_empty() && !key.starts_with(&rule.prefix) {
continue;
}
let archived_at = manifest
.get("archived_at")
.and_then(|value| value.as_str())
.and_then(|value| DateTime::parse_from_rfc3339(value).ok())
.map(|value| value.with_timezone(&Utc));
                let Some(archived_at) = archived_at else {
                    continue;
                };
                if archived_at >= cutoff {
                    continue;
                }
let version_id = manifest
.get("version_id")
.and_then(|value| value.as_str())
.unwrap_or_default();
let data_path = entry.path().with_file_name(format!("{}.bin", version_id));
let _ = std::fs::remove_file(&data_path);
let _ = std::fs::remove_file(entry.path());
result.versions_deleted += 1;
}
}
None
}
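    /// Aborts multipart uploads that were initiated before the rule's cutoff.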
async fn apply_abort_incomplete_multipart_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let Some(days) = rule.abort_incomplete_multipart_days else {
return None;
};
let cutoff = Utc::now() - Duration::days(days as i64);
match self.storage.list_multipart_uploads(bucket).await {
Ok(uploads) => {
for upload in &uploads {
if upload.initiated < cutoff {
if let Err(err) = self
.storage
.abort_multipart(bucket, &upload.upload_id)
.await
{
result
.errors
.push(format!("abort {}: {}", upload.upload_id, err));
} else {
result.uploads_aborted += 1;
}
}
}
None
}
Err(err) => Some(format!(
"Failed to list multipart uploads for {}: {}",
bucket, err
)),
}
}
fn append_history(&self, result: &BucketLifecycleResult) {
let path = lifecycle_history_path(&self.storage_root, &result.bucket_name);
let mut history = load_history(&path);
history.insert(
0,
LifecycleExecutionRecord {
timestamp: Utc::now().timestamp_millis() as f64 / 1000.0,
bucket_name: result.bucket_name.clone(),
objects_deleted: result.objects_deleted,
versions_deleted: result.versions_deleted,
uploads_aborted: result.uploads_aborted,
errors: result.errors.clone(),
execution_time_seconds: result.execution_time_seconds,
},
);
history.truncate(self.config.max_history_per_bucket);
let payload = json!({
"executions": history,
});
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&path,
serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string()),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_seconds);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Lifecycle evaluation starting");
match self.run_cycle().await {
Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
Err(err) => tracing::warn!("Lifecycle cycle failed: {}", err),
}
}
})
}
}
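/// Returns a paginated view of a bucket's lifecycle execution history, newest
/// records first.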
pub fn read_history(storage_root: &Path, bucket_name: &str, limit: usize, offset: usize) -> Value {
let path = lifecycle_history_path(storage_root, bucket_name);
let mut history = load_history(&path);
let total = history.len();
let executions = history
.drain(offset.min(total)..)
.take(limit)
.collect::<Vec<_>>();
json!({
"executions": executions,
"total": total,
"limit": limit,
"offset": offset,
"enabled": true,
})
}
fn load_history(path: &Path) -> Vec<LifecycleExecutionRecord> {
if !path.exists() {
return Vec::new();
}
std::fs::read_to_string(path)
.ok()
.and_then(|contents| serde_json::from_str::<Value>(&contents).ok())
.and_then(|value| value.get("executions").cloned())
.and_then(|value| serde_json::from_value::<Vec<LifecycleExecutionRecord>>(value).ok())
.unwrap_or_default()
}
fn lifecycle_history_path(storage_root: &Path, bucket_name: &str) -> PathBuf {
storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket_name)
.join("lifecycle_history.json")
}
fn version_root_for_bucket(storage_root: &Path, bucket_name: &str) -> PathBuf {
storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket_name)
.join("versions")
}
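/// Accepts lifecycle configuration as a JSON or XML string, a rule array, or
/// a `{ "Rules": [...] }` object, and normalizes it into parsed rules.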
fn parse_lifecycle_rules(value: &Value) -> Vec<ParsedLifecycleRule> {
match value {
Value::String(raw) => parse_lifecycle_rules_from_string(raw),
Value::Array(items) => items.iter().filter_map(parse_lifecycle_rule).collect(),
Value::Object(map) => map
.get("Rules")
.and_then(|rules| rules.as_array())
.map(|rules| rules.iter().filter_map(parse_lifecycle_rule).collect())
.unwrap_or_default(),
_ => Vec::new(),
}
}
fn parse_lifecycle_rules_from_string(raw: &str) -> Vec<ParsedLifecycleRule> {
if let Ok(json) = serde_json::from_str::<Value>(raw) {
return parse_lifecycle_rules(&json);
}
let Ok(doc) = roxmltree::Document::parse(raw) else {
return Vec::new();
};
doc.descendants()
.filter(|node| node.is_element() && node.tag_name().name() == "Rule")
.map(|rule| ParsedLifecycleRule {
status: child_text(&rule, "Status").unwrap_or_else(|| "Enabled".to_string()),
prefix: child_text(&rule, "Prefix")
.or_else(|| {
rule.descendants()
.find(|node| {
node.is_element()
&& node.tag_name().name() == "Filter"
&& node.children().any(|child| {
child.is_element() && child.tag_name().name() == "Prefix"
})
})
.and_then(|filter| child_text(&filter, "Prefix"))
})
.unwrap_or_default(),
expiration_days: rule
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Expiration")
.and_then(|expiration| child_text(&expiration, "Days"))
.and_then(|value| value.parse::<u64>().ok()),
expiration_date: rule
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Expiration")
.and_then(|expiration| child_text(&expiration, "Date"))
.as_deref()
.and_then(parse_datetime),
noncurrent_days: rule
.descendants()
.find(|node| {
node.is_element() && node.tag_name().name() == "NoncurrentVersionExpiration"
})
.and_then(|node| child_text(&node, "NoncurrentDays"))
.and_then(|value| value.parse::<u64>().ok()),
abort_incomplete_multipart_days: rule
.descendants()
.find(|node| {
node.is_element() && node.tag_name().name() == "AbortIncompleteMultipartUpload"
})
.and_then(|node| child_text(&node, "DaysAfterInitiation"))
.and_then(|value| value.parse::<u64>().ok()),
})
.collect()
}
fn parse_lifecycle_rule(value: &Value) -> Option<ParsedLifecycleRule> {
let map = value.as_object()?;
Some(ParsedLifecycleRule {
status: map
.get("Status")
.and_then(|value| value.as_str())
.unwrap_or("Enabled")
.to_string(),
prefix: map
.get("Prefix")
.and_then(|value| value.as_str())
.or_else(|| {
map.get("Filter")
.and_then(|value| value.get("Prefix"))
.and_then(|value| value.as_str())
})
.unwrap_or_default()
.to_string(),
expiration_days: map
.get("Expiration")
.and_then(|value| value.get("Days"))
.and_then(|value| value.as_u64()),
expiration_date: map
.get("Expiration")
.and_then(|value| value.get("Date"))
.and_then(|value| value.as_str())
.and_then(parse_datetime),
noncurrent_days: map
.get("NoncurrentVersionExpiration")
.and_then(|value| value.get("NoncurrentDays"))
.and_then(|value| value.as_u64()),
abort_incomplete_multipart_days: map
.get("AbortIncompleteMultipartUpload")
.and_then(|value| value.get("DaysAfterInitiation"))
.and_then(|value| value.as_u64()),
})
}
fn parse_datetime(value: &str) -> Option<DateTime<Utc>> {
DateTime::parse_from_rfc3339(value)
.ok()
.map(|value| value.with_timezone(&Utc))
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
node.children()
.find(|child| child.is_element() && child.tag_name().name() == name)
.and_then(|child| child.text())
.map(|text| text.trim().to_string())
.filter(|text| !text.is_empty())
}
fn result_to_json(result: &BucketLifecycleResult) -> Value {
json!({
"bucket_name": result.bucket_name,
"objects_deleted": result.objects_deleted,
"versions_deleted": result.versions_deleted,
"uploads_aborted": result.uploads_aborted,
"errors": result.errors,
"execution_time_seconds": result.execution_time_seconds,
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration;
#[test]
fn parses_rules_from_xml() {
let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<Expiration><Days>10</Days></Expiration>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
<AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
</Rule>
</LifecycleConfiguration>"#;
let rules = parse_lifecycle_rules(&Value::String(xml.to_string()));
assert_eq!(rules.len(), 1);
assert_eq!(rules[0].prefix, "logs/");
assert_eq!(rules[0].expiration_days, Some(10));
assert_eq!(rules[0].noncurrent_days, Some(30));
assert_eq!(rules[0].abort_incomplete_multipart_days, Some(7));
}
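    // Editor's sketch (not in the original file): the same rule expressed in
    // the JSON object form that `parse_lifecycle_rules` also accepts.
    #[test]
    fn parses_rules_from_json_object() {
        let rules = parse_lifecycle_rules(&json!({
            "Rules": [{
                "Status": "Enabled",
                "Filter": { "Prefix": "logs/" },
                "Expiration": { "Days": 10 }
            }]
        }));
        assert_eq!(rules.len(), 1);
        assert_eq!(rules[0].prefix, "logs/");
        assert_eq!(rules[0].expiration_days, Some(10));
    }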
#[tokio::test]
async fn run_cycle_writes_history_and_deletes_noncurrent_versions() {
let tmp = tempfile::tempdir().unwrap();
let storage = Arc::new(FsStorageBackend::new(tmp.path().to_path_buf()));
storage.create_bucket("docs").await.unwrap();
storage.set_versioning("docs", true).await.unwrap();
storage
.put_object(
"docs",
"logs/file.txt",
Box::pin(std::io::Cursor::new(b"old".to_vec())),
None,
)
.await
.unwrap();
storage
.put_object(
"docs",
"logs/file.txt",
Box::pin(std::io::Cursor::new(b"new".to_vec())),
None,
)
.await
.unwrap();
let versions_root = version_root_for_bucket(tmp.path(), "docs")
.join("logs")
.join("file.txt");
let manifest = std::fs::read_dir(&versions_root)
.unwrap()
.flatten()
.find(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json"))
.unwrap()
.path();
let old_manifest = json!({
"version_id": "ver-1",
"key": "logs/file.txt",
"size": 3,
"archived_at": (Utc::now() - Duration::days(45)).to_rfc3339(),
"etag": "etag",
});
std::fs::write(&manifest, serde_json::to_string(&old_manifest).unwrap()).unwrap();
std::fs::write(manifest.with_file_name("ver-1.bin"), b"old").unwrap();
let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
</Rule>
</LifecycleConfiguration>"#;
let mut config = storage.get_bucket_config("docs").await.unwrap();
config.lifecycle = Some(Value::String(lifecycle_xml.to_string()));
storage.set_bucket_config("docs", &config).await.unwrap();
let service =
LifecycleService::new(storage.clone(), tmp.path(), LifecycleConfig::default());
let result = service.run_cycle().await.unwrap();
assert_eq!(result["versions_deleted"], 1);
let history = read_history(tmp.path(), "docs", 50, 0);
assert_eq!(history["total"], 1);
assert_eq!(history["executions"][0]["versions_deleted"], 1);
}
}


@@ -1,368 +0,0 @@
use chrono::{DateTime, Utc};
use parking_lot::Mutex;
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
const MAX_LATENCY_SAMPLES: usize = 5000;
pub struct MetricsConfig {
pub interval_minutes: u64,
pub retention_hours: u64,
}
impl Default for MetricsConfig {
fn default() -> Self {
Self {
interval_minutes: 5,
retention_hours: 24,
}
}
}
#[derive(Debug, Clone)]
struct OperationStats {
count: u64,
success_count: u64,
error_count: u64,
latency_sum_ms: f64,
latency_min_ms: f64,
latency_max_ms: f64,
bytes_in: u64,
bytes_out: u64,
latency_samples: Vec<f64>,
}
impl Default for OperationStats {
fn default() -> Self {
Self {
count: 0,
success_count: 0,
error_count: 0,
latency_sum_ms: 0.0,
latency_min_ms: f64::INFINITY,
latency_max_ms: 0.0,
bytes_in: 0,
bytes_out: 0,
latency_samples: Vec::new(),
}
}
}
impl OperationStats {
fn record(&mut self, latency_ms: f64, success: bool, bytes_in: u64, bytes_out: u64) {
self.count += 1;
if success {
self.success_count += 1;
} else {
self.error_count += 1;
}
self.latency_sum_ms += latency_ms;
if latency_ms < self.latency_min_ms {
self.latency_min_ms = latency_ms;
}
if latency_ms > self.latency_max_ms {
self.latency_max_ms = latency_ms;
}
self.bytes_in += bytes_in;
self.bytes_out += bytes_out;
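        // Reservoir sampling (Algorithm R): once the buffer is full, each new
        // observation replaces a random slot with probability
        // MAX_LATENCY_SAMPLES / count, keeping a uniform sample of the window.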
if self.latency_samples.len() < MAX_LATENCY_SAMPLES {
self.latency_samples.push(latency_ms);
} else {
let mut rng = rand::thread_rng();
let j = rng.gen_range(0..self.count as usize);
if j < MAX_LATENCY_SAMPLES {
self.latency_samples[j] = latency_ms;
}
}
}
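    /// Percentile by linear interpolation between the two nearest ranks of a
    /// pre-sorted slice; returns 0.0 for an empty slice.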
fn compute_percentile(sorted: &[f64], p: f64) -> f64 {
if sorted.is_empty() {
return 0.0;
}
let k = (sorted.len() - 1) as f64 * (p / 100.0);
let f = k.floor() as usize;
let c = (f + 1).min(sorted.len() - 1);
let d = k - f as f64;
sorted[f] + d * (sorted[c] - sorted[f])
}
fn to_json(&self) -> Value {
let avg = if self.count > 0 {
self.latency_sum_ms / self.count as f64
} else {
0.0
};
let min = if self.latency_min_ms.is_infinite() {
0.0
} else {
self.latency_min_ms
};
let mut sorted = self.latency_samples.clone();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
json!({
"count": self.count,
"success_count": self.success_count,
"error_count": self.error_count,
"latency_avg_ms": round2(avg),
"latency_min_ms": round2(min),
"latency_max_ms": round2(self.latency_max_ms),
"latency_p50_ms": round2(Self::compute_percentile(&sorted, 50.0)),
"latency_p95_ms": round2(Self::compute_percentile(&sorted, 95.0)),
"latency_p99_ms": round2(Self::compute_percentile(&sorted, 99.0)),
"bytes_in": self.bytes_in,
"bytes_out": self.bytes_out,
})
}
}
fn round2(v: f64) -> f64 {
(v * 100.0).round() / 100.0
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSnapshot {
pub timestamp: DateTime<Utc>,
pub window_seconds: u64,
pub by_method: HashMap<String, Value>,
pub by_endpoint: HashMap<String, Value>,
pub by_status_class: HashMap<String, u64>,
pub error_codes: HashMap<String, u64>,
pub totals: Value,
}
struct Inner {
by_method: HashMap<String, OperationStats>,
by_endpoint: HashMap<String, OperationStats>,
by_status_class: HashMap<String, u64>,
error_codes: HashMap<String, u64>,
totals: OperationStats,
window_start: f64,
snapshots: Vec<MetricsSnapshot>,
}
pub struct MetricsService {
config: MetricsConfig,
inner: Arc<Mutex<Inner>>,
snapshots_path: PathBuf,
}
impl MetricsService {
pub fn new(storage_root: &Path, config: MetricsConfig) -> Self {
let snapshots_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("operation_metrics.json");
let mut snapshots: Vec<MetricsSnapshot> = if snapshots_path.exists() {
std::fs::read_to_string(&snapshots_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| {
v.get("snapshots").and_then(|s| {
serde_json::from_value::<Vec<MetricsSnapshot>>(s.clone()).ok()
})
})
.unwrap_or_default()
} else {
Vec::new()
};
let cutoff = now_secs() - (config.retention_hours * 3600) as f64;
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
Self {
config,
inner: Arc::new(Mutex::new(Inner {
by_method: HashMap::new(),
by_endpoint: HashMap::new(),
by_status_class: HashMap::new(),
error_codes: HashMap::new(),
totals: OperationStats::default(),
window_start: now_secs(),
snapshots,
})),
snapshots_path,
}
}
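    /// Records one completed request into the live window; any 2xx/3xx status
    /// counts as a success.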
pub fn record_request(
&self,
method: &str,
endpoint_type: &str,
status_code: u16,
latency_ms: f64,
bytes_in: u64,
bytes_out: u64,
error_code: Option<&str>,
) {
let success = (200..400).contains(&status_code);
let status_class = format!("{}xx", status_code / 100);
let mut inner = self.inner.lock();
inner
.by_method
.entry(method.to_string())
.or_default()
.record(latency_ms, success, bytes_in, bytes_out);
inner
.by_endpoint
.entry(endpoint_type.to_string())
.or_default()
.record(latency_ms, success, bytes_in, bytes_out);
*inner.by_status_class.entry(status_class).or_insert(0) += 1;
if let Some(code) = error_code {
*inner.error_codes.entry(code.to_string()).or_insert(0) += 1;
}
inner
.totals
.record(latency_ms, success, bytes_in, bytes_out);
}
pub fn get_current_stats(&self) -> Value {
let inner = self.inner.lock();
let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
let by_method: HashMap<String, Value> = inner
.by_method
.iter()
.map(|(k, v)| (k.clone(), v.to_json()))
.collect();
let by_endpoint: HashMap<String, Value> = inner
.by_endpoint
.iter()
.map(|(k, v)| (k.clone(), v.to_json()))
.collect();
json!({
"timestamp": Utc::now().to_rfc3339(),
"window_seconds": window_seconds,
"by_method": by_method,
"by_endpoint": by_endpoint,
"by_status_class": inner.by_status_class,
"error_codes": inner.error_codes,
"totals": inner.totals.to_json(),
})
}
pub fn get_history(&self, hours: Option<u64>) -> Vec<MetricsSnapshot> {
let inner = self.inner.lock();
let mut snapshots = inner.snapshots.clone();
if let Some(h) = hours {
let cutoff = now_secs() - (h * 3600) as f64;
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
}
snapshots
}
pub fn snapshot(&self) -> Value {
let current = self.get_current_stats();
let history = self.get_history(None);
json!({
"enabled": true,
"current": current,
"snapshots": history,
})
}
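    /// Freezes the current window into a snapshot, prunes snapshots past the
    /// retention horizon, resets the live counters, and persists to disk.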
    fn take_snapshot(&self) {
        {
            let mut inner = self.inner.lock();
            let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
            let by_method: HashMap<String, Value> = inner
                .by_method
                .iter()
                .map(|(k, v)| (k.clone(), v.to_json()))
                .collect();
            let by_endpoint: HashMap<String, Value> = inner
                .by_endpoint
                .iter()
                .map(|(k, v)| (k.clone(), v.to_json()))
                .collect();
            let snap = MetricsSnapshot {
                timestamp: Utc::now(),
                window_seconds,
                by_method,
                by_endpoint,
                by_status_class: inner.by_status_class.clone(),
                error_codes: inner.error_codes.clone(),
                totals: inner.totals.to_json(),
            };
            inner.snapshots.push(snap);
            let cutoff = now_secs() - (self.config.retention_hours * 3600) as f64;
            inner
                .snapshots
                .retain(|s| s.timestamp.timestamp() as f64 > cutoff);
            inner.by_method.clear();
            inner.by_endpoint.clear();
            inner.by_status_class.clear();
            inner.error_codes.clear();
            inner.totals = OperationStats::default();
            inner.window_start = now_secs();
        }
        self.save_snapshots();
    }
fn save_snapshots(&self) {
let snapshots = { self.inner.lock().snapshots.clone() };
if let Some(parent) = self.snapshots_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let data = json!({ "snapshots": snapshots });
let _ = std::fs::write(
&self.snapshots_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
self.take_snapshot();
}
})
}
}
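/// Buckets a request path into a coarse endpoint type ("service", "ui",
/// "kms", "bucket", or "object") for per-endpoint metrics.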
pub fn classify_endpoint(path: &str) -> &'static str {
    if path.is_empty() || path == "/" {
        return "service";
    }
    let trimmed = path.trim_end_matches('/');
    if trimmed.starts_with("/ui") {
        return "ui";
    }
    if trimmed.starts_with("/kms") {
        return "kms";
    }
    if trimmed.starts_with("/myfsio") {
        return "service";
    }
    // `split('/')` always yields at least one element, so filter out empty
    // segments; otherwise a path like "//" would classify as "bucket" and the
    // zero-segment arm below would be unreachable.
    let parts: Vec<&str> = trimmed
        .trim_start_matches('/')
        .split('/')
        .filter(|part| !part.is_empty())
        .collect();
    match parts.len() {
        0 => "service",
        1 => "bucket",
        _ => "object",
    }
}
fn now_secs() -> f64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs_f64())
.unwrap_or(0.0)
}
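// Editor's sketch (not in the original file): illustrative tests for the pure
// helpers above, pinning down the endpoint classification and the
// linear-interpolation percentile.
#[cfg(test)]
mod examples {
    use super::*;
    #[test]
    fn classifies_endpoints() {
        assert_eq!(classify_endpoint("/"), "service");
        assert_eq!(classify_endpoint("/ui/buckets"), "ui");
        assert_eq!(classify_endpoint("/kms/keys"), "kms");
        assert_eq!(classify_endpoint("/mybucket"), "bucket");
        assert_eq!(classify_endpoint("/mybucket/path/to/key"), "object");
    }
    #[test]
    fn interpolates_percentiles() {
        let sorted = [1.0, 2.0, 3.0, 4.0];
        assert_eq!(OperationStats::compute_percentile(&sorted, 50.0), 2.5);
        assert_eq!(OperationStats::compute_percentile(&sorted, 100.0), 4.0);
        assert_eq!(OperationStats::compute_percentile(&[], 95.0), 0.0);
    }
}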

Some files were not shown because too many files have changed in this diff.