Compare commits
20 Commits
9ec5797919
...
v0.2.9
| Author | SHA1 | Date | |
|---|---|---|---|
| 52660570c1 | |||
| 35f61313e0 | |||
| c470cfb576 | |||
| d96955deee | |||
| 85181f0be6 | |||
| d5ca7a8be1 | |||
| 476dc79e42 | |||
| bb6590fc5e | |||
| 899db3421b | |||
| caf01d6ada | |||
| bb366cb4cd | |||
| a2745ff2ee | |||
| 28cb656d94 | |||
| 3c44152fc6 | |||
| 397515edce | |||
| 980fced7e4 | |||
| bae5009ec4 | |||
| 233780617f | |||
| fd8fb21517 | |||
| c6cbe822e1 |
@@ -11,7 +11,3 @@ htmlcov
|
||||
logs
|
||||
data
|
||||
tmp
|
||||
tests
|
||||
myfsio_core/target
|
||||
Dockerfile
|
||||
.dockerignore
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -26,13 +26,6 @@ dist/
|
||||
*.egg-info/
|
||||
.eggs/
|
||||
|
||||
# Rust / maturin build artifacts
|
||||
python/myfsio_core/target/
|
||||
python/myfsio_core/Cargo.lock
|
||||
|
||||
# Rust engine build artifacts
|
||||
rust/myfsio-engine/target/
|
||||
|
||||
# Local runtime artifacts
|
||||
logs/
|
||||
*.log
|
||||
|
||||
33
Dockerfile
Normal file
33
Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
FROM python:3.14.3-slim
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY requirements.txt ./
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN chmod +x docker-entrypoint.sh
|
||||
|
||||
RUN mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
USER myfsio
|
||||
|
||||
EXPOSE 5000 5100
|
||||
ENV APP_HOST=0.0.0.0 \
|
||||
FLASK_ENV=production \
|
||||
FLASK_DEBUG=0
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
|
||||
|
||||
CMD ["./docker-entrypoint.sh"]
|
||||
390
README.md
390
README.md
@@ -1,212 +1,250 @@
|
||||
# MyFSIO
|
||||
|
||||
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The active server lives under `rust/myfsio-engine` and serves both the S3 API and the built-in web UI from a single process.
|
||||
|
||||
The `python/` implementation is deprecated as of 2026-04-21. It remains in the repository for migration reference and legacy tests, but new development and supported runtime usage should target the Rust server.
|
||||
A lightweight, S3-compatible object storage system built with Flask. MyFSIO implements core AWS S3 REST API operations with filesystem-backed storage, making it ideal for local development, testing, and self-hosted storage scenarios.
|
||||
|
||||
## Features
|
||||
|
||||
- S3-compatible REST API with Signature Version 4 authentication
|
||||
- Browser UI for buckets, objects, IAM users, policies, replication, metrics, and site administration
|
||||
- Filesystem-backed storage rooted at `data/`
|
||||
- Bucket versioning, multipart uploads, presigned URLs, CORS, object and bucket tagging
|
||||
- Server-side encryption and built-in KMS support
|
||||
- Optional background services for lifecycle, garbage collection, integrity scanning, operation metrics, and system metrics history
|
||||
- Replication, site sync, and static website hosting support
|
||||
**Core Storage**
|
||||
- S3-compatible REST API with AWS Signature Version 4 authentication
|
||||
- Bucket and object CRUD operations
|
||||
- Object versioning with version history
|
||||
- Multipart uploads for large files
|
||||
- Presigned URLs (1 second to 7 days validity)
|
||||
|
||||
## Runtime Model
|
||||
**Security & Access Control**
|
||||
- IAM users with access key management and rotation
|
||||
- Bucket policies (AWS Policy Version 2012-10-17)
|
||||
- Server-side encryption (SSE-S3 and SSE-KMS)
|
||||
- Built-in Key Management Service (KMS)
|
||||
- Rate limiting per endpoint
|
||||
|
||||
MyFSIO now runs as one Rust process:
|
||||
**Advanced Features**
|
||||
- Cross-bucket replication to remote S3-compatible endpoints
|
||||
- Hot-reload for bucket policies (no restart required)
|
||||
- CORS configuration per bucket
|
||||
|
||||
- API listener on `HOST` + `PORT` (default `127.0.0.1:5000`)
|
||||
- UI listener on `HOST` + `UI_PORT` (default `127.0.0.1:5100`)
|
||||
- Shared state for storage, IAM, policies, sessions, metrics, and background workers
|
||||
**Management UI**
|
||||
- Web console for bucket and object management
|
||||
- IAM dashboard for user administration
|
||||
- Inline JSON policy editor with presets
|
||||
- Object browser with folder navigation and bulk operations
|
||||
- Dark mode support
|
||||
|
||||
If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only" runtime anymore.
|
||||
## Architecture
|
||||
|
||||
```
|
||||
+------------------+ +------------------+
|
||||
| API Server | | UI Server |
|
||||
| (port 5000) | | (port 5100) |
|
||||
| | | |
|
||||
| - S3 REST API |<------->| - Web Console |
|
||||
| - SigV4 Auth | | - IAM Dashboard |
|
||||
| - Presign URLs | | - Bucket Editor |
|
||||
+--------+---------+ +------------------+
|
||||
|
|
||||
v
|
||||
+------------------+ +------------------+
|
||||
| Object Storage | | System Metadata |
|
||||
| (filesystem) | | (.myfsio.sys/) |
|
||||
| | | |
|
||||
| data/<bucket>/ | | - IAM config |
|
||||
| <objects> | | - Bucket policies|
|
||||
| | | - Encryption keys|
|
||||
+------------------+ +------------------+
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
From the repository root:
|
||||
|
||||
```bash
|
||||
cd rust/myfsio-engine
|
||||
cargo run -p myfsio-server --
|
||||
# Clone and setup
|
||||
git clone https://gitea.jzwsite.com/kqjy/MyFSIO
|
||||
cd s3
|
||||
python -m venv .venv
|
||||
|
||||
# Activate virtual environment
|
||||
# Windows PowerShell:
|
||||
.\.venv\Scripts\Activate.ps1
|
||||
# Windows CMD:
|
||||
.venv\Scripts\activate.bat
|
||||
# Linux/macOS:
|
||||
source .venv/bin/activate
|
||||
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Start both servers
|
||||
python run.py
|
||||
|
||||
# Or start individually
|
||||
python run.py --mode api # API only (port 5000)
|
||||
python run.py --mode ui # UI only (port 5100)
|
||||
```
|
||||
|
||||
Useful URLs:
|
||||
**Default Credentials:** `localadmin` / `localadmin`
|
||||
|
||||
- UI: `http://127.0.0.1:5100/ui`
|
||||
- API: `http://127.0.0.1:5000/`
|
||||
- Health: `http://127.0.0.1:5000/myfsio/health`
|
||||
|
||||
On first boot, MyFSIO creates `data/.myfsio.sys/config/iam.json` and prints the generated admin access key and secret key to the console.
|
||||
|
||||
### Common CLI commands
|
||||
|
||||
```bash
|
||||
# Show resolved configuration
|
||||
cargo run -p myfsio-server -- --show-config
|
||||
|
||||
# Validate configuration and exit non-zero on critical issues
|
||||
cargo run -p myfsio-server -- --check-config
|
||||
|
||||
# Reset admin credentials
|
||||
cargo run -p myfsio-server -- --reset-cred
|
||||
|
||||
# API only
|
||||
UI_ENABLED=false cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
## Building a Binary
|
||||
|
||||
```bash
|
||||
cd rust/myfsio-engine
|
||||
cargo build --release -p myfsio-server
|
||||
```
|
||||
|
||||
Binary locations:
|
||||
|
||||
- Linux/macOS: `rust/myfsio-engine/target/release/myfsio-server`
|
||||
- Windows: `rust/myfsio-engine/target/release/myfsio-server.exe`
|
||||
|
||||
Run the built binary directly:
|
||||
|
||||
```bash
|
||||
./target/release/myfsio-server
|
||||
```
|
||||
- **Web Console:** http://127.0.0.1:5100/ui
|
||||
- **API Endpoint:** http://127.0.0.1:5000
|
||||
|
||||
## Configuration
|
||||
|
||||
The server reads environment variables from the process environment and also loads, when present:
|
||||
|
||||
- `/opt/myfsio/myfsio.env`
|
||||
- `.env`
|
||||
- `myfsio.env`
|
||||
|
||||
Core settings:
|
||||
|
||||
| Variable | Default | Description |
|
||||
| --- | --- | --- |
|
||||
| `HOST` | `127.0.0.1` | Bind address for API and UI listeners |
|
||||
| `PORT` | `5000` | API port |
|
||||
| `UI_PORT` | `5100` | UI port |
|
||||
| `UI_ENABLED` | `true` | Disable to run API-only |
|
||||
| `STORAGE_ROOT` | `./data` | Root directory for buckets and system metadata |
|
||||
| `IAM_CONFIG` | `<STORAGE_ROOT>/.myfsio.sys/config/iam.json` | IAM config path |
|
||||
| `API_BASE_URL` | unset | Public API base used by the UI and presigned URL generation |
|
||||
| `AWS_REGION` | `us-east-1` | Region used in SigV4 scope |
|
||||
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Allowed request time skew |
|
||||
| `PRESIGNED_URL_MIN_EXPIRY_SECONDS` | `1` | Minimum presigned URL expiry |
|
||||
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Maximum presigned URL expiry |
|
||||
| `SECRET_KEY` | loaded from `.myfsio.sys/config/.secret` if present | Session signing key and IAM-at-rest encryption key |
|
||||
| `ADMIN_ACCESS_KEY` | unset | Optional first-run or reset access key |
|
||||
| `ADMIN_SECRET_KEY` | unset | Optional first-run or reset secret key |
|
||||
|
||||
Feature toggles:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `ENCRYPTION_ENABLED` | `false` |
|
||||
| `KMS_ENABLED` | `false` |
|
||||
| `GC_ENABLED` | `false` |
|
||||
| `INTEGRITY_ENABLED` | `false` |
|
||||
| `LIFECYCLE_ENABLED` | `false` |
|
||||
| `METRICS_HISTORY_ENABLED` | `false` |
|
||||
| `OPERATION_METRICS_ENABLED` | `false` |
|
||||
| `WEBSITE_HOSTING_ENABLED` | `false` |
|
||||
| `SITE_SYNC_ENABLED` | `false` |
|
||||
|
||||
Metrics and replication tuning:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` |
|
||||
| `OPERATION_METRICS_RETENTION_HOURS` | `24` |
|
||||
| `METRICS_HISTORY_INTERVAL_MINUTES` | `5` |
|
||||
| `METRICS_HISTORY_RETENTION_HOURS` | `24` |
|
||||
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` |
|
||||
| `REPLICATION_READ_TIMEOUT_SECONDS` | `30` |
|
||||
| `REPLICATION_MAX_RETRIES` | `2` |
|
||||
| `REPLICATION_STREAMING_THRESHOLD_BYTES` | `10485760` |
|
||||
| `REPLICATION_MAX_FAILURES_PER_BUCKET` | `50` |
|
||||
| `SITE_SYNC_INTERVAL_SECONDS` | `60` |
|
||||
| `SITE_SYNC_BATCH_SIZE` | `100` |
|
||||
| `SITE_SYNC_CONNECT_TIMEOUT_SECONDS` | `10` |
|
||||
| `SITE_SYNC_READ_TIMEOUT_SECONDS` | `120` |
|
||||
| `SITE_SYNC_MAX_RETRIES` | `2` |
|
||||
| `SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS` | `1.0` |
|
||||
|
||||
UI asset overrides:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `TEMPLATES_DIR` | built-in crate templates directory |
|
||||
| `STATIC_DIR` | built-in crate static directory |
|
||||
|
||||
See [docs.md](./docs.md) for the full Rust-side operations guide.
|
||||
|----------|---------|-------------|
|
||||
| `STORAGE_ROOT` | `./data` | Filesystem root for bucket storage |
|
||||
| `IAM_CONFIG` | `.myfsio.sys/config/iam.json` | IAM user and policy store |
|
||||
| `BUCKET_POLICY_PATH` | `.myfsio.sys/config/bucket_policies.json` | Bucket policy store |
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | API endpoint for UI calls |
|
||||
| `MAX_UPLOAD_SIZE` | `1073741824` | Maximum upload size in bytes (1 GB) |
|
||||
| `MULTIPART_MIN_PART_SIZE` | `5242880` | Minimum multipart part size (5 MB) |
|
||||
| `UI_PAGE_SIZE` | `100` | Default page size for listings |
|
||||
| `SECRET_KEY` | `dev-secret-key` | Flask session secret |
|
||||
| `AWS_REGION` | `us-east-1` | Region for SigV4 signing |
|
||||
| `AWS_SERVICE` | `s3` | Service name for SigV4 signing |
|
||||
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption |
|
||||
| `KMS_ENABLED` | `false` | Enable Key Management Service |
|
||||
| `LOG_LEVEL` | `INFO` | Logging verbosity |
|
||||
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Max time skew for SigV4 requests |
|
||||
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Max presigned URL expiry (7 days) |
|
||||
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` | Replication connection timeout |
|
||||
| `SITE_SYNC_ENABLED` | `false` | Enable bi-directional site sync |
|
||||
| `OBJECT_TAG_LIMIT` | `50` | Maximum tags per object |
|
||||
|
||||
## Data Layout
|
||||
|
||||
```text
|
||||
data/
|
||||
<bucket>/
|
||||
.myfsio.sys/
|
||||
config/
|
||||
iam.json
|
||||
bucket_policies.json
|
||||
connections.json
|
||||
operation_metrics.json
|
||||
metrics_history.json
|
||||
buckets/<bucket>/
|
||||
meta/
|
||||
versions/
|
||||
multipart/
|
||||
keys/
|
||||
```
|
||||
data/
|
||||
├── <bucket>/ # User buckets with objects
|
||||
└── .myfsio.sys/ # System metadata
|
||||
├── config/
|
||||
│ ├── iam.json # IAM users and policies
|
||||
│ ├── bucket_policies.json # Bucket policies
|
||||
│ ├── replication_rules.json
|
||||
│ └── connections.json # Remote S3 connections
|
||||
├── buckets/<bucket>/
|
||||
│ ├── meta/ # Object metadata (.meta.json)
|
||||
│ ├── versions/ # Archived object versions
|
||||
│ └── .bucket.json # Bucket config (versioning, CORS)
|
||||
├── multipart/ # Active multipart uploads
|
||||
└── keys/ # Encryption keys (SSE-S3/KMS)
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
All endpoints require AWS Signature Version 4 authentication unless using presigned URLs or public bucket policies.
|
||||
|
||||
### Bucket Operations
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `GET` | `/` | List all buckets |
|
||||
| `PUT` | `/<bucket>` | Create bucket |
|
||||
| `DELETE` | `/<bucket>` | Delete bucket (must be empty) |
|
||||
| `HEAD` | `/<bucket>` | Check bucket exists |
|
||||
|
||||
### Object Operations
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `GET` | `/<bucket>` | List objects (supports `list-type=2`) |
|
||||
| `PUT` | `/<bucket>/<key>` | Upload object |
|
||||
| `GET` | `/<bucket>/<key>` | Download object |
|
||||
| `DELETE` | `/<bucket>/<key>` | Delete object |
|
||||
| `HEAD` | `/<bucket>/<key>` | Get object metadata |
|
||||
| `POST` | `/<bucket>/<key>?uploads` | Initiate multipart upload |
|
||||
| `PUT` | `/<bucket>/<key>?partNumber=N&uploadId=X` | Upload part |
|
||||
| `POST` | `/<bucket>/<key>?uploadId=X` | Complete multipart upload |
|
||||
| `DELETE` | `/<bucket>/<key>?uploadId=X` | Abort multipart upload |
|
||||
|
||||
### Bucket Policies (S3-compatible)
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `GET` | `/<bucket>?policy` | Get bucket policy |
|
||||
| `PUT` | `/<bucket>?policy` | Set bucket policy |
|
||||
| `DELETE` | `/<bucket>?policy` | Delete bucket policy |
|
||||
|
||||
### Versioning
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `GET` | `/<bucket>/<key>?versionId=X` | Get specific version |
|
||||
| `DELETE` | `/<bucket>/<key>?versionId=X` | Delete specific version |
|
||||
| `GET` | `/<bucket>?versions` | List object versions |
|
||||
|
||||
### Health Check
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| `GET` | `/myfsio/health` | Health check endpoint |
|
||||
|
||||
## IAM & Access Control
|
||||
|
||||
### Users and Access Keys
|
||||
|
||||
On first run, MyFSIO creates a default admin user (`localadmin`/`localadmin`). Use the IAM dashboard to:
|
||||
|
||||
- Create and delete users
|
||||
- Generate and rotate access keys
|
||||
- Attach inline policies to users
|
||||
- Control IAM management permissions
|
||||
|
||||
### Bucket Policies
|
||||
|
||||
Bucket policies follow AWS policy grammar (Version `2012-10-17`) with support for:
|
||||
|
||||
- Principal-based access (`*` for anonymous, specific users)
|
||||
- Action-based permissions (`s3:GetObject`, `s3:PutObject`, etc.)
|
||||
- Resource patterns (`arn:aws:s3:::bucket/*`)
|
||||
- Condition keys
|
||||
|
||||
**Policy Presets:**
|
||||
- **Public:** Grants anonymous read access (`s3:GetObject`, `s3:ListBucket`)
|
||||
- **Private:** Removes bucket policy (IAM-only access)
|
||||
- **Custom:** Manual policy editing with draft preservation
|
||||
|
||||
Policies hot-reload when the JSON file changes.
|
||||
|
||||
## Server-Side Encryption
|
||||
|
||||
MyFSIO supports two encryption modes:
|
||||
|
||||
- **SSE-S3:** Server-managed keys with automatic key rotation
|
||||
- **SSE-KMS:** Customer-managed keys via built-in KMS
|
||||
|
||||
Enable encryption with:
|
||||
```bash
|
||||
ENCRYPTION_ENABLED=true python run.py
|
||||
```
|
||||
|
||||
## Cross-Bucket Replication
|
||||
|
||||
Replicate objects to remote S3-compatible endpoints:
|
||||
|
||||
1. Configure remote connections in the UI
|
||||
2. Create replication rules specifying source/destination
|
||||
3. Objects are automatically replicated on upload
|
||||
|
||||
## Docker
|
||||
|
||||
Build the Rust image from the `rust/` directory:
|
||||
|
||||
```bash
|
||||
docker build -t myfsio ./rust
|
||||
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
|
||||
docker build -t myfsio .
|
||||
docker run -p 5000:5000 -p 5100:5100 -v ./data:/app/data myfsio
|
||||
```
|
||||
|
||||
If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3 endpoint.
|
||||
|
||||
## Linux Installation
|
||||
|
||||
The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:
|
||||
|
||||
```bash
|
||||
cd rust/myfsio-engine
|
||||
cargo build --release -p myfsio-server
|
||||
|
||||
cd ../..
|
||||
sudo ./scripts/install.sh --binary ./rust/myfsio-engine/target/release/myfsio-server
|
||||
```
|
||||
|
||||
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the Rust test suite from the workspace:
|
||||
|
||||
```bash
|
||||
cd rust/myfsio-engine
|
||||
cargo test
|
||||
# Run all tests
|
||||
pytest tests/ -v
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/test_api.py -v
|
||||
|
||||
# Run with coverage
|
||||
pytest tests/ --cov=app --cov-report=html
|
||||
```
|
||||
|
||||
## Health Check
|
||||
## References
|
||||
|
||||
`GET /myfsio/health` returns:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "ok",
|
||||
"version": "0.5.0"
|
||||
}
|
||||
```
|
||||
|
||||
The `version` field comes from the Rust crate version in `rust/myfsio-engine/crates/myfsio-server/Cargo.toml`.
|
||||
- [Amazon S3 Documentation](https://docs.aws.amazon.com/s3/)
|
||||
- [AWS Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
|
||||
- [S3 Bucket Policy Examples](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import html as html_module
|
||||
import logging
|
||||
import mimetypes
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from datetime import timedelta
|
||||
@@ -17,8 +17,6 @@ from flask_cors import CORS
|
||||
from flask_wtf.csrf import CSRFError
|
||||
from werkzeug.middleware.proxy_fix import ProxyFix
|
||||
|
||||
import io
|
||||
|
||||
from .access_logging import AccessLoggingService
|
||||
from .operation_metrics import OperationMetricsCollector, classify_endpoint
|
||||
from .compression import GzipMiddleware
|
||||
@@ -30,8 +28,6 @@ from .encryption import EncryptionManager
|
||||
from .extensions import limiter, csrf
|
||||
from .iam import IamService
|
||||
from .kms import KMSManager
|
||||
from .gc import GarbageCollector
|
||||
from .integrity import IntegrityChecker
|
||||
from .lifecycle import LifecycleManager
|
||||
from .notifications import NotificationService
|
||||
from .object_lock import ObjectLockService
|
||||
@@ -42,66 +38,6 @@ from .storage import ObjectStorage, StorageError
|
||||
from .version import get_version
|
||||
from .website_domains import WebsiteDomainStore
|
||||
|
||||
_request_counter = itertools.count(1)
|
||||
|
||||
|
||||
class _ChunkedTransferMiddleware:
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
|
||||
def __call__(self, environ, start_response):
|
||||
if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
|
||||
return self.app(environ, start_response)
|
||||
|
||||
transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
|
||||
content_length = environ.get("CONTENT_LENGTH")
|
||||
|
||||
if "chunked" in transfer_encoding.lower():
|
||||
if content_length:
|
||||
del environ["HTTP_TRANSFER_ENCODING"]
|
||||
else:
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw:
|
||||
try:
|
||||
if hasattr(raw, "seek"):
|
||||
raw.seek(0)
|
||||
body = raw.read()
|
||||
except Exception:
|
||||
body = b""
|
||||
if body:
|
||||
environ["wsgi.input"] = io.BytesIO(body)
|
||||
environ["CONTENT_LENGTH"] = str(len(body))
|
||||
del environ["HTTP_TRANSFER_ENCODING"]
|
||||
|
||||
content_length = environ.get("CONTENT_LENGTH")
|
||||
if not content_length or content_length == "0":
|
||||
sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
|
||||
decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
|
||||
content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
|
||||
if ("STREAMING" in sha256.upper() or decoded_len
|
||||
or "aws-chunked" in content_encoding.lower()):
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw:
|
||||
try:
|
||||
if hasattr(raw, "seek"):
|
||||
raw.seek(0)
|
||||
body = raw.read()
|
||||
except Exception:
|
||||
body = b""
|
||||
if body:
|
||||
environ["wsgi.input"] = io.BytesIO(body)
|
||||
environ["CONTENT_LENGTH"] = str(len(body))
|
||||
|
||||
raw = environ.get("wsgi.input")
|
||||
if raw and hasattr(raw, "seek"):
|
||||
try:
|
||||
raw.seek(0)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return self.app(environ, start_response)
|
||||
|
||||
|
||||
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
|
||||
"""Migrate config file from legacy locations to the active path.
|
||||
@@ -157,20 +93,12 @@ def create_app(
|
||||
app.config.setdefault("WTF_CSRF_ENABLED", False)
|
||||
|
||||
# Trust X-Forwarded-* headers from proxies
|
||||
num_proxies = app.config.get("NUM_TRUSTED_PROXIES", 1)
|
||||
if num_proxies:
|
||||
if "NUM_TRUSTED_PROXIES" not in os.environ:
|
||||
logging.getLogger(__name__).warning(
|
||||
"NUM_TRUSTED_PROXIES not set, defaulting to 1. "
|
||||
"Set NUM_TRUSTED_PROXIES=0 if not behind a reverse proxy."
|
||||
)
|
||||
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)
|
||||
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_prefix=1)
|
||||
|
||||
# Enable gzip compression for responses (10-20x smaller JSON payloads)
|
||||
if app.config.get("ENABLE_GZIP", True):
|
||||
app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)
|
||||
|
||||
app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)
|
||||
|
||||
_configure_cors(app)
|
||||
_configure_logging(app)
|
||||
|
||||
@@ -179,11 +107,10 @@ def create_app(
|
||||
|
||||
storage = ObjectStorage(
|
||||
Path(app.config["STORAGE_ROOT"]),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
|
||||
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
|
||||
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
|
||||
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
|
||||
meta_read_cache_max=app.config.get("META_READ_CACHE_MAX", 2048),
|
||||
)
|
||||
|
||||
if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
|
||||
@@ -193,7 +120,6 @@ def create_app(
|
||||
Path(app.config["IAM_CONFIG"]),
|
||||
auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
|
||||
auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
|
||||
encryption_key=app.config.get("SECRET_KEY"),
|
||||
)
|
||||
bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
|
||||
secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))
|
||||
@@ -284,31 +210,6 @@ def create_app(
|
||||
)
|
||||
lifecycle_manager.start()
|
||||
|
||||
gc_collector = None
|
||||
if app.config.get("GC_ENABLED", False):
|
||||
gc_collector = GarbageCollector(
|
||||
storage_root=storage_root,
|
||||
interval_hours=app.config.get("GC_INTERVAL_HOURS", 6.0),
|
||||
temp_file_max_age_hours=app.config.get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0),
|
||||
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
|
||||
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
|
||||
dry_run=app.config.get("GC_DRY_RUN", False),
|
||||
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
|
||||
)
|
||||
gc_collector.start()
|
||||
|
||||
integrity_checker = None
|
||||
if app.config.get("INTEGRITY_ENABLED", False):
|
||||
integrity_checker = IntegrityChecker(
|
||||
storage_root=storage_root,
|
||||
interval_hours=app.config.get("INTEGRITY_INTERVAL_HOURS", 24.0),
|
||||
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
|
||||
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
|
||||
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
|
||||
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
|
||||
)
|
||||
integrity_checker.start()
|
||||
|
||||
app.extensions["object_storage"] = storage
|
||||
app.extensions["iam"] = iam
|
||||
app.extensions["bucket_policies"] = bucket_policies
|
||||
@@ -320,8 +221,6 @@ def create_app(
|
||||
app.extensions["kms"] = kms_manager
|
||||
app.extensions["acl"] = acl_service
|
||||
app.extensions["lifecycle"] = lifecycle_manager
|
||||
app.extensions["gc"] = gc_collector
|
||||
app.extensions["integrity"] = integrity_checker
|
||||
app.extensions["object_lock"] = object_lock_service
|
||||
app.extensions["notifications"] = notification_service
|
||||
app.extensions["access_logging"] = access_logging_service
|
||||
@@ -574,9 +473,13 @@ def _configure_logging(app: Flask) -> None:
|
||||
|
||||
@app.before_request
|
||||
def _log_request_start() -> None:
|
||||
g.request_id = f"{os.getpid():x}{next(_request_counter):012x}"
|
||||
g.request_id = uuid.uuid4().hex
|
||||
g.request_started_at = time.perf_counter()
|
||||
g.request_bytes_in = request.content_length or 0
|
||||
app.logger.info(
|
||||
"Request started",
|
||||
extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
|
||||
)
|
||||
|
||||
@app.before_request
|
||||
def _maybe_serve_website():
|
||||
@@ -625,57 +528,30 @@ def _configure_logging(app: Flask) -> None:
|
||||
is_encrypted = "x-amz-server-side-encryption" in metadata
|
||||
except (StorageError, OSError):
|
||||
pass
|
||||
if request.method == "HEAD":
|
||||
response = Response(status=200)
|
||||
if is_encrypted and hasattr(storage, "get_object_data"):
|
||||
try:
|
||||
data, _ = storage.get_object_data(bucket, object_key)
|
||||
response.headers["Content-Length"] = len(data)
|
||||
except (StorageError, OSError):
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
else:
|
||||
try:
|
||||
stat = obj_path.stat()
|
||||
response.headers["Content-Length"] = stat.st_size
|
||||
except OSError:
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
response.headers["Content-Type"] = content_type
|
||||
return response
|
||||
if is_encrypted and hasattr(storage, "get_object_data"):
|
||||
try:
|
||||
data, _ = storage.get_object_data(bucket, object_key)
|
||||
file_size = len(data)
|
||||
response = Response(data, mimetype=content_type)
|
||||
response.headers["Content-Length"] = len(data)
|
||||
return response
|
||||
except (StorageError, OSError):
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
else:
|
||||
data = None
|
||||
try:
|
||||
stat = obj_path.stat()
|
||||
file_size = stat.st_size
|
||||
except OSError:
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
if request.method == "HEAD":
|
||||
response = Response(status=200)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Content-Type"] = content_type
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
from .s3_api import _parse_range_header
|
||||
range_header = request.headers.get("Range")
|
||||
if range_header:
|
||||
ranges = _parse_range_header(range_header, file_size)
|
||||
if ranges is None:
|
||||
return Response(status=416, headers={"Content-Range": f"bytes */{file_size}"})
|
||||
start, end = ranges[0]
|
||||
length = end - start + 1
|
||||
if data is not None:
|
||||
partial_data = data[start:end + 1]
|
||||
response = Response(partial_data, status=206, mimetype=content_type)
|
||||
else:
|
||||
def _stream_range(file_path, start_pos, length_to_read):
|
||||
with file_path.open("rb") as f:
|
||||
f.seek(start_pos)
|
||||
remaining = length_to_read
|
||||
while remaining > 0:
|
||||
chunk = f.read(min(262144, remaining))
|
||||
if not chunk:
|
||||
break
|
||||
remaining -= len(chunk)
|
||||
yield chunk
|
||||
response = Response(_stream_range(obj_path, start, length), status=206, mimetype=content_type, direct_passthrough=True)
|
||||
response.headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
|
||||
response.headers["Content-Length"] = length
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
if data is not None:
|
||||
response = Response(data, mimetype=content_type)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
def _stream(file_path):
|
||||
with file_path.open("rb") as f:
|
||||
while True:
|
||||
@@ -683,10 +559,13 @@ def _configure_logging(app: Flask) -> None:
|
||||
if not chunk:
|
||||
break
|
||||
yield chunk
|
||||
response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
|
||||
response.headers["Content-Length"] = file_size
|
||||
response.headers["Accept-Ranges"] = "bytes"
|
||||
return response
|
||||
try:
|
||||
stat = obj_path.stat()
|
||||
response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
|
||||
response.headers["Content-Length"] = stat.st_size
|
||||
return response
|
||||
except OSError:
|
||||
return _website_error_response(500, "Internal Server Error")
|
||||
|
||||
def _serve_website_error(storage, bucket, error_doc_key, status_code):
|
||||
if not error_doc_key:
|
||||
@@ -719,10 +598,9 @@ def _configure_logging(app: Flask) -> None:
|
||||
return _website_error_response(status_code, "Not Found")
|
||||
|
||||
def _website_error_response(status_code, message):
|
||||
if status_code == 404:
|
||||
body = "<h1>404 page not found</h1>"
|
||||
else:
|
||||
body = f"{status_code} {message}"
|
||||
safe_msg = html_module.escape(str(message))
|
||||
safe_code = html_module.escape(str(status_code))
|
||||
body = f"<html><head><title>{safe_code} {safe_msg}</title></head><body><h1>{safe_code} {safe_msg}</h1></body></html>"
|
||||
return Response(body, status=status_code, mimetype="text/html")
|
||||
|
||||
@app.after_request
|
||||
@@ -730,19 +608,17 @@ def _configure_logging(app: Flask) -> None:
|
||||
duration_ms = 0.0
|
||||
if hasattr(g, "request_started_at"):
|
||||
duration_ms = (time.perf_counter() - g.request_started_at) * 1000
|
||||
request_id = getattr(g, "request_id", f"{os.getpid():x}{next(_request_counter):012x}")
|
||||
request_id = getattr(g, "request_id", uuid.uuid4().hex)
|
||||
response.headers.setdefault("X-Request-ID", request_id)
|
||||
if app.logger.isEnabledFor(logging.INFO):
|
||||
app.logger.info(
|
||||
"Request completed",
|
||||
extra={
|
||||
"path": request.path,
|
||||
"method": request.method,
|
||||
"remote_addr": request.remote_addr,
|
||||
},
|
||||
)
|
||||
app.logger.info(
|
||||
"Request completed",
|
||||
extra={
|
||||
"path": request.path,
|
||||
"method": request.method,
|
||||
"remote_addr": request.remote_addr,
|
||||
},
|
||||
)
|
||||
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
|
||||
response.headers["Server"] = "MyFSIO"
|
||||
|
||||
operation_metrics = app.extensions.get("operation_metrics")
|
||||
if operation_metrics:
|
||||
@@ -14,12 +14,10 @@ from flask import Blueprint, Response, current_app, jsonify, request
|
||||
|
||||
from .connections import ConnectionStore
|
||||
from .extensions import limiter
|
||||
from .gc import GarbageCollector
|
||||
from .integrity import IntegrityChecker
|
||||
from .iam import IamError, Principal
|
||||
from .replication import ReplicationManager
|
||||
from .site_registry import PeerSite, SiteInfo, SiteRegistry
|
||||
from .website_domains import WebsiteDomainStore, normalize_domain, is_valid_domain
|
||||
from .website_domains import WebsiteDomainStore
|
||||
|
||||
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
@@ -686,107 +684,6 @@ def _storage():
|
||||
return current_app.extensions["object_storage"]
|
||||
|
||||
|
||||
def _require_iam_action(action: str):
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return None, error
|
||||
try:
|
||||
_iam().authorize(principal, None, action)
|
||||
return principal, None
|
||||
except IamError:
|
||||
return None, _json_error("AccessDenied", f"Requires {action} permission", 403)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_list_users():
|
||||
principal, error = _require_iam_action("iam:list_users")
|
||||
if error:
|
||||
return error
|
||||
return jsonify({"users": _iam().list_users()})
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_get_user(identifier):
|
||||
principal, error = _require_iam_action("iam:get_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
user_id = _iam().resolve_user_id(identifier)
|
||||
return jsonify(_iam().get_user_by_id(user_id))
|
||||
except IamError as exc:
|
||||
return _json_error("NotFound", str(exc), 404)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_get_user_policies(identifier):
|
||||
principal, error = _require_iam_action("iam:get_policy")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
return jsonify({"policies": _iam().get_user_policies(identifier)})
|
||||
except IamError as exc:
|
||||
return _json_error("NotFound", str(exc), 404)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_create_access_key(identifier):
|
||||
principal, error = _require_iam_action("iam:create_key")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
result = _iam().create_access_key(identifier)
|
||||
logger.info("Access key created for %s by %s", identifier, principal.access_key)
|
||||
return jsonify(result), 201
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_delete_access_key(identifier, access_key):
|
||||
principal, error = _require_iam_action("iam:delete_key")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().delete_access_key(access_key)
|
||||
logger.info("Access key %s deleted by %s", access_key, principal.access_key)
|
||||
return "", 204
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_disable_user(identifier):
|
||||
principal, error = _require_iam_action("iam:disable_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().disable_user(identifier)
|
||||
logger.info("User %s disabled by %s", identifier, principal.access_key)
|
||||
return jsonify({"status": "disabled"})
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def iam_enable_user(identifier):
|
||||
principal, error = _require_iam_action("iam:disable_user")
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_iam().enable_user(identifier)
|
||||
logger.info("User %s enabled by %s", identifier, principal.access_key)
|
||||
return jsonify({"status": "enabled"})
|
||||
except IamError as exc:
|
||||
return _json_error("InvalidRequest", str(exc), 400)
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def list_website_domains():
|
||||
@@ -807,12 +704,10 @@ def create_website_domain():
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
domain = normalize_domain(payload.get("domain") or "")
|
||||
domain = (payload.get("domain") or "").strip().lower()
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not domain:
|
||||
return _json_error("ValidationError", "domain is required", 400)
|
||||
if not is_valid_domain(domain):
|
||||
return _json_error("ValidationError", f"Invalid domain: '{domain}'", 400)
|
||||
if not bucket:
|
||||
return _json_error("ValidationError", "bucket is required", 400)
|
||||
storage = _storage()
|
||||
@@ -835,11 +730,10 @@ def get_website_domain(domain: str):
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
bucket = _website_domains().get_bucket(domain)
|
||||
if not bucket:
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
return jsonify({"domain": domain.lower(), "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["PUT"])
|
||||
@@ -850,7 +744,6 @@ def update_website_domain(domain: str):
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not bucket:
|
||||
@@ -859,11 +752,9 @@ def update_website_domain(domain: str):
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
|
||||
store = _website_domains()
|
||||
if not store.get_bucket(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
store.set_mapping(domain, bucket)
|
||||
logger.info("Website domain mapping updated: %s -> %s", domain, bucket)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
return jsonify({"domain": domain.lower(), "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["DELETE"])
|
||||
@@ -874,111 +765,7 @@ def delete_website_domain(domain: str):
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
if not _website_domains().delete_mapping(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
logger.info("Website domain mapping deleted: %s", domain)
|
||||
return Response(status=204)
|
||||
|
||||
|
||||
def _gc() -> Optional[GarbageCollector]:
|
||||
return current_app.extensions.get("gc")
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/status", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def gc_status():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
gc = _gc()
|
||||
if not gc:
|
||||
return jsonify({"enabled": False, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})
|
||||
return jsonify(gc.get_status())
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/run", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def gc_run_now():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
gc = _gc()
|
||||
if not gc:
|
||||
return _json_error("InvalidRequest", "GC is not enabled", 400)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
started = gc.run_async(dry_run=payload.get("dry_run"))
|
||||
logger.info("GC manual run by %s", principal.access_key)
|
||||
if not started:
|
||||
return _json_error("Conflict", "GC is already in progress", 409)
|
||||
return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@admin_api_bp.route("/gc/history", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def gc_history():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
gc = _gc()
|
||||
if not gc:
|
||||
return jsonify({"executions": []})
|
||||
limit = min(int(request.args.get("limit", 50)), 200)
|
||||
offset = int(request.args.get("offset", 0))
|
||||
records = gc.get_history(limit=limit, offset=offset)
|
||||
return jsonify({"executions": records})
|
||||
|
||||
|
||||
def _integrity() -> Optional[IntegrityChecker]:
|
||||
return current_app.extensions.get("integrity")
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/status", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def integrity_status():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
checker = _integrity()
|
||||
if not checker:
|
||||
return jsonify({"enabled": False, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})
|
||||
return jsonify(checker.get_status())
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/run", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def integrity_run_now():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
checker = _integrity()
|
||||
if not checker:
|
||||
return _json_error("InvalidRequest", "Integrity checker is not enabled", 400)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
override_dry_run = payload.get("dry_run")
|
||||
override_auto_heal = payload.get("auto_heal")
|
||||
started = checker.run_async(
|
||||
auto_heal=override_auto_heal if override_auto_heal is not None else None,
|
||||
dry_run=override_dry_run if override_dry_run is not None else None,
|
||||
)
|
||||
logger.info("Integrity manual run by %s", principal.access_key)
|
||||
if not started:
|
||||
return _json_error("Conflict", "A scan is already in progress", 409)
|
||||
return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@admin_api_bp.route("/integrity/history", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def integrity_history():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
checker = _integrity()
|
||||
if not checker:
|
||||
return jsonify({"executions": []})
|
||||
limit = min(int(request.args.get("limit", 50)), 200)
|
||||
offset = int(request.args.get("offset", 0))
|
||||
records = checker.get_history(limit=limit, offset=offset)
|
||||
return jsonify({"executions": records})
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
@@ -76,7 +75,7 @@ def _evaluate_condition_operator(
|
||||
expected_null = condition_values[0].lower() in ("true", "1", "yes") if condition_values else True
|
||||
return is_null == expected_null
|
||||
|
||||
return False
|
||||
return True
|
||||
|
||||
ACTION_ALIASES = {
|
||||
"s3:listbucket": "list",
|
||||
@@ -269,7 +268,7 @@ class BucketPolicyStore:
|
||||
self._last_mtime = self._current_mtime()
|
||||
# Performance: Avoid stat() on every request
|
||||
self._last_stat_check = 0.0
|
||||
self._stat_check_interval = float(os.environ.get("BUCKET_POLICY_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
|
||||
self._stat_check_interval = 1.0 # Only check mtime every 1 second
|
||||
|
||||
def maybe_reload(self) -> None:
|
||||
# Performance: Skip stat check if we checked recently
|
||||
@@ -25,7 +25,7 @@ def _calculate_auto_connection_limit() -> int:
|
||||
|
||||
|
||||
def _calculate_auto_backlog(connection_limit: int) -> int:
|
||||
return max(128, min(connection_limit * 2, 4096))
|
||||
return max(64, min(connection_limit * 2, 4096))
|
||||
|
||||
|
||||
def _validate_rate_limit(value: str) -> str:
|
||||
@@ -115,7 +115,6 @@ class AppConfig:
|
||||
server_connection_limit: int
|
||||
server_backlog: int
|
||||
server_channel_timeout: int
|
||||
server_max_buffer_size: int
|
||||
server_threads_auto: bool
|
||||
server_connection_limit_auto: bool
|
||||
server_backlog_auto: bool
|
||||
@@ -136,7 +135,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds: float
|
||||
object_key_max_length_bytes: int
|
||||
object_cache_max_size: int
|
||||
meta_read_cache_max: int
|
||||
bucket_config_cache_ttl_seconds: float
|
||||
object_tag_limit: int
|
||||
encryption_chunk_size_bytes: int
|
||||
@@ -152,19 +150,6 @@ class AppConfig:
|
||||
allowed_redirect_hosts: list[str]
|
||||
allow_internal_endpoints: bool
|
||||
website_hosting_enabled: bool
|
||||
gc_enabled: bool
|
||||
gc_interval_hours: float
|
||||
gc_temp_file_max_age_hours: float
|
||||
gc_multipart_max_age_days: int
|
||||
gc_lock_file_max_age_hours: float
|
||||
gc_dry_run: bool
|
||||
gc_io_throttle_ms: int
|
||||
integrity_enabled: bool
|
||||
integrity_interval_hours: float
|
||||
integrity_batch_size: int
|
||||
integrity_auto_heal: bool
|
||||
integrity_dry_run: bool
|
||||
integrity_io_throttle_ms: int
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
|
||||
@@ -256,7 +241,7 @@ class AppConfig:
|
||||
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
|
||||
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
|
||||
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 5))
|
||||
|
||||
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
|
||||
@@ -297,7 +282,6 @@ class AppConfig:
|
||||
server_backlog_auto = False
|
||||
|
||||
server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
|
||||
server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
|
||||
site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
|
||||
site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
|
||||
@@ -316,7 +300,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
|
||||
object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
|
||||
object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
|
||||
meta_read_cache_max = int(_get("META_READ_CACHE_MAX", 2048))
|
||||
bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
|
||||
object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
|
||||
encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
|
||||
@@ -331,24 +314,11 @@ class AppConfig:
|
||||
site_region = str(_get("SITE_REGION", "us-east-1"))
|
||||
site_priority = int(_get("SITE_PRIORITY", 100))
|
||||
ratelimit_admin = _validate_rate_limit(str(_get("RATE_LIMIT_ADMIN", "60 per minute")))
|
||||
num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 1))
|
||||
num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 0))
|
||||
allowed_redirect_hosts_raw = _get("ALLOWED_REDIRECT_HOSTS", "")
|
||||
allowed_redirect_hosts = [h.strip() for h in str(allowed_redirect_hosts_raw).split(",") if h.strip()]
|
||||
allow_internal_endpoints = str(_get("ALLOW_INTERNAL_ENDPOINTS", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
website_hosting_enabled = str(_get("WEBSITE_HOSTING_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_enabled = str(_get("GC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_interval_hours = float(_get("GC_INTERVAL_HOURS", 6.0))
|
||||
gc_temp_file_max_age_hours = float(_get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0))
|
||||
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
|
||||
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
|
||||
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
|
||||
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
|
||||
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
|
||||
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
|
||||
|
||||
return cls(storage_root=storage_root,
|
||||
max_upload_size=max_upload_size,
|
||||
@@ -402,7 +372,6 @@ class AppConfig:
|
||||
server_connection_limit=server_connection_limit,
|
||||
server_backlog=server_backlog,
|
||||
server_channel_timeout=server_channel_timeout,
|
||||
server_max_buffer_size=server_max_buffer_size,
|
||||
server_threads_auto=server_threads_auto,
|
||||
server_connection_limit_auto=server_connection_limit_auto,
|
||||
server_backlog_auto=server_backlog_auto,
|
||||
@@ -423,7 +392,6 @@ class AppConfig:
|
||||
site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
|
||||
object_key_max_length_bytes=object_key_max_length_bytes,
|
||||
object_cache_max_size=object_cache_max_size,
|
||||
meta_read_cache_max=meta_read_cache_max,
|
||||
bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
|
||||
object_tag_limit=object_tag_limit,
|
||||
encryption_chunk_size_bytes=encryption_chunk_size_bytes,
|
||||
@@ -438,20 +406,7 @@ class AppConfig:
|
||||
num_trusted_proxies=num_trusted_proxies,
|
||||
allowed_redirect_hosts=allowed_redirect_hosts,
|
||||
allow_internal_endpoints=allow_internal_endpoints,
|
||||
website_hosting_enabled=website_hosting_enabled,
|
||||
gc_enabled=gc_enabled,
|
||||
gc_interval_hours=gc_interval_hours,
|
||||
gc_temp_file_max_age_hours=gc_temp_file_max_age_hours,
|
||||
gc_multipart_max_age_days=gc_multipart_max_age_days,
|
||||
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
|
||||
gc_dry_run=gc_dry_run,
|
||||
gc_io_throttle_ms=gc_io_throttle_ms,
|
||||
integrity_enabled=integrity_enabled,
|
||||
integrity_interval_hours=integrity_interval_hours,
|
||||
integrity_batch_size=integrity_batch_size,
|
||||
integrity_auto_heal=integrity_auto_heal,
|
||||
integrity_dry_run=integrity_dry_run,
|
||||
integrity_io_throttle_ms=integrity_io_throttle_ms)
|
||||
website_hosting_enabled=website_hosting_enabled)
|
||||
|
||||
def validate_and_report(self) -> list[str]:
|
||||
"""Validate configuration and return a list of warnings/issues.
|
||||
@@ -516,12 +471,10 @@ class AppConfig:
|
||||
issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
|
||||
if not (10 <= self.server_connection_limit <= 1000):
|
||||
issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
|
||||
if not (128 <= self.server_backlog <= 4096):
|
||||
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
|
||||
if not (64 <= self.server_backlog <= 4096):
|
||||
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
|
||||
if not (10 <= self.server_channel_timeout <= 300):
|
||||
issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
|
||||
if self.server_max_buffer_size < 1024 * 1024:
|
||||
issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")
|
||||
|
||||
if sys.platform != "win32":
|
||||
try:
|
||||
@@ -567,7 +520,6 @@ class AppConfig:
|
||||
print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
|
||||
print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
|
||||
print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
|
||||
print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
|
||||
print("=" * 60)
|
||||
|
||||
issues = self.validate_and_report()
|
||||
@@ -633,7 +585,6 @@ class AppConfig:
|
||||
"SERVER_CONNECTION_LIMIT": self.server_connection_limit,
|
||||
"SERVER_BACKLOG": self.server_backlog,
|
||||
"SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
|
||||
"SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
|
||||
"SITE_SYNC_ENABLED": self.site_sync_enabled,
|
||||
"SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
|
||||
"SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
|
||||
@@ -651,7 +602,6 @@ class AppConfig:
|
||||
"SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
|
||||
"OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
|
||||
"OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
|
||||
"META_READ_CACHE_MAX": self.meta_read_cache_max,
|
||||
"BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
|
||||
"OBJECT_TAG_LIMIT": self.object_tag_limit,
|
||||
"ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
|
||||
@@ -667,17 +617,4 @@ class AppConfig:
|
||||
"ALLOWED_REDIRECT_HOSTS": self.allowed_redirect_hosts,
|
||||
"ALLOW_INTERNAL_ENDPOINTS": self.allow_internal_endpoints,
|
||||
"WEBSITE_HOSTING_ENABLED": self.website_hosting_enabled,
|
||||
"GC_ENABLED": self.gc_enabled,
|
||||
"GC_INTERVAL_HOURS": self.gc_interval_hours,
|
||||
"GC_TEMP_FILE_MAX_AGE_HOURS": self.gc_temp_file_max_age_hours,
|
||||
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
|
||||
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
|
||||
"GC_DRY_RUN": self.gc_dry_run,
|
||||
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
|
||||
"INTEGRITY_ENABLED": self.integrity_enabled,
|
||||
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
|
||||
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
|
||||
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
|
||||
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
|
||||
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
|
||||
}
|
||||
@@ -190,15 +190,6 @@ class EncryptedObjectStorage:
|
||||
def list_objects(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects(bucket_name, **kwargs)
|
||||
|
||||
def list_objects_shallow(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects_shallow(bucket_name, **kwargs)
|
||||
|
||||
def iter_objects_shallow(self, bucket_name: str, **kwargs):
|
||||
return self.storage.iter_objects_shallow(bucket_name, **kwargs)
|
||||
|
||||
def search_objects(self, bucket_name: str, query: str, **kwargs):
|
||||
return self.storage.search_objects(bucket_name, query, **kwargs)
|
||||
|
||||
def list_objects_all(self, bucket_name: str):
|
||||
return self.storage.list_objects_all(bucket_name)
|
||||
|
||||
@@ -19,17 +19,6 @@ from cryptography.hazmat.primitives import hashes
|
||||
if sys.platform != "win32":
|
||||
import fcntl
|
||||
|
||||
try:
|
||||
import myfsio_core as _rc
|
||||
if not all(hasattr(_rc, f) for f in (
|
||||
"encrypt_stream_chunked", "decrypt_stream_chunked",
|
||||
)):
|
||||
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
|
||||
_HAS_RUST = True
|
||||
except ImportError:
|
||||
_rc = None
|
||||
_HAS_RUST = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -349,69 +338,6 @@ class StreamingEncryptor:
|
||||
output.seek(0)
|
||||
return output
|
||||
|
||||
def encrypt_file(self, input_path: str, output_path: str) -> EncryptionMetadata:
|
||||
data_key, encrypted_data_key = self.provider.generate_data_key()
|
||||
base_nonce = secrets.token_bytes(12)
|
||||
|
||||
if _HAS_RUST:
|
||||
_rc.encrypt_stream_chunked(
|
||||
input_path, output_path, data_key, base_nonce, self.chunk_size
|
||||
)
|
||||
else:
|
||||
with open(input_path, "rb") as stream:
|
||||
aesgcm = AESGCM(data_key)
|
||||
with open(output_path, "wb") as out:
|
||||
out.write(b"\x00\x00\x00\x00")
|
||||
chunk_index = 0
|
||||
while True:
|
||||
chunk = stream.read(self.chunk_size)
|
||||
if not chunk:
|
||||
break
|
||||
chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
|
||||
encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
|
||||
out.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
|
||||
out.write(encrypted_chunk)
|
||||
chunk_index += 1
|
||||
out.seek(0)
|
||||
out.write(chunk_index.to_bytes(4, "big"))
|
||||
|
||||
return EncryptionMetadata(
|
||||
algorithm="AES256",
|
||||
key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
|
||||
nonce=base_nonce,
|
||||
encrypted_data_key=encrypted_data_key,
|
||||
)
|
||||
|
||||
def decrypt_file(self, input_path: str, output_path: str,
|
||||
metadata: EncryptionMetadata) -> None:
|
||||
data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)
|
||||
base_nonce = metadata.nonce
|
||||
|
||||
if _HAS_RUST:
|
||||
_rc.decrypt_stream_chunked(input_path, output_path, data_key, base_nonce)
|
||||
else:
|
||||
with open(input_path, "rb") as stream:
|
||||
chunk_count_bytes = stream.read(4)
|
||||
if len(chunk_count_bytes) < 4:
|
||||
raise EncryptionError("Invalid encrypted stream: missing header")
|
||||
chunk_count = int.from_bytes(chunk_count_bytes, "big")
|
||||
aesgcm = AESGCM(data_key)
|
||||
with open(output_path, "wb") as out:
|
||||
for chunk_index in range(chunk_count):
|
||||
size_bytes = stream.read(self.HEADER_SIZE)
|
||||
if len(size_bytes) < self.HEADER_SIZE:
|
||||
raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
|
||||
chunk_size = int.from_bytes(size_bytes, "big")
|
||||
encrypted_chunk = stream.read(chunk_size)
|
||||
if len(encrypted_chunk) < chunk_size:
|
||||
raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")
|
||||
chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
|
||||
try:
|
||||
decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
|
||||
out.write(decrypted_chunk)
|
||||
except Exception as exc:
|
||||
raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc
|
||||
|
||||
|
||||
class EncryptionManager:
|
||||
"""Manages encryption providers and operations."""
|
||||
@@ -175,21 +175,13 @@ def handle_app_error(error: AppError) -> Response:
|
||||
|
||||
def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
|
||||
g.s3_error_code = "SlowDown"
|
||||
if request.path.startswith("/ui") or request.path.startswith("/buckets"):
|
||||
wants_json = (
|
||||
request.is_json or
|
||||
request.headers.get("X-Requested-With") == "XMLHttpRequest" or
|
||||
"application/json" in request.accept_mimetypes.values()
|
||||
)
|
||||
if wants_json:
|
||||
return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
|
||||
error = Element("Error")
|
||||
SubElement(error, "Code").text = "SlowDown"
|
||||
SubElement(error, "Message").text = "Please reduce your request rate."
|
||||
SubElement(error, "Resource").text = request.path
|
||||
SubElement(error, "RequestId").text = getattr(g, "request_id", "")
|
||||
xml_bytes = tostring(error, encoding="utf-8")
|
||||
return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
|
||||
return Response(xml_bytes, status=429, mimetype="application/xml")
|
||||
|
||||
|
||||
def register_error_handlers(app):
|
||||
610
app/iam.py
Normal file
610
app/iam.py
Normal file
@@ -0,0 +1,610 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import math
|
||||
import os
|
||||
import secrets
|
||||
import threading
|
||||
import time
|
||||
from collections import deque
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
|
||||
|
||||
|
||||
class IamError(RuntimeError):
|
||||
"""Raised when authentication or authorization fails."""
|
||||
|
||||
|
||||
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
|
||||
IAM_ACTIONS = {
|
||||
"iam:list_users",
|
||||
"iam:create_user",
|
||||
"iam:delete_user",
|
||||
"iam:rotate_key",
|
||||
"iam:update_policy",
|
||||
}
|
||||
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
|
||||
|
||||
ACTION_ALIASES = {
|
||||
"list": "list",
|
||||
"s3:listbucket": "list",
|
||||
"s3:listallmybuckets": "list",
|
||||
"s3:listbucketversions": "list",
|
||||
"s3:listmultipartuploads": "list",
|
||||
"s3:listparts": "list",
|
||||
"read": "read",
|
||||
"s3:getobject": "read",
|
||||
"s3:getobjectversion": "read",
|
||||
"s3:getobjecttagging": "read",
|
||||
"s3:getobjectversiontagging": "read",
|
||||
"s3:getobjectacl": "read",
|
||||
"s3:getbucketversioning": "read",
|
||||
"s3:headobject": "read",
|
||||
"s3:headbucket": "read",
|
||||
"write": "write",
|
||||
"s3:putobject": "write",
|
||||
"s3:createbucket": "write",
|
||||
"s3:putobjecttagging": "write",
|
||||
"s3:putbucketversioning": "write",
|
||||
"s3:createmultipartupload": "write",
|
||||
"s3:uploadpart": "write",
|
||||
"s3:completemultipartupload": "write",
|
||||
"s3:abortmultipartupload": "write",
|
||||
"s3:copyobject": "write",
|
||||
"delete": "delete",
|
||||
"s3:deleteobject": "delete",
|
||||
"s3:deleteobjectversion": "delete",
|
||||
"s3:deletebucket": "delete",
|
||||
"s3:deleteobjecttagging": "delete",
|
||||
"share": "share",
|
||||
"s3:putobjectacl": "share",
|
||||
"s3:putbucketacl": "share",
|
||||
"s3:getbucketacl": "share",
|
||||
"policy": "policy",
|
||||
"s3:putbucketpolicy": "policy",
|
||||
"s3:getbucketpolicy": "policy",
|
||||
"s3:deletebucketpolicy": "policy",
|
||||
"replication": "replication",
|
||||
"s3:getreplicationconfiguration": "replication",
|
||||
"s3:putreplicationconfiguration": "replication",
|
||||
"s3:deletereplicationconfiguration": "replication",
|
||||
"s3:replicateobject": "replication",
|
||||
"s3:replicatetags": "replication",
|
||||
"s3:replicatedelete": "replication",
|
||||
"lifecycle": "lifecycle",
|
||||
"s3:getlifecycleconfiguration": "lifecycle",
|
||||
"s3:putlifecycleconfiguration": "lifecycle",
|
||||
"s3:deletelifecycleconfiguration": "lifecycle",
|
||||
"s3:getbucketlifecycle": "lifecycle",
|
||||
"s3:putbucketlifecycle": "lifecycle",
|
||||
"cors": "cors",
|
||||
"s3:getbucketcors": "cors",
|
||||
"s3:putbucketcors": "cors",
|
||||
"s3:deletebucketcors": "cors",
|
||||
"iam:listusers": "iam:list_users",
|
||||
"iam:createuser": "iam:create_user",
|
||||
"iam:deleteuser": "iam:delete_user",
|
||||
"iam:rotateaccesskey": "iam:rotate_key",
|
||||
"iam:putuserpolicy": "iam:update_policy",
|
||||
"iam:*": "iam:*",
|
||||
}
|
||||
|
||||
|
||||
@dataclass
class Policy:
    """A single authorization grant: a set of canonical actions on one bucket.

    ``bucket`` is either a concrete (lower-cased) bucket name or the
    wildcard ``"*"`` meaning "all buckets". ``actions`` holds canonical
    action names (e.g. ``"read"``, ``"write"``, ``"iam:*"``).
    """

    bucket: str
    actions: Set[str]
|
||||
|
||||
|
||||
@dataclass
class Principal:
    """An authenticated identity: an access key plus its granted policies.

    ``display_name`` is a human-friendly label; ``policies`` is evaluated
    in order when authorizing actions against buckets.
    """

    access_key: str
    display_name: str
    policies: List[Policy]
|
||||
|
||||
|
||||
class IamService:
    """Loads IAM configuration, manages users, and evaluates policies.

    The JSON config file on disk is the source of truth; it is hot-reloaded
    when its mtime changes (checked at most once per ``_stat_check_interval``
    seconds). Principals and secret keys are cached for ``_cache_ttl`` seconds
    to avoid re-reading the file on every request. Failed-login lockout state
    is persisted next to the config so restarts do not reset the counter.

    Fix over the previous revision: ``_sessions`` is now mutated only while
    holding ``_session_lock``. Previously ``create_session_token`` (and the
    cleanup it triggers) wrote to ``_sessions`` without the lock while
    ``validate_session_token`` read it under the lock — a data race.
    """

    def __init__(self, config_path: Path, auth_max_attempts: int = 5, auth_lockout_minutes: int = 15) -> None:
        self.config_path = Path(config_path)
        self.auth_max_attempts = auth_max_attempts
        self.auth_lockout_window = timedelta(minutes=auth_lockout_minutes)
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        if not self.config_path.exists():
            # First run: generate an admin user and print its credentials.
            self._write_default()
        self._users: Dict[str, Dict[str, Any]] = {}
        self._raw_config: Dict[str, Any] = {}
        # access_key -> timestamps of recent failed logins (pruned to the lockout window).
        self._failed_attempts: Dict[str, Deque[datetime]] = {}
        self._last_load_time = 0.0
        # access_key -> (Principal, cached_at); same shape for secret keys below.
        self._principal_cache: Dict[str, Tuple[Principal, float]] = {}
        self._secret_key_cache: Dict[str, Tuple[str, float]] = {}
        self._cache_ttl = float(os.environ.get("IAM_CACHE_TTL_SECONDS", "5.0"))
        self._last_stat_check = 0.0
        self._stat_check_interval = 1.0
        # session token -> {"access_key", "expires_at"}; guarded by _session_lock.
        self._sessions: Dict[str, Dict[str, Any]] = {}
        self._session_lock = threading.Lock()
        self._load()
        self._load_lockout_state()

    def _maybe_reload(self) -> None:
        """Reload configuration if the file has changed on disk."""
        now = time.time()
        # Rate-limit stat() calls so hot paths don't hit the filesystem each time.
        if now - self._last_stat_check < self._stat_check_interval:
            return
        self._last_stat_check = now
        try:
            if self.config_path.stat().st_mtime > self._last_load_time:
                self._load()
                # Caches may hold entries from the old config; drop them.
                self._principal_cache.clear()
                self._secret_key_cache.clear()
        except OSError:
            # Best-effort: keep serving the in-memory config if stat fails.
            pass

    def authenticate(self, access_key: str, secret_key: str) -> Principal:
        """Verify credentials and return the matching Principal.

        Raises IamError on missing credentials, lockout, or a bad key pair.
        Failed attempts are recorded and eventually trigger a lockout.
        """
        self._maybe_reload()
        access_key = (access_key or "").strip()
        secret_key = (secret_key or "").strip()
        if not access_key or not secret_key:
            raise IamError("Missing access credentials")
        if self._is_locked_out(access_key):
            seconds = self._seconds_until_unlock(access_key)
            raise IamError(
                f"Access temporarily locked. Try again in {seconds} seconds."
            )
        record = self._users.get(access_key)
        # Compare against a random dummy secret for unknown keys so that
        # "unknown user" and "wrong password" take comparable time.
        stored_secret = record["secret_key"] if record else secrets.token_urlsafe(24)
        if not record or not hmac.compare_digest(stored_secret, secret_key):
            self._record_failed_attempt(access_key)
            raise IamError("Invalid credentials")
        self._clear_failed_attempts(access_key)
        return self._build_principal(access_key, record)

    def _record_failed_attempt(self, access_key: str) -> None:
        """Append a failed-login timestamp for the key and persist lockout state."""
        if not access_key:
            return
        attempts = self._failed_attempts.setdefault(access_key, deque())
        self._prune_attempts(attempts)
        attempts.append(datetime.now(timezone.utc))
        self._save_lockout_state()

    def _clear_failed_attempts(self, access_key: str) -> None:
        """Forget failed attempts after a successful login; persist if anything changed."""
        if not access_key:
            return
        if self._failed_attempts.pop(access_key, None) is not None:
            self._save_lockout_state()

    def _lockout_file(self) -> Path:
        """Path of the lockout-state file, stored beside the IAM config."""
        return self.config_path.parent / "lockout_state.json"

    def _load_lockout_state(self) -> None:
        """Load lockout state from disk."""
        try:
            if self._lockout_file().exists():
                data = json.loads(self._lockout_file().read_text(encoding="utf-8"))
                cutoff = datetime.now(timezone.utc) - self.auth_lockout_window
                for key, timestamps in data.get("failed_attempts", {}).items():
                    valid = []
                    for ts in timestamps:
                        try:
                            dt = datetime.fromisoformat(ts)
                            # Discard attempts that have aged out of the window.
                            if dt > cutoff:
                                valid.append(dt)
                        except (ValueError, TypeError):
                            continue
                    if valid:
                        self._failed_attempts[key] = deque(valid)
        except (OSError, json.JSONDecodeError):
            # Corrupt/unreadable lockout state is not fatal; start fresh.
            pass

    def _save_lockout_state(self) -> None:
        """Persist lockout state to disk."""
        data: Dict[str, Any] = {"failed_attempts": {}}
        for key, attempts in self._failed_attempts.items():
            data["failed_attempts"][key] = [ts.isoformat() for ts in attempts]
        try:
            self._lockout_file().write_text(json.dumps(data), encoding="utf-8")
        except OSError:
            # Best-effort persistence; in-memory state still applies.
            pass

    def _prune_attempts(self, attempts: Deque[datetime]) -> None:
        """Drop attempts older than the lockout window from the left of the deque."""
        cutoff = datetime.now(timezone.utc) - self.auth_lockout_window
        while attempts and attempts[0] < cutoff:
            attempts.popleft()

    def _is_locked_out(self, access_key: str) -> bool:
        """True when the key has >= auth_max_attempts recent failures."""
        if not access_key:
            return False
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return False
        self._prune_attempts(attempts)
        return len(attempts) >= self.auth_max_attempts

    def _seconds_until_unlock(self, access_key: str) -> int:
        """Seconds until the oldest counted failure ages out; 0 if not locked."""
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return 0
        self._prune_attempts(attempts)
        if len(attempts) < self.auth_max_attempts:
            return 0
        oldest = attempts[0]
        elapsed = (datetime.now(timezone.utc) - oldest).total_seconds()
        return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))

    def create_session_token(self, access_key: str, duration_seconds: int = 3600) -> str:
        """Create a temporary session token for an access key."""
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        token = secrets.token_urlsafe(32)
        expires_at = time.time() + duration_seconds
        # Mutate _sessions only under the lock — validate_session_token reads
        # it under the same lock (this was previously racy).
        with self._session_lock:
            self._cleanup_expired_sessions()
            self._sessions[token] = {
                "access_key": access_key,
                "expires_at": expires_at,
            }
        return token

    def validate_session_token(self, access_key: str, session_token: str) -> bool:
        """Validate a session token for an access key (thread-safe, constant-time)."""
        # Dummy values keep the comparison count constant on every exit path.
        dummy_key = secrets.token_urlsafe(16)
        dummy_token = secrets.token_urlsafe(32)
        with self._session_lock:
            session = self._sessions.get(session_token)
            if not session:
                hmac.compare_digest(access_key, dummy_key)
                hmac.compare_digest(session_token, dummy_token)
                return False
            key_match = hmac.compare_digest(session["access_key"], access_key)
            if not key_match:
                hmac.compare_digest(session_token, dummy_token)
                return False
            if time.time() > session["expires_at"]:
                # Expired tokens are removed lazily on validation.
                self._sessions.pop(session_token, None)
                return False
            return True

    def _cleanup_expired_sessions(self) -> None:
        """Remove expired session tokens. Caller must hold ``_session_lock``."""
        now = time.time()
        expired = [token for token, data in self._sessions.items() if now > data["expires_at"]]
        for token in expired:
            del self._sessions[token]

    def principal_for_key(self, access_key: str) -> Principal:
        """Return the Principal for a known key (TTL-cached); raises IamError if unknown."""
        now = time.time()
        cached = self._principal_cache.get(access_key)
        if cached:
            principal, cached_time = cached
            if now - cached_time < self._cache_ttl:
                return principal

        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        principal = self._build_principal(access_key, record)
        self._principal_cache[access_key] = (principal, now)
        return principal

    def secret_for_key(self, access_key: str) -> str:
        """Return the secret key for a known access key; raises IamError if unknown."""
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        return record["secret_key"]

    def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
        """Raise IamError unless the principal may perform `action` on `bucket_name`.

        A None bucket is treated as the wildcard "*" (account-level actions).
        """
        action = self._normalize_action(action)
        if action not in ALLOWED_ACTIONS:
            raise IamError(f"Unknown action '{action}'")
        bucket_name = bucket_name or "*"
        normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
        if not self._is_allowed(principal, normalized, action):
            raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")

    def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
        """Map each requested action (as given) to whether the principal may perform it."""
        self._maybe_reload()
        bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
        normalized_actions = {a: self._normalize_action(a) for a in actions}
        results: Dict[str, bool] = {}
        for original, canonical in normalized_actions.items():
            if canonical not in ALLOWED_ACTIONS:
                # Unknown/unmappable actions are simply denied, not raised.
                results[original] = False
            else:
                results[original] = self._is_allowed(principal, bucket_name, canonical)
        return results

    def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
        """Filter `buckets` down to those the principal may 'list'."""
        return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]

    def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
        """Policy evaluation: any matching policy granting the action allows it."""
        bucket_name = bucket_name.lower()
        for policy in principal.policies:
            if policy.bucket not in {"*", bucket_name}:
                continue
            if "*" in policy.actions or action in policy.actions:
                return True
            # "iam:*" grants every iam-namespaced action.
            if "iam:*" in policy.actions and action.startswith("iam:"):
                return True
        return False

    def list_users(self) -> List[Dict[str, Any]]:
        """Return all users (no secrets) with their policies, actions sorted."""
        listing: List[Dict[str, Any]] = []
        for access_key, record in self._users.items():
            listing.append(
                {
                    "access_key": access_key,
                    "display_name": record["display_name"],
                    "policies": [
                        {"bucket": policy.bucket, "actions": sorted(policy.actions)}
                        for policy in record["policies"]
                    ],
                }
            )
        return listing

    def create_user(
        self,
        *,
        display_name: str,
        policies: Optional[Sequence[Dict[str, Any]]] = None,
        access_key: str | None = None,
        secret_key: str | None = None,
    ) -> Dict[str, str]:
        """Create a user, persist config, and return its credentials.

        Keys are generated when not supplied; policies default to a broad
        grant (see _prepare_policy_payload). Raises IamError on empty or
        duplicate access keys.
        """
        access_key = (access_key or self._generate_access_key()).strip()
        if not access_key:
            raise IamError("Access key cannot be empty")
        if access_key in self._users:
            raise IamError("Access key already exists")
        secret_key = secret_key or self._generate_secret_key()
        sanitized_policies = self._prepare_policy_payload(policies)
        record = {
            "access_key": access_key,
            "secret_key": secret_key,
            "display_name": display_name or access_key,
            "policies": sanitized_policies,
        }
        self._raw_config.setdefault("users", []).append(record)
        self._save()
        # Re-load so the in-memory index reflects exactly what was written.
        self._load()
        return {"access_key": access_key, "secret_key": secret_key}

    def rotate_secret(self, access_key: str) -> str:
        """Replace the user's secret key, invalidate caches, and return the new secret."""
        user = self._get_raw_user(access_key)
        new_secret = self._generate_secret_key()
        user["secret_key"] = new_secret
        self._save()
        self._principal_cache.pop(access_key, None)
        self._secret_key_cache.pop(access_key, None)
        # SigV4 signing keys are derived from the secret; flush that cache too.
        from .s3_api import clear_signing_key_cache
        clear_signing_key_cache()
        self._load()
        return new_secret

    def update_user(self, access_key: str, display_name: str) -> None:
        """Update a user's display name and persist the change."""
        user = self._get_raw_user(access_key)
        user["display_name"] = display_name
        self._save()
        self._load()

    def delete_user(self, access_key: str) -> None:
        """Delete a user; refuses to remove the last remaining user."""
        users = self._raw_config.get("users", [])
        if len(users) <= 1:
            raise IamError("Cannot delete the only user")
        remaining = [user for user in users if user["access_key"] != access_key]
        if len(remaining) == len(users):
            raise IamError("User not found")
        self._raw_config["users"] = remaining
        self._save()
        self._principal_cache.pop(access_key, None)
        self._secret_key_cache.pop(access_key, None)
        from .s3_api import clear_signing_key_cache
        clear_signing_key_cache()
        self._load()

    def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
        """Replace a user's policies with a sanitized copy and persist."""
        user = self._get_raw_user(access_key)
        user["policies"] = self._prepare_policy_payload(policies)
        self._save()
        self._load()

    def _load(self) -> None:
        """Parse the config file into the in-memory user index and raw config."""
        try:
            self._last_load_time = self.config_path.stat().st_mtime
            content = self.config_path.read_text(encoding='utf-8')
            raw = json.loads(content)
        except FileNotFoundError:
            raise IamError(f"IAM config not found: {self.config_path}")
        except json.JSONDecodeError as e:
            raise IamError(f"Corrupted IAM config (invalid JSON): {e}")
        except PermissionError as e:
            raise IamError(f"Cannot read IAM config (permission denied): {e}")
        except (OSError, ValueError) as e:
            raise IamError(f"Failed to load IAM config: {e}")

        users: Dict[str, Dict[str, Any]] = {}
        for user in raw.get("users", []):
            policies = self._build_policy_objects(user.get("policies", []))
            users[user["access_key"]] = {
                "secret_key": user["secret_key"],
                "display_name": user.get("display_name", user["access_key"]),
                "policies": policies,
            }
        if not users:
            raise IamError("IAM configuration contains no users")
        self._users = users
        # Keep a normalized raw copy so _save() writes a stable shape.
        self._raw_config = {
            "users": [
                {
                    "access_key": entry["access_key"],
                    "secret_key": entry["secret_key"],
                    "display_name": entry.get("display_name", entry["access_key"]),
                    "policies": entry.get("policies", []),
                }
                for entry in raw.get("users", [])
            ]
        }

    def _save(self) -> None:
        """Atomically write the raw config (tmp file + rename)."""
        try:
            temp_path = self.config_path.with_suffix('.json.tmp')
            temp_path.write_text(json.dumps(self._raw_config, indent=2), encoding='utf-8')
            temp_path.replace(self.config_path)
        except (OSError, PermissionError) as e:
            raise IamError(f"Cannot save IAM config: {e}")

    def config_summary(self) -> Dict[str, Any]:
        """Small status dict: config path, user count, and the allowed action set."""
        return {
            "path": str(self.config_path),
            "user_count": len(self._users),
            "allowed_actions": sorted(ALLOWED_ACTIONS),
        }

    def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
        """Return a copy of the raw config, masking secret keys unless told not to."""
        payload: Dict[str, Any] = {"users": []}
        for user in self._raw_config.get("users", []):
            record = dict(user)
            if mask_secrets and "secret_key" in record:
                record["secret_key"] = "••••••••••"
            payload["users"].append(record)
        return payload

    def _build_policy_objects(self, policies: Sequence[Dict[str, Any]]) -> List[Policy]:
        """Convert raw policy dicts into Policy objects with canonical action sets."""
        entries: List[Policy] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    # Wildcard expands to every allowed action.
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if action_set:
                entries.append(Policy(bucket=bucket, actions=action_set))
        return entries

    def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        """Sanitize policy dicts for persistence; supply a default grant when empty.

        Raises IamError when no policy retains at least one valid action.
        """
        if not policies:
            policies = (
                {
                    "bucket": "*",
                    "actions": ["list", "read", "write", "delete", "share", "policy"],
                },
            )
        sanitized: List[Dict[str, Any]] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if not action_set:
                # Policies with no recognizable actions are silently dropped.
                continue
            sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
        if not sanitized:
            raise IamError("At least one policy with valid actions is required")
        return sanitized

    def _build_principal(self, access_key: str, record: Dict[str, Any]) -> Principal:
        """Assemble a Principal from a user record."""
        return Principal(
            access_key=access_key,
            display_name=record["display_name"],
            policies=record["policies"],
        )

    def _normalize_action(self, action: str) -> str:
        """Map an action string to its canonical name, or "" if unrecognized."""
        if not action:
            return ""
        lowered = action.strip().lower()
        if lowered == "*":
            return "*"
        candidate = ACTION_ALIASES.get(lowered, lowered)
        return candidate if candidate in ALLOWED_ACTIONS else ""

    def _write_default(self) -> None:
        """First-run bootstrap: create an admin user and print its credentials."""
        access_key = secrets.token_hex(12)
        secret_key = secrets.token_urlsafe(32)
        default = {
            "users": [
                {
                    "access_key": access_key,
                    "secret_key": secret_key,
                    "display_name": "Local Admin",
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }
        self.config_path.write_text(json.dumps(default, indent=2))
        print(f"\n{'='*60}")
        print("MYFSIO FIRST RUN - ADMIN CREDENTIALS GENERATED")
        print(f"{'='*60}")
        print(f"Access Key: {access_key}")
        print(f"Secret Key: {secret_key}")
        print(f"{'='*60}")
        print(f"Missed this? Check: {self.config_path}")
        print(f"{'='*60}\n")

    def _generate_access_key(self) -> str:
        """Random 16-hex-char access key."""
        return secrets.token_hex(8)

    def _generate_secret_key(self) -> str:
        """Random URL-safe secret key."""
        return secrets.token_urlsafe(24)

    def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
        """Return the mutable raw-config record for a user; raises IamError if absent."""
        for user in self._raw_config.get("users", []):
            if user["access_key"] == access_key:
                return user
        raise IamError("User not found")

    def get_secret_key(self, access_key: str) -> str | None:
        """Like secret_for_key but returns None for unknown keys; TTL-cached."""
        now = time.time()
        cached = self._secret_key_cache.get(access_key)
        if cached:
            secret_key, cached_time = cached
            if now - cached_time < self._cache_ttl:
                return secret_key

        self._maybe_reload()
        record = self._users.get(access_key)
        if record:
            secret_key = record["secret_key"]
            self._secret_key_cache[access_key] = (secret_key, now)
            return secret_key
        return None

    def get_principal(self, access_key: str) -> Principal | None:
        """Like principal_for_key but returns None for unknown keys; TTL-cached."""
        now = time.time()
        cached = self._principal_cache.get(access_key)
        if cached:
            principal, cached_time = cached
            if now - cached_time < self._cache_ttl:
                return principal

        self._maybe_reload()
        record = self._users.get(access_key)
        if record:
            principal = self._build_principal(access_key, record)
            self._principal_cache[access_key] = (principal, now)
            return principal
        return None
|
||||
@@ -15,23 +15,29 @@ from typing import Any, Dict, List, Optional
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
from urllib3.util.connection import create_connection as _urllib3_create_connection
|
||||
|
||||
|
||||
def _resolve_and_check_url(url: str, allow_internal: bool = False) -> Optional[str]:
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
"""Check if a URL is safe to make requests to (not internal/private).
|
||||
|
||||
Args:
|
||||
url: The URL to check.
|
||||
allow_internal: If True, allows internal/private IP addresses.
|
||||
Use for self-hosted deployments on internal networks.
|
||||
"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
hostname = parsed.hostname
|
||||
if not hostname:
|
||||
return None
|
||||
return False
|
||||
cloud_metadata_hosts = {
|
||||
"metadata.google.internal",
|
||||
"169.254.169.254",
|
||||
}
|
||||
if hostname.lower() in cloud_metadata_hosts:
|
||||
return None
|
||||
return False
|
||||
if allow_internal:
|
||||
return hostname
|
||||
return True
|
||||
blocked_hosts = {
|
||||
"localhost",
|
||||
"127.0.0.1",
|
||||
@@ -40,46 +46,17 @@ def _resolve_and_check_url(url: str, allow_internal: bool = False) -> Optional[s
|
||||
"[::1]",
|
||||
}
|
||||
if hostname.lower() in blocked_hosts:
|
||||
return None
|
||||
return False
|
||||
try:
|
||||
resolved_ip = socket.gethostbyname(hostname)
|
||||
ip = ipaddress.ip_address(resolved_ip)
|
||||
if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
|
||||
return None
|
||||
return resolved_ip
|
||||
return False
|
||||
except (socket.gaierror, ValueError):
|
||||
return None
|
||||
return False
|
||||
return True
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
|
||||
return _resolve_and_check_url(url, allow_internal) is not None
|
||||
|
||||
|
||||
_dns_pin_lock = threading.Lock()
|
||||
|
||||
|
||||
def _pinned_post(url: str, pinned_ip: str, **kwargs: Any) -> requests.Response:
|
||||
parsed = urlparse(url)
|
||||
hostname = parsed.hostname or ""
|
||||
session = requests.Session()
|
||||
original_create = _urllib3_create_connection
|
||||
|
||||
def _create_pinned(address: Any, *args: Any, **kw: Any) -> Any:
|
||||
host, req_port = address
|
||||
if host == hostname:
|
||||
return original_create((pinned_ip, req_port), *args, **kw)
|
||||
return original_create(address, *args, **kw)
|
||||
|
||||
import urllib3.util.connection as _conn_mod
|
||||
with _dns_pin_lock:
|
||||
_conn_mod.create_connection = _create_pinned
|
||||
try:
|
||||
return session.post(url, **kwargs)
|
||||
finally:
|
||||
_conn_mod.create_connection = original_create
|
||||
|
||||
return False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -367,18 +344,16 @@ class NotificationService:
|
||||
self._queue.task_done()
|
||||
|
||||
def _send_notification(self, event: NotificationEvent, destination: WebhookDestination) -> None:
|
||||
resolved_ip = _resolve_and_check_url(destination.url, allow_internal=self._allow_internal_endpoints)
|
||||
if not resolved_ip:
|
||||
raise RuntimeError(f"Blocked request (SSRF protection): {destination.url}")
|
||||
if not _is_safe_url(destination.url, allow_internal=self._allow_internal_endpoints):
|
||||
raise RuntimeError(f"Blocked request to cloud metadata service (SSRF protection): {destination.url}")
|
||||
payload = event.to_s3_event()
|
||||
headers = {"Content-Type": "application/json", **destination.headers}
|
||||
|
||||
last_error = None
|
||||
for attempt in range(destination.retry_count):
|
||||
try:
|
||||
response = _pinned_post(
|
||||
response = requests.post(
|
||||
destination.url,
|
||||
resolved_ip,
|
||||
json=payload,
|
||||
headers=headers,
|
||||
timeout=destination.timeout_seconds,
|
||||
@@ -2,17 +2,13 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
MAX_LATENCY_SAMPLES = 5000
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -26,17 +22,6 @@ class OperationStats:
|
||||
latency_max_ms: float = 0.0
|
||||
bytes_in: int = 0
|
||||
bytes_out: int = 0
|
||||
latency_samples: List[float] = field(default_factory=list)
|
||||
|
||||
@staticmethod
|
||||
def _compute_percentile(sorted_data: List[float], p: float) -> float:
|
||||
if not sorted_data:
|
||||
return 0.0
|
||||
k = (len(sorted_data) - 1) * (p / 100.0)
|
||||
f = int(k)
|
||||
c = min(f + 1, len(sorted_data) - 1)
|
||||
d = k - f
|
||||
return sorted_data[f] + d * (sorted_data[c] - sorted_data[f])
|
||||
|
||||
def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
|
||||
self.count += 1
|
||||
@@ -51,17 +36,10 @@ class OperationStats:
|
||||
self.latency_max_ms = latency_ms
|
||||
self.bytes_in += bytes_in
|
||||
self.bytes_out += bytes_out
|
||||
if len(self.latency_samples) < MAX_LATENCY_SAMPLES:
|
||||
self.latency_samples.append(latency_ms)
|
||||
else:
|
||||
j = random.randint(0, self.count - 1)
|
||||
if j < MAX_LATENCY_SAMPLES:
|
||||
self.latency_samples[j] = latency_ms
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
|
||||
min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
|
||||
sorted_latencies = sorted(self.latency_samples)
|
||||
return {
|
||||
"count": self.count,
|
||||
"success_count": self.success_count,
|
||||
@@ -69,9 +47,6 @@ class OperationStats:
|
||||
"latency_avg_ms": round(avg_latency, 2),
|
||||
"latency_min_ms": round(min_latency, 2),
|
||||
"latency_max_ms": round(self.latency_max_ms, 2),
|
||||
"latency_p50_ms": round(self._compute_percentile(sorted_latencies, 50), 2),
|
||||
"latency_p95_ms": round(self._compute_percentile(sorted_latencies, 95), 2),
|
||||
"latency_p99_ms": round(self._compute_percentile(sorted_latencies, 99), 2),
|
||||
"bytes_in": self.bytes_in,
|
||||
"bytes_out": self.bytes_out,
|
||||
}
|
||||
@@ -87,11 +62,6 @@ class OperationStats:
|
||||
self.latency_max_ms = other.latency_max_ms
|
||||
self.bytes_in += other.bytes_in
|
||||
self.bytes_out += other.bytes_out
|
||||
combined = self.latency_samples + other.latency_samples
|
||||
if len(combined) > MAX_LATENCY_SAMPLES:
|
||||
random.shuffle(combined)
|
||||
combined = combined[:MAX_LATENCY_SAMPLES]
|
||||
self.latency_samples = combined
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -139,8 +109,8 @@ class OperationMetricsCollector:
|
||||
self.interval_seconds = interval_minutes * 60
|
||||
self.retention_hours = retention_hours
|
||||
self._lock = threading.Lock()
|
||||
self._by_method: Dict[str, OperationStats] = defaultdict(OperationStats)
|
||||
self._by_endpoint: Dict[str, OperationStats] = defaultdict(OperationStats)
|
||||
self._by_method: Dict[str, OperationStats] = {}
|
||||
self._by_endpoint: Dict[str, OperationStats] = {}
|
||||
self._by_status_class: Dict[str, int] = {}
|
||||
self._error_codes: Dict[str, int] = {}
|
||||
self._totals = OperationStats()
|
||||
@@ -212,8 +182,8 @@ class OperationMetricsCollector:
|
||||
self._prune_old_snapshots()
|
||||
self._save_history()
|
||||
|
||||
self._by_method = defaultdict(OperationStats)
|
||||
self._by_endpoint = defaultdict(OperationStats)
|
||||
self._by_method.clear()
|
||||
self._by_endpoint.clear()
|
||||
self._by_status_class.clear()
|
||||
self._error_codes.clear()
|
||||
self._totals = OperationStats()
|
||||
@@ -233,7 +203,12 @@ class OperationMetricsCollector:
|
||||
status_class = f"{status_code // 100}xx"
|
||||
|
||||
with self._lock:
|
||||
if method not in self._by_method:
|
||||
self._by_method[method] = OperationStats()
|
||||
self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
|
||||
|
||||
if endpoint_type not in self._by_endpoint:
|
||||
self._by_endpoint[endpoint_type] = OperationStats()
|
||||
self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)
|
||||
|
||||
self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1
|
||||
File diff suppressed because it is too large
Load Diff
@@ -245,7 +245,6 @@ def stream_objects_ndjson(
|
||||
url_templates: dict[str, str],
|
||||
display_tz: str = "UTC",
|
||||
versioning_enabled: bool = False,
|
||||
delimiter: Optional[str] = None,
|
||||
) -> Generator[str, None, None]:
|
||||
meta_line = json.dumps({
|
||||
"type": "meta",
|
||||
@@ -259,20 +258,11 @@ def stream_objects_ndjson(
|
||||
kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
|
||||
if prefix:
|
||||
kwargs["Prefix"] = prefix
|
||||
if delimiter:
|
||||
kwargs["Delimiter"] = delimiter
|
||||
|
||||
running_count = 0
|
||||
try:
|
||||
paginator = client.get_paginator("list_objects_v2")
|
||||
for page in paginator.paginate(**kwargs):
|
||||
for cp in page.get("CommonPrefixes", []):
|
||||
yield json.dumps({
|
||||
"type": "folder",
|
||||
"prefix": cp["Prefix"],
|
||||
}) + "\n"
|
||||
page_contents = page.get("Contents", [])
|
||||
for obj in page_contents:
|
||||
for obj in page.get("Contents", []):
|
||||
last_mod = obj["LastModified"]
|
||||
yield json.dumps({
|
||||
"type": "object",
|
||||
@@ -283,8 +273,6 @@ def stream_objects_ndjson(
|
||||
"last_modified_iso": format_datetime_iso(last_mod, display_tz),
|
||||
"etag": obj.get("ETag", "").strip('"'),
|
||||
}) + "\n"
|
||||
running_count += len(page_contents)
|
||||
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
|
||||
except ClientError as exc:
|
||||
error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
|
||||
yield json.dumps({"type": "error", "error": error_msg}) + "\n"
|
||||
File diff suppressed because it is too large
Load Diff
@@ -51,7 +51,6 @@ from .s3_client import (
|
||||
from .secret_store import EphemeralSecretStore
|
||||
from .site_registry import SiteRegistry, SiteInfo, PeerSite
|
||||
from .storage import ObjectStorage, StorageError
|
||||
from .website_domains import normalize_domain, is_valid_domain
|
||||
|
||||
ui_bp = Blueprint("ui", __name__, template_folder="../templates", url_prefix="/ui")
|
||||
|
||||
@@ -225,10 +224,10 @@ def _policy_allows_public_read(policy: dict[str, Any]) -> bool:
|
||||
|
||||
def _bucket_access_descriptor(policy: dict[str, Any] | None) -> tuple[str, str]:
|
||||
if not policy:
|
||||
return ("IAM only", "bg-secondary-subtle text-secondary-emphasis")
|
||||
return ("IAM only", "text-bg-secondary")
|
||||
if _policy_allows_public_read(policy):
|
||||
return ("Public read", "bg-warning-subtle text-warning-emphasis")
|
||||
return ("Custom policy", "bg-info-subtle text-info-emphasis")
|
||||
return ("Public read", "text-bg-warning")
|
||||
return ("Custom policy", "text-bg-info")
|
||||
|
||||
|
||||
def _current_principal():
|
||||
@@ -508,15 +507,11 @@ def bucket_detail(bucket_name: str):
|
||||
can_manage_quota = is_replication_admin
|
||||
|
||||
website_config = None
|
||||
website_domains = []
|
||||
if website_hosting_enabled:
|
||||
try:
|
||||
website_config = storage.get_bucket_website(bucket_name)
|
||||
except StorageError:
|
||||
website_config = None
|
||||
domain_store = current_app.extensions.get("website_domains")
|
||||
if domain_store:
|
||||
website_domains = domain_store.get_domains_for_bucket(bucket_name)
|
||||
|
||||
objects_api_url = url_for("ui.list_bucket_objects", bucket_name=bucket_name)
|
||||
objects_stream_url = url_for("ui.stream_bucket_objects", bucket_name=bucket_name)
|
||||
@@ -562,7 +557,6 @@ def bucket_detail(bucket_name: str):
|
||||
site_sync_enabled=site_sync_enabled,
|
||||
website_hosting_enabled=website_hosting_enabled,
|
||||
website_config=website_config,
|
||||
website_domains=website_domains,
|
||||
can_manage_website=can_edit_policy,
|
||||
)
|
||||
|
||||
@@ -616,79 +610,20 @@ def stream_bucket_objects(bucket_name: str):
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
prefix = request.args.get("prefix") or None
|
||||
delimiter = request.args.get("delimiter") or None
|
||||
|
||||
storage = _storage()
|
||||
try:
|
||||
versioning_enabled = storage.is_versioning_enabled(bucket_name)
|
||||
except StorageError:
|
||||
versioning_enabled = False
|
||||
client = get_session_s3_client()
|
||||
except (PermissionError, RuntimeError) as exc:
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
versioning_enabled = get_versioning_via_s3(client, bucket_name)
|
||||
url_templates = build_url_templates(bucket_name)
|
||||
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
|
||||
|
||||
def generate():
|
||||
yield json.dumps({
|
||||
"type": "meta",
|
||||
"versioning_enabled": versioning_enabled,
|
||||
"url_templates": url_templates,
|
||||
}) + "\n"
|
||||
yield json.dumps({"type": "count", "total_count": 0}) + "\n"
|
||||
|
||||
running_count = 0
|
||||
try:
|
||||
if delimiter:
|
||||
for item_type, item in storage.iter_objects_shallow(
|
||||
bucket_name, prefix=prefix or "", delimiter=delimiter,
|
||||
):
|
||||
if item_type == "folder":
|
||||
yield json.dumps({"type": "folder", "prefix": item}) + "\n"
|
||||
else:
|
||||
last_mod = item.last_modified
|
||||
yield json.dumps({
|
||||
"type": "object",
|
||||
"key": item.key,
|
||||
"size": item.size,
|
||||
"last_modified": last_mod.isoformat(),
|
||||
"last_modified_display": _format_datetime_display(last_mod, display_tz),
|
||||
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
|
||||
"etag": item.etag or "",
|
||||
}) + "\n"
|
||||
running_count += 1
|
||||
if running_count % 1000 == 0:
|
||||
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
|
||||
else:
|
||||
continuation_token = None
|
||||
while True:
|
||||
result = storage.list_objects(
|
||||
bucket_name,
|
||||
max_keys=1000,
|
||||
continuation_token=continuation_token,
|
||||
prefix=prefix,
|
||||
)
|
||||
for obj in result.objects:
|
||||
last_mod = obj.last_modified
|
||||
yield json.dumps({
|
||||
"type": "object",
|
||||
"key": obj.key,
|
||||
"size": obj.size,
|
||||
"last_modified": last_mod.isoformat(),
|
||||
"last_modified_display": _format_datetime_display(last_mod, display_tz),
|
||||
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
|
||||
"etag": obj.etag or "",
|
||||
}) + "\n"
|
||||
running_count += len(result.objects)
|
||||
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
|
||||
if not result.is_truncated:
|
||||
break
|
||||
continuation_token = result.next_continuation_token
|
||||
except StorageError as exc:
|
||||
yield json.dumps({"type": "error", "error": str(exc)}) + "\n"
|
||||
return
|
||||
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
|
||||
yield json.dumps({"type": "done"}) + "\n"
|
||||
|
||||
return Response(
|
||||
generate(),
|
||||
stream_objects_ndjson(
|
||||
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
|
||||
),
|
||||
mimetype='application/x-ndjson',
|
||||
headers={
|
||||
'Cache-Control': 'no-cache',
|
||||
@@ -698,33 +633,6 @@ def stream_bucket_objects(bucket_name: str):
|
||||
)
|
||||
|
||||
|
||||
@ui_bp.get("/buckets/<bucket_name>/objects/search")
|
||||
@limiter.limit("30 per minute")
|
||||
def search_bucket_objects(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "list")
|
||||
except IamError as exc:
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
query = request.args.get("q", "").strip()
|
||||
if not query:
|
||||
return jsonify({"results": [], "truncated": False})
|
||||
|
||||
try:
|
||||
limit = max(1, min(int(request.args.get("limit", 500)), 1000))
|
||||
except (ValueError, TypeError):
|
||||
limit = 500
|
||||
|
||||
prefix = request.args.get("prefix", "").strip()
|
||||
|
||||
storage = _storage()
|
||||
try:
|
||||
return jsonify(storage.search_objects(bucket_name, query, prefix=prefix, limit=limit))
|
||||
except StorageError as exc:
|
||||
return jsonify({"error": str(exc)}), 404
|
||||
|
||||
|
||||
@ui_bp.post("/buckets/<bucket_name>/upload")
|
||||
@limiter.limit("30 per minute")
|
||||
def upload_object(bucket_name: str):
|
||||
@@ -829,6 +737,7 @@ def initiate_multipart_upload(bucket_name: str):
|
||||
|
||||
|
||||
@ui_bp.put("/buckets/<bucket_name>/multipart/<upload_id>/parts")
|
||||
@limiter.exempt
|
||||
@csrf.exempt
|
||||
def upload_multipart_part(bucket_name: str, upload_id: str):
|
||||
principal = _current_principal()
|
||||
@@ -1063,27 +972,6 @@ def bulk_delete_objects(bucket_name: str):
|
||||
return _respond(False, f"A maximum of {MAX_KEYS} objects can be deleted per request", status_code=400)
|
||||
|
||||
unique_keys = list(dict.fromkeys(cleaned))
|
||||
|
||||
folder_prefixes = [k for k in unique_keys if k.endswith("/")]
|
||||
if folder_prefixes:
|
||||
try:
|
||||
client = get_session_s3_client()
|
||||
for prefix in folder_prefixes:
|
||||
unique_keys.remove(prefix)
|
||||
paginator = client.get_paginator("list_objects_v2")
|
||||
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
|
||||
for obj in page.get("Contents", []):
|
||||
if obj["Key"] not in unique_keys:
|
||||
unique_keys.append(obj["Key"])
|
||||
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
|
||||
if isinstance(exc, ClientError):
|
||||
err, status = handle_client_error(exc)
|
||||
return _respond(False, err["error"], status_code=status)
|
||||
return _respond(False, "S3 API server is unreachable", status_code=502)
|
||||
|
||||
if not unique_keys:
|
||||
return _respond(False, "No objects found under the selected folders", status_code=400)
|
||||
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "delete")
|
||||
except IamError as exc:
|
||||
@@ -1114,17 +1002,13 @@ def bulk_delete_objects(bucket_name: str):
|
||||
else:
|
||||
try:
|
||||
client = get_session_s3_client()
|
||||
deleted = []
|
||||
errors = []
|
||||
for i in range(0, len(unique_keys), 1000):
|
||||
batch = unique_keys[i:i + 1000]
|
||||
objects_to_delete = [{"Key": k} for k in batch]
|
||||
resp = client.delete_objects(
|
||||
Bucket=bucket_name,
|
||||
Delete={"Objects": objects_to_delete, "Quiet": False},
|
||||
)
|
||||
deleted.extend(d["Key"] for d in resp.get("Deleted", []))
|
||||
errors.extend({"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", []))
|
||||
objects_to_delete = [{"Key": k} for k in unique_keys]
|
||||
resp = client.delete_objects(
|
||||
Bucket=bucket_name,
|
||||
Delete={"Objects": objects_to_delete, "Quiet": False},
|
||||
)
|
||||
deleted = [d["Key"] for d in resp.get("Deleted", [])]
|
||||
errors = [{"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", [])]
|
||||
for key in deleted:
|
||||
_replication_manager().trigger_replication(bucket_name, key, action="delete")
|
||||
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
|
||||
@@ -1412,14 +1296,12 @@ def object_versions(bucket_name: str, object_key: str):
|
||||
for v in resp.get("Versions", []):
|
||||
if v.get("Key") != object_key:
|
||||
continue
|
||||
if v.get("IsLatest", False):
|
||||
continue
|
||||
versions.append({
|
||||
"version_id": v.get("VersionId", ""),
|
||||
"last_modified": v["LastModified"].isoformat() if v.get("LastModified") else None,
|
||||
"size": v.get("Size", 0),
|
||||
"etag": v.get("ETag", "").strip('"'),
|
||||
"is_latest": False,
|
||||
"is_latest": v.get("IsLatest", False),
|
||||
})
|
||||
return jsonify({"versions": versions})
|
||||
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
|
||||
@@ -1836,10 +1718,6 @@ def iam_dashboard():
|
||||
users = iam_service.list_users() if not locked else []
|
||||
config_summary = iam_service.config_summary()
|
||||
config_document = json.dumps(iam_service.export_config(mask_secrets=True), indent=2)
|
||||
from datetime import datetime as _dt, timedelta as _td, timezone as _tz
|
||||
_now = _dt.now(_tz.utc)
|
||||
now_iso = _now.isoformat()
|
||||
soon_iso = (_now + _td(days=7)).isoformat()
|
||||
return render_template(
|
||||
"iam.html",
|
||||
users=users,
|
||||
@@ -1849,8 +1727,6 @@ def iam_dashboard():
|
||||
config_summary=config_summary,
|
||||
config_document=config_document,
|
||||
disclosed_secret=disclosed_secret,
|
||||
now_iso=now_iso,
|
||||
soon_iso=soon_iso,
|
||||
)
|
||||
|
||||
|
||||
@@ -1870,8 +1746,6 @@ def create_iam_user():
|
||||
return jsonify({"error": "Display name must be 64 characters or fewer"}), 400
|
||||
flash("Display name must be 64 characters or fewer", "danger")
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
custom_access_key = request.form.get("access_key", "").strip() or None
|
||||
custom_secret_key = request.form.get("secret_key", "").strip() or None
|
||||
policies_text = request.form.get("policies", "").strip()
|
||||
policies = None
|
||||
if policies_text:
|
||||
@@ -1882,21 +1756,8 @@ def create_iam_user():
|
||||
return jsonify({"error": f"Invalid JSON: {exc}"}), 400
|
||||
flash(f"Invalid JSON: {exc}", "danger")
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
expires_at = request.form.get("expires_at", "").strip() or None
|
||||
if expires_at:
|
||||
try:
|
||||
from datetime import datetime as _dt, timezone as _tz
|
||||
exp_dt = _dt.fromisoformat(expires_at)
|
||||
if exp_dt.tzinfo is None:
|
||||
exp_dt = exp_dt.replace(tzinfo=_tz.utc)
|
||||
expires_at = exp_dt.isoformat()
|
||||
except (ValueError, TypeError):
|
||||
if _wants_json():
|
||||
return jsonify({"error": "Invalid expiry date format"}), 400
|
||||
flash("Invalid expiry date format", "danger")
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
try:
|
||||
created = _iam().create_user(display_name=display_name, policies=policies, access_key=custom_access_key, secret_key=custom_secret_key, expires_at=expires_at)
|
||||
created = _iam().create_user(display_name=display_name, policies=policies)
|
||||
except IamError as exc:
|
||||
if _wants_json():
|
||||
return jsonify({"error": str(exc)}), 400
|
||||
@@ -2070,45 +1931,6 @@ def update_iam_policies(access_key: str):
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
|
||||
|
||||
@ui_bp.post("/iam/users/<access_key>/expiry")
|
||||
def update_iam_expiry(access_key: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:update_policy")
|
||||
except IamError as exc:
|
||||
if _wants_json():
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
flash(str(exc), "danger")
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
|
||||
expires_at = request.form.get("expires_at", "").strip() or None
|
||||
if expires_at:
|
||||
try:
|
||||
from datetime import datetime as _dt, timezone as _tz
|
||||
exp_dt = _dt.fromisoformat(expires_at)
|
||||
if exp_dt.tzinfo is None:
|
||||
exp_dt = exp_dt.replace(tzinfo=_tz.utc)
|
||||
expires_at = exp_dt.isoformat()
|
||||
except (ValueError, TypeError):
|
||||
if _wants_json():
|
||||
return jsonify({"error": "Invalid expiry date format"}), 400
|
||||
flash("Invalid expiry date format", "danger")
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
|
||||
try:
|
||||
_iam().update_user_expiry(access_key, expires_at)
|
||||
if _wants_json():
|
||||
return jsonify({"success": True, "message": f"Updated expiry for {access_key}", "expires_at": expires_at})
|
||||
label = expires_at if expires_at else "never"
|
||||
flash(f"Expiry for {access_key} set to {label}", "success")
|
||||
except IamError as exc:
|
||||
if _wants_json():
|
||||
return jsonify({"error": str(exc)}), 400
|
||||
flash(str(exc), "danger")
|
||||
|
||||
return redirect(url_for("ui.iam_dashboard"))
|
||||
|
||||
|
||||
@ui_bp.post("/connections")
|
||||
def create_connection():
|
||||
principal = _current_principal()
|
||||
@@ -2220,17 +2042,16 @@ def update_connection(connection_id: str):
|
||||
secret_key = request.form.get("secret_key", "").strip()
|
||||
region = request.form.get("region", "us-east-1").strip()
|
||||
|
||||
if not all([name, endpoint, access_key]):
|
||||
if not all([name, endpoint, access_key, secret_key]):
|
||||
if _wants_json():
|
||||
return jsonify({"error": "Name, endpoint, and access key are required"}), 400
|
||||
flash("Name, endpoint, and access key are required", "danger")
|
||||
return jsonify({"error": "All fields are required"}), 400
|
||||
flash("All fields are required", "danger")
|
||||
return redirect(url_for("ui.connections_dashboard"))
|
||||
|
||||
conn.name = name
|
||||
conn.endpoint_url = endpoint
|
||||
conn.access_key = access_key
|
||||
if secret_key:
|
||||
conn.secret_key = secret_key
|
||||
conn.secret_key = secret_key
|
||||
conn.region = region
|
||||
|
||||
_connections().save()
|
||||
@@ -2551,10 +2372,7 @@ def website_domains_dashboard():
|
||||
store = current_app.extensions.get("website_domains")
|
||||
mappings = store.list_all() if store else []
|
||||
storage = _storage()
|
||||
buckets = [
|
||||
b.name for b in storage.list_buckets()
|
||||
if storage.get_bucket_website(b.name)
|
||||
]
|
||||
buckets = [b.name for b in storage.list_buckets()]
|
||||
return render_template(
|
||||
"website_domains.html",
|
||||
mappings=mappings,
|
||||
@@ -2581,7 +2399,7 @@ def create_website_domain():
|
||||
flash("Website hosting is not enabled", "warning")
|
||||
return redirect(url_for("ui.buckets_overview"))
|
||||
|
||||
domain = normalize_domain(request.form.get("domain") or "")
|
||||
domain = (request.form.get("domain") or "").strip().lower()
|
||||
bucket = (request.form.get("bucket") or "").strip()
|
||||
|
||||
if not domain:
|
||||
@@ -2590,12 +2408,6 @@ def create_website_domain():
|
||||
flash("Domain is required", "danger")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
if not is_valid_domain(domain):
|
||||
if _wants_json():
|
||||
return jsonify({"error": f"Invalid domain format: '{domain}'"}), 400
|
||||
flash(f"Invalid domain format: '{domain}'. Use a hostname like www.example.com", "danger")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
if not bucket:
|
||||
if _wants_json():
|
||||
return jsonify({"error": "Bucket is required"}), 400
|
||||
@@ -2634,7 +2446,6 @@ def update_website_domain(domain: str):
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
domain = normalize_domain(domain)
|
||||
bucket = (request.form.get("bucket") or "").strip()
|
||||
if not bucket:
|
||||
if _wants_json():
|
||||
@@ -2650,14 +2461,9 @@ def update_website_domain(domain: str):
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
store = current_app.extensions.get("website_domains")
|
||||
if not store.get_bucket(domain):
|
||||
if _wants_json():
|
||||
return jsonify({"error": f"No mapping for domain '{domain}'"}), 404
|
||||
flash(f"No mapping for domain '{domain}'", "danger")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
store.set_mapping(domain, bucket)
|
||||
if _wants_json():
|
||||
return jsonify({"success": True, "domain": domain, "bucket": bucket})
|
||||
return jsonify({"success": True, "domain": domain.lower(), "bucket": bucket})
|
||||
flash(f"Domain '{domain}' updated to bucket '{bucket}'", "success")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
@@ -2673,7 +2479,6 @@ def delete_website_domain(domain: str):
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.website_domains_dashboard"))
|
||||
|
||||
domain = normalize_domain(domain)
|
||||
store = current_app.extensions.get("website_domains")
|
||||
if not store.delete_mapping(domain):
|
||||
if _wants_json():
|
||||
@@ -3473,12 +3278,9 @@ def sites_dashboard():
|
||||
@ui_bp.post("/sites/local")
|
||||
def update_local_site():
|
||||
principal = _current_principal()
|
||||
wants_json = request.headers.get("X-Requested-With") == "XMLHttpRequest"
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3489,8 +3291,6 @@ def update_local_site():
|
||||
display_name = request.form.get("display_name", "").strip()
|
||||
|
||||
if not site_id:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Site ID is required"}), 400
|
||||
flash("Site ID is required", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3512,8 +3312,6 @@ def update_local_site():
|
||||
)
|
||||
registry.set_local_site(site)
|
||||
|
||||
if wants_json:
|
||||
return jsonify({"message": "Local site configuration updated"})
|
||||
flash("Local site configuration updated", "success")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3521,12 +3319,9 @@ def update_local_site():
|
||||
@ui_bp.post("/sites/peers")
|
||||
def add_peer_site():
|
||||
principal = _current_principal()
|
||||
wants_json = request.headers.get("X-Requested-With") == "XMLHttpRequest"
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3538,13 +3333,9 @@ def add_peer_site():
|
||||
connection_id = request.form.get("connection_id", "").strip() or None
|
||||
|
||||
if not site_id:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Site ID is required"}), 400
|
||||
flash("Site ID is required", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
if not endpoint:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Endpoint is required"}), 400
|
||||
flash("Endpoint is required", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3556,14 +3347,10 @@ def add_peer_site():
|
||||
registry = _site_registry()
|
||||
|
||||
if registry.get_peer(site_id):
|
||||
if wants_json:
|
||||
return jsonify({"error": f"Peer site '{site_id}' already exists"}), 409
|
||||
flash(f"Peer site '{site_id}' already exists", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
if connection_id and not _connections().get(connection_id):
|
||||
if wants_json:
|
||||
return jsonify({"error": f"Connection '{connection_id}' not found"}), 404
|
||||
flash(f"Connection '{connection_id}' not found", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3577,11 +3364,6 @@ def add_peer_site():
|
||||
)
|
||||
registry.add_peer(peer)
|
||||
|
||||
if wants_json:
|
||||
redirect_url = None
|
||||
if connection_id:
|
||||
redirect_url = url_for("ui.replication_wizard", site_id=site_id)
|
||||
return jsonify({"message": f"Peer site '{site_id}' added", "redirect": redirect_url})
|
||||
flash(f"Peer site '{site_id}' added", "success")
|
||||
|
||||
if connection_id:
|
||||
@@ -3592,12 +3374,9 @@ def add_peer_site():
|
||||
@ui_bp.post("/sites/peers/<site_id>/update")
|
||||
def update_peer_site(site_id: str):
|
||||
principal = _current_principal()
|
||||
wants_json = request.headers.get("X-Requested-With") == "XMLHttpRequest"
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3605,8 +3384,6 @@ def update_peer_site(site_id: str):
|
||||
existing = registry.get_peer(site_id)
|
||||
|
||||
if not existing:
|
||||
if wants_json:
|
||||
return jsonify({"error": f"Peer site '{site_id}' not found"}), 404
|
||||
flash(f"Peer site '{site_id}' not found", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3614,10 +3391,7 @@ def update_peer_site(site_id: str):
|
||||
region = request.form.get("region", existing.region).strip()
|
||||
priority = request.form.get("priority", str(existing.priority))
|
||||
display_name = request.form.get("display_name", existing.display_name).strip()
|
||||
if "connection_id" in request.form:
|
||||
connection_id = request.form["connection_id"].strip() or None
|
||||
else:
|
||||
connection_id = existing.connection_id
|
||||
connection_id = request.form.get("connection_id", "").strip() or existing.connection_id
|
||||
|
||||
try:
|
||||
priority_int = int(priority)
|
||||
@@ -3625,8 +3399,6 @@ def update_peer_site(site_id: str):
|
||||
priority_int = existing.priority
|
||||
|
||||
if connection_id and not _connections().get(connection_id):
|
||||
if wants_json:
|
||||
return jsonify({"error": f"Connection '{connection_id}' not found"}), 404
|
||||
flash(f"Connection '{connection_id}' not found", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3643,8 +3415,6 @@ def update_peer_site(site_id: str):
|
||||
)
|
||||
registry.update_peer(peer)
|
||||
|
||||
if wants_json:
|
||||
return jsonify({"message": f"Peer site '{site_id}' updated"})
|
||||
flash(f"Peer site '{site_id}' updated", "success")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
@@ -3652,23 +3422,16 @@ def update_peer_site(site_id: str):
|
||||
@ui_bp.post("/sites/peers/<site_id>/delete")
|
||||
def delete_peer_site(site_id: str):
|
||||
principal = _current_principal()
|
||||
wants_json = request.headers.get("X-Requested-With") == "XMLHttpRequest"
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
if wants_json:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
flash("Access denied", "danger")
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
|
||||
registry = _site_registry()
|
||||
if registry.delete_peer(site_id):
|
||||
if wants_json:
|
||||
return jsonify({"message": f"Peer site '{site_id}' deleted"})
|
||||
flash(f"Peer site '{site_id}' deleted", "success")
|
||||
else:
|
||||
if wants_json:
|
||||
return jsonify({"error": f"Peer site '{site_id}' not found"}), 404
|
||||
flash(f"Peer site '{site_id}' not found", "danger")
|
||||
|
||||
return redirect(url_for("ui.sites_dashboard"))
|
||||
@@ -4123,182 +3886,6 @@ def get_peer_sync_stats(site_id: str):
|
||||
return jsonify(stats)
|
||||
|
||||
|
||||
@ui_bp.get("/system")
|
||||
def system_dashboard():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
flash("Access denied: System page requires admin permissions", "danger")
|
||||
return redirect(url_for("ui.buckets_overview"))
|
||||
|
||||
import platform as _platform
|
||||
import sys
|
||||
from app.version import APP_VERSION
|
||||
|
||||
try:
|
||||
import myfsio_core as _rc
|
||||
has_rust = True
|
||||
except ImportError:
|
||||
has_rust = False
|
||||
|
||||
gc = current_app.extensions.get("gc")
|
||||
gc_status = gc.get_status() if gc else {"enabled": False}
|
||||
gc_history_records = []
|
||||
if gc:
|
||||
raw = gc.get_history(limit=10, offset=0)
|
||||
for rec in raw:
|
||||
r = rec.get("result", {})
|
||||
total_freed = r.get("temp_bytes_freed", 0) + r.get("multipart_bytes_freed", 0) + r.get("orphaned_version_bytes_freed", 0)
|
||||
rec["bytes_freed_display"] = _format_bytes(total_freed)
|
||||
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
|
||||
gc_history_records.append(rec)
|
||||
|
||||
checker = current_app.extensions.get("integrity")
|
||||
integrity_status = checker.get_status() if checker else {"enabled": False}
|
||||
integrity_history_records = []
|
||||
if checker:
|
||||
raw = checker.get_history(limit=10, offset=0)
|
||||
for rec in raw:
|
||||
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
|
||||
integrity_history_records.append(rec)
|
||||
|
||||
features = [
|
||||
{"label": "Encryption (SSE-S3)", "enabled": current_app.config.get("ENCRYPTION_ENABLED", False)},
|
||||
{"label": "KMS", "enabled": current_app.config.get("KMS_ENABLED", False)},
|
||||
{"label": "Versioning Lifecycle", "enabled": current_app.config.get("LIFECYCLE_ENABLED", False)},
|
||||
{"label": "Metrics History", "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False)},
|
||||
{"label": "Operation Metrics", "enabled": current_app.config.get("OPERATION_METRICS_ENABLED", False)},
|
||||
{"label": "Site Sync", "enabled": current_app.config.get("SITE_SYNC_ENABLED", False)},
|
||||
{"label": "Website Hosting", "enabled": current_app.config.get("WEBSITE_HOSTING_ENABLED", False)},
|
||||
{"label": "Garbage Collection", "enabled": current_app.config.get("GC_ENABLED", False)},
|
||||
{"label": "Integrity Scanner", "enabled": current_app.config.get("INTEGRITY_ENABLED", False)},
|
||||
]
|
||||
|
||||
return render_template(
|
||||
"system.html",
|
||||
principal=principal,
|
||||
app_version=APP_VERSION,
|
||||
storage_root=current_app.config.get("STORAGE_ROOT", "./data"),
|
||||
platform=_platform.platform(),
|
||||
python_version=sys.version.split()[0],
|
||||
has_rust=has_rust,
|
||||
features=features,
|
||||
gc_status=gc_status,
|
||||
gc_history=gc_history_records,
|
||||
integrity_status=integrity_status,
|
||||
integrity_history=integrity_history_records,
|
||||
display_timezone=current_app.config.get("DISPLAY_TIMEZONE", "UTC"),
|
||||
)
|
||||
|
||||
|
||||
@ui_bp.post("/system/gc/run")
|
||||
def system_gc_run():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
gc = current_app.extensions.get("gc")
|
||||
if not gc:
|
||||
return jsonify({"error": "GC is not enabled"}), 400
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
started = gc.run_async(dry_run=payload.get("dry_run"))
|
||||
if not started:
|
||||
return jsonify({"error": "GC is already in progress"}), 409
|
||||
return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@ui_bp.get("/system/gc/status")
|
||||
def system_gc_status():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
gc = current_app.extensions.get("gc")
|
||||
if not gc:
|
||||
return jsonify({"error": "GC is not enabled"}), 400
|
||||
|
||||
return jsonify(gc.get_status())
|
||||
|
||||
|
||||
@ui_bp.get("/system/gc/history")
|
||||
def system_gc_history():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
gc = current_app.extensions.get("gc")
|
||||
if not gc:
|
||||
return jsonify({"executions": []})
|
||||
|
||||
limit = min(int(request.args.get("limit", 10)), 200)
|
||||
offset = int(request.args.get("offset", 0))
|
||||
records = gc.get_history(limit=limit, offset=offset)
|
||||
return jsonify({"executions": records})
|
||||
|
||||
|
||||
@ui_bp.post("/system/integrity/run")
|
||||
def system_integrity_run():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
checker = current_app.extensions.get("integrity")
|
||||
if not checker:
|
||||
return jsonify({"error": "Integrity checker is not enabled"}), 400
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
started = checker.run_async(
|
||||
auto_heal=payload.get("auto_heal"),
|
||||
dry_run=payload.get("dry_run"),
|
||||
)
|
||||
if not started:
|
||||
return jsonify({"error": "A scan is already in progress"}), 409
|
||||
return jsonify({"status": "started"})
|
||||
|
||||
|
||||
@ui_bp.get("/system/integrity/status")
|
||||
def system_integrity_status():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
checker = current_app.extensions.get("integrity")
|
||||
if not checker:
|
||||
return jsonify({"error": "Integrity checker is not enabled"}), 400
|
||||
|
||||
return jsonify(checker.get_status())
|
||||
|
||||
|
||||
@ui_bp.get("/system/integrity/history")
|
||||
def system_integrity_history():
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:*")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
checker = current_app.extensions.get("integrity")
|
||||
if not checker:
|
||||
return jsonify({"executions": []})
|
||||
|
||||
limit = min(int(request.args.get("limit", 10)), 200)
|
||||
offset = int(request.args.get("offset", 0))
|
||||
records = checker.get_history(limit=limit, offset=offset)
|
||||
return jsonify({"executions": records})
|
||||
|
||||
|
||||
@ui_bp.app_errorhandler(404)
|
||||
def ui_not_found(error): # type: ignore[override]
|
||||
prefix = ui_bp.url_prefix or ""
|
||||
@@ -1,6 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
APP_VERSION = "0.4.3"
|
||||
APP_VERSION = "0.2.9"
|
||||
|
||||
|
||||
def get_version() -> str:
|
||||
55
app/website_domains.py
Normal file
55
app/website_domains.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
|
||||
class WebsiteDomainStore:
|
||||
def __init__(self, config_path: Path) -> None:
|
||||
self.config_path = config_path
|
||||
self._lock = threading.Lock()
|
||||
self._domains: Dict[str, str] = {}
|
||||
self.reload()
|
||||
|
||||
def reload(self) -> None:
|
||||
if not self.config_path.exists():
|
||||
self._domains = {}
|
||||
return
|
||||
try:
|
||||
with open(self.config_path, "r", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
if isinstance(data, dict):
|
||||
self._domains = {k.lower(): v for k, v in data.items()}
|
||||
else:
|
||||
self._domains = {}
|
||||
except (OSError, json.JSONDecodeError):
|
||||
self._domains = {}
|
||||
|
||||
def _save(self) -> None:
|
||||
self.config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(self.config_path, "w", encoding="utf-8") as f:
|
||||
json.dump(self._domains, f, indent=2)
|
||||
|
||||
def list_all(self) -> List[Dict[str, str]]:
|
||||
with self._lock:
|
||||
return [{"domain": d, "bucket": b} for d, b in self._domains.items()]
|
||||
|
||||
def get_bucket(self, domain: str) -> Optional[str]:
|
||||
with self._lock:
|
||||
return self._domains.get(domain.lower())
|
||||
|
||||
def set_mapping(self, domain: str, bucket: str) -> None:
|
||||
with self._lock:
|
||||
self._domains[domain.lower()] = bucket
|
||||
self._save()
|
||||
|
||||
def delete_mapping(self, domain: str) -> bool:
|
||||
with self._lock:
|
||||
key = domain.lower()
|
||||
if key not in self._domains:
|
||||
return False
|
||||
del self._domains[key]
|
||||
self._save()
|
||||
return True
|
||||
5
docker-entrypoint.sh
Normal file
5
docker-entrypoint.sh
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Run both services using the python runner in production mode
|
||||
exec python run.py --prod
|
||||
@@ -1,56 +0,0 @@
|
||||
FROM python:3.14.3-slim AS builder
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential curl \
|
||||
&& curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
RUN pip install --no-cache-dir maturin
|
||||
|
||||
COPY myfsio_core ./myfsio_core
|
||||
RUN cd myfsio_core \
|
||||
&& maturin build --release --out /wheels
|
||||
|
||||
|
||||
FROM python:3.14.3-slim
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY requirements.txt ./
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY --from=builder /wheels/*.whl /tmp/
|
||||
RUN pip install --no-cache-dir /tmp/*.whl && rm /tmp/*.whl
|
||||
|
||||
COPY app ./app
|
||||
COPY templates ./templates
|
||||
COPY static ./static
|
||||
COPY run.py ./
|
||||
COPY docker-entrypoint.sh ./
|
||||
|
||||
RUN chmod +x docker-entrypoint.sh \
|
||||
&& mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
USER myfsio
|
||||
|
||||
EXPOSE 5000 5100
|
||||
ENV APP_HOST=0.0.0.0 \
|
||||
FLASK_ENV=production \
|
||||
FLASK_DEBUG=0
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
|
||||
|
||||
CMD ["./docker-entrypoint.sh"]
|
||||
@@ -1,14 +0,0 @@
|
||||
# Deprecated Python Implementation
|
||||
|
||||
The Python implementation of MyFSIO is deprecated as of 2026-04-21.
|
||||
|
||||
The supported server runtime now lives in `../rust/myfsio-engine` and serves the S3 API and web UI from the Rust `myfsio-server` binary. Keep this tree for migration reference, compatibility checks, and legacy tests only.
|
||||
|
||||
For normal development and operations, run:
|
||||
|
||||
```bash
|
||||
cd ../rust/myfsio-engine
|
||||
cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
Do not add new product features to the Python implementation unless they are needed to unblock a migration or compare behavior with the Rust server.
|
||||
596
python/app/gc.py
596
python/app/gc.py
@@ -1,596 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class GCResult:
    """Aggregated counters for a single garbage-collection pass."""

    temp_files_deleted: int = 0
    temp_bytes_freed: int = 0
    multipart_uploads_deleted: int = 0
    multipart_bytes_freed: int = 0
    lock_files_deleted: int = 0
    orphaned_metadata_deleted: int = 0
    orphaned_versions_deleted: int = 0
    orphaned_version_bytes_freed: int = 0
    empty_dirs_removed: int = 0
    errors: List[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0

    # Field names serialised by to_dict(), in stable output order.
    # (Unannotated, so the dataclass machinery ignores it.)
    _EXPORT_FIELDS = (
        "temp_files_deleted",
        "temp_bytes_freed",
        "multipart_uploads_deleted",
        "multipart_bytes_freed",
        "lock_files_deleted",
        "orphaned_metadata_deleted",
        "orphaned_versions_deleted",
        "orphaned_version_bytes_freed",
        "empty_dirs_removed",
        "errors",
        "execution_time_seconds",
    )

    def to_dict(self) -> dict:
        """Serialise every counter (plus errors and timing) to a plain dict."""
        return {name: getattr(self, name) for name in self._EXPORT_FIELDS}

    @property
    def total_bytes_freed(self) -> int:
        """Total bytes reclaimed across temp, multipart, and version cleanup."""
        return sum(
            (
                self.temp_bytes_freed,
                self.multipart_bytes_freed,
                self.orphaned_version_bytes_freed,
            )
        )

    @property
    def has_work(self) -> bool:
        """True when at least one item of any category was removed."""
        return any(
            count > 0
            for count in (
                self.temp_files_deleted,
                self.multipart_uploads_deleted,
                self.lock_files_deleted,
                self.orphaned_metadata_deleted,
                self.orphaned_versions_deleted,
                self.empty_dirs_removed,
            )
        )
|
||||
|
||||
|
||||
@dataclass
class GCExecutionRecord:
    """One persisted GC run: when it ran, its result dict, and dry-run flag."""

    timestamp: float
    result: dict
    dry_run: bool

    def to_dict(self) -> dict:
        """Serialise for JSON storage."""
        return {
            "timestamp": self.timestamp,
            "result": self.result,
            "dry_run": self.dry_run,
        }

    @classmethod
    def from_dict(cls, data: dict) -> GCExecutionRecord:
        """Rebuild a record from its to_dict() form; dry_run defaults to False."""
        return cls(data["timestamp"], data["result"], data.get("dry_run", False))
|
||||
|
||||
|
||||
class GCHistoryStore:
    """JSON-backed list of recent GC execution records (newest first)."""

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        """Location of the history file under the system config directory."""
        return self.storage_root / ".myfsio.sys" / "config" / "gc_history.json"

    def load(self) -> List[GCExecutionRecord]:
        """Read all stored records; an unreadable file yields an empty list."""
        history_file = self._get_path()
        if not history_file.exists():
            return []
        try:
            with open(history_file, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            return [GCExecutionRecord.from_dict(item) for item in payload.get("executions", [])]
        except (OSError, ValueError, KeyError) as exc:
            logger.error("Failed to load GC history: %s", exc)
            return []

    def save(self, records: List[GCExecutionRecord]) -> None:
        """Write at most ``max_records`` records; log (don't raise) on I/O error."""
        history_file = self._get_path()
        history_file.parent.mkdir(parents=True, exist_ok=True)
        trimmed = records[: self.max_records]
        try:
            with open(history_file, "w", encoding="utf-8") as fh:
                json.dump({"executions": [rec.to_dict() for rec in trimmed]}, fh, indent=2)
        except OSError as exc:
            logger.error("Failed to save GC history: %s", exc)

    def add(self, record: GCExecutionRecord) -> None:
        """Prepend a record (newest first) and persist, serialised by a lock."""
        with self._lock:
            self.save([record] + self.load())

    def get_history(self, limit: int = 50, offset: int = 0) -> List[GCExecutionRecord]:
        """Return a slice of stored records (newest first)."""
        return self.load()[offset : offset + limit]
|
||||
|
||||
|
||||
def _dir_size(path: Path) -> int:
|
||||
total = 0
|
||||
try:
|
||||
for f in path.rglob("*"):
|
||||
if f.is_file():
|
||||
try:
|
||||
total += f.stat().st_size
|
||||
except OSError:
|
||||
pass
|
||||
except OSError:
|
||||
pass
|
||||
return total
|
||||
|
||||
|
||||
def _file_age_hours(path: Path) -> float:
|
||||
try:
|
||||
mtime = path.stat().st_mtime
|
||||
return (time.time() - mtime) / 3600.0
|
||||
except OSError:
|
||||
return 0.0
|
||||
|
||||
|
||||
class GarbageCollector:
    """Scheduled garbage collector for the file-backed object store.

    Removes stale temp files, expired multipart uploads, stale lock files,
    orphaned metadata and version records, and empty internal directories.
    A pass can run periodically (via an internal daemon timer) or be
    triggered manually; every pass is recorded in ``history_store``.
    Cleanup is best-effort: per-item failures are appended to
    ``GCResult.errors`` instead of aborting the pass.
    """

    # Names of the hidden system tree and its subdirectories.
    SYSTEM_ROOT = ".myfsio.sys"
    SYSTEM_TMP_DIR = "tmp"
    SYSTEM_MULTIPART_DIR = "multipart"
    SYSTEM_BUCKETS_DIR = "buckets"
    BUCKET_META_DIR = "meta"
    BUCKET_VERSIONS_DIR = "versions"
    # Legacy dot-folders stored inside each bucket (pre-system-tree layout).
    INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}

    def __init__(
        self,
        storage_root: Path,
        interval_hours: float = 6.0,
        temp_file_max_age_hours: float = 24.0,
        multipart_max_age_days: int = 7,
        lock_file_max_age_hours: float = 1.0,
        dry_run: bool = False,
        max_history: int = 50,
        io_throttle_ms: int = 10,
    ) -> None:
        """Configure thresholds; nothing runs until ``start()`` or ``run_now()``.

        ``io_throttle_ms`` inserts a short sleep between filesystem operations
        so a GC pass does not starve foreground I/O.
        """
        self.storage_root = Path(storage_root)
        self.interval_seconds = interval_hours * 3600.0
        self.temp_file_max_age_hours = temp_file_max_age_hours
        self.multipart_max_age_days = multipart_max_age_days
        self.lock_file_max_age_hours = lock_file_max_age_hours
        self.dry_run = dry_run
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        self._lock = threading.Lock()  # serialises passes; run_now acquires non-blocking
        self._scanning = False
        self._scan_start_time: Optional[float] = None
        self._io_throttle = max(0, io_throttle_ms) / 1000.0
        self.history_store = GCHistoryStore(storage_root, max_records=max_history)

    def start(self) -> None:
        """Begin periodic GC; no-op if the timer is already scheduled."""
        if self._timer is not None:
            return
        self._shutdown = False
        self._schedule_next()
        logger.info(
            "GC started: interval=%.1fh, temp_max_age=%.1fh, multipart_max_age=%dd, lock_max_age=%.1fh, dry_run=%s",
            self.interval_seconds / 3600.0,
            self.temp_file_max_age_hours,
            self.multipart_max_age_days,
            self.lock_file_max_age_hours,
            self.dry_run,
        )

    def stop(self) -> None:
        """Cancel the timer and flag shutdown so an in-flight scan exits early."""
        self._shutdown = True
        if self._timer:
            self._timer.cancel()
            self._timer = None
        logger.info("GC stopped")

    def _schedule_next(self) -> None:
        """Arm a daemon timer for the next cycle unless shutting down."""
        if self._shutdown:
            return
        self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
        self._timer.daemon = True
        self._timer.start()

    def _run_cycle(self) -> None:
        """Timer callback: run one pass, then reschedule regardless of outcome."""
        if self._shutdown:
            return
        try:
            self.run_now()
        except Exception as e:
            logger.error("GC cycle failed: %s", e)
        finally:
            self._schedule_next()

    def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
        """Execute one full GC pass synchronously and record it in history.

        ``dry_run`` overrides the instance default for this pass only.
        Raises RuntimeError if a pass is already running.
        """
        # Non-blocking acquire: concurrent callers fail fast instead of queueing.
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("GC is already in progress")

        effective_dry_run = dry_run if dry_run is not None else self.dry_run

        try:
            self._scanning = True
            self._scan_start_time = time.time()

            start = self._scan_start_time
            result = GCResult()

            # Temporarily swap self.dry_run so the _clean_* helpers honour the
            # per-call override; restored in the inner finally.
            original_dry_run = self.dry_run
            self.dry_run = effective_dry_run
            try:
                self._clean_temp_files(result)
                self._clean_orphaned_multipart(result)
                self._clean_stale_locks(result)
                self._clean_orphaned_metadata(result)
                self._clean_orphaned_versions(result)
                self._clean_empty_dirs(result)
            finally:
                self.dry_run = original_dry_run

            result.execution_time_seconds = time.time() - start

            # Only log when something was cleaned or something failed.
            if result.has_work or result.errors:
                logger.info(
                    "GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
                    "locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
                    result.execution_time_seconds,
                    result.temp_files_deleted,
                    result.temp_bytes_freed / (1024 * 1024),
                    result.multipart_uploads_deleted,
                    result.multipart_bytes_freed / (1024 * 1024),
                    result.lock_files_deleted,
                    result.orphaned_metadata_deleted,
                    result.orphaned_versions_deleted,
                    result.orphaned_version_bytes_freed / (1024 * 1024),
                    result.empty_dirs_removed,
                    len(result.errors),
                    " (dry run)" if effective_dry_run else "",
                )

            record = GCExecutionRecord(
                timestamp=time.time(),
                result=result.to_dict(),
                dry_run=effective_dry_run,
            )
            self.history_store.add(record)

            return result
        finally:
            self._scanning = False
            self._scan_start_time = None
            self._lock.release()

    def run_async(self, dry_run: Optional[bool] = None) -> bool:
        """Kick off a pass on a daemon thread; False if one is already scanning."""
        if self._scanning:
            return False
        t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
        t.start()
        return True

    def _system_path(self) -> Path:
        """Root of the hidden system tree inside the storage root."""
        return self.storage_root / self.SYSTEM_ROOT

    def _throttle(self) -> bool:
        """Sleep the configured throttle; return True if GC should abort."""
        if self._shutdown:
            return True
        if self._io_throttle > 0:
            time.sleep(self._io_throttle)
        return self._shutdown

    def _list_bucket_names(self) -> List[str]:
        """Top-level directories in the storage root, excluding the system tree."""
        names = []
        try:
            for entry in self.storage_root.iterdir():
                if entry.is_dir() and entry.name != self.SYSTEM_ROOT:
                    names.append(entry.name)
        except OSError:
            pass
        return names

    def _clean_temp_files(self, result: GCResult) -> None:
        """Delete temp files older than ``temp_file_max_age_hours``."""
        tmp_dir = self._system_path() / self.SYSTEM_TMP_DIR
        if not tmp_dir.exists():
            return
        try:
            for entry in tmp_dir.iterdir():
                if self._throttle():
                    return
                if not entry.is_file():
                    continue
                age = _file_age_hours(entry)
                if age < self.temp_file_max_age_hours:
                    continue
                try:
                    size = entry.stat().st_size
                    if not self.dry_run:
                        entry.unlink()
                    # Counters advance even on dry runs so the report shows
                    # what a real pass would reclaim.
                    result.temp_files_deleted += 1
                    result.temp_bytes_freed += size
                except OSError as e:
                    result.errors.append(f"temp file {entry.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan tmp dir: {e}")

    def _clean_orphaned_multipart(self, result: GCResult) -> None:
        """Remove multipart uploads older than ``multipart_max_age_days``.

        Checks both the system-tree layout and the legacy in-bucket
        ``.multipart`` folder.
        """
        cutoff_hours = self.multipart_max_age_days * 24.0
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            for multipart_root in (
                self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
                self.storage_root / bucket_name / ".multipart",
            ):
                if not multipart_root.exists():
                    continue
                try:
                    for upload_dir in multipart_root.iterdir():
                        if self._throttle():
                            return
                        if not upload_dir.is_dir():
                            continue
                        self._maybe_clean_upload(upload_dir, cutoff_hours, result)
                except OSError as e:
                    result.errors.append(f"scan multipart {bucket_name}: {e}")

    def _maybe_clean_upload(self, upload_dir: Path, cutoff_hours: float, result: GCResult) -> None:
        """Delete one upload dir if its manifest (or the dir itself) is too old."""
        manifest_path = upload_dir / "manifest.json"
        age = _file_age_hours(manifest_path) if manifest_path.exists() else _file_age_hours(upload_dir)

        if age < cutoff_hours:
            return

        # Size is measured before removal so dry runs report it too.
        dir_bytes = _dir_size(upload_dir)
        try:
            if not self.dry_run:
                shutil.rmtree(upload_dir, ignore_errors=True)
            result.multipart_uploads_deleted += 1
            result.multipart_bytes_freed += dir_bytes
        except OSError as e:
            result.errors.append(f"multipart {upload_dir.name}: {e}")

    def _clean_stale_locks(self, result: GCResult) -> None:
        """Delete ``*.lock`` files older than ``lock_file_max_age_hours``."""
        buckets_root = self._system_path() / self.SYSTEM_BUCKETS_DIR
        if not buckets_root.exists():
            return

        try:
            for bucket_dir in buckets_root.iterdir():
                if self._shutdown:
                    return
                if not bucket_dir.is_dir():
                    continue
                locks_dir = bucket_dir / "locks"
                if not locks_dir.exists():
                    continue
                try:
                    for lock_file in locks_dir.iterdir():
                        if self._throttle():
                            return
                        if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
                            continue
                        age = _file_age_hours(lock_file)
                        if age < self.lock_file_max_age_hours:
                            continue
                        try:
                            if not self.dry_run:
                                lock_file.unlink(missing_ok=True)
                            result.lock_files_deleted += 1
                        except OSError as e:
                            result.errors.append(f"lock {lock_file.name}: {e}")
                except OSError as e:
                    result.errors.append(f"scan locks {bucket_dir.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan buckets for locks: {e}")

    def _clean_orphaned_metadata(self, result: GCResult) -> None:
        """Drop metadata entries whose backing object no longer exists."""
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            # Legacy per-bucket .meta folder with one *.meta.json per object.
            legacy_meta = self.storage_root / bucket_name / ".meta"
            if legacy_meta.exists():
                self._clean_legacy_metadata(bucket_name, legacy_meta, result)

            # Current layout: per-directory _index.json files in the system tree.
            new_meta = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
            if new_meta.exists():
                self._clean_index_metadata(bucket_name, new_meta, result)

    def _clean_legacy_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
        """Delete legacy ``*.meta.json`` files for objects that are gone."""
        bucket_path = self.storage_root / bucket_name
        try:
            for meta_file in meta_root.rglob("*.meta.json"):
                if self._throttle():
                    return
                if not meta_file.is_file():
                    continue
                try:
                    # Object key mirrors the meta file's path under meta_root.
                    rel = meta_file.relative_to(meta_root)
                    object_key = rel.as_posix().removesuffix(".meta.json")
                    object_path = bucket_path / object_key
                    if not object_path.exists():
                        if not self.dry_run:
                            meta_file.unlink(missing_ok=True)
                        result.orphaned_metadata_deleted += 1
                except (OSError, ValueError) as e:
                    result.errors.append(f"legacy meta {bucket_name}/{meta_file.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan legacy meta {bucket_name}: {e}")

    def _clean_index_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
        """Prune keys from ``_index.json`` files whose objects are missing.

        An index file left with no keys is removed entirely.
        """
        bucket_path = self.storage_root / bucket_name
        try:
            for index_file in meta_root.rglob("_index.json"):
                if self._throttle():
                    return
                if not index_file.is_file():
                    continue
                try:
                    with open(index_file, "r", encoding="utf-8") as f:
                        index_data = json.load(f)
                except (OSError, json.JSONDecodeError):
                    continue

                keys_to_remove = []
                for key in index_data:
                    # Index files mirror the bucket's directory layout: full key
                    # is the index file's directory plus the entry's key.
                    rel_dir = index_file.parent.relative_to(meta_root)
                    if rel_dir == Path("."):
                        full_key = key
                    else:
                        full_key = rel_dir.as_posix() + "/" + key
                    object_path = bucket_path / full_key
                    if not object_path.exists():
                        keys_to_remove.append(key)

                if keys_to_remove:
                    if not self.dry_run:
                        for k in keys_to_remove:
                            index_data.pop(k, None)
                        if index_data:
                            try:
                                with open(index_file, "w", encoding="utf-8") as f:
                                    json.dump(index_data, f)
                            except OSError as e:
                                result.errors.append(f"write index {bucket_name}: {e}")
                                # Skip counting for this file: nothing was pruned.
                                continue
                        else:
                            try:
                                index_file.unlink(missing_ok=True)
                            except OSError:
                                pass
                    result.orphaned_metadata_deleted += len(keys_to_remove)
        except OSError as e:
            result.errors.append(f"scan index meta {bucket_name}: {e}")

    def _clean_orphaned_versions(self, result: GCResult) -> None:
        """Delete stored versions for objects that no longer exist."""
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            bucket_path = self.storage_root / bucket_name
            for versions_root in (
                self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
                self.storage_root / bucket_name / ".versions",
            ):
                if not versions_root.exists():
                    continue
                try:
                    for key_dir in versions_root.iterdir():
                        if self._throttle():
                            return
                        if not key_dir.is_dir():
                            continue
                        self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
                except OSError as e:
                    result.errors.append(f"scan versions {bucket_name}: {e}")

    def _clean_versions_for_key(
        self, bucket_path: Path, versions_root: Path, key_dir: Path, result: GCResult
    ) -> None:
        """Remove all version payloads/manifests for one missing object key."""
        try:
            rel = key_dir.relative_to(versions_root)
        except ValueError:
            return

        # If the live object still exists, its versions are not orphaned.
        object_path = bucket_path / rel
        if object_path.exists():
            return

        version_files = list(key_dir.glob("*.bin")) + list(key_dir.glob("*.json"))
        if not version_files:
            return

        for vf in version_files:
            try:
                # Only .bin payloads contribute to freed bytes.
                size = vf.stat().st_size if vf.suffix == ".bin" else 0
                if not self.dry_run:
                    vf.unlink(missing_ok=True)
                if vf.suffix == ".bin":
                    result.orphaned_version_bytes_freed += size
                result.orphaned_versions_deleted += 1
            except OSError as e:
                result.errors.append(f"version file {vf.name}: {e}")

    def _clean_empty_dirs(self, result: GCResult) -> None:
        """Prune empty directories inside system and legacy internal trees."""
        targets = [
            self._system_path() / self.SYSTEM_TMP_DIR,
            self._system_path() / self.SYSTEM_MULTIPART_DIR,
            self._system_path() / self.SYSTEM_BUCKETS_DIR,
        ]
        for bucket_name in self._list_bucket_names():
            targets.append(self.storage_root / bucket_name / ".meta")
            targets.append(self.storage_root / bucket_name / ".versions")
            targets.append(self.storage_root / bucket_name / ".multipart")

        for root in targets:
            if not root.exists():
                continue
            self._remove_empty_dirs_recursive(root, root, result)

    def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
        """Depth-first removal of empty dirs; returns True if ``path`` ended empty.

        ``stop_at`` (the scan root) is never removed, only reported as empty.
        """
        if self._shutdown:
            return False
        if not path.is_dir():
            return False

        try:
            children = list(path.iterdir())
        except OSError:
            return False

        all_empty = True
        for child in children:
            if self._throttle():
                return False
            if child.is_dir():
                if not self._remove_empty_dirs_recursive(child, stop_at, result):
                    all_empty = False
            else:
                all_empty = False

        if all_empty and path != stop_at:
            try:
                if not self.dry_run:
                    path.rmdir()
                result.empty_dirs_removed += 1
                return True
            except OSError:
                return False
        return all_empty

    def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
        """Recent GC runs as plain dicts (newest first)."""
        records = self.history_store.get_history(limit, offset)
        return [r.to_dict() for r in records]

    def get_status(self) -> dict:
        """Snapshot of scheduler state and configuration for status endpoints."""
        status: Dict[str, Any] = {
            "enabled": not self._shutdown or self._timer is not None,
            "running": self._timer is not None and not self._shutdown,
            "scanning": self._scanning,
            "interval_hours": self.interval_seconds / 3600.0,
            "temp_file_max_age_hours": self.temp_file_max_age_hours,
            "multipart_max_age_days": self.multipart_max_age_days,
            "lock_file_max_age_hours": self.lock_file_max_age_hours,
            "dry_run": self.dry_run,
            "io_throttle_ms": round(self._io_throttle * 1000),
        }
        if self._scanning and self._scan_start_time:
            status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
        return status
|
||||
1095
python/app/iam.py
1095
python/app/iam.py
File diff suppressed because it is too large
Load Diff
@@ -1,995 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
try:
|
||||
import myfsio_core as _rc
|
||||
if not hasattr(_rc, "md5_file"):
|
||||
raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
|
||||
_HAS_RUST = True
|
||||
except ImportError:
|
||||
_HAS_RUST = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _compute_etag(path: Path) -> str:
    """MD5 hex digest of the file at ``path``.

    Delegates to the Rust extension when available; otherwise streams the
    file through hashlib in 8 KiB chunks.
    """
    if _HAS_RUST:
        return _rc.md5_file(str(path))
    digest = hashlib.md5()
    with path.open("rb") as handle:
        while True:
            block = handle.read(8192)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
|
||||
|
||||
|
||||
@dataclass
class IntegrityIssue:
    """A single problem found (and possibly healed) during an integrity scan."""

    issue_type: str
    bucket: str
    key: str
    detail: str
    healed: bool = False
    heal_action: str = ""

    def to_dict(self) -> dict:
        """Serialise for JSON reporting."""
        return {
            name: getattr(self, name)
            for name in ("issue_type", "bucket", "key", "detail", "healed", "heal_action")
        }
|
||||
|
||||
|
||||
@dataclass
class IntegrityResult:
    """Aggregated counters, issues, and errors for one integrity scan."""

    corrupted_objects: int = 0
    orphaned_objects: int = 0
    phantom_metadata: int = 0
    stale_versions: int = 0
    etag_cache_inconsistencies: int = 0
    legacy_metadata_drifts: int = 0
    issues_healed: int = 0
    issues: List[IntegrityIssue] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    objects_scanned: int = 0
    buckets_scanned: int = 0
    execution_time_seconds: float = 0.0

    # Counter fields that together define total_issues.
    # (Unannotated, so the dataclass machinery ignores it.)
    _ISSUE_COUNTERS = (
        "corrupted_objects",
        "orphaned_objects",
        "phantom_metadata",
        "stale_versions",
        "etag_cache_inconsistencies",
        "legacy_metadata_drifts",
    )

    def to_dict(self) -> dict:
        """Serialise counters, issue list, and errors to a plain dict."""
        payload = {name: getattr(self, name) for name in self._ISSUE_COUNTERS}
        payload["issues_healed"] = self.issues_healed
        payload["issues"] = [issue.to_dict() for issue in self.issues]
        payload["errors"] = self.errors
        payload["objects_scanned"] = self.objects_scanned
        payload["buckets_scanned"] = self.buckets_scanned
        payload["execution_time_seconds"] = self.execution_time_seconds
        return payload

    @property
    def total_issues(self) -> int:
        """Sum of all issue-category counters (healed count excluded)."""
        return sum(getattr(self, name) for name in self._ISSUE_COUNTERS)

    @property
    def has_issues(self) -> bool:
        """True when any issue category is non-zero."""
        return self.total_issues > 0
|
||||
|
||||
|
||||
@dataclass
class IntegrityExecutionRecord:
    """One persisted integrity run with its flags and result payload."""

    timestamp: float
    result: dict
    dry_run: bool
    auto_heal: bool

    def to_dict(self) -> dict:
        """Serialise for JSON storage."""
        return {
            "timestamp": self.timestamp,
            "result": self.result,
            "dry_run": self.dry_run,
            "auto_heal": self.auto_heal,
        }

    @classmethod
    def from_dict(cls, data: dict) -> IntegrityExecutionRecord:
        """Rebuild from to_dict() output; missing flags default to False."""
        return cls(
            data["timestamp"],
            data["result"],
            data.get("dry_run", False),
            data.get("auto_heal", False),
        )
|
||||
|
||||
|
||||
class IntegrityHistoryStore:
    """JSON-backed list of recent integrity scan records (newest first)."""

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        """Location of the history file under the system config directory."""
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_history.json"

    def load(self) -> List[IntegrityExecutionRecord]:
        """Read all stored records; an unreadable file yields an empty list."""
        history_file = self._get_path()
        if not history_file.exists():
            return []
        try:
            with open(history_file, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            return [IntegrityExecutionRecord.from_dict(item) for item in payload.get("executions", [])]
        except (OSError, ValueError, KeyError) as exc:
            logger.error("Failed to load integrity history: %s", exc)
            return []

    def save(self, records: List[IntegrityExecutionRecord]) -> None:
        """Write at most ``max_records`` records; log (don't raise) on I/O error."""
        history_file = self._get_path()
        history_file.parent.mkdir(parents=True, exist_ok=True)
        trimmed = records[: self.max_records]
        try:
            with open(history_file, "w", encoding="utf-8") as fh:
                json.dump({"executions": [rec.to_dict() for rec in trimmed]}, fh, indent=2)
        except OSError as exc:
            logger.error("Failed to save integrity history: %s", exc)

    def add(self, record: IntegrityExecutionRecord) -> None:
        """Prepend a record (newest first) and persist, serialised by a lock."""
        with self._lock:
            self.save([record] + self.load())

    def get_history(self, limit: int = 50, offset: int = 0) -> List[IntegrityExecutionRecord]:
        """Return a slice of stored records (newest first)."""
        return self.load()[offset : offset + limit]
|
||||
|
||||
|
||||
class IntegrityCursorStore:
    """Persists per-bucket scan progress so integrity scans can resume.

    State shape: ``{"buckets": {name: {"last_scanned": float,
    "last_key": str | absent, "completed": bool}}}``.
    """

    def __init__(self, storage_root: Path) -> None:
        self.storage_root = storage_root
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        """Location of the cursor file under the system config directory."""
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"

    def load(self) -> Dict[str, Any]:
        """Read cursor state; any read problem yields a fresh empty structure."""
        cursor_file = self._get_path()
        if not cursor_file.exists():
            return {"buckets": {}}
        try:
            with open(cursor_file, "r", encoding="utf-8") as fh:
                state = json.load(fh)
        except (OSError, ValueError, KeyError):
            return {"buckets": {}}
        if isinstance(state.get("buckets"), dict):
            return state
        return {"buckets": {}}

    def save(self, data: Dict[str, Any]) -> None:
        """Write cursor state; log (don't raise) on I/O failure."""
        cursor_file = self._get_path()
        cursor_file.parent.mkdir(parents=True, exist_ok=True)
        try:
            with open(cursor_file, "w", encoding="utf-8") as fh:
                json.dump(data, fh, indent=2)
        except OSError as exc:
            logger.error("Failed to save integrity cursor: %s", exc)

    def update_bucket(
        self,
        bucket_name: str,
        timestamp: float,
        last_key: Optional[str] = None,
        completed: bool = False,
    ) -> None:
        """Record scan progress for one bucket.

        A completed scan clears the resume key; an in-progress scan keeps
        ``last_key`` so the next pass can continue where this one stopped.
        """
        with self._lock:
            state = self.load()
            entry = state["buckets"].get(bucket_name, {})
            entry["last_scanned"] = timestamp
            if completed:
                entry.pop("last_key", None)
                entry["completed"] = True
            else:
                if last_key is not None:
                    entry["last_key"] = last_key
                entry["completed"] = False
            state["buckets"][bucket_name] = entry
            self.save(state)

    def clean_stale(self, existing_buckets: List[str]) -> None:
        """Drop cursor entries for buckets that no longer exist."""
        with self._lock:
            state = self.load()
            keep = set(existing_buckets)
            stale = [name for name in state["buckets"] if name not in keep]
            if stale:
                for name in stale:
                    del state["buckets"][name]
                self.save(state)

    def get_last_key(self, bucket_name: str) -> Optional[str]:
        """Resume key for ``bucket_name``, or None if unknown/completed."""
        entry = self.load().get("buckets", {}).get(bucket_name)
        return None if entry is None else entry.get("last_key")

    def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
        """Order buckets for scanning: unfinished ones first, oldest scans first.

        Never-scanned buckets and buckets with a pending resume key come
        before fully scanned ones; within each group, the least recently
        scanned bucket goes first (stable sort preserves input order on ties).
        """
        info = self.load().get("buckets", {})
        pending = []
        finished = []
        for name in bucket_names:
            entry = info.get(name)
            if entry is None:
                pending.append((name, 0.0))
            elif entry.get("last_key") is not None:
                pending.append((name, entry.get("last_scanned", 0.0)))
            else:
                finished.append((name, entry.get("last_scanned", 0.0)))
        pending.sort(key=lambda item: item[1])
        finished.sort(key=lambda item: item[1])
        return [name for name, _ in pending + finished]

    def get_info(self) -> Dict[str, Any]:
        """Summarise tracked cursor state for status endpoints."""
        buckets = self.load().get("buckets", {})
        summary = {
            name: {
                "last_scanned": entry.get("last_scanned"),
                "last_key": entry.get("last_key"),
                "completed": entry.get("completed", False),
            }
            for name, entry in buckets.items()
        }
        return {"tracked_buckets": len(buckets), "buckets": summary}
|
||||
|
||||
|
||||
MAX_ISSUES = 500
|
||||
|
||||
|
||||
class IntegrityChecker:
|
||||
SYSTEM_ROOT = ".myfsio.sys"
|
||||
SYSTEM_BUCKETS_DIR = "buckets"
|
||||
BUCKET_META_DIR = "meta"
|
||||
BUCKET_VERSIONS_DIR = "versions"
|
||||
INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
storage_root: Path,
|
||||
interval_hours: float = 24.0,
|
||||
batch_size: int = 1000,
|
||||
auto_heal: bool = False,
|
||||
dry_run: bool = False,
|
||||
max_history: int = 50,
|
||||
io_throttle_ms: int = 10,
|
||||
) -> None:
|
||||
self.storage_root = Path(storage_root)
|
||||
self.interval_seconds = interval_hours * 3600.0
|
||||
self.batch_size = batch_size
|
||||
self.auto_heal = auto_heal
|
||||
self.dry_run = dry_run
|
||||
self._timer: Optional[threading.Timer] = None
|
||||
self._shutdown = False
|
||||
self._lock = threading.Lock()
|
||||
self._scanning = False
|
||||
self._scan_start_time: Optional[float] = None
|
||||
self._io_throttle = max(0, io_throttle_ms) / 1000.0
|
||||
self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
|
||||
self.cursor_store = IntegrityCursorStore(self.storage_root)
|
||||
|
||||
def start(self) -> None:
    """Arm the recurring scan timer; no-op when one is already armed."""
    if self._timer is None:
        self._shutdown = False
        self._schedule_next()
        logger.info(
            "Integrity checker started: interval=%.1fh, batch_size=%d, auto_heal=%s, dry_run=%s",
            self.interval_seconds / 3600.0,
            self.batch_size,
            self.auto_heal,
            self.dry_run,
        )
|
||||
|
||||
def stop(self) -> None:
    """Cancel any pending scan timer and flag the checker as shut down."""
    self._shutdown = True
    timer = self._timer
    if timer:
        timer.cancel()
        self._timer = None
    logger.info("Integrity checker stopped")
|
||||
|
||||
def _schedule_next(self) -> None:
    """Arm a daemon timer for the next scan cycle, unless shutting down."""
    if self._shutdown:
        return
    timer = threading.Timer(self.interval_seconds, self._run_cycle)
    timer.daemon = True
    self._timer = timer
    timer.start()
|
||||
|
||||
def _run_cycle(self) -> None:
    """Timer callback: execute one scan, then always re-arm the timer."""
    if self._shutdown:
        return
    try:
        self.run_now()
    except Exception as exc:
        # Swallow so a single failed cycle never kills the scheduler.
        logger.error("Integrity check cycle failed: %s", exc)
    finally:
        self._schedule_next()
|
||||
|
||||
def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
    """Run one synchronous integrity scan across all buckets.

    Args:
        auto_heal: Override the instance default for this run, or None.
        dry_run: Override the instance default for this run, or None.

    Returns:
        The populated IntegrityResult for this cycle.

    Raises:
        RuntimeError: If another scan is already holding the lock.
    """
    # Non-blocking acquire: two concurrent scans would race on the indexes.
    if not self._lock.acquire(blocking=False):
        raise RuntimeError("Integrity scan is already in progress")

    try:
        self._scanning = True
        self._scan_start_time = time.time()

        effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
        effective_dry_run = dry_run if dry_run is not None else self.dry_run

        start = self._scan_start_time
        result = IntegrityResult()

        bucket_names = self._list_bucket_names()
        # Drop cursors for buckets that no longer exist, then scan buckets
        # in the store's preferred order (incomplete buckets first).
        self.cursor_store.clean_stale(bucket_names)
        ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)

        for bucket_name in ordered_buckets:
            if self._batch_exhausted(result):
                break
            result.buckets_scanned += 1
            # Resume point left by the previous cycle (None = from start).
            cursor_key = self.cursor_store.get_last_key(bucket_name)
            key_corrupted = self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            key_orphaned = self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            key_phantom = self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
            self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
            self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
            self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
            returned_keys = [k for k in (key_corrupted, key_orphaned, key_phantom) if k is not None]
            bucket_exhausted = self._batch_exhausted(result)
            if bucket_exhausted and returned_keys:
                # Budget ran out mid-bucket: persist the smallest of the three
                # cursors so no check skips keys it has not yet examined.
                self.cursor_store.update_bucket(bucket_name, time.time(), last_key=min(returned_keys))
            else:
                self.cursor_store.update_bucket(bucket_name, time.time(), completed=True)

        result.execution_time_seconds = time.time() - start

        if result.has_issues or result.errors:
            logger.info(
                "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
                "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
                result.execution_time_seconds,
                result.corrupted_objects,
                result.orphaned_objects,
                result.phantom_metadata,
                result.stale_versions,
                result.etag_cache_inconsistencies,
                result.legacy_metadata_drifts,
                result.issues_healed,
                len(result.errors),
                " (dry run)" if effective_dry_run else "",
            )

        # Every run is recorded, even clean ones.
        record = IntegrityExecutionRecord(
            timestamp=time.time(),
            result=result.to_dict(),
            dry_run=effective_dry_run,
            auto_heal=effective_auto_heal,
        )
        self.history_store.add(record)

        return result
    finally:
        self._scanning = False
        self._scan_start_time = None
        self._lock.release()
|
||||
|
||||
def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
    """Launch a scan on a background daemon thread.

    Returns False when a scan is already in flight, True otherwise.
    """
    if self._scanning:
        return False
    worker = threading.Thread(
        target=self.run_now,
        args=(auto_heal, dry_run),
        daemon=True,
    )
    worker.start()
    return True
|
||||
|
||||
def _system_path(self) -> Path:
    """Path of the hidden system area under the storage root."""
    return self.storage_root.joinpath(self.SYSTEM_ROOT)
|
||||
|
||||
def _list_bucket_names(self) -> List[str]:
    """Names of bucket directories under the storage root (best effort).

    Returns whatever was collected before any OSError; the system
    directory itself is never included.
    """
    names: List[str] = []
    try:
        for child in self.storage_root.iterdir():
            if not child.is_dir() or child.name == self.SYSTEM_ROOT:
                continue
            names.append(child.name)
    except OSError:
        # Unreadable root: keep any names gathered so far.
        pass
    return names
|
||||
|
||||
def _throttle(self) -> bool:
    """Pause between filesystem operations.

    Returns True when the scan should abort because shutdown was
    requested (checked both before and after the sleep).
    """
    if not self._shutdown and self._io_throttle > 0:
        time.sleep(self._io_throttle)
    return self._shutdown
|
||||
|
||||
def _batch_exhausted(self, result: IntegrityResult) -> bool:
    """True once the per-cycle object budget is spent or shutdown began."""
    if self._shutdown:
        return True
    return result.objects_scanned >= self.batch_size
|
||||
|
||||
def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
    """Append an issue detail unless the bounded list is already full."""
    if len(result.issues) >= MAX_ISSUES:
        return
    result.issues.append(issue)
|
||||
|
||||
def _collect_index_keys(
    self, meta_root: Path, cursor_key: Optional[str] = None,
) -> Dict[str, Dict[str, Any]]:
    """Gather every object key recorded in _index.json files under meta_root.

    Args:
        meta_root: Root of the bucket's metadata tree.
        cursor_key: When given, keys lexicographically <= cursor_key are
            skipped so a scan can resume where the last cycle stopped.

    Returns:
        Map from full object key to {"entry": <index entry>,
        "index_file": <Path of the _index.json>, "key_name": <basename>}.
    """
    all_keys: Dict[str, Dict[str, Any]] = {}
    if not meta_root.exists():
        return all_keys
    try:
        for index_file in meta_root.rglob("_index.json"):
            if not index_file.is_file():
                continue
            rel_dir = index_file.parent.relative_to(meta_root)
            dir_prefix = "" if rel_dir == Path(".") else rel_dir.as_posix()
            if cursor_key is not None and dir_prefix:
                full_prefix = dir_prefix + "/"
                # Prune whole directories that sort entirely before the
                # cursor; directories containing the cursor are kept.
                if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                    continue
            try:
                index_data = json.loads(index_file.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                # Unreadable/corrupt index: skip it rather than fail the scan.
                continue
            for key_name, entry in index_data.items():
                full_key = (dir_prefix + "/" + key_name) if dir_prefix else key_name
                if cursor_key is not None and full_key <= cursor_key:
                    continue
                all_keys[full_key] = {
                    "entry": entry,
                    "index_file": index_file,
                    "key_name": key_name,
                }
    except OSError:
        # Best effort: return whatever was collected before the error.
        pass
    return all_keys
|
||||
|
||||
def _walk_bucket_files_sorted(
    self, bucket_path: Path, cursor_key: Optional[str] = None,
):
    """Yield object keys (relative POSIX-style paths) under bucket_path.

    Keys are produced in the same lexicographic order the index checks use
    (directory names are sorted with a trailing "/" so ordering matches
    full-key comparisons). Internal bookkeeping folders at the top level
    are skipped. When cursor_key is given, keys <= cursor_key are skipped
    and directories sorting entirely before the cursor are pruned.
    """
    def _walk(dir_path: Path, prefix: str):
        try:
            entries = list(os.scandir(dir_path))
        except OSError:
            return

        def _sort_key(e):
            # Append "/" to directory names so the sort order of entries
            # matches the order of the full keys they will produce.
            if e.is_dir(follow_symlinks=False):
                return e.name + "/"
            return e.name

        entries.sort(key=_sort_key)

        for entry in entries:
            if entry.is_dir(follow_symlinks=False):
                if not prefix and entry.name in self.INTERNAL_FOLDERS:
                    continue
                new_prefix = (prefix + "/" + entry.name) if prefix else entry.name
                if cursor_key is not None:
                    full_prefix = new_prefix + "/"
                    # Prune subtrees wholly before the cursor position.
                    if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                        continue
                yield from _walk(Path(entry.path), new_prefix)
            elif entry.is_file(follow_symlinks=False):
                full_key = (prefix + "/" + entry.name) if prefix else entry.name
                if cursor_key is not None and full_key <= cursor_key:
                    continue
                yield full_key

    yield from _walk(bucket_path, "")
|
||||
|
||||
def _check_corrupted_objects(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect objects whose on-disk ETag no longer matches the index.

    Healing rewrites the index entry with the recomputed etag/size/mtime
    (the file on disk is treated as authoritative).

    Returns:
        The last key examined (used as a resume cursor), or None if the
        bucket was skipped or nothing was examined.
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    if not meta_root.exists():
        return None

    last_key = None
    try:
        all_keys = self._collect_index_keys(meta_root, cursor_key)
        sorted_keys = sorted(all_keys.keys())

        for full_key in sorted_keys:
            if self._throttle():
                return last_key
            if self._batch_exhausted(result):
                return last_key

            info = all_keys[full_key]
            entry = info["entry"]
            index_file = info["index_file"]
            key_name = info["key_name"]

            object_path = bucket_path / full_key
            # Missing files are the phantom-metadata check's job.
            if not object_path.exists():
                continue

            result.objects_scanned += 1
            last_key = full_key

            meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
            stored_etag = meta.get("__etag__")
            if not stored_etag:
                # Nothing recorded to compare against.
                continue

            try:
                actual_etag = _compute_etag(object_path)
            except OSError:
                continue

            if actual_etag != stored_etag:
                result.corrupted_objects += 1
                issue = IntegrityIssue(
                    issue_type="corrupted_object",
                    bucket=bucket_name,
                    key=full_key,
                    detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
                )

                if auto_heal and not dry_run:
                    try:
                        stat = object_path.stat()
                        meta["__etag__"] = actual_etag
                        meta["__size__"] = str(stat.st_size)
                        meta["__last_modified__"] = str(stat.st_mtime)
                        try:
                            index_data = json.loads(index_file.read_text(encoding="utf-8"))
                        except (OSError, json.JSONDecodeError):
                            index_data = {}
                        index_data[key_name] = {"metadata": meta}
                        self._atomic_write_index(index_file, index_data)
                        issue.healed = True
                        issue.heal_action = "updated etag in index"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check corrupted {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_orphaned_objects(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect data files that have no corresponding metadata index entry.

    Healing creates a fresh index entry from the file's computed etag,
    size, and mtime.

    Returns:
        The last key examined (resume cursor) or None.
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    last_key = None
    try:
        for full_key in self._walk_bucket_files_sorted(bucket_path, cursor_key):
            if self._throttle():
                return last_key
            if self._batch_exhausted(result):
                return last_key

            result.objects_scanned += 1
            last_key = full_key
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent

            # Each directory level has its own _index.json.
            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"

            has_entry = False
            if index_path.exists():
                try:
                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                    has_entry = key_name in index_data
                except (OSError, json.JSONDecodeError):
                    # Unreadable index: treated as "no entry" below.
                    pass

            if not has_entry:
                result.orphaned_objects += 1
                issue = IntegrityIssue(
                    issue_type="orphaned_object",
                    bucket=bucket_name,
                    key=full_key,
                    detail="file exists without metadata entry",
                )

                if auto_heal and not dry_run:
                    try:
                        object_path = bucket_path / full_key
                        etag = _compute_etag(object_path)
                        stat = object_path.stat()
                        meta = {
                            "__etag__": etag,
                            "__size__": str(stat.st_size),
                            "__last_modified__": str(stat.st_mtime),
                        }
                        index_data = {}
                        if index_path.exists():
                            try:
                                index_data = json.loads(index_path.read_text(encoding="utf-8"))
                            except (OSError, json.JSONDecodeError):
                                pass
                        index_data[key_name] = {"metadata": meta}
                        self._atomic_write_index(index_path, index_data)
                        issue.healed = True
                        issue.heal_action = "created metadata entry"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal orphaned {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check orphaned {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_phantom_metadata(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
    cursor_key: Optional[str] = None,
) -> Optional[str]:
    """Detect index entries whose data file is missing from disk.

    Healing removes the stale entries; removals are batched per index file
    so each _index.json is rewritten at most once. An index left empty is
    deleted outright.

    Returns:
        The last key examined (resume cursor) or None.
    """
    if self._batch_exhausted(result):
        return None
    bucket_path = self.storage_root / bucket_name
    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    if not meta_root.exists():
        return None

    last_key = None
    try:
        all_keys = self._collect_index_keys(meta_root, cursor_key)
        sorted_keys = sorted(all_keys.keys())

        # index file -> key names to delete from it.
        heal_by_index: Dict[Path, List[str]] = {}

        for full_key in sorted_keys:
            if self._batch_exhausted(result):
                break

            result.objects_scanned += 1
            last_key = full_key

            object_path = bucket_path / full_key
            if not object_path.exists():
                result.phantom_metadata += 1
                info = all_keys[full_key]
                issue = IntegrityIssue(
                    issue_type="phantom_metadata",
                    bucket=bucket_name,
                    key=full_key,
                    detail="metadata entry without file on disk",
                )
                if auto_heal and not dry_run:
                    index_file = info["index_file"]
                    heal_by_index.setdefault(index_file, []).append(info["key_name"])
                    # Marked healed optimistically; the rewrite below may
                    # still append an error if it fails.
                    issue.healed = True
                    issue.heal_action = "removed stale index entry"
                    result.issues_healed += 1
                self._add_issue(result, issue)

        if heal_by_index and auto_heal and not dry_run:
            for index_file, keys_to_remove in heal_by_index.items():
                try:
                    index_data = json.loads(index_file.read_text(encoding="utf-8"))
                    for k in keys_to_remove:
                        index_data.pop(k, None)
                    if index_data:
                        self._atomic_write_index(index_file, index_data)
                    else:
                        # No entries left: drop the index file entirely.
                        index_file.unlink(missing_ok=True)
                except OSError as e:
                    result.errors.append(f"heal phantom {bucket_name}: {e}")
    except OSError as e:
        result.errors.append(f"check phantom {bucket_name}: {e}")
    return last_key
|
||||
|
||||
def _check_stale_versions(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect unpaired version artifacts.

    Each version is stored as a .bin data file plus a .json manifest with
    the same stem; either one without its partner counts as stale.
    Healing deletes the unpaired file.
    """
    if self._batch_exhausted(result):
        return
    versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR

    if not versions_root.exists():
        return

    try:
        for key_dir in versions_root.rglob("*"):
            if self._throttle():
                return
            if self._batch_exhausted(result):
                return
            if not key_dir.is_dir():
                continue

            # Pair data files and manifests by shared stem.
            bin_files = {f.stem: f for f in key_dir.glob("*.bin")}
            json_files = {f.stem: f for f in key_dir.glob("*.json")}

            for stem, bin_file in bin_files.items():
                if self._batch_exhausted(result):
                    return
                result.objects_scanned += 1
                if stem not in json_files:
                    result.stale_versions += 1
                    issue = IntegrityIssue(
                        issue_type="stale_version",
                        bucket=bucket_name,
                        key=f"{key_dir.relative_to(versions_root).as_posix()}/{bin_file.name}",
                        detail="version data without manifest",
                    )
                    if auto_heal and not dry_run:
                        try:
                            bin_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "removed orphaned version data"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal stale version {bin_file}: {e}")
                    self._add_issue(result, issue)

            for stem, json_file in json_files.items():
                if self._batch_exhausted(result):
                    return
                result.objects_scanned += 1
                if stem not in bin_files:
                    result.stale_versions += 1
                    issue = IntegrityIssue(
                        issue_type="stale_version",
                        bucket=bucket_name,
                        key=f"{key_dir.relative_to(versions_root).as_posix()}/{json_file.name}",
                        detail="version manifest without data",
                    )
                    if auto_heal and not dry_run:
                        try:
                            json_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "removed orphaned version manifest"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal stale version {json_file}: {e}")
                    self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check stale versions {bucket_name}: {e}")
|
||||
|
||||
def _check_etag_cache(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect etag_index.json entries that disagree with the metadata index.

    The metadata index is treated as authoritative: healing deletes the
    whole etag cache file (presumably it is rebuilt lazily elsewhere —
    TODO confirm) and marks every mismatch for this bucket as healed.
    """
    if self._batch_exhausted(result):
        return
    etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"

    if not etag_index_path.exists():
        return

    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
    if not meta_root.exists():
        return

    try:
        etag_cache = json.loads(etag_index_path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return

    found_mismatch = False

    for full_key, cached_etag in etag_cache.items():
        if self._batch_exhausted(result):
            break
        result.objects_scanned += 1
        key_path = Path(full_key)
        key_name = key_path.name
        parent = key_path.parent

        if parent == Path("."):
            index_path = meta_root / "_index.json"
        else:
            index_path = meta_root / parent / "_index.json"

        if not index_path.exists():
            continue

        try:
            index_data = json.loads(index_path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            continue

        entry = index_data.get(key_name)
        if not entry:
            continue

        meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
        stored_etag = meta.get("__etag__")

        if stored_etag and cached_etag != stored_etag:
            result.etag_cache_inconsistencies += 1
            found_mismatch = True
            issue = IntegrityIssue(
                issue_type="etag_cache_inconsistency",
                bucket=bucket_name,
                key=full_key,
                detail=f"cached_etag={cached_etag} index_etag={stored_etag}",
            )
            self._add_issue(result, issue)

    if found_mismatch and auto_heal and not dry_run:
        try:
            # One deletion heals all mismatches for the bucket at once.
            etag_index_path.unlink(missing_ok=True)
            for issue in result.issues:
                if issue.issue_type == "etag_cache_inconsistency" and issue.bucket == bucket_name and not issue.healed:
                    issue.healed = True
                    issue.heal_action = "deleted etag_index.json"
                    result.issues_healed += 1
        except OSError as e:
            result.errors.append(f"heal etag cache {bucket_name}: {e}")
|
||||
|
||||
def _check_legacy_metadata(
    self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
) -> None:
    """Detect leftover legacy per-object .meta.json files under <bucket>/.meta.

    Two cases are flagged as legacy_metadata_drift:
      * no matching index entry — healing migrates the legacy metadata
        into the index and deletes the legacy file;
      * metadata differs from the index entry — healing deletes the
        legacy file (the index is authoritative).
    """
    if self._batch_exhausted(result):
        return
    legacy_meta_root = self.storage_root / bucket_name / ".meta"
    if not legacy_meta_root.exists():
        return

    meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

    try:
        for meta_file in legacy_meta_root.rglob("*.meta.json"):
            if self._throttle():
                return
            if self._batch_exhausted(result):
                return
            if not meta_file.is_file():
                continue

            result.objects_scanned += 1
            try:
                rel = meta_file.relative_to(legacy_meta_root)
            except ValueError:
                continue

            # "<key>.meta.json" -> "<key>"
            full_key = rel.as_posix().removesuffix(".meta.json")
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent

            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"

            try:
                legacy_data = json.loads(meta_file.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                continue

            index_entry = None
            if index_path.exists():
                try:
                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                    index_entry = index_data.get(key_name)
                except (OSError, json.JSONDecodeError):
                    pass

            if index_entry is None:
                result.legacy_metadata_drifts += 1
                issue = IntegrityIssue(
                    issue_type="legacy_metadata_drift",
                    bucket=bucket_name,
                    key=full_key,
                    detail="unmigrated legacy .meta.json",
                )

                if auto_heal and not dry_run:
                    try:
                        index_data = {}
                        if index_path.exists():
                            try:
                                index_data = json.loads(index_path.read_text(encoding="utf-8"))
                            except (OSError, json.JSONDecodeError):
                                pass
                        index_data[key_name] = {"metadata": legacy_data}
                        self._atomic_write_index(index_path, index_data)
                        meta_file.unlink(missing_ok=True)
                        issue.healed = True
                        issue.heal_action = "migrated to index and deleted legacy file"
                        result.issues_healed += 1
                    except OSError as e:
                        result.errors.append(f"heal legacy {bucket_name}/{full_key}: {e}")

                self._add_issue(result, issue)
            else:
                index_meta = index_entry.get("metadata", {}) if isinstance(index_entry, dict) else {}
                if legacy_data != index_meta:
                    result.legacy_metadata_drifts += 1
                    issue = IntegrityIssue(
                        issue_type="legacy_metadata_drift",
                        bucket=bucket_name,
                        key=full_key,
                        detail="legacy .meta.json differs from index entry",
                    )

                    if auto_heal and not dry_run:
                        try:
                            meta_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "deleted legacy file (index is authoritative)"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal legacy drift {bucket_name}/{full_key}: {e}")

                    self._add_issue(result, issue)
    except OSError as e:
        result.errors.append(f"check legacy meta {bucket_name}: {e}")
|
||||
|
||||
@staticmethod
|
||||
def _atomic_write_index(index_path: Path, data: Dict[str, Any]) -> None:
|
||||
index_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
tmp_path = index_path.with_suffix(".tmp")
|
||||
try:
|
||||
with open(tmp_path, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f)
|
||||
os.replace(str(tmp_path), str(index_path))
|
||||
except BaseException:
|
||||
try:
|
||||
tmp_path.unlink(missing_ok=True)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
    """Serialized scan-history records from the backing store."""
    return [
        record.to_dict()
        for record in self.history_store.get_history(limit, offset)
    ]
|
||||
|
||||
def get_status(self) -> dict:
    """Snapshot of the checker's configuration and runtime state.

    Returns:
        Dict with the enabled/running/scanning flags, the effective
        configuration, elapsed time of an in-flight scan (when scanning),
        and the cursor store's per-bucket summary.
    """
    status: Dict[str, Any] = {
        # "enabled" is true unless stopped with no timer armed.
        "enabled": not self._shutdown or self._timer is not None,
        "running": self._timer is not None and not self._shutdown,
        "scanning": self._scanning,
        "interval_hours": self.interval_seconds / 3600.0,
        "batch_size": self.batch_size,
        "auto_heal": self.auto_heal,
        "dry_run": self.dry_run,
        "io_throttle_ms": round(self._io_throttle * 1000),
    }
    if self._scanning and self._scan_start_time is not None:
        status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
    status["cursor"] = self.cursor_store.get_info()
    return status
|
||||
@@ -1,108 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
# Hostname validator: dot-separated labels of [a-z0-9-] where no label
# starts or ends with a hyphen.
_DOMAIN_RE = re.compile(
    r"^(?!-)[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
)


def normalize_domain(raw: str) -> str:
    """Reduce a user-supplied URL or hostname to a bare lowercase domain.

    Strips a leading http(s) scheme, then any path, query string, or
    fragment, and finally a trailing ``:port`` suffix.
    """
    host = raw.strip().lower()
    for scheme in ("https://", "http://"):
        if host.startswith(scheme):
            host = host[len(scheme):]
    # Keep only the authority part; order matters ("/" before "?"/"#").
    for separator in ("/", "?", "#"):
        host = host.split(separator, 1)[0]
    if ":" in host:
        # Note: not IPv6-aware; drops everything after the last colon.
        host = host.rsplit(":", 1)[0]
    return host


def is_valid_domain(domain: str) -> bool:
    """True when ``domain`` is a syntactically valid DNS name (<= 253 chars)."""
    if not domain or len(domain) > 253:
        return False
    return _DOMAIN_RE.match(domain) is not None
|
||||
|
||||
|
||||
class WebsiteDomainStore:
    """Thread-safe, file-backed mapping of custom domain -> bucket name.

    The JSON config file is the source of truth; reads transparently
    reload it when its mtime changes, so external edits are picked up
    without a restart. Domain keys are stored lowercase.
    """

    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._lock = threading.Lock()
        # domain (lowercase) -> bucket name
        self._domains: Dict[str, str] = {}
        # mtime of the file at last load; 0.0 means "never loaded".
        self._last_mtime: float = 0.0
        self.reload()

    def reload(self) -> None:
        """Unconditionally (re)load the mapping from disk.

        Any read/parse failure or non-dict payload resets to an empty map.
        Not locked: callers that need mutual exclusion hold self._lock.
        """
        if not self.config_path.exists():
            self._domains = {}
            self._last_mtime = 0.0
            return
        try:
            self._last_mtime = self.config_path.stat().st_mtime
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            if isinstance(data, dict):
                self._domains = {k.lower(): v for k, v in data.items()}
            else:
                self._domains = {}
        except (OSError, json.JSONDecodeError):
            self._domains = {}

    def _maybe_reload(self) -> None:
        """Reload from disk only if the file's mtime changed (or it vanished).

        Best effort: errors leave the in-memory mapping untouched.
        Caller must hold self._lock.
        """
        try:
            if self.config_path.exists():
                mtime = self.config_path.stat().st_mtime
                if mtime != self._last_mtime:
                    self._last_mtime = mtime
                    with open(self.config_path, "r", encoding="utf-8") as f:
                        data = json.load(f)
                    if isinstance(data, dict):
                        self._domains = {k.lower(): v for k, v in data.items()}
                    else:
                        self._domains = {}
            elif self._domains:
                # File was deleted externally: drop the cached mapping.
                self._domains = {}
                self._last_mtime = 0.0
        except (OSError, json.JSONDecodeError):
            pass

    def _save(self) -> None:
        """Persist the mapping to disk and record the new mtime.

        Caller must hold self._lock.
        """
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(self._domains, f, indent=2)
        # Record our own write so _maybe_reload doesn't re-read it.
        self._last_mtime = self.config_path.stat().st_mtime

    def list_all(self) -> List[Dict[str, str]]:
        """All mappings as [{"domain": ..., "bucket": ...}, ...]."""
        with self._lock:
            self._maybe_reload()
            return [{"domain": d, "bucket": b} for d, b in self._domains.items()]

    def get_bucket(self, domain: str) -> Optional[str]:
        """Bucket mapped to ``domain`` (case-insensitive), or None."""
        with self._lock:
            self._maybe_reload()
            return self._domains.get(domain.lower())

    def get_domains_for_bucket(self, bucket: str) -> List[str]:
        """All domains that point at ``bucket``."""
        with self._lock:
            self._maybe_reload()
            return [d for d, b in self._domains.items() if b == bucket]

    def set_mapping(self, domain: str, bucket: str) -> None:
        """Create or replace the mapping for ``domain`` and persist it."""
        with self._lock:
            self._domains[domain.lower()] = bucket
            self._save()

    def delete_mapping(self, domain: str) -> bool:
        """Remove the mapping for ``domain``; returns False if absent."""
        with self._lock:
            key = domain.lower()
            if key not in self._domains:
                return False
            del self._domains[key]
            self._save()
            return True
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/sh
# Container entrypoint: abort on any command failure.
set -e

# exec replaces the shell so the Python app becomes PID 1 and receives
# container signals (e.g. SIGTERM on docker stop) directly.
exec python run.py --prod
|
||||
@@ -1,24 +0,0 @@
|
||||
# Cargo manifest for the myfsio_core native extension.
[package]
name = "myfsio_core"
version = "0.1.0"
edition = "2021"

[lib]
name = "myfsio_core"
# cdylib: build a C-ABI shared library loadable as a Python module.
crate-type = ["cdylib"]

[dependencies]
# extension-module feature: don't link libpython at build time.
pyo3 = { version = "0.28", features = ["extension-module"] }
hmac = "0.12"
sha2 = "0.10"
md-5 = "0.10"
hex = "0.4"
unicode-normalization = "0.1"
serde_json = "1"
regex = "1"
lru = "0.14"
parking_lot = "0.12"
percent-encoding = "2"
# AES-256-GCM + HKDF back the chunked stream encryption functions.
aes-gcm = "0.10"
hkdf = "0.12"
uuid = { version = "1", features = ["v4"] }
|
||||
@@ -1,11 +0,0 @@
|
||||
# Packaging config: build the Rust crate into a Python wheel via maturin.
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[project]
name = "myfsio_core"
version = "0.1.0"
requires-python = ">=3.10"

[tool.maturin]
# Must match the pyo3 feature enabled in Cargo.toml.
features = ["pyo3/extension-module"]
|
||||
@@ -1,192 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use pyo3::exceptions::{PyIOError, PyValueError};
|
||||
use pyo3::prelude::*;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Seek, SeekFrom, Write};
|
||||
|
||||
const DEFAULT_CHUNK_SIZE: usize = 65536;
|
||||
const HEADER_SIZE: usize = 4;
|
||||
|
||||
/// Fill `buf` from `reader` as far as possible, retrying on `Interrupted`.
///
/// Returns the number of bytes actually read; fewer than `buf.len()` is
/// only possible at end of stream, and `0` means EOF.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        let read = match reader.read(&mut buf[total..]) {
            Ok(0) => break, // EOF: return the short count.
            Ok(n) => n,
            // Transient signal interruption: retry the same read.
            Err(e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        };
        total += read;
    }
    Ok(total)
}
|
||||
|
||||
/// Derive a per-chunk 12-byte AES-GCM nonce via HKDF-SHA256.
///
/// The base nonce is used as the HKDF salt, the literal `"chunk_nonce"`
/// as the input key material, and the big-endian chunk index as the
/// `info` parameter — so each chunk index yields a distinct nonce for
/// the same stream.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], String> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| format!("HKDF expand failed: {}", e))?;
    Ok(okm)
}
|
||||
|
||||
/// Encrypt `input_path` to `output_path` with AES-256-GCM in fixed-size
/// chunks, releasing the Python GIL for the duration of the I/O.
///
/// Output layout: a 4-byte big-endian chunk count header, then for each
/// chunk a 4-byte big-endian ciphertext length followed by the ciphertext
/// (plaintext chunk + GCM tag). Each chunk uses a nonce derived from
/// `base_nonce` and the chunk index (see `derive_chunk_nonce`).
///
/// Returns the number of chunks written.
///
/// # Errors
/// `ValueError` for a key not 32 bytes, a base nonce not 12 bytes, or an
/// encryption failure; `IOError` for any file operation failure.
#[pyfunction]
#[pyo3(signature = (input_path, output_path, key, base_nonce, chunk_size=DEFAULT_CHUNK_SIZE))]
pub fn encrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: usize,
) -> PyResult<u32> {
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    // Guard against a zero chunk size, which would loop forever.
    let chunk_size = if chunk_size == 0 {
        DEFAULT_CHUNK_SIZE
    } else {
        chunk_size
    };

    // Copy inputs to owned values so the closure below can be 'static.
    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    // SAFETY of unwrap: lengths were validated above.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    // Run the blocking file I/O with the GIL released.
    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        // Placeholder header; the real chunk count is patched in at the end.
        outfile
            .write_all(&[0u8; 4])
            .map_err(|e| PyIOError::new_err(format!("Failed to write header: {}", e)))?;

        let mut buf = vec![0u8; chunk_size];
        let mut chunk_index: u32 = 0;

        loop {
            let n = read_exact_chunk(&mut infile, &mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read: {}", e)))?;
            if n == 0 {
                break;
            }

            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            let encrypted = cipher
                .encrypt(nonce, &buf[..n])
                .map_err(|e| PyValueError::new_err(format!("Encrypt failed: {}", e)))?;

            // Length-prefixed frame: ciphertext size then ciphertext.
            let size = encrypted.len() as u32;
            outfile
                .write_all(&size.to_be_bytes())
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk size: {}", e)))?;
            outfile
                .write_all(&encrypted)
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk: {}", e)))?;

            chunk_index += 1;
        }

        // Backfill the header with the final chunk count.
        outfile
            .seek(SeekFrom::Start(0))
            .map_err(|e| PyIOError::new_err(format!("Failed to seek: {}", e)))?;
        outfile
            .write_all(&chunk_index.to_be_bytes())
            .map_err(|e| PyIOError::new_err(format!("Failed to write chunk count: {}", e)))?;

        Ok(chunk_index)
    })
}
|
||||
|
||||
#[pyfunction]
/// Decrypt a file produced by `encrypt_stream_chunked` back into plaintext.
///
/// Reads the 4-byte big-endian chunk count header, then for each chunk a
/// 4-byte big-endian ciphertext length followed by the ciphertext, and
/// authenticates/decrypts it under the per-chunk nonce derived from
/// `base_nonce` and the chunk index. Returns the number of chunks processed.
///
/// Raises ValueError for bad key/nonce sizes or authentication failure
/// (tampered or wrong-key data), IOError for file I/O failures.
pub fn decrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
) -> PyResult<u32> {
    // AES-256 requires exactly a 32-byte key.
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    // GCM uses a 96-bit (12-byte) nonce.
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    // Lengths were validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    // Release the GIL for the duration of the file decryption.
    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        // HEADER_SIZE must be 4 for from_be_bytes below to type-check.
        let mut header = [0u8; HEADER_SIZE];
        infile
            .read_exact(&mut header)
            .map_err(|e| PyIOError::new_err(format!("Failed to read header: {}", e)))?;
        let chunk_count = u32::from_be_bytes(header);

        let mut size_buf = [0u8; HEADER_SIZE];
        for chunk_index in 0..chunk_count {
            infile
                .read_exact(&mut size_buf)
                .map_err(|e| {
                    PyIOError::new_err(format!(
                        "Failed to read chunk {} size: {}",
                        chunk_index, e
                    ))
                })?;
            let chunk_size = u32::from_be_bytes(size_buf) as usize;

            // NOTE(review): chunk_size comes straight from the file; a
            // corrupted/hostile header can request up to a ~4 GiB allocation
            // here before GCM authentication rejects the chunk. Consider a
            // sanity cap if inputs can be untrusted — TODO confirm threat model.
            let mut encrypted = vec![0u8; chunk_size];
            infile.read_exact(&mut encrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to read chunk {}: {}", chunk_index, e))
            })?;

            // Re-derive the same per-chunk nonce used during encryption.
            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            // GCM decryption authenticates the chunk; failure means
            // tampering, truncation, or a wrong key/nonce.
            let decrypted = cipher.decrypt(nonce, encrypted.as_ref()).map_err(|e| {
                PyValueError::new_err(format!("Decrypt chunk {} failed: {}", chunk_index, e))
            })?;

            outfile.write_all(&decrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to write chunk {}: {}", chunk_index, e))
            })?;
        }

        Ok(chunk_count)
    })
}
|
||||
@@ -1,90 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use pyo3::exceptions::PyIOError;
|
||||
use pyo3::prelude::*;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
|
||||
const CHUNK_SIZE: usize = 65536;
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_file(py: Python<'_>, path: &str) -> PyResult<String> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn sha256_file(py: Python<'_>, path: &str) -> PyResult<String> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn md5_sha256_file(py: Python<'_>, path: &str) -> PyResult<(String, String)> {
|
||||
let path = path.to_owned();
|
||||
py.detach(move || {
|
||||
let mut file = File::open(&path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
|
||||
let mut md5_hasher = Md5::new();
|
||||
let mut sha_hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
md5_hasher.update(&buf[..n]);
|
||||
sha_hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok((
|
||||
format!("{:x}", md5_hasher.finalize()),
|
||||
format!("{:x}", sha_hasher.finalize()),
|
||||
))
|
||||
})
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
mod crypto;
|
||||
mod hashing;
|
||||
mod metadata;
|
||||
mod sigv4;
|
||||
mod storage;
|
||||
mod streaming;
|
||||
mod validation;
|
||||
|
||||
use pyo3::prelude::*;
|
||||
|
||||
#[pymodule]
/// Native extension module: registers every Rust fast-path function so it is
/// importable from Python as `myfsio_core.<name>`.
mod myfsio_core {
    use super::*;

    #[pymodule_init]
    fn init(m: &Bound<'_, PyModule>) -> PyResult<()> {
        // AWS SigV4 request signing / verification helpers.
        m.add_function(wrap_pyfunction!(sigv4::verify_sigv4_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::derive_signing_key, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::compute_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::build_string_to_sign, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::constant_time_compare, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::clear_signing_key_cache, m)?)?;

        // Streaming / in-memory MD5 and SHA-256 digests.
        m.add_function(wrap_pyfunction!(hashing::md5_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_sha256_file, m)?)?;

        // S3-style name validation.
        m.add_function(wrap_pyfunction!(validation::validate_object_key, m)?)?;
        m.add_function(wrap_pyfunction!(validation::validate_bucket_name, m)?)?;

        // JSON index read path.
        m.add_function(wrap_pyfunction!(metadata::read_index_entry, m)?)?;

        // JSON index write path and bucket filesystem scans.
        m.add_function(wrap_pyfunction!(storage::write_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::delete_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::check_bucket_contents, m)?)?;
        m.add_function(wrap_pyfunction!(storage::shallow_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::bucket_stats_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::search_objects_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::build_object_cache, m)?)?;

        // Streaming upload / multipart assembly with on-the-fly MD5.
        m.add_function(wrap_pyfunction!(streaming::stream_to_file_with_md5, m)?)?;
        m.add_function(wrap_pyfunction!(streaming::assemble_parts_with_md5, m)?)?;

        // Chunked AES-256-GCM file encryption.
        m.add_function(wrap_pyfunction!(crypto::encrypt_stream_chunked, m)?)?;
        m.add_function(wrap_pyfunction!(crypto::decrypt_stream_chunked, m)?)?;

        Ok(())
    }
}
|
||||
@@ -1,71 +0,0 @@
|
||||
use pyo3::exceptions::PyValueError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyDict, PyList, PyString};
|
||||
use serde_json::Value;
|
||||
use std::fs;
|
||||
|
||||
const MAX_DEPTH: u32 = 64;
|
||||
|
||||
fn value_to_py(py: Python<'_>, v: &Value, depth: u32) -> PyResult<Py<PyAny>> {
|
||||
if depth > MAX_DEPTH {
|
||||
return Err(PyValueError::new_err("JSON nesting too deep"));
|
||||
}
|
||||
match v {
|
||||
Value::Null => Ok(py.None()),
|
||||
Value::Bool(b) => Ok((*b).into_pyobject(py)?.to_owned().into_any().unbind()),
|
||||
Value::Number(n) => {
|
||||
if let Some(i) = n.as_i64() {
|
||||
Ok(i.into_pyobject(py)?.into_any().unbind())
|
||||
} else if let Some(f) = n.as_f64() {
|
||||
Ok(f.into_pyobject(py)?.into_any().unbind())
|
||||
} else {
|
||||
Ok(py.None())
|
||||
}
|
||||
}
|
||||
Value::String(s) => Ok(PyString::new(py, s).into_any().unbind()),
|
||||
Value::Array(arr) => {
|
||||
let list = PyList::empty(py);
|
||||
for item in arr {
|
||||
list.append(value_to_py(py, item, depth + 1)?)?;
|
||||
}
|
||||
Ok(list.into_any().unbind())
|
||||
}
|
||||
Value::Object(map) => {
|
||||
let dict = PyDict::new(py);
|
||||
for (k, val) in map {
|
||||
dict.set_item(k, value_to_py(py, val, depth + 1)?)?;
|
||||
}
|
||||
Ok(dict.into_any().unbind())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn read_index_entry(
|
||||
py: Python<'_>,
|
||||
path: &str,
|
||||
entry_name: &str,
|
||||
) -> PyResult<Option<Py<PyAny>>> {
|
||||
let path_owned = path.to_owned();
|
||||
let entry_owned = entry_name.to_owned();
|
||||
|
||||
let entry: Option<Value> = py.detach(move || -> PyResult<Option<Value>> {
|
||||
let content = match fs::read_to_string(&path_owned) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return Ok(None),
|
||||
};
|
||||
let parsed: Value = match serde_json::from_str(&content) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return Ok(None),
|
||||
};
|
||||
match parsed {
|
||||
Value::Object(mut map) => Ok(map.remove(&entry_owned)),
|
||||
_ => Ok(None),
|
||||
}
|
||||
})?;
|
||||
|
||||
match entry {
|
||||
Some(val) => Ok(Some(value_to_py(py, &val, 0)?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
@@ -1,193 +0,0 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use pyo3::prelude::*;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// A derived SigV4 signing key together with its creation time, used for
/// lazy TTL-based expiry inside `SIGNING_KEY_CACHE`.
struct CacheEntry {
    // Raw HMAC-SHA256 signing-key bytes for one (secret, date, region, service) scope.
    key: Vec<u8>,
    // When the key was derived; entries older than CACHE_TTL_SECS are evicted on lookup.
    created: Instant,
}
|
||||
|
||||
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
|
||||
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
|
||||
|
||||
const CACHE_TTL_SECS: u64 = 60;
|
||||
|
||||
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
|
||||
.remove(b'-')
|
||||
.remove(b'_')
|
||||
.remove(b'.')
|
||||
.remove(b'~');
|
||||
|
||||
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
|
||||
mac.update(msg);
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
fn sha256_hex(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
fn aws_uri_encode(input: &str) -> String {
|
||||
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
|
||||
}
|
||||
|
||||
/// Derive the SigV4 signing key for (secret, date, region, service), with an
/// LRU + TTL cache so repeated requests in the same scope skip the four
/// chained HMAC rounds.
///
/// NOTE(review): the cache key embeds the raw secret key, so secrets linger
/// in the LRU until eviction/clear — presumably acceptable since the derived
/// key is equally sensitive, but confirm against the threat model.
fn derive_signing_key_cached(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    let cache_key = (
        secret_key.to_owned(),
        date_stamp.to_owned(),
        region.to_owned(),
        service.to_owned(),
    );

    // Fast path: fresh cache hit. The lock is scoped so it is not held
    // across the (relatively slow) derivation below.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        if let Some(entry) = cache.get(&cache_key) {
            if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
                return entry.key.clone();
            }
            // Stale entry: drop it so it is rederived.
            cache.pop(&cache_key);
        }
    }

    // Standard SigV4 key schedule:
    // kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
    let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, service.as_bytes());
    let k_signing = hmac_sha256(&k_service, b"aws4_request");

    // Publish to the cache; a racing deriver may overwrite with an
    // identical value, which is harmless.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        cache.put(
            cache_key,
            CacheEntry {
                key: k_signing.clone(),
                created: Instant::now(),
            },
        );
    }

    k_signing
}
|
||||
|
||||
/// Compare two byte slices without short-circuiting on the first differing
/// byte. A length mismatch returns early, which reveals only the length,
/// never any byte contents.
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    let diff = a
        .iter()
        .zip(b.iter())
        .fold(0u8, |acc, (x, y)| acc | (x ^ y));
    diff == 0
}
|
||||
|
||||
#[pyfunction]
/// Verify an AWS SigV4 request signature end-to-end: rebuild the canonical
/// request, hash it into the string-to-sign, derive the signing key (cached),
/// sign, and compare with `provided_signature` in constant time.
///
/// The caller supplies already-split request components; `header_values`
/// must be in the order listed in `signed_headers_str`.
pub fn verify_sigv4_signature(
    method: &str,
    canonical_uri: &str,
    query_params: Vec<(String, String)>,
    signed_headers_str: &str,
    header_values: Vec<(String, String)>,
    payload_hash: &str,
    amz_date: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
    secret_key: &str,
    provided_signature: &str,
) -> bool {
    // Sort query params by name, then value.
    // NOTE(review): sorting happens on the raw (pre-encoded) strings; the
    // SigV4 spec sorts by URI-encoded name — identical for most keys, but
    // confirm for keys containing characters that reorder after encoding.
    let mut sorted_params = query_params;
    sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));

    let canonical_query_string = sorted_params
        .iter()
        .map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
        .collect::<Vec<_>>()
        .join("&");

    // Canonical headers: lowercase name, whitespace-collapsed value,
    // one "name:value\n" per header.
    let mut canonical_headers = String::new();
    for (name, value) in &header_values {
        let lower_name = name.to_lowercase();
        let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
        // An empty Expect header is treated as "100-continue" — presumably to
        // match a client that strips the value; TODO confirm against callers.
        let final_value = if lower_name == "expect" && normalized.is_empty() {
            "100-continue"
        } else {
            &normalized
        };
        canonical_headers.push_str(&lower_name);
        canonical_headers.push(':');
        canonical_headers.push_str(final_value);
        canonical_headers.push('\n');
    }

    // Canonical request per the SigV4 layout (six newline-joined fields).
    let canonical_request = format!(
        "{}\n{}\n{}\n{}\n{}\n{}",
        method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str, payload_hash
    );

    let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    );

    let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
    let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let calculated_hex = hex::encode(&calculated);

    // Constant-time comparison avoids leaking how many leading hex digits match.
    constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
|
||||
|
||||
#[pyfunction]
/// Python-facing wrapper around the cached SigV4 signing-key derivation.
/// Returns the raw HMAC-SHA256 signing-key bytes for the given scope.
pub fn derive_signing_key(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    derive_signing_key_cached(secret_key, date_stamp, region, service)
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
|
||||
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
|
||||
hex::encode(sig)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn build_string_to_sign(
|
||||
amz_date: &str,
|
||||
credential_scope: &str,
|
||||
canonical_request: &str,
|
||||
) -> String {
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
)
|
||||
}
|
||||
|
||||
#[pyfunction]
/// Constant-time string equality check exposed to Python (for comparing
/// signatures/tokens without a timing side channel).
pub fn constant_time_compare(a: &str, b: &str) -> bool {
    constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}
|
||||
|
||||
#[pyfunction]
/// Drop every cached SigV4 signing key (e.g. after credential rotation).
pub fn clear_signing_key_cache() {
    SIGNING_KEY_CACHE.lock().clear();
}
|
||||
@@ -1,817 +0,0 @@
|
||||
use pyo3::exceptions::PyIOError;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::types::{PyDict, PyList, PyString, PyTuple};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::time::SystemTime;
|
||||
|
||||
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||
|
||||
/// Convert a `SystemTime` to fractional seconds since the Unix epoch.
/// Times before the epoch collapse to 0.0 rather than erroring.
fn system_time_to_epoch(t: SystemTime) -> f64 {
    match t.duration_since(std::time::UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs_f64(),
        Err(_) => 0.0,
    }
}
|
||||
|
||||
/// Pull the `__etag__` value out of raw metadata JSON without a full parse:
/// locate the quoted key, then slice out the next double-quoted string.
/// Returns None when the key or a well-formed quoted value is absent.
fn extract_etag_from_meta_bytes(content: &[u8]) -> Option<String> {
    const MARKER: &[u8] = b"\"__etag__\"";
    let key_pos = content
        .windows(MARKER.len())
        .position(|window| window == MARKER)?;
    let tail = &content[key_pos + MARKER.len()..];
    // First quote after the key opens the value string.
    let open_quote = tail.iter().position(|&byte| byte == b'"')?;
    let value = &tail[open_quote + 1..];
    let close_quote = value.iter().position(|&byte| byte == b'"')?;
    std::str::from_utf8(&value[..close_quote])
        .ok()
        .map(str::to_owned)
}
|
||||
|
||||
/// Depth-first check for whether any regular file exists anywhere under
/// `root`. Unreadable directories are skipped silently; symlinked
/// directories are not descended into.
fn has_any_file(root: &str) -> bool {
    let start = Path::new(root);
    if !start.is_dir() {
        return false;
    }
    let mut pending = vec![start.to_path_buf()];
    while let Some(dir) = pending.pop() {
        let Ok(entries) = fs::read_dir(&dir) else {
            continue;
        };
        for entry in entries.flatten() {
            let Ok(kind) = entry.file_type() else {
                continue;
            };
            if kind.is_file() {
                return true;
            }
            if kind.is_dir() && !kind.is_symlink() {
                pending.push(entry.path());
            }
        }
    }
    false
}
|
||||
|
||||
#[pyfunction]
/// Insert or replace `entry_name` in the JSON index file at `path`.
///
/// `entry_data_json` is parsed as JSON and stored under the entry name;
/// missing parent directories are created and a missing or corrupt index is
/// replaced with a fresh object.
///
/// NOTE(review): the read-modify-write is neither atomic nor locked —
/// concurrent writers can lose updates and a crash mid-`fs::write` can leave
/// a truncated index. Presumably callers serialize access on the Python
/// side; confirm, or consider write-to-temp + rename.
pub fn write_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
    entry_data_json: &str,
) -> PyResult<()> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();
    let data_owned = entry_data_json.to_owned();

    py.detach(move || -> PyResult<()> {
        // Validate the payload before touching the filesystem.
        let entry_value: Value = serde_json::from_str(&data_owned)
            .map_err(|e| PyIOError::new_err(format!("Failed to parse entry data: {}", e)))?;

        // Best-effort parent creation; a failure surfaces via fs::write below.
        if let Some(parent) = Path::new(&path_owned).parent() {
            let _ = fs::create_dir_all(parent);
        }

        // Missing or unparsable index starts over as an empty object.
        let mut index_data: serde_json::Map<String, Value> = match fs::read_to_string(&path_owned)
        {
            Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
            Err(_) => serde_json::Map::new(),
        };

        index_data.insert(entry_owned, entry_value);

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(())
    })
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyResult<bool> {
|
||||
let path_owned = path.to_owned();
|
||||
let entry_owned = entry_name.to_owned();
|
||||
|
||||
py.detach(move || -> PyResult<bool> {
|
||||
let content = match fs::read_to_string(&path_owned) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return Ok(false),
|
||||
};
|
||||
|
||||
let mut index_data: serde_json::Map<String, Value> =
|
||||
match serde_json::from_str(&content) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return Ok(false),
|
||||
};
|
||||
|
||||
if index_data.remove(&entry_owned).is_none() {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
if index_data.is_empty() {
|
||||
let _ = fs::remove_file(&path_owned);
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let serialized = serde_json::to_string(&Value::Object(index_data))
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;
|
||||
|
||||
fs::write(&path_owned, serialized)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
|
||||
|
||||
Ok(true)
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn check_bucket_contents(
|
||||
py: Python<'_>,
|
||||
bucket_path: &str,
|
||||
version_roots: Vec<String>,
|
||||
multipart_roots: Vec<String>,
|
||||
) -> PyResult<(bool, bool, bool)> {
|
||||
let bucket_owned = bucket_path.to_owned();
|
||||
|
||||
py.detach(move || -> PyResult<(bool, bool, bool)> {
|
||||
let mut has_objects = false;
|
||||
let bucket_p = Path::new(&bucket_owned);
|
||||
if bucket_p.is_dir() {
|
||||
let mut stack = vec![bucket_p.to_path_buf()];
|
||||
'obj_scan: while let Some(current) = stack.pop() {
|
||||
let is_root = current == bucket_p;
|
||||
let entries = match fs::read_dir(¤t) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry_result in entries {
|
||||
let entry = match entry_result {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let ft = match entry.file_type() {
|
||||
Ok(ft) => ft,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if is_root {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if INTERNAL_FOLDERS.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if ft.is_file() && !ft.is_symlink() {
|
||||
has_objects = true;
|
||||
break 'obj_scan;
|
||||
}
|
||||
if ft.is_dir() && !ft.is_symlink() {
|
||||
stack.push(entry.path());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut has_versions = false;
|
||||
for root in &version_roots {
|
||||
if has_versions {
|
||||
break;
|
||||
}
|
||||
has_versions = has_any_file(root);
|
||||
}
|
||||
|
||||
let mut has_multipart = false;
|
||||
for root in &multipart_roots {
|
||||
if has_multipart {
|
||||
break;
|
||||
}
|
||||
has_multipart = has_any_file(root);
|
||||
}
|
||||
|
||||
Ok((has_objects, has_versions, has_multipart))
|
||||
})
|
||||
}
|
||||
|
||||
#[pyfunction]
/// Non-recursive listing of one directory level for S3-style listings.
///
/// Returns a dict with:
/// - "files": sorted `(key, size, mtime_epoch, etag_or_None)` tuples, where
///   `key` is `prefix + file name` and the etag comes from `meta_cache_json`
///   (a JSON object mapping key -> etag);
/// - "dirs": sorted `prefix + name + "/"` strings for subdirectories;
/// - "merged_keys": files and dirs merged into one sorted `(key, is_dir)` list.
///
/// Internal folders and symlinks are skipped; an unreadable target directory
/// yields empty results rather than an error.
pub fn shallow_scan(
    py: Python<'_>,
    target_dir: &str,
    prefix: &str,
    meta_cache_json: &str,
) -> PyResult<Py<PyAny>> {
    let target_owned = target_dir.to_owned();
    let prefix_owned = prefix.to_owned();
    let cache_owned = meta_cache_json.to_owned();

    // Phase 1 (GIL released): filesystem scan + sort/merge on plain Rust types.
    let result: (
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    ) = py.detach(move || -> PyResult<(
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    )> {
        // Bad cache JSON degrades to "no etags" rather than failing the scan.
        let meta_cache: HashMap<String, String> =
            serde_json::from_str(&cache_owned).unwrap_or_default();

        let mut files: Vec<(String, u64, f64, Option<String>)> = Vec::new();
        let mut dirs: Vec<String> = Vec::new();

        let entries = match fs::read_dir(&target_owned) {
            Ok(e) => e,
            Err(_) => return Ok((files, dirs, Vec::new())),
        };

        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            // Non-UTF8 names cannot become S3 keys; skip them.
            let name = match entry.file_name().into_string() {
                Ok(n) => n,
                Err(_) => continue,
            };
            if INTERNAL_FOLDERS.contains(&name.as_str()) {
                continue;
            }
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_dir() && !ft.is_symlink() {
                let cp = format!("{}{}/", prefix_owned, name);
                dirs.push(cp);
            } else if ft.is_file() && !ft.is_symlink() {
                let key = format!("{}{}", prefix_owned, name);
                let md = match entry.metadata() {
                    Ok(m) => m,
                    Err(_) => continue,
                };
                let size = md.len();
                let mtime = md
                    .modified()
                    .map(system_time_to_epoch)
                    .unwrap_or(0.0);
                let etag = meta_cache.get(&key).cloned();
                files.push((key, size, mtime, etag));
            }
        }

        files.sort_by(|a, b| a.0.cmp(&b.0));
        dirs.sort();

        // Two-pointer merge of the two already-sorted lists; files win ties.
        let mut merged: Vec<(String, bool)> = Vec::with_capacity(files.len() + dirs.len());
        let mut fi = 0;
        let mut di = 0;
        while fi < files.len() && di < dirs.len() {
            if files[fi].0 <= dirs[di] {
                merged.push((files[fi].0.clone(), false));
                fi += 1;
            } else {
                merged.push((dirs[di].clone(), true));
                di += 1;
            }
        }
        while fi < files.len() {
            merged.push((files[fi].0.clone(), false));
            fi += 1;
        }
        while di < dirs.len() {
            merged.push((dirs[di].clone(), true));
            di += 1;
        }

        Ok((files, dirs, merged))
    })?;

    let (files, dirs, merged) = result;

    // Phase 2 (GIL held): convert results into Python objects.
    let dict = PyDict::new(py);

    let files_list = PyList::empty(py);
    for (key, size, mtime, etag) in &files {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        files_list.append(tuple)?;
    }
    dict.set_item("files", files_list)?;

    let dirs_list = PyList::empty(py);
    for d in &dirs {
        dirs_list.append(PyString::new(py, d))?;
    }
    dict.set_item("dirs", dirs_list)?;

    let merged_list = PyList::empty(py);
    for (key, is_dir) in &merged {
        let bool_obj: Py<PyAny> = if *is_dir {
            true.into_pyobject(py)?.to_owned().into_any().unbind()
        } else {
            false.into_pyobject(py)?.to_owned().into_any().unbind()
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            bool_obj,
        ])?;
        merged_list.append(tuple)?;
    }
    dict.set_item("merged_keys", merged_list)?;

    Ok(dict.into_any().unbind())
}
|
||||
|
||||
#[pyfunction]
/// Walk a bucket and its versions tree, returning
/// `(object_count, total_bytes, version_count, version_bytes)`.
///
/// Objects are regular files under `bucket_path` (internal folders at the
/// bucket root are excluded); versions are only `*.bin` files under
/// `versions_root`. Symlinks are never followed and unreadable directories
/// are skipped.
pub fn bucket_stats_scan(
    py: Python<'_>,
    bucket_path: &str,
    versions_root: &str,
) -> PyResult<(u64, u64, u64, u64)> {
    let bucket_owned = bucket_path.to_owned();
    let versions_owned = versions_root.to_owned();

    // Entire walk happens with the GIL released.
    py.detach(move || -> PyResult<(u64, u64, u64, u64)> {
        let mut object_count: u64 = 0;
        let mut total_bytes: u64 = 0;

        let bucket_p = Path::new(&bucket_owned);
        if bucket_p.is_dir() {
            // Iterative DFS over the bucket tree.
            let mut stack = vec![bucket_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let is_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    // Bookkeeping folders are only skipped at the top level.
                    if is_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        object_count += 1;
                        // Size is best-effort; a failed stat still counts the object.
                        if let Ok(md) = entry.metadata() {
                            total_bytes += md.len();
                        }
                    }
                }
            }
        }

        let mut version_count: u64 = 0;
        let mut version_bytes: u64 = 0;

        let versions_p = Path::new(&versions_owned);
        if versions_p.is_dir() {
            let mut stack = vec![versions_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        // Only version payload files (*.bin) are counted;
                        // sidecar metadata files are ignored.
                        if let Some(name) = entry.file_name().to_str() {
                            if name.ends_with(".bin") {
                                version_count += 1;
                                if let Ok(md) = entry.metadata() {
                                    version_bytes += md.len();
                                }
                            }
                        }
                    }
                }
            }
        }

        Ok((object_count, total_bytes, version_count, version_bytes))
    })
}
|
||||
|
||||
#[pyfunction]
#[pyo3(signature = (bucket_path, search_root, query, limit))]
/// Case-insensitive substring search over object keys under `search_root`.
///
/// Keys are derived by stripping `bucket_path` (plus one separator byte)
/// from each file's full path and normalizing `\` to `/`. Scanning stops
/// after `limit * 4` raw matches; results are then sorted by key and cut to
/// `limit`. Returns a dict `{"results": [(key, size, mtime_epoch), ...],
/// "truncated": bool}`.
pub fn search_objects_scan(
    py: Python<'_>,
    bucket_path: &str,
    search_root: &str,
    query: &str,
    limit: usize,
) -> PyResult<Py<PyAny>> {
    let bucket_owned = bucket_path.to_owned();
    let search_owned = search_root.to_owned();
    let query_owned = query.to_owned();

    // Phase 1 (GIL released): walk the tree and collect raw matches.
    let result: (Vec<(String, u64, f64)>, bool) = py.detach(
        move || -> PyResult<(Vec<(String, u64, f64)>, bool)> {
            let query_lower = query_owned.to_lowercase();
            // +1 skips the path separator after the bucket path.
            // NOTE(review): this is a byte offset into a lossy path string;
            // it assumes bucket_path is valid UTF-8 — slicing at a
            // non-char boundary would panic. Confirm inputs are ASCII-safe.
            let bucket_len = bucket_owned.len() + 1;
            // Over-collect 4x so sorting + truncation can still report
            // truncation meaningfully.
            let scan_limit = limit * 4;
            let mut matched: usize = 0;
            let mut results: Vec<(String, u64, f64)> = Vec::new();

            let search_p = Path::new(&search_owned);
            if !search_p.is_dir() {
                return Ok((results, false));
            }

            let bucket_p = Path::new(&bucket_owned);
            let mut stack = vec![search_p.to_path_buf()];

            'scan: while let Some(current) = stack.pop() {
                // Internal folders are only skipped directly under the bucket root.
                let is_bucket_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    if is_bucket_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        let full_path = entry.path();
                        let full_str = full_path.to_string_lossy();
                        if full_str.len() <= bucket_len {
                            continue;
                        }
                        // Windows-style separators are normalized into S3 keys.
                        let key = full_str[bucket_len..].replace('\\', "/");
                        if key.to_lowercase().contains(&query_lower) {
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                results.push((key, size, mtime));
                                matched += 1;
                            }
                        }
                        if matched >= scan_limit {
                            break 'scan;
                        }
                    }
                }
            }

            results.sort_by(|a, b| a.0.cmp(&b.0));
            let truncated = results.len() > limit;
            results.truncate(limit);

            Ok((results, truncated))
        },
    )?;

    let (results, truncated) = result;

    // Phase 2 (GIL held): convert to Python objects.
    let dict = PyDict::new(py);

    let results_list = PyList::empty(py);
    for (key, size, mtime) in &results {
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
        ])?;
        results_list.append(tuple)?;
    }
    dict.set_item("results", results_list)?;
    dict.set_item("truncated", truncated)?;

    Ok(dict.into_any().unbind())
}
|
||||
|
||||
/// Build (or refresh) the per-bucket object cache and ETag index.
///
/// Runs entirely with the GIL released (`py.detach`). Three phases:
///   1. Load the persisted ETag index JSON (`etag_index_path`) if present.
///   2. Decide whether the index is stale: any `*.meta.json` / `_index.json`
///      under `meta_root` newer than the index file forces a rebuild; a
///      missing/empty index with no metadata dir also forces one.
///   3. Walk `bucket_path` (skipping `INTERNAL_FOLDERS`) and collect
///      `(key, size, mtime, etag)` for every regular, non-symlink file.
///
/// Returns a dict: {"etag_cache": {key: etag}, "objects": [(key, size,
/// mtime, etag-or-None)], "etag_cache_changed": bool}.
#[pyfunction]
pub fn build_object_cache(
    py: Python<'_>,
    bucket_path: &str,
    meta_root: &str,
    etag_index_path: &str,
) -> PyResult<Py<PyAny>> {
    // Owned copies so the detached closure can be 'static / Send.
    let bucket_owned = bucket_path.to_owned();
    let meta_owned = meta_root.to_owned();
    let index_path_owned = etag_index_path.to_owned();

    let result: (HashMap<String, String>, Vec<(String, u64, f64, Option<String>)>, bool) =
        py.detach(move || -> PyResult<(
            HashMap<String, String>,
            Vec<(String, u64, f64, Option<String>)>,
            bool,
        )> {
            let mut meta_cache: HashMap<String, String> = HashMap::new();
            let mut index_mtime: f64 = 0.0;
            let mut etag_cache_changed = false;

            // Phase 1: load the persisted key -> etag index, remembering its
            // mtime so staleness can be judged against the metadata tree.
            let index_p = Path::new(&index_path_owned);
            if index_p.is_file() {
                if let Ok(md) = fs::metadata(&index_path_owned) {
                    index_mtime = md
                        .modified()
                        .map(system_time_to_epoch)
                        .unwrap_or(0.0);
                }
                // Parse failures are deliberately ignored: a corrupt index is
                // treated the same as an empty one and rebuilt below.
                if let Ok(content) = fs::read_to_string(&index_path_owned) {
                    if let Ok(parsed) = serde_json::from_str::<HashMap<String, String>>(&content) {
                        meta_cache = parsed;
                    }
                }
            }

            let meta_p = Path::new(&meta_owned);
            let mut needs_rebuild = false;

            // Phase 2: staleness check. Only meaningful when both the
            // metadata dir and a previously written index exist.
            if meta_p.is_dir() && index_mtime > 0.0 {
                // Recursive scan: true as soon as any metadata file is newer
                // than the index (short-circuits on first hit).
                fn check_newer(dir: &Path, index_mtime: f64) -> bool {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return false,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            if check_newer(&entry.path(), index_mtime) {
                                return true;
                            }
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                if name.ends_with(".meta.json") || name == "_index.json" {
                                    if let Ok(md) = entry.metadata() {
                                        let mt = md
                                            .modified()
                                            .map(system_time_to_epoch)
                                            .unwrap_or(0.0);
                                        if mt > index_mtime {
                                            return true;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    false
                }
                needs_rebuild = check_newer(meta_p, index_mtime);
            } else if meta_cache.is_empty() {
                // No usable index loaded and nothing to compare against:
                // rebuild from scratch (no-op if the meta dir is absent).
                needs_rebuild = true;
            }

            // Phase 3a: rebuild the etag map from the metadata tree.
            if needs_rebuild && meta_p.is_dir() {
                // NOTE(review): `meta_str` is only used for its length; the
                // clone looks redundant — candidate for cleanup.
                let meta_str = meta_owned.clone();
                // +1 skips the path separator after the meta root.
                let meta_len = meta_str.len() + 1;
                let mut index_files: Vec<String> = Vec::new();
                let mut legacy_meta_files: Vec<(String, String)> = Vec::new();

                // Collects directory-level `_index.json` files and legacy
                // per-object `<key>.meta.json` files (key derived from the
                // path relative to the meta root).
                fn collect_meta(
                    dir: &Path,
                    meta_len: usize,
                    index_files: &mut Vec<String>,
                    legacy_meta_files: &mut Vec<(String, String)>,
                ) {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            collect_meta(&entry.path(), meta_len, index_files, legacy_meta_files);
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                let full = entry.path().to_string_lossy().to_string();
                                if name == "_index.json" {
                                    index_files.push(full);
                                } else if name.ends_with(".meta.json") {
                                    if full.len() > meta_len {
                                        let rel = &full[meta_len..];
                                        // 10 == ".meta.json".len(); strip the
                                        // suffix and normalize separators.
                                        let key = if rel.len() > 10 {
                                            rel[..rel.len() - 10].replace('\\', "/")
                                        } else {
                                            continue;
                                        };
                                        legacy_meta_files.push((key, full));
                                    }
                                }
                            }
                        }
                    }
                }

                collect_meta(
                    meta_p,
                    meta_len,
                    &mut index_files,
                    &mut legacy_meta_files,
                );

                // Rebuilding from disk, so discard whatever was loaded.
                meta_cache.clear();

                // Preferred source: directory `_index.json` files, whose
                // entries hold the etag under metadata.__etag__.
                for idx_path in &index_files {
                    if let Ok(content) = fs::read_to_string(idx_path) {
                        if let Ok(idx_data) = serde_json::from_str::<HashMap<String, Value>>(&content) {
                            // Directory prefix of the keys = index path
                            // relative to the meta root, minus the file name.
                            let rel_dir = if idx_path.len() > meta_len {
                                let r = &idx_path[meta_len..];
                                r.replace('\\', "/")
                            } else {
                                String::new()
                            };
                            let dir_prefix = if rel_dir.ends_with("/_index.json") {
                                &rel_dir[..rel_dir.len() - "/_index.json".len()]
                            } else {
                                ""
                            };
                            for (entry_name, entry_data) in &idx_data {
                                let key = if dir_prefix.is_empty() {
                                    entry_name.clone()
                                } else {
                                    format!("{}/{}", dir_prefix, entry_name)
                                };
                                if let Some(meta_obj) = entry_data.get("metadata") {
                                    if let Some(etag) = meta_obj.get("__etag__") {
                                        if let Some(etag_str) = etag.as_str() {
                                            meta_cache.insert(key, etag_str.to_owned());
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                // Fallback: legacy per-object meta files, but never overwrite
                // an etag already found in an `_index.json`.
                for (key, path) in &legacy_meta_files {
                    if meta_cache.contains_key(key) {
                        continue;
                    }
                    if let Ok(content) = fs::read(path) {
                        if let Some(etag) = extract_etag_from_meta_bytes(&content) {
                            meta_cache.insert(key.clone(), etag);
                        }
                    }
                }

                // Signal the caller to persist the rebuilt index.
                etag_cache_changed = true;
            }

            // Phase 3b: walk the bucket tree and list every object file.
            let bucket_p = Path::new(&bucket_owned);
            // +1 skips the separator after the bucket root.
            let bucket_len = bucket_owned.len() + 1;
            let mut objects: Vec<(String, u64, f64, Option<String>)> = Vec::new();

            if bucket_p.is_dir() {
                // Iterative DFS (explicit stack), symlinks never followed.
                let mut stack = vec![bucket_p.to_path_buf()];
                while let Some(current) = stack.pop() {
                    let entries = match fs::read_dir(&current) {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            // Skip internal bookkeeping folders: check the
                            // first path component relative to the bucket
                            // root (or the bare name for direct children).
                            if full_str.len() > bucket_len {
                                let first_part: &str = if let Some(sep_pos) =
                                    full_str[bucket_len..].find(|c: char| c == '\\' || c == '/')
                                {
                                    &full_str[bucket_len..bucket_len + sep_pos]
                                } else {
                                    &full_str[bucket_len..]
                                };
                                if INTERNAL_FOLDERS.contains(&first_part) {
                                    continue;
                                }
                            } else if let Some(name) = entry.file_name().to_str() {
                                if INTERNAL_FOLDERS.contains(&name) {
                                    continue;
                                }
                            }
                            stack.push(full);
                        } else if ft.is_file() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            if full_str.len() <= bucket_len {
                                continue;
                            }
                            let rel = &full_str[bucket_len..];
                            // Same internal-folder filter, applied to files
                            // that live directly under an internal folder.
                            let first_part: &str =
                                if let Some(sep_pos) = rel.find(|c: char| c == '\\' || c == '/') {
                                    &rel[..sep_pos]
                                } else {
                                    rel
                                };
                            if INTERNAL_FOLDERS.contains(&first_part) {
                                continue;
                            }
                            // Object keys always use forward slashes.
                            let key = rel.replace('\\', "/");
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                // Etag lookup is best-effort; None when the
                                // metadata tree has no entry for this key.
                                let etag = meta_cache.get(&key).cloned();
                                objects.push((key, size, mtime, etag));
                            }
                        }
                    }
                }
            }

            Ok((meta_cache, objects, etag_cache_changed))
        })?;

    let (meta_cache, objects, etag_cache_changed) = result;

    // Re-acquired the GIL: convert the plain Rust results into Python objects.
    let dict = PyDict::new(py);

    let cache_dict = PyDict::new(py);
    for (k, v) in &meta_cache {
        cache_dict.set_item(k, v)?;
    }
    dict.set_item("etag_cache", cache_dict)?;

    let objects_list = PyList::empty(py);
    for (key, size, mtime, etag) in &objects {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        objects_list.append(tuple)?;
    }
    dict.set_item("objects", objects_list)?;
    dict.set_item("etag_cache_changed", etag_cache_changed)?;

    Ok(dict.into_any().unbind())
}
|
||||
@@ -1,112 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use pyo3::exceptions::{PyIOError, PyValueError};
|
||||
use pyo3::prelude::*;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{Read, Write};
|
||||
use uuid::Uuid;
|
||||
|
||||
const DEFAULT_CHUNK_SIZE: usize = 262144;
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (stream, tmp_dir, chunk_size=DEFAULT_CHUNK_SIZE))]
|
||||
pub fn stream_to_file_with_md5(
|
||||
py: Python<'_>,
|
||||
stream: &Bound<'_, PyAny>,
|
||||
tmp_dir: &str,
|
||||
chunk_size: usize,
|
||||
) -> PyResult<(String, String, u64)> {
|
||||
let chunk_size = if chunk_size == 0 {
|
||||
DEFAULT_CHUNK_SIZE
|
||||
} else {
|
||||
chunk_size
|
||||
};
|
||||
|
||||
fs::create_dir_all(tmp_dir)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create tmp dir: {}", e)))?;
|
||||
|
||||
let tmp_name = format!("{}.tmp", Uuid::new_v4().as_hyphenated());
|
||||
let tmp_path_buf = std::path::PathBuf::from(tmp_dir).join(&tmp_name);
|
||||
let tmp_path = tmp_path_buf.to_string_lossy().into_owned();
|
||||
|
||||
let mut file = File::create(&tmp_path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create temp file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut total_bytes: u64 = 0;
|
||||
|
||||
let result: PyResult<()> = (|| {
|
||||
loop {
|
||||
let chunk: Vec<u8> = stream.call_method1("read", (chunk_size,))?.extract()?;
|
||||
if chunk.is_empty() {
|
||||
break;
|
||||
}
|
||||
hasher.update(&chunk);
|
||||
file.write_all(&chunk)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
|
||||
total_bytes += chunk.len() as u64;
|
||||
|
||||
py.check_signals()?;
|
||||
}
|
||||
file.sync_all()
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
|
||||
Ok(())
|
||||
})();
|
||||
|
||||
if let Err(e) = result {
|
||||
drop(file);
|
||||
let _ = fs::remove_file(&tmp_path);
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
drop(file);
|
||||
|
||||
let md5_hex = format!("{:x}", hasher.finalize());
|
||||
Ok((tmp_path, md5_hex, total_bytes))
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn assemble_parts_with_md5(
|
||||
py: Python<'_>,
|
||||
part_paths: Vec<String>,
|
||||
dest_path: &str,
|
||||
) -> PyResult<String> {
|
||||
if part_paths.is_empty() {
|
||||
return Err(PyValueError::new_err("No parts to assemble"));
|
||||
}
|
||||
|
||||
let dest = dest_path.to_owned();
|
||||
let parts = part_paths;
|
||||
|
||||
py.detach(move || {
|
||||
if let Some(parent) = std::path::Path::new(&dest).parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create dest dir: {}", e)))?;
|
||||
}
|
||||
|
||||
let mut target = File::create(&dest)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to create dest file: {}", e)))?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; 1024 * 1024];
|
||||
|
||||
for part_path in &parts {
|
||||
let mut part = File::open(part_path)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to open part {}: {}", part_path, e)))?;
|
||||
loop {
|
||||
let n = part
|
||||
.read(&mut buf)
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to read part: {}", e)))?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
target
|
||||
.write_all(&buf[..n])
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
|
||||
}
|
||||
}
|
||||
|
||||
target.sync_all()
|
||||
.map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
|
||||
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
})
|
||||
}
|
||||
@@ -1,149 +0,0 @@
|
||||
use pyo3::prelude::*;
|
||||
use std::sync::LazyLock;
|
||||
use unicode_normalization::UnicodeNormalization;
|
||||
|
||||
const WINDOWS_RESERVED: &[&str] = &[
|
||||
"CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
|
||||
"COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
|
||||
"LPT9",
|
||||
];
|
||||
|
||||
const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];
|
||||
|
||||
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||
const SYSTEM_ROOT: &str = ".myfsio.sys";
|
||||
|
||||
static IP_REGEX: LazyLock<regex::Regex> =
|
||||
LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());
|
||||
|
||||
#[pyfunction]
|
||||
#[pyo3(signature = (object_key, max_length_bytes=1024, is_windows=false, reserved_prefixes=None))]
|
||||
pub fn validate_object_key(
|
||||
object_key: &str,
|
||||
max_length_bytes: usize,
|
||||
is_windows: bool,
|
||||
reserved_prefixes: Option<Vec<String>>,
|
||||
) -> PyResult<Option<String>> {
|
||||
if object_key.is_empty() {
|
||||
return Ok(Some("Object key required".to_string()));
|
||||
}
|
||||
|
||||
if object_key.contains('\0') {
|
||||
return Ok(Some("Object key contains null bytes".to_string()));
|
||||
}
|
||||
|
||||
let normalized: String = object_key.nfc().collect();
|
||||
|
||||
if normalized.len() > max_length_bytes {
|
||||
return Ok(Some(format!(
|
||||
"Object key exceeds maximum length of {} bytes",
|
||||
max_length_bytes
|
||||
)));
|
||||
}
|
||||
|
||||
if normalized.starts_with('/') || normalized.starts_with('\\') {
|
||||
return Ok(Some("Object key cannot start with a slash".to_string()));
|
||||
}
|
||||
|
||||
let parts: Vec<&str> = if cfg!(windows) || is_windows {
|
||||
normalized.split(['/', '\\']).collect()
|
||||
} else {
|
||||
normalized.split('/').collect()
|
||||
};
|
||||
|
||||
for part in &parts {
|
||||
if part.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if *part == ".." {
|
||||
return Ok(Some(
|
||||
"Object key contains parent directory references".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if *part == "." {
|
||||
return Ok(Some("Object key contains invalid segments".to_string()));
|
||||
}
|
||||
|
||||
if part.chars().any(|c| (c as u32) < 32) {
|
||||
return Ok(Some(
|
||||
"Object key contains control characters".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if is_windows {
|
||||
if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
|
||||
return Ok(Some(
|
||||
"Object key contains characters not supported on Windows filesystems"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
if part.ends_with(' ') || part.ends_with('.') {
|
||||
return Ok(Some(
|
||||
"Object key segments cannot end with spaces or periods on Windows".to_string(),
|
||||
));
|
||||
}
|
||||
let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
|
||||
if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
|
||||
return Ok(Some(format!("Invalid filename segment: {}", part)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
|
||||
if let Some(top) = non_empty_parts.first() {
|
||||
if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
|
||||
return Ok(Some("Object key uses a reserved prefix".to_string()));
|
||||
}
|
||||
|
||||
if let Some(ref prefixes) = reserved_prefixes {
|
||||
for prefix in prefixes {
|
||||
if *top == prefix.as_str() {
|
||||
return Ok(Some("Object key uses a reserved prefix".to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
|
||||
let len = bucket_name.len();
|
||||
if len < 3 || len > 63 {
|
||||
return Some("Bucket name must be between 3 and 63 characters".to_string());
|
||||
}
|
||||
|
||||
let bytes = bucket_name.as_bytes();
|
||||
if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
|
||||
return Some(
|
||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
for &b in bytes {
|
||||
if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
|
||||
return Some(
|
||||
"Bucket name can only contain lowercase letters, digits, dots, and hyphens"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if bucket_name.contains("..") {
|
||||
return Some("Bucket name must not contain consecutive periods".to_string());
|
||||
}
|
||||
|
||||
if IP_REGEX.is_match(bucket_name) {
|
||||
return Some("Bucket name must not be formatted as an IP address".to_string());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
315
python/run.py
315
python/run.py
@@ -1,315 +0,0 @@
|
||||
"""Helper script to run the API server, UI server, or both."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import atexit
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import warnings
|
||||
import multiprocessing
|
||||
from multiprocessing import Process
|
||||
from pathlib import Path
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment files in precedence order before importing app modules,
# so AppConfig.from_env() sees the values. Each existing file is applied
# with override=True, so variables in a later file replace those loaded
# from an earlier one (and any inherited from the process environment).
for _env_file in [
    Path("/opt/myfsio/myfsio.env"),
    Path.cwd() / ".env",
    Path.cwd() / "myfsio.env",
]:
    if _env_file.exists():
        load_dotenv(_env_file, override=True)
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from app import create_api_app, create_ui_app
|
||||
from app.config import AppConfig
|
||||
from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key
|
||||
from app.version import get_version
|
||||
|
||||
PYTHON_DEPRECATION_MESSAGE = (
|
||||
"The Python MyFSIO runtime is deprecated as of 2026-04-21. "
|
||||
"Use the Rust server in rust/myfsio-engine for supported development and production usage."
|
||||
)
|
||||
|
||||
|
||||
def _server_host() -> str:
|
||||
"""Return the bind host for API and UI servers."""
|
||||
return os.getenv("APP_HOST", "0.0.0.0")
|
||||
|
||||
|
||||
def _is_debug_enabled() -> bool:
|
||||
return os.getenv("FLASK_DEBUG", "0").lower() in ("1", "true", "yes")
|
||||
|
||||
|
||||
def _is_frozen() -> bool:
|
||||
"""Check if running as a compiled binary (PyInstaller/Nuitka)."""
|
||||
return getattr(sys, 'frozen', False) or '__compiled__' in globals()
|
||||
|
||||
|
||||
def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
    """Serve a WSGI app-factory with Granian (single worker, WSGI interface).

    When a config is supplied, thread count, backlog, backpressure, and HTTP/1
    settings are taken from it; otherwise a large default max buffer (128 MiB)
    is used so big uploads are not rejected.
    """
    # Imported lazily so dev mode never requires granian to be installed.
    from granian import Granian
    from granian.constants import Interfaces
    from granian.http import HTTP1Settings

    options: dict = {
        "target": target,
        "address": _server_host(),
        "port": port,
        "interface": Interfaces.WSGI,
        "factory": True,
        "workers": 1,
    }

    if config:
        options["blocking_threads"] = config.server_threads
        options["backlog"] = config.server_backlog
        options["backpressure"] = config.server_connection_limit
        # Granian expects the header read timeout in milliseconds.
        options["http1_settings"] = HTTP1Settings(
            header_read_timeout=config.server_channel_timeout * 1000,
            max_buffer_size=config.server_max_buffer_size,
        )
    else:
        options["http1_settings"] = HTTP1Settings(
            max_buffer_size=1024 * 1024 * 128,
        )

    Granian(**options).serve()
|
||||
|
||||
|
||||
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    """Run the API server: Granian in production, Flask dev server otherwise."""
    if prod:
        _serve_granian("app:create_api_app", port, config)
        return
    app = create_api_app()
    debug_mode = _is_debug_enabled()
    if debug_mode:
        # Loudly flag debug mode: it exposes the Werkzeug debugger.
        warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
    app.run(host=_server_host(), port=port, debug=debug_mode)
|
||||
|
||||
|
||||
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    """Run the UI server: Granian in production, Flask dev server otherwise."""
    if prod:
        _serve_granian("app:create_ui_app", port, config)
        return
    app = create_ui_app()
    debug_mode = _is_debug_enabled()
    if debug_mode:
        # Loudly flag debug mode: it exposes the Werkzeug debugger.
        warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
    app.run(host=_server_host(), port=port, debug=debug_mode)
|
||||
|
||||
|
||||
def reset_credentials() -> None:
    """Reset (or bootstrap) the admin access/secret key pair in the IAM config.

    Keys come from ADMIN_ACCESS_KEY / ADMIN_SECRET_KEY when set, otherwise
    fresh random ones are generated. An existing IAM config is preserved
    (only the admin user's first key pair is replaced); a missing, corrupt,
    or undecryptable config is recreated from scratch. The file is written
    atomically (temp file + replace) and encrypted when a SECRET_KEY is
    configured. The resulting credentials are printed to stdout.
    """
    import json
    import secrets
    from cryptography.fernet import Fernet

    config = AppConfig.from_env()
    iam_path = config.iam_config_path
    encryption_key = config.secret_key

    # Prefer operator-supplied keys; fall back to random ones.
    access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
    secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
    # Remembered so the summary can say where the keys came from.
    custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())

    # Encryption is only possible when a SECRET_KEY is configured.
    fernet = Fernet(_derive_fernet_key(encryption_key)) if encryption_key else None

    # Try to load the existing IAM config; any failure leaves raw_config as
    # None, which triggers a fresh-config rebuild below.
    raw_config = None
    if iam_path.exists():
        try:
            raw_bytes = iam_path.read_bytes()
            from app.iam import _IAM_ENCRYPTED_PREFIX
            if raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX):
                if fernet:
                    try:
                        content = fernet.decrypt(raw_bytes[len(_IAM_ENCRYPTED_PREFIX):]).decode("utf-8")
                        raw_config = json.loads(content)
                    except Exception:
                        print("WARNING: Could not decrypt existing IAM config. Creating fresh config.")
                else:
                    print("WARNING: IAM config is encrypted but no SECRET_KEY available. Creating fresh config.")
            else:
                try:
                    raw_config = json.loads(raw_bytes.decode("utf-8"))
                except json.JSONDecodeError:
                    print("WARNING: Existing IAM config is corrupted. Creating fresh config.")
        except OSError:
            # Unreadable file: treat the same as a missing config.
            pass

    if raw_config and raw_config.get("users"):
        # v2 configs store key pairs in an access_keys list; v1 stores them
        # directly on the user record.
        is_v2 = raw_config.get("version", 1) >= 2
        # Pick the admin: first user holding an iam:* or * action; otherwise
        # fall back to the first user in the file.
        admin_user = None
        for user in raw_config["users"]:
            policies = user.get("policies", [])
            for p in policies:
                actions = p.get("actions", [])
                if "iam:*" in actions or "*" in actions:
                    admin_user = user
                    break
            if admin_user:
                break
        if not admin_user:
            admin_user = raw_config["users"][0]

        if is_v2:
            admin_keys = admin_user.get("access_keys", [])
            if admin_keys:
                # Replace the primary key pair in place.
                admin_keys[0]["access_key"] = access_key
                admin_keys[0]["secret_key"] = secret_key
            else:
                from datetime import datetime as _dt, timezone as _tz
                admin_user["access_keys"] = [{
                    "access_key": access_key,
                    "secret_key": secret_key,
                    "status": "active",
                    "created_at": _dt.now(_tz.utc).isoformat(),
                }]
        else:
            admin_user["access_key"] = access_key
            admin_user["secret_key"] = secret_key
    else:
        # No usable config: create a fresh v2 config with a single local
        # admin granted every allowed action on every bucket.
        from datetime import datetime as _dt, timezone as _tz
        raw_config = {
            "version": 2,
            "users": [
                {
                    "user_id": f"u-{secrets.token_hex(8)}",
                    "display_name": "Local Admin",
                    "enabled": True,
                    "access_keys": [
                        {
                            "access_key": access_key,
                            "secret_key": secret_key,
                            "status": "active",
                            "created_at": _dt.now(_tz.utc).isoformat(),
                        }
                    ],
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }

    # Atomic write: serialize, write to a sibling temp file, then replace.
    json_text = json.dumps(raw_config, indent=2)
    iam_path.parent.mkdir(parents=True, exist_ok=True)
    temp_path = iam_path.with_suffix(".json.tmp")
    if fernet:
        from app.iam import _IAM_ENCRYPTED_PREFIX
        encrypted = fernet.encrypt(json_text.encode("utf-8"))
        temp_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
    else:
        temp_path.write_text(json_text, encoding="utf-8")
    temp_path.replace(iam_path)

    # Human-readable summary; secrets sourced from env vars are not echoed.
    print(f"\n{'='*60}")
    print("MYFSIO - ADMIN CREDENTIALS RESET")
    print(f"{'='*60}")
    if custom_keys:
        print(f"Access Key: {access_key} (from ADMIN_ACCESS_KEY)")
        print(f"Secret Key: {'(from ADMIN_SECRET_KEY)' if os.environ.get('ADMIN_SECRET_KEY', '').strip() else secret_key}")
    else:
        print(f"Access Key: {access_key}")
        print(f"Secret Key: {secret_key}")
    print(f"{'='*60}")
    if fernet:
        print("IAM config saved (encrypted).")
    else:
        print(f"IAM config saved to: {iam_path}")
    print(f"{'='*60}\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Required so frozen (PyInstaller/Nuitka) child processes do not
    # re-execute the bootstrap; spawn is forced for frozen builds.
    multiprocessing.freeze_support()
    if _is_frozen():
        multiprocessing.set_start_method("spawn", force=True)

    parser = argparse.ArgumentParser(description="Run the S3 clone services.")
    parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
    parser.add_argument("--api-port", type=int, default=5000)
    parser.add_argument("--ui-port", type=int, default=5100)
    parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
    parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
    parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
    parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
    parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
    parser.add_argument("--version", action="version", version=f"MyFSIO {get_version()}")
    args = parser.parse_args()

    # The whole Python runtime is deprecated; warn on every launch.
    warnings.warn(PYTHON_DEPRECATION_MESSAGE, DeprecationWarning, stacklevel=1)

    # One-shot maintenance modes exit before any server starts.
    if args.reset_cred or args.mode == "reset-cred":
        reset_credentials()
        sys.exit(0)

    if args.check_config or args.show_config:
        config = AppConfig.from_env()
        config.print_startup_summary()
        if args.check_config:
            # Exit non-zero only for CRITICAL findings.
            issues = config.validate_and_report()
            critical = [i for i in issues if i.startswith("CRITICAL:")]
            sys.exit(1 if critical else 0)
        sys.exit(0)

    # Frozen builds default to production unless --dev explicitly overrides.
    prod_mode = args.prod or (_is_frozen() and not args.dev)

    config = AppConfig.from_env()

    # First-run detection via a marker file under the storage root.
    first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
    is_first_run = not first_run_marker.exists()

    if is_first_run:
        config.print_startup_summary()

        issues = config.validate_and_report()
        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
        if critical_issues:
            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
            sys.exit(1)

        # Marker creation is best-effort; a read-only FS must not block startup.
        try:
            first_run_marker.parent.mkdir(parents=True, exist_ok=True)
            first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
        except OSError:
            pass

    if prod_mode:
        print("Running in production mode (Granian)")
        # Production always re-validates config, even on non-first runs.
        issues = config.validate_and_report()
        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
        if critical_issues:
            for issue in critical_issues:
                print(f" {issue}")
            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
            sys.exit(1)
    else:
        print("Running in development mode (Flask dev server)")

    # The API runs in a child process so the UI can occupy the main process.
    if args.mode in {"api", "both"}:
        print(f"Starting API server on port {args.api_port}...")
        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
        api_proc.start()
    else:
        api_proc = None

    def _cleanup_api():
        # Graceful stop first (terminate + bounded join), hard kill as a
        # last resort so shutdown never hangs on the child.
        if api_proc and api_proc.is_alive():
            api_proc.terminate()
            api_proc.join(timeout=5)
            if api_proc.is_alive():
                api_proc.kill()

    if api_proc:
        atexit.register(_cleanup_api)
        # SIGTERM -> sys.exit so the atexit cleanup above actually runs.
        signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))

    # UI (when requested) blocks the main process; otherwise wait on the API.
    if args.mode in {"ui", "both"}:
        print(f"Starting UI server on port {args.ui_port}...")
        serve_ui(args.ui_port, prod_mode, config)
    elif api_proc:
        try:
            api_proc.join()
        except KeyboardInterrupt:
            pass
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,891 +0,0 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}Sites - S3 Compatible Storage{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<p class="text-uppercase text-muted small mb-1">Geo-Distribution</p>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
Site Registry
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Configure this site's identity and manage peer sites for geo-distribution.</p>
|
||||
</div>
|
||||
<div class="d-none d-md-flex align-items-center gap-2">
|
||||
{% if local_site and local_site.site_id %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary fs-6 px-3 py-2">
|
||||
{{ local_site.site_id }}
|
||||
</span>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 8a.5.5 0 0 1 .5-.5h11.793l-3.147-3.146a.5.5 0 0 1 .708-.708l4 4a.5.5 0 0 1 0 .708l-4 4a.5.5 0 0 1-.708-.708L13.293 8.5H1.5A.5.5 0 0 1 1 8z"/>
|
||||
</svg>
|
||||
{% endif %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
|
||||
{{ peers|length }} peer{{ 's' if peers|length != 1 else '' }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4">
|
||||
<div class="col-lg-4 col-md-5">
|
||||
<div class="card shadow-sm border-0 mb-4" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M8 16s6-5.686 6-10A6 6 0 0 0 2 6c0 4.314 6 10 6 10zm0-7a3 3 0 1 1 0-6 3 3 0 0 1 0 6z"/>
|
||||
</svg>
|
||||
Local Site Identity
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">This site's configuration</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<form method="POST" action="{{ url_for('ui.update_local_site') }}" id="localSiteForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="mb-3">
|
||||
<label for="site_id" class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="site_id" name="site_id" required
|
||||
value="{{ local_site.site_id if local_site else config_site_id or '' }}"
|
||||
placeholder="us-west-1">
|
||||
<div class="form-text">Unique identifier for this site</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="endpoint" name="endpoint"
|
||||
value="{{ local_site.endpoint if local_site else config_site_endpoint or '' }}"
|
||||
placeholder="https://s3.us-west-1.example.com">
|
||||
<div class="form-text">Public URL for this site</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="region" name="region"
|
||||
value="{{ local_site.region if local_site else config_site_region }}">
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="priority" name="priority"
|
||||
value="{{ local_site.priority if local_site else 100 }}" min="0">
|
||||
<div class="form-text">Lower = preferred</div>
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="display_name" name="display_name"
|
||||
value="{{ local_site.display_name if local_site else '' }}"
|
||||
placeholder="US West Primary">
|
||||
</div>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save Local Site
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-3 pb-0 px-4">
|
||||
<button class="btn btn-link text-decoration-none p-0 w-100 d-flex align-items-center justify-content-between"
|
||||
type="button" data-bs-toggle="collapse" data-bs-target="#addPeerCollapse"
|
||||
aria-expanded="false" aria-controls="addPeerCollapse">
|
||||
<span class="d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
|
||||
</svg>
|
||||
<span class="fw-semibold h5 mb-0">Add Peer Site</span>
|
||||
</span>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted add-peer-chevron" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<p class="text-muted small mb-0 mt-1">Register a remote site</p>
|
||||
</div>
|
||||
<div class="collapse" id="addPeerCollapse">
|
||||
<div class="card-body px-4 pb-4">
|
||||
<form method="POST" action="{{ url_for('ui.add_peer_site') }}" id="addPeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="mb-3">
|
||||
<label for="peer_site_id" class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="peer_site_id" name="site_id" required placeholder="us-east-1">
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="peer_endpoint" name="endpoint" required placeholder="https://s3.us-east-1.example.com">
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="peer_region" name="region" value="us-east-1">
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="peer_priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="peer_priority" name="priority" value="100" min="0">
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="peer_display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="peer_display_name" name="display_name" placeholder="US East DR">
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_connection_id" class="form-label fw-medium">Connection</label>
|
||||
<select class="form-select" id="peer_connection_id" name="connection_id">
|
||||
<option value="">No connection</option>
|
||||
{% for conn in connections %}
|
||||
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
<div class="form-text">Link to a remote connection for health checks</div>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
|
||||
</svg>
|
||||
Add Peer Site
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-8 col-md-7">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4 d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
Peer Sites
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Known remote sites in the cluster</p>
|
||||
</div>
|
||||
{% if peers %}
|
||||
<button type="button" class="btn btn-outline-secondary btn-sm" id="btnCheckAllHealth" title="Check health of all peers">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M11.251.068a.5.5 0 0 1 .227.58L9.677 6.5H13a.5.5 0 0 1 .364.843l-8 8.5a.5.5 0 0 1-.842-.49L6.323 9.5H3a.5.5 0 0 1-.364-.843l8-8.5a.5.5 0 0 1 .615-.09z"/>
|
||||
</svg>
|
||||
Check All
|
||||
</button>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if peers %}
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col" style="width: 50px;">Health</th>
|
||||
<th scope="col">Site ID</th>
|
||||
<th scope="col">Endpoint</th>
|
||||
<th scope="col">Region</th>
|
||||
<th scope="col">Priority</th>
|
||||
<th scope="col">Sync Status</th>
|
||||
<th scope="col" class="text-end">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for item in peers_with_stats %}
|
||||
{% set peer = item.peer %}
|
||||
<tr data-site-id="{{ peer.site_id }}">
|
||||
<td class="text-center">
|
||||
<span class="peer-health-status" data-site-id="{{ peer.site_id }}"
|
||||
data-last-checked="{{ peer.last_health_check or '' }}"
|
||||
title="{% if peer.is_healthy == true %}Healthy{% elif peer.is_healthy == false %}Unhealthy{% else %}Not checked{% endif %}{% if peer.last_health_check %} (checked {{ peer.last_health_check }}){% endif %}"
|
||||
style="cursor: help;">
|
||||
{% if peer.is_healthy == true %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||
</svg>
|
||||
{% elif peer.is_healthy == false %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
|
||||
</svg>
|
||||
{% else %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/>
|
||||
</svg>
|
||||
{% endif %}
|
||||
</span>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<div class="peer-icon">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<span class="fw-medium">{{ peer.display_name or peer.site_id }}</span>
|
||||
{% if peer.display_name and peer.display_name != peer.site_id %}
|
||||
<br><small class="text-muted">{{ peer.site_id }}</small>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<span class="endpoint-display text-muted small" data-full-url="{{ peer.endpoint }}" title="{{ peer.endpoint }}" style="cursor: pointer;">
|
||||
{% set parsed = peer.endpoint.split('//') %}
|
||||
{% if parsed|length > 1 %}{{ parsed[1].split('/')[0] }}{% else %}{{ peer.endpoint }}{% endif %}
|
||||
</span>
|
||||
<button type="button" class="btn btn-link btn-sm p-0 ms-1 btn-copy-endpoint" data-url="{{ peer.endpoint }}" title="Copy full URL">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</td>
|
||||
<td><span class="text-muted small">{{ peer.region }}</span></td>
|
||||
<td><span class="text-muted small">{{ peer.priority }}</span></td>
|
||||
<td class="sync-stats-cell" data-site-id="{{ peer.site_id }}">
|
||||
{% if item.has_connection %}
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">{{ item.buckets_syncing }} bucket{{ 's' if item.buckets_syncing != 1 else '' }}</span>
|
||||
{% if item.has_bidirectional %}
|
||||
<span class="bidir-status-icon" data-site-id="{{ peer.site_id }}" title="Bidirectional sync - click to verify">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-info" viewBox="0 0 16 16" style="cursor: pointer;">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="sync-stats-detail d-none mt-2 small" id="stats-{{ peer.site_id }}">
|
||||
<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span>
|
||||
</div>
|
||||
{% else %}
|
||||
<a href="#" class="text-muted small link-no-connection"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
title="Click to link a connection">Link a connection</a>
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="text-end">
|
||||
<div class="d-flex align-items-center justify-content-end gap-1">
|
||||
<button type="button" class="btn btn-outline-secondary btn-sm"
|
||||
data-bs-toggle="modal"
|
||||
data-bs-target="#editPeerModal"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-endpoint="{{ peer.endpoint }}"
|
||||
data-region="{{ peer.region }}"
|
||||
data-priority="{{ peer.priority }}"
|
||||
data-display-name="{{ peer.display_name }}"
|
||||
data-connection-id="{{ peer.connection_id or '' }}"
|
||||
title="Edit peer">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<div class="dropdown peer-actions-dropdown">
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-bs-toggle="dropdown" aria-expanded="false" title="More actions">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M3 9.5a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<ul class="dropdown-menu dropdown-menu-end">
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-check-health" data-site-id="{{ peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-warning" viewBox="0 0 16 16">
|
||||
<path d="M11.251.068a.5.5 0 0 1 .227.58L9.677 6.5H13a.5.5 0 0 1 .364.843l-8 8.5a.5.5 0 0 1-.842-.49L6.323 9.5H3a.5.5 0 0 1-.364-.843l8-8.5a.5.5 0 0 1 .615-.09z"/>
|
||||
</svg>
|
||||
Check Health
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-check-bidir {% if not item.has_connection %}disabled{% endif %}"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-display-name="{{ peer.display_name or peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-info" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
Bidirectional Status
|
||||
</button>
|
||||
</li>
|
||||
{% if item.has_connection and item.buckets_syncing > 0 %}
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-load-stats" data-site-id="{{ peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Load Sync Stats
|
||||
</button>
|
||||
</li>
|
||||
{% endif %}
|
||||
<li>
|
||||
<a href="{{ url_for('ui.replication_wizard', site_id=peer.site_id) }}"
|
||||
class="dropdown-item {% if not item.has_connection %}disabled{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-primary" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
Replication Wizard
|
||||
</a>
|
||||
</li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li>
|
||||
<button type="button" class="dropdown-item text-danger"
|
||||
data-bs-toggle="modal"
|
||||
data-bs-target="#deletePeerModal"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-display-name="{{ peer.display_name or peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete Peer
|
||||
</button>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state text-center py-5">
|
||||
<div class="empty-state-icon mx-auto mb-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<h5 class="fw-semibold mb-2">No peer sites yet</h5>
|
||||
<p class="text-muted mb-0">Add peer sites to enable geo-distribution and site-to-site replication.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="editPeerModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5zm-9.761 5.175-.106.106-1.528 3.821 3.821-1.528.106-.106A.5.5 0 0 1 5 12.5V12h-.5a.5.5 0 0 1-.5-.5V11h-.5a.5.5 0 0 1-.468-.325z"/>
|
||||
</svg>
|
||||
Edit Peer Site
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="POST" id="editPeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="modal-body">
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="edit_site_id" readonly>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="edit_endpoint" name="endpoint" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="edit_region" name="region" required>
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="edit_priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="edit_priority" name="priority" min="0">
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="edit_display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="edit_display_name" name="display_name">
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_connection_id" class="form-label fw-medium">Connection</label>
|
||||
<select class="form-select" id="edit_connection_id" name="connection_id">
|
||||
<option value="">No connection</option>
|
||||
{% for conn in connections %}
|
||||
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="deletePeerModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete Peer Site
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<p>Are you sure you want to delete <strong id="deletePeerName"></strong>?</p>
|
||||
<div class="alert alert-warning d-flex align-items-start small" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||
</svg>
|
||||
<div>This will remove the peer from the site registry. Any site sync configurations may be affected.</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<form method="POST" id="deletePeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="btn btn-danger">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="bidirStatusModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered modal-lg">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-info me-2" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
Bidirectional Sync Status
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div id="bidirStatusContent">
|
||||
<div class="text-center py-4">
|
||||
<span class="spinner-border text-primary" role="status"></span>
|
||||
<p class="text-muted mt-2 mb-0">Checking configuration...</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Close</button>
|
||||
<a href="#" id="bidirWizardLink" class="btn btn-primary d-none">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M9.828.722a.5.5 0 0 1 .354.146l4.95 4.95a.5.5 0 0 1 0 .707c-.48.48-1.072.588-1.503.588-.177 0-.335-.018-.46-.039l-3.134 3.134a5.927 5.927 0 0 1 .16 1.013c.046.702-.032 1.687-.72 2.375a.5.5 0 0 1-.707 0l-2.829-2.828-3.182 3.182c-.195.195-1.219.902-1.414.707-.195-.195.512-1.22.707-1.414l3.182-3.182-2.828-2.829a.5.5 0 0 1 0-.707c.688-.688 1.673-.767 2.375-.72a5.922 5.922 0 0 1 1.013.16l3.134-3.133a2.772 2.772 0 0 1-.04-.461c0-.43.108-1.022.589-1.503a.5.5 0 0 1 .353-.146z"/>
|
||||
</svg>
|
||||
Run Setup Wizard
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
(function() {
|
||||
// HTML-escaping helper from the shared UI core. Falls back to the identity
// function when UICore is not loaded — NOTE(review): the fallback performs
// NO escaping, so callers are only XSS-safe when ui-core.js is present.
var escapeHtml = window.UICore ? window.UICore.escapeHtml : function(s) { return s; };

// Pre-fill the "Edit Peer" modal from the data-* attributes of whatever
// element triggered it, and point the form at the per-site update endpoint.
var editPeerModal = document.getElementById('editPeerModal');
if (editPeerModal) {
    editPeerModal.addEventListener('show.bs.modal', function (event) {
        // Bootstrap passes the triggering element as event.relatedTarget.
        var button = event.relatedTarget;
        var siteId = button.getAttribute('data-site-id');
        document.getElementById('edit_site_id').value = siteId;
        document.getElementById('edit_endpoint').value = button.getAttribute('data-endpoint');
        document.getElementById('edit_region').value = button.getAttribute('data-region');
        document.getElementById('edit_priority').value = button.getAttribute('data-priority');
        document.getElementById('edit_display_name').value = button.getAttribute('data-display-name');
        document.getElementById('edit_connection_id').value = button.getAttribute('data-connection-id');
        // URL-encode the site id since it becomes a path segment.
        document.getElementById('editPeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/update';
    });
}
|
||||
|
||||
// "No connection" links inside a peer row open the Edit Peer modal by
// delegating to the row's existing edit button, which carries the data-*
// attributes the modal's show handler reads.
// Fix: removed the unused local `siteId` (read but never referenced).
document.querySelectorAll('.link-no-connection').forEach(function(link) {
    link.addEventListener('click', function(e) {
        e.preventDefault();
        var row = this.closest('tr[data-site-id]');
        if (row) {
            var btn = row.querySelector('.btn[data-bs-target="#editPeerModal"]');
            if (btn) btn.click();
        }
    });
});
|
||||
|
||||
// Populate the delete-confirmation modal with the peer's display name and
// point the form at the per-site delete endpoint.
var deletePeerModal = document.getElementById('deletePeerModal');
if (deletePeerModal) {
    deletePeerModal.addEventListener('show.bs.modal', function (event) {
        // Triggering element carries the peer's data-* attributes.
        var button = event.relatedTarget;
        var siteId = button.getAttribute('data-site-id');
        var displayName = button.getAttribute('data-display-name');
        // textContent (not innerHTML) so the display name cannot inject markup.
        document.getElementById('deletePeerName').textContent = displayName;
        document.getElementById('deletePeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/delete';
    });
}
|
||||
|
||||
// Render the time elapsed since `date` as a short relative label:
// "just now" (< 1 min), then "Nm ago", "Nh ago", or "Nd ago",
// each unit truncated (floored) from the next-smaller one.
function formatTimeAgo(date) {
    var elapsedSec = Math.floor((new Date() - date) / 1000);
    if (elapsedSec < 60) {
        return 'just now';
    }
    var elapsedMin = Math.floor(elapsedSec / 60);
    if (elapsedMin < 60) {
        return elapsedMin + 'm ago';
    }
    var elapsedHr = Math.floor(elapsedMin / 60);
    return elapsedHr < 24
        ? elapsedHr + 'h ago'
        : Math.floor(elapsedHr / 24) + 'd ago';
}
|
||||
|
||||
// Probe the health of one peer site and update its row's status indicator.
// Shows a spinner while the request is in flight, then swaps in a
// check / cross / question-mark icon for healthy / unhealthy / probe-error.
// Resolves to { siteId, healthy: true|false|null, error? } for callers that
// aggregate results; resolves to undefined when the row has no status span.
// Never rejects — the fetch error path is absorbed by .catch.
function doHealthCheck(siteId) {
    // CSS.escape guards against site ids containing selector metacharacters.
    var row = document.querySelector('tr[data-site-id="' + CSS.escape(siteId) + '"]');
    var statusSpan = row ? row.querySelector('.peer-health-status') : null;
    if (!statusSpan) return Promise.resolve();

    // In-flight indicator.
    statusSpan.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 14px; height: 14px;"></span>';

    return fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/health')
        .then(function(response) { return response.json(); })
        .then(function(data) {
            var now = new Date();
            // Record when this row was last probed (machine-readable).
            statusSpan.setAttribute('data-last-checked', now.toISOString());
            if (data.is_healthy) {
                // Green check-circle icon.
                statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/></svg>';
                statusSpan.title = 'Healthy (checked ' + formatTimeAgo(now) + ')';
                return { siteId: siteId, healthy: true };
            } else {
                // Red cross-circle icon; include the server-reported error, if any.
                statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>';
                statusSpan.title = 'Unhealthy' + (data.error ? ': ' + data.error : '') + ' (checked ' + formatTimeAgo(now) + ')';
                return { siteId: siteId, healthy: false, error: data.error };
            }
        })
        .catch(function(err) {
            // Network/parse failure: grey question-mark icon, healthy unknown.
            statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16"><path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/><path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/></svg>';
            statusSpan.title = 'Check failed';
            return { siteId: siteId, healthy: null };
        });
}
|
||||
|
||||
// Per-row "Check Health" buttons: probe the row's site via doHealthCheck
// and surface the outcome as a toast (healthy / unhealthy / probe failure).
document.querySelectorAll('.btn-check-health').forEach(function(healthBtn) {
    healthBtn.addEventListener('click', function() {
        var targetSite = this.getAttribute('data-site-id');
        doHealthCheck(targetSite).then(function(result) {
            // doHealthCheck resolves undefined when the row has no status span;
            // without the toast helper there is nothing to report either way.
            if (!result || !window.showToast) return;
            if (result.healthy === true) {
                window.showToast('Peer site is healthy', 'Health Check', 'success');
            } else if (result.healthy === false) {
                window.showToast(result.error || 'Peer site is unhealthy', 'Health Check', 'error');
            } else {
                window.showToast('Failed to check health', 'Health Check', 'error');
            }
        });
    });
});
|
||||
|
||||
// "Check All" button: run doHealthCheck for every peer row in parallel,
// then summarize the results in a single toast. The button is disabled and
// shows a spinner while checks are in flight.
var checkAllBtn = document.getElementById('btnCheckAllHealth');
if (checkAllBtn) {
    checkAllBtn.addEventListener('click', function() {
        var btn = this;
        // Remember the original label so it can be restored afterwards.
        var originalHtml = btn.innerHTML;
        btn.disabled = true;
        btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>Checking...';

        // Collect every site id that has a health-status cell.
        var siteIds = [];
        document.querySelectorAll('.peer-health-status').forEach(function(el) {
            siteIds.push(el.getAttribute('data-site-id'));
        });

        // doHealthCheck never rejects, so Promise.all needs no .catch here.
        var promises = siteIds.map(function(id) { return doHealthCheck(id); });
        Promise.all(promises).then(function(results) {
            // healthy === null means the probe itself failed (network error).
            var healthy = results.filter(function(r) { return r && r.healthy === true; }).length;
            var unhealthy = results.filter(function(r) { return r && r.healthy === false; }).length;
            var failed = results.filter(function(r) { return r && r.healthy === null; }).length;

            var msg = healthy + ' healthy';
            if (unhealthy > 0) msg += ', ' + unhealthy + ' unhealthy';
            if (failed > 0) msg += ', ' + failed + ' failed';
            if (window.showToast) window.showToast(msg, 'Health Check', unhealthy > 0 ? 'warning' : 'success');

            btn.disabled = false;
            btn.innerHTML = originalHtml;
        });
    });
}
|
||||
|
||||
// Per-row "Load Stats" buttons: fetch the peer's sync statistics and render
// synced/pending/failed counts plus the last-sync timestamp into the row's
// detail div (id "stats-<siteId>").
document.querySelectorAll('.btn-load-stats').forEach(function(btn) {
    btn.addEventListener('click', function() {
        var siteId = this.getAttribute('data-site-id');
        var detailDiv = document.getElementById('stats-' + siteId);
        if (!detailDiv) return;

        // Reveal the detail area and show a small loading indicator.
        detailDiv.classList.remove('d-none');
        detailDiv.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span> Loading...';

        fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/sync-stats')
            .then(function(response) { return response.json(); })
            .then(function(data) {
                if (data.error) {
                    // Server-side error message (escaped before injection).
                    detailDiv.innerHTML = '<span class="text-danger">' + escapeHtml(data.error) + '</span>';
                } else {
                    // last_sync_at is a Unix timestamp in seconds; may be absent.
                    var lastSync = data.last_sync_at
                        ? new Date(data.last_sync_at * 1000).toLocaleString()
                        : 'Never';
                    detailDiv.innerHTML =
                        '<div class="d-flex flex-wrap gap-2 mb-1">' +
                        '<span class="text-success"><strong>' + escapeHtml(String(data.objects_synced)) + '</strong> synced</span>' +
                        '<span class="text-warning"><strong>' + escapeHtml(String(data.objects_pending)) + '</strong> pending</span>' +
                        '<span class="text-danger"><strong>' + escapeHtml(String(data.objects_failed)) + '</strong> failed</span>' +
                        '</div>' +
                        '<div class="text-muted" style="font-size: 0.75rem;">Last sync: ' + escapeHtml(lastSync) + '</div>';
                }
            })
            .catch(function() {
                detailDiv.innerHTML = '<span class="text-danger">Failed to load stats</span>';
            });
    });
});
|
||||
|
||||
// Clicking a bidirectional-status icon delegates to the row's
// "Check Bidirectional" button so both entry points share one code path.
// Fix: removed the unused local `siteId` (read but never referenced).
document.querySelectorAll('.bidir-status-icon').forEach(function(icon) {
    icon.addEventListener('click', function() {
        var row = this.closest('tr[data-site-id]');
        var btn = row ? row.querySelector('.btn-check-bidir') : null;
        if (btn) btn.click();
    });
});
|
||||
|
||||
// "Check Bidirectional" buttons: open the shared status modal, fetch the
// bidirectional-sync status for the peer, and render a full report —
// success/error/warning alerts, local vs. remote configuration cards, the
// local bidirectional rules table, and (when fixable) a setup-wizard link.
document.querySelectorAll('.btn-check-bidir').forEach(function(btn) {
    btn.addEventListener('click', function() {
        var siteId = this.getAttribute('data-site-id');
        var displayName = this.getAttribute('data-display-name');
        var modal = new bootstrap.Modal(document.getElementById('bidirStatusModal'));
        var contentDiv = document.getElementById('bidirStatusContent');
        var wizardLink = document.getElementById('bidirWizardLink');

        // Show a spinner immediately; the modal is shared, so stale content
        // from a previous peer must be replaced before modal.show().
        contentDiv.innerHTML =
            '<div class="text-center py-4">' +
            '<span class="spinner-border text-primary" role="status"></span>' +
            '<p class="text-muted mt-2 mb-0">Checking bidirectional configuration with ' + escapeHtml(displayName) + '...</p>' +
            '</div>';
        // Hide the wizard link until we know it applies to this peer.
        wizardLink.classList.add('d-none');
        modal.show();

        fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/bidirectional-status')
            .then(function(response) { return response.json(); })
            .then(function(data) {
                var html = '';

                // --- Top-level verdict: fully configured, or list of issues ---
                if (data.is_fully_configured) {
                    html += '<div class="alert alert-success d-flex align-items-center mb-4" role="alert">' +
                        '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="flex-shrink-0 me-2" viewBox="0 0 16 16">' +
                        '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>' +
                        '</svg>' +
                        '<div><strong>Bidirectional sync is fully configured!</strong><br><small>Both sites are set up to sync data in both directions.</small></div>' +
                        '</div>';
                } else if (data.issues && data.issues.length > 0) {
                    // Split issues by severity so errors and warnings get
                    // separate alert boxes.
                    var errors = data.issues.filter(function(i) { return i.severity === 'error'; });
                    var warnings = data.issues.filter(function(i) { return i.severity === 'warning'; });

                    if (errors.length > 0) {
                        html += '<div class="alert alert-danger mb-3" role="alert">' +
                            '<h6 class="alert-heading fw-bold mb-2">' +
                            '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>' +
                            ' Configuration Errors</h6><ul class="mb-0 ps-3">';
                        errors.forEach(function(issue) {
                            html += '<li><strong>' + escapeHtml(issue.code) + ':</strong> ' + escapeHtml(issue.message) + '</li>';
                        });
                        html += '</ul></div>';
                    }

                    if (warnings.length > 0) {
                        html += '<div class="alert alert-warning mb-3" role="alert">' +
                            '<h6 class="alert-heading fw-bold mb-2">' +
                            '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/></svg>' +
                            ' Warnings</h6><ul class="mb-0 ps-3">';
                        warnings.forEach(function(issue) {
                            html += '<li><strong>' + escapeHtml(issue.code) + ':</strong> ' + escapeHtml(issue.message) + '</li>';
                        });
                        html += '</ul></div>';
                    }
                }

                // --- Side-by-side cards: local config vs. remote site view ---
                html += '<div class="row g-3">';
                html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>This Site (Local)</strong></div>' +
                    '<div class="card-body small">' +
                    '<p class="mb-1"><strong>Site ID:</strong> ' + (data.local_site_id ? escapeHtml(data.local_site_id) : '<span class="text-danger">Not configured</span>') + '</p>' +
                    '<p class="mb-1"><strong>Endpoint:</strong> ' + (data.local_endpoint ? escapeHtml(data.local_endpoint) : '<span class="text-danger">Not configured</span>') + '</p>' +
                    '<p class="mb-1"><strong>Site Sync Worker:</strong> ' + (data.local_site_sync_enabled ? '<span class="text-success">Enabled</span>' : '<span class="text-warning">Disabled</span>') + '</p>' +
                    '<p class="mb-0"><strong>Bidirectional Rules:</strong> ' + (data.local_bidirectional_rules ? data.local_bidirectional_rules.length : 0) + '</p>' +
                    '</div></div></div>';

                if (data.remote_status) {
                    var rs = data.remote_status;
                    html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>Remote Site (' + escapeHtml(displayName) + ')</strong></div>' +
                        '<div class="card-body small">';
                    // Three remote outcomes: denied, unreachable, or inspectable.
                    if (rs.admin_access_denied) {
                        html += '<p class="text-warning mb-0">Admin access denied - cannot verify remote configuration</p>';
                    } else if (rs.reachable === false) {
                        html += '<p class="text-danger mb-0">Could not reach remote admin API</p>';
                    } else {
                        html += '<p class="mb-1"><strong>Has Peer Entry for Us:</strong> ' + (rs.has_peer_for_us ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>') + '</p>' +
                            '<p class="mb-1"><strong>Connection Configured:</strong> ' + (rs.peer_connection_configured ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>') + '</p>';
                    }
                    html += '</div></div></div>';
                } else {
                    html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>Remote Site (' + escapeHtml(displayName) + ')</strong></div>' +
                        '<div class="card-body small"><p class="text-muted mb-0">Could not check remote status</p></div></div></div>';
                }
                html += '</div>';

                // --- Table of local bidirectional replication rules ---
                if (data.local_bidirectional_rules && data.local_bidirectional_rules.length > 0) {
                    html += '<div class="mt-3"><h6 class="fw-semibold">Local Bidirectional Rules</h6>' +
                        '<table class="table table-sm table-bordered mb-0"><thead class="table-light"><tr><th>Source Bucket</th><th>Target Bucket</th><th>Status</th></tr></thead><tbody>';
                    data.local_bidirectional_rules.forEach(function(rule) {
                        html += '<tr><td>' + escapeHtml(rule.bucket_name) + '</td><td>' + escapeHtml(rule.target_bucket) + '</td>' +
                            '<td>' + (rule.enabled ? '<span class="badge bg-success">Enabled</span>' : '<span class="badge bg-secondary">Disabled</span>') + '</td></tr>';
                    });
                    html += '</tbody></table></div>';
                }

                // --- Remediation steps + wizard link (only when fixable) ---
                if (!data.is_fully_configured) {
                    html += '<div class="alert alert-info mt-3 mb-0" role="alert">' +
                        '<h6 class="alert-heading fw-bold">How to Fix</h6>' +
                        '<ol class="mb-0 ps-3">' +
                        '<li>Ensure this site has a Site ID and Endpoint URL configured</li>' +
                        '<li>On the remote site, register this site as a peer with a connection</li>' +
                        '<li>Create bidirectional replication rules on both sites</li>' +
                        '<li>Enable SITE_SYNC_ENABLED=true on both sites</li>' +
                        '</ol></div>';
                    // The wizard cannot help while connectivity itself is broken,
                    // so keep the link hidden for these blocking error codes.
                    var blockingErrors = ['NO_CONNECTION', 'CONNECTION_NOT_FOUND', 'REMOTE_UNREACHABLE', 'ENDPOINT_NOT_ALLOWED'];
                    var hasBlockingError = data.issues && data.issues.some(function(i) { return blockingErrors.indexOf(i.code) !== -1; });
                    if (!hasBlockingError) {
                        wizardLink.href = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/replication-wizard';
                        wizardLink.classList.remove('d-none');
                    }
                }

                contentDiv.innerHTML = html;
            })
            .catch(function(err) {
                contentDiv.innerHTML = '<div class="alert alert-danger" role="alert"><strong>Error:</strong> Failed to check bidirectional status. ' + escapeHtml(err.message || '') + '</div>';
            });
    });
});
|
||||
|
||||
// Copy-endpoint buttons: copy the element's data-url to the clipboard via
// the shared UICore helper and confirm with a toast on success.
document.querySelectorAll('.btn-copy-endpoint').forEach(function(copyBtn) {
    copyBtn.addEventListener('click', function(evt) {
        // Keep the click from bubbling into row/dropdown handlers.
        evt.stopPropagation();
        var endpointUrl = this.getAttribute('data-url');
        var core = window.UICore;
        if (!core || !core.copyToClipboard) return;
        core.copyToClipboard(endpointUrl).then(function(copied) {
            if (copied && window.showToast) {
                window.showToast('Endpoint URL copied', 'Copied', 'success');
            }
        });
    });
});
|
||||
|
||||
// AJAX-submit the local-site configuration form; on success, reload the
// page shortly afterwards so the saved values are reflected.
var localSiteForm = document.getElementById('localSiteForm');
if (localSiteForm) {
    localSiteForm.addEventListener('submit', function(e) {
        e.preventDefault();
        window.UICore.submitFormAjax(this, {
            successMessage: 'Local site configuration updated',
            onSuccess: function() {
                // Short delay lets the success toast render before reload.
                setTimeout(function() { location.reload(); }, 800);
            }
        });
    });
}
|
||||
|
||||
// AJAX-submit the "Add Peer" form. On success either follow a server-issued
// redirect (e.g. to the new peer's page) or reload the current page.
var addPeerForm = document.getElementById('addPeerForm');
if (addPeerForm) {
    addPeerForm.addEventListener('submit', function(e) {
        e.preventDefault();
        window.UICore.submitFormAjax(this, {
            successMessage: 'Peer site added',
            onSuccess: function(data) {
                // Short delay lets the success toast render first.
                if (data.redirect) {
                    setTimeout(function() { window.location.href = data.redirect; }, 800);
                } else {
                    setTimeout(function() { location.reload(); }, 800);
                }
            }
        });
    });
}
|
||||
|
||||
// AJAX-submit the "Edit Peer" form: close its modal on success, then reload
// so the updated peer row is re-rendered.
var editPeerForm = document.getElementById('editPeerForm');
if (editPeerForm) {
    editPeerForm.addEventListener('submit', function(e) {
        e.preventDefault();
        // Grab the Bootstrap modal instance up front so it can be hidden later.
        var modal = bootstrap.Modal.getInstance(document.getElementById('editPeerModal'));
        window.UICore.submitFormAjax(this, {
            successMessage: 'Peer site updated',
            onSuccess: function() {
                if (modal) modal.hide();
                setTimeout(function() { location.reload(); }, 800);
            }
        });
    });
}
|
||||
|
||||
// AJAX-submit the "Delete Peer" confirmation form: close its modal on
// success, then reload so the removed peer disappears from the table.
var deletePeerForm = document.getElementById('deletePeerForm');
if (deletePeerForm) {
    deletePeerForm.addEventListener('submit', function(e) {
        e.preventDefault();
        // Grab the Bootstrap modal instance up front so it can be hidden later.
        var modal = bootstrap.Modal.getInstance(document.getElementById('deletePeerModal'));
        window.UICore.submitFormAjax(this, {
            successMessage: 'Peer site deleted',
            onSuccess: function() {
                if (modal) modal.hide();
                setTimeout(function() { location.reload(); }, 800);
            }
        });
    });
}
|
||||
// Manual positioning for the row-action dropdown menus: the stylesheet in
// this template forces them to position: fixed (so they escape table
// overflow clipping), which means Bootstrap's own placement no longer
// applies — place each menu under its toggle, right-aligned, after it opens.
document.querySelectorAll('.peer-actions-dropdown').forEach(function(dd) {
    dd.addEventListener('shown.bs.dropdown', function() {
        var toggle = dd.querySelector('[data-bs-toggle="dropdown"]');
        var menu = dd.querySelector('.dropdown-menu');
        if (!toggle || !menu) return;
        // Viewport-relative coordinates match position: fixed semantics.
        var rect = toggle.getBoundingClientRect();
        menu.style.top = rect.bottom + 'px';
        menu.style.left = (rect.right - menu.offsetWidth) + 'px';
    });
});
|
||||
})();
|
||||
</script>
|
||||
|
||||
<style>
|
||||
.add-peer-chevron {
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
[aria-expanded="true"] .add-peer-chevron {
|
||||
transform: rotate(180deg);
|
||||
}
|
||||
.endpoint-display:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
.peer-actions-dropdown .dropdown-menu {
|
||||
position: fixed !important;
|
||||
inset: auto !important;
|
||||
transform: none !important;
|
||||
}
|
||||
</style>
|
||||
{% endblock %}
|
||||
@@ -1,750 +0,0 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}System - MyFSIO Console{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<p class="text-uppercase text-muted small mb-1">Administration</p>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
System
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Server information, feature flags, and maintenance tools.</p>
|
||||
</div>
|
||||
<div class="d-none d-md-block">
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">v{{ app_version }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4 mb-4">
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M5 0a.5.5 0 0 1 .5.5V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2A2.5 2.5 0 0 1 14 4.5h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14a2.5 2.5 0 0 1-2.5 2.5v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14A2.5 2.5 0 0 1 2 11.5H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2A2.5 2.5 0 0 1 4.5 2V.5A.5.5 0 0 1 5 0zm-.5 3A1.5 1.5 0 0 0 3 4.5v7A1.5 1.5 0 0 0 4.5 13h7a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 11.5 3h-7zM5 6.5A1.5 1.5 0 0 1 6.5 5h3A1.5 1.5 0 0 1 11 6.5v3A1.5 1.5 0 0 1 9.5 11h-3A1.5 1.5 0 0 1 5 9.5v-3zM6.5 6a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
|
||||
</svg>
|
||||
Server Information
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Runtime environment and configuration</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<table class="table table-sm mb-0">
|
||||
<tbody>
|
||||
<tr><td class="text-muted" style="width:40%">Version</td><td class="fw-medium">{{ app_version }}</td></tr>
|
||||
<tr><td class="text-muted">Storage Root</td><td><code>{{ storage_root }}</code></td></tr>
|
||||
<tr><td class="text-muted">Platform</td><td>{{ platform }}</td></tr>
|
||||
<tr><td class="text-muted">Python</td><td>{{ python_version }}</td></tr>
|
||||
<tr><td class="text-muted">Rust Extension</td><td>
|
||||
{% if has_rust %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Loaded</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Not loaded</span>
|
||||
{% endif %}
|
||||
</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M11.5 2a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM9.05 3a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0V3h9.05zM4.5 7a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM2.05 8a2.5 2.5 0 0 1 4.9 0H16v1H6.95a2.5 2.5 0 0 1-4.9 0H0V8h2.05zm9.45 4a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zm-2.45 1a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0v-1h9.05z"/>
|
||||
</svg>
|
||||
Feature Flags
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Features configured via environment variables</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<table class="table table-sm mb-0">
|
||||
<tbody>
|
||||
{% for feat in features %}
|
||||
<tr>
|
||||
<td class="text-muted" style="width:55%">{{ feat.label }}</td>
|
||||
<td class="text-end">
|
||||
{% if feat.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Enabled</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4 mb-4">
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
|
||||
</svg>
|
||||
Garbage Collection
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Clean up temporary files, orphaned uploads, and stale locks</p>
|
||||
</div>
|
||||
<div>
|
||||
{% if gc_status.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if gc_status.enabled %}
|
||||
<div class="d-flex gap-2 mb-3">
|
||||
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="gcRunBtn" onclick="runGC(false)">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Run Now
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" id="gcDryRunBtn" onclick="runGC(true)">
|
||||
Dry Run
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
|
||||
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
|
||||
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
|
||||
<span>GC in progress<span id="gcScanElapsed"></span></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="gcResult" class="mb-3 d-none">
|
||||
<div class="alert mb-0 small" id="gcResultAlert">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div class="fw-semibold mb-1" id="gcResultTitle"></div>
|
||||
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('gcResult').classList.add('d-none')"></button>
|
||||
</div>
|
||||
<div id="gcResultBody"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
|
||||
<div class="d-flex align-items-center gap-2 mb-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="small fw-semibold text-muted">Configuration</span>
|
||||
</div>
|
||||
<div class="row small">
|
||||
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ gc_status.interval_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if gc_status.dry_run else "No" }}</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Temp max age:</span> {{ gc_status.temp_file_max_age_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Lock max age:</span> {{ gc_status.lock_file_max_age_hours }}h</div>
|
||||
<div class="col-6"><span class="text-muted">Multipart max age:</span> {{ gc_status.multipart_max_age_days }}d</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="gcHistoryContainer">
|
||||
{% if gc_history %}
|
||||
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
|
||||
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
|
||||
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
|
||||
</svg>
|
||||
Recent Executions
|
||||
</h6>
|
||||
<div class="table-responsive">
|
||||
<table class="table table-sm small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Time</th>
|
||||
<th class="text-center">Cleaned</th>
|
||||
<th class="text-center">Freed</th>
|
||||
<th class="text-center">Mode</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for exec in gc_history %}
|
||||
<tr>
|
||||
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
|
||||
<td class="text-center">
|
||||
{% set r = exec.result %}
|
||||
{{ (r.temp_files_deleted|d(0)) + (r.multipart_uploads_deleted|d(0)) + (r.lock_files_deleted|d(0)) + (r.orphaned_metadata_deleted|d(0)) + (r.orphaned_versions_deleted|d(0)) + (r.empty_dirs_removed|d(0)) }}
|
||||
</td>
|
||||
<td class="text-center">{{ exec.bytes_freed_display }}</td>
|
||||
<td class="text-center">
|
||||
{% if exec.dry_run %}
|
||||
<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>
|
||||
{% else %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="text-center py-2">
|
||||
<p class="text-muted small mb-0">No executions recorded yet.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
{% else %}
|
||||
<div class="text-center py-4">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
|
||||
</svg>
|
||||
<p class="text-muted mb-1">Garbage collection is not enabled.</p>
|
||||
<p class="text-muted small mb-0">Set <code>GC_ENABLED=true</code> to enable automatic cleanup.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
|
||||
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
Integrity Scanner
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Detect and heal corrupted objects, orphaned files, and metadata drift</p>
|
||||
</div>
|
||||
<div>
|
||||
{% if integrity_status.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if integrity_status.enabled %}
|
||||
<div class="d-flex gap-2 flex-wrap mb-3">
|
||||
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Scan Now
|
||||
</button>
|
||||
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
Scan & Heal
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
Dry Run
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
|
||||
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
|
||||
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
|
||||
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="integrityResult" class="mb-3 d-none">
|
||||
<div class="alert mb-0 small" id="integrityResultAlert">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div class="fw-semibold mb-1" id="integrityResultTitle"></div>
|
||||
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('integrityResult').classList.add('d-none')"></button>
|
||||
</div>
|
||||
<div id="integrityResultBody"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
|
||||
<div class="d-flex align-items-center gap-2 mb-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="small fw-semibold text-muted">Configuration</span>
|
||||
</div>
|
||||
<div class="row small">
|
||||
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ integrity_status.interval_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if integrity_status.dry_run else "No" }}</div>
|
||||
<div class="col-6"><span class="text-muted">Batch size:</span> {{ integrity_status.batch_size }}</div>
|
||||
<div class="col-6"><span class="text-muted">Auto-heal:</span> {{ "Yes" if integrity_status.auto_heal else "No" }}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="integrityHistoryContainer">
|
||||
{% if integrity_history %}
|
||||
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
|
||||
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
|
||||
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
|
||||
</svg>
|
||||
Recent Scans
|
||||
</h6>
|
||||
<div class="table-responsive">
|
||||
<table class="table table-sm small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Time</th>
|
||||
<th class="text-center">Scanned</th>
|
||||
<th class="text-center">Issues</th>
|
||||
<th class="text-center">Healed</th>
|
||||
<th class="text-center">Mode</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for exec in integrity_history %}
|
||||
<tr>
|
||||
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
|
||||
<td class="text-center">{{ exec.result.objects_scanned|d(0) }}</td>
|
||||
<td class="text-center">
|
||||
{% set total_issues = (exec.result.corrupted_objects|d(0)) + (exec.result.orphaned_objects|d(0)) + (exec.result.phantom_metadata|d(0)) + (exec.result.stale_versions|d(0)) + (exec.result.etag_cache_inconsistencies|d(0)) + (exec.result.legacy_metadata_drifts|d(0)) %}
|
||||
{% if total_issues > 0 %}
|
||||
<span class="text-danger fw-medium">{{ total_issues }}</span>
|
||||
{% else %}
|
||||
<span class="text-success">0</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="text-center">{{ exec.result.issues_healed|d(0) }}</td>
|
||||
<td class="text-center">
|
||||
{% if exec.dry_run %}
|
||||
<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>
|
||||
{% elif exec.auto_heal %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Heal</span>
|
||||
{% else %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="text-center py-2">
|
||||
<p class="text-muted small mb-0">No scans recorded yet.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
{% else %}
|
||||
<div class="text-center py-4">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
|
||||
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
<p class="text-muted mb-1">Integrity scanner is not enabled.</p>
|
||||
<p class="text-muted small mb-0">Set <code>INTEGRITY_ENABLED=true</code> to enable automatic scanning.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
<script>
|
||||
(function () {
|
||||
var csrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
|
||||
|
||||
// Toggle a button's busy state. While loading (unless spinnerOnly is set)
// the label is swapped for an inline spinner; the original markup is stashed
// on the element's dataset and restored once loading ends.
function setLoading(btnId, loading, spinnerOnly) {
  var target = document.getElementById(btnId);
  if (!target) {
    return;
  }
  target.disabled = loading;
  var swapToSpinner = loading && !spinnerOnly;
  if (swapToSpinner) {
    target.dataset.originalHtml = target.innerHTML;
    target.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status"></span>Running...';
  } else if (!loading && target.dataset.originalHtml) {
    target.innerHTML = target.dataset.originalHtml;
  }
}
|
||||
|
||||
function formatBytes(bytes) {
|
||||
if (!bytes || bytes === 0) return '0 B';
|
||||
var units = ['B', 'KB', 'MB', 'GB'];
|
||||
var i = 0;
|
||||
var b = bytes;
|
||||
while (b >= 1024 && i < units.length - 1) { b /= 1024; i++; }
|
||||
return (i === 0 ? b : b.toFixed(1)) + ' ' + units[i];
|
||||
}
|
||||
|
||||
var _displayTimezone = {{ display_timezone|tojson }};
|
||||
|
||||
// Format a unix-epoch timestamp (seconds) for display in the configured
// timezone; falls back to a manual 'YYYY-MM-DD HH:MM UTC' string when the
// runtime rejects the timezone identifier.
function formatTimestamp(ts) {
  var when = new Date(ts * 1000);
  try {
    return when.toLocaleString('en-US', {
      year: 'numeric',
      month: 'short',
      day: '2-digit',
      hour: '2-digit',
      minute: '2-digit',
      hour12: false,
      timeZone: _displayTimezone,
      timeZoneName: 'short'
    });
  } catch (e) {
    var pad = function (n) { return n < 10 ? '0' + n : '' + n; };
    var datePart = when.getUTCFullYear() + '-' + pad(when.getUTCMonth() + 1) + '-' + pad(when.getUTCDate());
    var timePart = pad(when.getUTCHours()) + ':' + pad(when.getUTCMinutes());
    return datePart + ' ' + timePart + ' UTC';
  }
}
|
||||
|
||||
// Inline SVG (clock/history glyph, 14x14) reused as the heading icon for the
// client-rendered "Recent Executions" / "Recent Scans" history tables.
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
|
||||
|
||||
// Re-fetch the 10 most recent GC executions and re-render the
// #gcHistoryContainer table in place, mirroring the markup the server renders
// on initial page load. Network or JSON failures are deliberately swallowed
// so a transient error leaves the previously rendered history untouched.
function _gcRefreshHistory() {
  fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (hist) {
    var container = document.getElementById('gcHistoryContainer');
    if (!container) return;
    var execs = hist.executions || [];
    if (execs.length === 0) {
      // Empty-state placeholder matches the server-side "else" branch.
      container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
      return;
    }
    var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
      _gcHistoryIcon + ' Recent Executions</h6>' +
      '<div class="table-responsive"><table class="table table-sm small mb-0">' +
      '<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
      '<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
    execs.forEach(function (exec) {
      var r = exec.result || {};
      // "Cleaned" is the sum of all per-category deletion counters.
      var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
        (r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
        (r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
      // "Freed" sums only the byte counters the result exposes.
      var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
        (r.orphaned_version_bytes_freed || 0);
      var mode = exec.dry_run
        ? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
        : '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
      html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
        '<td class="text-center">' + cleaned + '</td>' +
        '<td class="text-center">' + formatBytes(freed) + '</td>' +
        '<td class="text-center">' + mode + '</td></tr>';
    });
    html += '</tbody></table></div>';
    container.innerHTML = html;
  })
  .catch(function () {});
}
|
||||
|
||||
// Re-fetch the 10 most recent integrity scans and re-render the
// #integrityHistoryContainer table in place, mirroring the server-rendered
// markup. Failures are swallowed so the existing table is left as-is.
function _integrityRefreshHistory() {
  fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (hist) {
    var container = document.getElementById('integrityHistoryContainer');
    if (!container) return;
    var execs = hist.executions || [];
    if (execs.length === 0) {
      container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
      return;
    }
    var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
      _gcHistoryIcon + ' Recent Scans</h6>' +
      '<div class="table-responsive"><table class="table table-sm small mb-0">' +
      '<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
      '<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
      '<th class="text-center">Mode</th></tr></thead><tbody>';
    execs.forEach(function (exec) {
      var r = exec.result || {};
      // Total issues is the sum of every issue-category counter; matches the
      // Jinja-side total_issues computation for the server-rendered rows.
      var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
        (r.phantom_metadata || 0) + (r.stale_versions || 0) +
        (r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
      var issueHtml = issues > 0
        ? '<span class="text-danger fw-medium">' + issues + '</span>'
        : '<span class="text-success">0</span>';
      // Mode precedence: dry-run wins over heal, heal over plain scan.
      var mode = exec.dry_run
        ? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
        : (exec.auto_heal
          ? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
          : '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
      html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
        '<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
        '<td class="text-center">' + issueHtml + '</td>' +
        '<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
        '<td class="text-center">' + mode + '</td></tr>';
    });
    html += '</tbody></table></div>';
    container.innerHTML = html;
  })
  .catch(function () {});
}
|
||||
|
||||
var _gcPollTimer = null;
|
||||
var _gcLastDryRun = false;
|
||||
|
||||
// Toggle the GC card's "run in progress" UI: show/hide the banner, reset the
// elapsed-time label, and enable/disable the run buttons so a second run
// cannot be queued while one is active.
//
// Fix: the original null-checked the buttons but dereferenced the banner and
// elapsed-label lookups unguarded, which would throw if that markup were
// absent; all lookups are now guarded consistently.
function _gcSetScanning(scanning) {
  var banner = document.getElementById('gcScanningBanner');
  var elapsed = document.getElementById('gcScanElapsed');
  var btns = ['gcRunBtn', 'gcDryRunBtn'];
  if (scanning) {
    if (banner) banner.classList.remove('d-none');
  } else {
    if (banner) banner.classList.add('d-none');
    if (elapsed) elapsed.textContent = '';
  }
  btns.forEach(function (id) {
    var el = document.getElementById(id);
    if (el) el.disabled = scanning;
  });
}
|
||||
|
||||
// Render a completed GC run (`data` is the run's result payload) into the
// #gcResult alert: success styling when anything was cleaned, info styling
// for a no-op, plus one summary line per non-zero category.
function _gcShowResult(data, dryRun) {
  var container = document.getElementById('gcResult');
  var alert = document.getElementById('gcResultAlert');
  var title = document.getElementById('gcResultTitle');
  var body = document.getElementById('gcResultBody');
  container.classList.remove('d-none');

  // Same category sums as the history table: item counters and byte counters.
  var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
    (data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
    (data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
  var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
    (data.orphaned_version_bytes_freed || 0);

  alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
  title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';

  // One line per non-zero counter; zero counters are omitted entirely.
  var lines = [];
  if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
  if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
  if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
  if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
  if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
  if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
  if (totalItems === 0) lines.push('Nothing to clean up.');
  if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
  // NOTE(review): error strings come back from our own API and are injected
  // via innerHTML unescaped — presumed server-controlled; confirm they can
  // never echo user-supplied text.
  if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));

  body.innerHTML = lines.join('<br>');
}
|
||||
|
||||
// Poll the GC status endpoint every 2s while a run is active. While scanning,
// the elapsed label is updated and the next poll is scheduled; once the run
// finishes, the banner is hidden, the history table refreshed, and the latest
// execution's result rendered. A failed status fetch retries after 3s.
function _gcPoll() {
  fetch('{{ url_for("ui.system_gc_status") }}', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (status) {
    if (status.scanning) {
      var elapsed = status.scan_elapsed_seconds || 0;
      document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
      _gcPollTimer = setTimeout(_gcPoll, 2000);
    } else {
      _gcSetScanning(false);
      _gcRefreshHistory();
      // Fetch just the most recent execution to show its summary in the
      // result alert.
      fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
        headers: {'X-CSRFToken': csrfToken}
      })
      .then(function (r) { return r.json(); })
      .then(function (hist) {
        if (hist.executions && hist.executions.length > 0) {
          var latest = hist.executions[0];
          _gcShowResult(latest.result, latest.dry_run);
        }
      })
      .catch(function () {});
    }
  })
  .catch(function () {
    // Transient status-endpoint failure: keep polling at a slower cadence.
    _gcPollTimer = setTimeout(_gcPoll, 3000);
  });
}
|
||||
|
||||
// Kick off a garbage-collection run (optionally as a dry run) and start
// polling for its completion. Exposed on window because the card's buttons
// invoke it via inline onclick handlers.
//
// Fix: the error-rendering block was duplicated verbatim in the `.then`
// error branch and the `.catch`; it is now a single local helper.
window.runGC = function (dryRun) {
  _gcLastDryRun = dryRun;
  document.getElementById('gcResult').classList.add('d-none');
  _gcSetScanning(true);

  // Render a failure message in the result alert and re-enable the buttons.
  function showError(message) {
    _gcSetScanning(false);
    var container = document.getElementById('gcResult');
    var alert = document.getElementById('gcResultAlert');
    var title = document.getElementById('gcResultTitle');
    var body = document.getElementById('gcResultBody');
    container.classList.remove('d-none');
    alert.className = 'alert alert-danger mb-0 small';
    title.textContent = 'Error';
    body.textContent = message;
  }

  fetch('{{ url_for("ui.system_gc_run") }}', {
    method: 'POST',
    headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
    body: JSON.stringify({dry_run: dryRun})
  })
  .then(function (r) { return r.json(); })
  .then(function (data) {
    if (data.error) {
      showError(data.error);
      return;
    }
    // The run executes server-side; poll status until it completes.
    _gcPollTimer = setTimeout(_gcPoll, 2000);
  })
  .catch(function (err) {
    showError(err.message);
  });
};
|
||||
|
||||
{% if gc_status.scanning %}
|
||||
_gcSetScanning(true);
|
||||
_gcPollTimer = setTimeout(_gcPoll, 2000);
|
||||
{% endif %}
|
||||
|
||||
var _integrityPollTimer = null;
|
||||
var _integrityLastMode = {dryRun: false, autoHeal: false};
|
||||
|
||||
// Toggle the integrity card's "scan in progress" UI: show/hide the banner,
// reset the elapsed-time label, and enable/disable the three action buttons
// so a second scan cannot be queued while one is active.
//
// Fix: the original null-checked the buttons but dereferenced the banner and
// elapsed-label lookups unguarded, which would throw if that markup were
// absent; all lookups are now guarded consistently.
function _integritySetScanning(scanning) {
  var banner = document.getElementById('integrityScanningBanner');
  var elapsed = document.getElementById('integrityScanElapsed');
  var btns = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
  if (scanning) {
    if (banner) banner.classList.remove('d-none');
  } else {
    if (banner) banner.classList.add('d-none');
    if (elapsed) elapsed.textContent = '';
  }
  btns.forEach(function (id) {
    var el = document.getElementById(id);
    if (el) el.disabled = scanning;
  });
}
|
||||
|
||||
// Render a completed integrity scan (`data` is the scan's result payload)
// into the #integrityResult alert: warning styling when issues were found,
// success styling for a clean scan, plus one line per non-zero category.
function _integrityShowResult(data, dryRun, autoHeal) {
  var container = document.getElementById('integrityResult');
  var alert = document.getElementById('integrityResultAlert');
  var title = document.getElementById('integrityResultTitle');
  var body = document.getElementById('integrityResultBody');
  container.classList.remove('d-none');

  // Same issue-category sum as the history table.
  var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
    (data.phantom_metadata || 0) + (data.stale_versions || 0) +
    (data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);

  // Title prefix precedence: dry-run wins over heal, heal over plain scan.
  var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
  alert.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
  title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';

  var lines = [];
  lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
  if (totalIssues === 0) {
    lines.push('No issues found.');
  } else {
    // One line per non-zero issue counter; zero counters are omitted.
    if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
    if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
    if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
    if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
    if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
    if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
    if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
  }
  // NOTE(review): error strings are injected via innerHTML unescaped —
  // presumed server-controlled; confirm they can never echo user input.
  if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));

  body.innerHTML = lines.join('<br>');
}
|
||||
|
||||
// Poll the integrity status endpoint every 2s while a scan is active. While
// scanning, the elapsed label is updated and the next poll scheduled; once
// the scan finishes, the banner is hidden, the history table refreshed, and
// the latest scan's result rendered. A failed status fetch retries after 3s.
function _integrityPoll() {
  fetch('{{ url_for("ui.system_integrity_status") }}', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (status) {
    if (status.scanning) {
      var elapsed = status.scan_elapsed_seconds || 0;
      document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
      _integrityPollTimer = setTimeout(_integrityPoll, 2000);
    } else {
      _integritySetScanning(false);
      _integrityRefreshHistory();
      // Fetch just the most recent scan to show its summary in the alert.
      fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
        headers: {'X-CSRFToken': csrfToken}
      })
      .then(function (r) { return r.json(); })
      .then(function (hist) {
        if (hist.executions && hist.executions.length > 0) {
          var latest = hist.executions[0];
          _integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
        }
      })
      .catch(function () {});
    }
  })
  .catch(function () {
    // Transient status-endpoint failure: keep polling at a slower cadence.
    _integrityPollTimer = setTimeout(_integrityPoll, 3000);
  });
}
|
||||
|
||||
// Kick off an integrity scan (optionally dry-run and/or auto-heal) and start
// polling for its completion. Exposed on window because the card's buttons
// invoke it via inline onclick handlers.
//
// Fix: the error-rendering block was duplicated verbatim in the `.then`
// error branch and the `.catch`; it is now a single local helper.
window.runIntegrity = function (dryRun, autoHeal) {
  _integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
  document.getElementById('integrityResult').classList.add('d-none');
  _integritySetScanning(true);

  // Render a failure message in the result alert and re-enable the buttons.
  function showError(message) {
    _integritySetScanning(false);
    var container = document.getElementById('integrityResult');
    var alert = document.getElementById('integrityResultAlert');
    var title = document.getElementById('integrityResultTitle');
    var body = document.getElementById('integrityResultBody');
    container.classList.remove('d-none');
    alert.className = 'alert alert-danger mb-0 small';
    title.textContent = 'Error';
    body.textContent = message;
  }

  fetch('{{ url_for("ui.system_integrity_run") }}', {
    method: 'POST',
    headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
    body: JSON.stringify({dry_run: dryRun, auto_heal: autoHeal})
  })
  .then(function (r) { return r.json(); })
  .then(function (data) {
    if (data.error) {
      showError(data.error);
      return;
    }
    // The scan executes server-side; poll status until it completes.
    _integrityPollTimer = setTimeout(_integrityPoll, 2000);
  })
  .catch(function (err) {
    showError(err.message);
  });
};
|
||||
|
||||
{% if integrity_status.scanning %}
|
||||
_integritySetScanning(true);
|
||||
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
|
||||
{% endif %}
|
||||
})();
|
||||
</script>
|
||||
{% endblock %}
|
||||
@@ -1,156 +0,0 @@
|
||||
import hashlib
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture()
def bucket(client, signer):
    """Create the shared ``cond-test`` bucket and return its name."""
    name = "cond-test"
    client.put(f"/{name}", headers=signer("PUT", f"/{name}"))
    return name
|
||||
|
||||
|
||||
@pytest.fixture()
def uploaded(client, signer, bucket):
    """Upload ``obj.txt`` into *bucket*; return its ETag and Last-Modified."""
    payload = b"hello conditional"
    path = f"/{bucket}/obj.txt"
    resp = client.put(path, headers=signer("PUT", path, body=payload), data=payload)
    return {
        # ETag mirrors the server's simple (non-multipart) MD5 convention.
        "etag": hashlib.md5(payload).hexdigest(),
        "last_modified": resp.headers.get("Last-Modified"),
    }
|
||||
|
||||
|
||||
class TestIfMatch:
    """Conditional GET/HEAD behaviour for the ``If-Match`` request header."""

    def _conditional(self, client, signer, bucket, method, if_match):
        # Sign and issue one conditional request against the fixture object.
        path = f"/{bucket}/obj.txt"
        headers = signer(method, path, headers={"If-Match": if_match})
        caller = client.get if method == "GET" else client.head
        return caller(path, headers=headers)

    def test_get_matching_etag(self, client, signer, bucket, uploaded):
        resp = self._conditional(client, signer, bucket, "GET", f'"{uploaded["etag"]}"')
        assert resp.status_code == 200

    def test_get_non_matching_etag(self, client, signer, bucket, uploaded):
        resp = self._conditional(client, signer, bucket, "GET", '"wrongetag"')
        assert resp.status_code == 412

    def test_head_matching_etag(self, client, signer, bucket, uploaded):
        resp = self._conditional(client, signer, bucket, "HEAD", f'"{uploaded["etag"]}"')
        assert resp.status_code == 200

    def test_head_non_matching_etag(self, client, signer, bucket, uploaded):
        resp = self._conditional(client, signer, bucket, "HEAD", '"wrongetag"')
        assert resp.status_code == 412

    def test_wildcard_match(self, client, signer, bucket, uploaded):
        # "*" matches any existing object regardless of its ETag.
        resp = self._conditional(client, signer, bucket, "GET", "*")
        assert resp.status_code == 200

    def test_multiple_etags_one_matches(self, client, signer, bucket, uploaded):
        etag_list = f'"bad1", "{uploaded["etag"]}", "bad2"'
        resp = self._conditional(client, signer, bucket, "GET", etag_list)
        assert resp.status_code == 200

    def test_multiple_etags_none_match(self, client, signer, bucket, uploaded):
        resp = self._conditional(client, signer, bucket, "GET", '"bad1", "bad2"')
        assert resp.status_code == 412
|
||||
|
||||
|
||||
class TestIfNoneMatch:
|
||||
def test_get_matching_etag_returns_304(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 304
|
||||
assert uploaded["etag"] in resp.headers.get("ETag", "")
|
||||
|
||||
def test_get_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
def test_head_matching_etag_returns_304(self, client, signer, bucket, uploaded):
|
||||
headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
|
||||
resp = client.head(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 304
|
||||
|
||||
def test_head_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
|
||||
headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
|
||||
resp = client.head(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
def test_wildcard_returns_304(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": "*"})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 304
|
||||
|
||||
|
||||
class TestIfModifiedSince:
|
||||
def test_not_modified_returns_304(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 304
|
||||
assert "ETag" in resp.headers
|
||||
|
||||
def test_modified_returns_200(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
def test_head_not_modified(self, client, signer, bucket, uploaded):
|
||||
headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
|
||||
resp = client.head(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 304
|
||||
|
||||
def test_if_none_match_takes_precedence(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={
|
||||
"If-None-Match": '"wrongetag"',
|
||||
"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT",
|
||||
})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
|
||||
class TestIfUnmodifiedSince:
|
||||
def test_unmodified_returns_200(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
def test_modified_returns_412(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 412
|
||||
|
||||
def test_head_modified_returns_412(self, client, signer, bucket, uploaded):
|
||||
headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
|
||||
resp = client.head(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 412
|
||||
|
||||
def test_if_match_takes_precedence(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={
|
||||
"If-Match": f'"{uploaded["etag"]}"',
|
||||
"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT",
|
||||
})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 200
|
||||
|
||||
|
||||
class TestConditionalWithRange:
|
||||
def test_if_match_with_range(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={
|
||||
"If-Match": f'"{uploaded["etag"]}"',
|
||||
"Range": "bytes=0-4",
|
||||
})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 206
|
||||
|
||||
def test_if_match_fails_with_range(self, client, signer, bucket, uploaded):
|
||||
headers = signer("GET", f"/{bucket}/obj.txt", headers={
|
||||
"If-Match": '"wrongetag"',
|
||||
"Range": "bytes=0-4",
|
||||
})
|
||||
resp = client.get(f"/{bucket}/obj.txt", headers=headers)
|
||||
assert resp.status_code == 412
|
||||
@@ -1,356 +0,0 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from app.gc import GarbageCollector, GCResult
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def storage_root(tmp_path):
|
||||
root = tmp_path / "data"
|
||||
root.mkdir()
|
||||
sys_root = root / ".myfsio.sys"
|
||||
sys_root.mkdir()
|
||||
(sys_root / "config").mkdir(parents=True)
|
||||
(sys_root / "tmp").mkdir()
|
||||
(sys_root / "multipart").mkdir()
|
||||
(sys_root / "buckets").mkdir()
|
||||
return root
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def gc(storage_root):
|
||||
return GarbageCollector(
|
||||
storage_root=storage_root,
|
||||
interval_hours=1.0,
|
||||
temp_file_max_age_hours=1.0,
|
||||
multipart_max_age_days=1,
|
||||
lock_file_max_age_hours=0.5,
|
||||
dry_run=False,
|
||||
)
|
||||
|
||||
|
||||
def _make_old(path, hours=48):
|
||||
old_time = time.time() - hours * 3600
|
||||
os.utime(path, (old_time, old_time))
|
||||
|
||||
|
||||
class TestTempFileCleanup:
|
||||
def test_old_temp_files_deleted(self, storage_root, gc):
|
||||
tmp_dir = storage_root / ".myfsio.sys" / "tmp"
|
||||
old_file = tmp_dir / "abc123.tmp"
|
||||
old_file.write_bytes(b"x" * 1000)
|
||||
_make_old(old_file, hours=48)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.temp_files_deleted == 1
|
||||
assert result.temp_bytes_freed == 1000
|
||||
assert not old_file.exists()
|
||||
|
||||
def test_recent_temp_files_kept(self, storage_root, gc):
|
||||
tmp_dir = storage_root / ".myfsio.sys" / "tmp"
|
||||
new_file = tmp_dir / "recent.tmp"
|
||||
new_file.write_bytes(b"data")
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.temp_files_deleted == 0
|
||||
assert new_file.exists()
|
||||
|
||||
def test_dry_run_keeps_files(self, storage_root, gc):
|
||||
gc.dry_run = True
|
||||
tmp_dir = storage_root / ".myfsio.sys" / "tmp"
|
||||
old_file = tmp_dir / "stale.tmp"
|
||||
old_file.write_bytes(b"x" * 500)
|
||||
_make_old(old_file, hours=48)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.temp_files_deleted == 1
|
||||
assert result.temp_bytes_freed == 500
|
||||
assert old_file.exists()
|
||||
|
||||
|
||||
class TestMultipartCleanup:
|
||||
def test_old_orphaned_multipart_deleted(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
mp_root = storage_root / ".myfsio.sys" / "multipart" / "test-bucket"
|
||||
mp_root.mkdir(parents=True)
|
||||
upload_dir = mp_root / "upload-123"
|
||||
upload_dir.mkdir()
|
||||
manifest = upload_dir / "manifest.json"
|
||||
manifest.write_text(json.dumps({"upload_id": "upload-123", "object_key": "foo.txt"}))
|
||||
part = upload_dir / "part-00001.part"
|
||||
part.write_bytes(b"x" * 2000)
|
||||
_make_old(manifest, hours=200)
|
||||
_make_old(part, hours=200)
|
||||
_make_old(upload_dir, hours=200)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.multipart_uploads_deleted == 1
|
||||
assert result.multipart_bytes_freed > 0
|
||||
assert not upload_dir.exists()
|
||||
|
||||
def test_recent_multipart_kept(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
mp_root = storage_root / ".myfsio.sys" / "multipart" / "test-bucket"
|
||||
mp_root.mkdir(parents=True)
|
||||
upload_dir = mp_root / "upload-new"
|
||||
upload_dir.mkdir()
|
||||
manifest = upload_dir / "manifest.json"
|
||||
manifest.write_text(json.dumps({"upload_id": "upload-new", "object_key": "bar.txt"}))
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.multipart_uploads_deleted == 0
|
||||
assert upload_dir.exists()
|
||||
|
||||
def test_legacy_multipart_cleaned(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
legacy_mp = bucket / ".multipart" / "upload-old"
|
||||
legacy_mp.mkdir(parents=True)
|
||||
part = legacy_mp / "part-00001.part"
|
||||
part.write_bytes(b"y" * 500)
|
||||
_make_old(part, hours=200)
|
||||
_make_old(legacy_mp, hours=200)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.multipart_uploads_deleted == 1
|
||||
|
||||
|
||||
class TestLockFileCleanup:
|
||||
def test_stale_lock_files_deleted(self, storage_root, gc):
|
||||
locks_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks"
|
||||
locks_dir.mkdir(parents=True)
|
||||
lock = locks_dir / "some_key.lock"
|
||||
lock.write_text("")
|
||||
_make_old(lock, hours=2)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.lock_files_deleted == 1
|
||||
assert not lock.exists()
|
||||
|
||||
def test_recent_lock_kept(self, storage_root, gc):
|
||||
locks_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks"
|
||||
locks_dir.mkdir(parents=True)
|
||||
lock = locks_dir / "active.lock"
|
||||
lock.write_text("")
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.lock_files_deleted == 0
|
||||
assert lock.exists()
|
||||
|
||||
|
||||
class TestOrphanedMetadataCleanup:
|
||||
def test_legacy_orphaned_metadata_deleted(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
meta_dir = bucket / ".meta"
|
||||
meta_dir.mkdir()
|
||||
orphan = meta_dir / "deleted_file.txt.meta.json"
|
||||
orphan.write_text(json.dumps({"etag": "abc"}))
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.orphaned_metadata_deleted == 1
|
||||
assert not orphan.exists()
|
||||
|
||||
def test_valid_metadata_kept(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
obj = bucket / "exists.txt"
|
||||
obj.write_text("hello")
|
||||
meta_dir = bucket / ".meta"
|
||||
meta_dir.mkdir()
|
||||
meta = meta_dir / "exists.txt.meta.json"
|
||||
meta.write_text(json.dumps({"etag": "abc"}))
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.orphaned_metadata_deleted == 0
|
||||
assert meta.exists()
|
||||
|
||||
def test_index_orphaned_entries_cleaned(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
obj = bucket / "keep.txt"
|
||||
obj.write_text("hello")
|
||||
|
||||
meta_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "meta"
|
||||
meta_dir.mkdir(parents=True)
|
||||
index = meta_dir / "_index.json"
|
||||
index.write_text(json.dumps({"keep.txt": {"etag": "a"}, "gone.txt": {"etag": "b"}}))
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.orphaned_metadata_deleted == 1
|
||||
|
||||
updated = json.loads(index.read_text())
|
||||
assert "keep.txt" in updated
|
||||
assert "gone.txt" not in updated
|
||||
|
||||
|
||||
class TestOrphanedVersionsCleanup:
|
||||
def test_orphaned_versions_deleted(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
versions_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "versions" / "deleted_obj.txt"
|
||||
versions_dir.mkdir(parents=True)
|
||||
v_bin = versions_dir / "v1.bin"
|
||||
v_json = versions_dir / "v1.json"
|
||||
v_bin.write_bytes(b"old data" * 100)
|
||||
v_json.write_text(json.dumps({"version_id": "v1", "size": 800}))
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.orphaned_versions_deleted == 2
|
||||
assert result.orphaned_version_bytes_freed == 800
|
||||
|
||||
def test_active_versions_kept(self, storage_root, gc):
|
||||
bucket = storage_root / "test-bucket"
|
||||
bucket.mkdir()
|
||||
obj = bucket / "active.txt"
|
||||
obj.write_text("current")
|
||||
versions_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "versions" / "active.txt"
|
||||
versions_dir.mkdir(parents=True)
|
||||
v_bin = versions_dir / "v1.bin"
|
||||
v_bin.write_bytes(b"old version")
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.orphaned_versions_deleted == 0
|
||||
assert v_bin.exists()
|
||||
|
||||
|
||||
class TestEmptyDirCleanup:
|
||||
def test_empty_dirs_removed(self, storage_root, gc):
|
||||
empty = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks" / "sub"
|
||||
empty.mkdir(parents=True)
|
||||
|
||||
result = gc.run_now()
|
||||
assert result.empty_dirs_removed > 0
|
||||
assert not empty.exists()
|
||||
|
||||
|
||||
class TestHistory:
|
||||
def test_history_recorded(self, storage_root, gc):
|
||||
gc.run_now()
|
||||
history = gc.get_history()
|
||||
assert len(history) == 1
|
||||
assert "result" in history[0]
|
||||
assert "timestamp" in history[0]
|
||||
|
||||
def test_multiple_runs(self, storage_root, gc):
|
||||
gc.run_now()
|
||||
gc.run_now()
|
||||
gc.run_now()
|
||||
history = gc.get_history()
|
||||
assert len(history) == 3
|
||||
assert history[0]["timestamp"] >= history[1]["timestamp"]
|
||||
|
||||
|
||||
class TestStatus:
|
||||
def test_get_status(self, storage_root, gc):
|
||||
status = gc.get_status()
|
||||
assert status["interval_hours"] == 1.0
|
||||
assert status["dry_run"] is False
|
||||
assert status["temp_file_max_age_hours"] == 1.0
|
||||
assert status["multipart_max_age_days"] == 1
|
||||
assert status["lock_file_max_age_hours"] == 0.5
|
||||
|
||||
|
||||
class TestGCResult:
|
||||
def test_total_bytes_freed(self):
|
||||
r = GCResult(temp_bytes_freed=100, multipart_bytes_freed=200, orphaned_version_bytes_freed=300)
|
||||
assert r.total_bytes_freed == 600
|
||||
|
||||
def test_has_work(self):
|
||||
assert not GCResult().has_work
|
||||
assert GCResult(temp_files_deleted=1).has_work
|
||||
assert GCResult(lock_files_deleted=1).has_work
|
||||
assert GCResult(empty_dirs_removed=1).has_work
|
||||
|
||||
|
||||
class TestAdminAPI:
|
||||
@pytest.fixture
|
||||
def gc_app(self, tmp_path):
|
||||
from app import create_api_app
|
||||
storage_root = tmp_path / "data"
|
||||
iam_config = tmp_path / "iam.json"
|
||||
bucket_policies = tmp_path / "bucket_policies.json"
|
||||
iam_payload = {
|
||||
"users": [
|
||||
{
|
||||
"access_key": "admin",
|
||||
"secret_key": "adminsecret",
|
||||
"display_name": "Admin",
|
||||
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy", "iam:*"]}],
|
||||
}
|
||||
]
|
||||
}
|
||||
iam_config.write_text(json.dumps(iam_payload))
|
||||
flask_app = create_api_app({
|
||||
"TESTING": True,
|
||||
"SECRET_KEY": "testing",
|
||||
"STORAGE_ROOT": storage_root,
|
||||
"IAM_CONFIG": iam_config,
|
||||
"BUCKET_POLICY_PATH": bucket_policies,
|
||||
"GC_ENABLED": True,
|
||||
"GC_INTERVAL_HOURS": 1.0,
|
||||
})
|
||||
yield flask_app
|
||||
gc = flask_app.extensions.get("gc")
|
||||
if gc:
|
||||
gc.stop()
|
||||
|
||||
def test_gc_status(self, gc_app):
|
||||
client = gc_app.test_client()
|
||||
resp = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
|
||||
assert resp.status_code == 200
|
||||
data = resp.get_json()
|
||||
assert data["enabled"] is True
|
||||
|
||||
def test_gc_run(self, gc_app):
|
||||
client = gc_app.test_client()
|
||||
resp = client.post(
|
||||
"/admin/gc/run",
|
||||
headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"},
|
||||
content_type="application/json",
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
data = resp.get_json()
|
||||
assert data["status"] == "started"
|
||||
|
||||
def test_gc_dry_run(self, gc_app):
|
||||
client = gc_app.test_client()
|
||||
resp = client.post(
|
||||
"/admin/gc/run",
|
||||
headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"},
|
||||
data=json.dumps({"dry_run": True}),
|
||||
content_type="application/json",
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
data = resp.get_json()
|
||||
assert data["status"] == "started"
|
||||
|
||||
def test_gc_history(self, gc_app):
|
||||
import time
|
||||
client = gc_app.test_client()
|
||||
client.post("/admin/gc/run", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
|
||||
for _ in range(50):
|
||||
time.sleep(0.1)
|
||||
status = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}).get_json()
|
||||
if not status.get("scanning"):
|
||||
break
|
||||
resp = client.get("/admin/gc/history", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
|
||||
assert resp.status_code == 200
|
||||
data = resp.get_json()
|
||||
assert len(data["executions"]) >= 1
|
||||
|
||||
def test_gc_requires_admin(self, gc_app):
|
||||
iam = gc_app.extensions["iam"]
|
||||
user = iam.create_user(display_name="Regular")
|
||||
client = gc_app.test_client()
|
||||
resp = client.get(
|
||||
"/admin/gc/status",
|
||||
headers={"X-Access-Key": user["access_key"], "X-Secret-Key": user["secret_key"]},
|
||||
)
|
||||
assert resp.status_code == 403
|
||||
@@ -1,788 +0,0 @@
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
|
||||
|
||||
from app.integrity import IntegrityChecker, IntegrityCursorStore, IntegrityResult
|
||||
|
||||
|
||||
def _wait_scan_done(client, headers, timeout=10):
|
||||
deadline = time.time() + timeout
|
||||
while time.time() < deadline:
|
||||
resp = client.get("/admin/integrity/status", headers=headers)
|
||||
data = resp.get_json()
|
||||
if not data.get("scanning"):
|
||||
return
|
||||
time.sleep(0.1)
|
||||
raise TimeoutError("scan did not complete")
|
||||
|
||||
|
||||
def _md5(data: bytes) -> str:
|
||||
return hashlib.md5(data).hexdigest()
|
||||
|
||||
|
||||
def _setup_bucket(storage_root: Path, bucket_name: str, objects: dict[str, bytes]) -> None:
|
||||
bucket_path = storage_root / bucket_name
|
||||
bucket_path.mkdir(parents=True, exist_ok=True)
|
||||
meta_root = storage_root / ".myfsio.sys" / "buckets" / bucket_name / "meta"
|
||||
meta_root.mkdir(parents=True, exist_ok=True)
|
||||
bucket_json = storage_root / ".myfsio.sys" / "buckets" / bucket_name / ".bucket.json"
|
||||
bucket_json.write_text(json.dumps({"created": "2025-01-01"}))
|
||||
|
||||
for key, data in objects.items():
|
||||
obj_path = bucket_path / key
|
||||
obj_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
obj_path.write_bytes(data)
|
||||
|
||||
etag = _md5(data)
|
||||
stat = obj_path.stat()
|
||||
meta = {
|
||||
"__etag__": etag,
|
||||
"__size__": str(stat.st_size),
|
||||
"__last_modified__": str(stat.st_mtime),
|
||||
}
|
||||
|
||||
key_path = Path(key)
|
||||
parent = key_path.parent
|
||||
key_name = key_path.name
|
||||
if parent == Path("."):
|
||||
index_path = meta_root / "_index.json"
|
||||
else:
|
||||
index_path = meta_root / parent / "_index.json"
|
||||
index_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
index_data = {}
|
||||
if index_path.exists():
|
||||
index_data = json.loads(index_path.read_text())
|
||||
index_data[key_name] = {"metadata": meta}
|
||||
index_path.write_text(json.dumps(index_data))
|
||||
|
||||
|
||||
def _issues_of_type(result, issue_type):
|
||||
return [i for i in result.issues if i.issue_type == issue_type]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def storage_root(tmp_path):
|
||||
root = tmp_path / "data"
|
||||
root.mkdir()
|
||||
(root / ".myfsio.sys" / "config").mkdir(parents=True, exist_ok=True)
|
||||
return root
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def checker(storage_root):
|
||||
return IntegrityChecker(
|
||||
storage_root=storage_root,
|
||||
interval_hours=24.0,
|
||||
batch_size=1000,
|
||||
auto_heal=False,
|
||||
dry_run=False,
|
||||
)
|
||||
|
||||
|
||||
class TestCorruptedObjects:
|
||||
def test_detect_corrupted(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
|
||||
(storage_root / "mybucket" / "file.txt").write_bytes(b"corrupted data")
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.corrupted_objects == 1
|
||||
issues = _issues_of_type(result, "corrupted_object")
|
||||
assert len(issues) == 1
|
||||
assert issues[0].bucket == "mybucket"
|
||||
assert issues[0].key == "file.txt"
|
||||
assert not issues[0].healed
|
||||
|
||||
def test_heal_corrupted(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
|
||||
(storage_root / "mybucket" / "file.txt").write_bytes(b"corrupted data")
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.corrupted_objects == 1
|
||||
assert result.issues_healed == 1
|
||||
issues = _issues_of_type(result, "corrupted_object")
|
||||
assert issues[0].healed
|
||||
|
||||
result2 = checker.run_now()
|
||||
assert result2.corrupted_objects == 0
|
||||
|
||||
def test_valid_objects_pass(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.corrupted_objects == 0
|
||||
assert result.objects_scanned >= 1
|
||||
|
||||
def test_corrupted_nested_key(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"sub/dir/file.txt": b"nested content"})
|
||||
(storage_root / "mybucket" / "sub" / "dir" / "file.txt").write_bytes(b"bad")
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.corrupted_objects == 1
|
||||
issues = _issues_of_type(result, "corrupted_object")
|
||||
assert issues[0].key == "sub/dir/file.txt"
|
||||
|
||||
|
||||
class TestOrphanedObjects:
|
||||
def test_detect_orphaned(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {})
|
||||
(storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan data")
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.orphaned_objects == 1
|
||||
issues = _issues_of_type(result, "orphaned_object")
|
||||
assert len(issues) == 1
|
||||
|
||||
def test_heal_orphaned(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {})
|
||||
(storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan data")
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.orphaned_objects == 1
|
||||
assert result.issues_healed == 1
|
||||
issues = _issues_of_type(result, "orphaned_object")
|
||||
assert issues[0].healed
|
||||
|
||||
result2 = checker.run_now()
|
||||
assert result2.orphaned_objects == 0
|
||||
assert result2.objects_scanned >= 1
|
||||
|
||||
|
||||
class TestPhantomMetadata:
|
||||
def test_detect_phantom(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
(storage_root / "mybucket" / "file.txt").unlink()
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.phantom_metadata == 1
|
||||
issues = _issues_of_type(result, "phantom_metadata")
|
||||
assert len(issues) == 1
|
||||
|
||||
def test_heal_phantom(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
(storage_root / "mybucket" / "file.txt").unlink()
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.phantom_metadata == 1
|
||||
assert result.issues_healed == 1
|
||||
|
||||
result2 = checker.run_now()
|
||||
assert result2.phantom_metadata == 0
|
||||
|
||||
|
||||
class TestStaleVersions:
|
||||
def test_manifest_without_data(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
|
||||
versions_root.mkdir(parents=True)
|
||||
(versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.stale_versions == 1
|
||||
issues = _issues_of_type(result, "stale_version")
|
||||
assert "manifest without data" in issues[0].detail
|
||||
|
||||
def test_data_without_manifest(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
|
||||
versions_root.mkdir(parents=True)
|
||||
(versions_root / "v1.bin").write_bytes(b"old data")
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.stale_versions == 1
|
||||
issues = _issues_of_type(result, "stale_version")
|
||||
assert "data without manifest" in issues[0].detail
|
||||
|
||||
def test_heal_stale_versions(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
|
||||
versions_root.mkdir(parents=True)
|
||||
(versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
|
||||
(versions_root / "v2.bin").write_bytes(b"old data")
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.stale_versions == 2
|
||||
assert result.issues_healed == 2
|
||||
assert not (versions_root / "v1.json").exists()
|
||||
assert not (versions_root / "v2.bin").exists()
|
||||
|
||||
def test_valid_versions_pass(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
|
||||
versions_root.mkdir(parents=True)
|
||||
(versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
|
||||
(versions_root / "v1.bin").write_bytes(b"old data")
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.stale_versions == 0
|
||||
|
||||
|
||||
class TestEtagCache:
|
||||
def test_detect_mismatch(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
etag_path = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "etag_index.json"
|
||||
etag_path.write_text(json.dumps({"file.txt": "wrong_etag"}))
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.etag_cache_inconsistencies == 1
|
||||
issues = _issues_of_type(result, "etag_cache_inconsistency")
|
||||
assert len(issues) == 1
|
||||
|
||||
def test_heal_mismatch(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
etag_path = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "etag_index.json"
|
||||
etag_path.write_text(json.dumps({"file.txt": "wrong_etag"}))
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.etag_cache_inconsistencies == 1
|
||||
assert result.issues_healed == 1
|
||||
assert not etag_path.exists()
|
||||
|
||||
|
||||
class TestLegacyMetadata:
|
||||
def test_detect_unmigrated(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
|
||||
legacy_meta = storage_root / "mybucket" / ".meta" / "file.txt.meta.json"
|
||||
legacy_meta.parent.mkdir(parents=True)
|
||||
legacy_meta.write_text(json.dumps({"__etag__": "different_value"}))
|
||||
|
||||
meta_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta"
|
||||
index_path = meta_root / "_index.json"
|
||||
index_path.unlink()
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.legacy_metadata_drifts == 1
|
||||
issues = _issues_of_type(result, "legacy_metadata_drift")
|
||||
assert len(issues) == 1
|
||||
assert issues[0].detail == "unmigrated legacy .meta.json"
|
||||
|
||||
def test_detect_drift(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
|
||||
legacy_meta = storage_root / "mybucket" / ".meta" / "file.txt.meta.json"
|
||||
legacy_meta.parent.mkdir(parents=True)
|
||||
legacy_meta.write_text(json.dumps({"__etag__": "different_value"}))
|
||||
|
||||
result = checker.run_now()
|
||||
assert result.legacy_metadata_drifts == 1
|
||||
issues = _issues_of_type(result, "legacy_metadata_drift")
|
||||
assert "differs from index" in issues[0].detail
|
||||
|
||||
def test_heal_unmigrated(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
|
||||
legacy_meta = storage_root / "mybucket" / ".meta" / "file.txt.meta.json"
|
||||
legacy_meta.parent.mkdir(parents=True)
|
||||
legacy_data = {"__etag__": _md5(b"hello"), "__size__": "5"}
|
||||
legacy_meta.write_text(json.dumps(legacy_data))
|
||||
|
||||
meta_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta"
|
||||
index_path = meta_root / "_index.json"
|
||||
index_path.unlink()
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.legacy_metadata_drifts == 1
|
||||
legacy_issues = _issues_of_type(result, "legacy_metadata_drift")
|
||||
assert len(legacy_issues) == 1
|
||||
assert legacy_issues[0].healed
|
||||
assert not legacy_meta.exists()
|
||||
|
||||
index_data = json.loads(index_path.read_text())
|
||||
assert "file.txt" in index_data
|
||||
assert index_data["file.txt"]["metadata"]["__etag__"] == _md5(b"hello")
|
||||
|
||||
def test_heal_drift(self, storage_root, checker):
|
||||
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
|
||||
|
||||
legacy_meta = storage_root / "mybucket" / ".meta" / "file.txt.meta.json"
|
||||
legacy_meta.parent.mkdir(parents=True)
|
||||
legacy_meta.write_text(json.dumps({"__etag__": "different_value"}))
|
||||
|
||||
result = checker.run_now(auto_heal=True)
|
||||
assert result.legacy_metadata_drifts == 1
|
||||
legacy_issues = _issues_of_type(result, "legacy_metadata_drift")
|
||||
assert legacy_issues[0].healed
|
||||
assert not legacy_meta.exists()
|
||||
|
||||
|
||||
class TestDryRun:
    """Dry-run mode must report issues without mutating any state."""

    def test_dry_run_no_changes(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        # Corrupt one indexed object and add one unindexed (orphan) file.
        (storage_root / "mybucket" / "file.txt").write_bytes(b"corrupted")
        (storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan")

        outcome = checker.run_now(auto_heal=True, dry_run=True)
        assert outcome.corrupted_objects == 1
        assert outcome.orphaned_objects == 1
        # auto_heal is requested but dry_run wins: nothing is healed.
        assert outcome.issues_healed == 0

        # The orphan must NOT have been adopted into the index.
        meta_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta"
        index_data = json.loads((meta_root / "_index.json").read_text())
        assert "orphan.txt" not in index_data
class TestBatchSize:
    """The batch_size cap bounds how many objects a single run scans."""

    def test_batch_limits_scan(self, storage_root):
        objects = {f"file{i}.txt": f"data{i}".encode() for i in range(10)}
        _setup_bucket(storage_root, "mybucket", objects)

        limited = IntegrityChecker(
            storage_root=storage_root,
            batch_size=3,
        )
        outcome = limited.run_now()
        # Ten objects exist, but only batch_size may be visited per run.
        assert outcome.objects_scanned <= 3
class TestHistory:
    """Bookkeeping of past scan executions."""

    def test_history_recorded(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})

        checker.run_now()
        entries = checker.get_history()
        assert len(entries) == 1
        assert "corrupted_objects" in entries[0]["result"]

    def test_history_multiple(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})

        # Each run appends exactly one history entry.
        for _ in range(3):
            checker.run_now()
        assert len(checker.get_history()) == 3

    def test_history_pagination(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})

        for _ in range(5):
            checker.run_now()

        # Skip the newest entry, then take the next two.
        page = checker.get_history(limit=2, offset=1)
        assert len(page) == 2
# Credentials of the admin user seeded into the IAM config by the fixtures below.
AUTH_HEADERS = {"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}
|
||||
|
||||
|
||||
class TestAdminAPI:
    """HTTP admin endpoints for the integrity checker.

    The IAM payload and the storage-teardown logic were previously duplicated
    verbatim between the ``integrity_app`` fixture and ``test_disabled_status``;
    they are factored into the two static helpers below.
    """

    @staticmethod
    def _admin_iam_payload():
        # One admin user with full privileges; matches AUTH_HEADERS.
        return {
            "users": [
                {
                    "access_key": "admin",
                    "secret_key": "adminsecret",
                    "display_name": "Admin",
                    "policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy", "iam:*"]}],
                }
            ]
        }

    @staticmethod
    def _shutdown_storage(flask_app):
        # Stop background stats machinery so the test process exits cleanly.
        storage = flask_app.extensions.get("object_storage")
        if storage:
            base = getattr(storage, "storage", storage)
            if hasattr(base, "shutdown_stats"):
                base.shutdown_stats()

    @pytest.fixture
    def integrity_app(self, tmp_path):
        """API app with the integrity checker enabled (no auto-heal, no dry-run)."""
        from app import create_api_app
        storage_root = tmp_path / "data"
        iam_config = tmp_path / "iam.json"
        bucket_policies = tmp_path / "bucket_policies.json"
        iam_config.write_text(json.dumps(self._admin_iam_payload()))
        flask_app = create_api_app({
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": bucket_policies,
            "API_BASE_URL": "http://testserver",
            "INTEGRITY_ENABLED": True,
            "INTEGRITY_AUTO_HEAL": False,
            "INTEGRITY_DRY_RUN": False,
        })
        yield flask_app
        # Teardown: stop storage stats and the integrity background thread.
        self._shutdown_storage(flask_app)
        ic = flask_app.extensions.get("integrity")
        if ic:
            ic.stop()

    def test_status_endpoint(self, integrity_app):
        client = integrity_app.test_client()
        resp = client.get("/admin/integrity/status", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["enabled"] is True
        assert "interval_hours" in data

    def test_run_endpoint(self, integrity_app):
        client = integrity_app.test_client()
        resp = client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["status"] == "started"
        # The scan runs asynchronously; poll until it finishes.
        _wait_scan_done(client, AUTH_HEADERS)
        resp = client.get("/admin/integrity/history?limit=1", headers=AUTH_HEADERS)
        hist = resp.get_json()
        assert len(hist["executions"]) >= 1
        assert "corrupted_objects" in hist["executions"][0]["result"]
        assert "objects_scanned" in hist["executions"][0]["result"]

    def test_run_with_overrides(self, integrity_app):
        client = integrity_app.test_client()
        # dry_run/auto_heal may be overridden per request.
        resp = client.post(
            "/admin/integrity/run",
            headers=AUTH_HEADERS,
            json={"dry_run": True, "auto_heal": True},
        )
        assert resp.status_code == 200
        _wait_scan_done(client, AUTH_HEADERS)

    def test_history_endpoint(self, integrity_app):
        client = integrity_app.test_client()
        client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
        _wait_scan_done(client, AUTH_HEADERS)
        resp = client.get("/admin/integrity/history", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert "executions" in data
        assert len(data["executions"]) >= 1

    def test_auth_required(self, integrity_app):
        client = integrity_app.test_client()
        # No credentials: either 401 or 403 is acceptable.
        resp = client.get("/admin/integrity/status")
        assert resp.status_code in (401, 403)

    def test_disabled_status(self, tmp_path):
        """Status endpoint still answers when the checker is disabled."""
        from app import create_api_app
        storage_root = tmp_path / "data2"
        iam_config = tmp_path / "iam2.json"
        bucket_policies = tmp_path / "bp2.json"
        iam_config.write_text(json.dumps(self._admin_iam_payload()))
        flask_app = create_api_app({
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": bucket_policies,
            "API_BASE_URL": "http://testserver",
            "INTEGRITY_ENABLED": False,
        })
        c = flask_app.test_client()
        resp = c.get("/admin/integrity/status", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["enabled"] is False

        self._shutdown_storage(flask_app)
class TestMultipleBuckets:
    """A single run covers every bucket under the storage root."""

    def test_scans_multiple_buckets(self, storage_root, checker):
        _setup_bucket(storage_root, "bucket1", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket2", {"b.txt": b"bbb"})

        outcome = checker.run_now()
        assert outcome.buckets_scanned == 2
        assert outcome.objects_scanned >= 2
        assert outcome.corrupted_objects == 0
class TestGetStatus:
    """Shape of the checker's status report."""

    def test_status_fields(self, checker):
        status = checker.get_status()
        # Core configuration/runtime fields must always be present.
        for field in ("enabled", "running", "interval_hours", "batch_size", "auto_heal", "dry_run"):
            assert field in status

    def test_status_includes_cursor(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        checker.run_now()
        status = checker.get_status()
        assert "cursor" in status
        assert status["cursor"]["tracked_buckets"] == 1
        assert "mybucket" in status["cursor"]["buckets"]
class TestUnifiedBatchCounter:
    """Every issue category consumes the same per-run batch budget."""

    def test_orphaned_objects_count_toward_batch(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {})
        # Ten orphans (data files with no index entry).
        for i in range(10):
            (storage_root / "mybucket" / f"orphan{i}.txt").write_bytes(f"data{i}".encode())

        limited = IntegrityChecker(storage_root=storage_root, batch_size=3)
        outcome = limited.run_now()
        assert outcome.objects_scanned <= 3

    def test_phantom_metadata_counts_toward_batch(self, storage_root):
        objects = {f"file{i}.txt": f"data{i}".encode() for i in range(10)}
        _setup_bucket(storage_root, "mybucket", objects)
        # Delete all data files so only phantom index entries remain.
        for i in range(10):
            (storage_root / "mybucket" / f"file{i}.txt").unlink()

        limited = IntegrityChecker(storage_root=storage_root, batch_size=5)
        outcome = limited.run_now()
        assert outcome.objects_scanned <= 5

    def test_all_check_types_contribute(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {"valid.txt": b"hello"})
        (storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan")

        roomy = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        outcome = roomy.run_now()
        # Valid object + orphan + index checks: more than two scan units.
        assert outcome.objects_scanned > 2
class TestCursorRotation:
    """Bucket rotation driven by the persistent cursor store.

    Change from the previous revision: ``test_rotation_covers_all_buckets``
    contained dead code (an unused ``scanned_buckets_1`` set, a no-op
    ``for ... in [storage_root]: pass`` loop, and unused ``result2``/``result3``
    bindings); the dead code is removed, behavior is unchanged.
    """

    def test_oldest_bucket_scanned_first(self, storage_root):
        _setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
        _setup_bucket(storage_root, "bucket-c", {"c.txt": b"ccc"})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=5)

        # Seed explicit last-scanned timestamps; oldest first is a, c, b.
        checker.cursor_store.update_bucket("bucket-a", 1000.0)
        checker.cursor_store.update_bucket("bucket-b", 3000.0)
        checker.cursor_store.update_bucket("bucket-c", 2000.0)

        ordered = checker.cursor_store.get_bucket_order(["bucket-a", "bucket-b", "bucket-c"])
        assert ordered[0] == "bucket-a"
        assert ordered[1] == "bucket-c"
        assert ordered[2] == "bucket-b"

    def test_never_scanned_buckets_first(self, storage_root):
        _setup_bucket(storage_root, "bucket-old", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-new", {"b.txt": b"bbb"})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)

        # Only bucket-old has ever been scanned; the unseen bucket wins.
        checker.cursor_store.update_bucket("bucket-old", time.time())

        ordered = checker.cursor_store.get_bucket_order(["bucket-old", "bucket-new"])
        assert ordered[0] == "bucket-new"

    def test_rotation_covers_all_buckets(self, storage_root):
        """Repeated runs eventually register every bucket in the cursor store."""
        for name in ["bucket-a", "bucket-b", "bucket-c"]:
            _setup_bucket(storage_root, name, {f"{name}.txt": name.encode()})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=4)

        result1 = checker.run_now()
        assert result1.buckets_scanned >= 1

        # Two further passes let the rotation reach the remaining buckets.
        checker.run_now()
        checker.run_now()

        cursor_info = checker.cursor_store.get_info()
        assert cursor_info["tracked_buckets"] == 3

    def test_cursor_persistence(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})

        checker1 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        checker1.run_now()

        cursor1 = checker1.cursor_store.get_info()
        assert cursor1["tracked_buckets"] == 1
        assert "mybucket" in cursor1["buckets"]

        # A fresh checker instance must reload the persisted cursor state.
        checker2 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        cursor2 = checker2.cursor_store.get_info()
        assert cursor2["tracked_buckets"] == 1
        assert "mybucket" in cursor2["buckets"]

    def test_stale_cursor_cleanup(self, storage_root):
        _setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        checker.run_now()

        # Simulate bucket deletion: remove data and metadata trees.
        import shutil
        shutil.rmtree(storage_root / "bucket-b")
        meta_b = storage_root / ".myfsio.sys" / "buckets" / "bucket-b"
        if meta_b.exists():
            shutil.rmtree(meta_b)

        checker.run_now()

        # The cursor entry for the vanished bucket must be dropped.
        cursor_info = checker.cursor_store.get_info()
        assert "bucket-b" not in cursor_info["buckets"]
        assert "bucket-a" in cursor_info["buckets"]

    def test_cursor_updates_after_scan(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        before = time.time()
        checker.run_now()
        after = time.time()

        cursor_info = checker.cursor_store.get_info()
        entry = cursor_info["buckets"]["mybucket"]
        # last_scanned must fall inside the run window and the pass completed.
        assert before <= entry["last_scanned"] <= after
        assert entry["completed"] is True
class TestIntraBucketCursor:
    """Resumable per-bucket object cursor (``last_key`` / ``completed``).

    Change from the previous revision: in
    ``test_deleted_cursor_key_skips_gracefully`` the ``index_path`` was
    computed twice — once via a dead ternary expression, then immediately
    recomputed by an equivalent if/else. The redundant ternary is removed;
    behavior is unchanged.
    """

    def test_resumes_from_cursor_key(self, storage_root):
        objects = {f"file_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(10)}
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
        result1 = checker.run_now()
        assert result1.objects_scanned == 3

        cursor_info = checker.cursor_store.get_info()
        entry = cursor_info["buckets"]["mybucket"]
        # Mid-bucket: a resume key is recorded and the pass is incomplete.
        assert entry["last_key"] is not None
        assert entry["completed"] is False

        result2 = checker.run_now()
        assert result2.objects_scanned == 3

        # The second pass resumed strictly past the first cursor position.
        cursor_after = checker.cursor_store.get_info()["buckets"]["mybucket"]
        assert cursor_after["last_key"] > entry["last_key"]

    def test_cursor_resets_after_full_pass(self, storage_root):
        objects = {f"file_{i}.txt": f"data{i}".encode() for i in range(3)}
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=100)
        checker.run_now()

        cursor_info = checker.cursor_store.get_info()
        entry = cursor_info["buckets"]["mybucket"]
        # A completed pass clears the resume key.
        assert entry["last_key"] is None
        assert entry["completed"] is True

    def test_full_coverage_across_cycles(self, storage_root):
        objects = {f"obj_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(10)}
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
        all_scanned = 0
        for _ in range(10):
            result = checker.run_now()
            all_scanned += result.objects_scanned
            if checker.cursor_store.get_info()["buckets"]["mybucket"]["completed"]:
                break

        # Every object must have been visited across the cycles.
        assert all_scanned >= 10

    def test_deleted_cursor_key_skips_gracefully(self, storage_root):
        objects = {f"file_{chr(ord('a') + i)}.txt": f"data{i}".encode() for i in range(6)}
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
        checker.run_now()

        cursor_info = checker.cursor_store.get_info()
        cursor_key = cursor_info["buckets"]["mybucket"]["last_key"]
        assert cursor_key is not None

        # Delete the object the cursor points at, plus its index entry.
        obj_path = storage_root / "mybucket" / cursor_key
        meta_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta"
        key_path = Path(cursor_key)
        if key_path.parent == Path("."):
            index_path = meta_root / "_index.json"
        else:
            index_path = meta_root / key_path.parent / "_index.json"
        if obj_path.exists():
            obj_path.unlink()
        if index_path.exists():
            index_data = json.loads(index_path.read_text())
            index_data.pop(key_path.name, None)
            index_path.write_text(json.dumps(index_data))

        # The next run must not fail on the vanished cursor target.
        result2 = checker.run_now()
        assert result2.objects_scanned > 0

    def test_incomplete_buckets_prioritized(self, storage_root):
        _setup_bucket(storage_root, "bucket-a", {f"a{i}.txt": b"a" for i in range(5)})
        _setup_bucket(storage_root, "bucket-b", {f"b{i}.txt": b"b" for i in range(5)})

        checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
        checker.run_now()

        # At least one bucket was left mid-pass (has a resume key).
        cursor_info = checker.cursor_store.get_info()
        incomplete = [
            name for name, info in cursor_info["buckets"].items()
            if info.get("last_key") is not None
        ]
        assert len(incomplete) >= 1

        result2 = checker.run_now()
        assert result2.objects_scanned > 0

    def test_cursor_skips_nested_directories(self, storage_root):
        objects = {
            "aaa/file1.txt": b"a1",
            "aaa/file2.txt": b"a2",
            "bbb/file1.txt": b"b1",
            "bbb/file2.txt": b"b2",
            "ccc/file1.txt": b"c1",
            "ccc/file2.txt": b"c2",
        }
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
        result1 = checker.run_now()
        assert result1.objects_scanned == 4

        cursor_info = checker.cursor_store.get_info()
        cursor_key = cursor_info["buckets"]["mybucket"]["last_key"]
        assert cursor_key is not None
        # Four objects into a sorted walk lands within aaa/ or bbb/.
        assert cursor_key.startswith("aaa/") or cursor_key.startswith("bbb/")

        result2 = checker.run_now()
        assert result2.objects_scanned >= 2

        # Keep running until the bucket reports a completed pass.
        all_scanned = result1.objects_scanned + result2.objects_scanned
        for _ in range(10):
            if checker.cursor_store.get_info()["buckets"]["mybucket"]["completed"]:
                break
            r = checker.run_now()
            all_scanned += r.objects_scanned

        assert all_scanned >= 6

    def test_sorted_walk_order(self, storage_root):
        # "bar.txt" and directory "bar/" exercise file-vs-dir sort ordering.
        objects = {
            "bar.txt": b"bar",
            "bar/inner.txt": b"inner",
            "abc.txt": b"abc",
            "zzz/deep.txt": b"deep",
        }
        _setup_bucket(storage_root, "mybucket", objects)

        checker = IntegrityChecker(storage_root=storage_root, batch_size=100)
        result = checker.run_now()
        assert result.objects_scanned >= 4
        assert result.total_issues == 0
@@ -1,350 +0,0 @@
|
||||
import hashlib
import io
import os
import secrets
import sys
from pathlib import Path

import pytest

# Make the repository root importable so `myfsio_core` (built in-tree) and the
# `app` package resolve without an installed distribution.
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))

try:
    import myfsio_core as _rc
    HAS_RUST = True
except ImportError:
    # The native extension is optional; when it is absent every test in this
    # module is skipped via the module-level pytestmark below.
    _rc = None
    HAS_RUST = False

pytestmark = pytest.mark.skipif(not HAS_RUST, reason="myfsio_core not available")
class TestStreamToFileWithMd5:
    """Behavior of the native stream_to_file_with_md5 helper."""

    def _spool(self, payload, tmp_path, **kwargs):
        # Feed an in-memory stream to the helper; returns (path, md5_hex, size).
        return _rc.stream_to_file_with_md5(io.BytesIO(payload), str(tmp_path / "tmp"), **kwargs)

    def test_basic_write(self, tmp_path):
        payload = b"hello world" * 1000

        out_path, digest, written = self._spool(payload, tmp_path)

        assert written == len(payload)
        assert digest == hashlib.md5(payload).hexdigest()
        assert Path(out_path).exists()
        assert Path(out_path).read_bytes() == payload

    def test_empty_stream(self, tmp_path):
        out_path, digest, written = self._spool(b"", tmp_path)

        assert written == 0
        assert digest == hashlib.md5(b"").hexdigest()
        assert Path(out_path).read_bytes() == b""

    def test_large_data(self, tmp_path):
        payload = os.urandom(1024 * 1024 * 2)

        out_path, digest, written = self._spool(payload, tmp_path)

        assert written == len(payload)
        assert digest == hashlib.md5(payload).hexdigest()

    def test_custom_chunk_size(self, tmp_path):
        payload = b"x" * 10000

        # A tiny chunk size forces many read iterations.
        out_path, digest, written = self._spool(payload, tmp_path, chunk_size=128)

        assert written == len(payload)
        assert digest == hashlib.md5(payload).hexdigest()
class TestAssemblePartsWithMd5:
    """Behavior of the native assemble_parts_with_md5 helper."""

    def _write_parts(self, tmp_path, chunks):
        # Materialize each chunk as a part file; returns (paths, concatenation).
        paths = []
        for idx, chunk in enumerate(chunks):
            part = tmp_path / f"part{idx}"
            part.write_bytes(chunk)
            paths.append(str(part))
        return paths, b"".join(chunks)

    def test_basic_assembly(self, tmp_path):
        parts, combined = self._write_parts(
            tmp_path, [f"part{i}data".encode() * 100 for i in range(3)]
        )

        dest = str(tmp_path / "output")
        digest = _rc.assemble_parts_with_md5(parts, dest)

        assert digest == hashlib.md5(combined).hexdigest()
        assert Path(dest).read_bytes() == combined

    def test_single_part(self, tmp_path):
        parts, combined = self._write_parts(tmp_path, [b"single part data"])

        dest = str(tmp_path / "output")
        digest = _rc.assemble_parts_with_md5(parts, dest)

        assert digest == hashlib.md5(combined).hexdigest()
        assert Path(dest).read_bytes() == combined

    def test_empty_parts_list(self):
        with pytest.raises(ValueError, match="No parts"):
            _rc.assemble_parts_with_md5([], "dummy")

    def test_missing_part_file(self, tmp_path):
        with pytest.raises(OSError):
            _rc.assemble_parts_with_md5(
                [str(tmp_path / "nonexistent")], str(tmp_path / "out")
            )

    def test_large_parts(self, tmp_path):
        parts, combined = self._write_parts(
            tmp_path, [os.urandom(512 * 1024) for _ in range(5)]
        )

        dest = str(tmp_path / "output")
        digest = _rc.assemble_parts_with_md5(parts, dest)

        assert digest == hashlib.md5(combined).hexdigest()
        assert Path(dest).read_bytes() == combined
class TestEncryptDecryptStreamChunked:
    """Chunked AES-GCM file encryption/decryption via the native module.

    Wire format exercised here (established by the cross-compat tests below):
    a 4-byte big-endian chunk count header, then for each chunk a 4-byte
    big-endian ciphertext length followed by the AES-GCM ciphertext. Each
    chunk's nonce is HKDF-SHA256-derived from the base nonce and chunk index.
    """

    def _python_derive_chunk_nonce(self, base_nonce, chunk_index):
        # Pure-Python mirror of the native per-chunk nonce derivation:
        # HKDF-SHA256(salt=base_nonce, info=big-endian 4-byte chunk index)
        # over the fixed label b"chunk_nonce", yielding a 12-byte GCM nonce.
        from cryptography.hazmat.primitives.kdf.hkdf import HKDF
        from cryptography.hazmat.primitives import hashes
        hkdf = HKDF(
            algorithm=hashes.SHA256(),
            length=12,
            salt=base_nonce,
            info=chunk_index.to_bytes(4, "big"),
        )
        return hkdf.derive(b"chunk_nonce")

    def test_encrypt_decrypt_roundtrip(self, tmp_path):
        data = b"Hello, encryption!" * 500
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce
        )
        assert chunk_count > 0

        # Decryption must report the same chunk count and restore the bytes.
        chunk_count_dec = _rc.decrypt_stream_chunked(
            encrypted_path, decrypted_path, key, base_nonce
        )
        assert chunk_count_dec == chunk_count
        assert Path(decrypted_path).read_bytes() == data

    def test_empty_file(self, tmp_path):
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "empty")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(b"")

        # An empty input produces zero chunks on both directions.
        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce
        )
        assert chunk_count == 0

        chunk_count_dec = _rc.decrypt_stream_chunked(
            encrypted_path, decrypted_path, key, base_nonce
        )
        assert chunk_count_dec == 0
        assert Path(decrypted_path).read_bytes() == b""

    def test_custom_chunk_size(self, tmp_path):
        data = os.urandom(10000)
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        # 10000 bytes at 1024 per chunk -> 10 chunks (last one partial).
        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce, chunk_size=1024
        )
        assert chunk_count == 10

        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)
        assert Path(decrypted_path).read_bytes() == data

    def test_invalid_key_length(self, tmp_path):
        input_path = str(tmp_path / "in")
        Path(input_path).write_bytes(b"data")

        # AES-256 requires exactly a 32-byte key.
        with pytest.raises(ValueError, match="32 bytes"):
            _rc.encrypt_stream_chunked(
                input_path, str(tmp_path / "out"), b"short", secrets.token_bytes(12)
            )

    def test_invalid_nonce_length(self, tmp_path):
        input_path = str(tmp_path / "in")
        Path(input_path).write_bytes(b"data")

        # GCM base nonce must be exactly 12 bytes.
        with pytest.raises(ValueError, match="12 bytes"):
            _rc.encrypt_stream_chunked(
                input_path, str(tmp_path / "out"), secrets.token_bytes(32), b"short"
            )

    def test_wrong_key_fails_decrypt(self, tmp_path):
        data = b"sensitive data"
        key = secrets.token_bytes(32)
        wrong_key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)
        _rc.encrypt_stream_chunked(input_path, encrypted_path, key, base_nonce)

        # GCM authentication must reject a mismatched key.
        with pytest.raises((ValueError, OSError)):
            _rc.decrypt_stream_chunked(
                encrypted_path, decrypted_path, wrong_key, base_nonce
            )

    def test_cross_compat_python_encrypt_rust_decrypt(self, tmp_path):
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM

        data = b"cross compat test data" * 100
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)
        chunk_size = 1024

        # Hand-write the chunked wire format in Python, then decrypt natively.
        encrypted_path = str(tmp_path / "py_encrypted")
        with open(encrypted_path, "wb") as f:
            # Placeholder for the 4-byte chunk-count header, patched below.
            f.write(b"\x00\x00\x00\x00")
            aesgcm = AESGCM(key)
            chunk_index = 0
            offset = 0
            while offset < len(data):
                chunk = data[offset:offset + chunk_size]
                nonce = self._python_derive_chunk_nonce(base_nonce, chunk_index)
                enc = aesgcm.encrypt(nonce, chunk, None)
                # Per-chunk record: 4-byte big-endian length + ciphertext.
                f.write(len(enc).to_bytes(4, "big"))
                f.write(enc)
                chunk_index += 1
                offset += chunk_size
            # Back-patch the real chunk count into the header.
            f.seek(0)
            f.write(chunk_index.to_bytes(4, "big"))

        decrypted_path = str(tmp_path / "rust_decrypted")
        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)
        assert Path(decrypted_path).read_bytes() == data

    def test_cross_compat_rust_encrypt_python_decrypt(self, tmp_path):
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM

        data = b"cross compat reverse test" * 100
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)
        chunk_size = 1024

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "rust_encrypted")
        Path(input_path).write_bytes(data)

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce, chunk_size=chunk_size
        )

        # Parse the native wire format in Python and decrypt chunk by chunk.
        aesgcm = AESGCM(key)
        with open(encrypted_path, "rb") as f:
            count_bytes = f.read(4)
            assert int.from_bytes(count_bytes, "big") == chunk_count

            decrypted = b""
            for i in range(chunk_count):
                size = int.from_bytes(f.read(4), "big")
                enc_chunk = f.read(size)
                nonce = self._python_derive_chunk_nonce(base_nonce, i)
                decrypted += aesgcm.decrypt(nonce, enc_chunk, None)

        assert decrypted == data

    def test_large_file_roundtrip(self, tmp_path):
        data = os.urandom(1024 * 1024)
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "large")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        _rc.encrypt_stream_chunked(input_path, encrypted_path, key, base_nonce)
        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)

        assert Path(decrypted_path).read_bytes() == data
class TestStreamingEncryptorFileMethods:
    """File-based encrypt/decrypt paths of StreamingEncryptor."""

    def _make_encryptor(self, tmp_path):
        # Build an encryptor backed by a local master key under tmp_path.
        from app.encryption import LocalKeyEncryption, StreamingEncryptor
        provider = LocalKeyEncryption(tmp_path / "master.key")
        return StreamingEncryptor(provider, chunk_size=512)

    def test_encrypt_file_decrypt_file_roundtrip(self, tmp_path):
        encryptor = self._make_encryptor(tmp_path)

        payload = b"file method test data" * 200
        src = str(tmp_path / "input")
        enc_path = str(tmp_path / "encrypted")
        dec_path = str(tmp_path / "decrypted")

        Path(src).write_bytes(payload)

        metadata = encryptor.encrypt_file(src, enc_path)
        assert metadata.algorithm == "AES256"

        encryptor.decrypt_file(enc_path, dec_path, metadata)
        assert Path(dec_path).read_bytes() == payload

    def test_encrypt_file_matches_encrypt_stream(self, tmp_path):
        encryptor = self._make_encryptor(tmp_path)

        payload = b"stream vs file comparison" * 100
        src = str(tmp_path / "input")
        Path(src).write_bytes(payload)

        # File-based path round-trips the payload.
        file_enc = str(tmp_path / "file_enc")
        file_meta = encryptor.encrypt_file(src, file_enc)

        file_dec = str(tmp_path / "file_dec")
        encryptor.decrypt_file(file_enc, file_dec, file_meta)
        assert Path(file_dec).read_bytes() == payload

        # Stream-based path round-trips the same payload.
        stream_enc, stream_meta = encryptor.encrypt_stream(io.BytesIO(payload))
        stream_dec = encryptor.decrypt_stream(stream_enc, stream_meta)
        assert stream_dec.read() == payload
@@ -1,4 +1,4 @@
|
||||
Flask>=3.1.3
|
||||
Flask>=3.1.2
|
||||
Flask-Limiter>=4.1.1
|
||||
Flask-Cors>=6.0.2
|
||||
Flask-WTF>=1.2.2
|
||||
@@ -6,8 +6,8 @@ python-dotenv>=1.2.1
|
||||
pytest>=9.0.2
|
||||
requests>=2.32.5
|
||||
boto3>=1.42.14
|
||||
granian>=2.7.2
|
||||
psutil>=7.2.2
|
||||
cryptography>=46.0.5
|
||||
waitress>=3.0.2
|
||||
psutil>=7.1.3
|
||||
cryptography>=46.0.3
|
||||
defusedxml>=0.7.1
|
||||
duckdb>=1.5.1
|
||||
duckdb>=1.4.4
|
||||
162
run.py
Normal file
162
run.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Helper script to run the API server, UI server, or both."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
import multiprocessing
|
||||
from multiprocessing import Process
|
||||
from pathlib import Path
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment overrides from each known location, in order. Because
# override=True, keys present in a later file override earlier ones (the
# cwd-local files win over the system-wide /opt/myfsio/myfsio.env).
for _env_file in [
    Path("/opt/myfsio/myfsio.env"),
    Path.cwd() / ".env",
    Path.cwd() / "myfsio.env",
]:
    if _env_file.exists():
        load_dotenv(_env_file, override=True)
||||
from typing import Optional
|
||||
|
||||
from app import create_api_app, create_ui_app
|
||||
from app.config import AppConfig
|
||||
|
||||
|
||||
def _server_host() -> str:
|
||||
"""Return the bind host for API and UI servers."""
|
||||
return os.getenv("APP_HOST", "0.0.0.0")
|
||||
|
||||
|
||||
def _is_debug_enabled() -> bool:
|
||||
return os.getenv("FLASK_DEBUG", "0").lower() in ("1", "true", "yes")
|
||||
|
||||
|
||||
def _is_frozen() -> bool:
|
||||
"""Check if running as a compiled binary (PyInstaller/Nuitka)."""
|
||||
return getattr(sys, 'frozen', False) or '__compiled__' in globals()
|
||||
|
||||
|
||||
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
||||
app = create_api_app()
|
||||
if prod:
|
||||
from waitress import serve
|
||||
if config:
|
||||
serve(
|
||||
app,
|
||||
host=_server_host(),
|
||||
port=port,
|
||||
ident="MyFSIO",
|
||||
threads=config.server_threads,
|
||||
connection_limit=config.server_connection_limit,
|
||||
backlog=config.server_backlog,
|
||||
channel_timeout=config.server_channel_timeout,
|
||||
)
|
||||
else:
|
||||
serve(app, host=_server_host(), port=port, ident="MyFSIO")
|
||||
else:
|
||||
debug = _is_debug_enabled()
|
||||
if debug:
|
||||
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
|
||||
app.run(host=_server_host(), port=port, debug=debug)
|
||||
|
||||
|
||||
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
||||
app = create_ui_app()
|
||||
if prod:
|
||||
from waitress import serve
|
||||
if config:
|
||||
serve(
|
||||
app,
|
||||
host=_server_host(),
|
||||
port=port,
|
||||
ident="MyFSIO",
|
||||
threads=config.server_threads,
|
||||
connection_limit=config.server_connection_limit,
|
||||
backlog=config.server_backlog,
|
||||
channel_timeout=config.server_channel_timeout,
|
||||
)
|
||||
else:
|
||||
serve(app, host=_server_host(), port=port, ident="MyFSIO")
|
||||
else:
|
||||
debug = _is_debug_enabled()
|
||||
if debug:
|
||||
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
|
||||
app.run(host=_server_host(), port=port, debug=debug)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
multiprocessing.freeze_support()
|
||||
if _is_frozen():
|
||||
multiprocessing.set_start_method("spawn", force=True)
|
||||
|
||||
parser = argparse.ArgumentParser(description="Run the S3 clone services.")
|
||||
parser.add_argument("--mode", choices=["api", "ui", "both"], default="both")
|
||||
parser.add_argument("--api-port", type=int, default=5000)
|
||||
parser.add_argument("--ui-port", type=int, default=5100)
|
||||
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
|
||||
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
|
||||
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
|
||||
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.check_config or args.show_config:
|
||||
config = AppConfig.from_env()
|
||||
config.print_startup_summary()
|
||||
if args.check_config:
|
||||
issues = config.validate_and_report()
|
||||
critical = [i for i in issues if i.startswith("CRITICAL:")]
|
||||
sys.exit(1 if critical else 0)
|
||||
sys.exit(0)
|
||||
|
||||
prod_mode = args.prod or (_is_frozen() and not args.dev)
|
||||
|
||||
config = AppConfig.from_env()
|
||||
|
||||
first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
|
||||
is_first_run = not first_run_marker.exists()
|
||||
|
||||
if is_first_run:
|
||||
config.print_startup_summary()
|
||||
|
||||
issues = config.validate_and_report()
|
||||
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
|
||||
if critical_issues:
|
||||
print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
first_run_marker.parent.mkdir(parents=True, exist_ok=True)
|
||||
first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
if prod_mode:
|
||||
print("Running in production mode (Waitress)")
|
||||
issues = config.validate_and_report()
|
||||
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
|
||||
if critical_issues:
|
||||
for issue in critical_issues:
|
||||
print(f" {issue}")
|
||||
print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("Running in development mode (Flask dev server)")
|
||||
|
||||
if args.mode in {"api", "both"}:
|
||||
print(f"Starting API server on port {args.api_port}...")
|
||||
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
|
||||
api_proc.start()
|
||||
else:
|
||||
api_proc = None
|
||||
|
||||
if args.mode in {"ui", "both"}:
|
||||
print(f"Starting UI server on port {args.ui_port}...")
|
||||
serve_ui(args.ui_port, prod_mode, config)
|
||||
elif api_proc:
|
||||
try:
|
||||
api_proc.join()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
@@ -1,9 +0,0 @@
|
||||
.git
|
||||
.gitignore
|
||||
logs
|
||||
data
|
||||
tmp
|
||||
myfsio-engine/target
|
||||
myfsio-engine/tests
|
||||
Dockerfile
|
||||
.dockerignore
|
||||
@@ -1,50 +0,0 @@
|
||||
FROM rust:1-slim-bookworm AS builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY myfsio-engine ./myfsio-engine
|
||||
|
||||
RUN cd myfsio-engine \
|
||||
&& cargo build --release --bin myfsio-server \
|
||||
&& strip target/release/myfsio-server
|
||||
|
||||
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends ca-certificates curl \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
COPY --from=builder /build/myfsio-engine/target/release/myfsio-server /usr/local/bin/myfsio-server
|
||||
COPY --from=builder /build/myfsio-engine/crates/myfsio-server/templates /app/templates
|
||||
COPY --from=builder /build/myfsio-engine/crates/myfsio-server/static /app/static
|
||||
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
|
||||
RUN chmod +x /app/docker-entrypoint.sh \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
USER myfsio
|
||||
|
||||
EXPOSE 5000
|
||||
EXPOSE 5100
|
||||
ENV HOST=0.0.0.0 \
|
||||
PORT=5000 \
|
||||
UI_PORT=5100 \
|
||||
STORAGE_ROOT=/app/data \
|
||||
TEMPLATES_DIR=/app/templates \
|
||||
STATIC_DIR=/app/static \
|
||||
RUST_LOG=info
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD curl -fsS "http://localhost:${PORT}/myfsio/health" || exit 1
|
||||
|
||||
CMD ["/app/docker-entrypoint.sh"]
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
exec /usr/local/bin/myfsio-server
|
||||
5223
rust/myfsio-engine/Cargo.lock
generated
5223
rust/myfsio-engine/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,61 +0,0 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/myfsio-common",
|
||||
"crates/myfsio-auth",
|
||||
"crates/myfsio-crypto",
|
||||
"crates/myfsio-storage",
|
||||
"crates/myfsio-xml",
|
||||
"crates/myfsio-server",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.3"
|
||||
edition = "2021"
|
||||
|
||||
[workspace.dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
axum = { version = "0.8" }
|
||||
tower = { version = "0.5" }
|
||||
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip"] }
|
||||
hyper = { version = "1" }
|
||||
bytes = "1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
md-5 = "0.10"
|
||||
hex = "0.4"
|
||||
aes = "0.8"
|
||||
aes-gcm = "0.10"
|
||||
cbc = { version = "0.1", features = ["alloc"] }
|
||||
hkdf = "0.12"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
parking_lot = "0.12"
|
||||
lru = "0.14"
|
||||
percent-encoding = "2"
|
||||
regex = "1"
|
||||
unicode-normalization = "0.1"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = "0.3"
|
||||
thiserror = "2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
base64 = "0.22"
|
||||
tokio-util = { version = "0.7", features = ["io"] }
|
||||
futures = "0.3"
|
||||
dashmap = "6"
|
||||
crc32fast = "1"
|
||||
duckdb = { version = "1", features = ["bundled"] }
|
||||
reqwest = { version = "0.12", default-features = false, features = ["stream", "rustls-tls", "json"] }
|
||||
aws-sdk-s3 = { version = "1", features = ["behavior-version-latest", "rt-tokio"] }
|
||||
aws-config = { version = "1", features = ["behavior-version-latest"] }
|
||||
aws-credential-types = "1"
|
||||
aws-smithy-runtime-api = "1"
|
||||
aws-smithy-types = "1"
|
||||
async-trait = "0.1"
|
||||
tera = "1"
|
||||
cookie = "0.18"
|
||||
subtle = "2"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
dotenvy = "0.15"
|
||||
@@ -1,27 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-auth"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
hmac = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
aes = { workspace = true }
|
||||
cbc = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
pbkdf2 = "0.12"
|
||||
rand = "0.8"
|
||||
lru = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
percent-encoding = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
@@ -1,118 +0,0 @@
|
||||
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, BlockEncryptMut, KeyIvInit};
|
||||
use base64::{engine::general_purpose::URL_SAFE, Engine};
|
||||
use hmac::{Hmac, Mac};
|
||||
use rand::RngCore;
|
||||
use sha2::Sha256;
|
||||
|
||||
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
|
||||
type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
pub fn derive_fernet_key(secret: &str) -> String {
|
||||
let mut derived = [0u8; 32];
|
||||
pbkdf2::pbkdf2_hmac::<Sha256>(
|
||||
secret.as_bytes(),
|
||||
b"myfsio-iam-encryption",
|
||||
100_000,
|
||||
&mut derived,
|
||||
);
|
||||
URL_SAFE.encode(derived)
|
||||
}
|
||||
|
||||
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
|
||||
let key_bytes = URL_SAFE
|
||||
.decode(key_b64)
|
||||
.map_err(|_| "invalid fernet key base64")?;
|
||||
if key_bytes.len() != 32 {
|
||||
return Err("fernet key must be 32 bytes");
|
||||
}
|
||||
|
||||
let signing_key = &key_bytes[..16];
|
||||
let encryption_key = &key_bytes[16..];
|
||||
|
||||
let token_bytes = URL_SAFE
|
||||
.decode(token)
|
||||
.map_err(|_| "invalid fernet token base64")?;
|
||||
|
||||
if token_bytes.len() < 57 {
|
||||
return Err("fernet token too short");
|
||||
}
|
||||
|
||||
if token_bytes[0] != 0x80 {
|
||||
return Err("invalid fernet version");
|
||||
}
|
||||
|
||||
let hmac_offset = token_bytes.len() - 32;
|
||||
let payload = &token_bytes[..hmac_offset];
|
||||
let expected_hmac = &token_bytes[hmac_offset..];
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
|
||||
mac.update(payload);
|
||||
mac.verify_slice(expected_hmac)
|
||||
.map_err(|_| "HMAC verification failed")?;
|
||||
|
||||
let iv = &token_bytes[9..25];
|
||||
let ciphertext = &token_bytes[25..hmac_offset];
|
||||
|
||||
let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
|
||||
.decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
|
||||
.map_err(|_| "AES-CBC decryption failed")?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
pub fn encrypt(key_b64: &str, plaintext: &[u8]) -> Result<String, &'static str> {
|
||||
let key_bytes = URL_SAFE
|
||||
.decode(key_b64)
|
||||
.map_err(|_| "invalid fernet key base64")?;
|
||||
if key_bytes.len() != 32 {
|
||||
return Err("fernet key must be 32 bytes");
|
||||
}
|
||||
|
||||
let signing_key = &key_bytes[..16];
|
||||
let encryption_key = &key_bytes[16..];
|
||||
|
||||
let mut iv = [0u8; 16];
|
||||
rand::thread_rng().fill_bytes(&mut iv);
|
||||
|
||||
let timestamp = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.map_err(|_| "system time error")?
|
||||
.as_secs();
|
||||
|
||||
let ciphertext = Aes128CbcEnc::new(encryption_key.into(), (&iv).into())
|
||||
.encrypt_padded_vec_mut::<Pkcs7>(plaintext);
|
||||
|
||||
let mut payload = Vec::with_capacity(1 + 8 + 16 + ciphertext.len());
|
||||
payload.push(0x80);
|
||||
payload.extend_from_slice(×tamp.to_be_bytes());
|
||||
payload.extend_from_slice(&iv);
|
||||
payload.extend_from_slice(&ciphertext);
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
|
||||
mac.update(&payload);
|
||||
let tag = mac.finalize().into_bytes();
|
||||
|
||||
let mut token_bytes = payload;
|
||||
token_bytes.extend_from_slice(&tag);
|
||||
Ok(URL_SAFE.encode(&token_bytes))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_derive_fernet_key_format() {
|
||||
let key = derive_fernet_key("test-secret");
|
||||
let decoded = URL_SAFE.decode(&key).unwrap();
|
||||
assert_eq!(decoded.len(), 32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roundtrip_with_python_compat() {
|
||||
let key = derive_fernet_key("dev-secret-key");
|
||||
let decoded = URL_SAFE.decode(&key).unwrap();
|
||||
assert_eq!(decoded.len(), 32);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +0,0 @@
|
||||
mod fernet;
|
||||
pub mod iam;
|
||||
pub mod principal;
|
||||
pub mod sigv4;
|
||||
@@ -1 +0,0 @@
|
||||
pub use myfsio_common::types::Principal;
|
||||
@@ -1,287 +0,0 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
struct CacheEntry {
|
||||
key: Vec<u8>,
|
||||
created: Instant,
|
||||
}
|
||||
|
||||
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
|
||||
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
|
||||
|
||||
const CACHE_TTL_SECS: u64 = 60;
|
||||
|
||||
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
|
||||
.remove(b'-')
|
||||
.remove(b'_')
|
||||
.remove(b'.')
|
||||
.remove(b'~');
|
||||
|
||||
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
|
||||
mac.update(msg);
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
fn sha256_hex(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
fn aws_uri_encode(input: &str) -> String {
|
||||
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
|
||||
}
|
||||
|
||||
pub fn derive_signing_key_cached(
|
||||
secret_key: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
) -> Vec<u8> {
|
||||
let cache_key = (
|
||||
secret_key.to_owned(),
|
||||
date_stamp.to_owned(),
|
||||
region.to_owned(),
|
||||
service.to_owned(),
|
||||
);
|
||||
|
||||
{
|
||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
||||
if let Some(entry) = cache.get(&cache_key) {
|
||||
if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
|
||||
return entry.key.clone();
|
||||
}
|
||||
cache.pop(&cache_key);
|
||||
}
|
||||
}
|
||||
|
||||
let k_date = hmac_sha256(
|
||||
format!("AWS4{}", secret_key).as_bytes(),
|
||||
date_stamp.as_bytes(),
|
||||
);
|
||||
let k_region = hmac_sha256(&k_date, region.as_bytes());
|
||||
let k_service = hmac_sha256(&k_region, service.as_bytes());
|
||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
||||
|
||||
{
|
||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
||||
cache.put(
|
||||
cache_key,
|
||||
CacheEntry {
|
||||
key: k_signing.clone(),
|
||||
created: Instant::now(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
k_signing
|
||||
}
|
||||
|
||||
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
|
||||
if a.len() != b.len() {
|
||||
return false;
|
||||
}
|
||||
let mut result: u8 = 0;
|
||||
for (x, y) in a.iter().zip(b.iter()) {
|
||||
result |= x ^ y;
|
||||
}
|
||||
result == 0
|
||||
}
|
||||
|
||||
pub fn verify_sigv4_signature(
|
||||
method: &str,
|
||||
canonical_uri: &str,
|
||||
query_params: &[(String, String)],
|
||||
signed_headers_str: &str,
|
||||
header_values: &[(String, String)],
|
||||
payload_hash: &str,
|
||||
amz_date: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
secret_key: &str,
|
||||
provided_signature: &str,
|
||||
) -> bool {
|
||||
let mut sorted_params = query_params.to_vec();
|
||||
sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
|
||||
|
||||
let canonical_query_string = sorted_params
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
|
||||
.collect::<Vec<_>>()
|
||||
.join("&");
|
||||
|
||||
let mut canonical_headers = String::new();
|
||||
for (name, value) in header_values {
|
||||
let lower_name = name.to_lowercase();
|
||||
let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
|
||||
let final_value = if lower_name == "expect" && normalized.is_empty() {
|
||||
"100-continue"
|
||||
} else {
|
||||
&normalized
|
||||
};
|
||||
canonical_headers.push_str(&lower_name);
|
||||
canonical_headers.push(':');
|
||||
canonical_headers.push_str(final_value);
|
||||
canonical_headers.push('\n');
|
||||
}
|
||||
|
||||
let canonical_request = format!(
|
||||
"{}\n{}\n{}\n{}\n{}\n{}",
|
||||
method,
|
||||
canonical_uri,
|
||||
canonical_query_string,
|
||||
canonical_headers,
|
||||
signed_headers_str,
|
||||
payload_hash
|
||||
);
|
||||
|
||||
let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
let string_to_sign = format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
);
|
||||
|
||||
let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
|
||||
let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
|
||||
let calculated_hex = hex::encode(&calculated);
|
||||
|
||||
constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
|
||||
}
|
||||
|
||||
pub fn derive_signing_key(
|
||||
secret_key: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
) -> Vec<u8> {
|
||||
derive_signing_key_cached(secret_key, date_stamp, region, service)
|
||||
}
|
||||
|
||||
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
|
||||
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
|
||||
hex::encode(sig)
|
||||
}
|
||||
|
||||
pub fn compute_post_policy_signature(signing_key: &[u8], policy_b64: &str) -> String {
|
||||
let sig = hmac_sha256(signing_key, policy_b64.as_bytes());
|
||||
hex::encode(sig)
|
||||
}
|
||||
|
||||
pub fn build_string_to_sign(
|
||||
amz_date: &str,
|
||||
credential_scope: &str,
|
||||
canonical_request: &str,
|
||||
) -> String {
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
)
|
||||
}
|
||||
|
||||
pub fn constant_time_compare(a: &str, b: &str) -> bool {
|
||||
constant_time_compare_inner(a.as_bytes(), b.as_bytes())
|
||||
}
|
||||
|
||||
pub fn clear_signing_key_cache() {
|
||||
SIGNING_KEY_CACHE.lock().clear();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_derive_signing_key() {
|
||||
let key = derive_signing_key(
|
||||
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
"20130524",
|
||||
"us-east-1",
|
||||
"s3",
|
||||
);
|
||||
assert_eq!(key.len(), 32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_derive_signing_key_cached() {
|
||||
let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
|
||||
let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
|
||||
assert_eq!(key1, key2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_constant_time_compare() {
|
||||
assert!(constant_time_compare("abc", "abc"));
|
||||
assert!(!constant_time_compare("abc", "abd"));
|
||||
assert!(!constant_time_compare("abc", "abcd"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_string_to_sign() {
|
||||
let result = build_string_to_sign(
|
||||
"20130524T000000Z",
|
||||
"20130524/us-east-1/s3/aws4_request",
|
||||
"GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD",
|
||||
);
|
||||
assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
|
||||
assert!(result.contains("20130524T000000Z"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aws_uri_encode() {
|
||||
assert_eq!(aws_uri_encode("hello world"), "hello%20world");
|
||||
assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
|
||||
assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_sigv4_roundtrip() {
|
||||
let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
|
||||
let date_stamp = "20130524";
|
||||
let region = "us-east-1";
|
||||
let service = "s3";
|
||||
let amz_date = "20130524T000000Z";
|
||||
|
||||
let signing_key = derive_signing_key(secret, date_stamp, region, service);
|
||||
|
||||
let canonical_request =
|
||||
"GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
|
||||
let string_to_sign = build_string_to_sign(
|
||||
amz_date,
|
||||
&format!("{}/{}/{}/aws4_request", date_stamp, region, service),
|
||||
canonical_request,
|
||||
);
|
||||
|
||||
let signature = compute_signature(&signing_key, &string_to_sign);
|
||||
|
||||
let result = verify_sigv4_signature(
|
||||
"GET",
|
||||
"/",
|
||||
&[],
|
||||
"host",
|
||||
&[(
|
||||
"host".to_string(),
|
||||
"examplebucket.s3.amazonaws.com".to_string(),
|
||||
)],
|
||||
"UNSIGNED-PAYLOAD",
|
||||
amz_date,
|
||||
date_stamp,
|
||||
region,
|
||||
service,
|
||||
secret,
|
||||
&signature,
|
||||
);
|
||||
assert!(result);
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
[package]
|
||||
name = "myfsio-common"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
@@ -1,20 +0,0 @@
|
||||
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
|
||||
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
|
||||
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
|
||||
pub const BUCKET_META_DIR: &str = "meta";
|
||||
pub const BUCKET_VERSIONS_DIR: &str = "versions";
|
||||
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
|
||||
pub const STATS_FILE: &str = "stats.json";
|
||||
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
|
||||
pub const INDEX_FILE: &str = "_index.json";
|
||||
pub const MANIFEST_FILE: &str = "manifest.json";
|
||||
|
||||
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||
|
||||
pub const DEFAULT_REGION: &str = "us-east-1";
|
||||
pub const AWS_SERVICE: &str = "s3";
|
||||
|
||||
pub const DEFAULT_MAX_KEYS: usize = 1000;
|
||||
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
|
||||
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
|
||||
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;
|
||||
@@ -1,233 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum S3ErrorCode {
|
||||
AccessDenied,
|
||||
BadDigest,
|
||||
BucketAlreadyExists,
|
||||
BucketNotEmpty,
|
||||
EntityTooLarge,
|
||||
InternalError,
|
||||
InvalidAccessKeyId,
|
||||
InvalidArgument,
|
||||
InvalidBucketName,
|
||||
InvalidKey,
|
||||
InvalidPolicyDocument,
|
||||
InvalidRange,
|
||||
InvalidRequest,
|
||||
InvalidTag,
|
||||
MalformedXML,
|
||||
MethodNotAllowed,
|
||||
NoSuchBucket,
|
||||
NoSuchKey,
|
||||
NoSuchUpload,
|
||||
NoSuchVersion,
|
||||
NoSuchTagSet,
|
||||
PreconditionFailed,
|
||||
NotModified,
|
||||
QuotaExceeded,
|
||||
SignatureDoesNotMatch,
|
||||
SlowDown,
|
||||
}
|
||||
|
||||
impl S3ErrorCode {
|
||||
pub fn http_status(&self) -> u16 {
|
||||
match self {
|
||||
Self::AccessDenied => 403,
|
||||
Self::BadDigest => 400,
|
||||
Self::BucketAlreadyExists => 409,
|
||||
Self::BucketNotEmpty => 409,
|
||||
Self::EntityTooLarge => 413,
|
||||
Self::InternalError => 500,
|
||||
Self::InvalidAccessKeyId => 403,
|
||||
Self::InvalidArgument => 400,
|
||||
Self::InvalidBucketName => 400,
|
||||
Self::InvalidKey => 400,
|
||||
Self::InvalidPolicyDocument => 400,
|
||||
Self::InvalidRange => 416,
|
||||
Self::InvalidRequest => 400,
|
||||
Self::InvalidTag => 400,
|
||||
Self::MalformedXML => 400,
|
||||
Self::MethodNotAllowed => 405,
|
||||
Self::NoSuchBucket => 404,
|
||||
Self::NoSuchKey => 404,
|
||||
Self::NoSuchUpload => 404,
|
||||
Self::NoSuchVersion => 404,
|
||||
Self::NoSuchTagSet => 404,
|
||||
Self::PreconditionFailed => 412,
|
||||
Self::NotModified => 304,
|
||||
Self::QuotaExceeded => 403,
|
||||
Self::SignatureDoesNotMatch => 403,
|
||||
Self::SlowDown => 429,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
Self::AccessDenied => "AccessDenied",
|
||||
Self::BadDigest => "BadDigest",
|
||||
Self::BucketAlreadyExists => "BucketAlreadyExists",
|
||||
Self::BucketNotEmpty => "BucketNotEmpty",
|
||||
Self::EntityTooLarge => "EntityTooLarge",
|
||||
Self::InternalError => "InternalError",
|
||||
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
|
||||
Self::InvalidArgument => "InvalidArgument",
|
||||
Self::InvalidBucketName => "InvalidBucketName",
|
||||
Self::InvalidKey => "InvalidKey",
|
||||
Self::InvalidPolicyDocument => "InvalidPolicyDocument",
|
||||
Self::InvalidRange => "InvalidRange",
|
||||
Self::InvalidRequest => "InvalidRequest",
|
||||
Self::InvalidTag => "InvalidTag",
|
||||
Self::MalformedXML => "MalformedXML",
|
||||
Self::MethodNotAllowed => "MethodNotAllowed",
|
||||
Self::NoSuchBucket => "NoSuchBucket",
|
||||
Self::NoSuchKey => "NoSuchKey",
|
||||
Self::NoSuchUpload => "NoSuchUpload",
|
||||
Self::NoSuchVersion => "NoSuchVersion",
|
||||
Self::NoSuchTagSet => "NoSuchTagSet",
|
||||
Self::PreconditionFailed => "PreconditionFailed",
|
||||
Self::NotModified => "NotModified",
|
||||
Self::QuotaExceeded => "QuotaExceeded",
|
||||
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
|
||||
Self::SlowDown => "SlowDown",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_message(&self) -> &'static str {
|
||||
match self {
|
||||
Self::AccessDenied => "Access Denied",
|
||||
Self::BadDigest => "The Content-MD5 or checksum value you specified did not match what we received",
|
||||
Self::BucketAlreadyExists => "The requested bucket name is not available",
|
||||
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
|
||||
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
|
||||
Self::InternalError => "We encountered an internal error. Please try again.",
|
||||
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
|
||||
Self::InvalidArgument => "Invalid argument",
|
||||
Self::InvalidBucketName => "The specified bucket is not valid",
|
||||
Self::InvalidKey => "The specified key is not valid",
|
||||
Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
|
||||
Self::InvalidRange => "The requested range is not satisfiable",
|
||||
Self::InvalidRequest => "Invalid request",
|
||||
Self::InvalidTag => "The Tagging header is invalid",
|
||||
Self::MalformedXML => "The XML you provided was not well-formed",
|
||||
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
|
||||
Self::NoSuchBucket => "The specified bucket does not exist",
|
||||
Self::NoSuchKey => "The specified key does not exist",
|
||||
Self::NoSuchUpload => "The specified multipart upload does not exist",
|
||||
Self::NoSuchVersion => "The specified version does not exist",
|
||||
Self::NoSuchTagSet => "The TagSet does not exist",
|
||||
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
|
||||
Self::NotModified => "Not Modified",
|
||||
Self::QuotaExceeded => "The bucket quota has been exceeded",
|
||||
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
|
||||
Self::SlowDown => "Please reduce your request rate",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for S3ErrorCode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct S3Error {
|
||||
pub code: S3ErrorCode,
|
||||
pub message: String,
|
||||
pub resource: String,
|
||||
pub request_id: String,
|
||||
}
|
||||
|
||||
impl S3Error {
|
||||
pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
code,
|
||||
message: message.into(),
|
||||
resource: String::new(),
|
||||
request_id: String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_code(code: S3ErrorCode) -> Self {
|
||||
Self::new(code, code.default_message())
|
||||
}
|
||||
|
||||
pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
|
||||
self.resource = resource.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
|
||||
self.request_id = request_id.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn http_status(&self) -> u16 {
|
||||
self.code.http_status()
|
||||
}
|
||||
|
||||
pub fn to_xml(&self) -> String {
|
||||
format!(
|
||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||
<Error>\
|
||||
<Code>{}</Code>\
|
||||
<Message>{}</Message>\
|
||||
<Resource>{}</Resource>\
|
||||
<RequestId>{}</RequestId>\
|
||||
</Error>",
|
||||
self.code.as_str(),
|
||||
xml_escape(&self.message),
|
||||
xml_escape(&self.resource),
|
||||
xml_escape(&self.request_id),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for S3Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}: {}", self.code, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for S3Error {}
|
||||
|
||||
fn xml_escape(s: &str) -> String {
|
||||
s.replace('&', "&")
|
||||
.replace('<', "<")
|
||||
.replace('>', ">")
|
||||
.replace('"', """)
|
||||
.replace('\'', "'")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_error_codes() {
|
||||
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
|
||||
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
|
||||
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_to_xml() {
|
||||
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
|
||||
.with_resource("/test-bucket/test-key")
|
||||
.with_request_id("abc123");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<Code>NoSuchKey</Code>"));
|
||||
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
|
||||
assert!(xml.contains("<RequestId>abc123</RequestId>"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_xml_escape() {
|
||||
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
|
||||
.with_resource("/bucket/key&");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<test>"));
|
||||
assert!(xml.contains("&"));
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
// Crate root for the shared S3 primitives: constants, error codes, and
// common data types used by the other workspace crates.
pub mod constants;
pub mod error;
pub mod types;
|
||||
@@ -1,178 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Metadata describing a stored object (the "HEAD object" view).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
    /// Full object key within its bucket.
    pub key: String,
    /// Object size in bytes.
    pub size: u64,
    /// Last-modified timestamp (UTC).
    pub last_modified: DateTime<Utc>,
    /// Entity tag, if one has been computed for the object.
    pub etag: Option<String>,
    /// MIME content type, if recorded.
    pub content_type: Option<String>,
    /// Storage class label; `new` defaults it to "STANDARD".
    pub storage_class: Option<String>,
    /// User-supplied metadata key/value pairs.
    // NOTE(review): presumably the x-amz-meta-* headers — confirm with callers.
    pub metadata: HashMap<String, String>,
}

impl ObjectMeta {
    /// Construct metadata from the required fields. Optional fields start
    /// unset, except `storage_class`, which defaults to "STANDARD".
    pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
        Self {
            key,
            size,
            last_modified,
            etag: None,
            content_type: None,
            storage_class: Some("STANDARD".to_string()),
            metadata: HashMap::new(),
        }
    }
}
|
||||
|
||||
/// Minimal bucket identity: name plus creation time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
    /// Bucket name.
    pub name: String,
    /// When the bucket was created (UTC).
    pub creation_date: DateTime<Utc>,
}
|
||||
|
||||
/// Aggregate usage counters for a bucket. Current objects/bytes are tracked
/// separately from non-current (versioned) ones; the `total_*` helpers
/// combine the two.
#[derive(Debug, Clone, Default)]
pub struct BucketStats {
    /// Number of current (latest) objects.
    pub objects: u64,
    /// Bytes held by current objects.
    pub bytes: u64,
    /// Number of non-current object versions.
    pub version_count: u64,
    /// Bytes held by non-current versions.
    pub version_bytes: u64,
}

impl BucketStats {
    /// Current objects plus non-current versions. Saturates rather than
    /// overflowing (plain `+` would panic in debug / wrap in release on
    /// pathological counter values).
    pub fn total_objects(&self) -> u64 {
        self.objects.saturating_add(self.version_count)
    }

    /// Current bytes plus non-current version bytes; saturates on overflow.
    pub fn total_bytes(&self) -> u64 {
        self.bytes.saturating_add(self.version_bytes)
    }
}
|
||||
|
||||
/// Result page for a flat (non-delimited) object listing.
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
    /// Objects in this page.
    pub objects: Vec<ObjectMeta>,
    /// True when more results remain beyond this page.
    pub is_truncated: bool,
    /// Token to pass back to fetch the next page, present when truncated.
    pub next_continuation_token: Option<String>,
}

/// Result page for a delimited listing: objects plus folder-like prefixes.
#[derive(Debug, Clone)]
pub struct ShallowListResult {
    /// Objects at this level of the listing.
    pub objects: Vec<ObjectMeta>,
    /// Distinct key prefixes rolled up at the delimiter.
    pub common_prefixes: Vec<String>,
    /// True when more results remain beyond this page.
    pub is_truncated: bool,
    /// Token for the next page, present when truncated.
    pub next_continuation_token: Option<String>,
}

/// Paging/filter parameters for a flat listing.
#[derive(Debug, Clone, Default)]
pub struct ListParams {
    /// Maximum number of keys to return in one page.
    pub max_keys: usize,
    /// Resume token from a previously truncated page.
    pub continuation_token: Option<String>,
    /// Restrict results to keys beginning with this prefix, when set.
    pub prefix: Option<String>,
    /// Restrict results to keys strictly after this key, when set.
    pub start_after: Option<String>,
}

/// Parameters for a delimited ("shallow") listing.
#[derive(Debug, Clone, Default)]
pub struct ShallowListParams {
    /// Only keys starting with this prefix are considered.
    pub prefix: String,
    /// Delimiter used to group keys into common prefixes.
    // NOTE(review): typically "/" in S3-style listings — confirm with callers.
    pub delimiter: String,
    /// Maximum number of keys/prefixes to return in one page.
    pub max_keys: usize,
    /// Resume token from a previously truncated page.
    pub continuation_token: Option<String>,
}
|
||||
|
||||
/// Persisted record of one uploaded multipart part.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PartMeta {
    /// 1-based part number supplied by the client.
    pub part_number: u32,
    /// ETag computed for this part.
    pub etag: String,
    /// Part size in bytes.
    pub size: u64,
    /// When the part was uploaded, if recorded.
    pub last_modified: Option<DateTime<Utc>>,
}

/// The (number, etag) pair a client sends when completing a multipart upload.
#[derive(Debug, Clone)]
pub struct PartInfo {
    /// 1-based part number.
    pub part_number: u32,
    /// ETag the client claims for the part.
    pub etag: String,
}

/// An in-progress multipart upload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultipartUploadInfo {
    /// Server-assigned upload id.
    pub upload_id: String,
    /// Destination object key.
    pub key: String,
    /// When the upload was initiated (UTC).
    pub initiated: DateTime<Utc>,
}

/// One version of an object in a versioned bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
    /// Server-assigned version id.
    pub version_id: String,
    /// Object key.
    pub key: String,
    /// Size of this version in bytes.
    pub size: u64,
    /// When this version was written (UTC).
    pub last_modified: DateTime<Utc>,
    /// ETag of this version, if known.
    pub etag: Option<String>,
    /// True when this is the bucket's current version of the key.
    pub is_latest: bool,
    /// True when this version is a delete marker rather than object data.
    /// `serde(default)` keeps records written before this field existed readable.
    #[serde(default)]
    pub is_delete_marker: bool,
}

/// A single key/value bucket or object tag.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
    /// Tag key.
    pub key: String,
    /// Tag value.
    pub value: String,
}
|
||||
|
||||
/// Per-bucket configuration document. Every field is `serde(default)` so
/// configs written by older versions (with fewer fields) still deserialize.
/// Sub-configurations that are passed through rather than interpreted here
/// are kept as raw `serde_json::Value` documents.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
    /// Whether object versioning is enabled for the bucket.
    #[serde(default)]
    pub versioning_enabled: bool,
    /// Bucket tags.
    #[serde(default)]
    pub tags: Vec<Tag>,
    /// Raw CORS configuration, stored as provided.
    #[serde(default)]
    pub cors: Option<serde_json::Value>,
    /// Raw default-encryption configuration.
    #[serde(default)]
    pub encryption: Option<serde_json::Value>,
    /// Raw lifecycle rules.
    #[serde(default)]
    pub lifecycle: Option<serde_json::Value>,
    /// Raw static-website configuration.
    #[serde(default)]
    pub website: Option<serde_json::Value>,
    /// Usage limits for the bucket, if any.
    #[serde(default)]
    pub quota: Option<QuotaConfig>,
    /// Raw ACL document.
    #[serde(default)]
    pub acl: Option<serde_json::Value>,
    /// Raw event-notification configuration.
    #[serde(default)]
    pub notification: Option<serde_json::Value>,
    /// Raw access-logging configuration.
    #[serde(default)]
    pub logging: Option<serde_json::Value>,
    /// Raw object-lock configuration.
    #[serde(default)]
    pub object_lock: Option<serde_json::Value>,
    /// Raw bucket policy document.
    #[serde(default)]
    pub policy: Option<serde_json::Value>,
    /// Raw replication configuration.
    #[serde(default)]
    pub replication: Option<serde_json::Value>,
}

/// Usage limits for a bucket; `None` means unlimited for that dimension.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
    /// Maximum total bytes, if capped.
    pub max_bytes: Option<u64>,
    /// Maximum object count, if capped.
    pub max_objects: Option<u64>,
}
|
||||
|
||||
/// Authenticated caller identity attached to a request.
#[derive(Debug, Clone)]
pub struct Principal {
    /// Access key the caller authenticated with.
    pub access_key: String,
    /// Stable internal user id.
    pub user_id: String,
    /// Human-readable name for display.
    pub display_name: String,
    /// Whether the caller holds administrative privileges.
    pub is_admin: bool,
}

impl Principal {
    /// Assemble a principal from its constituent parts.
    pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
        Principal { is_admin, display_name, user_id, access_key }
    }
}
|
||||
@@ -1,24 +0,0 @@
|
||||
# myfsio-crypto: hashing, chunked AES-GCM streaming encryption, and KMS key
# handling for the workspace. Versions are inherited from the workspace root.
[package]
name = "myfsio-crypto"
version.workspace = true
edition.workspace = true

[dependencies]
# Shared S3 types/errors from this workspace.
myfsio-common = { path = "../myfsio-common" }
# Digests for ETags and content checksums.
md-5 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
# AEAD cipher + HKDF for per-chunk nonce derivation.
aes-gcm = { workspace = true }
hkdf = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
base64 = { workspace = true }
# Pinned outside the workspace table; rand 0.8 API (thread_rng/fill_bytes).
rand = "0.8"

[dev-dependencies]
# Async test attribute support and scratch directories for file round-trips.
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tempfile = "3"
|
||||
@@ -1,253 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Seek, SeekFrom, Write};
|
||||
use std::path::Path;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Plaintext bytes per encrypted chunk when the caller does not override it.
const DEFAULT_CHUNK_SIZE: usize = 65536;
/// Byte width of the big-endian u32 chunk-count header and of each
/// per-chunk ciphertext length prefix.
const HEADER_SIZE: usize = 4;
|
||||
|
||||
/// Errors produced by the streaming encryption/decryption and key-wrapping
/// helpers in this crate. Display strings come from the `#[error]` attributes.
#[derive(Debug, Error)]
pub enum CryptoError {
    /// Underlying file I/O failure (also used to surface task join errors).
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// A key was supplied that is not exactly 32 bytes (AES-256).
    #[error("Invalid key size: expected 32 bytes, got {0}")]
    InvalidKeySize(usize),
    /// A nonce was supplied that is not exactly 12 bytes (GCM standard).
    #[error("Invalid nonce size: expected 12 bytes, got {0}")]
    InvalidNonceSize(usize),
    /// AEAD seal failed; carries the cipher's error text.
    #[error("Encryption failed: {0}")]
    EncryptionFailed(String),
    /// AEAD open failed (wrong key, wrong nonce, or tampered data);
    /// carries the index of the chunk that failed authentication.
    #[error("Decryption failed at chunk {0}")]
    DecryptionFailed(u32),
    /// HKDF output-length expansion failed.
    #[error("HKDF expand failed: {0}")]
    HkdfFailed(String),
}
|
||||
|
||||
/// Fill `buf` as far as possible from `reader`, retrying on `Interrupted`.
///
/// Returns the number of bytes actually read; a short count (including 0)
/// means end-of-stream arrived before the buffer was full.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0usize;
    loop {
        if total == buf.len() {
            return Ok(total);
        }
        match reader.read(&mut buf[total..]) {
            Ok(0) => return Ok(total),
            Ok(n) => total += n,
            Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
            Err(e) => return Err(e),
        }
    }
}
|
||||
|
||||
/// Derive a unique 12-byte AES-GCM nonce for one chunk.
///
/// Uses HKDF-SHA256 with the object's base nonce as the salt, the fixed
/// label `"chunk_nonce"` as the input key material, and the big-endian
/// chunk index as the `expand` info. Distinct chunk indexes therefore yield
/// distinct nonces under the same data key, avoiding GCM nonce reuse.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    // 12 bytes is well under HKDF's 255*HashLen cap, so this only fails on
    // an internal length error.
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
    Ok(okm)
}
|
||||
|
||||
/// Encrypt `input_path` into `output_path` with AES-256-GCM, one chunk at a time.
///
/// Output layout:
///   `[u32 BE chunk_count]` then, per chunk, `[u32 BE ciphertext_len][ciphertext]`
/// where each chunk is sealed with a nonce derived from (`base_nonce`, chunk
/// index) via `derive_chunk_nonce`, and the ciphertext includes the GCM
/// authentication tag.
///
/// Returns the number of chunks written (0 for an empty input).
///
/// # Errors
/// `InvalidKeySize` / `InvalidNonceSize` when `key` is not 32 bytes or
/// `base_nonce` is not 12 bytes; `Io` for file errors; `EncryptionFailed`
/// when sealing a chunk fails.
pub fn encrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }

    let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
    // Lengths were validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());

    let mut infile = File::open(input_path)?;
    let mut outfile = File::create(output_path)?;

    // Placeholder for the chunk count; the real value is patched in after
    // the loop, once it is known.
    outfile.write_all(&[0u8; 4])?;

    let mut buf = vec![0u8; chunk_size];
    let mut chunk_index: u32 = 0;

    loop {
        let n = read_exact_chunk(&mut infile, &mut buf)?;
        if n == 0 {
            break;
        }

        // A fresh nonce per chunk prevents nonce reuse under one data key.
        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);

        let encrypted = cipher
            .encrypt(nonce, &buf[..n])
            .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

        // Length-prefix each chunk so the decryptor can frame the stream.
        let size = encrypted.len() as u32;
        outfile.write_all(&size.to_be_bytes())?;
        outfile.write_all(&encrypted)?;

        chunk_index += 1;
    }

    // Back-patch the real chunk count into the 4-byte header.
    outfile.seek(SeekFrom::Start(0))?;
    outfile.write_all(&chunk_index.to_be_bytes())?;

    Ok(chunk_index)
}
|
||||
|
||||
/// Decrypt a file produced by `encrypt_stream_chunked` back into plaintext.
///
/// Reads the `u32 BE` chunk-count header, then for each chunk its `u32 BE`
/// ciphertext length and ciphertext, opens it with the nonce re-derived from
/// (`base_nonce`, chunk index), and appends the plaintext to `output_path`.
///
/// Returns the number of chunks processed.
///
/// # Errors
/// `InvalidKeySize` / `InvalidNonceSize` for malformed key/nonce; `Io` for
/// file errors (including a truncated input); `DecryptionFailed(i)` when
/// chunk `i` fails GCM authentication (wrong key/nonce or tampered data).
pub fn decrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }

    // Lengths validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());

    let mut infile = File::open(input_path)?;
    let mut outfile = File::create(output_path)?;

    let mut header = [0u8; HEADER_SIZE];
    infile.read_exact(&mut header)?;
    let chunk_count = u32::from_be_bytes(header);

    let mut size_buf = [0u8; HEADER_SIZE];
    for chunk_index in 0..chunk_count {
        infile.read_exact(&mut size_buf)?;
        let chunk_size = u32::from_be_bytes(size_buf) as usize;

        // NOTE(review): chunk_size comes straight from the file, so a corrupt
        // header can request an allocation of up to 4 GiB — consider a sanity
        // cap if inputs can be untrusted.
        let mut encrypted = vec![0u8; chunk_size];
        infile.read_exact(&mut encrypted)?;

        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);

        let decrypted = cipher
            .decrypt(nonce, encrypted.as_ref())
            .map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;

        outfile.write_all(&decrypted)?;
    }

    Ok(chunk_count)
}
|
||||
|
||||
/// Async wrapper for `encrypt_stream_chunked`: runs the blocking file
/// encryption on tokio's blocking thread pool so it does not stall the
/// async runtime.
pub async fn encrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
    // Owned copies so the closure can satisfy spawn_blocking's 'static bound.
    let input_path = input_path.to_owned();
    let output_path = output_path.to_owned();
    let key = key.to_vec();
    let base_nonce = base_nonce.to_vec();
    tokio::task::spawn_blocking(move || {
        encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
    })
    .await
    // The trailing `?` unwraps the JoinError (mapped to Io); the inner
    // Result from the worker is returned as-is.
    .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}

/// Async wrapper for `decrypt_stream_chunked`; see
/// `encrypt_stream_chunked_async` for the threading rationale.
pub async fn decrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    let input_path = input_path.to_owned();
    let output_path = output_path.to_owned();
    let key = key.to_vec();
    let base_nonce = base_nonce.to_vec();
    tokio::task::spawn_blocking(move || {
        decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
    })
    .await
    .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as IoWrite;

    // Encrypt then decrypt with the same key/nonce; expect the original bytes
    // and matching chunk counts. chunk_size=16 forces multiple chunks.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];

        let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
        assert!(chunks > 0);

        let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
        assert_eq!(chunks, chunks2);

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    // A 16-byte key must be rejected before any encryption happens.
    #[test]
    fn test_invalid_key_size() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"test")
            .unwrap();

        let result = encrypt_stream_chunked(
            &input,
            &dir.path().join("out"),
            &[0u8; 16],
            &[0u8; 12],
            None,
        );
        assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
    }

    // GCM authentication must fail when decrypting with a different key.
    #[test]
    fn test_wrong_key_fails_decrypt() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"secret data")
            .unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();

        let wrong_key = [0x43u8; 32];
        let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
        assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
    }
}
|
||||
@@ -1,377 +0,0 @@
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use rand::RngCore;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use crate::aes_gcm::{decrypt_stream_chunked, encrypt_stream_chunked, CryptoError};
|
||||
use crate::kms::KmsService;
|
||||
|
||||
/// Server-side-encryption algorithm requested for an object.
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
    /// SSE-S3: server-managed key, reported on the wire as "AES256".
    Aes256,
    /// SSE-KMS: data key wrapped by a KMS key, reported as "aws:kms".
    AwsKms,
    /// SSE-C: customer-supplied key; the wire name is still "AES256".
    CustomerProvided,
}

impl SseAlgorithm {
    /// Wire value for the `x-amz-server-side-encryption` header.
    pub fn as_str(&self) -> &'static str {
        match self {
            SseAlgorithm::AwsKms => "aws:kms",
            SseAlgorithm::Aes256 | SseAlgorithm::CustomerProvided => "AES256",
        }
    }
}
|
||||
|
||||
/// Per-request description of how an object should be encrypted.
#[derive(Debug, Clone)]
pub struct EncryptionContext {
    /// Which SSE mode to apply.
    pub algorithm: SseAlgorithm,
    /// KMS key id; required when `algorithm` is `AwsKms`.
    pub kms_key_id: Option<String>,
    /// Raw 32-byte customer key; required when `algorithm` is `CustomerProvided`.
    pub customer_key: Option<Vec<u8>>,
}
|
||||
|
||||
/// Encryption bookkeeping persisted alongside an object as `x-amz-*`
/// metadata entries, so the object can later be decrypted.
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
    /// Algorithm label, e.g. "AES256" or "aws:kms".
    pub algorithm: String,
    /// Base64 of the 12-byte base nonce.
    pub nonce: String,
    /// Base64 wrapped data key (absent for SSE-C, where the customer holds it).
    pub encrypted_data_key: Option<String>,
    /// KMS key id, when SSE-KMS was used.
    pub kms_key_id: Option<String>,
}

impl EncryptionMetadata {
    /// Serialize into the metadata map stored with the object.
    pub fn to_metadata_map(&self) -> HashMap<String, String> {
        let mut map: HashMap<String, String> = [
            ("x-amz-server-side-encryption", self.algorithm.as_str()),
            ("x-amz-encryption-nonce", self.nonce.as_str()),
        ]
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.to_string()))
        .collect();
        if let Some(dk) = &self.encrypted_data_key {
            map.insert("x-amz-encrypted-data-key".to_string(), dk.clone());
        }
        if let Some(kid) = &self.kms_key_id {
            map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
        }
        map
    }

    /// Rebuild from a stored metadata map; `None` when either required key
    /// (algorithm or nonce) is absent.
    pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
        Some(Self {
            algorithm: meta.get("x-amz-server-side-encryption")?.clone(),
            nonce: meta.get("x-amz-encryption-nonce")?.clone(),
            encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
            kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
        })
    }

    /// True when the map carries the SSE marker header.
    pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
        meta.contains_key("x-amz-server-side-encryption")
    }

    /// Strip all encryption bookkeeping keys from a metadata map (e.g. before
    /// exposing user metadata to clients).
    pub fn clean_metadata(meta: &mut HashMap<String, String>) {
        for key in [
            "x-amz-server-side-encryption",
            "x-amz-encryption-nonce",
            "x-amz-encrypted-data-key",
            "x-amz-encryption-key-id",
        ] {
            meta.remove(key);
        }
    }
}
|
||||
|
||||
/// Envelope-encryption service: a master key wraps per-object data keys,
/// optionally delegating key wrapping to a KMS for SSE-KMS objects.
pub struct EncryptionService {
    // 32-byte AES-256 master key used to wrap/unwrap SSE-S3 data keys.
    master_key: [u8; 32],
    // Optional KMS backend; required only for the aws:kms paths.
    kms: Option<std::sync::Arc<KmsService>>,
}
|
||||
|
||||
impl EncryptionService {
    /// Create a service from a master key and an optional KMS backend.
    pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
        Self { master_key, kms }
    }

    /// Generate a fresh random (data key, base nonce) pair for one object.
    pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
        let mut data_key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut data_key);
        rand::thread_rng().fill_bytes(&mut nonce);
        (data_key, nonce)
    }

    /// Wrap (encrypt) a data key under the master key.
    ///
    /// Output is base64 of `nonce (12 bytes) || AES-GCM ciphertext`, where
    /// the nonce is freshly random per wrap.
    pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let cipher = Aes256Gcm::new((&self.master_key).into());
        let mut nonce_bytes = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut nonce_bytes);
        let nonce = Nonce::from_slice(&nonce_bytes);

        let encrypted = cipher
            .encrypt(nonce, data_key.as_slice())
            .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

        // Prepend the nonce so unwrap_data_key can recover it.
        let mut combined = Vec::with_capacity(12 + encrypted.len());
        combined.extend_from_slice(&nonce_bytes);
        combined.extend_from_slice(&encrypted);
        Ok(B64.encode(&combined))
    }

    /// Unwrap (decrypt) a data key previously produced by `wrap_data_key`.
    ///
    /// # Errors
    /// `EncryptionFailed` for bad base64 or a too-short payload,
    /// `DecryptionFailed(0)` when GCM authentication fails, and
    /// `InvalidKeySize` if the recovered plaintext is not 32 bytes.
    pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let combined = B64.decode(wrapped_b64).map_err(|e| {
            CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
        })?;
        if combined.len() < 12 {
            return Err(CryptoError::EncryptionFailed(
                "Wrapped key too short".to_string(),
            ));
        }

        // Layout is nonce (12 bytes) || ciphertext, mirroring wrap_data_key.
        let (nonce_bytes, ciphertext) = combined.split_at(12);
        let cipher = Aes256Gcm::new((&self.master_key).into());
        let nonce = Nonce::from_slice(nonce_bytes);

        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_| CryptoError::DecryptionFailed(0))?;

        if plaintext.len() != 32 {
            return Err(CryptoError::InvalidKeySize(plaintext.len()));
        }
        let mut key = [0u8; 32];
        key.copy_from_slice(&plaintext);
        Ok(key)
    }

    /// Encrypt a file for the given SSE mode and return the metadata needed
    /// to decrypt it later.
    ///
    /// Key handling by mode:
    /// * `Aes256` (SSE-S3): random data key, wrapped under the master key.
    /// * `AwsKms` (SSE-KMS): random data key, encrypted via the KMS backend.
    /// * `CustomerProvided` (SSE-C): the caller's 32-byte key is used
    ///   directly and nothing is persisted about it.
    pub async fn encrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        ctx: &EncryptionContext,
    ) -> Result<EncryptionMetadata, CryptoError> {
        let (data_key, nonce) = self.generate_data_key();

        let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
            SseAlgorithm::Aes256 => {
                let wrapped = self.wrap_data_key(&data_key)?;
                (Some(wrapped), None)
            }
            SseAlgorithm::AwsKms => {
                let kms = self
                    .kms
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
                let kid = ctx
                    .kms_key_id
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
                let ciphertext = kms.encrypt_data(kid, &data_key).await?;
                (Some(B64.encode(&ciphertext)), Some(kid.clone()))
            }
            // SSE-C: the customer's key is used; no key material is stored.
            SseAlgorithm::CustomerProvided => (None, None),
        };

        // For SSE-C the freshly generated data key is discarded and the
        // customer key (validated to 32 bytes) is used instead.
        let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
            let ck = ctx
                .customer_key
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("No customer key provided".into()))?;
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else {
            data_key
        };

        // Run the blocking file encryption off the async runtime.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        let ak = actual_key;
        let n = nonce;
        tokio::task::spawn_blocking(move || encrypt_stream_chunked(&ip, &op, &ak, &n, None))
            .await
            .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(EncryptionMetadata {
            algorithm: ctx.algorithm.as_str().to_string(),
            nonce: B64.encode(nonce),
            encrypted_data_key,
            kms_key_id,
        })
    }

    /// Decrypt a file using its stored `EncryptionMetadata`.
    ///
    /// Key resolution order: an explicit `customer_key` (SSE-C) wins; else
    /// "aws:kms" metadata routes the wrapped key through the KMS backend;
    /// otherwise the wrapped key is unwrapped with the master key (SSE-S3).
    pub async fn decrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        enc_meta: &EncryptionMetadata,
        customer_key: Option<&[u8]>,
    ) -> Result<(), CryptoError> {
        let nonce_bytes = B64
            .decode(&enc_meta.nonce)
            .map_err(|e| CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e)))?;
        if nonce_bytes.len() != 12 {
            return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
        }

        let data_key: [u8; 32] = if let Some(ck) = customer_key {
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else if enc_meta.algorithm == "aws:kms" {
            let kms = self
                .kms
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
            let kid = enc_meta
                .kms_key_id
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
            let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            let ct = B64.decode(encrypted_dk).map_err(|e| {
                CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
            })?;
            let dk = kms.decrypt_data(kid, &ct).await?;
            if dk.len() != 32 {
                return Err(CryptoError::InvalidKeySize(dk.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(&dk);
            k
        } else {
            // SSE-S3: unwrap under the master key.
            let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            self.unwrap_data_key(wrapped)?
        };

        // Run the blocking file decryption off the async runtime.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        // Length checked above, so this conversion cannot fail.
        let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
        tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nb))
            .await
            .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Fixed master key shared by the tests below.
    fn test_master_key() -> [u8; 32] {
        [0x42u8; 32]
    }

    // Wrapping then unwrapping a data key must return the original bytes.
    #[test]
    fn test_wrap_unwrap_data_key() {
        let svc = EncryptionService::new(test_master_key(), None);
        let dk = [0xAAu8; 32];
        let wrapped = svc.wrap_data_key(&dk).unwrap();
        let unwrapped = svc.unwrap_data_key(&wrapped).unwrap();
        assert_eq!(dk, unwrapped);
    }

    // SSE-S3 round trip: a wrapped data key is stored, and decryption
    // recovers the plaintext without any caller-supplied key.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_s3() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-S3 encrypted content for testing!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::Aes256,
            kms_key_id: None,
            customer_key: None,
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert_eq!(meta.algorithm, "AES256");
        assert!(meta.encrypted_data_key.is_some());

        svc.decrypt_object(&encrypted, &decrypted, &meta, None)
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    // SSE-C round trip: no key material is persisted; the same customer key
    // must be supplied again to decrypt.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_c() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-C encrypted content!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let customer_key = [0xBBu8; 32];
        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::CustomerProvided,
            kms_key_id: None,
            customer_key: Some(customer_key.to_vec()),
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert!(meta.encrypted_data_key.is_none());

        svc.decrypt_object(&encrypted, &decrypted, &meta, Some(&customer_key))
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    // Metadata map serialization must survive a to/from round trip.
    #[test]
    fn test_encryption_metadata_roundtrip() {
        let meta = EncryptionMetadata {
            algorithm: "AES256".to_string(),
            nonce: "dGVzdG5vbmNlMTI=".to_string(),
            encrypted_data_key: Some("c29tZWtleQ==".to_string()),
            kms_key_id: None,
        };
        let map = meta.to_metadata_map();
        let restored = EncryptionMetadata::from_metadata(&map).unwrap();
        assert_eq!(restored.algorithm, "AES256");
        assert_eq!(restored.nonce, meta.nonce);
        assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
    }

    // is_encrypted keys off the presence of the SSE marker header only.
    #[test]
    fn test_is_encrypted() {
        let mut meta = HashMap::new();
        assert!(!EncryptionMetadata::is_encrypted(&meta));
        meta.insert(
            "x-amz-server-side-encryption".to_string(),
            "AES256".to_string(),
        );
        assert!(EncryptionMetadata::is_encrypted(&meta));
    }
}
|
||||
@@ -1,138 +0,0 @@
|
||||
use md5::{Digest, Md5};
|
||||
use sha2::Sha256;
|
||||
use std::io::Read;
|
||||
use std::path::Path;
|
||||
|
||||
/// Read-buffer size (64 KiB) used by all file-hashing helpers below.
const CHUNK_SIZE: usize = 65536;
|
||||
|
||||
pub fn md5_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn md5_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut md5_hasher = Md5::new();
|
||||
let mut sha_hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
md5_hasher.update(&buf[..n]);
|
||||
sha_hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok((
|
||||
format!("{:x}", md5_hasher.finalize()),
|
||||
format!("{:x}", sha_hasher.finalize()),
|
||||
))
|
||||
}
|
||||
|
||||
/// Async wrapper for `md5_file`: hashes on the blocking thread pool so large
/// files do not stall the async runtime. The `?` unwraps the JoinError.
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
    let path = path.to_owned();
    tokio::task::spawn_blocking(move || md5_file(&path))
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}

/// Async wrapper for `sha256_file`; see `md5_file_async`.
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
    let path = path.to_owned();
    tokio::task::spawn_blocking(move || sha256_file(&path))
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}

/// Async wrapper for `md5_sha256_file`; see `md5_file_async`.
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
    let path = path.to_owned();
    tokio::task::spawn_blocking(move || md5_sha256_file(&path))
        .await
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Known MD5 vectors: empty string and "hello".
    #[test]
    fn test_md5_bytes() {
        assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
        assert_eq!(md5_bytes(b"hello"), "5d41402abc4b2a76b9719d911017c592");
    }

    // Known SHA-256 vector for "hello".
    #[test]
    fn test_sha256_bytes() {
        let hash = sha256_bytes(b"hello");
        assert_eq!(
            hash,
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    // File hashing must agree with the in-memory vector.
    #[test]
    fn test_md5_file() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(b"hello").unwrap();
        tmp.flush().unwrap();
        let hash = md5_file(tmp.path()).unwrap();
        assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
    }

    // Single-pass dual hash must match both individual digests.
    #[test]
    fn test_md5_sha256_file() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(b"hello").unwrap();
        tmp.flush().unwrap();
        let (md5, sha) = md5_sha256_file(tmp.path()).unwrap();
        assert_eq!(md5, "5d41402abc4b2a76b9719d911017c592");
        assert_eq!(
            sha,
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    // The async wrapper must produce the same digest as the sync path.
    #[tokio::test]
    async fn test_md5_file_async() {
        let mut tmp = tempfile::NamedTempFile::new().unwrap();
        tmp.write_all(b"hello").unwrap();
        tmp.flush().unwrap();
        let hash = md5_file_async(tmp.path()).await.unwrap();
        assert_eq!(hash, "5d41402abc4b2a76b9719d911017c592");
    }
}
|
||||
@@ -1,451 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use chrono::{DateTime, Utc};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::aes_gcm::CryptoError;
|
||||
|
||||
/// Metadata and wrapped material for one KMS key, serialized with
/// AWS-style PascalCase field names.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KmsKey {
    /// UUID assigned at creation time.
    #[serde(rename = "KeyId")]
    pub key_id: String,
    /// Synthetic ARN of the form `arn:aws:kms:local:000000000000:key/<id>`.
    #[serde(rename = "Arn")]
    pub arn: String,
    /// Free-form description supplied by the caller.
    #[serde(rename = "Description")]
    pub description: String,
    /// UTC timestamp of key creation.
    #[serde(rename = "CreationDate")]
    pub creation_date: DateTime<Utc>,
    /// Whether the key may currently be used for crypto operations.
    #[serde(rename = "Enabled")]
    pub enabled: bool,
    /// String state mirroring `enabled` ("Enabled"/"Disabled").
    #[serde(rename = "KeyState")]
    pub key_state: String,
    /// Usage tag; set to "ENCRYPT_DECRYPT" at creation.
    #[serde(rename = "KeyUsage")]
    pub key_usage: String,
    /// Spec tag; set to "SYMMETRIC_DEFAULT" at creation.
    #[serde(rename = "KeySpec")]
    pub key_spec: String,
    /// Base64 of (12-byte nonce || AES-GCM ciphertext) wrapping the
    /// raw 32-byte key material under the master key.
    #[serde(rename = "EncryptedKeyMaterial")]
    pub encrypted_key_material: String,
}
|
||||
|
||||
/// On-disk wrapper for the persisted key list (`kms_keys.json`).
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KmsStore {
    keys: Vec<KmsKey>,
}
|
||||
|
||||
/// File-backed KMS emulator. Per-key material is stored wrapped under a
/// single master key; both live behind async `RwLock`s for shared use.
pub struct KmsService {
    // Path of the JSON file holding the serialized key list.
    keys_path: PathBuf,
    // 32-byte master key used to wrap/unwrap per-key material.
    master_key: Arc<RwLock<[u8; 32]>>,
    // In-memory working copy of all keys; `save()` flushes it to disk.
    keys: Arc<RwLock<Vec<KmsKey>>>,
}
|
||||
|
||||
impl KmsService {
|
||||
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
|
||||
let keys_path = keys_dir.join("kms_keys.json");
|
||||
|
||||
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
|
||||
|
||||
let keys = if keys_path.exists() {
|
||||
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
|
||||
let store: KmsStore = serde_json::from_str(&data)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
|
||||
store.keys
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
keys_path,
|
||||
master_key: Arc::new(RwLock::new(master_key)),
|
||||
keys: Arc::new(RwLock::new(keys)),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
plaintext_key: &[u8],
|
||||
) -> Result<String, CryptoError> {
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext_key)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut combined = Vec::with_capacity(12 + ciphertext.len());
|
||||
combined.extend_from_slice(&nonce_bytes);
|
||||
combined.extend_from_slice(&ciphertext);
|
||||
Ok(B64.encode(&combined))
|
||||
}
|
||||
|
||||
fn decrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
encrypted_b64: &str,
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let combined = B64.decode(encrypted_b64).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
|
||||
})?;
|
||||
if combined.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Encrypted key material too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let (nonce_bytes, ciphertext) = combined.split_at(12);
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
async fn save(&self) -> Result<(), CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let store = KmsStore { keys: keys.clone() };
|
||||
let json = serde_json::to_string_pretty(&store)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
|
||||
let key_id = uuid::Uuid::new_v4().to_string();
|
||||
let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);
|
||||
|
||||
let mut plaintext_key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;
|
||||
|
||||
let kms_key = KmsKey {
|
||||
key_id: key_id.clone(),
|
||||
arn,
|
||||
description: description.to_string(),
|
||||
creation_date: Utc::now(),
|
||||
enabled: true,
|
||||
key_state: "Enabled".to_string(),
|
||||
key_usage: "ENCRYPT_DECRYPT".to_string(),
|
||||
key_spec: "SYMMETRIC_DEFAULT".to_string(),
|
||||
encrypted_key_material: encrypted,
|
||||
};
|
||||
|
||||
self.keys.write().await.push(kms_key.clone());
|
||||
self.save().await?;
|
||||
Ok(kms_key)
|
||||
}
|
||||
|
||||
pub async fn list_keys(&self) -> Vec<KmsKey> {
|
||||
self.keys.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
|
||||
let keys = self.keys.read().await;
|
||||
keys.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
let len_before = keys.len();
|
||||
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
|
||||
let removed = keys.len() < len_before;
|
||||
drop(keys);
|
||||
if removed {
|
||||
self.save().await?;
|
||||
}
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = true;
|
||||
key.key_state = "Enabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = false;
|
||||
key.key_state = "Disabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let key = keys
|
||||
.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;
|
||||
|
||||
if !key.enabled {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"KMS key is disabled".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
Self::decrypt_key_material(&master, &key.encrypted_key_material)
|
||||
}
|
||||
|
||||
pub async fn encrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
plaintext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut result = Vec::with_capacity(12 + ciphertext.len());
|
||||
result.extend_from_slice(&nonce_bytes);
|
||||
result.extend_from_slice(&ciphertext);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub async fn decrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
ciphertext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
if ciphertext.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Ciphertext too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let (nonce_bytes, ct) = ciphertext.split_at(12);
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ct)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
pub async fn generate_data_key(
|
||||
&self,
|
||||
key_id: &str,
|
||||
num_bytes: usize,
|
||||
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
|
||||
let kms_key = self.decrypt_data_key(key_id).await?;
|
||||
if kms_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(kms_key.len()));
|
||||
}
|
||||
|
||||
let mut plaintext_key = vec![0u8; num_bytes];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let key_arr: [u8; 32] = kms_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let encrypted = cipher
|
||||
.encrypt(nonce, plaintext_key.as_slice())
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut wrapped = Vec::with_capacity(12 + encrypted.len());
|
||||
wrapped.extend_from_slice(&nonce_bytes);
|
||||
wrapped.extend_from_slice(&encrypted);
|
||||
|
||||
Ok((plaintext_key, wrapped))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
let path = keys_dir.join("master.key");
|
||||
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
// Integration-style tests for the KMS service, each against a fresh
// temporary directory so runs are isolated.
#[cfg(test)]
mod tests {
    use super::*;

    // Creating a key yields enabled metadata and it shows up in listings.
    #[tokio::test]
    async fn test_create_and_list_keys() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("test key").await.unwrap();
        assert!(key.enabled);
        assert_eq!(key.description, "test key");
        assert!(key.key_id.len() > 0);

        let keys = kms.list_keys().await;
        assert_eq!(keys.len(), 1);
        assert_eq!(keys[0].key_id, key.key_id);
    }

    // enable_key/disable_key flip the `enabled` flag observably.
    #[tokio::test]
    async fn test_enable_disable_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("toggle").await.unwrap();
        assert!(key.enabled);

        kms.disable_key(&key.key_id).await.unwrap();
        let k = kms.get_key(&key.key_id).await.unwrap();
        assert!(!k.enabled);

        kms.enable_key(&key.key_id).await.unwrap();
        let k = kms.get_key(&key.key_id).await.unwrap();
        assert!(k.enabled);
    }

    // Deletion removes the key from lookups and listings.
    #[tokio::test]
    async fn test_delete_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("doomed").await.unwrap();
        assert!(kms.delete_key(&key.key_id).await.unwrap());
        assert!(kms.get_key(&key.key_id).await.is_none());
        assert_eq!(kms.list_keys().await.len(), 0);
    }

    // Round trip: ciphertext differs from the input and decrypts back.
    #[tokio::test]
    async fn test_encrypt_decrypt_data() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("enc-key").await.unwrap();
        let plaintext = b"Hello, KMS!";

        let ciphertext = kms.encrypt_data(&key.key_id, plaintext).await.unwrap();
        assert_ne!(&ciphertext, plaintext);

        let decrypted = kms.decrypt_data(&key.key_id, &ciphertext).await.unwrap();
        assert_eq!(decrypted, plaintext);
    }

    // The wrapped copy is longer than the plaintext (nonce + tag overhead).
    #[tokio::test]
    async fn test_generate_data_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("data-key-gen").await.unwrap();
        let (plaintext, wrapped) = kms.generate_data_key(&key.key_id, 32).await.unwrap();

        assert_eq!(plaintext.len(), 32);
        assert!(wrapped.len() > 32);
    }

    // A disabled key must refuse to encrypt.
    #[tokio::test]
    async fn test_disabled_key_cannot_encrypt() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("disabled").await.unwrap();
        kms.disable_key(&key.key_id).await.unwrap();

        let result = kms.encrypt_data(&key.key_id, b"test").await;
        assert!(result.is_err());
    }

    // A second service instance over the same directory sees saved keys.
    #[tokio::test]
    async fn test_persistence_across_reload() {
        let dir = tempfile::tempdir().unwrap();

        let key_id = {
            let kms = KmsService::new(dir.path()).await.unwrap();
            let key = kms.create_key("persistent").await.unwrap();
            key.key_id
        };

        let kms2 = KmsService::new(dir.path()).await.unwrap();
        let key = kms2.get_key(&key_id).await;
        assert!(key.is_some());
        assert_eq!(key.unwrap().description, "persistent");
    }

    // The free-standing master-key loader returns the same key on reload.
    #[tokio::test]
    async fn test_master_key_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let key1 = load_or_create_master_key(dir.path()).await.unwrap();
        let key2 = load_or_create_master_key(dir.path()).await.unwrap();
        assert_eq!(key1, key2);
    }
}
|
||||
@@ -1,4 +0,0 @@
|
||||
/// AES-256-GCM primitives; also home of the shared `CryptoError` type
/// imported by the other modules in this crate.
pub mod aes_gcm;
/// Encryption support layer.
/// NOTE(review): module body not visible here — confirm description.
pub mod encryption;
/// MD5/SHA-256 digests for byte slices and files, with async
/// `spawn_blocking` wrappers.
pub mod hashing;
/// Local KMS emulation: master-key-wrapped key storage, enable/disable,
/// data-key generation and envelope encrypt/decrypt.
pub mod kms;
|
||||
@@ -1,56 +0,0 @@
|
||||
# Manifest for the myfsio-server binary crate. Version/edition are
# inherited from the workspace root.
[package]
name = "myfsio-server"
version.workspace = true
edition.workspace = true

[dependencies]
# Sibling workspace crates.
myfsio-common = { path = "../myfsio-common" }
myfsio-auth = { path = "../myfsio-auth" }
myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
# Encoding / hashing.
base64 = { workspace = true }
md-5 = { workspace = true }
# HTTP stack.
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
hyper = { workspace = true }
bytes = { workspace = true }
# Serialization and diagnostics.
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
http-body-util = "0.1"
percent-encoding = { workspace = true }
# XML handling for S3-style request/response bodies.
quick-xml = { workspace = true }
mime_guess = "2"
crc32fast = { workspace = true }
sha2 = { workspace = true }
duckdb = { workspace = true }
roxmltree = "0.20"
parking_lot = { workspace = true }
regex = "1"
multer = "3"
reqwest = { workspace = true }
# AWS SDK pieces used for replication / interop.
aws-sdk-s3 = { workspace = true }
aws-config = { workspace = true }
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }
async-trait = { workspace = true }
rand = "0.8"
# UI templating, cookies, and misc.
tera = { workspace = true }
cookie = { workspace = true }
subtle = { workspace = true }
clap = { workspace = true }
dotenvy = { workspace = true }
sysinfo = "0.32"
aes-gcm = { workspace = true }

[dev-dependencies]
tempfile = "3"
# Re-declared with the `util` feature for ServiceExt in tests.
tower = { workspace = true, features = ["util"] }
|
||||
@@ -1,227 +0,0 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Runtime configuration resolved from environment variables by
/// [`ServerConfig::from_env`].
#[derive(Debug, Clone)]
pub struct ServerConfig {
    // Bind addresses for the API listener and the management UI.
    pub bind_addr: SocketAddr,
    pub ui_bind_addr: SocketAddr,
    // Storage layout and AWS-compatible region string.
    pub storage_root: PathBuf,
    pub region: String,
    pub iam_config_path: PathBuf,
    // SigV4 timestamp window and presigned-URL expiry bounds (seconds).
    pub sigv4_timestamp_tolerance_secs: u64,
    pub presigned_url_min_expiry: u64,
    pub presigned_url_max_expiry: u64,
    // Application secret; `None` when neither env nor file provides one.
    pub secret_key: Option<String>,
    // Optional-subsystem toggles (all default off in `from_env`).
    pub encryption_enabled: bool,
    pub kms_enabled: bool,
    pub gc_enabled: bool,
    pub integrity_enabled: bool,
    // Metrics collection cadence and retention.
    pub metrics_enabled: bool,
    pub metrics_history_enabled: bool,
    pub metrics_interval_minutes: u64,
    pub metrics_retention_hours: u64,
    pub metrics_history_interval_minutes: u64,
    pub metrics_history_retention_hours: u64,
    pub lifecycle_enabled: bool,
    pub website_hosting_enabled: bool,
    // Replication client tuning.
    pub replication_connect_timeout_secs: u64,
    pub replication_read_timeout_secs: u64,
    pub replication_max_retries: u32,
    pub replication_streaming_threshold_bytes: u64,
    pub replication_max_failures_per_bucket: usize,
    // Site-sync worker tuning.
    pub site_sync_enabled: bool,
    pub site_sync_interval_secs: u64,
    pub site_sync_batch_size: usize,
    pub site_sync_connect_timeout_secs: u64,
    pub site_sync_read_timeout_secs: u64,
    pub site_sync_max_retries: u32,
    pub site_sync_clock_skew_tolerance: f64,
    // Web UI asset locations.
    pub ui_enabled: bool,
    pub templates_dir: PathBuf,
    pub static_dir: PathBuf,
}
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn from_env() -> Self {
|
||||
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
|
||||
let port: u16 = std::env::var("PORT")
|
||||
.unwrap_or_else(|_| "5000".to_string())
|
||||
.parse()
|
||||
.unwrap_or(5000);
|
||||
let ui_port: u16 = std::env::var("UI_PORT")
|
||||
.unwrap_or_else(|_| "5100".to_string())
|
||||
.parse()
|
||||
.unwrap_or(5100);
|
||||
let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
|
||||
let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
|
||||
|
||||
let storage_path = PathBuf::from(&storage_root);
|
||||
let iam_config_path = std::env::var("IAM_CONFIG")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| {
|
||||
storage_path
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("iam.json")
|
||||
});
|
||||
|
||||
let sigv4_timestamp_tolerance_secs: u64 =
|
||||
std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
|
||||
.unwrap_or_else(|_| "900".to_string())
|
||||
.parse()
|
||||
.unwrap_or(900);
|
||||
|
||||
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "1".to_string())
|
||||
.parse()
|
||||
.unwrap_or(1);
|
||||
|
||||
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "604800".to_string())
|
||||
.parse()
|
||||
.unwrap_or(604800);
|
||||
|
||||
let secret_key = {
|
||||
let env_key = std::env::var("SECRET_KEY").ok();
|
||||
match env_key {
|
||||
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
|
||||
_ => {
|
||||
let secret_file = storage_path
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join(".secret");
|
||||
std::fs::read_to_string(&secret_file)
|
||||
.ok()
|
||||
.map(|s| s.trim().to_string())
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
|
||||
|
||||
let kms_enabled = parse_bool_env("KMS_ENABLED", false);
|
||||
|
||||
let gc_enabled = parse_bool_env("GC_ENABLED", false);
|
||||
|
||||
let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
|
||||
|
||||
let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
|
||||
|
||||
let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
|
||||
|
||||
let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
|
||||
let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
|
||||
let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
|
||||
let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
|
||||
|
||||
let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
|
||||
|
||||
let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
|
||||
|
||||
let replication_connect_timeout_secs =
|
||||
parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
|
||||
let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
|
||||
let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
|
||||
let replication_streaming_threshold_bytes =
|
||||
parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
|
||||
let replication_max_failures_per_bucket =
|
||||
parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
|
||||
|
||||
let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
|
||||
let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
|
||||
let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
|
||||
let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
|
||||
let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
|
||||
let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
|
||||
let site_sync_clock_skew_tolerance: f64 =
|
||||
std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(1.0);
|
||||
|
||||
let ui_enabled = parse_bool_env("UI_ENABLED", true);
|
||||
let templates_dir = std::env::var("TEMPLATES_DIR")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| default_templates_dir());
|
||||
let static_dir = std::env::var("STATIC_DIR")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| default_static_dir());
|
||||
|
||||
let host_ip: std::net::IpAddr = host.parse().unwrap();
|
||||
Self {
|
||||
bind_addr: SocketAddr::new(host_ip, port),
|
||||
ui_bind_addr: SocketAddr::new(host_ip, ui_port),
|
||||
storage_root: storage_path,
|
||||
region,
|
||||
iam_config_path,
|
||||
sigv4_timestamp_tolerance_secs,
|
||||
presigned_url_min_expiry,
|
||||
presigned_url_max_expiry,
|
||||
secret_key,
|
||||
encryption_enabled,
|
||||
kms_enabled,
|
||||
gc_enabled,
|
||||
integrity_enabled,
|
||||
metrics_enabled,
|
||||
metrics_history_enabled,
|
||||
metrics_interval_minutes,
|
||||
metrics_retention_hours,
|
||||
metrics_history_interval_minutes,
|
||||
metrics_history_retention_hours,
|
||||
lifecycle_enabled,
|
||||
website_hosting_enabled,
|
||||
replication_connect_timeout_secs,
|
||||
replication_read_timeout_secs,
|
||||
replication_max_retries,
|
||||
replication_streaming_threshold_bytes,
|
||||
replication_max_failures_per_bucket,
|
||||
site_sync_enabled,
|
||||
site_sync_interval_secs,
|
||||
site_sync_batch_size,
|
||||
site_sync_connect_timeout_secs,
|
||||
site_sync_read_timeout_secs,
|
||||
site_sync_max_retries,
|
||||
site_sync_clock_skew_tolerance,
|
||||
ui_enabled,
|
||||
templates_dir,
|
||||
static_dir,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_templates_dir() -> PathBuf {
|
||||
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
manifest_dir.join("templates")
|
||||
}
|
||||
|
||||
fn default_static_dir() -> PathBuf {
|
||||
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
for candidate in [
|
||||
manifest_dir.join("static"),
|
||||
manifest_dir.join("..").join("..").join("..").join("static"),
|
||||
] {
|
||||
if candidate.exists() {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
manifest_dir.join("static")
|
||||
}
|
||||
|
||||
/// Read `key` from the environment as a `u64`; an unset variable or a
/// value that fails to parse both yield `default`.
fn parse_u64_env(key: &str, default: u64) -> u64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||
|
||||
/// Read `key` from the environment as a boolean flag.
///
/// When the variable is set, only (case-insensitive, trimmed) "1",
/// "true", "yes" or "on" count as true — any other value is false.
/// Only an *unset* variable yields `default`.
fn parse_bool_env(key: &str, default: bool) -> bool {
    match std::env::var(key) {
        Ok(raw) => {
            let normalized = raw.trim().to_ascii_lowercase();
            normalized == "1" || normalized == "true" || normalized == "yes" || normalized == "on"
        }
        Err(_) => default,
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,184 +0,0 @@
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use bytes::{Buf, BytesMut};
|
||||
use tokio::io::{AsyncRead, ReadBuf};
|
||||
|
||||
/// Decoder position inside the aws-chunked framing.
enum State {
    /// Expecting a hex chunk-size line terminated by CRLF.
    ReadSize,
    /// Streaming chunk payload; the `u64` is bytes still owed
    /// (0 means only the trailing CRLF remains to be consumed).
    ReadData(u64),
    /// Consuming trailer lines after the terminal 0-size chunk.
    ReadTrailer,
    /// Stream fully decoded; further reads return EOF.
    Finished,
}
|
||||
|
||||
/// `AsyncRead` adapter that strips aws-chunked framing (hex size lines,
/// per-chunk CRLFs, and trailers) from an underlying reader.
pub struct AwsChunkedStream<S> {
    // Raw (still framed) source.
    inner: S,
    // Framed bytes read from `inner` but not yet decoded.
    buffer: BytesMut,
    // Current parser state.
    state: State,
    // Decoded payload bytes awaiting delivery to the caller.
    pending: BytesMut,
    // True once `inner` has reported EOF.
    eof: bool,
}
|
||||
|
||||
impl<S> AwsChunkedStream<S> {
    /// Wrap `inner` in an aws-chunked decoder with an 8 KiB read buffer.
    pub fn new(inner: S) -> Self {
        Self {
            inner,
            buffer: BytesMut::with_capacity(8192),
            state: State::ReadSize,
            pending: BytesMut::new(),
            eof: false,
        }
    }

    /// Index of the first CRLF in `buffer`, if a complete pair is present.
    fn find_crlf(&self) -> Option<usize> {
        for i in 0..self.buffer.len().saturating_sub(1) {
            if self.buffer[i] == b'\r' && self.buffer[i + 1] == b'\n' {
                return Some(i);
            }
        }
        None
    }

    /// Parse a chunk-size line: hex size, optionally followed by
    /// `;`-separated chunk extensions (e.g. chunk-signature), which are
    /// ignored.
    fn parse_chunk_size(line: &[u8]) -> std::io::Result<u64> {
        let text = std::str::from_utf8(line).map_err(|_| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "invalid chunk size encoding",
            )
        })?;
        let head = text.split(';').next().unwrap_or("").trim();
        u64::from_str_radix(head, 16).map_err(|_| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("invalid chunk size: {}", head),
            )
        })
    }

    /// Decode as much buffered input into `out` as possible.
    ///
    /// Returns `Ok(true)` when no more progress is useful right now
    /// (output full or stream finished) and `Ok(false)` when more input
    /// from `inner` is required before decoding can continue.
    fn try_advance(&mut self, out: &mut ReadBuf<'_>) -> std::io::Result<bool> {
        loop {
            if out.remaining() == 0 {
                return Ok(true);
            }

            // Drain previously decoded bytes before touching the parser.
            if !self.pending.is_empty() {
                let take = std::cmp::min(self.pending.len(), out.remaining());
                out.put_slice(&self.pending[..take]);
                self.pending.advance(take);
                continue;
            }

            match self.state {
                State::Finished => return Ok(true),
                State::ReadSize => {
                    // Need a full size line; otherwise wait for more input.
                    let idx = match self.find_crlf() {
                        Some(i) => i,
                        None => return Ok(false),
                    };
                    let line = self.buffer.split_to(idx);
                    self.buffer.advance(2);
                    let size = Self::parse_chunk_size(&line)?;
                    if size == 0 {
                        // Terminal 0-size chunk: trailers follow.
                        self.state = State::ReadTrailer;
                    } else {
                        self.state = State::ReadData(size);
                    }
                }
                State::ReadData(remaining) => {
                    if self.buffer.is_empty() {
                        return Ok(false);
                    }
                    let avail = std::cmp::min(self.buffer.len() as u64, remaining) as usize;
                    let take = std::cmp::min(avail, out.remaining());
                    out.put_slice(&self.buffer[..take]);
                    self.buffer.advance(take);
                    let new_remaining = remaining - take as u64;
                    if new_remaining == 0 {
                        // Payload complete: a CRLF terminator must follow.
                        if self.buffer.len() < 2 {
                            // ReadData(0) marks "only the CRLF still owed".
                            self.state = State::ReadData(0);
                            return Ok(false);
                        }
                        if &self.buffer[..2] != b"\r\n" {
                            return Err(std::io::Error::new(
                                std::io::ErrorKind::InvalidData,
                                "malformed chunk terminator",
                            ));
                        }
                        self.buffer.advance(2);
                        self.state = State::ReadSize;
                    } else {
                        self.state = State::ReadData(new_remaining);
                    }
                }
                State::ReadTrailer => {
                    // Consume trailer lines; an empty line ends the stream.
                    let idx = match self.find_crlf() {
                        Some(i) => i,
                        None => return Ok(false),
                    };
                    if idx == 0 {
                        self.buffer.advance(2);
                        self.state = State::Finished;
                    } else {
                        self.buffer.advance(idx + 2);
                    }
                }
            }
        }
    }
}
|
||||
|
||||
impl<S> AsyncRead for AwsChunkedStream<S>
where
    S: AsyncRead + Unpin,
{
    /// Pump the decoder: deliver decoded bytes if any are ready,
    /// otherwise read more framed input from `inner` and retry.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        loop {
            let before = buf.filled().len();
            let done = match self.try_advance(buf) {
                Ok(v) => v,
                Err(e) => return Poll::Ready(Err(e)),
            };
            // Any progress (or a finished stream) completes this poll;
            // a finished stream with no progress reads as EOF.
            if buf.filled().len() > before {
                return Poll::Ready(Ok(()));
            }
            if done {
                return Poll::Ready(Ok(()));
            }
            // Decoder stalled mid-frame and the source is exhausted:
            // the framing was truncated.
            if self.eof {
                return Poll::Ready(Err(std::io::Error::new(
                    std::io::ErrorKind::UnexpectedEof,
                    "unexpected EOF in aws-chunked stream",
                )));
            }

            // Pull another block of framed bytes from the inner reader.
            let mut tmp = [0u8; 8192];
            let mut rb = ReadBuf::new(&mut tmp);
            match Pin::new(&mut self.inner).poll_read(cx, &mut rb) {
                Poll::Ready(Ok(())) => {
                    let n = rb.filled().len();
                    if n == 0 {
                        // Zero-byte read signals EOF on the source.
                        self.eof = true;
                        continue;
                    }
                    self.buffer.extend_from_slice(rb.filled());
                }
                Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}
|
||||
|
||||
pub fn decode_body(body: axum::body::Body) -> impl AsyncRead + Send + Unpin {
|
||||
use futures::TryStreamExt;
|
||||
let stream = tokio_util::io::StreamReader::new(
|
||||
http_body_util::BodyStream::new(body)
|
||||
.map_ok(|frame| frame.into_data().unwrap_or_default())
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
|
||||
);
|
||||
AwsChunkedStream::new(stream)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,541 +0,0 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use axum::body::Body;
|
||||
use axum::extract::State;
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use rand::RngCore;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
fn json_ok(value: Value) -> Response {
|
||||
(
|
||||
StatusCode::OK,
|
||||
[("content-type", "application/json")],
|
||||
value.to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
fn json_err(status: StatusCode, msg: &str) -> Response {
|
||||
(
|
||||
status,
|
||||
[("content-type", "application/json")],
|
||||
json!({"error": msg}).to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// Collects the full request body and parses it as JSON.
///
/// An empty body is treated as `{}` so endpoints with optional payloads
/// still work. On failure the `Err` variant is a ready-made 400 response.
async fn read_json(body: Body) -> Result<Value, Response> {
    let body_bytes = http_body_util::BodyExt::collect(body)
        .await
        .map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid request body"))?
        .to_bytes();
    if body_bytes.is_empty() {
        Ok(json!({}))
    } else {
        serde_json::from_slice(&body_bytes)
            .map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid JSON"))
    }
}
|
||||
|
||||
/// Returns the KMS service handle from application state, or a 503 JSON
/// error response when the KMS subsystem is not enabled on this deployment.
fn require_kms(
    state: &AppState,
) -> Result<&std::sync::Arc<myfsio_crypto::kms::KmsService>, Response> {
    state
        .kms
        .as_ref()
        .ok_or_else(|| json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"))
}
|
||||
|
||||
fn decode_b64(value: &str, field: &str) -> Result<Vec<u8>, Response> {
|
||||
B64.decode(value).map_err(|_| {
|
||||
json_err(
|
||||
StatusCode::BAD_REQUEST,
|
||||
&format!("Invalid base64 {}", field),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn require_str<'a>(value: &'a Value, names: &[&str], message: &str) -> Result<&'a str, Response> {
|
||||
for name in names {
|
||||
if let Some(found) = value.get(*name).and_then(|v| v.as_str()) {
|
||||
return Ok(found);
|
||||
}
|
||||
}
|
||||
Err(json_err(StatusCode::BAD_REQUEST, message))
|
||||
}
|
||||
|
||||
/// Lists all KMS keys with AWS-style metadata under a `keys` array.
pub async fn list_keys(State(state): State<AppState>) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };

    let keys = kms.list_keys().await;
    let keys_json: Vec<Value> = keys
        .iter()
        .map(|k| {
            json!({
                "KeyId": k.key_id,
                "Arn": k.arn,
                "Description": k.description,
                "CreationDate": k.creation_date.to_rfc3339(),
                "Enabled": k.enabled,
                "KeyState": k.key_state,
                "KeyUsage": k.key_usage,
                "KeySpec": k.key_spec,
            })
        })
        .collect();

    json_ok(json!({"keys": keys_json}))
}
|
||||
|
||||
/// Creates a new KMS key. Accepts an optional `Description`/`description`
/// field in the JSON body (empty string when absent).
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    // Both AWS-style and snake_case field names are accepted.
    let description = req
        .get("Description")
        .or_else(|| req.get("description"))
        .and_then(|d| d.as_str())
        .unwrap_or("");

    match kms.create_key(description).await {
        Ok(key) => json_ok(json!({
            "KeyId": key.key_id,
            "Arn": key.arn,
            "Description": key.description,
            "CreationDate": key.creation_date.to_rfc3339(),
            "Enabled": key.enabled,
            "KeyState": key.key_state,
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// Fetches metadata for a single key by id; 404 when unknown.
pub async fn get_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };

    match kms.get_key(&key_id).await {
        Some(key) => json_ok(json!({
            "KeyId": key.key_id,
            "Arn": key.arn,
            "Description": key.description,
            "CreationDate": key.creation_date.to_rfc3339(),
            "Enabled": key.enabled,
            "KeyState": key.key_state,
            "KeyUsage": key.key_usage,
            "KeySpec": key.key_spec,
        })),
        None => json_err(StatusCode::NOT_FOUND, "Key not found"),
    }
}
|
||||
|
||||
/// Deletes a key: 204 on success, 404 when the key does not exist.
pub async fn delete_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };

    match kms.delete_key(&key_id).await {
        Ok(true) => StatusCode::NO_CONTENT.into_response(),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// Enables a key; `Ok(false)` from the service means the key was not found.
pub async fn enable_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };

    match kms.enable_key(&key_id).await {
        Ok(true) => json_ok(json!({"status": "enabled"})),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// Disables a key; `Ok(false)` from the service means the key was not found.
pub async fn disable_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };

    match kms.disable_key(&key_id).await {
        Ok(true) => json_ok(json!({"status": "disabled"})),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// Encrypts caller-supplied plaintext under a KMS key.
///
/// Body fields: `KeyId`/`key_id`, base64 `Plaintext`/`plaintext`.
/// Response mirrors AWS KMS Encrypt: `KeyId` plus base64 `CiphertextBlob`.
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let plaintext_b64 = match require_str(&req, &["Plaintext", "plaintext"], "Missing Plaintext") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
        Ok(value) => value,
        Err(response) => return response,
    };

    match kms.encrypt_data(key_id, &plaintext).await {
        Ok(ct) => json_ok(json!({
            "KeyId": key_id,
            "CiphertextBlob": B64.encode(&ct),
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// Decrypts a `CiphertextBlob` under the named KMS key.
///
/// Body fields: `KeyId`/`key_id`, base64 `CiphertextBlob`/`ciphertext_blob`.
/// Response mirrors AWS KMS Decrypt: `KeyId` plus base64 `Plaintext`.
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let ciphertext_b64 = match require_str(
        &req,
        &["CiphertextBlob", "ciphertext_blob"],
        "Missing CiphertextBlob",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
        Ok(value) => value,
        Err(response) => return response,
    };

    match kms.decrypt_data(key_id, &ciphertext).await {
        Ok(pt) => json_ok(json!({
            "KeyId": key_id,
            "Plaintext": B64.encode(&pt),
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// GenerateDataKey: returns a wrapped data key plus its plaintext copy.
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
    generate_data_key_inner(state, body, true).await
}
|
||||
|
||||
/// GenerateDataKeyWithoutPlaintext: returns only the wrapped data key.
pub async fn generate_data_key_without_plaintext(
    State(state): State<AppState>,
    body: Body,
) -> Response {
    generate_data_key_inner(state, body, false).await
}
|
||||
|
||||
/// Shared implementation for the two GenerateDataKey variants.
///
/// Body fields: `KeyId`/`key_id`, optional `NumberOfBytes` (default 32,
/// clamped to 1..=1024). `include_plaintext` controls whether the plaintext
/// data key is echoed back alongside the wrapped blob.
async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext: bool) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let num_bytes = req
        .get("NumberOfBytes")
        .and_then(|v| v.as_u64())
        .unwrap_or(32) as usize;

    if !(1..=1024).contains(&num_bytes) {
        return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
    }

    match kms.generate_data_key(key_id, num_bytes).await {
        Ok((plaintext, wrapped)) => {
            let mut value = json!({
                "KeyId": key_id,
                "CiphertextBlob": B64.encode(&wrapped),
            });
            if include_plaintext {
                value["Plaintext"] = json!(B64.encode(&plaintext));
            }
            json_ok(value)
        }
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// ReEncrypt: decrypts a blob and re-wraps it under `DestinationKeyId`.
///
/// The ciphertext does not carry its source key id, so the handler probes
/// every enabled key with a trial decryption until one succeeds.
/// NOTE(review): this is O(number of keys) per request and treats "no key
/// decrypts" as a 400 — confirm acceptable for large key counts.
pub async fn re_encrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let ciphertext_b64 = match require_str(
        &req,
        &["CiphertextBlob", "ciphertext_blob"],
        "CiphertextBlob is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let destination_key_id = match require_str(
        &req,
        &["DestinationKeyId", "destination_key_id"],
        "DestinationKeyId is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
        Ok(value) => value,
        Err(response) => return response,
    };

    // Trial-decrypt with each enabled key to discover the source key.
    let keys = kms.list_keys().await;
    let mut source_key_id: Option<String> = None;
    let mut plaintext: Option<Vec<u8>> = None;
    for key in keys {
        if !key.enabled {
            continue;
        }
        if let Ok(value) = kms.decrypt_data(&key.key_id, &ciphertext).await {
            source_key_id = Some(key.key_id);
            plaintext = Some(value);
            break;
        }
    }

    let Some(source_key_id) = source_key_id else {
        return json_err(
            StatusCode::BAD_REQUEST,
            "Could not determine source key for CiphertextBlob",
        );
    };
    // `plaintext` is always Some when `source_key_id` is Some; the default
    // branch is unreachable in practice.
    let plaintext = plaintext.unwrap_or_default();

    match kms.encrypt_data(destination_key_id, &plaintext).await {
        Ok(new_ciphertext) => json_ok(json!({
            "CiphertextBlob": B64.encode(&new_ciphertext),
            "SourceKeyId": source_key_id,
            "KeyId": destination_key_id,
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// GenerateRandom: returns `NumberOfBytes` (default 32, 1..=1024) of
/// cryptographically random data as base64.
///
/// KMS is only used as an availability gate here; the bytes come from the
/// process CSPRNG, not from a key.
pub async fn generate_random(State(state): State<AppState>, body: Body) -> Response {
    if let Err(response) = require_kms(&state) {
        return response;
    }
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let num_bytes = req
        .get("NumberOfBytes")
        .and_then(|v| v.as_u64())
        .unwrap_or(32) as usize;

    if !(1..=1024).contains(&num_bytes) {
        return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
    }

    let mut bytes = vec![0u8; num_bytes];
    rand::thread_rng().fill_bytes(&mut bytes);
    json_ok(json!({
        "Plaintext": B64.encode(bytes),
    }))
}
|
||||
|
||||
pub async fn client_generate_key(State(state): State<AppState>) -> Response {
|
||||
let _ = state;
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
json_ok(json!({
|
||||
"Key": B64.encode(key),
|
||||
"Algorithm": "AES-256-GCM",
|
||||
"KeySize": 32,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Client-side AES-256-GCM encryption helper.
///
/// Body fields: base64 `Plaintext`/`plaintext` and base64 `Key`/`key`
/// (must decode to exactly 32 bytes). A fresh random 12-byte nonce is
/// generated per call and returned alongside the ciphertext.
pub async fn client_encrypt(State(state): State<AppState>, body: Body) -> Response {
    let _ = state;
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let plaintext_b64 =
        match require_str(&req, &["Plaintext", "plaintext"], "Plaintext is required") {
            Ok(value) => value,
            Err(response) => return response,
        };
    let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
        Ok(value) => value,
        Err(response) => return response,
    };

    let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_bytes = match decode_b64(key_b64, "Key") {
        Ok(value) => value,
        Err(response) => return response,
    };
    if key_bytes.len() != 32 {
        return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
    }

    let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
        Ok(cipher) => cipher,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
    };
    // 96-bit random nonce, the standard size for AES-GCM.
    let mut nonce_bytes = [0u8; 12];
    rand::thread_rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);

    match cipher.encrypt(nonce, plaintext.as_ref()) {
        Ok(ciphertext) => json_ok(json!({
            "Ciphertext": B64.encode(ciphertext),
            "Nonce": B64.encode(nonce_bytes),
            "Algorithm": "AES-256-GCM",
        })),
        Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
    }
}
|
||||
|
||||
/// Client-side AES-256-GCM decryption helper.
///
/// Body fields: base64 `Ciphertext`, `Nonce` (12 bytes after decode), and
/// `Key` (32 bytes after decode); snake_case aliases are accepted. GCM tag
/// verification failures surface as 400 with the cipher error text.
pub async fn client_decrypt(State(state): State<AppState>, body: Body) -> Response {
    let _ = state;
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let ciphertext_b64 = match require_str(
        &req,
        &["Ciphertext", "ciphertext"],
        "Ciphertext is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let nonce_b64 = match require_str(&req, &["Nonce", "nonce"], "Nonce is required") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
        Ok(value) => value,
        Err(response) => return response,
    };

    let ciphertext = match decode_b64(ciphertext_b64, "Ciphertext") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let nonce_bytes = match decode_b64(nonce_b64, "Nonce") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_bytes = match decode_b64(key_b64, "Key") {
        Ok(value) => value,
        Err(response) => return response,
    };
    if key_bytes.len() != 32 {
        return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
    }
    if nonce_bytes.len() != 12 {
        return json_err(StatusCode::BAD_REQUEST, "Nonce must decode to 12 bytes");
    }

    let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
        Ok(cipher) => cipher,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
    };
    let nonce = Nonce::from_slice(&nonce_bytes);

    match cipher.decrypt(nonce, ciphertext.as_ref()) {
        Ok(plaintext) => json_ok(json!({
            "Plaintext": B64.encode(plaintext),
        })),
        Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
    }
}
|
||||
|
||||
/// Issues client-side encryption materials: a fresh 32-byte data key under
/// `key_id`, returned both in plaintext and KMS-wrapped form.
///
/// The request body is parsed only to validate it is JSON; its contents are
/// ignored.
pub async fn materials(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
    body: Body,
) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let _ = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    match kms.generate_data_key(&key_id, 32).await {
        Ok((plaintext, wrapped)) => json_ok(json!({
            "PlaintextKey": B64.encode(plaintext),
            "EncryptedKey": B64.encode(wrapped),
            "KeyId": key_id,
            "Algorithm": "AES-256-GCM",
            "KeyWrapAlgorithm": "kms",
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,578 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use axum::body::Body;
|
||||
use axum::http::{HeaderMap, HeaderName, StatusCode};
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::Engine;
|
||||
use bytes::Bytes;
|
||||
use crc32fast::Hasher;
|
||||
use duckdb::types::ValueRef;
|
||||
use duckdb::Connection;
|
||||
use futures::stream;
|
||||
use http_body_util::BodyExt;
|
||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
// Empty extern block whose only purpose is the #[link] attribute: it pulls
// in the Windows Restart Manager import library at link time.
// NOTE(review): confirm this link dependency is still required here.
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}

/// Target size (64 KiB) for each streamed select-response chunk.
const CHUNK_SIZE: usize = 65_536;
|
||||
|
||||
/// Handles S3 SelectObjectContent: parses the XML request, runs the SQL
/// expression against the object via DuckDB on a blocking thread, and
/// streams the result back as Records/Stats/End events.
///
/// `encode_select_event` and `build_stats_xml` (defined elsewhere in this
/// module) presumably produce AWS event-stream framing — confirm against
/// their definitions.
pub async fn post_select_object_content(
    state: &AppState,
    bucket: &str,
    key: &str,
    headers: &HeaderMap,
    body: Body,
) -> Response {
    if let Some(resp) = require_xml_content_type(headers) {
        return resp;
    }

    let body_bytes = match body.collect().await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::MalformedXML,
                "Unable to parse XML document",
            ));
        }
    };

    let request = match parse_select_request(&body_bytes) {
        Ok(r) => r,
        Err(err) => return s3_error_response(err),
    };

    let object_path = match state.storage.get_object_path(bucket, key).await {
        Ok(path) => path,
        Err(_) => {
            return s3_error_response(S3Error::new(S3ErrorCode::NoSuchKey, "Object not found"));
        }
    };

    // DuckDB work is synchronous; run it off the async executor.
    let join_res =
        tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
    let chunks = match join_res {
        Ok(Ok(chunks)) => chunks,
        Ok(Err(message)) => {
            return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
        }
        Err(_) => {
            // The blocking task panicked or was cancelled.
            return s3_error_response(S3Error::new(
                S3ErrorCode::InternalError,
                "SelectObjectContent execution failed",
            ));
        }
    };

    // One Records event per chunk, then Stats and End (hence +2 capacity).
    let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
    let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
    for chunk in chunks {
        events.push(Bytes::from(encode_select_event("Records", &chunk)));
    }

    let stats_payload = build_stats_xml(0, bytes_returned);
    events.push(Bytes::from(encode_select_event(
        "Stats",
        stats_payload.as_bytes(),
    )));
    events.push(Bytes::from(encode_select_event("End", b"")));

    let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
    let body = Body::from_stream(stream);

    let mut response = (StatusCode::OK, body).into_response();
    response.headers_mut().insert(
        HeaderName::from_static("content-type"),
        "application/octet-stream".parse().unwrap(),
    );
    response.headers_mut().insert(
        HeaderName::from_static("x-amz-request-charged"),
        "requester".parse().unwrap(),
    );
    response
}
|
||||
|
||||
/// Parsed SelectObjectContent request: SQL expression plus I/O formats.
#[derive(Clone)]
struct SelectRequest {
    expression: String,
    input_format: InputFormat,
    output_format: OutputFormat,
}

/// Source data format declared by `<InputSerialization>`.
#[derive(Clone)]
enum InputFormat {
    Csv(CsvInputConfig),
    Json(JsonInputConfig),
    Parquet,
}

/// CSV input options.
#[derive(Clone)]
struct CsvInputConfig {
    file_header_info: String, // uppercased by the parser; e.g. "USE"/"IGNORE"/"NONE"
    field_delimiter: String,
    quote_character: String,
}

/// JSON input options.
#[derive(Clone)]
struct JsonInputConfig {
    json_type: String, // uppercased by the parser; "DOCUMENT" or "LINES"
}

/// Result serialization declared by `<OutputSerialization>`.
#[derive(Clone)]
enum OutputFormat {
    Csv(CsvOutputConfig),
    Json(JsonOutputConfig),
}

/// CSV output options.
#[derive(Clone)]
struct CsvOutputConfig {
    field_delimiter: String,
    record_delimiter: String,
    quote_character: String,
}

/// JSON output options (records are delimiter-separated objects).
#[derive(Clone)]
struct JsonOutputConfig {
    record_delimiter: String,
}
|
||||
|
||||
/// Parses the SelectObjectContent XML body into a `SelectRequest`.
///
/// Requires a non-empty `Expression`, an `ExpressionType` of SQL (the
/// default), and both `InputSerialization` and `OutputSerialization`.
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
    // Lossy conversion: invalid UTF-8 bytes become replacement chars rather
    // than failing outright; the XML parser rejects anything malformed.
    let xml = String::from_utf8_lossy(payload);
    let doc = roxmltree::Document::parse(&xml)
        .map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;

    let root = doc.root_element();
    if root.tag_name().name() != "SelectObjectContentRequest" {
        return Err(S3Error::new(
            S3ErrorCode::MalformedXML,
            "Root element must be SelectObjectContentRequest",
        ));
    }

    let expression = child_text(&root, "Expression")
        .filter(|v| !v.is_empty())
        .ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;

    let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
    if !expression_type.eq_ignore_ascii_case("SQL") {
        return Err(S3Error::new(
            S3ErrorCode::InvalidRequest,
            "Only SQL expression type is supported",
        ));
    }

    let input_node = child(&root, "InputSerialization").ok_or_else(|| {
        S3Error::new(
            S3ErrorCode::InvalidRequest,
            "InputSerialization is required",
        )
    })?;
    let output_node = child(&root, "OutputSerialization").ok_or_else(|| {
        S3Error::new(
            S3ErrorCode::InvalidRequest,
            "OutputSerialization is required",
        )
    })?;

    let input_format = parse_input_format(&input_node)?;
    let output_format = parse_output_format(&output_node)?;

    Ok(SelectRequest {
        expression,
        input_format,
        output_format,
    })
}
|
||||
|
||||
/// Reads the `<CSV>`, `<JSON>`, or `<Parquet>` child of
/// `<InputSerialization>`, applying AWS defaults for missing options.
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
    if let Some(csv_node) = child(node, "CSV") {
        return Ok(InputFormat::Csv(CsvInputConfig {
            file_header_info: child_text(&csv_node, "FileHeaderInfo")
                .unwrap_or_else(|| "NONE".to_string())
                .to_ascii_uppercase(),
            field_delimiter: child_text(&csv_node, "FieldDelimiter")
                .unwrap_or_else(|| ",".to_string()),
            quote_character: child_text(&csv_node, "QuoteCharacter")
                .unwrap_or_else(|| "\"".to_string()),
        }));
    }

    if let Some(json_node) = child(node, "JSON") {
        return Ok(InputFormat::Json(JsonInputConfig {
            json_type: child_text(&json_node, "Type")
                .unwrap_or_else(|| "DOCUMENT".to_string())
                .to_ascii_uppercase(),
        }));
    }

    if child(node, "Parquet").is_some() {
        return Ok(InputFormat::Parquet);
    }

    Err(S3Error::new(
        S3ErrorCode::InvalidRequest,
        "InputSerialization must specify CSV, JSON, or Parquet",
    ))
}
|
||||
|
||||
/// Reads the `<CSV>` or `<JSON>` child of `<OutputSerialization>`,
/// applying AWS defaults for missing options.
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
    if let Some(csv_node) = child(node, "CSV") {
        return Ok(OutputFormat::Csv(CsvOutputConfig {
            field_delimiter: child_text(&csv_node, "FieldDelimiter")
                .unwrap_or_else(|| ",".to_string()),
            record_delimiter: child_text(&csv_node, "RecordDelimiter")
                .unwrap_or_else(|| "\n".to_string()),
            quote_character: child_text(&csv_node, "QuoteCharacter")
                .unwrap_or_else(|| "\"".to_string()),
        }));
    }

    if let Some(json_node) = child(node, "JSON") {
        return Ok(OutputFormat::Json(JsonOutputConfig {
            record_delimiter: child_text(&json_node, "RecordDelimiter")
                .unwrap_or_else(|| "\n".to_string()),
        }));
    }

    Err(S3Error::new(
        S3ErrorCode::InvalidRequest,
        "OutputSerialization must specify CSV or JSON",
    ))
}
|
||||
|
||||
/// Finds the first direct child element of `node` with the given tag name.
fn child<'a, 'input>(
    node: &'a roxmltree::Node<'a, 'input>,
    name: &str,
) -> Option<roxmltree::Node<'a, 'input>> {
    for candidate in node.children() {
        if candidate.is_element() && candidate.tag_name().name() == name {
            return Some(candidate);
        }
    }
    None
}
|
||||
|
||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||
child(node, name)
|
||||
.and_then(|n| n.text())
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
/// Loads the object into an in-memory DuckDB table and runs the (lightly
/// rewritten) SQL expression, returning serialized output chunks.
///
/// Blocking: intended to run inside `tokio::task::spawn_blocking`.
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
    let conn =
        Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;

    load_input_table(&conn, &path, &request.input_format)?;

    // S3 Select queries reference "s3object"; the data lives in a table
    // named "data". NOTE(review): a plain string replace also rewrites the
    // word inside string literals — confirm this is acceptable.
    let expression = request
        .expression
        .replace("s3object", "data")
        .replace("S3Object", "data");

    let mut stmt = conn
        .prepare(&expression)
        .map_err(|e| format!("SQL execution error: {}", e))?;
    let mut rows = stmt
        .query([])
        .map_err(|e| format!("SQL execution error: {}", e))?;
    // Column names are read from the statement behind the Rows handle;
    // unnameable columns fall back to positional "_<i>" keys.
    let stmt_ref = rows
        .as_ref()
        .ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
    let col_count = stmt_ref.column_count();
    let mut columns: Vec<String> = Vec::with_capacity(col_count);
    for i in 0..col_count {
        let name = stmt_ref
            .column_name(i)
            .map(|s| s.to_string())
            .unwrap_or_else(|_| format!("_{}", i));
        columns.push(name);
    }

    match request.output_format {
        OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
        OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
    }
}
|
||||
|
||||
/// Creates the `data` table in `conn` by reading the object at `path` with
/// the reader matching the declared input format.
///
/// Path and option values are single-quote-escaped before being embedded in
/// the generated SQL.
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
    // DuckDB accepts forward slashes on every platform.
    let path_str = path.to_string_lossy().replace('\\', "/");
    match input {
        InputFormat::Csv(cfg) => {
            // NOTE(review): "IGNORE" also sets header=true, so header names
            // still become column names — confirm this matches the intended
            // S3 FileHeaderInfo semantics.
            let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
            let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
            let quote = normalize_single_char(&cfg.quote_character, '"');

            let sql = format!(
                "CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
                sql_escape(&path_str),
                if header { "true" } else { "false" },
                sql_escape(&delimiter),
                sql_escape(&quote)
            );
            conn.execute_batch(&sql)
                .map_err(|e| format!("Failed loading CSV data: {}", e))?;
        }
        InputFormat::Json(cfg) => {
            // "LINES" means newline-delimited JSON; anything else is treated
            // as a single document/array.
            let format = if cfg.json_type == "LINES" {
                "newline_delimited"
            } else {
                "array"
            };
            let sql = format!(
                "CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
                sql_escape(&path_str),
                format
            );
            conn.execute_batch(&sql)
                .map_err(|e| format!("Failed loading JSON data: {}", e))?;
        }
        InputFormat::Parquet => {
            let sql = format!(
                "CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
                sql_escape(&path_str)
            );
            conn.execute_batch(&sql)
                .map_err(|e| format!("Failed loading Parquet data: {}", e))?;
        }
    }
    Ok(())
}
|
||||
|
||||
/// Escapes a value for embedding in a single-quoted SQL string literal by
/// doubling each single quote.
fn sql_escape(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch == '\'' {
            escaped.push_str("''");
        } else {
            escaped.push(ch);
        }
    }
    escaped
}
|
||||
|
||||
/// Reduces a user-supplied delimiter/quote option to a single character,
/// falling back to `default_char` when the string is empty.
fn normalize_single_char(value: &str, default_char: char) -> String {
    match value.chars().next() {
        Some(first) => first.to_string(),
        None => default_char.to_string(),
    }
}
|
||||
|
||||
fn collect_csv_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
cfg: CsvOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let delimiter = cfg.field_delimiter;
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let quote = cfg.quote_character;
|
||||
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows
|
||||
.next()
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?
|
||||
{
|
||||
let mut fields: Vec<String> = Vec::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
if matches!(value, ValueRef::Null) {
|
||||
fields.push(String::new());
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut text = value_ref_to_string(value);
|
||||
if text.contains(&delimiter)
|
||||
|| text.contains("e)
|
||||
|| text.contains(&record_delimiter)
|
||||
{
|
||||
text = text.replace("e, &(quote.clone() + "e));
|
||||
text = format!("{}{}{}", quote, text, quote);
|
||||
}
|
||||
fields.push(text);
|
||||
}
|
||||
buffer.push_str(&fields.join(&delimiter));
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
fn collect_json_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
columns: &[String],
|
||||
cfg: JsonOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows
|
||||
.next()
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?
|
||||
{
|
||||
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
let key = columns.get(i).cloned().unwrap_or_else(|| format!("_{}", i));
|
||||
record.insert(key, value_ref_to_json(value));
|
||||
}
|
||||
let line = serde_json::to_string(&record)
|
||||
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
|
||||
buffer.push_str(&line);
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
/// Renders a DuckDB value as CSV field text: NULL → empty string, text
/// lossily decoded, blobs base64-encoded, anything unhandled via Debug.
fn value_ref_to_string(value: ValueRef<'_>) -> String {
    match value {
        ValueRef::Null => String::new(),
        ValueRef::Boolean(v) => v.to_string(),
        ValueRef::TinyInt(v) => v.to_string(),
        ValueRef::SmallInt(v) => v.to_string(),
        ValueRef::Int(v) => v.to_string(),
        ValueRef::BigInt(v) => v.to_string(),
        ValueRef::UTinyInt(v) => v.to_string(),
        ValueRef::USmallInt(v) => v.to_string(),
        ValueRef::UInt(v) => v.to_string(),
        ValueRef::UBigInt(v) => v.to_string(),
        ValueRef::Float(v) => v.to_string(),
        ValueRef::Double(v) => v.to_string(),
        ValueRef::Decimal(v) => v.to_string(),
        ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
        ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
        // Catch-all for types without a dedicated rendering (dates, lists, …).
        _ => format!("{:?}", value),
    }
}
|
||||
|
||||
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
|
||||
match value {
|
||||
ValueRef::Null => serde_json::Value::Null,
|
||||
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
|
||||
ValueRef::TinyInt(v) => serde_json::json!(v),
|
||||
ValueRef::SmallInt(v) => serde_json::json!(v),
|
||||
ValueRef::Int(v) => serde_json::json!(v),
|
||||
ValueRef::BigInt(v) => serde_json::json!(v),
|
||||
ValueRef::UTinyInt(v) => serde_json::json!(v),
|
||||
ValueRef::USmallInt(v) => serde_json::json!(v),
|
||||
ValueRef::UInt(v) => serde_json::json!(v),
|
||||
ValueRef::UBigInt(v) => serde_json::json!(v),
|
||||
ValueRef::Float(v) => serde_json::json!(v),
|
||||
ValueRef::Double(v) => serde_json::json!(v),
|
||||
ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
|
||||
ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
|
||||
ValueRef::Blob(v) => {
|
||||
serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v))
|
||||
}
|
||||
_ => serde_json::Value::String(format!("{:?}", value)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
|
||||
let value = headers
|
||||
.get("content-type")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("")
|
||||
.trim();
|
||||
if value.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let lowered = value.to_ascii_lowercase();
|
||||
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
|
||||
return None;
|
||||
}
|
||||
Some(s3_error_response(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"Content-Type must be application/xml or text/xml",
|
||||
)))
|
||||
}
|
||||
|
||||
fn s3_error_response(err: S3Error) -> Response {
|
||||
let status =
|
||||
StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
|
||||
let resource = if err.resource.is_empty() {
|
||||
"/".to_string()
|
||||
} else {
|
||||
err.resource.clone()
|
||||
};
|
||||
let body = err
|
||||
.with_resource(resource)
|
||||
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
|
||||
.to_xml();
|
||||
(status, [("content-type", "application/xml")], body).into_response()
|
||||
}
|
||||
|
||||
/// Builds the `<Stats>` XML fragment for an S3 Select Stats event.
///
/// `BytesProcessed` is reported equal to `BytesScanned`: this engine
/// processes exactly the bytes it scans.
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
    let mut xml = String::with_capacity(128);
    xml.push_str("<Stats>");
    xml.push_str(&format!("<BytesScanned>{}</BytesScanned>", bytes_scanned));
    xml.push_str(&format!("<BytesProcessed>{}</BytesProcessed>", bytes_scanned));
    xml.push_str(&format!("<BytesReturned>{}</BytesReturned>", bytes_returned));
    xml.push_str("</Stats>");
    xml
}
|
||||
|
||||
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
|
||||
let mut headers = Vec::new();
|
||||
headers.extend(encode_select_header(":event-type", event_type));
|
||||
if event_type == "Records" {
|
||||
headers.extend(encode_select_header(
|
||||
":content-type",
|
||||
"application/octet-stream",
|
||||
));
|
||||
} else if event_type == "Stats" {
|
||||
headers.extend(encode_select_header(":content-type", "text/xml"));
|
||||
}
|
||||
headers.extend(encode_select_header(":message-type", "event"));
|
||||
|
||||
let headers_len = headers.len() as u32;
|
||||
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
|
||||
|
||||
let mut message = Vec::with_capacity(total_len);
|
||||
let mut prelude = Vec::with_capacity(8);
|
||||
prelude.extend((total_len as u32).to_be_bytes());
|
||||
prelude.extend(headers_len.to_be_bytes());
|
||||
|
||||
let prelude_crc = crc32(&prelude);
|
||||
message.extend(prelude);
|
||||
message.extend(prelude_crc.to_be_bytes());
|
||||
message.extend(headers);
|
||||
message.extend(payload);
|
||||
|
||||
let msg_crc = crc32(&message);
|
||||
message.extend(msg_crc.to_be_bytes());
|
||||
message
|
||||
}
|
||||
|
||||
/// Encodes one event-stream header as: 1-byte name length, the name
/// bytes, the value-type tag `7` (string), a big-endian u16 value
/// length, then the value bytes.
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
    let (name, value) = (name.as_bytes(), value.as_bytes());
    let mut encoded = Vec::with_capacity(name.len() + value.len() + 4);
    encoded.push(name.len() as u8);
    encoded.extend_from_slice(name);
    encoded.push(7); // header value type 7 = string
    encoded.extend_from_slice(&(value.len() as u16).to_be_bytes());
    encoded.extend_from_slice(value);
    encoded
}
|
||||
|
||||
/// Computes the CRC32 checksum of `data` via the `crc32fast` hasher,
/// as required by the AWS event-stream framing in `encode_select_event`.
fn crc32(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}
|
||||
@@ -1,210 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::error::Error as StdError;
|
||||
|
||||
use axum::extract::{Extension, Form, State};
|
||||
use axum::http::{header, HeaderMap, StatusCode};
|
||||
use axum::response::{IntoResponse, Redirect, Response};
|
||||
use tera::Context;
|
||||
|
||||
use crate::middleware::session::SessionHandle;
|
||||
use crate::session::FlashMessage;
|
||||
use crate::state::AppState;
|
||||
|
||||
pub async fn login_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
if session.read(|s| s.is_authenticated()) {
|
||||
return Redirect::to("/ui/buckets").into_response();
|
||||
}
|
||||
|
||||
let mut ctx = base_context(&session, None);
|
||||
let flashed = session.write(|s| s.take_flash());
|
||||
inject_flash(&mut ctx, flashed);
|
||||
|
||||
render(&state, "login.html", &ctx)
|
||||
}
|
||||
|
||||
/// Form payload for POST /login.
#[derive(serde::Deserialize)]
pub struct LoginForm {
    // IAM access key entered by the user.
    pub access_key: String,
    // Matching secret key; compared in constant time by the handler.
    pub secret_key: String,
    // CSRF token echoed back from the rendered form. Defaults to empty
    // so deserialization never fails when the field is absent.
    #[serde(default)]
    pub csrf_token: String,
    // Optional post-login redirect target; validated against /ui paths
    // by the handler before use.
    #[serde(default)]
    pub next: Option<String>,
}
|
||||
|
||||
/// POST /login — verifies the submitted credentials and starts a session.
///
/// The secret key is compared in constant time to avoid a timing side
/// channel, the CSRF token is rotated on successful sign-in, and the
/// optional `next` redirect is restricted to in-app `/ui` paths so the
/// endpoint cannot be used as an open redirect. On failure the user is
/// sent back to the login form with a flash message.
pub async fn login_submit(
    State(state): State<AppState>,
    Extension(session): Extension<SessionHandle>,
    Form(form): Form<LoginForm>,
) -> Response {
    let access_key = form.access_key.trim();
    let secret_key = form.secret_key.trim();

    match state.iam.get_secret_key(access_key) {
        Some(expected) if constant_time_eq_str(&expected, secret_key) => {
            // Prefer the IAM display name; fall back to the access key.
            let display = state
                .iam
                .get_user(access_key)
                .await
                .and_then(|v| {
                    v.get("display_name")
                        .and_then(|d| d.as_str())
                        .map(|s| s.to_string())
                })
                .unwrap_or_else(|| access_key.to_string());

            session.write(|s| {
                s.user_id = Some(access_key.to_string());
                s.display_name = Some(display);
                // Rotate CSRF on privilege change (session-fixation defense).
                s.rotate_csrf();
                s.push_flash("success", "Signed in successfully.");
            });

            // Only honor same-app redirect targets; anything else falls
            // back to the buckets overview.
            let next = form
                .next
                .as_deref()
                .filter(|n| n.starts_with("/ui/") || *n == "/ui")
                .unwrap_or("/ui/buckets")
                .to_string();
            Redirect::to(&next).into_response()
        }
        _ => {
            // Unknown key and wrong secret take the same path so the
            // response does not reveal which part was invalid.
            session.write(|s| {
                s.push_flash("danger", "Invalid access key or secret key.");
            });
            Redirect::to("/login").into_response()
        }
    }
}
|
||||
|
||||
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
|
||||
session.write(|s| {
|
||||
s.user_id = None;
|
||||
s.display_name = None;
|
||||
s.flash.clear();
|
||||
s.rotate_csrf();
|
||||
s.push_flash("info", "Signed out.");
|
||||
});
|
||||
Redirect::to("/login").into_response()
|
||||
}
|
||||
|
||||
pub async fn csrf_error_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
let ctx = base_context(&session, None);
|
||||
let mut resp = render(&state, "csrf_error.html", &ctx);
|
||||
*resp.status_mut() = StatusCode::FORBIDDEN;
|
||||
resp
|
||||
}
|
||||
|
||||
/// Redirects the UI roots (`/`, `/ui`, `/ui/`) to the buckets overview.
pub async fn root_redirect() -> Response {
    Redirect::to("/ui/buckets").into_response()
}
|
||||
|
||||
pub async fn not_found_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
let ctx = base_context(&session, None);
|
||||
let mut resp = render(&state, "404.html", &ctx);
|
||||
*resp.status_mut() = StatusCode::NOT_FOUND;
|
||||
resp
|
||||
}
|
||||
|
||||
pub async fn require_login(
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
req: axum::extract::Request,
|
||||
next: axum::middleware::Next,
|
||||
) -> Response {
|
||||
if session.read(|s| s.is_authenticated()) {
|
||||
return next.run(req).await;
|
||||
}
|
||||
let path = req.uri().path().to_string();
|
||||
let query = req
|
||||
.uri()
|
||||
.query()
|
||||
.map(|q| format!("?{}", q))
|
||||
.unwrap_or_default();
|
||||
let next_url = format!("{}{}", path, query);
|
||||
let encoded =
|
||||
percent_encoding::utf8_percent_encode(&next_url, percent_encoding::NON_ALPHANUMERIC)
|
||||
.to_string();
|
||||
let target = format!("/login?next={}", encoded);
|
||||
Redirect::to(&target).into_response()
|
||||
}
|
||||
|
||||
/// Renders `template` with `ctx`, falling back to a 500 page on failure.
///
/// If no template engine is configured, a plain-text 500 is returned.
/// When rendering fails, the full error source chain is logged and
/// `500.html` is rendered instead — unless `500.html` itself was the
/// failing template, in which case a hard-coded body prevents recursing
/// into the same broken template.
pub fn render(state: &AppState, template: &str, ctx: &Context) -> Response {
    let engine = match &state.templates {
        Some(e) => e,
        None => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                "Templates not configured",
            )
                .into_response();
        }
    };
    match engine.render(template, ctx) {
        Ok(html) => {
            let mut headers = HeaderMap::new();
            headers.insert(
                header::CONTENT_TYPE,
                "text/html; charset=utf-8".parse().unwrap(),
            );
            (StatusCode::OK, headers, html).into_response()
        }
        Err(e) => {
            // Walk the error source chain so the log shows the root
            // cause, not just the top-level wrapper error.
            let mut detail = format!("{}", e);
            let mut src = StdError::source(&e);
            while let Some(s) = src {
                detail.push_str(" | ");
                detail.push_str(&s.to_string());
                src = s.source();
            }
            tracing::error!("Template render failed ({}): {}", template, detail);
            let fallback_ctx = Context::new();
            // Guard: never re-render 500.html from its own failure.
            let body = if template != "500.html" {
                engine
                    .render("500.html", &fallback_ctx)
                    .unwrap_or_else(|_| "Internal Server Error".to_string())
            } else {
                "Internal Server Error".to_string()
            };
            let mut headers = HeaderMap::new();
            headers.insert(
                header::CONTENT_TYPE,
                "text/html; charset=utf-8".parse().unwrap(),
            );
            (StatusCode::INTERNAL_SERVER_ERROR, headers, body).into_response()
        }
    }
}
|
||||
|
||||
pub fn base_context(session: &SessionHandle, endpoint: Option<&str>) -> Context {
|
||||
let mut ctx = Context::new();
|
||||
let snapshot = session.snapshot();
|
||||
ctx.insert("csrf_token_value", &snapshot.csrf_token);
|
||||
ctx.insert("is_authenticated", &snapshot.user_id.is_some());
|
||||
ctx.insert("current_user", &snapshot.user_id);
|
||||
ctx.insert("current_user_display_name", &snapshot.display_name);
|
||||
ctx.insert("current_endpoint", &endpoint.unwrap_or(""));
|
||||
ctx.insert("request_args", &HashMap::<String, String>::new());
|
||||
ctx.insert("null", &serde_json::Value::Null);
|
||||
ctx.insert("none", &serde_json::Value::Null);
|
||||
ctx
|
||||
}
|
||||
|
||||
/// Exposes the drained flash messages to templates under the
/// `flashed_messages` key.
pub fn inject_flash(ctx: &mut Context, flashed: Vec<FlashMessage>) {
    ctx.insert("flashed_messages", &flashed);
}
|
||||
|
||||
/// Compares two strings in constant time with respect to their contents.
///
/// The early length check leaks only the lengths, not any byte values —
/// the standard trade-off for constant-time equality of variable-length
/// inputs.
fn constant_time_eq_str(a: &str, b: &str) -> bool {
    if a.len() != b.len() {
        return false;
    }
    subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,559 +0,0 @@
|
||||
// Crate modules: configuration, HTTP handlers, tower middleware,
// background services, UI sessions, shared app state, storage backends,
// and template rendering.
pub mod config;
pub mod handlers;
pub mod middleware;
pub mod services;
pub mod session;
pub mod state;
pub mod stores;
pub mod templates;

use axum::Router;

/// Value sent in the `Server` response header, stamped with the crate
/// version at compile time.
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
/// Builds the router for the management UI.
///
/// All `/ui/*` routes are wrapped in `require_login`; `/login`,
/// `/logout`, and `/csrf-error` remain public. The merged router then
/// gets (inner to outer) CSRF protection, session handling, UI metrics,
/// a static-file service, the `Server` header, and response compression.
pub fn create_ui_router(state: state::AppState) -> Router {
    use axum::routing::{delete, get, post, put};
    use handlers::ui;
    use handlers::ui_api;
    use handlers::ui_pages;

    // Authenticated routes. Several resources are registered under two
    // paths (e.g. ".../rotate-secret" and ".../rotate") for URL
    // backward compatibility.
    let protected = Router::new()
        .route("/", get(ui::root_redirect))
        .route("/ui", get(ui::root_redirect))
        .route("/ui/", get(ui::root_redirect))
        // Bucket pages and settings.
        .route(
            "/ui/buckets",
            get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
        )
        .route("/ui/buckets/create", post(ui_pages::create_bucket))
        .route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
        .route("/ui/buckets/{bucket_name}/delete", post(ui_pages::delete_bucket))
        .route(
            "/ui/buckets/{bucket_name}/versioning",
            post(ui_pages::update_bucket_versioning),
        )
        .route("/ui/buckets/{bucket_name}/quota", post(ui_pages::update_bucket_quota))
        .route(
            "/ui/buckets/{bucket_name}/encryption",
            post(ui_pages::update_bucket_encryption),
        )
        .route("/ui/buckets/{bucket_name}/policy", post(ui_pages::update_bucket_policy))
        .route(
            "/ui/buckets/{bucket_name}/replication",
            post(ui_pages::update_bucket_replication),
        )
        .route("/ui/buckets/{bucket_name}/website", post(ui_pages::update_bucket_website))
        // Object upload, including browser-driven multipart uploads.
        .route("/ui/buckets/{bucket_name}/upload", post(ui_api::upload_object))
        .route(
            "/ui/buckets/{bucket_name}/multipart/initiate",
            post(ui_api::initiate_multipart_upload),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
            put(ui_api::upload_multipart_part),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
            put(ui_api::upload_multipart_part),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
            post(ui_api::complete_multipart_upload),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
            delete(ui_api::abort_multipart_upload),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}",
            delete(ui_api::abort_multipart_upload),
        )
        // Object listing and bulk operations.
        .route("/ui/buckets/{bucket_name}/objects", get(ui_api::list_bucket_objects))
        .route(
            "/ui/buckets/{bucket_name}/objects/stream",
            get(ui_api::stream_bucket_objects),
        )
        .route("/ui/buckets/{bucket_name}/folders", get(ui_api::list_bucket_folders))
        .route("/ui/buckets/{bucket_name}/copy-targets", get(ui_api::list_copy_targets))
        .route("/ui/buckets/{bucket_name}/list-for-copy", get(ui_api::list_copy_targets))
        .route(
            "/ui/buckets/{bucket_name}/objects/bulk-delete",
            post(ui_api::bulk_delete_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/objects/bulk-download",
            post(ui_api::bulk_download_objects),
        )
        // Catch-all object key routes dispatch on query parameters.
        .route(
            "/ui/buckets/{bucket_name}/objects/{*rest}",
            get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
        )
        // Bucket sub-resources (ACL / CORS / lifecycle / replication).
        .route(
            "/ui/buckets/{bucket_name}/acl",
            get(ui_api::bucket_acl).post(ui_api::update_bucket_acl),
        )
        .route(
            "/ui/buckets/{bucket_name}/cors",
            get(ui_api::bucket_cors).post(ui_api::update_bucket_cors),
        )
        .route(
            "/ui/buckets/{bucket_name}/lifecycle",
            get(ui_api::bucket_lifecycle).post(ui_api::update_bucket_lifecycle),
        )
        .route(
            "/ui/buckets/{bucket_name}/lifecycle/history",
            get(ui_api::lifecycle_history),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/status",
            get(ui_api::replication_status),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures",
            get(ui_api::replication_failures).delete(ui_api::clear_replication_failures),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/retry",
            post(ui_api::retry_replication_failure),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/retry-all",
            post(ui_api::retry_all_replication_failures),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/dismiss",
            delete(ui_api::dismiss_replication_failure),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/clear",
            delete(ui_api::clear_replication_failures),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/{*rest}",
            post(ui_api::retry_replication_failure_path)
                .delete(ui_api::dismiss_replication_failure_path),
        )
        .route(
            "/ui/buckets/{bucket_name}/bulk-delete",
            post(ui_api::bulk_delete_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/bulk-download",
            post(ui_api::bulk_download_objects),
        )
        .route("/ui/buckets/{bucket_name}/archived", get(ui_api::archived_objects))
        .route(
            "/ui/buckets/{bucket_name}/archived/{*rest}",
            post(ui_api::archived_post_dispatch),
        )
        // IAM management.
        .route("/ui/iam", get(ui_pages::iam_dashboard))
        .route("/ui/iam/users", post(ui_pages::create_iam_user))
        .route("/ui/iam/users/{user_id}", post(ui_pages::update_iam_user))
        .route("/ui/iam/users/{user_id}/delete", post(ui_pages::delete_iam_user))
        .route("/ui/iam/users/{user_id}/update", post(ui_pages::update_iam_user))
        .route("/ui/iam/users/{user_id}/policies", post(ui_pages::update_iam_policies))
        .route("/ui/iam/users/{user_id}/expiry", post(ui_pages::update_iam_expiry))
        .route(
            "/ui/iam/users/{user_id}/rotate-secret",
            post(ui_pages::rotate_iam_secret),
        )
        .route("/ui/iam/users/{user_id}/rotate", post(ui_pages::rotate_iam_secret))
        // Remote connections.
        .route("/ui/connections/create", post(ui_pages::create_connection))
        .route("/ui/connections/test", post(ui_api::test_connection))
        .route("/ui/connections/{connection_id}", post(ui_pages::update_connection))
        .route(
            "/ui/connections/{connection_id}/update",
            post(ui_pages::update_connection),
        )
        .route(
            "/ui/connections/{connection_id}/delete",
            post(ui_pages::delete_connection),
        )
        .route(
            "/ui/connections/{connection_id}/health",
            get(ui_api::connection_health),
        )
        // Multi-site topology.
        .route("/ui/sites", get(ui_pages::sites_dashboard))
        .route("/ui/sites/local", post(ui_pages::update_local_site))
        .route("/ui/sites/peers", post(ui_pages::add_peer_site))
        .route("/ui/sites/peers/{site_id}/update", post(ui_pages::update_peer_site))
        .route("/ui/sites/peers/{site_id}/delete", post(ui_pages::delete_peer_site))
        .route("/ui/sites/peers/{site_id}/health", get(ui_api::peer_health))
        .route("/ui/sites/peers/{site_id}/sync-stats", get(ui_api::peer_sync_stats))
        .route(
            "/ui/sites/peers/{site_id}/bidirectional-status",
            get(ui_api::peer_bidirectional_status),
        )
        .route(
            "/ui/connections",
            get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
        )
        // Metrics dashboards and APIs.
        .route("/ui/metrics", get(ui_pages::metrics_dashboard))
        .route(
            "/ui/metrics/settings",
            get(ui_api::metrics_settings).put(ui_api::update_metrics_settings),
        )
        .route("/ui/metrics/api", get(ui_api::metrics_api))
        .route("/ui/metrics/history", get(ui_api::metrics_history))
        .route("/ui/metrics/operations", get(ui_api::metrics_operations))
        .route(
            "/ui/metrics/operations/history",
            get(ui_api::metrics_operations_history),
        )
        // System maintenance (GC and integrity checks).
        .route("/ui/system", get(ui_pages::system_dashboard))
        .route("/ui/system/gc/status", get(ui_api::gc_status_ui))
        .route("/ui/system/gc/run", post(ui_api::gc_run_ui))
        .route("/ui/system/gc/history", get(ui_api::gc_history_ui))
        .route("/ui/system/integrity/status", get(ui_api::integrity_status_ui))
        .route("/ui/system/integrity/run", post(ui_api::integrity_run_ui))
        .route("/ui/system/integrity/history", get(ui_api::integrity_history_ui))
        // Website domain management.
        .route("/ui/website-domains", get(ui_pages::website_domains_dashboard))
        .route("/ui/website-domains/create", post(ui_pages::create_website_domain))
        .route("/ui/website-domains/{domain}", post(ui_pages::update_website_domain))
        .route(
            "/ui/website-domains/{domain}/update",
            post(ui_pages::update_website_domain),
        )
        .route(
            "/ui/website-domains/{domain}/delete",
            post(ui_pages::delete_website_domain),
        )
        // Replication rule wizard.
        .route("/ui/replication/new", get(ui_pages::replication_wizard))
        .route(
            "/ui/replication/create",
            post(ui_pages::create_peer_replication_rules_from_query),
        )
        .route(
            "/ui/sites/peers/{site_id}/replication-rules",
            post(ui_pages::create_peer_replication_rules),
        )
        .route("/ui/docs", get(ui_pages::docs_page))
        .layer(axum::middleware::from_fn(ui::require_login));

    // Routes reachable without a session.
    let public = Router::new()
        .route("/login", get(ui::login_page).post(ui::login_submit))
        .route("/logout", post(ui::logout).get(ui::logout))
        .route("/csrf-error", get(ui::csrf_error_page));

    let session_state = middleware::SessionLayerState {
        store: state.sessions.clone(),
        secure: false,
    };

    let static_service = tower_http::services::ServeDir::new(&state.config.static_dir);

    // Layer order matters: layers added later run earlier (outermost),
    // so the session layer runs before the CSRF check it feeds.
    protected
        .merge(public)
        .fallback(ui::not_found_page)
        .layer(axum::middleware::from_fn(middleware::csrf_layer))
        .layer(axum::middleware::from_fn_with_state(
            session_state,
            middleware::session_layer,
        ))
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::ui_metrics_layer,
        ))
        .with_state(state)
        .nest_service("/static", static_service)
        .layer(axum::middleware::from_fn(middleware::server_header))
        .layer(tower_http::compression::CompressionLayer::new())
}
|
||||
|
||||
/// Builds the S3-compatible API router.
///
/// Core routes cover health, bucket, and object operations; `/kms/*`
/// routes are registered only when KMS is enabled in the config; the
/// `/admin/*` routes are registered under two prefixes (`/admin/...`
/// and `/admin/site/...`) for URL backward compatibility. The whole
/// router is wrapped in auth, `Server`-header, and compression layers.
pub fn create_router(state: state::AppState) -> Router {
    // Core S3 surface: service root, bucket, and object key routes.
    let mut router = Router::new()
        .route("/myfsio/health", axum::routing::get(handlers::health_check))
        .route("/", axum::routing::get(handlers::list_buckets))
        .route(
            "/{bucket}",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        .route(
            "/{bucket}/",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        .route(
            "/{bucket}/{*key}",
            axum::routing::put(handlers::put_object)
                .get(handlers::get_object)
                .delete(handlers::delete_object)
                .head(handlers::head_object)
                .post(handlers::post_object),
        );

    // KMS endpoints are opt-in via configuration.
    if state.config.kms_enabled {
        router = router
            .route(
                "/kms/keys",
                axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
            )
            .route(
                "/kms/keys/{key_id}",
                axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key),
            )
            .route(
                "/kms/keys/{key_id}/enable",
                axum::routing::post(handlers::kms::enable_key),
            )
            .route(
                "/kms/keys/{key_id}/disable",
                axum::routing::post(handlers::kms::disable_key),
            )
            .route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
            .route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
            .route(
                "/kms/generate-data-key",
                axum::routing::post(handlers::kms::generate_data_key),
            )
            .route(
                "/kms/generate-data-key-without-plaintext",
                axum::routing::post(handlers::kms::generate_data_key_without_plaintext),
            )
            .route(
                "/kms/re-encrypt",
                axum::routing::post(handlers::kms::re_encrypt),
            )
            .route(
                "/kms/generate-random",
                axum::routing::post(handlers::kms::generate_random),
            )
            .route(
                "/kms/client/generate-key",
                axum::routing::post(handlers::kms::client_generate_key),
            )
            .route(
                "/kms/client/encrypt",
                axum::routing::post(handlers::kms::client_encrypt),
            )
            .route(
                "/kms/client/decrypt",
                axum::routing::post(handlers::kms::client_decrypt),
            )
            .route(
                "/kms/materials/{key_id}",
                axum::routing::post(handlers::kms::materials),
            );
    }

    // Admin endpoints: site topology, IAM, website domains, GC, and
    // integrity. Many are mirrored under /admin/site/* for backward
    // compatibility with older clients.
    router = router
        .route(
            "/admin/site",
            axum::routing::get(handlers::admin::get_local_site)
                .put(handlers::admin::update_local_site),
        )
        .route(
            "/admin/sites",
            axum::routing::get(handlers::admin::list_all_sites)
                .post(handlers::admin::register_peer_site),
        )
        .route(
            "/admin/sites/{site_id}",
            axum::routing::get(handlers::admin::get_peer_site)
                .put(handlers::admin::update_peer_site)
                .delete(handlers::admin::delete_peer_site),
        )
        .route(
            "/admin/sites/{site_id}/health",
            axum::routing::get(handlers::admin::check_peer_health)
                .post(handlers::admin::check_peer_health),
        )
        .route(
            "/admin/sites/{site_id}/bidirectional-status",
            axum::routing::get(handlers::admin::check_bidirectional_status),
        )
        .route(
            "/admin/topology",
            axum::routing::get(handlers::admin::get_topology),
        )
        .route(
            "/admin/site/local",
            axum::routing::get(handlers::admin::get_local_site)
                .put(handlers::admin::update_local_site),
        )
        .route(
            "/admin/site/all",
            axum::routing::get(handlers::admin::list_all_sites),
        )
        .route(
            "/admin/site/peers",
            axum::routing::post(handlers::admin::register_peer_site),
        )
        .route(
            "/admin/site/peers/{site_id}",
            axum::routing::get(handlers::admin::get_peer_site)
                .put(handlers::admin::update_peer_site)
                .delete(handlers::admin::delete_peer_site),
        )
        .route(
            "/admin/site/peers/{site_id}/health",
            axum::routing::post(handlers::admin::check_peer_health),
        )
        .route(
            "/admin/site/topology",
            axum::routing::get(handlers::admin::get_topology),
        )
        .route(
            "/admin/site/peers/{site_id}/bidirectional-status",
            axum::routing::get(handlers::admin::check_bidirectional_status),
        )
        .route(
            "/admin/iam/users",
            axum::routing::get(handlers::admin::iam_list_users),
        )
        .route(
            "/admin/iam/users/{identifier}",
            axum::routing::get(handlers::admin::iam_get_user),
        )
        .route(
            "/admin/iam/users/{identifier}/policies",
            axum::routing::get(handlers::admin::iam_get_user_policies),
        )
        .route(
            "/admin/iam/users/{identifier}/access-keys",
            axum::routing::post(handlers::admin::iam_create_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/keys",
            axum::routing::post(handlers::admin::iam_create_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/access-keys/{access_key}",
            axum::routing::delete(handlers::admin::iam_delete_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/keys/{access_key}",
            axum::routing::delete(handlers::admin::iam_delete_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/disable",
            axum::routing::post(handlers::admin::iam_disable_user),
        )
        .route(
            "/admin/iam/users/{identifier}/enable",
            axum::routing::post(handlers::admin::iam_enable_user),
        )
        .route(
            "/admin/website-domains",
            axum::routing::get(handlers::admin::list_website_domains)
                .post(handlers::admin::create_website_domain),
        )
        .route(
            "/admin/website-domains/{domain}",
            axum::routing::get(handlers::admin::get_website_domain)
                .put(handlers::admin::update_website_domain)
                .delete(handlers::admin::delete_website_domain),
        )
        .route(
            "/admin/gc/status",
            axum::routing::get(handlers::admin::gc_status),
        )
        .route(
            "/admin/gc/run",
            axum::routing::post(handlers::admin::gc_run),
        )
        .route(
            "/admin/gc/history",
            axum::routing::get(handlers::admin::gc_history),
        )
        .route(
            "/admin/integrity/status",
            axum::routing::get(handlers::admin::integrity_status),
        )
        .route(
            "/admin/integrity/run",
            axum::routing::post(handlers::admin::integrity_run),
        )
        .route(
            "/admin/integrity/history",
            axum::routing::get(handlers::admin::integrity_history),
        );

    // Outer layers: auth runs on every request before any handler.
    router
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn(middleware::server_header))
        .layer(tower_http::compression::CompressionLayer::new())
        .with_state(state)
}
|
||||
@@ -1,426 +0,0 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use myfsio_server::config::ServerConfig;
|
||||
use myfsio_server::state::AppState;
|
||||
|
||||
// Command-line interface for the myfsio binary. NOTE: field help text
// lives in the #[arg(help = ...)] attributes; /// doc comments are
// deliberately avoided here because clap would fold them into --help.
#[derive(Parser)]
#[command(
    name = "myfsio",
    version,
    about = "MyFSIO S3-compatible storage engine"
)]
struct Cli {
    #[arg(long, help = "Validate configuration and exit")]
    check_config: bool,
    #[arg(long, help = "Show configuration summary and exit")]
    show_config: bool,
    #[arg(long, help = "Reset admin credentials and exit")]
    reset_cred: bool,
    // Optional subcommand; `main` falls back to Serve when omitted.
    #[command(subcommand)]
    command: Option<Command>,
}
|
||||
|
||||
// Subcommands: Serve runs the servers (and is the default when no
// subcommand is given); Version prints the crate version and exits.
// Plain `//` comments are used so clap's generated help is unchanged.
#[derive(Subcommand)]
enum Command {
    Serve,
    Version,
}
|
||||
|
||||
/// Process entry point.
///
/// Startup sequence:
///   1. Load `.env`-style files, then initialise tracing (env files first so
///      logging configuration from them takes effect).
///   2. Parse CLI flags and handle early-exit paths: `--reset-cred`,
///      `--check-config` / `--show-config`, and the `version` subcommand.
///   3. Bootstrap an IAM config with admin credentials if none exists.
///   4. Construct application state, spawn the enabled background services,
///      and bind the API listener (plus the UI listener when enabled).
///   5. Serve until Ctrl+C, then notify both servers to shut down gracefully
///      and abort the background tasks.
#[tokio::main]
async fn main() {
    // Env files must be loaded before config is read from the environment.
    load_env_files();
    tracing_subscriber::fmt::init();

    let cli = Cli::parse();
    let config = ServerConfig::from_env();

    // --reset-cred: rotate admin credentials and exit immediately.
    if cli.reset_cred {
        reset_admin_credentials(&config);
        return;
    }
    // --check-config / --show-config both print the summary; --check-config
    // additionally validates and exits non-zero on any CRITICAL finding.
    if cli.check_config || cli.show_config {
        print_config_summary(&config);
        if cli.check_config {
            let issues = validate_config(&config);
            for issue in &issues {
                println!("{issue}");
            }
            if issues.iter().any(|issue| issue.starts_with("CRITICAL:")) {
                std::process::exit(1);
            }
        }
        return;
    }

    // Missing subcommand defaults to `serve`.
    match cli.command.unwrap_or(Command::Serve) {
        Command::Version => {
            println!("myfsio {}", env!("CARGO_PKG_VERSION"));
            return;
        }
        Command::Serve => {}
    }

    // Make sure an admin user exists before the server starts accepting
    // requests (no-op if the IAM config file is already present).
    ensure_iam_bootstrap(&config);
    let bind_addr = config.bind_addr;
    let ui_bind_addr = config.ui_bind_addr;

    tracing::info!("MyFSIO Rust Engine starting — API on {}", bind_addr);
    if config.ui_enabled {
        tracing::info!("UI will bind on {}", ui_bind_addr);
    }
    tracing::info!("Storage root: {}", config.storage_root.display());
    tracing::info!("Region: {}", config.region);
    tracing::info!(
        "Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics History: {}, Operation Metrics: {}, UI: {}",
        config.encryption_enabled,
        config.kms_enabled,
        config.gc_enabled,
        config.lifecycle_enabled,
        config.integrity_enabled,
        config.metrics_history_enabled,
        config.metrics_enabled,
        config.ui_enabled
    );

    // Encryption/KMS requires the async constructor; otherwise the plain one.
    let state = if config.encryption_enabled || config.kms_enabled {
        AppState::new_with_encryption(config.clone()).await
    } else {
        AppState::new(config.clone())
    };

    // Handles to every spawned background service; aborted on shutdown or
    // on a fatal bind error below.
    let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();

    if let Some(ref gc) = state.gc {
        bg_handles.push(gc.clone().start_background());
        tracing::info!("GC background service started");
    }

    if let Some(ref integrity) = state.integrity {
        bg_handles.push(integrity.clone().start_background());
        tracing::info!("Integrity checker background service started");
    }

    if let Some(ref metrics) = state.metrics {
        bg_handles.push(metrics.clone().start_background());
        tracing::info!("Metrics collector background service started");
    }

    if let Some(ref system_metrics) = state.system_metrics {
        bg_handles.push(system_metrics.clone().start_background());
        tracing::info!("System metrics history collector started");
    }

    if config.lifecycle_enabled {
        let lifecycle =
            std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
                state.storage.clone(),
                config.storage_root.clone(),
                myfsio_server::services::lifecycle::LifecycleConfig::default(),
            ));
        bg_handles.push(lifecycle.start_background());
        tracing::info!("Lifecycle manager background service started");
    }

    if let Some(ref site_sync) = state.site_sync {
        let worker = site_sync.clone();
        bg_handles.push(tokio::spawn(async move {
            worker.run().await;
        }));
        tracing::info!("Site sync worker started");
    }

    let ui_enabled = config.ui_enabled;
    let api_app = myfsio_server::create_router(state.clone());
    let ui_app = if ui_enabled {
        Some(myfsio_server::create_ui_router(state.clone()))
    } else {
        None
    };

    // Bind the API listener; a bind failure is fatal, so tear down the
    // background tasks before exiting.
    let api_listener = match tokio::net::TcpListener::bind(bind_addr).await {
        Ok(listener) => listener,
        Err(err) => {
            if err.kind() == std::io::ErrorKind::AddrInUse {
                tracing::error!("API port already in use: {}", bind_addr);
            } else {
                tracing::error!("Failed to bind API {}: {}", bind_addr, err);
            }
            for handle in bg_handles {
                handle.abort();
            }
            std::process::exit(1);
        }
    };
    tracing::info!("API listening on {}", bind_addr);

    // Bind the UI listener only when a UI router was built; same fatal
    // handling as the API bind.
    let ui_listener = if let Some(ref app) = ui_app {
        // NOTE(review): no-op touch of the router (the router itself is
        // consumed later when serving) — looks removable; TODO confirm.
        let _ = app;
        match tokio::net::TcpListener::bind(ui_bind_addr).await {
            Ok(listener) => {
                tracing::info!("UI listening on {}", ui_bind_addr);
                Some(listener)
            }
            Err(err) => {
                if err.kind() == std::io::ErrorKind::AddrInUse {
                    tracing::error!("UI port already in use: {}", ui_bind_addr);
                } else {
                    tracing::error!("Failed to bind UI {}: {}", ui_bind_addr, err);
                }
                for handle in bg_handles {
                    handle.abort();
                }
                std::process::exit(1);
            }
        }
    } else {
        None
    };

    // One shared Notify fans the shutdown signal out to both servers.
    let shutdown = shutdown_signal_shared();
    let api_shutdown = shutdown.clone();
    let api_task = tokio::spawn(async move {
        axum::serve(api_listener, api_app)
            .with_graceful_shutdown(async move {
                api_shutdown.notified().await;
            })
            .await
    });

    let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
        let ui_shutdown = shutdown.clone();
        Some(tokio::spawn(async move {
            axum::serve(listener, app)
                .with_graceful_shutdown(async move {
                    ui_shutdown.notified().await;
                })
                .await
        }))
    } else {
        None
    };

    // Block until Ctrl+C, then wake every task waiting on the notifier.
    tokio::signal::ctrl_c()
        .await
        .expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutdown signal received");
    shutdown.notify_waiters();

    // Wait for both servers to drain; a JoinError (panic/abort) is treated
    // as a clean exit here, only serve errors are logged.
    if let Err(err) = api_task.await.unwrap_or(Ok(())) {
        tracing::error!("API server exited with error: {}", err);
    }
    if let Some(task) = ui_task {
        if let Err(err) = task.await.unwrap_or(Ok(())) {
            tracing::error!("UI server exited with error: {}", err);
        }
    }

    // Background services have no graceful-stop API here; abort them.
    for handle in bg_handles {
        handle.abort();
    }
}
|
||||
|
||||
fn print_config_summary(config: &ServerConfig) {
|
||||
println!("MyFSIO Rust Configuration");
|
||||
println!("Version: {}", env!("CARGO_PKG_VERSION"));
|
||||
println!("API bind: {}", config.bind_addr);
|
||||
println!("UI bind: {}", config.ui_bind_addr);
|
||||
println!("UI enabled: {}", config.ui_enabled);
|
||||
println!("Storage root: {}", config.storage_root.display());
|
||||
println!("IAM config: {}", config.iam_config_path.display());
|
||||
println!("Region: {}", config.region);
|
||||
println!("Encryption enabled: {}", config.encryption_enabled);
|
||||
println!("KMS enabled: {}", config.kms_enabled);
|
||||
println!("GC enabled: {}", config.gc_enabled);
|
||||
println!("Integrity enabled: {}", config.integrity_enabled);
|
||||
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
|
||||
println!(
|
||||
"Website hosting enabled: {}",
|
||||
config.website_hosting_enabled
|
||||
);
|
||||
println!("Site sync enabled: {}", config.site_sync_enabled);
|
||||
println!(
|
||||
"Metrics history enabled: {}",
|
||||
config.metrics_history_enabled
|
||||
);
|
||||
println!("Operation metrics enabled: {}", config.metrics_enabled);
|
||||
}
|
||||
|
||||
fn validate_config(config: &ServerConfig) -> Vec<String> {
|
||||
let mut issues = Vec::new();
|
||||
|
||||
if config.ui_enabled && config.bind_addr == config.ui_bind_addr {
|
||||
issues.push(
|
||||
"CRITICAL: API and UI bind addresses cannot be identical when UI is enabled."
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
|
||||
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
|
||||
}
|
||||
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
|
||||
issues.push(format!(
|
||||
"CRITICAL: Cannot create storage root {}: {}",
|
||||
config.storage_root.display(),
|
||||
err
|
||||
));
|
||||
}
|
||||
if let Some(parent) = config.iam_config_path.parent() {
|
||||
if let Err(err) = std::fs::create_dir_all(parent) {
|
||||
issues.push(format!(
|
||||
"CRITICAL: Cannot create IAM config directory {}: {}",
|
||||
parent.display(),
|
||||
err
|
||||
));
|
||||
}
|
||||
}
|
||||
if config.encryption_enabled && config.secret_key.is_none() {
|
||||
issues.push(
|
||||
"WARNING: ENCRYPTION_ENABLED=true but SECRET_KEY is not configured; secure-at-rest config encryption is unavailable.".to_string(),
|
||||
);
|
||||
}
|
||||
if config.site_sync_enabled && !config.website_hosting_enabled {
|
||||
issues.push(
|
||||
"INFO: SITE_SYNC_ENABLED=true without WEBSITE_HOSTING_ENABLED; this is valid but unrelated.".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
issues
|
||||
}
|
||||
|
||||
/// Build the shared notifier used to fan the shutdown signal out to both
/// the API and UI servers.
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
    let notify = tokio::sync::Notify::new();
    std::sync::Arc::new(notify)
}
|
||||
|
||||
fn load_env_files() {
|
||||
let cwd = std::env::current_dir().ok();
|
||||
let mut candidates: Vec<std::path::PathBuf> = Vec::new();
|
||||
candidates.push(std::path::PathBuf::from("/opt/myfsio/myfsio.env"));
|
||||
if let Some(ref dir) = cwd {
|
||||
candidates.push(dir.join(".env"));
|
||||
candidates.push(dir.join("myfsio.env"));
|
||||
for ancestor in dir.ancestors().skip(1).take(4) {
|
||||
candidates.push(ancestor.join(".env"));
|
||||
candidates.push(ancestor.join("myfsio.env"));
|
||||
}
|
||||
}
|
||||
|
||||
let mut seen = std::collections::HashSet::new();
|
||||
for path in candidates {
|
||||
if !seen.insert(path.clone()) {
|
||||
continue;
|
||||
}
|
||||
if path.is_file() {
|
||||
match dotenvy::from_path_override(&path) {
|
||||
Ok(()) => eprintln!("Loaded env file: {}", path.display()),
|
||||
Err(e) => eprintln!("Failed to load env file {}: {}", path.display(), e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_iam_bootstrap(config: &ServerConfig) {
|
||||
let iam_path = &config.iam_config_path;
|
||||
if iam_path.exists() {
|
||||
return;
|
||||
}
|
||||
|
||||
let access_key = std::env::var("ADMIN_ACCESS_KEY")
|
||||
.ok()
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.unwrap_or_else(|| format!("AK{}", uuid::Uuid::new_v4().simple()));
|
||||
let secret_key = std::env::var("ADMIN_SECRET_KEY")
|
||||
.ok()
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.unwrap_or_else(|| format!("SK{}", uuid::Uuid::new_v4().simple()));
|
||||
|
||||
let user_id = format!("u-{}", &uuid::Uuid::new_v4().simple().to_string()[..16]);
|
||||
let created_at = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
let body = serde_json::json!({
|
||||
"version": 2,
|
||||
"users": [{
|
||||
"user_id": user_id,
|
||||
"display_name": "Local Admin",
|
||||
"enabled": true,
|
||||
"access_keys": [{
|
||||
"access_key": access_key,
|
||||
"secret_key": secret_key,
|
||||
"status": "active",
|
||||
"created_at": created_at,
|
||||
}],
|
||||
"policies": [{
|
||||
"bucket": "*",
|
||||
"actions": ["*"],
|
||||
"prefix": "*",
|
||||
}]
|
||||
}]
|
||||
});
|
||||
|
||||
let json = match serde_json::to_string_pretty(&body) {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to serialize IAM bootstrap config: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(parent) = iam_path.parent() {
|
||||
if let Err(e) = std::fs::create_dir_all(parent) {
|
||||
tracing::error!(
|
||||
"Failed to create IAM config dir {}: {}",
|
||||
parent.display(),
|
||||
e
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = std::fs::write(iam_path, json) {
|
||||
tracing::error!(
|
||||
"Failed to write IAM bootstrap config {}: {}",
|
||||
iam_path.display(),
|
||||
e
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
tracing::info!("============================================================");
|
||||
tracing::info!("MYFSIO - ADMIN CREDENTIALS INITIALIZED");
|
||||
tracing::info!("============================================================");
|
||||
tracing::info!("Access Key: {}", access_key);
|
||||
tracing::info!("Secret Key: {}", secret_key);
|
||||
tracing::info!("Saved to: {}", iam_path.display());
|
||||
tracing::info!("============================================================");
|
||||
}
|
||||
|
||||
fn reset_admin_credentials(config: &ServerConfig) {
|
||||
if let Some(parent) = config.iam_config_path.parent() {
|
||||
if let Err(err) = std::fs::create_dir_all(parent) {
|
||||
eprintln!(
|
||||
"Failed to create IAM config directory {}: {}",
|
||||
parent.display(),
|
||||
err
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if config.iam_config_path.exists() {
|
||||
let backup = config
|
||||
.iam_config_path
|
||||
.with_extension(format!("bak-{}", chrono::Utc::now().timestamp()));
|
||||
if let Err(err) = std::fs::rename(&config.iam_config_path, &backup) {
|
||||
eprintln!(
|
||||
"Failed to back up existing IAM config {}: {}",
|
||||
config.iam_config_path.display(),
|
||||
err
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
println!("Backed up existing IAM config to {}", backup.display());
|
||||
}
|
||||
|
||||
ensure_iam_bootstrap(config);
|
||||
println!("Admin credentials reset.");
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user