Compare commits: v0.3.0 ... 2a0e77a754 (28 commits)
| SHA1 |
| --- |
| 2a0e77a754 |
| c6e368324a |
| 7b6c096bb7 |
| 03353a0aec |
| eb0e435a5a |
| 72f5d9d70c |
| be63e27c15 |
| 7633007a08 |
| 81ef0fe4c7 |
| 5f24bd920d |
| 8552f193de |
| de0d869c9f |
| 5536330aeb |
| d4657c389d |
| 3827235232 |
| fdd068feee |
| dfc0058d0d |
| 27aef84311 |
| 66b7677d2c |
| 5003514a3d |
| 4d90ead816 |
| 20a314e030 |
| b37a51ed1d |
| d8232340c3 |
| a356bb0c4e |
| 1c328ee3af |
| 5bf7962c04 |
| e06f653606 |
````diff
@@ -80,7 +80,7 @@ python run.py --mode api # API only (port 5000)
 python run.py --mode ui # UI only (port 5100)
 ```
 
-**Default Credentials:** `localadmin` / `localadmin`
+**Credentials:** Generated automatically on first run and printed to the console. If missed, check the IAM config file at `<STORAGE_ROOT>/.myfsio.sys/config/iam.json`.
 
 - **Web Console:** http://127.0.0.1:5100/ui
 - **API Endpoint:** http://127.0.0.1:5000
````
```diff
@@ -1,13 +1,13 @@
 from __future__ import annotations
 
 import html as html_module
+import itertools
 import logging
 import mimetypes
 import os
 import shutil
 import sys
 import time
-import uuid
 from logging.handlers import RotatingFileHandler
 from pathlib import Path
 from datetime import timedelta
@@ -39,6 +39,8 @@ from .storage import ObjectStorage, StorageError
 from .version import get_version
 from .website_domains import WebsiteDomainStore
 
+_request_counter = itertools.count(1)
+
 
 def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
     """Migrate config file from legacy locations to the active path.
@@ -115,7 +117,7 @@ def create_app(
 
     storage = ObjectStorage(
         Path(app.config["STORAGE_ROOT"]),
-        cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
+        cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
        object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
        bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
        object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
@@ -128,6 +130,7 @@ def create_app(
         Path(app.config["IAM_CONFIG"]),
         auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
         auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
+        encryption_key=app.config.get("SECRET_KEY"),
     )
     bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
     secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))
@@ -481,13 +484,9 @@ def _configure_logging(app: Flask) -> None:
 
     @app.before_request
     def _log_request_start() -> None:
-        g.request_id = uuid.uuid4().hex
+        g.request_id = f"{os.getpid():x}{next(_request_counter):012x}"
         g.request_started_at = time.perf_counter()
         g.request_bytes_in = request.content_length or 0
-        app.logger.info(
-            "Request started",
-            extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
-        )
 
     @app.before_request
     def _maybe_serve_website():
@@ -616,16 +615,17 @@ def _configure_logging(app: Flask) -> None:
         duration_ms = 0.0
         if hasattr(g, "request_started_at"):
             duration_ms = (time.perf_counter() - g.request_started_at) * 1000
-        request_id = getattr(g, "request_id", uuid.uuid4().hex)
+        request_id = getattr(g, "request_id", f"{os.getpid():x}{next(_request_counter):012x}")
         response.headers.setdefault("X-Request-ID", request_id)
-        app.logger.info(
-            "Request completed",
-            extra={
-                "path": request.path,
-                "method": request.method,
-                "remote_addr": request.remote_addr,
-            },
-        )
+        if app.logger.isEnabledFor(logging.INFO):
+            app.logger.info(
+                "Request completed",
+                extra={
+                    "path": request.path,
+                    "method": request.method,
+                    "remote_addr": request.remote_addr,
+                },
+            )
         response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
 
         operation_metrics = app.extensions.get("operation_metrics")
```
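For reference, the new request-ID scheme in this hunk concatenates the worker's PID in hex with a process-local monotonic counter, which is cheaper per request than `uuid.uuid4()`. A minimal standalone sketch of the format (not the app's actual wiring):

```python
# Shape of the request IDs produced above: hex PID followed by a
# 12-hex-digit, zero-padded, monotonically increasing counter.
import itertools
import os

_request_counter = itertools.count(1)
request_id = f"{os.getpid():x}{next(_request_counter):012x}"
print(request_id)  # e.g. "3f2a000000000001"
```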
```diff
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import ipaddress
 import json
+import os
 import re
 import time
 from dataclasses import dataclass, field
@@ -268,7 +269,7 @@ class BucketPolicyStore:
         self._last_mtime = self._current_mtime()
         # Performance: Avoid stat() on every request
         self._last_stat_check = 0.0
-        self._stat_check_interval = 1.0  # Only check mtime every 1 second
+        self._stat_check_interval = float(os.environ.get("BUCKET_POLICY_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
 
     def maybe_reload(self) -> None:
         # Performance: Skip stat check if we checked recently
```
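The interval this hunk makes configurable drives a common hot-reload pattern: skip `stat()` entirely until the check interval elapses, then compare mtimes and reload only on change. A minimal sketch under that assumption (class and names hypothetical, not the store's real API):

```python
# Throttled mtime polling: stat() runs at most once per interval,
# and a reload only happens when the file's mtime actually changed.
import os
import time

class HotReloader:
    def __init__(self, path: str, interval: float = 2.0) -> None:
        self.path = path
        self.interval = interval
        self._last_stat_check = 0.0
        self._last_mtime = 0.0

    def maybe_reload(self) -> None:
        now = time.monotonic()
        if now - self._last_stat_check < self.interval:
            return  # checked recently; skip the stat() call
        self._last_stat_check = now
        mtime = os.stat(self.path).st_mtime
        if mtime != self._last_mtime:
            self._last_mtime = mtime
            print("reloading", self.path)  # stand-in for the real reload
```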
```diff
@@ -241,7 +241,7 @@ class AppConfig:
     cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
     session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
     bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
-    object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 5))
+    object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))
 
     encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
     encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
```
```diff
@@ -189,7 +189,13 @@ class EncryptedObjectStorage:
 
     def list_objects(self, bucket_name: str, **kwargs):
         return self.storage.list_objects(bucket_name, **kwargs)
 
+    def list_objects_shallow(self, bucket_name: str, **kwargs):
+        return self.storage.list_objects_shallow(bucket_name, **kwargs)
+
+    def search_objects(self, bucket_name: str, query: str, **kwargs):
+        return self.storage.search_objects(bucket_name, query, **kwargs)
+
     def list_objects_all(self, bucket_name: str):
         return self.storage.list_objects_all(bucket_name)
 
```
```diff
@@ -19,6 +19,13 @@ from cryptography.hazmat.primitives import hashes
 
 if sys.platform != "win32":
     import fcntl
 
+try:
+    import myfsio_core as _rc
+    _HAS_RUST = True
+except ImportError:
+    _rc = None
+    _HAS_RUST = False
+
 logger = logging.getLogger(__name__)
 
```
```diff
@@ -338,6 +345,69 @@ class StreamingEncryptor:
         output.seek(0)
         return output
 
+    def encrypt_file(self, input_path: str, output_path: str) -> EncryptionMetadata:
+        data_key, encrypted_data_key = self.provider.generate_data_key()
+        base_nonce = secrets.token_bytes(12)
+
+        if _HAS_RUST:
+            _rc.encrypt_stream_chunked(
+                input_path, output_path, data_key, base_nonce, self.chunk_size
+            )
+        else:
+            with open(input_path, "rb") as stream:
+                aesgcm = AESGCM(data_key)
+                with open(output_path, "wb") as out:
+                    out.write(b"\x00\x00\x00\x00")
+                    chunk_index = 0
+                    while True:
+                        chunk = stream.read(self.chunk_size)
+                        if not chunk:
+                            break
+                        chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
+                        encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
+                        out.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
+                        out.write(encrypted_chunk)
+                        chunk_index += 1
+                    out.seek(0)
+                    out.write(chunk_index.to_bytes(4, "big"))
+
+        return EncryptionMetadata(
+            algorithm="AES256",
+            key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
+            nonce=base_nonce,
+            encrypted_data_key=encrypted_data_key,
+        )
+
+    def decrypt_file(self, input_path: str, output_path: str,
+                     metadata: EncryptionMetadata) -> None:
+        data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)
+        base_nonce = metadata.nonce
+
+        if _HAS_RUST:
+            _rc.decrypt_stream_chunked(input_path, output_path, data_key, base_nonce)
+        else:
+            with open(input_path, "rb") as stream:
+                chunk_count_bytes = stream.read(4)
+                if len(chunk_count_bytes) < 4:
+                    raise EncryptionError("Invalid encrypted stream: missing header")
+                chunk_count = int.from_bytes(chunk_count_bytes, "big")
+                aesgcm = AESGCM(data_key)
+                with open(output_path, "wb") as out:
+                    for chunk_index in range(chunk_count):
+                        size_bytes = stream.read(self.HEADER_SIZE)
+                        if len(size_bytes) < self.HEADER_SIZE:
+                            raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
+                        chunk_size = int.from_bytes(size_bytes, "big")
+                        encrypted_chunk = stream.read(chunk_size)
+                        if len(encrypted_chunk) < chunk_size:
+                            raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")
+                        chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
+                        try:
+                            decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
+                            out.write(decrypted_chunk)
+                        except Exception as exc:
+                            raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc
+
 
 class EncryptionManager:
     """Manages encryption providers and operations."""
 
```
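The Python fallback above writes a simple container: a 4-byte big-endian chunk count (back-patched at the end), then one length-prefixed AES-GCM ciphertext per chunk. A self-contained round-trip sketch of that framing follows; note that `derive_chunk_nonce` is an assumption, since the diff calls `self._derive_chunk_nonce()` without showing its body, and XOR-ing the chunk index into the 96-bit base nonce is just one common construction:

```python
# Round-trip sketch of the chunked AES-GCM container shown in the diff.
import io
import secrets
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

HEADER_SIZE = 4  # assumed to match StreamingEncryptor.HEADER_SIZE

def derive_chunk_nonce(base_nonce: bytes, index: int) -> bytes:
    # Hypothetical derivation: XOR the chunk index into the base nonce.
    return (int.from_bytes(base_nonce, "big") ^ index).to_bytes(12, "big")

def encrypt_chunks(data: bytes, chunk_size: int = 4):
    key = AESGCM.generate_key(bit_length=256)
    base_nonce = secrets.token_bytes(12)
    aesgcm = AESGCM(key)
    out = io.BytesIO()
    out.write(b"\x00\x00\x00\x00")  # chunk-count placeholder, patched below
    count = 0
    for i in range(0, len(data), chunk_size):
        ct = aesgcm.encrypt(derive_chunk_nonce(base_nonce, count), data[i:i + chunk_size], None)
        out.write(len(ct).to_bytes(HEADER_SIZE, "big"))
        out.write(ct)
        count += 1
    out.seek(0)
    out.write(count.to_bytes(4, "big"))  # back-patch the chunk count
    return key, base_nonce, out.getvalue()

def decrypt_chunks(key: bytes, base_nonce: bytes, blob: bytes) -> bytes:
    aesgcm = AESGCM(key)
    stream = io.BytesIO(blob)
    chunk_count = int.from_bytes(stream.read(4), "big")
    plaintext = bytearray()
    for index in range(chunk_count):
        size = int.from_bytes(stream.read(HEADER_SIZE), "big")
        plaintext += aesgcm.decrypt(derive_chunk_nonce(base_nonce, index), stream.read(size), None)
    return bytes(plaintext)

key, nonce, blob = encrypt_chunks(b"hello chunked world")
assert decrypt_chunks(key, nonce, blob) == b"hello chunked world"
```

Per-chunk nonces are what make the format safely seekable: each chunk authenticates independently, so a truncated or corrupted chunk fails fast with a clear index.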
**app/iam.py** (170 changed lines)
```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import base64
 import hashlib
 import hmac
 import json
@@ -14,6 +15,8 @@ from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
 
+from cryptography.fernet import Fernet, InvalidToken
+
 
 class IamError(RuntimeError):
     """Raised when authentication or authorization fails."""
@@ -107,13 +110,24 @@ class Principal:
     policies: List[Policy]
 
 
+def _derive_fernet_key(secret: str) -> bytes:
+    raw = hashlib.pbkdf2_hmac("sha256", secret.encode(), b"myfsio-iam-encryption", 100_000)
+    return base64.urlsafe_b64encode(raw)
+
+
+_IAM_ENCRYPTED_PREFIX = b"MYFSIO_IAM_ENC:"
+
+
 class IamService:
     """Loads IAM configuration, manages users, and evaluates policies."""
 
-    def __init__(self, config_path: Path, auth_max_attempts: int = 5, auth_lockout_minutes: int = 15) -> None:
+    def __init__(self, config_path: Path, auth_max_attempts: int = 5, auth_lockout_minutes: int = 15, encryption_key: str | None = None) -> None:
         self.config_path = Path(config_path)
         self.auth_max_attempts = auth_max_attempts
         self.auth_lockout_window = timedelta(minutes=auth_lockout_minutes)
+        self._fernet: Fernet | None = None
+        if encryption_key:
+            self._fernet = Fernet(_derive_fernet_key(encryption_key))
         self.config_path.parent.mkdir(parents=True, exist_ok=True)
         if not self.config_path.exists():
             self._write_default()
```
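The key derivation added here turns an arbitrary `SECRET_KEY` string into a valid Fernet key: PBKDF2-HMAC-SHA256 with the fixed salt and iteration count shown in the diff, url-safe base64-encoded. A round-trip sketch using only what the diff shows (the sample secret is illustrative):

```python
# Derive a Fernet key exactly as _derive_fernet_key() does, then
# encrypt/decrypt a config payload the way _save()/_decrypt_content() do.
import base64
import hashlib
from cryptography.fernet import Fernet

def derive_fernet_key(secret: str) -> bytes:
    raw = hashlib.pbkdf2_hmac("sha256", secret.encode(), b"myfsio-iam-encryption", 100_000)
    return base64.urlsafe_b64encode(raw)  # 32 bytes -> url-safe b64, valid Fernet key

fernet = Fernet(derive_fernet_key("my-secret-key"))
token = fernet.encrypt(b'{"users": []}')          # what _save() stores after the prefix
assert fernet.decrypt(token) == b'{"users": []}'  # what _decrypt_content() recovers
```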
```diff
@@ -125,7 +139,7 @@ class IamService:
         self._secret_key_cache: Dict[str, Tuple[str, float]] = {}
         self._cache_ttl = float(os.environ.get("IAM_CACHE_TTL_SECONDS", "5.0"))
         self._last_stat_check = 0.0
-        self._stat_check_interval = 1.0
+        self._stat_check_interval = float(os.environ.get("IAM_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
         self._sessions: Dict[str, Dict[str, Any]] = {}
         self._session_lock = threading.Lock()
         self._load()
@@ -145,6 +159,19 @@ class IamService:
         except OSError:
             pass
 
+    def _check_expiry(self, access_key: str, record: Dict[str, Any]) -> None:
+        expires_at = record.get("expires_at")
+        if not expires_at:
+            return
+        try:
+            exp_dt = datetime.fromisoformat(expires_at)
+            if exp_dt.tzinfo is None:
+                exp_dt = exp_dt.replace(tzinfo=timezone.utc)
+            if datetime.now(timezone.utc) >= exp_dt:
+                raise IamError(f"Credentials for '{access_key}' have expired")
+        except (ValueError, TypeError):
+            pass
+
     def authenticate(self, access_key: str, secret_key: str) -> Principal:
         self._maybe_reload()
         access_key = (access_key or "").strip()
@@ -161,6 +188,7 @@ class IamService:
         if not record or not hmac.compare_digest(stored_secret, secret_key):
             self._record_failed_attempt(access_key)
             raise IamError("Invalid credentials")
+        self._check_expiry(access_key, record)
         self._clear_failed_attempts(access_key)
         return self._build_principal(access_key, record)
 
@@ -288,12 +316,16 @@ class IamService:
         if cached:
             principal, cached_time = cached
             if now - cached_time < self._cache_ttl:
+                record = self._users.get(access_key)
+                if record:
+                    self._check_expiry(access_key, record)
                 return principal
 
         self._maybe_reload()
         record = self._users.get(access_key)
         if not record:
             raise IamError("Unknown access key")
+        self._check_expiry(access_key, record)
         principal = self._build_principal(access_key, record)
         self._principal_cache[access_key] = (principal, now)
         return principal
@@ -303,6 +335,7 @@ class IamService:
         record = self._users.get(access_key)
         if not record:
             raise IamError("Unknown access key")
+        self._check_expiry(access_key, record)
         return record["secret_key"]
 
     def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
@@ -347,6 +380,7 @@ class IamService:
             {
                 "access_key": access_key,
                 "display_name": record["display_name"],
+                "expires_at": record.get("expires_at"),
                 "policies": [
                     {"bucket": policy.bucket, "actions": sorted(policy.actions)}
                     for policy in record["policies"]
@@ -362,20 +396,25 @@ class IamService:
         policies: Optional[Sequence[Dict[str, Any]]] = None,
         access_key: str | None = None,
         secret_key: str | None = None,
+        expires_at: str | None = None,
     ) -> Dict[str, str]:
         access_key = (access_key or self._generate_access_key()).strip()
         if not access_key:
             raise IamError("Access key cannot be empty")
         if access_key in self._users:
             raise IamError("Access key already exists")
+        if expires_at:
+            self._validate_expires_at(expires_at)
         secret_key = secret_key or self._generate_secret_key()
         sanitized_policies = self._prepare_policy_payload(policies)
-        record = {
+        record: Dict[str, Any] = {
             "access_key": access_key,
             "secret_key": secret_key,
             "display_name": display_name or access_key,
             "policies": sanitized_policies,
         }
+        if expires_at:
+            record["expires_at"] = expires_at
         self._raw_config.setdefault("users", []).append(record)
         self._save()
         self._load()
@@ -414,17 +453,43 @@ class IamService:
         clear_signing_key_cache()
         self._load()
 
+    def update_user_expiry(self, access_key: str, expires_at: str | None) -> None:
+        user = self._get_raw_user(access_key)
+        if expires_at:
+            self._validate_expires_at(expires_at)
+            user["expires_at"] = expires_at
+        else:
+            user.pop("expires_at", None)
+        self._save()
+        self._principal_cache.pop(access_key, None)
+        self._secret_key_cache.pop(access_key, None)
+        self._load()
+
     def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
         user = self._get_raw_user(access_key)
         user["policies"] = self._prepare_policy_payload(policies)
         self._save()
         self._load()
 
+    def _decrypt_content(self, raw_bytes: bytes) -> str:
+        if raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX):
+            if not self._fernet:
+                raise IamError("IAM config is encrypted but no encryption key provided. Set SECRET_KEY or use 'python run.py reset-cred'.")
+            try:
+                encrypted_data = raw_bytes[len(_IAM_ENCRYPTED_PREFIX):]
+                return self._fernet.decrypt(encrypted_data).decode("utf-8")
+            except InvalidToken:
+                raise IamError("Cannot decrypt IAM config. SECRET_KEY may have changed. Use 'python run.py reset-cred' to reset credentials.")
+        return raw_bytes.decode("utf-8")
+
     def _load(self) -> None:
         try:
             self._last_load_time = self.config_path.stat().st_mtime
-            content = self.config_path.read_text(encoding='utf-8')
+            raw_bytes = self.config_path.read_bytes()
+            content = self._decrypt_content(raw_bytes)
             raw = json.loads(content)
+        except IamError:
+            raise
         except FileNotFoundError:
             raise IamError(f"IAM config not found: {self.config_path}")
         except json.JSONDecodeError as e:
@@ -433,34 +498,48 @@ class IamService:
             raise IamError(f"Cannot read IAM config (permission denied): {e}")
         except (OSError, ValueError) as e:
             raise IamError(f"Failed to load IAM config: {e}")
 
+        was_plaintext = not raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX)
+
         users: Dict[str, Dict[str, Any]] = {}
         for user in raw.get("users", []):
             policies = self._build_policy_objects(user.get("policies", []))
-            users[user["access_key"]] = {
+            user_record: Dict[str, Any] = {
                 "secret_key": user["secret_key"],
                 "display_name": user.get("display_name", user["access_key"]),
                 "policies": policies,
             }
+            if user.get("expires_at"):
+                user_record["expires_at"] = user["expires_at"]
+            users[user["access_key"]] = user_record
         if not users:
             raise IamError("IAM configuration contains no users")
         self._users = users
-        self._raw_config = {
-            "users": [
-                {
-                    "access_key": entry["access_key"],
-                    "secret_key": entry["secret_key"],
-                    "display_name": entry.get("display_name", entry["access_key"]),
-                    "policies": entry.get("policies", []),
-                }
-                for entry in raw.get("users", [])
-            ]
-        }
+        raw_users: List[Dict[str, Any]] = []
+        for entry in raw.get("users", []):
+            raw_entry: Dict[str, Any] = {
+                "access_key": entry["access_key"],
+                "secret_key": entry["secret_key"],
+                "display_name": entry.get("display_name", entry["access_key"]),
+                "policies": entry.get("policies", []),
+            }
+            if entry.get("expires_at"):
+                raw_entry["expires_at"] = entry["expires_at"]
+            raw_users.append(raw_entry)
+        self._raw_config = {"users": raw_users}
+
+        if was_plaintext and self._fernet:
+            self._save()
 
     def _save(self) -> None:
         try:
+            json_text = json.dumps(self._raw_config, indent=2)
             temp_path = self.config_path.with_suffix('.json.tmp')
-            temp_path.write_text(json.dumps(self._raw_config, indent=2), encoding='utf-8')
+            if self._fernet:
+                encrypted = self._fernet.encrypt(json_text.encode("utf-8"))
+                temp_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
+            else:
+                temp_path.write_text(json_text, encoding='utf-8')
             temp_path.replace(self.config_path)
         except (OSError, PermissionError) as e:
             raise IamError(f"Cannot save IAM config: {e}")
@@ -475,9 +554,14 @@ class IamService:
     def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
         payload: Dict[str, Any] = {"users": []}
         for user in self._raw_config.get("users", []):
-            record = dict(user)
-            if mask_secrets and "secret_key" in record:
-                record["secret_key"] = "••••••••••"
+            record: Dict[str, Any] = {
+                "access_key": user["access_key"],
+                "secret_key": "••••••••••" if mask_secrets else user["secret_key"],
+                "display_name": user["display_name"],
+                "policies": user["policies"],
+            }
+            if user.get("expires_at"):
+                record["expires_at"] = user["expires_at"]
             payload["users"].append(record)
         return payload
 
@@ -546,8 +630,9 @@ class IamService:
         return candidate if candidate in ALLOWED_ACTIONS else ""
 
     def _write_default(self) -> None:
-        access_key = secrets.token_hex(12)
-        secret_key = secrets.token_urlsafe(32)
+        access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
+        secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
+        custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())
         default = {
             "users": [
                 {
@@ -560,16 +645,37 @@ class IamService:
                 }
             ]
         }
-        self.config_path.write_text(json.dumps(default, indent=2))
+        json_text = json.dumps(default, indent=2)
+        if self._fernet:
+            encrypted = self._fernet.encrypt(json_text.encode("utf-8"))
+            self.config_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
+        else:
+            self.config_path.write_text(json_text)
         print(f"\n{'='*60}")
-        print("MYFSIO FIRST RUN - ADMIN CREDENTIALS GENERATED")
+        print("MYFSIO FIRST RUN - ADMIN CREDENTIALS")
         print(f"{'='*60}")
-        print(f"Access Key: {access_key}")
-        print(f"Secret Key: {secret_key}")
+        if custom_keys:
+            print(f"Access Key: {access_key} (from ADMIN_ACCESS_KEY)")
+            print(f"Secret Key: {'(from ADMIN_SECRET_KEY)' if os.environ.get('ADMIN_SECRET_KEY', '').strip() else secret_key}")
+        else:
+            print(f"Access Key: {access_key}")
+            print(f"Secret Key: {secret_key}")
         print(f"{'='*60}")
-        print(f"Missed this? Check: {self.config_path}")
+        if self._fernet:
+            print("IAM config is encrypted at rest.")
+            print("Lost credentials? Run: python run.py reset-cred")
+        else:
+            print(f"Missed this? Check: {self.config_path}")
         print(f"{'='*60}\n")
 
+    def _validate_expires_at(self, expires_at: str) -> None:
+        try:
+            dt = datetime.fromisoformat(expires_at)
+            if dt.tzinfo is None:
+                dt = dt.replace(tzinfo=timezone.utc)
+        except (ValueError, TypeError):
+            raise IamError(f"Invalid expires_at format: {expires_at}. Use ISO 8601 (e.g. 2026-12-31T23:59:59Z)")
+
     def _generate_access_key(self) -> str:
         return secrets.token_hex(8)
 
@@ -588,11 +694,15 @@ class IamService:
         if cached:
             secret_key, cached_time = cached
             if now - cached_time < self._cache_ttl:
+                record = self._users.get(access_key)
+                if record:
+                    self._check_expiry(access_key, record)
                 return secret_key
 
         self._maybe_reload()
         record = self._users.get(access_key)
         if record:
+            self._check_expiry(access_key, record)
             secret_key = record["secret_key"]
             self._secret_key_cache[access_key] = (secret_key, now)
             return secret_key
@@ -604,11 +714,15 @@ class IamService:
         if cached:
             principal, cached_time = cached
             if now - cached_time < self._cache_ttl:
+                record = self._users.get(access_key)
+                if record:
+                    self._check_expiry(access_key, record)
                 return principal
 
         self._maybe_reload()
         record = self._users.get(access_key)
         if record:
+            self._check_expiry(access_key, record)
             principal = self._build_principal(access_key, record)
             self._principal_cache[access_key] = (principal, now)
             return principal
```
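The `_check_expiry` helper added above treats a naive `expires_at` timestamp as UTC and rejects credentials at or past the deadline. The same semantics in isolation (the sample timestamp is illustrative):

```python
# Naive timestamps default to UTC; anything at or before "now" is expired.
from datetime import datetime, timedelta, timezone

expires_at = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat()
exp_dt = datetime.fromisoformat(expires_at)
if exp_dt.tzinfo is None:
    exp_dt = exp_dt.replace(tzinfo=timezone.utc)
print(datetime.now(timezone.utc) >= exp_dt)  # False: credentials still valid
```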
```diff
@@ -5,6 +5,7 @@ import logging
 import random
 import threading
 import time
+from collections import defaultdict
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from pathlib import Path
@@ -138,8 +139,8 @@ class OperationMetricsCollector:
         self.interval_seconds = interval_minutes * 60
         self.retention_hours = retention_hours
         self._lock = threading.Lock()
-        self._by_method: Dict[str, OperationStats] = {}
-        self._by_endpoint: Dict[str, OperationStats] = {}
+        self._by_method: Dict[str, OperationStats] = defaultdict(OperationStats)
+        self._by_endpoint: Dict[str, OperationStats] = defaultdict(OperationStats)
         self._by_status_class: Dict[str, int] = {}
         self._error_codes: Dict[str, int] = {}
         self._totals = OperationStats()
@@ -211,8 +212,8 @@ class OperationMetricsCollector:
             self._prune_old_snapshots()
             self._save_history()
 
-            self._by_method.clear()
-            self._by_endpoint.clear()
+            self._by_method = defaultdict(OperationStats)
+            self._by_endpoint = defaultdict(OperationStats)
             self._by_status_class.clear()
             self._error_codes.clear()
             self._totals = OperationStats()
@@ -232,12 +233,7 @@ class OperationMetricsCollector:
         status_class = f"{status_code // 100}xx"
 
         with self._lock:
-            if method not in self._by_method:
-                self._by_method[method] = OperationStats()
             self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
-
-            if endpoint_type not in self._by_endpoint:
-                self._by_endpoint[endpoint_type] = OperationStats()
             self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)
 
             self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1
```
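The `defaultdict` swap above is what lets the membership checks in `record()` be deleted: missing keys are materialized on first access. In miniature (the toy class stands in for `OperationStats`):

```python
# defaultdict creates the per-key stats object lazily on first lookup,
# removing the "if key not in dict" initialization boilerplate.
from collections import defaultdict

class Stats:
    def __init__(self) -> None:
        self.count = 0
    def record(self) -> None:
        self.count += 1

by_method = defaultdict(Stats)
by_method["GET"].record()      # no explicit initialization needed
print(by_method["GET"].count)  # 1
```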
**app/s3_api.py** (190 changed lines)
```diff
@@ -85,6 +85,9 @@ def _bucket_policies() -> BucketPolicyStore:
 
 
 def _build_policy_context() -> Dict[str, Any]:
+    cached = getattr(g, "_policy_context", None)
+    if cached is not None:
+        return cached
     ctx: Dict[str, Any] = {}
     if request.headers.get("Referer"):
         ctx["aws:Referer"] = request.headers.get("Referer")
@@ -98,6 +101,7 @@ def _build_policy_context() -> Dict[str, Any]:
     ctx["aws:SecureTransport"] = str(request.is_secure).lower()
     if request.headers.get("User-Agent"):
         ctx["aws:UserAgent"] = request.headers.get("User-Agent")
+    g._policy_context = ctx
     return ctx
 
 
@@ -293,9 +297,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
         raise IamError("Required headers not signed")
 
     canonical_uri = _get_canonical_uri(req)
-    payload_hash = req.headers.get("X-Amz-Content-Sha256")
-    if not payload_hash:
-        payload_hash = hashlib.sha256(req.get_data()).hexdigest()
+    payload_hash = req.headers.get("X-Amz-Content-Sha256") or "UNSIGNED-PAYLOAD"
 
     if _HAS_RUST:
         query_params = list(req.args.items(multi=True))
@@ -305,16 +307,10 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
             header_values, payload_hash, amz_date, date_stamp, region,
             service, secret_key, signature,
         ):
             if current_app.config.get("DEBUG_SIGV4"):
                 logger.warning("SigV4 signature mismatch for %s %s", req.method, req.path)
             raise IamError("SignatureDoesNotMatch")
     else:
-        method = req.method
-        query_args = []
-        for key, value in req.args.items(multi=True):
-            query_args.append((key, value))
-        query_args.sort(key=lambda x: (x[0], x[1]))
-
+        query_args = sorted(req.args.items(multi=True), key=lambda x: (x[0], x[1]))
         canonical_query_parts = []
         for k, v in query_args:
             canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
@@ -339,8 +335,6 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
         string_to_sign = f"AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n{hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()}"
         calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
         if not hmac.compare_digest(calculated_signature, signature):
-            if current_app.config.get("DEBUG_SIGV4"):
-                logger.warning("SigV4 signature mismatch for %s %s", method, req.path)
             raise IamError("SignatureDoesNotMatch")
 
     session_token = req.headers.get("X-Amz-Security-Token")
@@ -682,7 +676,7 @@ def _extract_request_metadata() -> Dict[str, str]:
     for header, value in request.headers.items():
         if header.lower().startswith("x-amz-meta-"):
             key = header[11:]
-            if key:
+            if key and not (key.startswith("__") and key.endswith("__")):
                 metadata[key] = value
     return metadata
 
@@ -1031,14 +1025,20 @@ def _apply_object_headers(
     file_stat,
     metadata: Dict[str, str] | None,
     etag: str,
+    size_override: int | None = None,
+    mtime_override: float | None = None,
 ) -> None:
-    if file_stat is not None:
-        if response.status_code != 206:
-            response.headers["Content-Length"] = str(file_stat.st_size)
-        response.headers["Last-Modified"] = http_date(file_stat.st_mtime)
+    effective_size = size_override if size_override is not None else (file_stat.st_size if file_stat is not None else None)
+    effective_mtime = mtime_override if mtime_override is not None else (file_stat.st_mtime if file_stat is not None else None)
+    if effective_size is not None and response.status_code != 206:
+        response.headers["Content-Length"] = str(effective_size)
+    if effective_mtime is not None:
+        response.headers["Last-Modified"] = http_date(effective_mtime)
     response.headers["ETag"] = f'"{etag}"'
     response.headers["Accept-Ranges"] = "bytes"
     for key, value in (metadata or {}).items():
         if key.startswith("__") and key.endswith("__"):
             continue
         safe_value = _sanitize_header_value(str(value))
         response.headers[f"X-Amz-Meta-{key}"] = safe_value
@@ -2467,7 +2467,7 @@ def _post_object(bucket_name: str) -> Response:
     for field_name, value in request.form.items():
         if field_name.lower().startswith("x-amz-meta-"):
             key = field_name[11:]
-            if key:
+            if key and not (key.startswith("__") and key.endswith("__")):
                 metadata[key] = value
     try:
         meta = storage.put_object(bucket_name, object_key, file.stream, metadata=metadata or None)
```
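For context on the pure-Python SigV4 fallback above: the canonical query string sorts pairs by key then value and percent-encodes everything except the RFC 3986 unreserved characters. That construction in isolation (the sample parameters are illustrative):

```python
# Canonical query construction matching the non-Rust branch in the diff:
# sort (key, value) pairs, then percent-encode with safe='-_.~'.
from urllib.parse import quote

args = [("prefix", "a b"), ("list-type", "2")]
query_args = sorted(args, key=lambda x: (x[0], x[1]))
canonical_query = "&".join(
    f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}" for k, v in query_args
)
print(canonical_query)  # list-type=2&prefix=a%20b
```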
```diff
@@ -2671,54 +2671,43 @@ def bucket_handler(bucket_name: str) -> Response:
         else:
             effective_start = marker
 
-        fetch_keys = max_keys * 10 if delimiter else max_keys
         try:
-            list_result = storage.list_objects(
-                bucket_name,
-                max_keys=fetch_keys,
-                continuation_token=effective_start or None,
-                prefix=prefix or None,
-            )
-            objects = list_result.objects
+            if delimiter:
+                shallow_result = storage.list_objects_shallow(
+                    bucket_name,
+                    prefix=prefix,
+                    delimiter=delimiter,
+                    max_keys=max_keys,
+                    continuation_token=effective_start or None,
+                )
+                objects = shallow_result.objects
+                common_prefixes = shallow_result.common_prefixes
+                is_truncated = shallow_result.is_truncated
+
+                next_marker = shallow_result.next_continuation_token or ""
+                next_continuation_token = ""
+                if is_truncated and next_marker and list_type == "2":
+                    next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
+            else:
+                list_result = storage.list_objects(
+                    bucket_name,
+                    max_keys=max_keys,
+                    continuation_token=effective_start or None,
+                    prefix=prefix or None,
+                )
+                objects = list_result.objects
+                common_prefixes = []
+                is_truncated = list_result.is_truncated
+
+                next_marker = ""
+                next_continuation_token = ""
+                if is_truncated:
+                    if objects:
+                        next_marker = objects[-1].key
+                    if list_type == "2" and next_marker:
+                        next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
         except StorageError as exc:
             return _error_response("NoSuchBucket", str(exc), 404)
 
-        common_prefixes: list[str] = []
-        filtered_objects: list = []
-        if delimiter:
-            seen_prefixes: set[str] = set()
-            for obj in objects:
-                key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
-                if delimiter in key_after_prefix:
-                    common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter
-                    if common_prefix not in seen_prefixes:
-                        seen_prefixes.add(common_prefix)
-                        common_prefixes.append(common_prefix)
-                else:
-                    filtered_objects.append(obj)
-            objects = filtered_objects
-            common_prefixes = sorted(common_prefixes)
-
-        total_items = len(objects) + len(common_prefixes)
-        is_truncated = total_items > max_keys or list_result.is_truncated
-
-        if len(objects) >= max_keys:
-            objects = objects[:max_keys]
-            common_prefixes = []
-        else:
-            remaining = max_keys - len(objects)
-            common_prefixes = common_prefixes[:remaining]
-
-        next_marker = ""
-        next_continuation_token = ""
-        if is_truncated:
-            if objects:
-                next_marker = objects[-1].key
-            elif common_prefixes:
-                next_marker = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
-
-        if list_type == "2" and next_marker:
-            next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
-
         if list_type == "2":
             root = Element("ListBucketResult")
```
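The grouping rule this hunk pushes down into `list_objects_shallow` is the standard S3 delimiter semantics: any key that still contains the delimiter after the prefix collapses into a `CommonPrefixes` entry, everything else is returned as an object. In miniature (sample keys are illustrative):

```python
# Delimiter grouping as in the removed in-memory filter: keys with the
# delimiter after the prefix collapse into common prefixes ("folders").
keys = ["logs/2024/a.txt", "logs/2024/b.txt", "logs/readme.md"]
prefix, delimiter = "logs/", "/"
common, objects = set(), []
for key in keys:
    rest = key[len(prefix):]
    if delimiter in rest:
        common.add(prefix + rest.split(delimiter)[0] + delimiter)
    else:
        objects.append(key)
print(sorted(common))  # ['logs/2024/']
print(objects)         # ['logs/readme.md']
```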
```diff
@@ -2839,6 +2828,8 @@ def object_handler(bucket_name: str, object_key: str):
     if validation_error:
         return _error_response("InvalidArgument", validation_error, 400)
 
+    metadata["__content_type__"] = content_type or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+
     try:
         meta = storage.put_object(
             bucket_name,
@@ -2853,10 +2844,23 @@ def object_handler(bucket_name: str, object_key: str):
         if "Bucket" in message:
             return _error_response("NoSuchBucket", message, 404)
         return _error_response("InvalidArgument", message, 400)
-    current_app.logger.info(
-        "Object uploaded",
-        extra={"bucket": bucket_name, "key": object_key, "size": meta.size},
-    )
 
+    content_md5 = request.headers.get("Content-MD5")
+    if content_md5 and meta.etag:
+        try:
+            expected_md5 = base64.b64decode(content_md5).hex()
+        except Exception:
+            storage.delete_object(bucket_name, object_key)
+            return _error_response("InvalidDigest", "Content-MD5 header is not valid base64", 400)
+        if expected_md5 != meta.etag:
+            storage.delete_object(bucket_name, object_key)
+            return _error_response("BadDigest", "The Content-MD5 you specified did not match what we received", 400)
+
+    if current_app.logger.isEnabledFor(logging.INFO):
+        current_app.logger.info(
+            "Object uploaded",
+            extra={"bucket": bucket_name, "key": object_key, "size": meta.size},
+        )
     response = Response(status=200)
     if meta.etag:
         response.headers["ETag"] = f'"{meta.etag}"'
@@ -2890,7 +2894,7 @@ def object_handler(bucket_name: str, object_key: str):
     except StorageError as exc:
         return _error_response("NoSuchKey", str(exc), 404)
     metadata = storage.get_object_metadata(bucket_name, object_key)
-    mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+    mimetype = metadata.get("__content_type__") or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
 
     is_encrypted = "x-amz-server-side-encryption" in metadata
 
@@ -2982,10 +2986,7 @@ def object_handler(bucket_name: str, object_key: str):
     response.headers["Content-Type"] = mimetype
     logged_bytes = 0
 
-    try:
-        file_stat = path.stat() if not is_encrypted else None
-    except (PermissionError, OSError):
-        file_stat = None
+    file_stat = stat if not is_encrypted else None
     _apply_object_headers(response, file_stat=file_stat, metadata=metadata, etag=etag)
 
     if request.method == "GET":
@@ -3002,8 +3003,9 @@ def object_handler(bucket_name: str, object_key: str):
         if value:
             response.headers[header] = _sanitize_header_value(value)
 
-    action = "Object read" if request.method == "GET" else "Object head"
-    current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes})
+    if current_app.logger.isEnabledFor(logging.INFO):
+        action = "Object read" if request.method == "GET" else "Object head"
+        current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes})
     return response
 
     if "uploadId" in request.args:
@@ -3021,7 +3023,8 @@ def object_handler(bucket_name: str, object_key: str):
 
     storage.delete_object(bucket_name, object_key)
     lock_service.delete_object_lock_metadata(bucket_name, object_key)
-    current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key})
+    if current_app.logger.isEnabledFor(logging.INFO):
+        current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key})
 
     principal, _ = _require_principal()
     _notifications().emit_object_removed(
@@ -3362,12 +3365,20 @@ def head_object(bucket_name: str, object_key: str) -> Response:
         _authorize_action(principal, bucket_name, "read", object_key=object_key)
         path = _storage().get_object_path(bucket_name, object_key)
         metadata = _storage().get_object_metadata(bucket_name, object_key)
-        stat = path.stat()
         etag = metadata.get("__etag__") or _storage()._compute_etag(path)
 
-        response = Response(status=200)
-        _apply_object_headers(response, file_stat=stat, metadata=metadata, etag=etag)
-        response.headers["Content-Type"] = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+        cached_size = metadata.get("__size__")
+        cached_mtime = metadata.get("__last_modified__")
+        if cached_size is not None and cached_mtime is not None:
+            size_val = int(cached_size)
+            mtime_val = float(cached_mtime)
+            response = Response(status=200)
+            _apply_object_headers(response, file_stat=None, metadata=metadata, etag=etag, size_override=size_val, mtime_override=mtime_val)
+        else:
+            stat = path.stat()
+            response = Response(status=200)
+            _apply_object_headers(response, file_stat=stat, metadata=metadata, etag=etag)
+        response.headers["Content-Type"] = metadata.get("__content_type__") or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
         return response
     except (StorageError, FileNotFoundError):
         return _error_response("NoSuchKey", "Object not found", 404)
@@ -3456,8 +3467,8 @@ def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response:
         if validation_error:
             return _error_response("InvalidArgument", validation_error, 400)
     else:
-        metadata = source_metadata
+        metadata = {k: v for k, v in source_metadata.items() if not (k.startswith("__") and k.endswith("__"))}
 
     try:
         with source_path.open("rb") as stream:
             meta = storage.put_object(
@@ -3597,10 +3608,12 @@ def _initiate_multipart_upload(bucket_name: str, object_key: str) -> Response:
         return error
 
     metadata = _extract_request_metadata()
+    content_type = request.headers.get("Content-Type")
+    metadata["__content_type__"] = content_type or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
     try:
         upload_id = _storage().initiate_multipart_upload(
             bucket_name,
             object_key,
+            metadata=metadata or None
         )
     except StorageError as exc:
@@ -3649,6 +3662,15 @@ def _upload_part(bucket_name: str, object_key: str) -> Response:
             return _error_response("NoSuchUpload", str(exc), 404)
         return _error_response("InvalidArgument", str(exc), 400)
 
+    content_md5 = request.headers.get("Content-MD5")
+    if content_md5 and etag:
+        try:
+            expected_md5 = base64.b64decode(content_md5).hex()
+        except Exception:
+            return _error_response("InvalidDigest", "Content-MD5 header is not valid base64", 400)
+        if expected_md5 != etag:
+            return _error_response("BadDigest", "The Content-MD5 you specified did not match what we received", 400)
+
     response = Response(status=200)
     response.headers["ETag"] = f'"{etag}"'
     return response
```
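Both Content-MD5 checks added above rely on the same equivalence: the request header carries base64 of the raw MD5 digest, while the server's ETag is the hex digest, so decoding the header to hex makes the two directly comparable. In isolation:

```python
# How a client's Content-MD5 header lines up with the server-side check:
# header = base64(md5(body)); server compares its hex form to the hex ETag.
import base64
import hashlib

body = b"hello world"
content_md5 = base64.b64encode(hashlib.md5(body).digest()).decode()  # request header
etag = hashlib.md5(body).hexdigest()                                  # server-side ETag
assert base64.b64decode(content_md5).hex() == etag
```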
```diff
@@ -245,6 +245,7 @@ def stream_objects_ndjson(
     url_templates: dict[str, str],
     display_tz: str = "UTC",
     versioning_enabled: bool = False,
+    delimiter: Optional[str] = None,
 ) -> Generator[str, None, None]:
     meta_line = json.dumps({
         "type": "meta",
@@ -258,11 +259,20 @@ def stream_objects_ndjson(
     kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
     if prefix:
         kwargs["Prefix"] = prefix
+    if delimiter:
+        kwargs["Delimiter"] = delimiter
 
     running_count = 0
     try:
         paginator = client.get_paginator("list_objects_v2")
         for page in paginator.paginate(**kwargs):
-            for obj in page.get("Contents", []):
+            for cp in page.get("CommonPrefixes", []):
+                yield json.dumps({
+                    "type": "folder",
+                    "prefix": cp["Prefix"],
+                }) + "\n"
+            page_contents = page.get("Contents", [])
+            for obj in page_contents:
                 last_mod = obj["LastModified"]
                 yield json.dumps({
                     "type": "object",
@@ -273,6 +283,8 @@ def stream_objects_ndjson(
                     "last_modified_iso": format_datetime_iso(last_mod, display_tz),
                     "etag": obj.get("ETag", "").strip('"'),
                 }) + "\n"
+            running_count += len(page_contents)
+            yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
     except ClientError as exc:
         error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
         yield json.dumps({"type": "error", "error": error_msg}) + "\n"
```
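A hedged sketch of what a consumer of this NDJSON stream might look like: one JSON document per line, dispatched on the `type` field. The `folder`, `count`, and `error` shapes come straight from the diff; object fields beyond `last_modified_iso` and `etag` are assumptions, since the object record is only partially visible in the hunk:

```python
# One JSON document per NDJSON line; dispatch on the "type" field.
import json

def consume(lines):
    for line in lines:
        record = json.loads(line)
        if record["type"] == "folder":
            print("folder:", record["prefix"])
        elif record["type"] == "object":
            print("object:", record.get("etag"))
        elif record["type"] == "count":
            print("total so far:", record["total_count"])
        elif record["type"] == "error":
            raise RuntimeError(record["error"])
```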
**app/storage.py** (955 changed lines; diff suppressed because it is too large)

**app/ui.py** (95 changed lines)
```diff
@@ -616,6 +616,7 @@ def stream_bucket_objects(bucket_name: str):
         return jsonify({"error": str(exc)}), 403
 
     prefix = request.args.get("prefix") or None
+    delimiter = request.args.get("delimiter") or None
 
     try:
         client = get_session_s3_client()
@@ -629,6 +630,7 @@ def stream_bucket_objects(bucket_name: str):
     return Response(
         stream_objects_ndjson(
             client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
+            delimiter=delimiter,
         ),
         mimetype='application/x-ndjson',
         headers={
@@ -639,6 +641,33 @@ def stream_bucket_objects(bucket_name: str):
     )
 
 
+@ui_bp.get("/buckets/<bucket_name>/objects/search")
+@limiter.limit("30 per minute")
+def search_bucket_objects(bucket_name: str):
+    principal = _current_principal()
+    try:
+        _authorize_ui(principal, bucket_name, "list")
+    except IamError as exc:
+        return jsonify({"error": str(exc)}), 403
+
+    query = request.args.get("q", "").strip()
+    if not query:
+        return jsonify({"results": [], "truncated": False})
+
+    try:
+        limit = max(1, min(int(request.args.get("limit", 500)), 1000))
+    except (ValueError, TypeError):
+        limit = 500
+
+    prefix = request.args.get("prefix", "").strip()
+
+    storage = _storage()
+    try:
+        return jsonify(storage.search_objects(bucket_name, query, prefix=prefix, limit=limit))
+    except StorageError as exc:
+        return jsonify({"error": str(exc)}), 404
+
+
 @ui_bp.post("/buckets/<bucket_name>/upload")
 @limiter.limit("30 per minute")
 def upload_object(bucket_name: str):
@@ -1301,12 +1330,14 @@ def object_versions(bucket_name: str, object_key: str):
         for v in resp.get("Versions", []):
             if v.get("Key") != object_key:
                 continue
+            if v.get("IsLatest", False):
+                continue
             versions.append({
                 "version_id": v.get("VersionId", ""),
                 "last_modified": v["LastModified"].isoformat() if v.get("LastModified") else None,
                 "size": v.get("Size", 0),
                 "etag": v.get("ETag", "").strip('"'),
-                "is_latest": v.get("IsLatest", False),
+                "is_latest": False,
             })
         return jsonify({"versions": versions})
     except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
@@ -1723,6 +1754,10 @@ def iam_dashboard():
     users = iam_service.list_users() if not locked else []
     config_summary = iam_service.config_summary()
     config_document = json.dumps(iam_service.export_config(mask_secrets=True), indent=2)
+    from datetime import datetime as _dt, timedelta as _td, timezone as _tz
+    _now = _dt.now(_tz.utc)
+    now_iso = _now.isoformat()
+    soon_iso = (_now + _td(days=7)).isoformat()
     return render_template(
         "iam.html",
         users=users,
@@ -1732,6 +1767,8 @@ def iam_dashboard():
         config_summary=config_summary,
         config_document=config_document,
         disclosed_secret=disclosed_secret,
+        now_iso=now_iso,
+        soon_iso=soon_iso,
     )
 
 
@@ -1751,6 +1788,8 @@ def create_iam_user():
             return jsonify({"error": "Display name must be 64 characters or fewer"}), 400
         flash("Display name must be 64 characters or fewer", "danger")
         return redirect(url_for("ui.iam_dashboard"))
+    custom_access_key = request.form.get("access_key", "").strip() or None
+    custom_secret_key = request.form.get("secret_key", "").strip() or None
     policies_text = request.form.get("policies", "").strip()
     policies = None
     if policies_text:
@@ -1761,8 +1800,21 @@ def create_iam_user():
                 return jsonify({"error": f"Invalid JSON: {exc}"}), 400
             flash(f"Invalid JSON: {exc}", "danger")
             return redirect(url_for("ui.iam_dashboard"))
+    expires_at = request.form.get("expires_at", "").strip() or None
+    if expires_at:
+        try:
+            from datetime import datetime as _dt, timezone as _tz
+            exp_dt = _dt.fromisoformat(expires_at)
+            if exp_dt.tzinfo is None:
+                exp_dt = exp_dt.replace(tzinfo=_tz.utc)
+            expires_at = exp_dt.isoformat()
+        except (ValueError, TypeError):
+            if _wants_json():
+                return jsonify({"error": "Invalid expiry date format"}), 400
+            flash("Invalid expiry date format", "danger")
+            return redirect(url_for("ui.iam_dashboard"))
     try:
-        created = _iam().create_user(display_name=display_name, policies=policies)
+        created = _iam().create_user(display_name=display_name, policies=policies, access_key=custom_access_key, secret_key=custom_secret_key, expires_at=expires_at)
     except IamError as exc:
         if _wants_json():
             return jsonify({"error": str(exc)}), 400
@@ -1936,6 +1988,45 @@ def update_iam_policies(access_key: str):
     return redirect(url_for("ui.iam_dashboard"))
 
 
+@ui_bp.post("/iam/users/<access_key>/expiry")
+def update_iam_expiry(access_key: str):
+    principal = _current_principal()
+    try:
+        _iam().authorize(principal, None, "iam:update_policy")
+    except IamError as exc:
+        if _wants_json():
+            return jsonify({"error": str(exc)}), 403
+        flash(str(exc), "danger")
+        return redirect(url_for("ui.iam_dashboard"))
+
+    expires_at = request.form.get("expires_at", "").strip() or None
+    if expires_at:
+        try:
+            from datetime import datetime as _dt, timezone as _tz
+            exp_dt = _dt.fromisoformat(expires_at)
+            if exp_dt.tzinfo is None:
+                exp_dt = exp_dt.replace(tzinfo=_tz.utc)
+            expires_at = exp_dt.isoformat()
+        except (ValueError, TypeError):
+            if _wants_json():
+                return jsonify({"error": "Invalid expiry date format"}), 400
+            flash("Invalid expiry date format", "danger")
+            return redirect(url_for("ui.iam_dashboard"))
+
+    try:
+        _iam().update_user_expiry(access_key, expires_at)
+        if _wants_json():
+            return jsonify({"success": True, "message": f"Updated expiry for {access_key}", "expires_at": expires_at})
+        label = expires_at if expires_at else "never"
+        flash(f"Expiry for {access_key} set to {label}", "success")
+    except IamError as exc:
+        if _wants_json():
+            return jsonify({"error": str(exc)}), 400
+        flash(str(exc), "danger")
+
+    return redirect(url_for("ui.iam_dashboard"))
+
+
 @ui_bp.post("/connections")
 def create_connection():
     principal = _current_principal()
```
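The search route above clamps the user-supplied `limit` before it reaches storage: values parse as integers and land in [1, 1000], with 500 as the fallback. The same guard in isolation:

```python
# Parse-and-clamp for user-supplied limits, as in search_bucket_objects().
def clamp_limit(raw, default=500, lo=1, hi=1000):
    try:
        return max(lo, min(int(raw), hi))
    except (ValueError, TypeError):
        return default

print(clamp_limit("50"))    # 50
print(clamp_limit("9999"))  # 1000
print(clamp_limit("abc"))   # 500
```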
```diff
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-APP_VERSION = "0.3.0"
+APP_VERSION = "0.3.7"
 
 
 def get_version() -> str:
```
**docs.md** (104 changed lines)
@@ -139,18 +139,21 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. |
|
||||
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
|
||||
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
|
||||
| `DISPLAY_TIMEZONE` | `UTC` | Timezone for timestamps in the web UI (e.g., `US/Eastern`, `Asia/Tokyo`). |
|
||||
|
||||
### IAM & Security
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
|
||||
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. Encrypted at rest when `SECRET_KEY` is set. |
|
||||
| `BUCKET_POLICY_PATH` | `data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
|
||||
| `AUTH_MAX_ATTEMPTS` | `5` | Failed login attempts before lockout. |
|
||||
| `AUTH_LOCKOUT_MINUTES` | `15` | Lockout duration after max failed attempts. |
|
||||
| `SESSION_LIFETIME_DAYS` | `30` | How long UI sessions remain valid. |
|
||||
| `SECRET_TTL_SECONDS` | `300` | TTL for ephemeral secrets (presigned URLs). |
|
||||
| `UI_ENFORCE_BUCKET_POLICIES` | `false` | Whether the UI should enforce bucket policies. |
|
||||
| `ADMIN_ACCESS_KEY` | (none) | Custom access key for the admin user on first run or credential reset. If unset, a random key is generated. |
|
||||
| `ADMIN_SECRET_KEY` | (none) | Custom secret key for the admin user on first run or credential reset. If unset, a random key is generated. |
|
||||
|
||||
### CORS (Cross-Origin Resource Sharing)
|
||||
|
||||
@@ -170,6 +173,7 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/<bucket>`). |
|
||||
| `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `/<bucket>/<key>`). |
|
||||
| `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). |
|
||||
| `RATE_LIMIT_ADMIN` | `60 per minute` | Rate limit for admin API endpoints (`/admin/*`). |
|
||||
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
|
||||
|
||||
### Server Configuration
|
||||
@@ -256,6 +260,12 @@ Once enabled, configure lifecycle rules via:
|
||||
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
|
||||
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
|
||||
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
|
||||
| `BULK_DOWNLOAD_MAX_BYTES` | `1073741824` (1 GiB) | Maximum total size for bulk ZIP downloads. |
|
||||
| `OBJECT_CACHE_TTL` | `60` | Seconds to cache object metadata. |
|
||||
|
||||
#### Gzip Compression
|
||||
|
||||
API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-compressed when the client sends `Accept-Encoding: gzip`. Compression activates for responses larger than 500 bytes and is handled by a WSGI middleware (`app/compression.py`). Binary object downloads and streaming responses are never compressed. No configuration is needed.
|
||||
|
||||
### Server Settings
|
||||
|
||||
@@ -269,13 +279,14 @@ Once enabled, configure lifecycle rules via:
|
||||
|
||||
Before deploying to production, ensure you:
|
||||
|
||||
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`)
|
||||
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`). This also enables IAM config encryption at rest.
|
||||
2. **Restrict CORS** - Set `CORS_ORIGINS` to your specific domains instead of `*`
|
||||
3. **Configure `API_BASE_URL`** - Required for correct presigned URLs behind proxies
|
||||
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
|
||||
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
|
||||
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
|
||||
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
|
||||
8. **Set credential expiry** - Assign `expires_at` to non-admin users for time-limited access
|
||||
|
||||
### Proxy Configuration
|
||||
|
||||
@@ -285,6 +296,12 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
|
||||
|
||||
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `NUM_TRUSTED_PROXIES` | `1` | Number of trusted reverse proxies for `X-Forwarded-*` header processing. |
|
||||
| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. |
|
||||
| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** |
|
||||
|
||||
## 4. Upgrading and Updates
|
||||
|
||||
### Version Checking
|
||||
@@ -619,9 +636,10 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
|
||||
|
||||
### Getting Started
|
||||
|
||||
1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. If you miss it, check the `iam.json` file directly—credentials are stored in plaintext.
|
||||
1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. You can set `ADMIN_ACCESS_KEY` and `ADMIN_SECRET_KEY` environment variables to use custom credentials instead of random ones. If `SECRET_KEY` is configured, the IAM config file is encrypted at rest using AES (Fernet). To reset admin credentials later, run `python run.py --reset-cred`.
|
||||
2. Sign into the UI using the generated credentials, then open **IAM**:
|
||||
- **Create user**: supply a display name and optional JSON inline policy array.
|
||||
- **Create user**: supply a display name, optional JSON inline policy array, and optional credential expiry date.
|
||||
- **Set expiry**: assign an expiration date to any user's credentials. Expired credentials are rejected at authentication time. The UI shows expiry badges and preset durations (1h, 24h, 7d, 30d, 90d).
|
||||
- **Rotate secret**: generates a new secret key; the UI surfaces it once.
|
||||
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
|
||||
3. Wildcard action `iam:*` is supported for admin user definitions.

@@ -639,8 +657,11 @@ The API expects every request to include authentication headers. The UI persists

**Security Features:**

- **Lockout Protection**: After `AUTH_MAX_ATTEMPTS` (default: 5) failed login attempts, the account is locked for `AUTH_LOCKOUT_MINUTES` (default: 15 minutes).
- **Credential Expiry**: Each user can have an optional `expires_at` timestamp (ISO 8601). Once expired, all API requests using those credentials are rejected. Set or clear expiry via the UI or API.
- **IAM Config Encryption**: When `SECRET_KEY` is set, the IAM config file (`iam.json`) is encrypted at rest using Fernet (AES-128-CBC with HMAC-SHA256). Existing plaintext configs are automatically encrypted on next load.
- **Session Management**: UI sessions remain valid for `SESSION_LIFETIME_DAYS` (default: 30 days).
- **Hot Reload**: IAM configuration changes take effect immediately without restart.
- **Credential Reset**: Run `python run.py --reset-cred` to reset admin credentials. Supports `ADMIN_ACCESS_KEY` and `ADMIN_SECRET_KEY` env vars for deterministic keys.
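The expiry check reduces to a single timestamp comparison at authentication time (a minimal sketch with a hypothetical helper name; the real check lives in the IAM service):

```python
# Sketch of the credential-expiry check described above.
from datetime import datetime, timezone

def credentials_expired(expires_at: str | None) -> bool:
    if not expires_at:
        return False  # no expiry set: credentials never expire
    # Accept ISO 8601 with a trailing "Z" (e.g. "2026-12-31T23:59:59Z").
    expiry = datetime.fromisoformat(expires_at.replace("Z", "+00:00"))
    return datetime.now(timezone.utc) >= expiry
```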

### Permission Model

@@ -800,7 +821,8 @@ curl -X POST http://localhost:5000/iam/users \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "display_name": "New User",
    "policies": [{"bucket": "*", "actions": ["list", "read"]}]
    "policies": [{"bucket": "*", "actions": ["list", "read"]}],
    "expires_at": "2026-12-31T23:59:59Z"
  }'

# Rotate user secret (requires iam:rotate_key)
@@ -813,6 +835,18 @@ curl -X PUT http://localhost:5000/iam/users/<access-key>/policies \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '[{"bucket": "*", "actions": ["list", "read", "write"]}]'

# Update credential expiry (requires iam:update_policy)
curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
  -H "Content-Type: application/x-www-form-urlencoded" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d 'expires_at=2026-12-31T23:59:59Z'

# Remove credential expiry (never expires)
curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
  -H "Content-Type: application/x-www-form-urlencoded" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d 'expires_at='

# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
@@ -824,8 +858,9 @@ When a request is made, permissions are evaluated in this order:

1. **Authentication** – Verify the access key and secret key are valid
2. **Lockout Check** – Ensure the account is not locked due to failed attempts
3. **IAM Policy Check** – Verify the user has the required action for the target bucket
4. **Bucket Policy Check** – If a bucket policy exists, verify it allows the action
3. **Expiry Check** – Reject requests if the user's credentials have expired (`expires_at`)
4. **IAM Policy Check** – Verify the user has the required action for the target bucket
5. **Bucket Policy Check** – If a bucket policy exists, verify it allows the action

A request is allowed only if:
- The IAM policy grants the action, AND
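A minimal sketch of this five-step order (the individual check results are passed in as plain booleans; names are illustrative, not the app's API):

```python
# Sketch of the evaluation order above.
def authorize(
    authenticated: bool,
    locked_out: bool,
    expired: bool,
    iam_allows: bool,
    bucket_policy_allows: bool | None,  # None when no bucket policy exists
) -> bool:
    if not authenticated or locked_out or expired:
        return False        # steps 1-3: hard failures
    if not iam_allows:
        return False        # step 4: IAM policy must grant the action
    # Step 5: a bucket policy, when present, must also allow the action.
    return bucket_policy_allows in (True, None)
```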
@@ -912,7 +947,7 @@ Objects with forward slashes (`/`) in their keys are displayed as a folder hiera

- Select multiple objects using checkboxes
- **Bulk Delete**: Delete multiple objects at once
- **Bulk Download**: Download selected objects as individual files
- **Bulk Download**: Download selected objects as a single ZIP archive (up to `BULK_DOWNLOAD_MAX_BYTES`, default 1 GiB)

#### Search & Filter

@@ -985,6 +1020,7 @@ MyFSIO supports **server-side encryption at rest** to protect your data. When en
|------|-------------|
| **AES-256 (SSE-S3)** | Server-managed encryption using a local master key |
| **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS |
| **SSE-C** | Server-side encryption with customer-provided keys (per-request) |

### Enabling Encryption

@@ -1083,6 +1119,44 @@ encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key)
decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key)
```

### SSE-C (Customer-Provided Keys)

With SSE-C, you provide your own 256-bit AES encryption key with each request. The server encrypts/decrypts using your key but never stores it. You must supply the same key for both upload and download.

**Required headers:**

| Header | Value |
|--------|-------|
| `x-amz-server-side-encryption-customer-algorithm` | `AES256` |
| `x-amz-server-side-encryption-customer-key` | Base64-encoded 256-bit key |
| `x-amz-server-side-encryption-customer-key-MD5` | Base64-encoded MD5 of the key |

```bash
# Generate a 256-bit key
KEY=$(openssl rand -base64 32)
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)

# Upload with SSE-C
curl -X PUT "http://localhost:5000/my-bucket/secret.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
  -H "x-amz-server-side-encryption-customer-key: $KEY" \
  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
  --data-binary @secret.txt

# Download with SSE-C (same key required)
curl "http://localhost:5000/my-bucket/secret.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
  -H "x-amz-server-side-encryption-customer-key: $KEY" \
  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"
```

**Key points:**
- SSE-C does not require `ENCRYPTION_ENABLED` or `KMS_ENABLED` — the key is provided per-request
- If you lose your key, the data is irrecoverable
- The MD5 header is optional but recommended for integrity verification
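The same key material can be prepared without `openssl` (a minimal Python sketch):

```python
# Generate a random 256-bit SSE-C key and its base64/MD5 header values.
import base64
import hashlib
import os

key = os.urandom(32)                                   # 256-bit key
key_b64 = base64.b64encode(key).decode()               # ...-customer-key
key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()  # ...-customer-key-MD5
print(key_b64, key_md5)
```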

### Important Notes

- **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted
@@ -1959,6 +2033,20 @@ curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
  -H "x-amz-meta-newkey: newvalue"
```

### MoveObject (UI)

Move an object to a different key or bucket. This is a UI-only convenience operation that performs a copy followed by a delete of the source. Requires `read` and `delete` on the source, and `write` on the destination.

```bash
# Move via UI API
curl -X POST "http://localhost:5100/ui/buckets/my-bucket/objects/old-path/file.txt/move" \
  -H "Content-Type: application/json" \
  --cookie "session=..." \
  -d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'
```

The move is safe, though not strictly atomic: if the copy succeeds but the delete fails, the object remains in both locations, so no data is lost.
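A minimal sketch of the copy-then-delete semantics (hypothetical storage API, for illustration):

```python
# Copy-then-delete move that fails safe: a failed delete leaves the
# object in both places rather than losing it. Names are illustrative.
import logging

def move_object(storage, src_bucket, src_key, dest_bucket, dest_key):
    storage.copy_object(src_bucket, src_key, dest_bucket, dest_key)
    try:
        storage.delete_object(src_bucket, src_key)
    except Exception:
        # Copy already succeeded; surface the failure but keep the copy.
        logging.exception("move: source delete failed; object exists in both locations")
        raise
```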

### UploadPartCopy

Copy data from an existing object into a multipart upload part:

@@ -19,3 +19,6 @@ regex = "1"
lru = "0.14"
parking_lot = "0.12"
percent-encoding = "2"
aes-gcm = "0.10"
hkdf = "0.12"
uuid = { version = "1", features = ["v4"] }

192
myfsio_core/src/crypto.rs
Normal file
@@ -0,0 +1,192 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use hkdf::Hkdf;
use pyo3::exceptions::{PyIOError, PyValueError};
use pyo3::prelude::*;
use sha2::Sha256;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};

const DEFAULT_CHUNK_SIZE: usize = 65536;
const HEADER_SIZE: usize = 4;

fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut filled = 0;
    while filled < buf.len() {
        match reader.read(&mut buf[filled..]) {
            Ok(0) => break,
            Ok(n) => filled += n,
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(filled)
}

fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], String> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| format!("HKDF expand failed: {}", e))?;
    Ok(okm)
}

#[pyfunction]
#[pyo3(signature = (input_path, output_path, key, base_nonce, chunk_size=DEFAULT_CHUNK_SIZE))]
pub fn encrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: usize,
) -> PyResult<u32> {
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    let chunk_size = if chunk_size == 0 {
        DEFAULT_CHUNK_SIZE
    } else {
        chunk_size
    };

    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        outfile
            .write_all(&[0u8; 4])
            .map_err(|e| PyIOError::new_err(format!("Failed to write header: {}", e)))?;

        let mut buf = vec![0u8; chunk_size];
        let mut chunk_index: u32 = 0;

        loop {
            let n = read_exact_chunk(&mut infile, &mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read: {}", e)))?;
            if n == 0 {
                break;
            }

            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            let encrypted = cipher
                .encrypt(nonce, &buf[..n])
                .map_err(|e| PyValueError::new_err(format!("Encrypt failed: {}", e)))?;

            let size = encrypted.len() as u32;
            outfile
                .write_all(&size.to_be_bytes())
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk size: {}", e)))?;
            outfile
                .write_all(&encrypted)
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk: {}", e)))?;

            chunk_index += 1;
        }

        outfile
            .seek(SeekFrom::Start(0))
            .map_err(|e| PyIOError::new_err(format!("Failed to seek: {}", e)))?;
        outfile
            .write_all(&chunk_index.to_be_bytes())
            .map_err(|e| PyIOError::new_err(format!("Failed to write chunk count: {}", e)))?;

        Ok(chunk_index)
    })
}

#[pyfunction]
pub fn decrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
) -> PyResult<u32> {
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        let mut header = [0u8; HEADER_SIZE];
        infile
            .read_exact(&mut header)
            .map_err(|e| PyIOError::new_err(format!("Failed to read header: {}", e)))?;
        let chunk_count = u32::from_be_bytes(header);

        let mut size_buf = [0u8; HEADER_SIZE];
        for chunk_index in 0..chunk_count {
            infile
                .read_exact(&mut size_buf)
                .map_err(|e| {
                    PyIOError::new_err(format!(
                        "Failed to read chunk {} size: {}",
                        chunk_index, e
                    ))
                })?;
            let chunk_size = u32::from_be_bytes(size_buf) as usize;

            let mut encrypted = vec![0u8; chunk_size];
            infile.read_exact(&mut encrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to read chunk {}: {}", chunk_index, e))
            })?;

            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            let decrypted = cipher.decrypt(nonce, encrypted.as_ref()).map_err(|e| {
                PyValueError::new_err(format!("Decrypt chunk {} failed: {}", chunk_index, e))
            })?;

            outfile.write_all(&decrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to write chunk {}: {}", chunk_index, e))
            })?;
        }

        Ok(chunk_count)
    })
}
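For reference, the on-disk layout this module writes is: a 4-byte big-endian chunk count, then for each chunk a 4-byte big-endian ciphertext length followed by the AES-GCM ciphertext, with each chunk's nonce derived via HKDF-SHA256 from the base nonce and the chunk index. A minimal pure-Python reader for this format (a sketch assuming the `cryptography` package; not part of the codebase):

```python
# Sketch: decrypt a file produced by encrypt_stream_chunked above.
# Format: [u32 chunk_count][u32 len][ciphertext]... (all big-endian).
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

def derive_chunk_nonce(base_nonce: bytes, chunk_index: int) -> bytes:
    # Mirrors the Rust side: salt=base_nonce, ikm=b"chunk_nonce",
    # info=big-endian chunk index, 12-byte output.
    return HKDF(
        algorithm=hashes.SHA256(),
        length=12,
        salt=base_nonce,
        info=chunk_index.to_bytes(4, "big"),
    ).derive(b"chunk_nonce")

def decrypt_chunked(path: str, key: bytes, base_nonce: bytes) -> bytes:
    aead = AESGCM(key)
    plaintext = bytearray()
    with open(path, "rb") as fh:
        chunk_count = int.from_bytes(fh.read(4), "big")
        for i in range(chunk_count):
            size = int.from_bytes(fh.read(4), "big")
            plaintext.extend(aead.decrypt(derive_chunk_nonce(base_nonce, i), fh.read(size), None))
    return bytes(plaintext)
```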
@@ -1,6 +1,9 @@
mod crypto;
mod hashing;
mod metadata;
mod sigv4;
mod storage;
mod streaming;
mod validation;

use pyo3::prelude::*;
@@ -29,6 +32,20 @@ mod myfsio_core {

        m.add_function(wrap_pyfunction!(metadata::read_index_entry, m)?)?;

        m.add_function(wrap_pyfunction!(storage::write_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::delete_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::check_bucket_contents, m)?)?;
        m.add_function(wrap_pyfunction!(storage::shallow_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::bucket_stats_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::search_objects_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::build_object_cache, m)?)?;

        m.add_function(wrap_pyfunction!(streaming::stream_to_file_with_md5, m)?)?;
        m.add_function(wrap_pyfunction!(streaming::assemble_parts_with_md5, m)?)?;

        m.add_function(wrap_pyfunction!(crypto::encrypt_stream_chunked, m)?)?;
        m.add_function(wrap_pyfunction!(crypto::decrypt_stream_chunked, m)?)?;

        Ok(())
    }
}
817
myfsio_core/src/storage.rs
Normal file
@@ -0,0 +1,817 @@
use pyo3::exceptions::PyIOError;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyList, PyString, PyTuple};
use serde_json::Value;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::time::SystemTime;

const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

fn system_time_to_epoch(t: SystemTime) -> f64 {
    t.duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs_f64())
        .unwrap_or(0.0)
}

fn extract_etag_from_meta_bytes(content: &[u8]) -> Option<String> {
    let marker = b"\"__etag__\"";
    let idx = content.windows(marker.len()).position(|w| w == marker)?;
    let after = &content[idx + marker.len()..];
    let start = after.iter().position(|&b| b == b'"')? + 1;
    let rest = &after[start..];
    let end = rest.iter().position(|&b| b == b'"')?;
    std::str::from_utf8(&rest[..end]).ok().map(|s| s.to_owned())
}

fn has_any_file(root: &str) -> bool {
    let root_path = Path::new(root);
    if !root_path.is_dir() {
        return false;
    }
    let mut stack = vec![root_path.to_path_buf()];
    while let Some(current) = stack.pop() {
        let entries = match fs::read_dir(&current) {
            Ok(e) => e,
            Err(_) => continue,
        };
        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_file() {
                return true;
            }
            if ft.is_dir() && !ft.is_symlink() {
                stack.push(entry.path());
            }
        }
    }
    false
}

#[pyfunction]
pub fn write_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
    entry_data_json: &str,
) -> PyResult<()> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();
    let data_owned = entry_data_json.to_owned();

    py.detach(move || -> PyResult<()> {
        let entry_value: Value = serde_json::from_str(&data_owned)
            .map_err(|e| PyIOError::new_err(format!("Failed to parse entry data: {}", e)))?;

        if let Some(parent) = Path::new(&path_owned).parent() {
            let _ = fs::create_dir_all(parent);
        }

        let mut index_data: serde_json::Map<String, Value> = match fs::read_to_string(&path_owned)
        {
            Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
            Err(_) => serde_json::Map::new(),
        };

        index_data.insert(entry_owned, entry_value);

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(())
    })
}

#[pyfunction]
pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyResult<bool> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();

    py.detach(move || -> PyResult<bool> {
        let content = match fs::read_to_string(&path_owned) {
            Ok(c) => c,
            Err(_) => return Ok(false),
        };

        let mut index_data: serde_json::Map<String, Value> =
            match serde_json::from_str(&content) {
                Ok(v) => v,
                Err(_) => return Ok(false),
            };

        if index_data.remove(&entry_owned).is_none() {
            return Ok(false);
        }

        if index_data.is_empty() {
            let _ = fs::remove_file(&path_owned);
            return Ok(true);
        }

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        // The entry was removed and the index rewritten, so report success.
        Ok(true)
    })
}

#[pyfunction]
pub fn check_bucket_contents(
    py: Python<'_>,
    bucket_path: &str,
    version_roots: Vec<String>,
    multipart_roots: Vec<String>,
) -> PyResult<(bool, bool, bool)> {
    let bucket_owned = bucket_path.to_owned();

    py.detach(move || -> PyResult<(bool, bool, bool)> {
        let mut has_objects = false;
        let bucket_p = Path::new(&bucket_owned);
        if bucket_p.is_dir() {
            let mut stack = vec![bucket_p.to_path_buf()];
            'obj_scan: while let Some(current) = stack.pop() {
                let is_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if is_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    if ft.is_file() && !ft.is_symlink() {
                        has_objects = true;
                        break 'obj_scan;
                    }
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    }
                }
            }
        }

        let mut has_versions = false;
        for root in &version_roots {
            if has_versions {
                break;
            }
            has_versions = has_any_file(root);
        }

        let mut has_multipart = false;
        for root in &multipart_roots {
            if has_multipart {
                break;
            }
            has_multipart = has_any_file(root);
        }

        Ok((has_objects, has_versions, has_multipart))
    })
}

#[pyfunction]
pub fn shallow_scan(
    py: Python<'_>,
    target_dir: &str,
    prefix: &str,
    meta_cache_json: &str,
) -> PyResult<Py<PyAny>> {
    let target_owned = target_dir.to_owned();
    let prefix_owned = prefix.to_owned();
    let cache_owned = meta_cache_json.to_owned();

    let result: (
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    ) = py.detach(move || -> PyResult<(
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    )> {
        let meta_cache: HashMap<String, String> =
            serde_json::from_str(&cache_owned).unwrap_or_default();

        let mut files: Vec<(String, u64, f64, Option<String>)> = Vec::new();
        let mut dirs: Vec<String> = Vec::new();

        let entries = match fs::read_dir(&target_owned) {
            Ok(e) => e,
            Err(_) => return Ok((files, dirs, Vec::new())),
        };

        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            let name = match entry.file_name().into_string() {
                Ok(n) => n,
                Err(_) => continue,
            };
            if INTERNAL_FOLDERS.contains(&name.as_str()) {
                continue;
            }
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_dir() && !ft.is_symlink() {
                let cp = format!("{}{}/", prefix_owned, name);
                dirs.push(cp);
            } else if ft.is_file() && !ft.is_symlink() {
                let key = format!("{}{}", prefix_owned, name);
                let md = match entry.metadata() {
                    Ok(m) => m,
                    Err(_) => continue,
                };
                let size = md.len();
                let mtime = md
                    .modified()
                    .map(system_time_to_epoch)
                    .unwrap_or(0.0);
                let etag = meta_cache.get(&key).cloned();
                files.push((key, size, mtime, etag));
            }
        }

        files.sort_by(|a, b| a.0.cmp(&b.0));
        dirs.sort();

        let mut merged: Vec<(String, bool)> = Vec::with_capacity(files.len() + dirs.len());
        let mut fi = 0;
        let mut di = 0;
        while fi < files.len() && di < dirs.len() {
            if files[fi].0 <= dirs[di] {
                merged.push((files[fi].0.clone(), false));
                fi += 1;
            } else {
                merged.push((dirs[di].clone(), true));
                di += 1;
            }
        }
        while fi < files.len() {
            merged.push((files[fi].0.clone(), false));
            fi += 1;
        }
        while di < dirs.len() {
            merged.push((dirs[di].clone(), true));
            di += 1;
        }

        Ok((files, dirs, merged))
    })?;

    let (files, dirs, merged) = result;

    let dict = PyDict::new(py);

    let files_list = PyList::empty(py);
    for (key, size, mtime, etag) in &files {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        files_list.append(tuple)?;
    }
    dict.set_item("files", files_list)?;

    let dirs_list = PyList::empty(py);
    for d in &dirs {
        dirs_list.append(PyString::new(py, d))?;
    }
    dict.set_item("dirs", dirs_list)?;

    let merged_list = PyList::empty(py);
    for (key, is_dir) in &merged {
        let bool_obj: Py<PyAny> = if *is_dir {
            true.into_pyobject(py)?.to_owned().into_any().unbind()
        } else {
            false.into_pyobject(py)?.to_owned().into_any().unbind()
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            bool_obj,
        ])?;
        merged_list.append(tuple)?;
    }
    dict.set_item("merged_keys", merged_list)?;

    Ok(dict.into_any().unbind())
}

#[pyfunction]
pub fn bucket_stats_scan(
    py: Python<'_>,
    bucket_path: &str,
    versions_root: &str,
) -> PyResult<(u64, u64, u64, u64)> {
    let bucket_owned = bucket_path.to_owned();
    let versions_owned = versions_root.to_owned();

    py.detach(move || -> PyResult<(u64, u64, u64, u64)> {
        let mut object_count: u64 = 0;
        let mut total_bytes: u64 = 0;

        let bucket_p = Path::new(&bucket_owned);
        if bucket_p.is_dir() {
            let mut stack = vec![bucket_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let is_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    if is_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        object_count += 1;
                        if let Ok(md) = entry.metadata() {
                            total_bytes += md.len();
                        }
                    }
                }
            }
        }

        let mut version_count: u64 = 0;
        let mut version_bytes: u64 = 0;

        let versions_p = Path::new(&versions_owned);
        if versions_p.is_dir() {
            let mut stack = vec![versions_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        if let Some(name) = entry.file_name().to_str() {
                            if name.ends_with(".bin") {
                                version_count += 1;
                                if let Ok(md) = entry.metadata() {
                                    version_bytes += md.len();
                                }
                            }
                        }
                    }
                }
            }
        }

        Ok((object_count, total_bytes, version_count, version_bytes))
    })
}

#[pyfunction]
#[pyo3(signature = (bucket_path, search_root, query, limit))]
pub fn search_objects_scan(
    py: Python<'_>,
    bucket_path: &str,
    search_root: &str,
    query: &str,
    limit: usize,
) -> PyResult<Py<PyAny>> {
    let bucket_owned = bucket_path.to_owned();
    let search_owned = search_root.to_owned();
    let query_owned = query.to_owned();

    let result: (Vec<(String, u64, f64)>, bool) = py.detach(
        move || -> PyResult<(Vec<(String, u64, f64)>, bool)> {
            let query_lower = query_owned.to_lowercase();
            let bucket_len = bucket_owned.len() + 1;
            let scan_limit = limit * 4;
            let mut matched: usize = 0;
            let mut results: Vec<(String, u64, f64)> = Vec::new();

            let search_p = Path::new(&search_owned);
            if !search_p.is_dir() {
                return Ok((results, false));
            }

            let bucket_p = Path::new(&bucket_owned);
            let mut stack = vec![search_p.to_path_buf()];

            'scan: while let Some(current) = stack.pop() {
                let is_bucket_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    if is_bucket_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        let full_path = entry.path();
                        let full_str = full_path.to_string_lossy();
                        if full_str.len() <= bucket_len {
                            continue;
                        }
                        let key = full_str[bucket_len..].replace('\\', "/");
                        if key.to_lowercase().contains(&query_lower) {
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                results.push((key, size, mtime));
                                matched += 1;
                            }
                        }
                        if matched >= scan_limit {
                            break 'scan;
                        }
                    }
                }
            }

            results.sort_by(|a, b| a.0.cmp(&b.0));
            let truncated = results.len() > limit;
            results.truncate(limit);

            Ok((results, truncated))
        },
    )?;

    let (results, truncated) = result;

    let dict = PyDict::new(py);

    let results_list = PyList::empty(py);
    for (key, size, mtime) in &results {
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
        ])?;
        results_list.append(tuple)?;
    }
    dict.set_item("results", results_list)?;
    dict.set_item("truncated", truncated)?;

    Ok(dict.into_any().unbind())
}

#[pyfunction]
pub fn build_object_cache(
    py: Python<'_>,
    bucket_path: &str,
    meta_root: &str,
    etag_index_path: &str,
) -> PyResult<Py<PyAny>> {
    let bucket_owned = bucket_path.to_owned();
    let meta_owned = meta_root.to_owned();
    let index_path_owned = etag_index_path.to_owned();

    let result: (HashMap<String, String>, Vec<(String, u64, f64, Option<String>)>, bool) =
        py.detach(move || -> PyResult<(
            HashMap<String, String>,
            Vec<(String, u64, f64, Option<String>)>,
            bool,
        )> {
            let mut meta_cache: HashMap<String, String> = HashMap::new();
            let mut index_mtime: f64 = 0.0;
            let mut etag_cache_changed = false;

            let index_p = Path::new(&index_path_owned);
            if index_p.is_file() {
                if let Ok(md) = fs::metadata(&index_path_owned) {
                    index_mtime = md
                        .modified()
                        .map(system_time_to_epoch)
                        .unwrap_or(0.0);
                }
                if let Ok(content) = fs::read_to_string(&index_path_owned) {
                    if let Ok(parsed) = serde_json::from_str::<HashMap<String, String>>(&content) {
                        meta_cache = parsed;
                    }
                }
            }

            let meta_p = Path::new(&meta_owned);
            let mut needs_rebuild = false;

            if meta_p.is_dir() && index_mtime > 0.0 {
                fn check_newer(dir: &Path, index_mtime: f64) -> bool {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return false,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            if check_newer(&entry.path(), index_mtime) {
                                return true;
                            }
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                if name.ends_with(".meta.json") || name == "_index.json" {
                                    if let Ok(md) = entry.metadata() {
                                        let mt = md
                                            .modified()
                                            .map(system_time_to_epoch)
                                            .unwrap_or(0.0);
                                        if mt > index_mtime {
                                            return true;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    false
                }
                needs_rebuild = check_newer(meta_p, index_mtime);
            } else if meta_cache.is_empty() {
                needs_rebuild = true;
            }

            if needs_rebuild && meta_p.is_dir() {
                let meta_str = meta_owned.clone();
                let meta_len = meta_str.len() + 1;
                let mut index_files: Vec<String> = Vec::new();
                let mut legacy_meta_files: Vec<(String, String)> = Vec::new();

                fn collect_meta(
                    dir: &Path,
                    meta_len: usize,
                    index_files: &mut Vec<String>,
                    legacy_meta_files: &mut Vec<(String, String)>,
                ) {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            collect_meta(&entry.path(), meta_len, index_files, legacy_meta_files);
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                let full = entry.path().to_string_lossy().to_string();
                                if name == "_index.json" {
                                    index_files.push(full);
                                } else if name.ends_with(".meta.json") {
                                    if full.len() > meta_len {
                                        let rel = &full[meta_len..];
                                        let key = if rel.len() > 10 {
                                            rel[..rel.len() - 10].replace('\\', "/")
                                        } else {
                                            continue;
                                        };
                                        legacy_meta_files.push((key, full));
                                    }
                                }
                            }
                        }
                    }
                }

                collect_meta(
                    meta_p,
                    meta_len,
                    &mut index_files,
                    &mut legacy_meta_files,
                );

                meta_cache.clear();

                for idx_path in &index_files {
                    if let Ok(content) = fs::read_to_string(idx_path) {
                        if let Ok(idx_data) = serde_json::from_str::<HashMap<String, Value>>(&content) {
                            let rel_dir = if idx_path.len() > meta_len {
                                let r = &idx_path[meta_len..];
                                r.replace('\\', "/")
                            } else {
                                String::new()
                            };
                            let dir_prefix = if rel_dir.ends_with("/_index.json") {
                                &rel_dir[..rel_dir.len() - "/_index.json".len()]
                            } else {
                                ""
                            };
                            for (entry_name, entry_data) in &idx_data {
                                let key = if dir_prefix.is_empty() {
                                    entry_name.clone()
                                } else {
                                    format!("{}/{}", dir_prefix, entry_name)
                                };
                                if let Some(meta_obj) = entry_data.get("metadata") {
                                    if let Some(etag) = meta_obj.get("__etag__") {
                                        if let Some(etag_str) = etag.as_str() {
                                            meta_cache.insert(key, etag_str.to_owned());
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                for (key, path) in &legacy_meta_files {
                    if meta_cache.contains_key(key) {
                        continue;
                    }
                    if let Ok(content) = fs::read(path) {
                        if let Some(etag) = extract_etag_from_meta_bytes(&content) {
                            meta_cache.insert(key.clone(), etag);
                        }
                    }
                }

                etag_cache_changed = true;
            }

            let bucket_p = Path::new(&bucket_owned);
            let bucket_len = bucket_owned.len() + 1;
            let mut objects: Vec<(String, u64, f64, Option<String>)> = Vec::new();

            if bucket_p.is_dir() {
                let mut stack = vec![bucket_p.to_path_buf()];
                while let Some(current) = stack.pop() {
                    let entries = match fs::read_dir(&current) {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            if full_str.len() > bucket_len {
                                let first_part: &str = if let Some(sep_pos) =
                                    full_str[bucket_len..].find(|c: char| c == '\\' || c == '/')
                                {
                                    &full_str[bucket_len..bucket_len + sep_pos]
                                } else {
                                    &full_str[bucket_len..]
                                };
                                if INTERNAL_FOLDERS.contains(&first_part) {
                                    continue;
                                }
                            } else if let Some(name) = entry.file_name().to_str() {
                                if INTERNAL_FOLDERS.contains(&name) {
                                    continue;
                                }
                            }
                            stack.push(full);
                        } else if ft.is_file() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            if full_str.len() <= bucket_len {
                                continue;
                            }
                            let rel = &full_str[bucket_len..];
                            let first_part: &str =
                                if let Some(sep_pos) = rel.find(|c: char| c == '\\' || c == '/') {
                                    &rel[..sep_pos]
                                } else {
                                    rel
                                };
                            if INTERNAL_FOLDERS.contains(&first_part) {
                                continue;
                            }
                            let key = rel.replace('\\', "/");
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                let etag = meta_cache.get(&key).cloned();
                                objects.push((key, size, mtime, etag));
                            }
                        }
                    }
                }
            }

            Ok((meta_cache, objects, etag_cache_changed))
        })?;

    let (meta_cache, objects, etag_cache_changed) = result;

    let dict = PyDict::new(py);

    let cache_dict = PyDict::new(py);
    for (k, v) in &meta_cache {
        cache_dict.set_item(k, v)?;
    }
    dict.set_item("etag_cache", cache_dict)?;

    let objects_list = PyList::empty(py);
    for (key, size, mtime, etag) in &objects {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        objects_list.append(tuple)?;
    }
    dict.set_item("objects", objects_list)?;
    dict.set_item("etag_cache_changed", etag_cache_changed)?;

    Ok(dict.into_any().unbind())
}
112
myfsio_core/src/streaming.rs
Normal file
@@ -0,0 +1,112 @@
use md5::{Digest, Md5};
use pyo3::exceptions::{PyIOError, PyValueError};
use pyo3::prelude::*;
use std::fs::{self, File};
use std::io::{Read, Write};
use uuid::Uuid;

const DEFAULT_CHUNK_SIZE: usize = 262144;

#[pyfunction]
#[pyo3(signature = (stream, tmp_dir, chunk_size=DEFAULT_CHUNK_SIZE))]
pub fn stream_to_file_with_md5(
    py: Python<'_>,
    stream: &Bound<'_, PyAny>,
    tmp_dir: &str,
    chunk_size: usize,
) -> PyResult<(String, String, u64)> {
    let chunk_size = if chunk_size == 0 {
        DEFAULT_CHUNK_SIZE
    } else {
        chunk_size
    };

    fs::create_dir_all(tmp_dir)
        .map_err(|e| PyIOError::new_err(format!("Failed to create tmp dir: {}", e)))?;

    let tmp_name = format!("{}.tmp", Uuid::new_v4().as_hyphenated());
    let tmp_path_buf = std::path::PathBuf::from(tmp_dir).join(&tmp_name);
    let tmp_path = tmp_path_buf.to_string_lossy().into_owned();

    let mut file = File::create(&tmp_path)
        .map_err(|e| PyIOError::new_err(format!("Failed to create temp file: {}", e)))?;
    let mut hasher = Md5::new();
    let mut total_bytes: u64 = 0;

    let result: PyResult<()> = (|| {
        loop {
            let chunk: Vec<u8> = stream.call_method1("read", (chunk_size,))?.extract()?;
            if chunk.is_empty() {
                break;
            }
            hasher.update(&chunk);
            file.write_all(&chunk)
                .map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
            total_bytes += chunk.len() as u64;

            py.check_signals()?;
        }
        file.sync_all()
            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
        Ok(())
    })();

    if let Err(e) = result {
        drop(file);
        let _ = fs::remove_file(&tmp_path);
        return Err(e);
    }

    drop(file);

    let md5_hex = format!("{:x}", hasher.finalize());
    Ok((tmp_path, md5_hex, total_bytes))
}

#[pyfunction]
pub fn assemble_parts_with_md5(
    py: Python<'_>,
    part_paths: Vec<String>,
    dest_path: &str,
) -> PyResult<String> {
    if part_paths.is_empty() {
        return Err(PyValueError::new_err("No parts to assemble"));
    }

    let dest = dest_path.to_owned();
    let parts = part_paths;

    py.detach(move || {
        if let Some(parent) = std::path::Path::new(&dest).parent() {
            fs::create_dir_all(parent)
                .map_err(|e| PyIOError::new_err(format!("Failed to create dest dir: {}", e)))?;
        }

        let mut target = File::create(&dest)
            .map_err(|e| PyIOError::new_err(format!("Failed to create dest file: {}", e)))?;
        let mut hasher = Md5::new();
        let mut buf = vec![0u8; 1024 * 1024];

        for part_path in &parts {
            let mut part = File::open(part_path)
                .map_err(|e| PyIOError::new_err(format!("Failed to open part {}: {}", part_path, e)))?;
            loop {
                let n = part
                    .read(&mut buf)
                    .map_err(|e| PyIOError::new_err(format!("Failed to read part: {}", e)))?;
                if n == 0 {
                    break;
                }
                hasher.update(&buf[..n]);
                target
                    .write_all(&buf[..n])
                    .map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
            }
        }

        target.sync_all()
            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;

        Ok(format!("{:x}", hasher.finalize()))
    })
}
103
run.py
@@ -23,6 +23,7 @@ from typing import Optional

from app import create_api_app, create_ui_app
from app.config import AppConfig
from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key


def _server_host() -> str:
@@ -87,21 +88,121 @@ def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None)
    app.run(host=_server_host(), port=port, debug=debug)


def reset_credentials() -> None:
    import json
    import secrets
    from cryptography.fernet import Fernet

    config = AppConfig.from_env()
    iam_path = config.iam_config_path
    encryption_key = config.secret_key

    access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
    secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
    custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())

    fernet = Fernet(_derive_fernet_key(encryption_key)) if encryption_key else None

    raw_config = None
    if iam_path.exists():
        try:
            raw_bytes = iam_path.read_bytes()
            from app.iam import _IAM_ENCRYPTED_PREFIX
            if raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX):
                if fernet:
                    try:
                        content = fernet.decrypt(raw_bytes[len(_IAM_ENCRYPTED_PREFIX):]).decode("utf-8")
                        raw_config = json.loads(content)
                    except Exception:
                        print("WARNING: Could not decrypt existing IAM config. Creating fresh config.")
                else:
                    print("WARNING: IAM config is encrypted but no SECRET_KEY available. Creating fresh config.")
            else:
                try:
                    raw_config = json.loads(raw_bytes.decode("utf-8"))
                except json.JSONDecodeError:
                    print("WARNING: Existing IAM config is corrupted. Creating fresh config.")
        except OSError:
            pass

    if raw_config and raw_config.get("users"):
        admin_user = None
        for user in raw_config["users"]:
            policies = user.get("policies", [])
            for p in policies:
                actions = p.get("actions", [])
                if "iam:*" in actions or "*" in actions:
                    admin_user = user
                    break
            if admin_user:
                break
        if not admin_user:
            admin_user = raw_config["users"][0]

        admin_user["access_key"] = access_key
        admin_user["secret_key"] = secret_key
    else:
        raw_config = {
            "users": [
                {
                    "access_key": access_key,
                    "secret_key": secret_key,
                    "display_name": "Local Admin",
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }

    json_text = json.dumps(raw_config, indent=2)
    iam_path.parent.mkdir(parents=True, exist_ok=True)
    temp_path = iam_path.with_suffix(".json.tmp")
    if fernet:
        from app.iam import _IAM_ENCRYPTED_PREFIX
        encrypted = fernet.encrypt(json_text.encode("utf-8"))
        temp_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
    else:
        temp_path.write_text(json_text, encoding="utf-8")
    temp_path.replace(iam_path)

    print(f"\n{'='*60}")
    print("MYFSIO - ADMIN CREDENTIALS RESET")
    print(f"{'='*60}")
    if custom_keys:
        print(f"Access Key: {access_key} (from ADMIN_ACCESS_KEY)")
        print(f"Secret Key: {'(from ADMIN_SECRET_KEY)' if os.environ.get('ADMIN_SECRET_KEY', '').strip() else secret_key}")
    else:
        print(f"Access Key: {access_key}")
        print(f"Secret Key: {secret_key}")
    print(f"{'='*60}")
    if fernet:
        print("IAM config saved (encrypted).")
    else:
        print(f"IAM config saved to: {iam_path}")
    print(f"{'='*60}\n")


if __name__ == "__main__":
    multiprocessing.freeze_support()
    if _is_frozen():
        multiprocessing.set_start_method("spawn", force=True)

    parser = argparse.ArgumentParser(description="Run the S3 clone services.")
    parser.add_argument("--mode", choices=["api", "ui", "both"], default="both")
    parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
    parser.add_argument("--api-port", type=int, default=5000)
    parser.add_argument("--ui-port", type=int, default=5100)
    parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
    parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
    parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
    parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
    parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
    args = parser.parse_args()

    if args.reset_cred or args.mode == "reset-cred":
        reset_credentials()
        sys.exit(0)

    if args.check_config or args.show_config:
        config = AppConfig.from_env()
        config.print_startup_summary()
@@ -1154,39 +1154,20 @@ html.sidebar-will-collapse .sidebar-user {
  position: relative;
  border: 1px solid var(--myfsio-card-border) !important;
  border-radius: 1rem !important;
  overflow: hidden;
  overflow: visible;
  transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
}

.iam-user-card::before {
  content: '';
  position: absolute;
  top: 0;
  left: 0;
  right: 0;
  height: 4px;
  background: linear-gradient(90deg, #3b82f6, #8b5cf6);
  opacity: 0;
  transition: opacity 0.2s ease;
}

.iam-user-card:hover {
  transform: translateY(-2px);
  box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.12), 0 4px 8px -4px rgba(0, 0, 0, 0.08);
  border-color: var(--myfsio-accent) !important;
}

.iam-user-card:hover::before {
  opacity: 1;
}

[data-theme='dark'] .iam-user-card:hover {
  box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.4), 0 4px 8px -4px rgba(0, 0, 0, 0.3);
}

.iam-admin-card::before {
  background: linear-gradient(90deg, #f59e0b, #ef4444);
}

.iam-role-badge {
  display: inline-flex;
@@ -137,11 +137,11 @@
|
||||
const versionPanel = document.getElementById('version-panel');
|
||||
const versionList = document.getElementById('version-list');
|
||||
const refreshVersionsButton = document.getElementById('refreshVersionsButton');
|
||||
const archivedCard = document.getElementById('archived-objects-card');
|
||||
const archivedBody = archivedCard?.querySelector('[data-archived-body]');
|
||||
const archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
|
||||
const archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
|
||||
const archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
|
||||
let archivedCard = document.getElementById('archived-objects-card');
|
||||
let archivedBody = archivedCard?.querySelector('[data-archived-body]');
|
||||
let archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
|
||||
let archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
|
||||
let archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
|
||||
let versioningEnabled = objectsContainer?.dataset.versioning === 'true';
|
||||
const versionsCache = new Map();
|
||||
let activeRow = null;
|
||||
@@ -167,6 +167,8 @@
|
||||
let pageSize = 5000;
|
||||
let currentPrefix = '';
|
||||
let allObjects = [];
|
||||
let streamFolders = [];
|
||||
let useDelimiterMode = true;
|
||||
let urlTemplates = null;
|
||||
let streamAbortController = null;
|
||||
let useStreaming = !!objectsStreamUrl;
|
||||
@@ -186,7 +188,7 @@
|
||||
let renderedRange = { start: 0, end: 0 };
|
||||
|
||||
let memoizedVisibleItems = null;
|
||||
let memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
|
||||
let memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
|
||||
const createObjectRow = (obj, displayKey = null) => {
|
||||
const tr = document.createElement('tr');
|
||||
@@ -319,10 +321,13 @@
|
||||
`;
|
||||
};
|
||||
|
||||
let bucketTotalObjects = objectsContainer ? parseInt(objectsContainer.dataset.bucketTotalObjects || '0', 10) : 0;
|
||||
|
||||
const updateObjectCountBadge = () => {
|
||||
if (!objectCountBadge) return;
|
||||
if (totalObjectCount === 0) {
|
||||
objectCountBadge.textContent = '0 objects';
|
||||
if (useDelimiterMode) {
|
||||
const total = bucketTotalObjects || totalObjectCount;
|
||||
objectCountBadge.textContent = `${total.toLocaleString()} object${total !== 1 ? 's' : ''}`;
|
||||
} else {
|
||||
objectCountBadge.textContent = `${totalObjectCount.toLocaleString()} object${totalObjectCount !== 1 ? 's' : ''}`;
|
||||
}
|
||||
@@ -349,6 +354,7 @@
|
||||
const computeVisibleItems = (forceRecompute = false) => {
|
||||
const currentInputs = {
|
||||
objectCount: allObjects.length,
|
||||
folderCount: streamFolders.length,
|
||||
prefix: currentPrefix,
|
||||
filterTerm: currentFilterTerm,
|
||||
sortField: currentSortField,
|
||||
@@ -358,6 +364,7 @@
|
||||
if (!forceRecompute &&
|
||||
memoizedVisibleItems !== null &&
|
||||
memoizedInputs.objectCount === currentInputs.objectCount &&
|
||||
memoizedInputs.folderCount === currentInputs.folderCount &&
|
||||
memoizedInputs.prefix === currentInputs.prefix &&
|
||||
memoizedInputs.filterTerm === currentInputs.filterTerm &&
|
||||
memoizedInputs.sortField === currentInputs.sortField &&
|
||||
@@ -366,36 +373,49 @@
|
||||
}
|
||||
|
||||
const items = [];
|
||||
const folders = new Set();
|
||||
|
||||
allObjects.forEach(obj => {
|
||||
if (!obj.key.startsWith(currentPrefix)) return;
|
||||
if (searchResults !== null) {
|
||||
searchResults.forEach(obj => {
|
||||
items.push({ type: 'file', data: obj, displayKey: obj.key });
|
||||
});
|
||||
} else if (useDelimiterMode && streamFolders.length > 0) {
|
||||
streamFolders.forEach(folderPath => {
|
||||
const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, '');
|
||||
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
|
||||
});
|
||||
allObjects.forEach(obj => {
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
if (!remainder) return;
|
||||
items.push({ type: 'file', data: obj, displayKey: remainder });
|
||||
});
|
||||
} else {
|
||||
const folders = new Set();
|
||||
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
allObjects.forEach(obj => {
|
||||
if (!obj.key.startsWith(currentPrefix)) return;
|
||||
|
||||
if (!remainder) return;
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
|
||||
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
|
||||
const slashIndex = remainder.indexOf('/');
|
||||
if (!remainder) return;
|
||||
|
||||
if (slashIndex === -1 && !isFolderMarker) {
|
||||
if (!currentFilterTerm || remainder.toLowerCase().includes(currentFilterTerm)) {
|
||||
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
|
||||
const slashIndex = remainder.indexOf('/');
|
||||
|
||||
if (slashIndex === -1 && !isFolderMarker) {
|
||||
items.push({ type: 'file', data: obj, displayKey: remainder });
|
||||
}
|
||||
} else {
|
||||
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
|
||||
? slashIndex
|
||||
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
|
||||
const folderName = remainder.slice(0, effectiveSlashIndex);
|
||||
const folderPath = currentPrefix + folderName + '/';
|
||||
if (!folders.has(folderPath)) {
|
||||
folders.add(folderPath);
|
||||
if (!currentFilterTerm || folderName.toLowerCase().includes(currentFilterTerm)) {
|
||||
} else {
|
||||
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
|
||||
? slashIndex
|
||||
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
|
||||
const folderName = remainder.slice(0, effectiveSlashIndex);
|
||||
const folderPath = currentPrefix + folderName + '/';
|
||||
if (!folders.has(folderPath)) {
|
||||
folders.add(folderPath);
|
||||
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
items.sort((a, b) => {
|
||||
if (a.type === 'folder' && b.type === 'file') return -1;
|
||||
@@ -471,7 +491,7 @@
|
||||
renderedRange = { start: -1, end: -1 };
|
||||
|
||||
if (visibleItems.length === 0) {
|
||||
if (allObjects.length === 0 && !hasMoreObjects) {
|
||||
if (allObjects.length === 0 && streamFolders.length === 0 && !hasMoreObjects) {
|
||||
showEmptyState();
|
||||
} else {
|
||||
objectsTableBody.innerHTML = `
|
||||
@@ -500,15 +520,7 @@
const updateFolderViewStatus = () => {
    const folderViewStatusEl = document.getElementById('folder-view-status');
    if (!folderViewStatusEl) return;

    if (currentPrefix) {
        const folderCount = visibleItems.filter(i => i.type === 'folder').length;
        const fileCount = visibleItems.filter(i => i.type === 'file').length;
        folderViewStatusEl.innerHTML = `<span class="text-muted">${folderCount} folder${folderCount !== 1 ? 's' : ''}, ${fileCount} file${fileCount !== 1 ? 's' : ''} in this view</span>`;
        folderViewStatusEl.classList.remove('d-none');
    } else {
        folderViewStatusEl.classList.add('d-none');
    }
    folderViewStatusEl.classList.add('d-none');
};

const processStreamObject = (obj) => {
@@ -536,21 +548,30 @@
let lastStreamRenderTime = 0;
const STREAM_RENDER_THROTTLE_MS = 500;

const buildBottomStatusText = (complete) => {
    if (!complete) {
        const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
        return `${loadedObjectCount.toLocaleString()}${countText} loading...`;
    }
    const parts = [];
    if (useDelimiterMode && streamFolders.length > 0) {
        parts.push(`${streamFolders.length.toLocaleString()} folder${streamFolders.length !== 1 ? 's' : ''}`);
    }
    parts.push(`${loadedObjectCount.toLocaleString()} object${loadedObjectCount !== 1 ? 's' : ''}`);
    return parts.join(', ');
};

const flushPendingStreamObjects = () => {
    if (pendingStreamObjects.length === 0) return;
    const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
    batch.forEach(obj => {
        loadedObjectCount++;
        allObjects.push(obj);
    });
    if (pendingStreamObjects.length > 0) {
        const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
        batch.forEach(obj => {
            loadedObjectCount++;
            allObjects.push(obj);
        });
    }
    updateObjectCountBadge();
    if (loadMoreStatus) {
        if (streamingComplete) {
            loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
        } else {
            const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
            loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()}${countText} loading...`;
        }
        loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
    }
    if (objectsLoadingRow && objectsLoadingRow.parentNode) {
        const loadingText = objectsLoadingRow.querySelector('p');
@@ -585,8 +606,9 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
streamFolders = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
pendingStreamObjects = [];
lastStreamRenderTime = 0;

@@ -595,6 +617,7 @@
try {
    const params = new URLSearchParams();
    if (currentPrefix) params.set('prefix', currentPrefix);
    if (useDelimiterMode) params.set('delimiter', '/');

    const response = await fetch(`${objectsStreamUrl}?${params}`, {
        signal: streamAbortController.signal
@@ -639,6 +662,10 @@
        if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
    }
    break;
case 'folder':
    streamFolders.push(msg.prefix);
    scheduleStreamRender();
    break;
case 'object':
    pendingStreamObjects.push(processStreamObject(msg));
    if (pendingStreamObjects.length >= STREAM_RENDER_BATCH) {
@@ -675,6 +702,7 @@
flushPendingStreamObjects();
hasMoreObjects = false;
totalObjectCount = loadedObjectCount;
if (!currentPrefix) bucketTotalObjects = totalObjectCount;
updateObjectCountBadge();

if (objectsLoadingRow && objectsLoadingRow.parentNode) {
@@ -682,7 +710,7 @@
}

if (loadMoreStatus) {
    loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
    loadMoreStatus.textContent = buildBottomStatusText(true);
}
refreshVirtualList();
renderBreadcrumb(currentPrefix);
@@ -710,8 +738,9 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
streamFolders = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
}

if (append && loadMoreSpinner) {
@@ -738,6 +767,7 @@
}

totalObjectCount = data.total_count || 0;
if (!append && !currentPrefix) bucketTotalObjects = totalObjectCount;
nextContinuationToken = data.next_continuation_token;

if (!append && objectsLoadingRow) {
@@ -913,7 +943,7 @@
    });
}

const hasFolders = () => allObjects.some(obj => obj.key.includes('/'));
const hasFolders = () => streamFolders.length > 0 || allObjects.some(obj => obj.key.includes('/'));

const getFoldersAtPrefix = (prefix) => {
    const folders = new Set();
@@ -940,6 +970,9 @@
};

const countObjectsInFolder = (folderPrefix) => {
    if (useDelimiterMode) {
        return { count: 0, mayHaveMore: true };
    }
    const count = allObjects.filter(obj => obj.key.startsWith(folderPrefix)).length;
    return { count, mayHaveMore: hasMoreObjects };
};
@@ -1018,7 +1051,13 @@
const createFolderRow = (folderPath, displayName = null) => {
    const folderName = displayName || folderPath.slice(currentPrefix.length).replace(/\/$/, '');
    const { count: objectCount, mayHaveMore } = countObjectsInFolder(folderPath);
    const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
    let countLine = '';
    if (useDelimiterMode) {
        countLine = '';
    } else {
        const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
        countLine = `<div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>`;
    }

    const tr = document.createElement('tr');
    tr.className = 'folder-row';
@@ -1036,7 +1075,7 @@
        </svg>
        <span>${escapeHtml(folderName)}/</span>
    </div>
    <div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>
    ${countLine}
</td>
<td class="text-end text-nowrap">
    <span class="text-muted small">—</span>
@@ -1537,7 +1576,7 @@

const confirmVersionRestore = (row, version, label = null, onConfirm) => {
    if (!version) return;
    const timestamp = version.archived_at ? new Date(version.archived_at).toLocaleString() : version.version_id;
    const timestamp = (version.archived_at || version.last_modified) ? new Date(version.archived_at || version.last_modified).toLocaleString() : version.version_id;
    const sizeLabel = formatBytes(Number(version.size) || 0);
    const reasonLabel = describeVersionReason(version.reason);
    const targetLabel = label || row?.dataset.key || 'this object';
@@ -1610,7 +1649,7 @@

const latestCell = document.createElement('td');
if (item.latest) {
    const ts = item.latest.archived_at ? new Date(item.latest.archived_at).toLocaleString() : item.latest.version_id;
    const ts = (item.latest.archived_at || item.latest.last_modified) ? new Date(item.latest.archived_at || item.latest.last_modified).toLocaleString() : item.latest.version_id;
    const sizeLabel = formatBytes(Number(item.latest.size) || 0);
    latestCell.innerHTML = `<div class="small">${ts}</div><div class="text-muted small">${sizeLabel} · ${describeVersionReason(item.latest.reason)}</div>`;
} else {
@@ -1737,6 +1776,15 @@
    loadArchivedObjects();
}

const propertiesTab = document.getElementById('properties-tab');
if (propertiesTab) {
    propertiesTab.addEventListener('shown.bs.tab', () => {
        if (archivedCard && archivedEndpoint) {
            loadArchivedObjects();
        }
    });
}

async function restoreVersion(row, version) {
    if (!row || !version?.version_id) return;
    const template = row.dataset.restoreTemplate;
@@ -1785,7 +1833,7 @@
badge.textContent = `#${versionNumber}`;
const title = document.createElement('div');
title.className = 'fw-semibold small';
const timestamp = entry.archived_at ? new Date(entry.archived_at).toLocaleString() : entry.version_id;
const timestamp = (entry.archived_at || entry.last_modified) ? new Date(entry.archived_at || entry.last_modified).toLocaleString() : entry.version_id;
title.textContent = timestamp;
heading.appendChild(badge);
heading.appendChild(title);
@@ -2044,8 +2092,63 @@
    }
};

let searchDebounceTimer = null;
let searchAbortController = null;
let searchResults = null;

const performServerSearch = async (term) => {
    if (searchAbortController) searchAbortController.abort();
    searchAbortController = new AbortController();

    try {
        const params = new URLSearchParams({ q: term, limit: '500' });
        if (currentPrefix) params.set('prefix', currentPrefix);
        const searchUrl = objectsStreamUrl.replace('/stream', '/search');
        const response = await fetch(`${searchUrl}?${params}`, {
            signal: searchAbortController.signal
        });
        if (!response.ok) throw new Error(`HTTP ${response.status}`);
        const data = await response.json();
        searchResults = (data.results || []).map(obj => processStreamObject(obj));
        memoizedVisibleItems = null;
        memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
        refreshVirtualList();
        if (loadMoreStatus) {
            const countText = searchResults.length.toLocaleString();
            const truncated = data.truncated ? '+' : '';
            loadMoreStatus.textContent = `${countText}${truncated} result${searchResults.length !== 1 ? 's' : ''}`;
        }
    } catch (e) {
        if (e.name === 'AbortError') return;
        if (loadMoreStatus) {
            loadMoreStatus.textContent = 'Search failed';
        }
    }
};

document.getElementById('object-search')?.addEventListener('input', (event) => {
    currentFilterTerm = event.target.value.toLowerCase();
    const newTerm = event.target.value.toLowerCase();
    const wasFiltering = currentFilterTerm.length > 0;
    const isFiltering = newTerm.length > 0;
    currentFilterTerm = newTerm;

    clearTimeout(searchDebounceTimer);

    if (isFiltering) {
        searchDebounceTimer = setTimeout(() => performServerSearch(newTerm), 300);
        return;
    }

    if (!isFiltering && wasFiltering) {
        if (searchAbortController) searchAbortController.abort();
        searchResults = null;
        memoizedVisibleItems = null;
        memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
        if (loadMoreStatus) {
            loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
        }
    }

    updateFilterWarning();
    refreshVirtualList();
});
@@ -2086,7 +2189,18 @@
var searchInput = document.getElementById('object-search');
if (searchInput && document.activeElement === searchInput) {
    searchInput.value = '';
    const wasFiltering = currentFilterTerm.length > 0;
    currentFilterTerm = '';
    if (wasFiltering) {
        clearTimeout(searchDebounceTimer);
        if (searchAbortController) searchAbortController.abort();
        searchResults = null;
        memoizedVisibleItems = null;
        memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
        if (loadMoreStatus) {
            loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
        }
    }
    refreshVirtualList();
    searchInput.blur();
}
@@ -2816,7 +2930,16 @@
    uploadFileInput.value = '';
}

loadObjects(false);
const previousKey = activeRow?.dataset.key || null;
loadObjects(false).then(() => {
    if (previousKey) {
        const newRow = document.querySelector(`[data-object-row][data-key="${CSS.escape(previousKey)}"]`);
        if (newRow) {
            selectRow(newRow);
            if (versioningEnabled) loadObjectVersions(newRow, { force: true });
        }
    }
});

const successCount = uploadSuccessFiles.length;
const errorCount = uploadErrorFiles.length;
@@ -4154,6 +4277,47 @@
var archivedCardEl = document.getElementById('archived-objects-card');
if (archivedCardEl) {
    archivedCardEl.style.display = enabled ? '' : 'none';
} else if (enabled) {
    var endpoint = window.BucketDetailConfig?.endpoints?.archivedObjects || '';
    if (endpoint) {
        var html = '<div class="card shadow-sm mt-4" id="archived-objects-card" data-archived-endpoint="' + endpoint + '">' +
            '<div class="card-header d-flex justify-content-between align-items-center flex-wrap gap-2">' +
            '<div class="d-flex align-items-center">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-warning me-2" viewBox="0 0 16 16">' +
            '<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
            '</svg><span class="fw-semibold">Archived Objects</span></div>' +
            '<div class="d-flex align-items-center gap-2">' +
            '<span class="badge text-bg-secondary" data-archived-count>0 items</span>' +
            '<button class="btn btn-outline-secondary btn-sm" type="button" data-archived-refresh>' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
            '<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>' +
            '<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>' +
            '</svg>Refresh</button></div></div>' +
            '<div class="card-body">' +
            '<p class="text-muted small mb-3">Objects that have been deleted while versioning is enabled. Their previous versions remain available until you restore or purge them.</p>' +
            '<div class="table-responsive"><table class="table table-sm table-hover align-middle mb-0">' +
            '<thead class="table-light"><tr>' +
            '<th scope="col"><svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 text-muted" viewBox="0 0 16 16">' +
            '<path d="M4 0h5.293A1 1 0 0 1 10 .293L13.707 4a1 1 0 0 1 .293.707V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2zm5.5 1.5v2a1 1 0 0 0 1 1h2l-3-3z"/>' +
            '</svg>Key</th>' +
            '<th scope="col">Latest Version</th>' +
            '<th scope="col" class="text-center">Versions</th>' +
            '<th scope="col" class="text-end">Actions</th>' +
            '</tr></thead>' +
            '<tbody data-archived-body><tr><td colspan="4" class="text-center text-muted py-4">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="mb-2 d-block mx-auto" viewBox="0 0 16 16">' +
            '<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
            '</svg>No archived objects</td></tr></tbody>' +
            '</table></div></div></div>';
        card.insertAdjacentHTML('afterend', html);
        archivedCard = document.getElementById('archived-objects-card');
        archivedBody = archivedCard.querySelector('[data-archived-body]');
        archivedCountBadge = archivedCard.querySelector('[data-archived-count]');
        archivedRefreshButton = archivedCard.querySelector('[data-archived-refresh]');
        archivedEndpoint = endpoint;
        archivedRefreshButton.addEventListener('click', function() { loadArchivedObjects(); });
        loadArchivedObjects();
    }
}

var dropZone = document.getElementById('objects-drop-zone');
@@ -4161,6 +4325,15 @@
    dropZone.setAttribute('data-versioning', enabled ? 'true' : 'false');
}

var bulkPurgeWrap = document.getElementById('bulkDeletePurgeWrap');
if (bulkPurgeWrap) {
    bulkPurgeWrap.classList.toggle('d-none', !enabled);
}
var singleDeleteVerWrap = document.getElementById('deleteObjectVersioningWrap');
if (singleDeleteVerWrap) {
    singleDeleteVerWrap.classList.toggle('d-none', !enabled);
}

if (!enabled) {
    var newForm = document.getElementById('enableVersioningForm');
    if (newForm) {

@@ -11,9 +11,11 @@ window.IAMManagement = (function() {
    var editUserModal = null;
    var deleteUserModal = null;
    var rotateSecretModal = null;
    var expiryModal = null;
    var currentRotateKey = null;
    var currentEditKey = null;
    var currentDeleteKey = null;
    var currentExpiryKey = null;

    var ALL_S3_ACTIONS = ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors'];

@@ -65,6 +67,7 @@ window.IAMManagement = (function() {
        setupEditUserModal();
        setupDeleteUserModal();
        setupRotateSecretModal();
        setupExpiryModal();
        setupFormHandlers();
        setupSearch();
        setupCopyAccessKeyButtons();
@@ -75,11 +78,13 @@ window.IAMManagement = (function() {
        var editModalEl = document.getElementById('editUserModal');
        var deleteModalEl = document.getElementById('deleteUserModal');
        var rotateModalEl = document.getElementById('rotateSecretModal');
        var expiryModalEl = document.getElementById('expiryModal');

        if (policyModalEl) policyModal = new bootstrap.Modal(policyModalEl);
        if (editModalEl) editUserModal = new bootstrap.Modal(editModalEl);
        if (deleteModalEl) deleteUserModal = new bootstrap.Modal(deleteModalEl);
        if (rotateModalEl) rotateSecretModal = new bootstrap.Modal(rotateModalEl);
        if (expiryModalEl) expiryModal = new bootstrap.Modal(expiryModalEl);
    }

    function setupJsonAutoIndent() {
@@ -97,6 +102,15 @@ window.IAMManagement = (function() {
            });
        });

        var accessKeyCopyButton = document.querySelector('[data-access-key-copy]');
        if (accessKeyCopyButton) {
            accessKeyCopyButton.addEventListener('click', async function() {
                var accessKeyInput = document.getElementById('disclosedAccessKeyValue');
                if (!accessKeyInput) return;
                await window.UICore.copyToClipboard(accessKeyInput.value, accessKeyCopyButton, 'Copy');
            });
        }

        var secretCopyButton = document.querySelector('[data-secret-copy]');
        if (secretCopyButton) {
            secretCopyButton.addEventListener('click', async function() {
@@ -143,6 +157,22 @@ window.IAMManagement = (function() {
        });
    }

    function generateSecureHex(byteCount) {
        var arr = new Uint8Array(byteCount);
        crypto.getRandomValues(arr);
        return Array.from(arr).map(function(b) { return b.toString(16).padStart(2, '0'); }).join('');
    }

    function generateSecureBase64(byteCount) {
        var arr = new Uint8Array(byteCount);
        crypto.getRandomValues(arr);
        var binary = '';
        for (var i = 0; i < arr.length; i++) {
            binary += String.fromCharCode(arr[i]);
        }
        return btoa(binary).replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
    }

    function setupCreateUserModal() {
        var createUserPoliciesEl = document.getElementById('createUserPolicies');

@@ -151,6 +181,22 @@ window.IAMManagement = (function() {
                applyPolicyTemplate(button.dataset.createPolicyTemplate, createUserPoliciesEl);
            });
        });

        var genAccessKeyBtn = document.getElementById('generateAccessKeyBtn');
        if (genAccessKeyBtn) {
            genAccessKeyBtn.addEventListener('click', function() {
                var input = document.getElementById('createUserAccessKey');
                if (input) input.value = generateSecureHex(8);
            });
        }

        var genSecretKeyBtn = document.getElementById('generateSecretKeyBtn');
        if (genSecretKeyBtn) {
            genSecretKeyBtn.addEventListener('click', function() {
                var input = document.getElementById('createUserSecretKey');
                if (input) input.value = generateSecureBase64(24);
            });
        }
    }

    function setupEditUserModal() {
@@ -271,6 +317,77 @@ window.IAMManagement = (function() {
        }
    }

    function openExpiryModal(key, expiresAt) {
        currentExpiryKey = key;
        var label = document.getElementById('expiryUserLabel');
        var input = document.getElementById('expiryDateInput');
        var form = document.getElementById('expiryForm');
        if (label) label.textContent = key;
        if (expiresAt) {
            try {
                var dt = new Date(expiresAt);
                var local = new Date(dt.getTime() - dt.getTimezoneOffset() * 60000);
                if (input) input.value = local.toISOString().slice(0, 16);
            } catch(e) {
                if (input) input.value = '';
            }
        } else {
            if (input) input.value = '';
        }
        if (form) form.action = endpoints.updateExpiry.replace('ACCESS_KEY', key);
        var modalEl = document.getElementById('expiryModal');
        if (modalEl) {
            var modal = bootstrap.Modal.getOrCreateInstance(modalEl);
            modal.show();
        }
    }

    function setupExpiryModal() {
        document.querySelectorAll('[data-expiry-user]').forEach(function(btn) {
            btn.addEventListener('click', function(e) {
                e.preventDefault();
                openExpiryModal(btn.dataset.expiryUser, btn.dataset.expiresAt || '');
            });
        });

        document.querySelectorAll('[data-expiry-preset]').forEach(function(btn) {
            btn.addEventListener('click', function() {
                var preset = btn.dataset.expiryPreset;
                var input = document.getElementById('expiryDateInput');
                if (!input) return;
                if (preset === 'clear') {
                    input.value = '';
                    return;
                }
                var now = new Date();
                var ms = 0;
                if (preset === '1h') ms = 3600000;
                else if (preset === '24h') ms = 86400000;
                else if (preset === '7d') ms = 7 * 86400000;
                else if (preset === '30d') ms = 30 * 86400000;
                else if (preset === '90d') ms = 90 * 86400000;
                var future = new Date(now.getTime() + ms);
                var local = new Date(future.getTime() - future.getTimezoneOffset() * 60000);
                input.value = local.toISOString().slice(0, 16);
            });
        });

        var expiryForm = document.getElementById('expiryForm');
        if (expiryForm) {
            expiryForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(expiryForm, {
                    successMessage: 'Expiry updated',
                    onSuccess: function() {
                        var modalEl = document.getElementById('expiryModal');
                        if (modalEl) bootstrap.Modal.getOrCreateInstance(modalEl).hide();
                        window.location.reload();
                    }
                });
            });
        }
    }

    function createUserCardHtml(accessKey, displayName, policies) {
        var admin = isAdminUser(policies);
        var cardClass = 'card h-100 iam-user-card' + (admin ? ' iam-admin-card' : '');
@@ -324,6 +441,8 @@ window.IAMManagement = (function() {
            '<ul class="dropdown-menu dropdown-menu-end">' +
            '<li><button class="dropdown-item" type="button" data-edit-user="' + esc(accessKey) + '" data-display-name="' + esc(displayName) + '">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg>Edit Name</button></li>' +
            '<li><button class="dropdown-item" type="button" data-expiry-user="' + esc(accessKey) + '" data-expires-at="">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/><path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/></svg>Set Expiry</button></li>' +
            '<li><button class="dropdown-item" type="button" data-rotate-user="' + esc(accessKey) + '">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/><path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/></svg>Rotate Secret</button></li>' +
            '<li><hr class="dropdown-divider"></li>' +
@@ -379,6 +498,14 @@ window.IAMManagement = (function() {
            });
        }

        var expiryBtn = cardElement.querySelector('[data-expiry-user]');
        if (expiryBtn) {
            expiryBtn.addEventListener('click', function(e) {
                e.preventDefault();
                openExpiryModal(accessKey, '');
            });
        }

        var policyBtn = cardElement.querySelector('[data-policy-editor]');
        if (policyBtn) {
            policyBtn.addEventListener('click', function() {
@@ -428,10 +555,15 @@ window.IAMManagement = (function() {
            '</svg>' +
            '<div class="flex-grow-1">' +
            '<div class="fw-semibold">New user created: <code>' + window.UICore.escapeHtml(data.access_key) + '</code></div>' +
            '<p class="mb-2 small">This secret is only shown once. Copy it now and store it securely.</p>' +
            '<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>' +
            '</div>' +
            '<button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>' +
            '</div>' +
            '<div class="input-group mb-2">' +
            '<span class="input-group-text"><strong>Access key</strong></span>' +
            '<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.access_key) + '" readonly />' +
            '<button class="btn btn-outline-primary" type="button" id="copyNewUserAccessKey">Copy</button>' +
            '</div>' +
            '<div class="input-group">' +
            '<span class="input-group-text"><strong>Secret key</strong></span>' +
            '<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.secret_key) + '" readonly id="newUserSecret" />' +
@@ -440,6 +572,9 @@ window.IAMManagement = (function() {
        var container = document.querySelector('.page-header');
        if (container) {
            container.insertAdjacentHTML('afterend', alertHtml);
            document.getElementById('copyNewUserAccessKey').addEventListener('click', async function() {
                await window.UICore.copyToClipboard(data.access_key, this, 'Copy');
            });
            document.getElementById('copyNewUserSecret').addEventListener('click', async function() {
                await window.UICore.copyToClipboard(data.secret_key, this, 'Copy');
            });

@@ -171,6 +171,7 @@
data-bulk-download-endpoint="{{ url_for('ui.bulk_download_objects', bucket_name=bucket_name) }}"
data-folders-url="{{ folders_url }}"
data-buckets-for-copy-url="{{ buckets_for_copy_url }}"
data-bucket-total-objects="{{ bucket_stats.get('objects', 0) }}"
>
<table class="table table-hover align-middle mb-0" id="objects-table" style="table-layout: fixed;">
<thead class="table-light">
@@ -2272,13 +2273,11 @@
</div>
<ul class="list-group mb-3" id="bulkDeleteList" style="max-height: 200px; overflow-y: auto;"></ul>
<div class="text-muted small" id="bulkDeleteStatus"></div>
{% if versioning_enabled %}
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3">
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3 {% if not versioning_enabled %}d-none{% endif %}" id="bulkDeletePurgeWrap">
<input class="form-check-input" type="checkbox" id="bulkDeletePurge" />
<label class="form-check-label" for="bulkDeletePurge">Also delete archived versions</label>
<div class="form-text">Removes any archived versions of the selected objects.</div>
</div>
{% endif %}
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
@@ -2316,7 +2315,7 @@
<div class="p-3 bg-body-tertiary rounded-3 mb-3">
<code id="deleteObjectKey" class="d-block text-break"></code>
</div>
{% if versioning_enabled %}
<div id="deleteObjectVersioningWrap" class="{% if not versioning_enabled %}d-none{% endif %}">
<div class="alert alert-warning d-flex align-items-start small mb-3" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
@@ -2328,7 +2327,7 @@
<label class="form-check-label" for="deletePurgeVersions">Also delete all archived versions</label>
<div class="form-text mb-0">Removes the live object and every stored version.</div>
</div>
{% endif %}
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
@@ -2771,7 +2770,8 @@
window.BucketDetailConfig = {
    endpoints: {
        versioning: "{{ url_for('ui.update_bucket_versioning', bucket_name=bucket_name) }}",
        bucketsOverview: "{{ url_for('ui.buckets_overview') }}"
        bucketsOverview: "{{ url_for('ui.buckets_overview') }}",
        archivedObjects: "{{ url_for('ui.archived_objects', bucket_name=bucket_name) }}"
    }
};


@@ -52,6 +52,11 @@
<li><a href="#acls">Access Control Lists</a></li>
<li><a href="#tagging">Object & Bucket Tagging</a></li>
<li><a href="#website-hosting">Static Website Hosting</a></li>
<li><a href="#cors-config">CORS Configuration</a></li>
<li><a href="#post-object">PostObject (Form Upload)</a></li>
<li><a href="#list-objects-v2">List Objects API v2</a></li>
<li><a href="#upgrading">Upgrading & Updates</a></li>
<li><a href="#api-matrix">Full API Reference</a></li>
</ul>
</div>
</div>
@@ -126,6 +131,11 @@ python run.py --mode ui
<td><code>5000</code></td>
<td>Listen port (UI uses 5100).</td>
</tr>
<tr>
<td><code>DISPLAY_TIMEZONE</code></td>
<td><code>UTC</code></td>
<td>Timezone for UI timestamps (e.g., <code>US/Eastern</code>, <code>Asia/Tokyo</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">CORS Settings</td>
</tr>
@@ -187,6 +197,21 @@ python run.py --mode ui
<td><code>100 per minute</code></td>
<td>Rate limit for HEAD requests.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_ADMIN</code></td>
<td><code>60 per minute</code></td>
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
</tr>
<tr>
<td><code>ADMIN_ACCESS_KEY</code></td>
<td>(none)</td>
<td>Custom access key for the admin user on first run or credential reset. Random if unset.</td>
</tr>
<tr>
<td><code>ADMIN_SECRET_KEY</code></td>
<td>(none)</td>
<td>Custom secret key for the admin user on first run or credential reset. Random if unset.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Server Settings</td>
</tr>
@@ -338,6 +363,24 @@ python run.py --mode ui
<td><code>604800</code></td>
<td>Maximum presigned URL expiry time (7 days).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Proxy & Network Settings</td>
</tr>
<tr>
<td><code>NUM_TRUSTED_PROXIES</code></td>
<td><code>1</code></td>
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers.</td>
</tr>
<tr>
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
<td>(empty)</td>
<td>Comma-separated whitelist of safe redirect targets.</td>
</tr>
<tr>
<td><code>ALLOW_INTERNAL_ENDPOINTS</code></td>
<td><code>false</code></td>
<td>Allow connections to internal/private IPs (webhooks, replication).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Storage Limits</td>
</tr>
@@ -366,6 +409,16 @@ python run.py --mode ui
<td><code>50</code></td>
<td>Max lifecycle history records per bucket.</td>
</tr>
<tr>
<td><code>OBJECT_CACHE_TTL</code></td>
<td><code>60</code></td>
<td>Seconds to cache object metadata.</td>
</tr>
<tr>
<td><code>BULK_DOWNLOAD_MAX_BYTES</code></td>
<td><code>1 GB</code></td>
<td>Max total size for bulk ZIP downloads.</td>
</tr>
<tr>
<td><code>ENCRYPTION_CHUNK_SIZE_BYTES</code></td>
<td><code>65536</code></td>
@@ -385,7 +438,7 @@ python run.py --mode ui
</table>
</div>
<div class="alert alert-warning mt-3 mb-0 small">
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code>, restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via reverse proxy, and use <code>--prod</code> flag.
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code> (also enables IAM config encryption at rest), restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via reverse proxy, use <code>--prod</code> flag, and set credential expiry on non-admin users.
</div>
</div>
</article>
@@ -452,11 +505,12 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<span class="docs-section-kicker">03</span>
<h2 class="h4 mb-0">Authenticate & manage IAM</h2>
</div>
<p class="text-muted">On first startup, MyFSIO generates random admin credentials and prints them to the console. Missed it? Check <code>data/.myfsio.sys/config/iam.json</code> directly—credentials are stored in plaintext.</p>
<p class="text-muted">On first startup, MyFSIO generates random admin credentials and prints them to the console. Set <code>ADMIN_ACCESS_KEY</code> and <code>ADMIN_SECRET_KEY</code> env vars for custom credentials. When <code>SECRET_KEY</code> is configured, the IAM config is encrypted at rest. To reset credentials, run <code>python run.py --reset-cred</code>.</p>
<div class="docs-highlight mb-3">
<ol class="mb-0">
<li>Check the console output (or <code>iam.json</code>) for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
<li>Create additional users with descriptive display names and AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>).</li>
<li>Check the console output for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
<li>Create additional users with descriptive display names, AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>), and optional credential expiry dates.</li>
<li>Set credential expiry on users to grant time-limited access. The UI shows expiry badges and provides preset durations (1h, 24h, 7d, 30d, 90d). Expired credentials are rejected at authentication.</li>
<li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li>
<li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly (see the sketch after this list).</li>
</ol>
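<p class="small text-muted">A sketch of applying a bucket policy over the S3-compatible <code>?policy</code> endpoint rather than the UI. The policy JSON follows the standard S3 document shape; since the endpoint is documented as S3-compatible, the accepted grammar is assumed to match, and the endpoint URL and credentials below are placeholders:</p>
<pre class="mb-3"><code class="language-python">import json
import requests

API = "http://127.0.0.1:5000"                                  # placeholder endpoint
AUTH = {"X-Access-Key": "AKIA...", "X-Secret-Key": "..."}      # placeholder credentials

# Standard S3-style policy document (assumed shape): public read on bucket "demo"
policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": "*",
        "Action": ["s3:GetObject"],
        "Resource": ["arn:aws:s3:::demo/*"],
    }],
}

resp = requests.put(f"{API}/demo?policy", data=json.dumps(policy), headers=AUTH)
resp.raise_for_status()
print("policy applied")</code></pre>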
@@ -491,7 +545,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<ul>
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders.</li>
<li>Infinite scroll loads more objects automatically. Choose batch size (50–250) from the footer dropdown.</li>
<li>Bulk select objects for multi-delete or multi-download. Filter by name using the search box.</li>
<li>Bulk select objects for multi-delete or multi-download (ZIP archive, up to 1 GiB). Filter by name using the search box.</li>
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
</ul>
</div>
@@ -613,15 +667,75 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
<td><code>/<bucket>/<key></code></td>
<td>Delete an object.</td>
</tr>
<tr>
<td>HEAD</td>
<td><code>/<bucket></code></td>
<td>Check if a bucket exists.</td>
</tr>
<tr>
<td>HEAD</td>
<td><code>/<bucket>/<key></code></td>
<td>Get object metadata without downloading.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/<bucket>?delete</code></td>
<td>Bulk delete objects (XML body).</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>?policy</code></td>
<td>Fetch, upsert, or remove a bucket policy (S3-compatible).</td>
<td>Bucket policy management.</td>
</tr>
<tr>
<td>GET/PUT</td>
<td><code>/<bucket>?versioning</code></td>
<td>Versioning status.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>?lifecycle</code></td>
<td>Lifecycle rules.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>?cors</code></td>
<td>CORS configuration.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>?encryption</code></td>
<td>Default encryption.</td>
</tr>
<tr>
<td>GET/PUT</td>
<td><code>/<bucket>?acl</code></td>
<td>Bucket ACL.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>?tagging</code></td>
<td>Bucket tags.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/<bucket>/<key>?tagging</code></td>
<td>Object tags.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/<bucket>/<key>?uploads</code></td>
<td>Initiate multipart upload.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/<bucket>/<key>?select</code></td>
<td>SQL query (SelectObjectContent).</td>
</tr>
</tbody>
</table>
</div>
<p class="small text-muted mt-3 mb-0">All responses include <code>X-Request-Id</code> for tracing. Logs land in <code>logs/api.log</code> and <code>logs/ui.log</code>.</p>
<p class="small text-muted mt-3 mb-0">All responses include <code>X-Request-Id</code> for tracing. See the <a href="#api-matrix">Full API Reference</a> for the complete endpoint list. Logs land in <code>logs/api.log</code> and <code>logs/ui.log</code>.</p>
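<p class="small text-muted">A minimal sketch of using that header for log correlation (the header name comes from the note above; everything else is generic <code>requests</code> usage):</p>
<pre class="mb-3"><code class="language-python">import requests

resp = requests.get("http://127.0.0.1:5000/myfsio/health")
request_id = resp.headers.get("X-Request-Id")
if resp.status_code != 200:
    # grep logs/api.log for this ID to find the matching server-side entries
    print(f"request {request_id} failed with HTTP {resp.status_code}")
else:
    print(f"ok, traced as {request_id}")</code></pre>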
</div>
</article>
<article id="examples" class="card shadow-sm docs-section">
@@ -1311,6 +1425,10 @@ curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \
<td><strong>KMS (SSE-KMS)</strong></td>
<td>Encryption using customer-managed keys via the built-in KMS</td>
</tr>
<tr>
<td><strong>SSE-C</strong></td>
<td>Server-side encryption with customer-provided keys (per-request)</td>
</tr>
</tbody>
</table>
</div>
@@ -1377,6 +1495,54 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<p class="small text-muted mb-0">
<strong>Envelope Encryption:</strong> Each object is encrypted with a unique Data Encryption Key (DEK). The DEK is then encrypted (wrapped) by the master key or KMS key and stored alongside the ciphertext. On read, the DEK is unwrapped and used to decrypt the object transparently.
</p>
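<p class="small text-muted">A minimal sketch of the envelope pattern, not MyFSIO's actual implementation (key sizes, nonce handling, and storage layout here are illustrative assumptions):</p>
<pre class="mb-3"><code class="language-python">import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

master_key = AESGCM.generate_key(bit_length=256)   # stand-in for the master/KMS key

def encrypt_object(plaintext):
    dek = AESGCM.generate_key(bit_length=256)      # fresh DEK per object
    nonce = os.urandom(12)
    ciphertext = AESGCM(dek).encrypt(nonce, plaintext, None)
    # Wrap the DEK with the master key; store the wrapped DEK next to the ciphertext
    wrap_nonce = os.urandom(12)
    wrapped_dek = AESGCM(master_key).encrypt(wrap_nonce, dek, None)
    return ciphertext, nonce, wrapped_dek, wrap_nonce

def decrypt_object(ciphertext, nonce, wrapped_dek, wrap_nonce):
    dek = AESGCM(master_key).decrypt(wrap_nonce, wrapped_dek, None)  # unwrap the DEK
    return AESGCM(dek).decrypt(nonce, ciphertext, None)

blob = encrypt_object(b"hello world")
assert decrypt_object(*blob) == b"hello world"</code></pre>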

<h3 class="h6 text-uppercase text-muted mt-4">SSE-C (Customer-Provided Keys)</h3>
<p class="small text-muted">With SSE-C, you supply your own 256-bit AES key with each request. The server encrypts/decrypts using your key but never stores it. You must provide the same key for both upload and download.</p>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Header</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>x-amz-server-side-encryption-customer-algorithm</code></td>
<td><code>AES256</code></td>
</tr>
<tr>
<td><code>x-amz-server-side-encryption-customer-key</code></td>
<td>Base64-encoded 256-bit key</td>
</tr>
<tr>
<td><code>x-amz-server-side-encryption-customer-key-MD5</code></td>
<td>Base64-encoded MD5 of the key</td>
</tr>
</tbody>
</table>
</div>
<pre class="mb-3"><code class="language-bash"># Generate a 256-bit key
KEY=$(openssl rand -base64 32)
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)

# Upload with SSE-C
curl -X PUT "{{ api_base }}/my-bucket/secret.txt" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
  -H "x-amz-server-side-encryption-customer-key: $KEY" \
  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
  --data-binary @secret.txt

# Download with SSE-C (same key required)
curl "{{ api_base }}/my-bucket/secret.txt" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
  -H "x-amz-server-side-encryption-customer-key: $KEY" \
  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"</code></pre>
<div class="alert alert-light border mb-0 small">
<strong>Note:</strong> SSE-C does not require <code>ENCRYPTION_ENABLED</code> or <code>KMS_ENABLED</code>. If you lose your key, the data is irrecoverable.
</div>
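<p class="small text-muted">The same round trip in Python, a sketch assembled from the header table above (only <code>requests</code> and the standard library; endpoint and credentials are placeholders):</p>
<pre class="mb-3"><code class="language-python">import base64
import hashlib
import os
import requests

API = "http://127.0.0.1:5000"
AUTH = {"X-Access-Key": "AKIA...", "X-Secret-Key": "..."}

key = os.urandom(32)  # 256-bit customer key; the server never stores it, so keep it safe
sse_headers = {
    "x-amz-server-side-encryption-customer-algorithm": "AES256",
    "x-amz-server-side-encryption-customer-key": base64.b64encode(key).decode(),
    "x-amz-server-side-encryption-customer-key-MD5": base64.b64encode(
        hashlib.md5(key).digest()
    ).decode(),
}

# Upload with SSE-C
requests.put(f"{API}/my-bucket/secret.txt", data=b"top secret",
             headers={**AUTH, **sse_headers}).raise_for_status()

# Download requires the same key material
resp = requests.get(f"{API}/my-bucket/secret.txt", headers={**AUTH, **sse_headers})
print(resp.content)</code></pre>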
</div>
</article>
<article id="lifecycle" class="card shadow-sm docs-section">
@@ -1926,7 +2092,7 @@ curl -X POST "{{ api_base }}/<bucket>/data.csv?select" \
<span class="docs-section-kicker">22</span>
<h2 class="h4 mb-0">Advanced S3 Operations</h2>
</div>
<p class="text-muted">Copy objects, upload part copies, and use range requests for partial downloads.</p>
<p class="text-muted">Copy, move, and partially download objects using advanced S3 operations.</p>

<h3 class="h6 text-uppercase text-muted mt-4">CopyObject</h3>
<pre class="mb-3"><code class="language-bash"># Copy within same bucket
@@ -1941,6 +2107,13 @@ curl -X PUT "{{ api_base }}/<bucket>/file.txt" \
  -H "x-amz-metadata-directive: REPLACE" \
  -H "x-amz-meta-newkey: newvalue"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">MoveObject (UI)</h3>
<p class="small text-muted">Move an object to a different key or bucket via the UI. Performs a copy then deletes the source. Requires <code>read</code>+<code>delete</code> on source and <code>write</code> on destination.</p>
<pre class="mb-3"><code class="language-bash"># Move via UI API (session-authenticated)
curl -X POST "http://localhost:5100/ui/buckets/<bucket>/objects/<key>/move" \
  -H "Content-Type: application/json" --cookie "session=..." \
  -d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">UploadPartCopy</h3>
<p class="small text-muted">Copy data from an existing object into a multipart upload part:</p>
<pre class="mb-3"><code class="language-bash"># Copy bytes 0-10485759 from source as part 1
@@ -2193,6 +2366,274 @@ server {
</div>
</div>
</article>
<article id="cors-config" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">26</span>
<h2 class="h4 mb-0">CORS Configuration</h2>
</div>
<p class="text-muted">Configure per-bucket Cross-Origin Resource Sharing rules to control which origins can access your bucket from a browser.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Setting CORS Rules</h3>
<pre class="mb-3"><code class="language-bash"># Set CORS configuration
curl -X PUT "{{ api_base }}/<bucket>?cors" \
  -H "Content-Type: application/xml" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>https://example.com</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
    <ExposeHeader>ETag</ExposeHeader>
    <MaxAgeSeconds>3600</MaxAgeSeconds>
  </CORSRule>
</CORSConfiguration>'

# Get CORS configuration
curl "{{ api_base }}/<bucket>?cors" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Delete CORS configuration
curl -X DELETE "{{ api_base }}/<bucket>?cors" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Rule Fields</h3>
<div class="table-responsive mb-0">
<table class="table table-sm table-bordered small mb-0">
<thead class="table-light">
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>AllowedOrigin</code></td>
<td>Origins allowed to make requests (supports <code>*</code> wildcard)</td>
</tr>
<tr>
<td><code>AllowedMethod</code></td>
<td>HTTP methods: <code>GET</code>, <code>PUT</code>, <code>POST</code>, <code>DELETE</code>, <code>HEAD</code></td>
</tr>
<tr>
<td><code>AllowedHeader</code></td>
<td>Request headers allowed in preflight (supports <code>*</code>)</td>
</tr>
<tr>
<td><code>ExposeHeader</code></td>
<td>Response headers visible to the browser (e.g., <code>ETag</code>, <code>x-amz-request-id</code>)</td>
</tr>
<tr>
<td><code>MaxAgeSeconds</code></td>
<td>How long the browser caches preflight results</td>
</tr>
</tbody>
</table>
</div>
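<p class="small text-muted">One way to sanity-check a rule is to replay the browser's preflight by hand. This sketch sends an <code>OPTIONS</code> request with the headers a browser would add; the response headers are assumed to follow the usual CORS semantics:</p>
<pre class="mb-3"><code class="language-python">import requests

resp = requests.options(
    "http://127.0.0.1:5000/my-bucket/photo.png",
    headers={
        "Origin": "https://example.com",
        "Access-Control-Request-Method": "PUT",
    },
)
# A matching rule should echo the origin back; a missing header means no rule matched.
print(resp.status_code, resp.headers.get("Access-Control-Allow-Origin"))
print(resp.headers.get("Access-Control-Allow-Methods"))</code></pre>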
</div>
</article>
<article id="post-object" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">27</span>
<h2 class="h4 mb-0">PostObject (HTML Form Upload)</h2>
</div>
<p class="text-muted">Upload objects directly from an HTML form using browser-based POST uploads with policy-based authorization.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Form Fields</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>key</code></td><td>Object key (supports <code>${filename}</code> variable)</td></tr>
<tr><td><code>file</code></td><td>The file to upload</td></tr>
<tr><td><code>policy</code></td><td>Base64-encoded policy document (JSON)</td></tr>
<tr><td><code>x-amz-signature</code></td><td>HMAC-SHA256 signature of the policy</td></tr>
<tr><td><code>x-amz-credential</code></td><td>Access key / date / region / s3 / aws4_request</td></tr>
<tr><td><code>x-amz-algorithm</code></td><td><code>AWS4-HMAC-SHA256</code></td></tr>
<tr><td><code>x-amz-date</code></td><td>ISO 8601 date (e.g., <code>20250101T000000Z</code>)</td></tr>
<tr><td><code>Content-Type</code></td><td>MIME type of the uploaded file</td></tr>
<tr><td><code>x-amz-meta-*</code></td><td>Custom metadata headers</td></tr>
</tbody>
</table>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">Simple Upload (No Signing)</h3>
<pre class="mb-3"><code class="language-html"><form action="{{ api_base }}/my-bucket" method="POST" enctype="multipart/form-data">
  <input type="hidden" name="key" value="uploads/${filename}">
  <input type="file" name="file">
  <button type="submit">Upload</button>
</form></code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Signed Upload (With Policy)</h3>
<p class="small text-muted mb-0">For authenticated uploads, include a base64-encoded policy and SigV4 signature fields. The policy constrains allowed keys, content types, and size limits. See docs.md Section 20 for full signing examples.</p>
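<p class="small text-muted">A sketch of the signing flow using only the standard library. The scope string and policy fields follow the AWS SigV4 convention implied by the field table above; the region, bucket name, and credentials are placeholder assumptions, so defer to docs.md Section 20 for the authoritative walkthrough:</p>
<pre class="mb-3"><code class="language-python">import base64
import hashlib
import hmac
import json
from datetime import datetime, timedelta, timezone

access_key, secret_key = "AKIA...", "..."      # placeholder credentials
region, service = "us-east-1", "s3"            # assumed scope values
now = datetime.now(timezone.utc)
amz_date = now.strftime("%Y%m%dT%H%M%SZ")
datestamp = now.strftime("%Y%m%d")
credential = f"{access_key}/{datestamp}/{region}/{service}/aws4_request"

policy = {
    "expiration": (now + timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%SZ"),
    "conditions": [
        {"bucket": "my-bucket"},
        ["starts-with", "$key", "uploads/"],
        {"x-amz-algorithm": "AWS4-HMAC-SHA256"},
        {"x-amz-credential": credential},
        {"x-amz-date": amz_date},
    ],
}
policy_b64 = base64.b64encode(json.dumps(policy).encode()).decode()

def _hmac(key, msg):
    return hmac.new(key, msg.encode(), hashlib.sha256).digest()

# Standard SigV4 signing-key derivation chain: date, region, service, terminator
signing_key = _hmac(_hmac(_hmac(_hmac(b"AWS4" + secret_key.encode(),
                    datestamp), region), service), "aws4_request")
signature = hmac.new(signing_key, policy_b64.encode(), hashlib.sha256).hexdigest()
# Submit policy_b64 and signature as the form fields shown in the table above.</code></pre>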
</div>
</article>
<article id="list-objects-v2" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">28</span>
<h2 class="h4 mb-0">List Objects API v2</h2>
</div>
<p class="text-muted">Use the v2 list API for improved pagination with continuation tokens instead of markers.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Usage</h3>
<pre class="mb-3"><code class="language-bash"># List with v2 API
curl "{{ api_base }}/<bucket>?list-type=2&prefix=logs/&delimiter=/&max-keys=100" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Paginate with continuation token
curl "{{ api_base }}/<bucket>?list-type=2&continuation-token=<token>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Start listing after a specific key
curl "{{ api_base }}/<bucket>?list-type=2&start-after=photos/2025/" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Query Parameters</h3>
<div class="table-responsive mb-0">
<table class="table table-sm table-bordered small mb-0">
<thead class="table-light">
<tr>
<th>Parameter</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>list-type=2</code></td><td>Enables v2 API (required)</td></tr>
<tr><td><code>prefix</code></td><td>Filter to keys starting with this prefix</td></tr>
<tr><td><code>delimiter</code></td><td>Group keys by delimiter (typically <code>/</code> for folders)</td></tr>
<tr><td><code>max-keys</code></td><td>Maximum objects to return (default 1000)</td></tr>
<tr><td><code>continuation-token</code></td><td>Token from previous response for pagination</td></tr>
<tr><td><code>start-after</code></td><td>Start listing after this key (first page only)</td></tr>
<tr><td><code>fetch-owner</code></td><td>Include owner info in response</td></tr>
<tr><td><code>encoding-type</code></td><td>Set to <code>url</code> to URL-encode keys in response</td></tr>
</tbody>
</table>
</div>
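<p class="small text-muted">A pagination loop in Python, a sketch that mirrors the curl calls above (XML tag matching is kept namespace-agnostic since the response namespace is not shown here; endpoint and credentials are placeholders):</p>
<pre class="mb-3"><code class="language-python">import requests
import xml.etree.ElementTree as ET

API = "http://127.0.0.1:5000"
AUTH = {"X-Access-Key": "AKIA...", "X-Secret-Key": "..."}

def _local(tag):
    # Strip any XML namespace so matching works either way
    return tag.rsplit("}", 1)[-1]

params = {"list-type": "2", "prefix": "logs/", "max-keys": "100"}
while True:
    resp = requests.get(f"{API}/my-bucket", params=params, headers=AUTH)
    resp.raise_for_status()
    token = None
    for elem in ET.fromstring(resp.content).iter():
        if _local(elem.tag) == "Key":
            print(elem.text)
        elif _local(elem.tag) == "NextContinuationToken":
            token = elem.text
    if not token:
        break  # no more pages
    params = {"list-type": "2", "continuation-token": token}</code></pre>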
</div>
</article>
<article id="upgrading" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">29</span>
<h2 class="h4 mb-0">Upgrading & Updates</h2>
</div>
<p class="text-muted">How to safely update MyFSIO to a new version.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Pre-Update Backup</h3>
<p class="small text-muted">Always back up before updating:</p>
<pre class="mb-3"><code class="language-bash"># Back up configuration
cp -r data/.myfsio.sys/config/ config-backup/

# Back up data (optional, for critical deployments)
tar czf myfsio-backup-$(date +%Y%m%d).tar.gz data/

# Back up logs
cp -r logs/ logs-backup/</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Update Procedure</h3>
<ol class="docs-steps mb-3">
<li><strong>Stop the service:</strong> <code>sudo systemctl stop myfsio</code> (or kill the process)</li>
<li><strong>Pull new version:</strong> <code>git pull origin main</code> or download the new binary</li>
<li><strong>Install dependencies:</strong> <code>pip install -r requirements.txt</code></li>
<li><strong>Validate config:</strong> <code>python run.py --check-config</code></li>
<li><strong>Start the service:</strong> <code>sudo systemctl start myfsio</code></li>
<li><strong>Verify:</strong> <code>curl http://localhost:5000/myfsio/health</code> (scriptable; see the sketch after this list)</li>
</ol>
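<p class="small text-muted">The verification step can be scripted. A small sketch that polls the health endpoint from step 6 until the service answers (the retry count and interval are arbitrary choices):</p>
<pre class="mb-3"><code class="language-python">import time
import requests

# Poll up to 30 times, two seconds apart, until the service reports healthy.
for attempt in range(30):
    try:
        resp = requests.get("http://localhost:5000/myfsio/health", timeout=2)
        if resp.ok:
            print("service is back up")
            break
    except requests.ConnectionError:
        pass  # service still starting
    time.sleep(2)
else:
    raise SystemExit("service did not come back within 60 seconds")</code></pre>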
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Rollback</h3>
|
||||
<p class="small text-muted mb-0">If something goes wrong, stop the service, restore the backed-up config and data directories, then restart with the previous binary or code version. See <code>docs.md</code> Section 4 for detailed rollback procedures including blue-green deployment strategies.</p>
|
||||
</div>
|
||||
</article>
|
||||
<article id="api-matrix" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">30</span>
|
||||
<h2 class="h4 mb-0">Full API Reference</h2>
|
||||
</div>
|
||||
<p class="text-muted">Complete list of all S3-compatible, admin, and KMS endpoints.</p>
|
||||
<pre class="mb-0"><code class="language-text"># Service
|
||||
GET /myfsio/health # Health check
|
||||
|
||||
# Bucket Operations
|
||||
GET / # List buckets
|
||||
PUT /<bucket> # Create bucket
|
||||
DELETE /<bucket> # Delete bucket
|
||||
GET /<bucket> # List objects (?list-type=2)
|
||||
HEAD /<bucket> # Check bucket exists
|
||||
POST /<bucket> # POST object / form upload
|
||||
POST /<bucket>?delete # Bulk delete
|
||||
|
||||
# Bucket Configuration
|
||||
GET|PUT|DELETE /<bucket>?policy # Bucket policy
|
||||
GET|PUT /<bucket>?quota # Bucket quota
|
||||
GET|PUT /<bucket>?versioning # Versioning
|
||||
GET|PUT|DELETE /<bucket>?lifecycle # Lifecycle rules
|
||||
GET|PUT|DELETE /<bucket>?cors # CORS config
|
||||
GET|PUT|DELETE /<bucket>?encryption # Default encryption
|
||||
GET|PUT /<bucket>?acl # Bucket ACL
|
||||
GET|PUT|DELETE /<bucket>?tagging # Bucket tags
|
||||
GET|PUT|DELETE /<bucket>?replication # Replication rules
|
||||
GET|PUT /<bucket>?logging # Access logging
|
||||
GET|PUT /<bucket>?notification # Event notifications
|
||||
GET|PUT /<bucket>?object-lock # Object lock config
|
||||
GET|PUT|DELETE /<bucket>?website # Static website
|
||||
GET /<bucket>?uploads # List multipart uploads
|
||||
GET /<bucket>?versions # List object versions
|
||||
GET /<bucket>?location # Bucket region
|
||||
|
||||
# Object Operations
|
||||
PUT /<bucket>/<key> # Upload object
|
||||
GET /<bucket>/<key> # Download (Range supported)
|
||||
DELETE /<bucket>/<key> # Delete object
|
||||
HEAD /<bucket>/<key> # Object metadata
|
||||
POST /<bucket>/<key>?select # SQL query (SelectObjectContent)
|
||||
|
||||
# Object Configuration
|
||||
GET|PUT|DELETE /<bucket>/<key>?tagging # Object tags
|
||||
GET|PUT /<bucket>/<key>?acl # Object ACL
|
||||
GET|PUT /<bucket>/<key>?retention # Object retention
|
||||
GET|PUT /<bucket>/<key>?legal-hold # Legal hold
|
||||
|
||||
# Multipart Upload
|
||||
POST /<bucket>/<key>?uploads # Initiate
|
||||
PUT /<bucket>/<key>?uploadId=X&partNumber=N # Upload part
|
||||
POST /<bucket>/<key>?uploadId=X # Complete
|
||||
DELETE /<bucket>/<key>?uploadId=X # Abort
|
||||
GET /<bucket>/<key>?uploadId=X # List parts
|
||||
|
||||
# Copy (via x-amz-copy-source header)
|
||||
PUT /<bucket>/<key> # CopyObject
|
||||
PUT /<bucket>/<key>?uploadId&partNumber # UploadPartCopy
|
||||
|
||||
# Admin API
|
||||
GET|PUT /admin/site # Local site config
|
||||
GET /admin/sites # List peers
|
||||
POST /admin/sites # Register peer
|
||||
GET|PUT|DELETE /admin/sites/<id> # Manage peer
|
||||
GET /admin/sites/<id>/health # Peer health
|
||||
GET /admin/topology # Cluster topology
|
||||
GET|POST|PUT|DELETE /admin/website-domains # Domain mappings
|
||||
|
||||
# KMS API
|
||||
GET|POST /kms/keys # List / Create keys
|
||||
GET|DELETE /kms/keys/<id> # Get / Delete key
|
||||
POST /kms/keys/<id>/enable # Enable key
|
||||
POST /kms/keys/<id>/disable # Disable key
|
||||
POST /kms/keys/<id>/rotate # Rotate key
|
||||
POST /kms/encrypt # Encrypt data
|
||||
POST /kms/decrypt # Decrypt data
|
||||
POST /kms/generate-data-key # Generate data key
|
||||
POST /kms/generate-random # Generate random bytes</code></pre>
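<p class="small text-muted mt-3 mb-2">For orientation, the sketch below (illustrative only; it assumes the default Quick Start ports and an access key pair created in the IAM console) hits the health endpoint over plain HTTP and drives a few of the S3-compatible routes with boto3:</p>
<pre class="mb-0"><code class="language-python">import boto3
import requests

# GET /myfsio/health
print(requests.get("http://127.0.0.1:5000/myfsio/health").status_code)

# S3-compatible calls routed through the API endpoint
s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="YOUR_ACCESS_KEY",       # placeholder credentials
    aws_secret_access_key="YOUR_SECRET_KEY",   # placeholder credentials
)
s3.create_bucket(Bucket="demo")                             # PUT /<bucket>
s3.put_object(Bucket="demo", Key="hello.txt", Body=b"hi")   # PUT /<bucket>/<key>
print([b["Name"] for b in s3.list_buckets()["Buckets"]])    # GET /</code></pre>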
</div>
</article>
</div>
<div class="col-xl-4 docs-sidebar-col">
<aside class="card shadow-sm docs-sidebar">
@@ -2224,6 +2665,11 @@ server {
<li><a href="#acls">Access Control Lists</a></li>
<li><a href="#tagging">Object & Bucket Tagging</a></li>
<li><a href="#website-hosting">Static Website Hosting</a></li>
<li><a href="#cors-config">CORS Configuration</a></li>
<li><a href="#post-object">PostObject (Form Upload)</a></li>
<li><a href="#list-objects-v2">List Objects API v2</a></li>
<li><a href="#upgrading">Upgrading & Updates</a></li>
<li><a href="#api-matrix">Full API Reference</a></li>
</ul>
<div class="docs-sidebar-callouts">
<div>

@@ -50,9 +50,20 @@
New user created: <code>{{ disclosed_secret.access_key }}</code>
{% endif %}
</div>
<p class="mb-2 small">⚠️ This secret is only shown once. Copy it now and store it securely.</p>
<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>
</div>
</div>
<div class="input-group mb-2">
<span class="input-group-text"><strong>Access key</strong></span>
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.access_key }}" readonly id="disclosedAccessKeyValue" />
<button class="btn btn-outline-primary" type="button" data-access-key-copy>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-clipboard" viewBox="0 0 16 16">
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
</svg>
Copy
</button>
</div>
<div class="input-group">
<span class="input-group-text"><strong>Secret key</strong></span>
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.secret_key }}" readonly id="disclosedSecretValue" />
@@ -79,7 +90,7 @@
<pre class="policy-preview mb-0" id="iamConfigPreview">{{ config_document }}</pre>
<button class="btn btn-outline-light btn-sm config-copy" type="button" data-copy-target="iamConfigPreview">Copy JSON</button>
</div>
<p class="text-muted small mt-2 mb-0">Secrets are masked above. Access <code>{{ config_summary.path }}</code> directly to view full credentials.</p>
<p class="text-muted small mt-2 mb-0">Secrets are masked above. IAM config is encrypted at rest.</p>
</div>
</div>
</div>
@@ -122,12 +133,20 @@
{% endif %}
<div class="row g-3">
{% for user in users %}
{% set ns = namespace(is_admin=false) %}
{% set ns = namespace(is_admin=false, is_expired=false, is_expiring_soon=false) %}
{% for policy in user.policies %}
{% if 'iam:*' in policy.actions or '*' in policy.actions %}
{% set ns.is_admin = true %}
{% endif %}
{% endfor %}
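{# Assumed precondition, noted for clarity: expires_at, now_iso and soon_iso are ISO-8601 strings in the same format, so the lexicographic comparisons below double as chronological ones. #}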
{% if user.expires_at %}
{% set exp_str = user.expires_at %}
{% if exp_str <= now_iso %}
{% set ns.is_expired = true %}
{% elif exp_str <= soon_iso %}
{% set ns.is_expiring_soon = true %}
{% endif %}
{% endif %}
<div class="col-md-6 col-xl-4 iam-user-item" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}">
<div class="card h-100 iam-user-card{{ ' iam-admin-card' if ns.is_admin else '' }}">
<div class="card-body">
@@ -146,6 +165,11 @@
{% else %}
<span class="iam-role-badge iam-role-user" data-role-badge>User</span>
{% endif %}
{% if ns.is_expired %}
<span class="badge text-bg-danger" style="font-size: .65rem">Expired</span>
{% elif ns.is_expiring_soon %}
<span class="badge text-bg-warning" style="font-size: .65rem">Expiring soon</span>
{% endif %}
</div>
<div class="d-flex align-items-center gap-1">
<code class="small text-muted text-truncate" title="{{ user.access_key }}">{{ user.access_key }}</code>
@@ -173,6 +197,15 @@
Edit Name
</button>
</li>
<li>
<button class="dropdown-item" type="button" data-expiry-user="{{ user.access_key }}" data-expires-at="{{ user.expires_at or '' }}">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
</svg>
Set Expiry
</button>
</li>
<li>
<button class="dropdown-item" type="button" data-rotate-user="{{ user.access_key }}">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
@@ -283,6 +316,32 @@
<label class="form-label fw-medium">Display Name</label>
<input class="form-control" type="text" name="display_name" placeholder="Analytics Team" required autofocus />
</div>
<div class="mb-3">
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
Access Key <span class="text-muted fw-normal small">optional</span>
</label>
<div class="input-group">
<input class="form-control font-monospace" type="text" name="access_key" id="createUserAccessKey" placeholder="Leave blank to auto-generate" />
<button class="btn btn-outline-secondary" type="button" id="generateAccessKeyBtn" title="Generate secure access key">Generate</button>
</div>
</div>
<div class="mb-3">
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
Secret Key <span class="text-muted fw-normal small">optional</span>
</label>
<div class="input-group">
<input class="form-control font-monospace" type="text" name="secret_key" id="createUserSecretKey" placeholder="Leave blank to auto-generate" />
<button class="btn btn-outline-secondary" type="button" id="generateSecretKeyBtn" title="Generate secure secret key">Generate</button>
</div>
<div class="form-text">If you set a custom secret key, copy it now. It will be encrypted and cannot be recovered.</div>
</div>
<div class="mb-3">
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
Expiry <span class="text-muted fw-normal small">optional</span>
</label>
<input class="form-control" type="datetime-local" name="expires_at" id="createUserExpiry" />
<div class="form-text">Leave blank for no expiration. Expired users cannot authenticate.</div>
</div>
<div class="mb-3">
<label class="form-label fw-medium">Initial Policies (JSON)</label>
<textarea class="form-control font-monospace" name="policies" id="createUserPolicies" rows="6" spellcheck="false" placeholder='[
@@ -495,6 +554,52 @@
</div>
</div>

<div class="modal fade" id="expiryModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
<div class="modal-header border-0 pb-0">
<h1 class="modal-title fs-5 fw-semibold">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
</svg>
Set Expiry
</h1>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<form method="post" id="expiryForm">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
<div class="modal-body">
<p class="text-muted small mb-3">Set expiration for <code id="expiryUserLabel"></code></p>
<div class="mb-3">
<label class="form-label fw-medium">Expires at</label>
<input class="form-control" type="datetime-local" name="expires_at" id="expiryDateInput" />
<div class="form-text">Leave blank to remove expiration (never expires).</div>
</div>
<div class="d-flex flex-wrap gap-2">
<span class="text-muted small me-2 align-self-center">Quick presets:</span>
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="1h">1 hour</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="24h">24 hours</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="7d">7 days</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="30d">30 days</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="90d">90 days</button>
<button class="btn btn-outline-secondary btn-sm text-danger" type="button" data-expiry-preset="clear">Never</button>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
<button class="btn btn-primary" type="submit">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
</svg>
Save Expiry
</button>
</div>
</form>
</div>
</div>
</div>

<script id="iamUsersJson" type="application/json">{{ users | tojson }}</script>
{% endblock %}

@@ -512,7 +617,8 @@
updateUser: "{{ url_for('ui.update_iam_user', access_key='ACCESS_KEY') }}",
deleteUser: "{{ url_for('ui.delete_iam_user', access_key='ACCESS_KEY') }}",
updatePolicies: "{{ url_for('ui.update_iam_policies', access_key='ACCESS_KEY') }}",
rotateSecret: "{{ url_for('ui.rotate_iam_secret', access_key='ACCESS_KEY') }}"
rotateSecret: "{{ url_for('ui.rotate_iam_secret', access_key='ACCESS_KEY') }}",
updateExpiry: "{{ url_for('ui.update_iam_expiry', access_key='ACCESS_KEY') }}"
}
});
</script>

@@ -73,9 +73,6 @@
</svg>
</button>
</form>
<div class="text-center mt-4">
<small class="text-muted">Need help? Check the <a href="{{ url_for('ui.docs_page') }}" class="text-decoration-none">documentation</a></small>
</div>
</div>
</div>
</div>

@@ -43,6 +43,11 @@ def app(tmp_path: Path):
        }
    )
    yield flask_app
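    # Best-effort teardown: stop storage-side bookkeeping when the backend
    # exposes it (the hasattr guard below makes this a no-op otherwise).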
    storage = flask_app.extensions.get("object_storage")
    if storage:
        base = getattr(storage, "storage", storage)
        if hasattr(base, "shutdown_stats"):
            base.shutdown_stats()


@pytest.fixture()

@@ -53,15 +53,17 @@ def test_special_characters_in_metadata(tmp_path: Path):
    assert meta["special"] == "!@#$%^&*()"

def test_disk_full_scenario(tmp_path: Path, monkeypatch):
    # Simulate disk full by mocking write to fail
    import app.storage as _storage_mod
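    # Disable the Rust fast path so the write goes through the pure-Python
    # branch, where the shutil.copyfileobj mock below can intercept it.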
    monkeypatch.setattr(_storage_mod, "_HAS_RUST", False)

    storage = ObjectStorage(tmp_path)
    storage.create_bucket("full")

    def mock_copyfileobj(*args, **kwargs):
        raise OSError(28, "No space left on device")

    import shutil
    monkeypatch.setattr(shutil, "copyfileobj", mock_copyfileobj)

    with pytest.raises(OSError, match="No space left on device"):
        storage.put_object("full", "file", io.BytesIO(b"data"))

tests/test_rust_extensions.py (new file, 350 lines)
@@ -0,0 +1,350 @@
import hashlib
import io
import os
import secrets
import sys
from pathlib import Path

import pytest

sys.path.insert(0, str(Path(__file__).resolve().parents[1]))

try:
    import myfsio_core as _rc
    HAS_RUST = True
except ImportError:
    _rc = None
    HAS_RUST = False

pytestmark = pytest.mark.skipif(not HAS_RUST, reason="myfsio_core not available")


class TestStreamToFileWithMd5:
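    # Per the assertions below, stream_to_file_with_md5(stream, tmp_dir,
    # chunk_size=...) spools the stream to a temp file under tmp_dir and
    # returns (temp_file_path, md5_hexdigest, bytes_written).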
    def test_basic_write(self, tmp_path):
        data = b"hello world" * 1000
        stream = io.BytesIO(data)
        tmp_dir = str(tmp_path / "tmp")

        tmp_path_str, md5_hex, size = _rc.stream_to_file_with_md5(stream, tmp_dir)

        assert size == len(data)
        assert md5_hex == hashlib.md5(data).hexdigest()
        assert Path(tmp_path_str).exists()
        assert Path(tmp_path_str).read_bytes() == data

    def test_empty_stream(self, tmp_path):
        stream = io.BytesIO(b"")
        tmp_dir = str(tmp_path / "tmp")

        tmp_path_str, md5_hex, size = _rc.stream_to_file_with_md5(stream, tmp_dir)

        assert size == 0
        assert md5_hex == hashlib.md5(b"").hexdigest()
        assert Path(tmp_path_str).read_bytes() == b""

    def test_large_data(self, tmp_path):
        data = os.urandom(1024 * 1024 * 2)
        stream = io.BytesIO(data)
        tmp_dir = str(tmp_path / "tmp")

        tmp_path_str, md5_hex, size = _rc.stream_to_file_with_md5(stream, tmp_dir)

        assert size == len(data)
        assert md5_hex == hashlib.md5(data).hexdigest()

    def test_custom_chunk_size(self, tmp_path):
        data = b"x" * 10000
        stream = io.BytesIO(data)
        tmp_dir = str(tmp_path / "tmp")

        tmp_path_str, md5_hex, size = _rc.stream_to_file_with_md5(
            stream, tmp_dir, chunk_size=128
        )

        assert size == len(data)
        assert md5_hex == hashlib.md5(data).hexdigest()


class TestAssemblePartsWithMd5:
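    # Per the tests below, assemble_parts_with_md5(part_paths, dest) concatenates
    # the part files into dest and returns the MD5 hex digest of the combined bytes.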
    def test_basic_assembly(self, tmp_path):
        parts = []
        combined = b""
        for i in range(3):
            data = f"part{i}data".encode() * 100
            combined += data
            p = tmp_path / f"part{i}"
            p.write_bytes(data)
            parts.append(str(p))

        dest = str(tmp_path / "output")
        md5_hex = _rc.assemble_parts_with_md5(parts, dest)

        assert md5_hex == hashlib.md5(combined).hexdigest()
        assert Path(dest).read_bytes() == combined

    def test_single_part(self, tmp_path):
        data = b"single part data"
        p = tmp_path / "part0"
        p.write_bytes(data)

        dest = str(tmp_path / "output")
        md5_hex = _rc.assemble_parts_with_md5([str(p)], dest)

        assert md5_hex == hashlib.md5(data).hexdigest()
        assert Path(dest).read_bytes() == data

    def test_empty_parts_list(self):
        with pytest.raises(ValueError, match="No parts"):
            _rc.assemble_parts_with_md5([], "dummy")

    def test_missing_part_file(self, tmp_path):
        with pytest.raises(OSError):
            _rc.assemble_parts_with_md5(
                [str(tmp_path / "nonexistent")], str(tmp_path / "out")
            )

    def test_large_parts(self, tmp_path):
        parts = []
        combined = b""
        for i in range(5):
            data = os.urandom(512 * 1024)
            combined += data
            p = tmp_path / f"part{i}"
            p.write_bytes(data)
            parts.append(str(p))

        dest = str(tmp_path / "output")
        md5_hex = _rc.assemble_parts_with_md5(parts, dest)

        assert md5_hex == hashlib.md5(combined).hexdigest()
        assert Path(dest).read_bytes() == combined


class TestEncryptDecryptStreamChunked:
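    # Helper mirroring the per-chunk nonce scheme the cross-compat tests below
    # rely on: each chunk's 12-byte AES-GCM nonce is derived from b"chunk_nonce"
    # via HKDF-SHA256 with salt=base_nonce and info=big-endian chunk index.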
    def _python_derive_chunk_nonce(self, base_nonce, chunk_index):
        from cryptography.hazmat.primitives.kdf.hkdf import HKDF
        from cryptography.hazmat.primitives import hashes
        hkdf = HKDF(
            algorithm=hashes.SHA256(),
            length=12,
            salt=base_nonce,
            info=chunk_index.to_bytes(4, "big"),
        )
        return hkdf.derive(b"chunk_nonce")

    def test_encrypt_decrypt_roundtrip(self, tmp_path):
        data = b"Hello, encryption!" * 500
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce
        )
        assert chunk_count > 0

        chunk_count_dec = _rc.decrypt_stream_chunked(
            encrypted_path, decrypted_path, key, base_nonce
        )
        assert chunk_count_dec == chunk_count
        assert Path(decrypted_path).read_bytes() == data

    def test_empty_file(self, tmp_path):
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "empty")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(b"")

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce
        )
        assert chunk_count == 0

        chunk_count_dec = _rc.decrypt_stream_chunked(
            encrypted_path, decrypted_path, key, base_nonce
        )
        assert chunk_count_dec == 0
        assert Path(decrypted_path).read_bytes() == b""

    def test_custom_chunk_size(self, tmp_path):
        data = os.urandom(10000)
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce, chunk_size=1024
        )
        assert chunk_count == 10

        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)
        assert Path(decrypted_path).read_bytes() == data

    def test_invalid_key_length(self, tmp_path):
        input_path = str(tmp_path / "in")
        Path(input_path).write_bytes(b"data")

        with pytest.raises(ValueError, match="32 bytes"):
            _rc.encrypt_stream_chunked(
                input_path, str(tmp_path / "out"), b"short", secrets.token_bytes(12)
            )

    def test_invalid_nonce_length(self, tmp_path):
        input_path = str(tmp_path / "in")
        Path(input_path).write_bytes(b"data")

        with pytest.raises(ValueError, match="12 bytes"):
            _rc.encrypt_stream_chunked(
                input_path, str(tmp_path / "out"), secrets.token_bytes(32), b"short"
            )

    def test_wrong_key_fails_decrypt(self, tmp_path):
        data = b"sensitive data"
        key = secrets.token_bytes(32)
        wrong_key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)
        _rc.encrypt_stream_chunked(input_path, encrypted_path, key, base_nonce)

        with pytest.raises((ValueError, OSError)):
            _rc.decrypt_stream_chunked(
                encrypted_path, decrypted_path, wrong_key, base_nonce
            )
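
    # On-disk layout implied by the two cross-compat tests below: a 4-byte
    # big-endian chunk count header, then per chunk a 4-byte big-endian
    # ciphertext length followed by that chunk's AES-GCM ciphertext.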
    def test_cross_compat_python_encrypt_rust_decrypt(self, tmp_path):
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM

        data = b"cross compat test data" * 100
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)
        chunk_size = 1024

        encrypted_path = str(tmp_path / "py_encrypted")
        with open(encrypted_path, "wb") as f:
            f.write(b"\x00\x00\x00\x00")
            aesgcm = AESGCM(key)
            chunk_index = 0
            offset = 0
            while offset < len(data):
                chunk = data[offset:offset + chunk_size]
                nonce = self._python_derive_chunk_nonce(base_nonce, chunk_index)
                enc = aesgcm.encrypt(nonce, chunk, None)
                f.write(len(enc).to_bytes(4, "big"))
                f.write(enc)
                chunk_index += 1
                offset += chunk_size
            f.seek(0)
            f.write(chunk_index.to_bytes(4, "big"))

        decrypted_path = str(tmp_path / "rust_decrypted")
        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)
        assert Path(decrypted_path).read_bytes() == data

    def test_cross_compat_rust_encrypt_python_decrypt(self, tmp_path):
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM

        data = b"cross compat reverse test" * 100
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)
        chunk_size = 1024

        input_path = str(tmp_path / "plaintext")
        encrypted_path = str(tmp_path / "rust_encrypted")
        Path(input_path).write_bytes(data)

        chunk_count = _rc.encrypt_stream_chunked(
            input_path, encrypted_path, key, base_nonce, chunk_size=chunk_size
        )

        aesgcm = AESGCM(key)
        with open(encrypted_path, "rb") as f:
            count_bytes = f.read(4)
            assert int.from_bytes(count_bytes, "big") == chunk_count

            decrypted = b""
            for i in range(chunk_count):
                size = int.from_bytes(f.read(4), "big")
                enc_chunk = f.read(size)
                nonce = self._python_derive_chunk_nonce(base_nonce, i)
                decrypted += aesgcm.decrypt(nonce, enc_chunk, None)

        assert decrypted == data

    def test_large_file_roundtrip(self, tmp_path):
        data = os.urandom(1024 * 1024)
        key = secrets.token_bytes(32)
        base_nonce = secrets.token_bytes(12)

        input_path = str(tmp_path / "large")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        _rc.encrypt_stream_chunked(input_path, encrypted_path, key, base_nonce)
        _rc.decrypt_stream_chunked(encrypted_path, decrypted_path, key, base_nonce)

        assert Path(decrypted_path).read_bytes() == data


class TestStreamingEncryptorFileMethods:
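    # Exercises the Python-level StreamingEncryptor from app.encryption
    # (encrypt_file/decrypt_file and encrypt_stream/decrypt_stream) rather
    # than calling the myfsio_core functions directly.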
    def test_encrypt_file_decrypt_file_roundtrip(self, tmp_path):
        from app.encryption import LocalKeyEncryption, StreamingEncryptor

        master_key_path = tmp_path / "master.key"
        provider = LocalKeyEncryption(master_key_path)
        encryptor = StreamingEncryptor(provider, chunk_size=512)

        data = b"file method test data" * 200
        input_path = str(tmp_path / "input")
        encrypted_path = str(tmp_path / "encrypted")
        decrypted_path = str(tmp_path / "decrypted")

        Path(input_path).write_bytes(data)

        metadata = encryptor.encrypt_file(input_path, encrypted_path)
        assert metadata.algorithm == "AES256"

        encryptor.decrypt_file(encrypted_path, decrypted_path, metadata)
        assert Path(decrypted_path).read_bytes() == data

    def test_encrypt_file_matches_encrypt_stream(self, tmp_path):
        from app.encryption import LocalKeyEncryption, StreamingEncryptor

        master_key_path = tmp_path / "master.key"
        provider = LocalKeyEncryption(master_key_path)
        encryptor = StreamingEncryptor(provider, chunk_size=512)

        data = b"stream vs file comparison" * 100
        input_path = str(tmp_path / "input")
        Path(input_path).write_bytes(data)

        file_encrypted_path = str(tmp_path / "file_enc")
        metadata_file = encryptor.encrypt_file(input_path, file_encrypted_path)

        file_decrypted_path = str(tmp_path / "file_dec")
        encryptor.decrypt_file(file_encrypted_path, file_decrypted_path, metadata_file)
        assert Path(file_decrypted_path).read_bytes() == data

        stream_enc, metadata_stream = encryptor.encrypt_stream(io.BytesIO(data))
        stream_dec = encryptor.decrypt_stream(stream_enc, metadata_stream)
        assert stream_dec.read() == data