23 Commits

Author SHA1 Message Date
28cb656d94 Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main
Reviewed-on: #8
2025-12-22 03:10:35 +00:00
992d9eccd9 Update docs 2025-12-22 11:09:29 +08:00
40f3192c5c Add fallback button for object loading 2025-12-22 10:46:32 +08:00
2498b950f6 Update requirements.txt 2025-12-22 10:40:05 +08:00
97435f15e5 Revamp object bucket browser logic; Add new tests 2025-12-22 10:04:36 +08:00
3c44152fc6 Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main
Reviewed-on: #7
2025-12-21 06:30:21 +00:00
97860669ec Fix presigned URL not generating for nested objects 2025-12-21 14:22:00 +08:00
4a5dd76286 Update installation and uninstallation scripts 2025-12-21 14:00:31 +08:00
d2dc293722 Fix inconsistency in config files 2025-12-21 13:17:33 +08:00
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main
Reviewed-on: #6
2025-12-13 15:41:03 +00:00
563bb8fa6a Fix incorrect STORAGE_ROOT setup; Add installation scripts 2025-12-13 22:26:43 +08:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main
Reviewed-on: #5
2025-12-13 08:22:43 +00:00
5ccf53b688 Add app uptime and version status in Metrics dashboard 2025-12-13 16:18:38 +08:00
4d4256830a Update docs; Remove unnecessary hardcoded metrics details 2025-12-13 15:57:13 +08:00
137e3b7b68 Configure CORS default settings 2025-12-13 15:33:40 +08:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main
Reviewed-on: #4
2025-12-03 04:14:57 +00:00
114e684cb8 Add logging to file missing 2025-12-03 12:11:42 +08:00
5d161c1d92 Fix presigned URL encoding issue 2025-12-03 12:08:02 +08:00
f160827b41 Update requirements.txt to the latest versions 2025-12-03 11:53:25 +08:00
9368715b16 Add bucket quota; Versioned objects now count towards the object storage and size count usage 2025-12-03 11:48:08 +08:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main
Reviewed-on: #3
2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main
Reviewed-on: #2
2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main
Reviewed-on: #1
2025-11-22 12:31:27 +00:00
28 changed files with 2973 additions and 418 deletions

View File

@@ -8,7 +8,7 @@ MyFSIO is a batteries-included, Flask-based recreation of Amazon S3 and IAM work
- **IAM + access keys:** Users, access keys, key rotation, and bucket-scoped actions (`list/read/write/delete/policy`) now live in `data/.myfsio.sys/config/iam.json` and are editable from the IAM dashboard.
- **Bucket policies + hot reload:** `data/.myfsio.sys/config/bucket_policies.json` uses AWS' policy grammar (Version `2012-10-17`) with a built-in watcher, so editing the JSON file applies immediately. The UI also ships Public/Private/Custom presets for faster edits.
- **Presigned URLs everywhere:** Signature Version 4 presigned URLs respect IAM + bucket policies and replace the now-removed "share link" feature for public access scenarios.
- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly.
- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly. The object browser supports folder navigation, infinite scroll pagination, bulk operations, and automatic retry on load failures.
- **Tests & health:** `/healthz` for smoke checks and `pytest` coverage for IAM, CRUD, presign, and policy flows.
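The presigned-URL flow can be driven by any SigV4-capable client. A minimal boto3 sketch against a local instance (the endpoint, bucket, and credentials below are placeholders):

```python
import boto3
from botocore.config import Config

# Point a standard S3 client at the local MyFSIO API (default port 5000).
s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="AKIA-EXAMPLE",          # an access key from iam.json
    aws_secret_access_key="example-secret",
    region_name="us-east-1",                   # matches the AWS_REGION default
    config=Config(signature_version="s3v4"),
)

# Generate a presigned GET that honours IAM and bucket policies.
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "my-bucket", "Key": "report.pdf"},
    ExpiresIn=3600,
)
print(url)
```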
## Architecture at a Glance
@@ -86,7 +86,7 @@ Presigned URLs follow the AWS CLI playbook:
| `AWS_REGION` | `us-east-1` | Region used in Signature V4 scope |
| `AWS_SERVICE` | `s3` | Service used in Signature V4 scope |
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`. Existing installs can keep their environment variables, but the defaults now match MinIO's `data/.system` pattern for easier bind-mounting.
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`.
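The resulting layout looks roughly like this (inferred from the default paths above):

```
data/
├── my-bucket/            # objects live directly under each bucket folder
└── .myfsio.sys/          # system metadata, never exposed as a bucket
    ├── config/           # iam.json, bucket_policies.json, connections.json, ...
    ├── keys/             # master.key, kms_keys.json (when encryption/KMS is enabled)
    └── ...               # archived versions, multipart manifests, stats caches
```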
## API Cheatsheet (IAM headers required)

View File

@@ -2,13 +2,14 @@
from __future__ import annotations
import logging
import shutil
import sys
import time
import uuid
from logging.handlers import RotatingFileHandler
from pathlib import Path
from datetime import timedelta
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional
from flask import Flask, g, has_request_context, redirect, render_template, request, url_for
from flask_cors import CORS
@@ -28,6 +29,33 @@ from .storage import ObjectStorage
from .version import get_version
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
"""Migrate config file from legacy locations to the active path.
Checks each legacy path in order and moves the first one found to the active path.
This ensures backward compatibility for users upgrading from older versions.
"""
active_path.parent.mkdir(parents=True, exist_ok=True)
if active_path.exists():
return active_path
for legacy_path in legacy_paths:
if legacy_path.exists():
try:
shutil.move(str(legacy_path), str(active_path))
except OSError:
# Fall back to copy + delete if move fails (e.g., cross-device)
shutil.copy2(legacy_path, active_path)
try:
legacy_path.unlink(missing_ok=True)
except OSError:
pass
break
return active_path
def create_app(
test_config: Optional[Dict[str, Any]] = None,
*,
@@ -74,8 +102,26 @@ def create_app(
secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))
# Initialize Replication components
connections_path = Path(app.config["STORAGE_ROOT"]) / ".connections.json"
replication_rules_path = Path(app.config["STORAGE_ROOT"]) / ".replication_rules.json"
# Store config files in the system config directory for consistency
storage_root = Path(app.config["STORAGE_ROOT"])
config_dir = storage_root / ".myfsio.sys" / "config"
config_dir.mkdir(parents=True, exist_ok=True)
# Define paths with migration from legacy locations
connections_path = _migrate_config_file(
active_path=config_dir / "connections.json",
legacy_paths=[
storage_root / ".myfsio.sys" / "connections.json", # Previous location
storage_root / ".connections.json", # Original legacy location
],
)
replication_rules_path = _migrate_config_file(
active_path=config_dir / "replication_rules.json",
legacy_paths=[
storage_root / ".myfsio.sys" / "replication_rules.json", # Previous location
storage_root / ".replication_rules.json", # Original legacy location
],
)
connections = ConnectionStore(connections_path)
replication = ReplicationManager(storage, connections, replication_rules_path)
@@ -185,14 +231,12 @@ def create_ui_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
def _configure_cors(app: Flask) -> None:
origins = app.config.get("CORS_ORIGINS", ["*"])
methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS"])
allow_headers = app.config.get(
"CORS_ALLOW_HEADERS",
["Content-Type", "X-Access-Key", "X-Secret-Key", "X-Amz-Date", "X-Amz-SignedHeaders"],
)
methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
allow_headers = app.config.get("CORS_ALLOW_HEADERS", ["*"])
expose_headers = app.config.get("CORS_EXPOSE_HEADERS", ["*"])
CORS(
app,
resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers}},
resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers, "expose_headers": expose_headers}},
supports_credentials=True,
)

View File

@@ -50,6 +50,7 @@ class AppConfig:
aws_service: str
ui_enforce_bucket_policies: bool
log_level: str
log_to_file: bool
log_path: Path
log_max_bytes: int
log_backup_count: int
@@ -58,6 +59,7 @@ class AppConfig:
cors_origins: list[str]
cors_methods: list[str]
cors_allow_headers: list[str]
cors_expose_headers: list[str]
session_lifetime_days: int
auth_max_attempts: int
auth_lockout_minutes: int
@@ -109,19 +111,19 @@ class AppConfig:
iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
bucket_policy_override = "BUCKET_POLICY_PATH" in overrides or "BUCKET_POLICY_PATH" in os.environ
default_iam_path = PROJECT_ROOT / "data" / ".myfsio.sys" / "config" / "iam.json"
default_bucket_policy_path = PROJECT_ROOT / "data" / ".myfsio.sys" / "config" / "bucket_policies.json"
default_iam_path = storage_root / ".myfsio.sys" / "config" / "iam.json"
default_bucket_policy_path = storage_root / ".myfsio.sys" / "config" / "bucket_policies.json"
iam_config_path = Path(_get("IAM_CONFIG", default_iam_path)).resolve()
bucket_policy_path = Path(_get("BUCKET_POLICY_PATH", default_bucket_policy_path)).resolve()
iam_config_path = _prepare_config_file(
iam_config_path,
legacy_path=None if iam_env_override else PROJECT_ROOT / "data" / "iam.json",
legacy_path=None if iam_env_override else storage_root / "iam.json",
)
bucket_policy_path = _prepare_config_file(
bucket_policy_path,
legacy_path=None if bucket_policy_override else PROJECT_ROOT / "data" / "bucket_policies.json",
legacy_path=None if bucket_policy_override else storage_root / "bucket_policies.json",
)
api_base_url = _get("API_BASE_URL", None)
if api_base_url:
@@ -131,7 +133,8 @@ class AppConfig:
aws_service = str(_get("AWS_SERVICE", "s3"))
enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
log_level = str(_get("LOG_LEVEL", "INFO")).upper()
log_dir = Path(_get("LOG_DIR", PROJECT_ROOT / "logs")).resolve()
log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
log_dir = Path(_get("LOG_DIR", storage_root.parent / "logs")).resolve()
log_dir.mkdir(parents=True, exist_ok=True)
log_path = log_dir / str(_get("LOG_FILE", "app.log"))
log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
@@ -146,18 +149,9 @@ class AppConfig:
return parts or default
cors_origins = _csv(str(_get("CORS_ORIGINS", "*")), ["*"])
cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS")), ["GET", "PUT", "POST", "DELETE", "OPTIONS"])
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "Content-Type,X-Access-Key,X-Secret-Key,X-Amz-Algorithm,X-Amz-Credential,X-Amz-Date,X-Amz-Expires,X-Amz-SignedHeaders,X-Amz-Signature")), [
"Content-Type",
"X-Access-Key",
"X-Secret-Key",
"X-Amz-Algorithm",
"X-Amz-Credential",
"X-Amz-Date",
"X-Amz-Expires",
"X-Amz-SignedHeaders",
"X-Amz-Signature",
])
cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD")), ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60)) # Default 60 seconds
@@ -180,6 +174,7 @@ class AppConfig:
aws_service=aws_service,
ui_enforce_bucket_policies=enforce_ui_policies,
log_level=log_level,
log_to_file=log_to_file,
log_path=log_path,
log_max_bytes=log_max_bytes,
log_backup_count=log_backup_count,
@@ -188,6 +183,7 @@ class AppConfig:
cors_origins=cors_origins,
cors_methods=cors_methods,
cors_allow_headers=cors_allow_headers,
cors_expose_headers=cors_expose_headers,
session_lifetime_days=session_lifetime_days,
auth_max_attempts=auth_max_attempts,
auth_lockout_minutes=auth_lockout_minutes,
@@ -202,6 +198,102 @@ class AppConfig:
kms_keys_path=kms_keys_path,
default_encryption_algorithm=default_encryption_algorithm)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
Call this at startup to detect potential misconfigurations before
the application fully commits to running.
"""
issues = []
# Check if storage_root is writable
try:
test_file = self.storage_root / ".write_test"
test_file.touch()
test_file.unlink()
except (OSError, PermissionError) as e:
issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")
# Check if storage_root looks like a temp directory
storage_str = str(self.storage_root).lower()
if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")
# Check if IAM config path is under storage_root
try:
self.iam_config_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")
# Check if bucket policy path is under storage_root
try:
self.bucket_policy_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")
# Check if log path is writable
try:
self.log_path.parent.mkdir(parents=True, exist_ok=True)
test_log = self.log_path.parent / ".write_test"
test_log.touch()
test_log.unlink()
except (OSError, PermissionError) as e:
issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")
# Check log path location
log_str = str(self.log_path).lower()
if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")
# Check if encryption keys path is under storage_root (when encryption is enabled)
if self.encryption_enabled:
try:
self.encryption_master_key_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
# Check if KMS keys path is under storage_root (when KMS is enabled)
if self.kms_enabled:
try:
self.kms_keys_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
# Warn about production settings
if self.secret_key == "dev-secret-key":
issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")
if "*" in self.cors_origins:
issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")
return issues
def print_startup_summary(self) -> None:
"""Print a summary of the configuration at startup."""
print("\n" + "=" * 60)
print("MyFSIO Configuration Summary")
print("=" * 60)
print(f" STORAGE_ROOT: {self.storage_root}")
print(f" IAM_CONFIG: {self.iam_config_path}")
print(f" BUCKET_POLICY: {self.bucket_policy_path}")
print(f" LOG_PATH: {self.log_path}")
if self.api_base_url:
print(f" API_BASE_URL: {self.api_base_url}")
if self.encryption_enabled:
print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
if self.kms_enabled:
print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
print("=" * 60)
issues = self.validate_and_report()
if issues:
print("\nConfiguration Issues Detected:")
for issue in issues:
print(f"{issue}")
print()
else:
print(" ✓ Configuration validated successfully\n")
def to_flask_config(self) -> Dict[str, Any]:
return {
"STORAGE_ROOT": str(self.storage_root),
@@ -222,6 +314,7 @@ class AppConfig:
"MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
"BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
"LOG_LEVEL": self.log_level,
"LOG_TO_FILE": self.log_to_file,
"LOG_FILE": str(self.log_path),
"LOG_MAX_BYTES": self.log_max_bytes,
"LOG_BACKUP_COUNT": self.log_backup_count,
@@ -230,6 +323,7 @@ class AppConfig:
"CORS_ORIGINS": self.cors_origins,
"CORS_METHODS": self.cors_methods,
"CORS_ALLOW_HEADERS": self.cors_allow_headers,
"CORS_EXPOSE_HEADERS": self.cors_expose_headers,
"SESSION_LIFETIME_DAYS": self.session_lifetime_days,
"ENCRYPTION_ENABLED": self.encryption_enabled,
"ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),

View File

@@ -188,8 +188,11 @@ class EncryptedObjectStorage:
def bucket_stats(self, bucket_name: str, cache_ttl: int = 60):
return self.storage.bucket_stats(bucket_name, cache_ttl)
def list_objects(self, bucket_name: str):
return self.storage.list_objects(bucket_name)
def list_objects(self, bucket_name: str, **kwargs):
return self.storage.list_objects(bucket_name, **kwargs)
def list_objects_all(self, bucket_name: str):
return self.storage.list_objects_all(bucket_name)
def get_object_path(self, bucket_name: str, object_key: str):
return self.storage.get_object_path(bucket_name, object_key)
@@ -266,5 +269,11 @@ class EncryptedObjectStorage:
def list_multipart_parts(self, bucket_name: str, upload_id: str):
return self.storage.list_multipart_parts(bucket_name, upload_id)
def get_bucket_quota(self, bucket_name: str):
return self.storage.get_bucket_quota(bucket_name)
def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
def _compute_etag(self, path: Path) -> str:
return self.storage._compute_etag(path)

View File

@@ -129,6 +129,25 @@ class EntityTooLargeError(AppError):
status_code: int = 413
@dataclass
class QuotaExceededAppError(AppError):
"""Bucket quota exceeded."""
code: str = "QuotaExceeded"
message: str = "The bucket quota has been exceeded"
status_code: int = 403
quota: Optional[Dict[str, Any]] = None
usage: Optional[Dict[str, int]] = None
def __post_init__(self):
if self.quota or self.usage:
self.details = {}
if self.quota:
self.details["quota"] = self.quota
if self.usage:
self.details["usage"] = self.usage
super().__post_init__()
def handle_app_error(error: AppError) -> Response:
"""Handle application errors with appropriate response format."""
log_extra = {"error_code": error.code}
@@ -163,5 +182,6 @@ def register_error_handlers(app):
ObjectNotFoundError, InvalidObjectKeyError,
AccessDeniedError, InvalidCredentialsError,
MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
QuotaExceededAppError,
]:
app.register_error_handler(error_class, handle_app_error)

View File

@@ -155,7 +155,7 @@ class ReplicationManager:
try:
# Get source objects
source_objects = self.storage.list_objects(bucket_name)
source_objects = self.storage.list_objects_all(bucket_name)
source_keys = {obj.key: obj.size for obj in source_objects}
# Get destination objects
@@ -219,7 +219,7 @@ class ReplicationManager:
return
try:
objects = self.storage.list_objects(bucket_name)
objects = self.storage.list_objects_all(bucket_name)
logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
for obj in objects:
self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")

View File

@@ -18,7 +18,7 @@ from .bucket_policies import BucketPolicyStore
from .extensions import limiter
from .iam import IamError, Principal
from .replication import ReplicationManager
from .storage import ObjectStorage, StorageError
from .storage import ObjectStorage, StorageError, QuotaExceededError
s3_api_bp = Blueprint("s3_api", __name__)
@@ -803,6 +803,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
"acl": _bucket_acl_handler,
"versions": _bucket_list_versions_handler,
"lifecycle": _bucket_lifecycle_handler,
"quota": _bucket_quota_handler,
}
requested = [key for key in handlers if key in request.args]
if not requested:
@@ -1154,7 +1155,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
storage = _storage()
try:
objects = storage.list_objects(bucket_name)
objects = storage.list_objects_all(bucket_name)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
@@ -1400,6 +1401,87 @@ def _parse_lifecycle_config(payload: bytes) -> list:
return rules
def _bucket_quota_handler(bucket_name: str) -> Response:
"""Handle bucket quota configuration (GET/PUT/DELETE /<bucket>?quota)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
if not storage.bucket_exists(bucket_name):
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
if request.method == "GET":
quota = storage.get_bucket_quota(bucket_name)
if not quota or (quota.get("max_bytes") is None and quota.get("max_objects") is None):
return _error_response("NoSuchQuotaConfiguration", "No quota configuration found", 404)
# Return as JSON for simplicity (not a standard S3 API)
stats = storage.bucket_stats(bucket_name)
return jsonify({
"quota": quota,
"usage": {
"bytes": stats.get("bytes", 0),
"objects": stats.get("objects", 0),
}
})
if request.method == "DELETE":
try:
storage.set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
current_app.logger.info("Bucket quota deleted", extra={"bucket": bucket_name})
return Response(status=204)
# PUT
payload = request.get_json(silent=True)
if not payload:
return _error_response("MalformedRequest", "Request body must be JSON with quota limits", 400)
max_size_bytes = payload.get("max_size_bytes")
max_objects = payload.get("max_objects")
if max_size_bytes is None and max_objects is None:
return _error_response("InvalidArgument", "At least one of max_size_bytes or max_objects is required", 400)
# Validate types
if max_size_bytes is not None:
try:
max_size_bytes = int(max_size_bytes)
if max_size_bytes < 0:
raise ValueError("must be non-negative")
except (TypeError, ValueError) as exc:
return _error_response("InvalidArgument", f"max_size_bytes {exc}", 400)
if max_objects is not None:
try:
max_objects = int(max_objects)
if max_objects < 0:
raise ValueError("must be non-negative")
except (TypeError, ValueError) as exc:
return _error_response("InvalidArgument", f"max_objects {exc}", 400)
try:
storage.set_bucket_quota(bucket_name, max_bytes=max_size_bytes, max_objects=max_objects)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
current_app.logger.info(
"Bucket quota updated",
extra={"bucket": bucket_name, "max_size_bytes": max_size_bytes, "max_objects": max_objects}
)
return Response(status=204)
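# Usage sketch: driving the quota subresource from a client. The host and the
# X-Access-Key / X-Secret-Key headers are assumptions based on the README, and
# the credentials are placeholders.
#
#   import requests
#
#   base = "http://127.0.0.1:5000"
#   auth = {"X-Access-Key": "AKIA-EXAMPLE", "X-Secret-Key": "example-secret"}
#   # PUT /<bucket>?quota sets limits from a JSON body (204 on success)
#   requests.put(f"{base}/my-bucket", params={"quota": ""}, headers=auth,
#                json={"max_size_bytes": 100 * 1024 * 1024, "max_objects": 1000})
#   # GET /<bucket>?quota returns the configured limits plus current usage
#   print(requests.get(f"{base}/my-bucket", params={"quota": ""}, headers=auth).json())
#   # DELETE /<bucket>?quota removes the configuration (204 on success)
#   requests.delete(f"{base}/my-bucket", params={"quota": ""}, headers=auth)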
def _bulk_delete_handler(bucket_name: str) -> Response:
principal, error = _require_principal()
if error:
@@ -1569,7 +1651,7 @@ def bucket_handler(bucket_name: str) -> Response:
return error
return _error_response("AccessDenied", str(exc), 403)
try:
objects = storage.list_objects(bucket_name)
objects = storage.list_objects_all(bucket_name)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
@@ -1749,6 +1831,8 @@ def object_handler(bucket_name: str, object_key: str):
stream,
metadata=metadata or None,
)
except QuotaExceededError as exc:
return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
message = str(exc)
if "Bucket" in message:
@@ -2256,6 +2340,8 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
try:
meta = _storage().complete_multipart_upload(bucket_name, upload_id, parts)
except QuotaExceededError as exc:
return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
if "NoSuchBucket" in str(exc):
return _error_response("NoSuchBucket", str(exc), 404)

View File

@@ -75,6 +75,15 @@ class StorageError(RuntimeError):
"""Raised when the storage layer encounters an unrecoverable problem."""
class QuotaExceededError(StorageError):
"""Raised when an operation would exceed bucket quota limits."""
def __init__(self, message: str, quota: Dict[str, Any], usage: Dict[str, int]):
super().__init__(message)
self.quota = quota
self.usage = usage
@dataclass
class ObjectMeta:
key: str
@@ -90,6 +99,15 @@ class BucketMeta:
created_at: datetime
@dataclass
class ListObjectsResult:
"""Paginated result for object listing."""
objects: List[ObjectMeta]
is_truncated: bool
next_continuation_token: Optional[str]
total_count: Optional[int] = None # Total objects in bucket (from stats cache)
def _utcnow() -> datetime:
return datetime.now(timezone.utc)
@@ -169,16 +187,38 @@ class ObjectStorage:
object_count = 0
total_bytes = 0
version_count = 0
version_bytes = 0
# Count current objects in the bucket folder
for path in bucket_path.rglob("*"):
if path.is_file():
rel = path.relative_to(bucket_path)
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
if not rel.parts:
continue
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
top_folder = rel.parts[0]
if top_folder not in self.INTERNAL_FOLDERS:
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
stats = {"objects": object_count, "bytes": total_bytes}
# Count archived versions in the system folder
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
for path in versions_root.rglob("*.bin"):
if path.is_file():
stat = path.stat()
version_count += 1
version_bytes += stat.st_size
stats = {
"objects": object_count,
"bytes": total_bytes,
"version_count": version_count,
"version_bytes": version_bytes,
"total_objects": object_count + version_count, # All objects including versions
"total_bytes": total_bytes + version_bytes, # All storage including versions
}
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
@@ -210,31 +250,105 @@ class ObjectStorage:
self._remove_tree(self._system_bucket_root(bucket_path.name))
self._remove_tree(self._multipart_bucket_root(bucket_path.name))
def list_objects(self, bucket_name: str) -> List[ObjectMeta]:
def list_objects(
self,
bucket_name: str,
*,
max_keys: int = 1000,
continuation_token: Optional[str] = None,
prefix: Optional[str] = None,
) -> ListObjectsResult:
"""List objects in a bucket with pagination support.
Args:
bucket_name: Name of the bucket
max_keys: Maximum number of objects to return (default 1000)
continuation_token: Token from previous request for pagination
prefix: Filter objects by key prefix
Returns:
ListObjectsResult with objects, truncation status, and continuation token
"""
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
raise StorageError("Bucket does not exist")
bucket_id = bucket_path.name
objects: List[ObjectMeta] = []
# Collect all matching object keys first (lightweight - just paths)
all_keys: List[str] = []
for path in bucket_path.rglob("*"):
if path.is_file():
stat = path.stat()
rel = path.relative_to(bucket_path)
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
continue
metadata = self._read_metadata(bucket_id, rel)
key = str(rel.as_posix())
if prefix and not key.startswith(prefix):
continue
all_keys.append(key)
all_keys.sort()
total_count = len(all_keys)
# Handle continuation token (the key to start after)
start_index = 0
if continuation_token:
try:
# continuation_token is the last key from previous page
for i, key in enumerate(all_keys):
if key > continuation_token:
start_index = i
break
else:
# Token is past all keys
return ListObjectsResult(
objects=[],
is_truncated=False,
next_continuation_token=None,
total_count=total_count,
)
except Exception:
pass # Invalid token, start from beginning
# Get the slice we need
end_index = start_index + max_keys
keys_slice = all_keys[start_index:end_index]
is_truncated = end_index < total_count
# Now load full metadata only for the objects we're returning
objects: List[ObjectMeta] = []
for key in keys_slice:
safe_key = self._sanitize_object_key(key)
path = bucket_path / safe_key
if not path.exists():
continue # Object may have been deleted
try:
stat = path.stat()
metadata = self._read_metadata(bucket_id, safe_key)
objects.append(
ObjectMeta(
key=str(rel.as_posix()),
key=key,
size=stat.st_size,
last_modified=datetime.fromtimestamp(stat.st_mtime),
etag=self._compute_etag(path),
metadata=metadata or None,
)
)
objects.sort(key=lambda meta: meta.key)
return objects
except OSError:
continue # File may have been deleted during iteration
next_token = keys_slice[-1] if is_truncated and keys_slice else None
return ListObjectsResult(
objects=objects,
is_truncated=is_truncated,
next_continuation_token=next_token,
total_count=total_count,
)
def list_objects_all(self, bucket_name: str) -> List[ObjectMeta]:
"""List all objects in a bucket (no pagination). Use with caution for large buckets."""
result = self.list_objects(bucket_name, max_keys=100000)
return result.objects
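# Usage sketch: walking a large bucket page by page with the pagination API
# above; `storage` is assumed to be an already-constructed ObjectStorage.
#
#   token = None
#   while True:
#       page = storage.list_objects("my-bucket", max_keys=1000,
#                                   continuation_token=token)
#       for meta in page.objects:
#           print(meta.key, meta.size)
#       if not page.is_truncated:
#           break
#       token = page.next_continuation_token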
def put_object(
self,
@@ -243,6 +357,7 @@ class ObjectStorage:
stream: BinaryIO,
*,
metadata: Optional[Dict[str, str]] = None,
enforce_quota: bool = True,
) -> ObjectMeta:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
@@ -253,12 +368,52 @@ class ObjectStorage:
destination = bucket_path / safe_key
destination.parent.mkdir(parents=True, exist_ok=True)
if self._is_versioning_enabled(bucket_path) and destination.exists():
# Check if this is an overwrite (won't add to object count)
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
if self._is_versioning_enabled(bucket_path) and is_overwrite:
self._archive_current_version(bucket_id, safe_key, reason="overwrite")
checksum = hashlib.md5()
with destination.open("wb") as target:
shutil.copyfileobj(_HashingReader(stream, checksum), target)
# Write to temp file first to get actual size
tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
tmp_dir.mkdir(parents=True, exist_ok=True)
tmp_path = tmp_dir / f"{uuid.uuid4().hex}.tmp"
try:
checksum = hashlib.md5()
with tmp_path.open("wb") as target:
shutil.copyfileobj(_HashingReader(stream, checksum), target)
new_size = tmp_path.stat().st_size
# Check quota before finalizing
if enforce_quota:
# Calculate net change (new size minus size being replaced)
size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
additional_objects=object_delta,
)
if not quota_check["allowed"]:
raise QuotaExceededError(
quota_check["message"] or "Quota exceeded",
quota_check["quota"],
quota_check["usage"],
)
# Move to final destination
shutil.move(str(tmp_path), str(destination))
finally:
# Clean up temp file if it still exists
try:
tmp_path.unlink(missing_ok=True)
except OSError:
pass
stat = destination.stat()
if metadata:
@@ -424,6 +579,124 @@ class ObjectStorage:
bucket_path = self._require_bucket_path(bucket_name)
self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules)
def get_bucket_quota(self, bucket_name: str) -> Dict[str, Any]:
"""Get quota configuration for bucket.
Returns:
Dict with 'max_bytes' and 'max_objects' (None if unlimited).
"""
bucket_path = self._require_bucket_path(bucket_name)
config = self._read_bucket_config(bucket_path.name)
quota = config.get("quota")
if isinstance(quota, dict):
return {
"max_bytes": quota.get("max_bytes"),
"max_objects": quota.get("max_objects"),
}
return {"max_bytes": None, "max_objects": None}
def set_bucket_quota(
self,
bucket_name: str,
*,
max_bytes: Optional[int] = None,
max_objects: Optional[int] = None,
) -> None:
"""Set quota limits for a bucket.
Args:
bucket_name: Name of the bucket
max_bytes: Maximum total size in bytes (None to remove limit)
max_objects: Maximum number of objects (None to remove limit)
"""
bucket_path = self._require_bucket_path(bucket_name)
if max_bytes is None and max_objects is None:
# Remove quota entirely
self._set_bucket_config_entry(bucket_path.name, "quota", None)
return
quota: Dict[str, Any] = {}
if max_bytes is not None:
if max_bytes < 0:
raise StorageError("max_bytes must be non-negative")
quota["max_bytes"] = max_bytes
if max_objects is not None:
if max_objects < 0:
raise StorageError("max_objects must be non-negative")
quota["max_objects"] = max_objects
self._set_bucket_config_entry(bucket_path.name, "quota", quota)
def check_quota(
self,
bucket_name: str,
additional_bytes: int = 0,
additional_objects: int = 0,
) -> Dict[str, Any]:
"""Check if an operation would exceed bucket quota.
Args:
bucket_name: Name of the bucket
additional_bytes: Bytes that would be added
additional_objects: Objects that would be added
Returns:
Dict with 'allowed' (bool), 'quota' (current limits),
'usage' (current usage), and 'message' (if not allowed).
"""
quota = self.get_bucket_quota(bucket_name)
if quota.get("max_bytes") is None and quota.get("max_objects") is None:  # no limits configured
return {
"allowed": True,
"quota": None,
"usage": None,
"message": None,
}
# Get current stats (uses cache when available)
stats = self.bucket_stats(bucket_name)
# Use totals which include versions for quota enforcement
current_bytes = stats.get("total_bytes", stats.get("bytes", 0))
current_objects = stats.get("total_objects", stats.get("objects", 0))
result = {
"allowed": True,
"quota": quota,
"usage": {
"bytes": current_bytes,
"objects": current_objects,
"version_count": stats.get("version_count", 0),
"version_bytes": stats.get("version_bytes", 0),
},
"message": None,
}
max_bytes_limit = quota.get("max_bytes")
max_objects = quota.get("max_objects")
if max_bytes_limit is not None:
projected_bytes = current_bytes + additional_bytes
if projected_bytes > max_bytes_limit:
result["allowed"] = False
result["message"] = (
f"Quota exceeded: adding {additional_bytes} bytes would result in "
f"{projected_bytes} bytes, exceeding limit of {max_bytes_limit} bytes"
)
return result
if max_objects is not None:
projected_objects = current_objects + additional_objects
if projected_objects > max_objects:
result["allowed"] = False
result["message"] = (
f"Quota exceeded: adding {additional_objects} objects would result in "
f"{projected_objects} objects, exceeding limit of {max_objects} objects"
)
return result
return result
def get_object_tags(self, bucket_name: str, object_key: str) -> List[Dict[str, str]]:
"""Get tags for an object."""
bucket_path = self._bucket_path(bucket_name)
@@ -540,6 +813,7 @@ class ObjectStorage:
else:
self._delete_metadata(bucket_id, safe_key)
stat = destination.stat()
self._invalidate_bucket_stats_cache(bucket_id)
return ObjectMeta(
key=safe_key.as_posix(),
size=stat.st_size,
@@ -688,6 +962,7 @@ class ObjectStorage:
bucket_name: str,
upload_id: str,
ordered_parts: List[Dict[str, Any]],
enforce_quota: bool = True,
) -> ObjectMeta:
if not ordered_parts:
raise StorageError("parts list required")
@@ -698,6 +973,7 @@ class ObjectStorage:
if not parts_map:
raise StorageError("No uploaded parts found")
validated: List[tuple[int, Dict[str, Any]]] = []
total_size = 0
for part in ordered_parts:
raw_number = part.get("part_number")
if raw_number is None:
@@ -717,10 +993,33 @@ class ObjectStorage:
if supplied_etag and record.get("etag") and supplied_etag.strip('"') != record["etag"]:
raise StorageError(f"ETag mismatch for part {number}")
validated.append((number, record))
total_size += record.get("size", 0)
validated.sort(key=lambda entry: entry[0])
safe_key = self._sanitize_object_key(manifest["object_key"])
destination = bucket_path / safe_key
# Check if this is an overwrite
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
# Check quota before writing
if enforce_quota:
size_delta = total_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
additional_objects=object_delta,
)
if not quota_check["allowed"]:
raise QuotaExceededError(
quota_check["message"] or "Quota exceeded",
quota_check["quota"],
quota_check["usage"],
)
destination.parent.mkdir(parents=True, exist_ok=True)
lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"

app/ui.py (171 changes)
View File

@@ -6,7 +6,7 @@ import uuid
import psutil
import shutil
from typing import Any
from urllib.parse import urlparse
from urllib.parse import quote, urlparse
import boto3
import requests
@@ -260,9 +260,9 @@ def buckets_overview():
visible_buckets.append({
"meta": bucket,
"summary": {
"objects": stats["objects"],
"total_bytes": stats["bytes"],
"human_size": _format_bytes(stats["bytes"]),
"objects": stats["total_objects"],
"total_bytes": stats["total_bytes"],
"human_size": _format_bytes(stats["total_bytes"]),
},
"access_label": access_label,
"access_badge": access_badge,
@@ -294,7 +294,9 @@ def bucket_detail(bucket_name: str):
storage = _storage()
try:
_authorize_ui(principal, bucket_name, "list")
objects = storage.list_objects(bucket_name)
# Don't load objects here - UI fetches them asynchronously via /buckets/<name>/objects
if not storage.bucket_exists(bucket_name):
raise StorageError("Bucket does not exist")
except (StorageError, IamError) as exc:
flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.buckets_overview"))
@@ -372,10 +374,23 @@ def bucket_detail(bucket_name: str):
encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
can_manage_encryption = can_manage_versioning # Same as other bucket properties
# Quota settings (admin only)
bucket_quota = storage.get_bucket_quota(bucket_name)
bucket_stats = storage.bucket_stats(bucket_name)
can_manage_quota = False
try:
_iam().authorize(principal, None, "iam:list_users")
can_manage_quota = True
except IamError:
pass
# Pass the objects API endpoint URL for async loading
objects_api_url = url_for("ui.list_bucket_objects", bucket_name=bucket_name)
return render_template(
"bucket_detail.html",
bucket_name=bucket_name,
objects=objects,
objects_api_url=objects_api_url,
principal=principal,
bucket_policy_text=policy_text,
bucket_policy=bucket_policy,
@@ -392,9 +407,67 @@ def bucket_detail(bucket_name: str):
kms_keys=kms_keys,
kms_enabled=kms_enabled,
encryption_enabled=encryption_enabled,
bucket_quota=bucket_quota,
bucket_stats=bucket_stats,
can_manage_quota=can_manage_quota,
)
@ui_bp.get("/buckets/<bucket_name>/objects")
def list_bucket_objects(bucket_name: str):
"""API endpoint for paginated object listing."""
principal = _current_principal()
storage = _storage()
try:
_authorize_ui(principal, bucket_name, "list")
except IamError as exc:
return jsonify({"error": str(exc)}), 403
max_keys = min(int(request.args.get("max_keys", 100)), 1000)
continuation_token = request.args.get("continuation_token") or None
prefix = request.args.get("prefix") or None
try:
result = storage.list_objects(
bucket_name,
max_keys=max_keys,
continuation_token=continuation_token,
prefix=prefix,
)
except StorageError as exc:
return jsonify({"error": str(exc)}), 400
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
objects_data = []
for obj in result.objects:
objects_data.append({
"key": obj.key,
"size": obj.size,
"last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
"etag": obj.etag,
"metadata": obj.metadata or {},
"preview_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key),
"download_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key) + "?download=1",
"presign_endpoint": url_for("ui.object_presign", bucket_name=bucket_name, object_key=obj.key),
"delete_endpoint": url_for("ui.delete_object", bucket_name=bucket_name, object_key=obj.key),
"versions_endpoint": url_for("ui.object_versions", bucket_name=bucket_name, object_key=obj.key),
"restore_template": url_for("ui.restore_object_version", bucket_name=bucket_name, object_key=obj.key, version_id="VERSION_ID_PLACEHOLDER"),
})
return jsonify({
"objects": objects_data,
"is_truncated": result.is_truncated,
"next_continuation_token": result.next_continuation_token,
"total_count": result.total_count,
"versioning_enabled": versioning_enabled,
})
@ui_bp.post("/buckets/<bucket_name>/upload")
@limiter.limit("30 per minute")
def upload_object(bucket_name: str):
@@ -783,7 +856,8 @@ def object_presign(bucket_name: str, object_key: str):
api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
api_base = api_base.rstrip("/")
url = f"{api_base}/presign/{bucket_name}/{object_key}"
encoded_key = quote(object_key, safe="/")
url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
# Use API base URL for forwarded headers so presigned URLs point to API, not UI
parsed_api = urlparse(api_base)
@@ -925,6 +999,71 @@ def update_bucket_versioning(bucket_name: str):
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
@ui_bp.post("/buckets/<bucket_name>/quota")
def update_bucket_quota(bucket_name: str):
"""Update bucket quota configuration (admin only)."""
principal = _current_principal()
# Quota management is admin-only
is_admin = False
try:
_iam().authorize(principal, None, "iam:list_users")
is_admin = True
except IamError:
pass
if not is_admin:
flash("Only administrators can manage bucket quotas", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
action = request.form.get("action", "set")
if action == "remove":
try:
_storage().set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
flash("Bucket quota removed", "info")
except StorageError as exc:
flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
# Parse quota values
max_mb_str = request.form.get("max_mb", "").strip()
max_objects_str = request.form.get("max_objects", "").strip()
max_bytes = None
max_objects = None
if max_mb_str:
try:
max_mb = int(max_mb_str)
if max_mb < 1:
raise ValueError("Size must be at least 1 MB")
max_bytes = max_mb * 1024 * 1024 # Convert MB to bytes
except ValueError as exc:
flash(f"Invalid size value: {exc}", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
if max_objects_str:
try:
max_objects = int(max_objects_str)
if max_objects < 0:
raise ValueError("Object count must be non-negative")
except ValueError as exc:
flash(f"Invalid object count: {exc}", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
try:
_storage().set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
if max_bytes is None and max_objects is None:
flash("Bucket quota removed", "info")
else:
flash("Bucket quota updated", "success")
except StorageError as exc:
flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
@ui_bp.post("/buckets/<bucket_name>/encryption")
def update_bucket_encryption(bucket_name: str):
"""Update bucket default encryption configuration."""
@@ -1426,6 +1565,9 @@ def metrics_dashboard():
flash("Access denied: Metrics require admin permissions", "danger")
return redirect(url_for("ui.buckets_overview"))
from app.version import APP_VERSION
import time
cpu_percent = psutil.cpu_percent(interval=0.1)
memory = psutil.virtual_memory()
@@ -1438,13 +1580,21 @@ def metrics_dashboard():
total_objects = 0
total_bytes_used = 0
total_versions = 0
# Note: Uses cached stats from storage layer to improve performance
cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60)
for bucket in buckets:
stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl)
total_objects += stats["objects"]
total_bytes_used += stats["bytes"]
# Use totals which include archived versions
total_objects += stats.get("total_objects", stats.get("objects", 0))
total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
total_versions += stats.get("version_count", 0)
# Calculate system uptime
boot_time = psutil.boot_time()
uptime_seconds = time.time() - boot_time
uptime_days = int(uptime_seconds / 86400)
return render_template(
"metrics.html",
@@ -1465,8 +1615,11 @@ def metrics_dashboard():
app={
"buckets": total_buckets,
"objects": total_objects,
"versions": total_versions,
"storage_used": _format_bytes(total_bytes_used),
"storage_raw": total_bytes_used,
"version": APP_VERSION,
"uptime_days": uptime_days,
}
)

View File

@@ -1,7 +1,7 @@
"""Central location for the application version string."""
from __future__ import annotations
APP_VERSION = "0.1.3"
APP_VERSION = "0.1.7"
def get_version() -> str:

docs.md (601 changes)
View File

@@ -33,6 +33,63 @@ python run.py --mode api # API only (port 5000)
python run.py --mode ui # UI only (port 5100)
```
### Configuration validation
Validate your configuration before deploying:
```bash
# Show configuration summary
python run.py --show-config
./myfsio --show-config
# Validate and check for issues (exits with code 1 if critical issues found)
python run.py --check-config
./myfsio --check-config
```
### Linux Installation (Recommended for Production)
For production deployments on Linux, use the provided installation script:
```bash
# Download the binary and install script
# Then run the installer with sudo:
sudo ./scripts/install.sh --binary ./myfsio
# Or with custom paths:
sudo ./scripts/install.sh \
--binary ./myfsio \
--install-dir /opt/myfsio \
--data-dir /mnt/storage/myfsio \
--log-dir /var/log/myfsio \
--api-url https://s3.example.com \
--user myfsio
# Non-interactive mode (for automation):
sudo ./scripts/install.sh --binary ./myfsio -y
```
The installer will:
1. Create a dedicated system user
2. Set up directories with proper permissions
3. Generate a secure `SECRET_KEY`
4. Create an environment file at `/opt/myfsio/myfsio.env`
5. Install and configure a systemd service
After installation:
```bash
sudo systemctl start myfsio # Start the service
sudo systemctl enable myfsio # Enable on boot
sudo systemctl status myfsio # Check status
sudo journalctl -u myfsio -f # View logs
```
To uninstall:
```bash
sudo ./scripts/uninstall.sh # Full removal
sudo ./scripts/uninstall.sh --keep-data # Keep data directory
```
### Docker quickstart
The repo now ships a `Dockerfile` so you can run both services in one container:
@@ -69,23 +126,97 @@ The repo now tracks a human-friendly release string inside `app/version.py` (see
## 3. Configuration Reference
All configuration is done via environment variables. The table below lists every supported variable.
### Core Settings
| Variable | Default | Notes |
| --- | --- | --- |
| `STORAGE_ROOT` | `<repo>/data` | Filesystem home for all buckets/objects. |
| `MAX_UPLOAD_SIZE` | `1073741824` | Bytes. Caps incoming uploads in both API + UI. |
| `MAX_UPLOAD_SIZE` | `1073741824` (1 GiB) | Bytes. Caps incoming uploads in both API + UI. |
| `UI_PAGE_SIZE` | `100` | `MaxKeys` hint shown in listings. |
| `SECRET_KEY` | `dev-secret-key` | Flask session key for UI auth. |
| `IAM_CONFIG` | `<repo>/data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
| `BUCKET_POLICY_PATH` | `<repo>/data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
| `API_BASE_URL` | `None` | Used by the UI to hit API endpoints (presign/policy). If unset, the UI will auto-detect the host or use `X-Forwarded-*` headers. |
| `SECRET_KEY` | Auto-generated | Flask session key. Auto-generates and persists if not set. **Set explicitly in production.** |
| `API_BASE_URL` | `None` | Public URL for presigned URLs. Required behind proxies. |
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption support. |
| `KMS_ENABLED` | `false` | Enable KMS key management for encryption. |
| `KMS_KEYS_PATH` | `data/kms_keys.json` | Path to store KMS key metadata. |
| `ENCRYPTION_MASTER_KEY_PATH` | `data/master.key` | Path to the master encryption key file. |
Set env vars (or pass overrides to `create_app`) to point the servers at custom paths.
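The same keys can also be supplied programmatically. A minimal sketch, assuming `create_app` layers the supplied mapping over the environment-derived configuration:

```python
from app import create_app

# Illustrative overrides; any variable from the tables above can appear here.
app = create_app({
    "STORAGE_ROOT": "/mnt/storage/myfsio",
    "CORS_ORIGINS": ["https://app.example.com"],
    "LOG_LEVEL": "DEBUG",
})
app.run(port=5000)
```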
### IAM & Security
| Variable | Default | Notes |
| --- | --- | --- |
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
| `BUCKET_POLICY_PATH` | `data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
| `AUTH_MAX_ATTEMPTS` | `5` | Failed login attempts before lockout. |
| `AUTH_LOCKOUT_MINUTES` | `15` | Lockout duration after max failed attempts. |
| `SESSION_LIFETIME_DAYS` | `30` | How long UI sessions remain valid. |
| `SECRET_TTL_SECONDS` | `300` | TTL for ephemeral secrets (presigned URLs). |
| `UI_ENFORCE_BUCKET_POLICIES` | `false` | Whether the UI should enforce bucket policies. |
### CORS (Cross-Origin Resource Sharing)
| Variable | Default | Notes |
| --- | --- | --- |
| `CORS_ORIGINS` | `*` | Comma-separated allowed origins. Use specific domains in production. |
| `CORS_METHODS` | `GET,PUT,POST,DELETE,OPTIONS,HEAD` | Allowed HTTP methods. |
| `CORS_ALLOW_HEADERS` | `*` | Allowed request headers. |
| `CORS_EXPOSE_HEADERS` | `*` | Response headers visible to browsers (e.g., `ETag`). |
### Rate Limiting
| Variable | Default | Notes |
| --- | --- | --- |
| `RATE_LIMIT_DEFAULT` | `200 per minute` | Default rate limit for API endpoints. |
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
### Logging
| Variable | Default | Notes |
| --- | --- | --- |
| `LOG_LEVEL` | `INFO` | Log verbosity: `DEBUG`, `INFO`, `WARNING`, `ERROR`. |
| `LOG_TO_FILE` | `true` | Enable file logging. |
| `LOG_DIR` | `<repo>/logs` | Directory for log files. |
| `LOG_FILE` | `app.log` | Log filename. |
| `LOG_MAX_BYTES` | `5242880` (5 MB) | Max log file size before rotation. |
| `LOG_BACKUP_COUNT` | `3` | Number of rotated log files to keep. |
### Encryption
| Variable | Default | Notes |
| --- | --- | --- |
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption support. |
| `ENCRYPTION_MASTER_KEY_PATH` | `data/.myfsio.sys/keys/master.key` | Path to the master encryption key file. |
| `DEFAULT_ENCRYPTION_ALGORITHM` | `AES256` | Default algorithm for new encrypted objects. |
| `KMS_ENABLED` | `false` | Enable KMS key management for encryption. |
| `KMS_KEYS_PATH` | `data/.myfsio.sys/keys/kms_keys.json` | Path to store KMS key metadata. |
### Performance Tuning
| Variable | Default | Notes |
| --- | --- | --- |
| `STREAM_CHUNK_SIZE` | `65536` (64 KB) | Chunk size for streaming large files. |
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
### Server Settings
| Variable | Default | Notes |
| --- | --- | --- |
| `APP_HOST` | `0.0.0.0` | Network interface to bind to. |
| `APP_PORT` | `5000` | API server port (UI uses 5100). |
| `FLASK_DEBUG` | `0` | Enable Flask debug mode. **Never enable in production.** |
### Production Checklist
Before deploying to production, ensure you:
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`)
2. **Restrict CORS** - Set `CORS_ORIGINS` to your specific domains instead of `*`
3. **Configure `API_BASE_URL`** - Required for correct presigned URLs behind proxies
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
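If `openssl` is not available, Python's standard library covers item 1 just as well:

```python
# Generate a strong, URL-safe value for SECRET_KEY.
import secrets

print(secrets.token_urlsafe(32))
```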
### Proxy Configuration
@@ -95,6 +226,334 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
## 4. Upgrading and Updates
### Version Checking
The application version is tracked in `app/version.py` and exposed via:
- **Health endpoint:** `GET /healthz` returns JSON with `version` field
- **Metrics dashboard:** Navigate to `/ui/metrics` to see the running version in the System Status card
To check your current version:
```bash
# API health endpoint
curl http://localhost:5000/healthz
# Or inspect version.py directly
cat app/version.py | grep APP_VERSION
```
### Pre-Update Backup Procedures
**Always backup before upgrading to prevent data loss:**
```bash
# 1. Stop the application
# Ctrl+C if running in terminal, or:
docker stop myfsio # if using Docker
# 2. Backup configuration files (CRITICAL)
BACKUP_DIR="backups/$(date +%Y%m%d_%H%M%S)"   # capture the timestamp once
mkdir -p "$BACKUP_DIR"
cp -r data/.myfsio.sys/config "$BACKUP_DIR/"
# 3. Backup all data (optional but recommended)
tar -czf "$BACKUP_DIR/data.tar.gz" data/
# 4. Backup logs for audit trail
cp -r logs "$BACKUP_DIR/"
```
**Windows PowerShell:**
```powershell
# Create timestamped backup
$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
New-Item -ItemType Directory -Path "backups\$timestamp" -Force
# Backup configs
Copy-Item -Recurse "data\.myfsio.sys\config" "backups\$timestamp\"
# Backup entire data directory
Compress-Archive -Path "data\" -DestinationPath "backups\data_$timestamp.zip"
```
**Critical files to backup:**
- `data/.myfsio.sys/config/iam.json` - User accounts and access keys
- `data/.myfsio.sys/config/bucket_policies.json` - Bucket access policies
- `data/.myfsio.sys/config/kms_keys.json` - Encryption keys (if using KMS)
- `data/.myfsio.sys/config/secret_store.json` - Application secrets
### Update Procedures
#### Source Installation Updates
```bash
# 1. Backup (see above)
# 2. Pull latest code
git fetch origin
git checkout main # or your target branch/tag
git pull
# 3. Check for dependency changes
pip install -r requirements.txt
# 4. Review CHANGELOG/release notes for breaking changes
cat CHANGELOG.md # if available
# 5. Run migration scripts (if any)
# python scripts/migrate_vX_to_vY.py # example
# 6. Restart application
python run.py
```
#### Docker Updates
```bash
# 1. Backup (see above)
# 2. Pull/rebuild image
docker pull yourregistry/myfsio:latest
# OR rebuild from source:
docker build -t myfsio:latest .
# 3. Stop and remove old container
docker stop myfsio
docker rm myfsio
# 4. Start new container with same volumes
docker run -d \
--name myfsio \
-p 5000:5000 -p 5100:5100 \
-v "$(pwd)/data:/app/data" \
-v "$(pwd)/logs:/app/logs" \
-e SECRET_KEY="your-secret" \
myfsio:latest
# 5. Verify health
curl http://localhost:5000/healthz
```
### Version Compatibility Checks
Before upgrading across major versions, verify compatibility:
| From Version | To Version | Breaking Changes | Migration Required |
|--------------|------------|------------------|-------------------|
| 0.1.x | 0.2.x | None expected | No |
| 0.1.6 | 0.1.7 | None | No |
| < 0.1.0 | >= 0.1.0 | New IAM config format | Yes - run migration script |
**Automatic compatibility detection:**
The application will log warnings on startup if config files need migration:
```
WARNING: IAM config format is outdated (v1). Please run: python scripts/migrate_iam.py
```
**Manual compatibility check:**
```bash
# Compare version schemas
python -c "from app.version import APP_VERSION; print(f'Running: {APP_VERSION}')"
python scripts/check_compatibility.py data/.myfsio.sys/config/
```
### Migration Steps for Breaking Changes
When release notes indicate breaking changes, follow these steps:
#### Config Format Migrations
```bash
# 1. Backup first (critical!)
cp data/.myfsio.sys/config/iam.json data/.myfsio.sys/config/iam.json.backup
# 2. Run provided migration script
python scripts/migrate_iam_v1_to_v2.py
# 3. Validate migration
python scripts/validate_config.py
# 4. Test with read-only mode first (if available)
# python run.py --read-only
# 5. Restart normally
python run.py
```
#### Database/Storage Schema Changes
If object metadata format changes:
```bash
# 1. Run storage migration script
python scripts/migrate_storage.py --dry-run # preview changes
# 2. Apply migration
python scripts/migrate_storage.py --apply
# 3. Verify integrity
python scripts/verify_storage.py
```
#### IAM Policy Updates
If IAM action names change (e.g., `s3:Get` → `s3:GetObject`):
```bash
# Migration script will update all policies
python scripts/migrate_policies.py \
--input data/.myfsio.sys/config/iam.json \
--backup data/.myfsio.sys/config/iam.json.v1
# Review changes before committing
python scripts/diff_policies.py \
data/.myfsio.sys/config/iam.json.v1 \
data/.myfsio.sys/config/iam.json
```
### Rollback Procedures
If an update causes issues, rollback to the previous version:
#### Quick Rollback (Source)
```bash
# 1. Stop application
# Ctrl+C or kill process
# 2. Revert code
git checkout <previous-version-tag>
# OR
git reset --hard HEAD~1
# 3. Restore configs from backup
cp backups/20241213_103000/config/* data/.myfsio.sys/config/
# 4. Downgrade dependencies if needed
pip install -r requirements.txt
# 5. Restart
python run.py
```
#### Docker Rollback
```bash
# 1. Stop current container
docker stop myfsio
docker rm myfsio
# 2. Start previous version
docker run -d \
--name myfsio \
-p 5000:5000 -p 5100:5100 \
-v "$(pwd)/data:/app/data" \
-v "$(pwd)/logs:/app/logs" \
-e SECRET_KEY="your-secret" \
myfsio:0.1.3 # specify previous version tag
# 3. Verify
curl http://localhost:5000/healthz
```
#### Emergency Config Restore
If only config is corrupted but code is fine:
```bash
# Stop app
# Restore from latest backup
cp backups/20241213_103000/config/iam.json data/.myfsio.sys/config/
cp backups/20241213_103000/config/bucket_policies.json data/.myfsio.sys/config/
# Restart app
python run.py
```
### Blue-Green Deployment (Zero Downtime)
For production environments requiring zero downtime:
```bash
# 1. Run new version on different port (e.g., 5001/5101)
APP_PORT=5001 UI_PORT=5101 python run.py &
# 2. Health check new instance
curl http://localhost:5001/healthz
# 3. Update load balancer to route to new ports
# 4. Monitor for issues
# 5. Gracefully stop old instance
kill -SIGTERM <old-pid>
```
### Post-Update Verification
After any update, verify functionality:
```bash
# 1. Health check
curl http://localhost:5000/healthz
# 2. Login to UI
open http://localhost:5100/ui
# 3. Test IAM authentication
curl -H "X-Amz-Security-Token: <your-access-key>:<your-secret>" \
http://localhost:5000/
# 4. Test presigned URL generation
# Via UI or API
# 5. Check logs for errors
tail -n 100 logs/myfsio.log
```
### Automated Update Scripts
Create a custom update script for your environment:
```bash
#!/bin/bash
# update.sh - Automated update with rollback capability
set -e # Exit on error
VERSION_NEW="$1"
BACKUP_DIR="backups/$(date +%Y%m%d_%H%M%S)"
echo "Creating backup..."
mkdir -p "$BACKUP_DIR"
cp -r data/.myfsio.sys/config "$BACKUP_DIR/"
echo "Updating to version $VERSION_NEW..."
git fetch origin
git checkout "v$VERSION_NEW"
pip install -r requirements.txt
echo "Starting application..."
python run.py &
APP_PID=$!
# Wait and health check
sleep 5
if curl -f http://localhost:5000/healthz; then
echo "Update successful!"
else
echo "Health check failed, rolling back..."
kill $APP_PID
git checkout -
cp -r "$BACKUP_DIR/config/*" data/.myfsio.sys/config/
python run.py &
exit 1
fi
```
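Usage would then look like this (the version number is illustrative; the script expects a matching `v<version>` git tag):
```bash
chmod +x update.sh
./update.sh 0.1.7   # backs up configs, checks out v0.1.7, health-checks, rolls back on failure
```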
## 4. Authentication & IAM
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with a `localadmin / localadmin` account that has wildcard access.
@@ -176,6 +635,48 @@ curl -X PUT http://127.0.0.1:5000/bucket-policy/test \
The UI will reflect this change as soon as the request completes thanks to the hot reload.
### UI Object Browser
The bucket detail page includes a powerful object browser with the following features:
#### Folder Navigation
Objects with forward slashes (`/`) in their keys are displayed as a folder hierarchy. Click a folder row to navigate into it. A breadcrumb navigation bar shows your current path and allows quick navigation back to parent folders or the root.
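For example, assuming the S3-style `PUT /<bucket>/<key>` upload route and the header-based credentials used elsewhere in this guide, these two uploads appear in the browser as a nested `logs/2025/` folder:
```bash
curl -X PUT "http://localhost:5000/demo/logs/2025/app.log" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  --data-binary @app.log
curl -X PUT "http://localhost:5000/demo/logs/2025/audit.log" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  --data-binary @audit.log
```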
#### Pagination & Infinite Scroll
- Objects load in configurable batches (50, 100, 150, 200, or 250 per page)
- Scroll to the bottom to automatically load more objects (infinite scroll)
- A **Load more** button is available as a fallback for touch devices or when infinite scroll doesn't trigger
- The footer shows the current load status (e.g., "Showing 100 of 500 objects")
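Under the hood, the browser pages through a JSON endpoint on the UI port. You can exercise it directly with an authenticated session; the endpoint shape and parameters below follow the pagination test suite, and the cookie file is whatever your login flow produced:
```bash
# Log in once to obtain a session cookie (CSRF protection may require extra handling)
curl -c cookies.txt -d "access_key=<key>&secret_key=<secret>" http://localhost:5100/ui/login
# First page of 50 objects
curl -b cookies.txt "http://localhost:5100/ui/buckets/<bucket>/objects?max_keys=50"
# Next page, using next_continuation_token from the previous response
curl -b cookies.txt "http://localhost:5100/ui/buckets/<bucket>/objects?max_keys=50&continuation_token=<token>"
```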
#### Bulk Operations
- Select multiple objects using checkboxes
- **Bulk Delete**: Delete multiple objects at once
- **Bulk Download**: Download selected objects as individual files
#### Search & Filter
Use the search box to filter objects by name in real-time. The filter applies to the currently loaded objects.
#### Error Handling
If object loading fails (e.g., network error), a friendly error message is displayed with a **Retry** button to attempt loading again.
#### Object Preview
Click any object row to view its details in the preview sidebar:
- File size and last modified date
- ETag (content hash)
- Custom metadata (if present)
- Download and presign (share link) buttons
- Version history (when versioning is enabled)
#### Drag & Drop Upload
Drag files directly onto the objects table to upload them to the current bucket and folder path.
## 6. Presigned URLs
- Trigger from the UI using the **Presign** button after selecting an object.
@@ -340,7 +841,71 @@ To verify an object is encrypted:
2. Look for `.meta` files containing encryption metadata
3. Download via the API/UI - the object should be automatically decrypted
## 8. Site Replication
## 8. Bucket Quotas
MyFSIO supports **storage quotas** to limit how much data a bucket can hold. Quotas are enforced on uploads and multipart completions.
### Quota Types
| Limit | Description |
|-------|-------------|
| **Max Size (MB)** | Maximum total storage in megabytes (includes current objects + archived versions) |
| **Max Objects** | Maximum number of objects (includes current objects + archived versions) |
### Managing Quotas (Admin Only)
Quota management is restricted to administrators (users with `iam:*` or `iam:list_users` permissions).
#### Via UI
1. Navigate to your bucket in the UI
2. Click the **Properties** tab
3. Find the **Storage Quota** card
4. Enter limits:
- **Max Size (MB)**: Leave empty for unlimited
- **Max Objects**: Leave empty for unlimited
5. Click **Update Quota**
To remove a quota, click **Remove Quota**.
#### Via API
```bash
# Set quota (max 100MB, max 1000 objects)
curl -X PUT "http://localhost:5000/bucket/<bucket>?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '{"max_bytes": 104857600, "max_objects": 1000}'
# Get current quota
curl "http://localhost:5000/bucket/<bucket>?quota" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Remove quota
curl -X PUT "http://localhost:5000/bucket/<bucket>?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '{"max_bytes": null, "max_objects": null}'
```
### Quota Behavior
- **Version Counting**: When versioning is enabled, archived versions count toward the quota
- **Enforcement Points**: Quotas are checked during `PUT` object and `CompleteMultipartUpload` operations
- **Error Response**: When quota is exceeded, the API returns `HTTP 400` with error code `QuotaExceeded`
- **Visibility**: All users can view quota usage in the bucket detail page, but only admins can modify quotas
### Example Error
```xml
<Error>
<Code>QuotaExceeded</Code>
<Message>Bucket quota exceeded: storage limit reached</Message>
<BucketName>my-bucket</BucketName>
</Error>
```
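Client scripts can key off that error code to fail gracefully. A minimal sketch using the same header-based credentials:
```bash
RESPONSE=$(curl -s -X PUT "http://localhost:5000/my-bucket/report.csv" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  --data-binary @report.csv)
if echo "$RESPONSE" | grep -q "<Code>QuotaExceeded</Code>"; then
  echo "Upload rejected: bucket quota exceeded" >&2
  exit 1
fi
```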
## 9. Site Replication
### Permission Model
@@ -477,7 +1042,7 @@ To set up two-way replication (Server A ↔ Server B):
**Note**: Deleting a bucket will automatically remove its associated replication configuration.
## 9. Running Tests
## 11. Running Tests
```bash
pytest -q
@@ -487,7 +1052,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server
The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.
## 10. Troubleshooting
## 12. Troubleshooting
| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -496,7 +1061,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |
## 11. API Matrix
## 13. API Matrix
```
GET / # List buckets
@@ -510,10 +1075,6 @@ POST /presign/<bucket>/<key> # Generate SigV4 URL
GET /bucket-policy/<bucket> # Fetch policy
PUT /bucket-policy/<bucket> # Upsert policy
DELETE /bucket-policy/<bucket> # Delete policy
GET /bucket/<bucket>?quota # Get bucket quota
PUT /bucket/<bucket>?quota # Set bucket quota (admin only)
```
## 14. Next Steps
- Tailor IAM + policy JSON files for team-ready presets.
- Wrap `run_api.py` with gunicorn or another WSGI server for long-running workloads (a minimal sketch follows this list).
- Extend `bucket_policies.json` to cover Deny statements that simulate production security controls.
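A minimal gunicorn sketch, assuming the `create_api_app` factory that `run.py` imports from the `app` package:
```bash
pip install gunicorn
# Call the app factory directly; 4 workers, same port the dev server uses
gunicorn --workers 4 --bind 0.0.0.0:5000 "app:create_api_app()"
```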

pytest.ini Normal file
@@ -0,0 +1,3 @@
[pytest]
testpaths = tests
norecursedirs = data .git __pycache__ .venv

requirements.txt
@@ -1,10 +1,10 @@
Flask>=3.0.2
Flask-Limiter>=3.5.0
Flask-Cors>=4.0.0
Flask-WTF>=1.2.1
pytest>=7.4
requests>=2.31
boto3>=1.34
waitress>=2.1.2
psutil>=5.9.0
cryptography>=41.0.0
Flask>=3.1.2
Flask-Limiter>=4.1.1
Flask-Cors>=6.0.2
Flask-WTF>=1.2.2
pytest>=9.0.2
requests>=2.32.5
boto3>=1.42.14
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3

run.py

@@ -8,6 +8,7 @@ import warnings
from multiprocessing import Process
from app import create_api_app, create_ui_app
from app.config import AppConfig
def _server_host() -> str:
@@ -55,12 +56,48 @@ if __name__ == "__main__":
parser.add_argument("--ui-port", type=int, default=5100)
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
args = parser.parse_args()
# Handle config check/show modes
if args.check_config or args.show_config:
config = AppConfig.from_env()
config.print_startup_summary()
if args.check_config:
issues = config.validate_and_report()
critical = [i for i in issues if i.startswith("CRITICAL:")]
sys.exit(1 if critical else 0)
sys.exit(0)
# Default to production mode when running as compiled binary
# unless --dev is explicitly passed
prod_mode = args.prod or (_is_frozen() and not args.dev)
# Validate configuration before starting
config = AppConfig.from_env()
# Show startup summary only on first run (when marker file doesn't exist)
first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
is_first_run = not first_run_marker.exists()
if is_first_run:
config.print_startup_summary()
# Check for critical issues that should prevent startup
issues = config.validate_and_report()
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
if critical_issues:
print("ABORTING: Critical configuration issues detected. Fix them before starting.")
sys.exit(1)
# Create the marker file to indicate successful first run
try:
first_run_marker.parent.mkdir(parents=True, exist_ok=True)
first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
except OSError:
pass # Non-critical, just skip marker creation
if prod_mode:
print("Running in production mode (Waitress)")
else:

scripts/install.sh Normal file

@@ -0,0 +1,370 @@
#!/bin/bash
#
# MyFSIO Installation Script
# This script sets up MyFSIO for production use on Linux systems.
#
# Usage:
# ./install.sh [OPTIONS]
#
# Options:
# --install-dir DIR Installation directory (default: /opt/myfsio)
# --data-dir DIR Data directory (default: /var/lib/myfsio)
# --log-dir DIR Log directory (default: /var/log/myfsio)
# --user USER System user to run as (default: myfsio)
# --port PORT API port (default: 5000)
# --ui-port PORT UI port (default: 5100)
# --api-url URL Public API URL (for presigned URLs behind proxy)
# --no-systemd Skip systemd service creation
# --binary PATH Path to myfsio binary (will download if not provided)
# -y, --yes Skip confirmation prompts
#
set -e
INSTALL_DIR="/opt/myfsio"
DATA_DIR="/var/lib/myfsio"
LOG_DIR="/var/log/myfsio"
SERVICE_USER="myfsio"
API_PORT="5000"
UI_PORT="5100"
API_URL=""
SKIP_SYSTEMD=false
BINARY_PATH=""
AUTO_YES=false
while [[ $# -gt 0 ]]; do
case $1 in
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
--data-dir)
DATA_DIR="$2"
shift 2
;;
--log-dir)
LOG_DIR="$2"
shift 2
;;
--user)
SERVICE_USER="$2"
shift 2
;;
--port)
API_PORT="$2"
shift 2
;;
--ui-port)
UI_PORT="$2"
shift 2
;;
--api-url)
API_URL="$2"
shift 2
;;
--no-systemd)
SKIP_SYSTEMD=true
shift
;;
--binary)
BINARY_PATH="$2"
shift 2
;;
-y|--yes)
AUTO_YES=true
shift
;;
-h|--help)
head -30 "$0" | tail -25
exit 0
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
done
echo ""
echo "============================================================"
echo " MyFSIO Installation Script"
echo " S3-Compatible Object Storage"
echo "============================================================"
echo ""
echo "Documentation: https://go.jzwsite.com/myfsio"
echo ""
if [[ $EUID -ne 0 ]]; then
echo "Error: This script must be run as root (use sudo)"
exit 1
fi
echo "------------------------------------------------------------"
echo "STEP 1: Review Installation Configuration"
echo "------------------------------------------------------------"
echo ""
echo " Install directory: $INSTALL_DIR"
echo " Data directory: $DATA_DIR"
echo " Log directory: $LOG_DIR"
echo " Service user: $SERVICE_USER"
echo " API port: $API_PORT"
echo " UI port: $UI_PORT"
if [[ -n "$API_URL" ]]; then
echo " Public API URL: $API_URL"
fi
if [[ -n "$BINARY_PATH" ]]; then
echo " Binary path: $BINARY_PATH"
fi
echo ""
if [[ "$AUTO_YES" != true ]]; then
read -p "Do you want to proceed with these settings? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Installation cancelled."
exit 0
fi
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 2: Creating System User"
echo "------------------------------------------------------------"
echo ""
if id "$SERVICE_USER" &>/dev/null; then
echo " [OK] User '$SERVICE_USER' already exists"
else
useradd --system --no-create-home --shell /usr/sbin/nologin "$SERVICE_USER"
echo " [OK] Created user '$SERVICE_USER'"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 3: Creating Directories"
echo "------------------------------------------------------------"
echo ""
mkdir -p "$INSTALL_DIR"
echo " [OK] Created $INSTALL_DIR"
mkdir -p "$DATA_DIR"
echo " [OK] Created $DATA_DIR"
mkdir -p "$LOG_DIR"
echo " [OK] Created $LOG_DIR"
echo ""
echo "------------------------------------------------------------"
echo "STEP 4: Installing Binary"
echo "------------------------------------------------------------"
echo ""
if [[ -n "$BINARY_PATH" ]]; then
if [[ -f "$BINARY_PATH" ]]; then
cp "$BINARY_PATH" "$INSTALL_DIR/myfsio"
echo " [OK] Copied binary from $BINARY_PATH"
else
echo " [ERROR] Binary not found at $BINARY_PATH"
exit 1
fi
elif [[ -f "./myfsio" ]]; then
cp "./myfsio" "$INSTALL_DIR/myfsio"
echo " [OK] Copied binary from ./myfsio"
else
echo " [ERROR] No binary provided."
echo " Use --binary PATH or place 'myfsio' in current directory"
exit 1
fi
chmod +x "$INSTALL_DIR/myfsio"
echo " [OK] Set executable permissions"
echo ""
echo "------------------------------------------------------------"
echo "STEP 5: Generating Secret Key"
echo "------------------------------------------------------------"
echo ""
SECRET_KEY=$(openssl rand -base64 32)
echo " [OK] Generated secure SECRET_KEY"
echo ""
echo "------------------------------------------------------------"
echo "STEP 6: Creating Configuration File"
echo "------------------------------------------------------------"
echo ""
cat > "$INSTALL_DIR/myfsio.env" << EOF
# MyFSIO Configuration
# Generated by install.sh on $(date)
# Documentation: https://go.jzwsite.com/myfsio
# Storage paths
STORAGE_ROOT=$DATA_DIR
LOG_DIR=$LOG_DIR
# Network
APP_HOST=0.0.0.0
APP_PORT=$API_PORT
# Security - CHANGE IN PRODUCTION
SECRET_KEY=$SECRET_KEY
CORS_ORIGINS=*
# Public URL (set this if behind a reverse proxy)
$(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)
# Logging
LOG_LEVEL=INFO
LOG_TO_FILE=true
# Rate limiting
RATE_LIMIT_DEFAULT=200 per minute
# Optional: Encryption (uncomment to enable)
# ENCRYPTION_ENABLED=true
# KMS_ENABLED=true
EOF
chmod 600 "$INSTALL_DIR/myfsio.env"
echo " [OK] Created $INSTALL_DIR/myfsio.env"
echo ""
echo "------------------------------------------------------------"
echo "STEP 7: Setting Permissions"
echo "------------------------------------------------------------"
echo ""
chown -R "$SERVICE_USER:$SERVICE_USER" "$INSTALL_DIR"
echo " [OK] Set ownership for $INSTALL_DIR"
chown -R "$SERVICE_USER:$SERVICE_USER" "$DATA_DIR"
echo " [OK] Set ownership for $DATA_DIR"
chown -R "$SERVICE_USER:$SERVICE_USER" "$LOG_DIR"
echo " [OK] Set ownership for $LOG_DIR"
if [[ "$SKIP_SYSTEMD" != true ]]; then
echo ""
echo "------------------------------------------------------------"
echo "STEP 8: Creating Systemd Service"
echo "------------------------------------------------------------"
echo ""
cat > /etc/systemd/system/myfsio.service << EOF
[Unit]
Description=MyFSIO S3-Compatible Storage
Documentation=https://go.jzwsite.com/myfsio
After=network.target
[Service]
Type=simple
User=$SERVICE_USER
Group=$SERVICE_USER
WorkingDirectory=$INSTALL_DIR
EnvironmentFile=$INSTALL_DIR/myfsio.env
ExecStart=$INSTALL_DIR/myfsio
Restart=on-failure
RestartSec=5
# Security hardening
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=$DATA_DIR $LOG_DIR
PrivateTmp=true
# Resource limits (adjust as needed)
# LimitNOFILE=65535
# MemoryMax=2G
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
echo " [OK] Created /etc/systemd/system/myfsio.service"
echo " [OK] Reloaded systemd daemon"
else
echo ""
echo "------------------------------------------------------------"
echo "STEP 8: Skipping Systemd Service (--no-systemd flag used)"
echo "------------------------------------------------------------"
fi
echo ""
echo "============================================================"
echo " Installation Complete!"
echo "============================================================"
echo ""
if [[ "$SKIP_SYSTEMD" != true ]]; then
echo "------------------------------------------------------------"
echo "STEP 9: Start the Service"
echo "------------------------------------------------------------"
echo ""
if [[ "$AUTO_YES" != true ]]; then
read -p "Would you like to start MyFSIO now? [Y/n] " -n 1 -r
echo
START_SERVICE=true
if [[ $REPLY =~ ^[Nn]$ ]]; then
START_SERVICE=false
fi
else
START_SERVICE=true
fi
if [[ "$START_SERVICE" == true ]]; then
echo " Starting MyFSIO service..."
systemctl start myfsio
echo " [OK] Service started"
echo ""
read -p "Would you like to enable MyFSIO to start on boot? [Y/n] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Nn]$ ]]; then
systemctl enable myfsio
echo " [OK] Service enabled on boot"
fi
echo ""
sleep 2
echo " Service Status:"
echo " ---------------"
if systemctl is-active --quiet myfsio; then
echo " [OK] MyFSIO is running"
else
echo " [WARNING] MyFSIO may not have started correctly"
echo " Check logs with: journalctl -u myfsio -f"
fi
else
echo " [SKIPPED] Service not started"
echo ""
echo " To start manually, run:"
echo " sudo systemctl start myfsio"
echo ""
echo " To enable on boot, run:"
echo " sudo systemctl enable myfsio"
fi
fi
echo ""
echo "============================================================"
echo " Summary"
echo "============================================================"
echo ""
echo "Access Points:"
echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$API_PORT"
echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
echo ""
echo "Default Credentials:"
echo " Username: localadmin"
echo " Password: localadmin"
echo " [!] WARNING: Change these immediately after first login!"
echo ""
echo "Configuration Files:"
echo " Environment: $INSTALL_DIR/myfsio.env"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo ""
echo "Useful Commands:"
echo " Check status: sudo systemctl status myfsio"
echo " View logs: sudo journalctl -u myfsio -f"
echo " Restart: sudo systemctl restart myfsio"
echo " Stop: sudo systemctl stop myfsio"
echo ""
echo "Documentation: https://go.jzwsite.com/myfsio"
echo ""
echo "============================================================"
echo " Thank you for installing MyFSIO!"
echo "============================================================"
echo ""

scripts/uninstall.sh Normal file

@@ -0,0 +1,244 @@
#!/bin/bash
#
# MyFSIO Uninstall Script
# This script removes MyFSIO from your system.
#
# Usage:
# ./uninstall.sh [OPTIONS]
#
# Options:
# --keep-data Don't remove data directory
# --keep-logs Don't remove log directory
# --install-dir DIR Installation directory (default: /opt/myfsio)
# --data-dir DIR Data directory (default: /var/lib/myfsio)
# --log-dir DIR Log directory (default: /var/log/myfsio)
# --user USER System user (default: myfsio)
# -y, --yes Skip confirmation prompts
#
set -e
INSTALL_DIR="/opt/myfsio"
DATA_DIR="/var/lib/myfsio"
LOG_DIR="/var/log/myfsio"
SERVICE_USER="myfsio"
KEEP_DATA=false
KEEP_LOGS=false
AUTO_YES=false
while [[ $# -gt 0 ]]; do
case $1 in
--keep-data)
KEEP_DATA=true
shift
;;
--keep-logs)
KEEP_LOGS=true
shift
;;
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
--data-dir)
DATA_DIR="$2"
shift 2
;;
--log-dir)
LOG_DIR="$2"
shift 2
;;
--user)
SERVICE_USER="$2"
shift 2
;;
-y|--yes)
AUTO_YES=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
done
echo ""
echo "============================================================"
echo " MyFSIO Uninstallation Script"
echo "============================================================"
echo ""
echo "Documentation: https://go.jzwsite.com/myfsio"
echo ""
if [[ $EUID -ne 0 ]]; then
echo "Error: This script must be run as root (use sudo)"
exit 1
fi
echo "------------------------------------------------------------"
echo "STEP 1: Review What Will Be Removed"
echo "------------------------------------------------------------"
echo ""
echo "The following items will be removed:"
echo ""
echo " Install directory: $INSTALL_DIR"
if [[ "$KEEP_DATA" != true ]]; then
echo " Data directory: $DATA_DIR (ALL YOUR DATA WILL BE DELETED!)"
else
echo " Data directory: $DATA_DIR (WILL BE KEPT)"
fi
if [[ "$KEEP_LOGS" != true ]]; then
echo " Log directory: $LOG_DIR"
else
echo " Log directory: $LOG_DIR (WILL BE KEPT)"
fi
echo " Systemd service: /etc/systemd/system/myfsio.service"
echo " System user: $SERVICE_USER"
echo ""
if [[ "$AUTO_YES" != true ]]; then
echo "WARNING: This action cannot be undone!"
echo ""
read -p "Are you sure you want to uninstall MyFSIO? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo ""
echo "Uninstallation cancelled."
exit 0
fi
if [[ "$KEEP_DATA" != true ]]; then
echo ""
read -p "This will DELETE ALL YOUR DATA. Type 'DELETE' to confirm: " CONFIRM
if [[ "$CONFIRM" != "DELETE" ]]; then
echo ""
echo "Uninstallation cancelled."
echo "Tip: Use --keep-data to preserve your data directory"
exit 0
fi
fi
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 2: Stopping Service"
echo "------------------------------------------------------------"
echo ""
if systemctl is-active --quiet myfsio 2>/dev/null; then
systemctl stop myfsio
echo " [OK] Stopped myfsio service"
else
echo " [SKIP] Service not running"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 3: Disabling Service"
echo "------------------------------------------------------------"
echo ""
if systemctl is-enabled --quiet myfsio 2>/dev/null; then
systemctl disable myfsio
echo " [OK] Disabled myfsio service"
else
echo " [SKIP] Service not enabled"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 4: Removing Systemd Service File"
echo "------------------------------------------------------------"
echo ""
if [[ -f /etc/systemd/system/myfsio.service ]]; then
rm -f /etc/systemd/system/myfsio.service
systemctl daemon-reload
echo " [OK] Removed /etc/systemd/system/myfsio.service"
echo " [OK] Reloaded systemd daemon"
else
echo " [SKIP] Service file not found"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 5: Removing Installation Directory"
echo "------------------------------------------------------------"
echo ""
if [[ -d "$INSTALL_DIR" ]]; then
rm -rf "$INSTALL_DIR"
echo " [OK] Removed $INSTALL_DIR"
else
echo " [SKIP] Directory not found: $INSTALL_DIR"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 6: Removing Data Directory"
echo "------------------------------------------------------------"
echo ""
if [[ "$KEEP_DATA" != true ]]; then
if [[ -d "$DATA_DIR" ]]; then
rm -rf "$DATA_DIR"
echo " [OK] Removed $DATA_DIR"
else
echo " [SKIP] Directory not found: $DATA_DIR"
fi
else
echo " [KEPT] Data preserved at: $DATA_DIR"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 7: Removing Log Directory"
echo "------------------------------------------------------------"
echo ""
if [[ "$KEEP_LOGS" != true ]]; then
if [[ -d "$LOG_DIR" ]]; then
rm -rf "$LOG_DIR"
echo " [OK] Removed $LOG_DIR"
else
echo " [SKIP] Directory not found: $LOG_DIR"
fi
else
echo " [KEPT] Logs preserved at: $LOG_DIR"
fi
echo ""
echo "------------------------------------------------------------"
echo "STEP 8: Removing System User"
echo "------------------------------------------------------------"
echo ""
if id "$SERVICE_USER" &>/dev/null; then
userdel "$SERVICE_USER" 2>/dev/null || true
echo " [OK] Removed user '$SERVICE_USER'"
else
echo " [SKIP] User not found: $SERVICE_USER"
fi
echo ""
echo "============================================================"
echo " Uninstallation Complete!"
echo "============================================================"
echo ""
if [[ "$KEEP_DATA" == true ]]; then
echo "Your data has been preserved at: $DATA_DIR"
echo ""
echo "To reinstall MyFSIO with existing data, run:"
echo " curl -fsSL https://go.jzwsite.com/myfsio-install | sudo bash"
echo ""
fi
if [[ "$KEEP_LOGS" == true ]]; then
echo "Your logs have been preserved at: $LOG_DIR"
echo ""
fi
echo "Thank you for using MyFSIO."
echo "Documentation: https://go.jzwsite.com/myfsio"
echo ""
echo "============================================================"
echo ""


@@ -199,7 +199,7 @@
})();
</script>
<script>
// Toast utility
window.showToast = function(message, title = 'Notification', type = 'info') {
const toastEl = document.getElementById('liveToast');
const toastTitle = document.getElementById('toastTitle');
@@ -207,8 +207,7 @@
toastTitle.textContent = title;
toastMessage.textContent = message;
// Reset classes
toastEl.classList.remove('text-bg-primary', 'text-bg-success', 'text-bg-danger', 'text-bg-warning');
if (type === 'success') toastEl.classList.add('text-bg-success');
@@ -221,13 +220,11 @@
</script>
<script>
(function () {
// Show flashed messages as toasts
{% with messages = get_flashed_messages(with_categories=true) %}
{% if messages %}
{% for category, message in messages %}
// Map Flask categories to Toast types
// Flask: success, danger, warning, info
// Toast: success, error, warning, info
var type = "{{ category }}";
if (type === "danger") type = "error";
window.showToast({{ message | tojson | safe }}, "Notification", type);

File diff suppressed because it is too large

@@ -131,7 +131,7 @@
{{ super() }}
<script>
(function () {
// Search functionality
const searchInput = document.getElementById('bucket-search');
const bucketItems = document.querySelectorAll('.bucket-item');
const noBucketsMsg = document.querySelector('.text-center.py-5'); // The "No buckets found" empty state
@@ -153,7 +153,6 @@
});
}
// View toggle functionality
const viewGrid = document.getElementById('view-grid');
const viewList = document.getElementById('view-list');
const container = document.getElementById('buckets-container');
@@ -168,8 +167,7 @@
});
cards.forEach(card => {
card.classList.remove('h-100');
// Optional: Add flex-row to card-body content if we want a horizontal layout
// For now, full-width stacked cards is a good list view
});
localStorage.setItem('bucket-view-pref', 'list');
} else {
@@ -188,7 +186,6 @@
viewGrid.addEventListener('change', () => setView('grid'));
viewList.addEventListener('change', () => setView('list'));
// Restore preference
const pref = localStorage.getItem('bucket-view-pref');
if (pref === 'list') {
viewList.checked = true;


@@ -181,7 +181,6 @@
</div>
</div>
<!-- Edit Connection Modal -->
<div class="modal fade" id="editConnectionModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -247,7 +246,6 @@
</div>
</div>
<!-- Delete Connection Modal -->
<div class="modal fade" id="deleteConnectionModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -297,7 +295,6 @@
}
}
// Test Connection Logic
async function testConnection(formId, resultId) {
const form = document.getElementById(formId);
const resultDiv = document.getElementById(resultId);
@@ -335,7 +332,6 @@
testConnection('editConnectionForm', 'editTestResult');
});
// Modal Event Listeners
const editModal = document.getElementById('editConnectionModal');
editModal.addEventListener('show.bs.modal', event => {
const button = event.relatedTarget;


@@ -55,8 +55,8 @@ python run.py --mode ui
<tbody>
<tr>
<td><code>API_BASE_URL</code></td>
<td><code>http://127.0.0.1:5000</code></td>
<td>The public URL of the API. <strong>Required</strong> if running behind a proxy or if the UI and API are on different domains. Ensures presigned URLs are generated correctly.</td>
<td><code>None</code></td>
<td>The public URL of the API. <strong>Required</strong> if running behind a proxy. Ensures presigned URLs are generated correctly.</td>
</tr>
<tr>
<td><code>STORAGE_ROOT</code></td>
@@ -65,13 +65,13 @@ python run.py --mode ui
</tr>
<tr>
<td><code>MAX_UPLOAD_SIZE</code></td>
<td><code>5 GB</code></td>
<td>Max request body size.</td>
<td><code>1 GB</code></td>
<td>Max request body size in bytes.</td>
</tr>
<tr>
<td><code>SECRET_KEY</code></td>
<td>(Random)</td>
<td>Flask session key. Set this in production.</td>
<td>(Auto-generated)</td>
<td>Flask session key. Auto-generates if not set. <strong>Set explicitly in production.</strong></td>
</tr>
<tr>
<td><code>APP_HOST</code></td>
@@ -81,7 +81,51 @@ python run.py --mode ui
<tr>
<td><code>APP_PORT</code></td>
<td><code>5000</code></td>
<td>Listen port.</td>
<td>Listen port (UI uses 5100).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">CORS Settings</td>
</tr>
<tr>
<td><code>CORS_ORIGINS</code></td>
<td><code>*</code></td>
<td>Allowed origins. <strong>Restrict in production.</strong></td>
</tr>
<tr>
<td><code>CORS_METHODS</code></td>
<td><code>GET,PUT,POST,DELETE,OPTIONS,HEAD</code></td>
<td>Allowed HTTP methods.</td>
</tr>
<tr>
<td><code>CORS_ALLOW_HEADERS</code></td>
<td><code>*</code></td>
<td>Allowed request headers.</td>
</tr>
<tr>
<td><code>CORS_EXPOSE_HEADERS</code></td>
<td><code>*</code></td>
<td>Response headers visible to browsers (e.g., <code>ETag</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Security Settings</td>
</tr>
<tr>
<td><code>AUTH_MAX_ATTEMPTS</code></td>
<td><code>5</code></td>
<td>Failed login attempts before lockout.</td>
</tr>
<tr>
<td><code>AUTH_LOCKOUT_MINUTES</code></td>
<td><code>15</code></td>
<td>Lockout duration after max failed attempts.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_DEFAULT</code></td>
<td><code>200 per minute</code></td>
<td>Default API rate limit.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Encryption Settings</td>
</tr>
<tr>
<td><code>ENCRYPTION_ENABLED</code></td>
@@ -93,9 +137,25 @@ python run.py --mode ui
<td><code>false</code></td>
<td>Enable KMS key management for encryption.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Logging Settings</td>
</tr>
<tr>
<td><code>LOG_LEVEL</code></td>
<td><code>INFO</code></td>
<td>Log verbosity: DEBUG, INFO, WARNING, ERROR.</td>
</tr>
<tr>
<td><code>LOG_TO_FILE</code></td>
<td><code>true</code></td>
<td>Enable file logging.</td>
</tr>
</tbody>
</table>
</div>
<div class="alert alert-warning mt-3 mb-0 small">
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code>, restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via reverse proxy, and use <code>--prod</code> flag.
</div>
</div>
</article>
<article id="background" class="card shadow-sm docs-section">
@@ -140,7 +200,7 @@ WorkingDirectory=/opt/myfsio
ExecStart=/opt/myfsio/myfsio
Restart=on-failure
RestartSec=5
Environment=MYFSIO_DATA_DIR=/var/lib/myfsio
Environment=STORAGE_ROOT=/var/lib/myfsio
Environment=API_BASE_URL=https://s3.example.com
[Install]
@@ -195,6 +255,15 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<li>Progress rows highlight retries, throughput, and completion even if you close the modal.</li>
</ul>
</div>
<div>
<h3 class="h6 text-uppercase text-muted">Object browser</h3>
<ul>
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders.</li>
<li>Infinite scroll loads more objects automatically. Choose batch size (50–250) from the footer dropdown.</li>
<li>Bulk select objects for multi-delete or multi-download. Filter by name using the search box.</li>
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
</ul>
</div>
<div>
<h3 class="h6 text-uppercase text-muted">Object details</h3>
<ul>
@@ -418,10 +487,78 @@ s3.complete_multipart_upload(
</p>
</div>
</article>
<article id="quotas" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">10</span>
<h2 class="h4 mb-0">Bucket Quotas</h2>
</div>
<p class="text-muted">Limit how much data a bucket can hold using storage quotas. Quotas are enforced on uploads and multipart completions.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Quota Types</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Limit</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Max Size (MB)</strong></td>
<td>Maximum total storage in megabytes (includes current objects + archived versions)</td>
</tr>
<tr>
<td><strong>Max Objects</strong></td>
<td>Maximum number of objects (includes current objects + archived versions)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Managing Quotas (Admin Only)</h3>
<p class="small text-muted">Quota management is restricted to administrators (users with <code>iam:*</code> permissions).</p>
<ol class="docs-steps mb-3">
<li>Navigate to your bucket → <strong>Properties</strong> tab → <strong>Storage Quota</strong> card.</li>
<li>Enter limits: <strong>Max Size (MB)</strong> and/or <strong>Max Objects</strong>. Leave empty for unlimited.</li>
<li>Click <strong>Update Quota</strong> to save, or <strong>Remove Quota</strong> to clear limits.</li>
</ol>
<h3 class="h6 text-uppercase text-muted mt-4">API Usage</h3>
<pre class="mb-3"><code class="language-bash"># Set quota (max 100MB, max 1000 objects)
curl -X PUT "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"max_bytes": 104857600, "max_objects": 1000}'
# Get current quota
curl "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Remove quota
curl -X PUT "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"max_bytes": null, "max_objects": null}'</code></pre>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Version Counting:</strong> When versioning is enabled, archived versions count toward the quota. The quota is checked against total storage, not just current objects.
</div>
</div>
</div>
</div>
</article>
<article id="encryption" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">09</span>
<span class="docs-section-kicker">11</span>
<h2 class="h4 mb-0">Encryption</h2>
</div>
<p class="text-muted">Protect data at rest with server-side encryption using AES-256-GCM. Objects are encrypted before being written to disk and decrypted transparently on read.</p>
@@ -515,7 +652,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">10</span>
<span class="docs-section-kicker">12</span>
<h2 class="h4 mb-0">Troubleshooting &amp; tips</h2>
</div>
<div class="table-responsive">
@@ -572,6 +709,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<li><a href="#api">REST endpoints</a></li>
<li><a href="#examples">API Examples</a></li>
<li><a href="#replication">Site Replication</a></li>
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
</ul>


@@ -203,7 +203,6 @@
{% endif %}
</div>
<!-- Create User Modal -->
<div class="modal fade" id="createUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -252,7 +251,6 @@
</div>
</div>
<!-- Policy Editor Modal -->
<div class="modal fade" id="policyEditorModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-lg modal-dialog-centered">
<div class="modal-content">
@@ -303,7 +301,6 @@
</div>
</div>
<!-- Edit User Modal -->
<div class="modal fade" id="editUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -338,7 +335,6 @@
</div>
</div>
<!-- Delete User Modal -->
<div class="modal fade" id="deleteUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -382,7 +378,6 @@
</div>
</div>
<!-- Rotate Secret Modal -->
<div class="modal fade" id="rotateSecretModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered">
<div class="modal-content">
@@ -486,8 +481,7 @@
const iamUsersData = document.getElementById('iamUsersJson');
const users = iamUsersData ? JSON.parse(iamUsersData.textContent || '[]') : [];
// Policy Editor Logic
const policyModalEl = document.getElementById('policyEditorModal');
const policyModal = new bootstrap.Modal(policyModalEl);
const userLabelEl = document.getElementById('policyEditorUserLabel');
@@ -534,7 +528,6 @@
button.addEventListener('click', () => applyTemplate(button.dataset.policyTemplate));
});
// Create User modal template buttons
const createUserPoliciesEl = document.getElementById('createUserPolicies');
const createTemplateButtons = document.querySelectorAll('[data-create-policy-template]');
@@ -591,7 +584,6 @@
});
});
// Edit User Logic
const editUserModal = new bootstrap.Modal(document.getElementById('editUserModal'));
const editUserForm = document.getElementById('editUserForm');
const editUserDisplayName = document.getElementById('editUserDisplayName');
@@ -606,7 +598,6 @@
});
});
// Delete User Logic
const deleteUserModal = new bootstrap.Modal(document.getElementById('deleteUserModal'));
const deleteUserForm = document.getElementById('deleteUserForm');
const deleteUserLabel = document.getElementById('deleteUserLabel');
@@ -628,7 +619,6 @@
});
});
// Rotate Secret Logic
const rotateSecretModal = new bootstrap.Modal(document.getElementById('rotateSecretModal'));
const rotateUserLabel = document.getElementById('rotateUserLabel');
const confirmRotateBtn = document.getElementById('confirmRotateBtn');
@@ -644,8 +634,7 @@
btn.addEventListener('click', () => {
currentRotateKey = btn.dataset.rotateUser;
rotateUserLabel.textContent = currentRotateKey;
// Reset Modal State
rotateSecretConfirm.classList.remove('d-none');
rotateSecretResult.classList.add('d-none');
confirmRotateBtn.classList.remove('d-none');
@@ -679,8 +668,7 @@
const data = await response.json();
newSecretKeyInput.value = data.secret_key;
// Show Result
rotateSecretConfirm.classList.add('d-none');
rotateSecretResult.classList.remove('d-none');
confirmRotateBtn.classList.add('d-none');


@@ -126,7 +126,6 @@
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">System Overview</h5>
<span class="badge bg-primary-subtle text-primary">Live</span>
</div>
<div class="card-body p-4">
<div class="table-responsive">
@@ -233,14 +232,14 @@
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-check-circle-fill me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
</svg>
Healthy
v{{ app.version }}
</span>
</div>
<h4 class="card-title fw-bold mb-3">System Status</h4>
<p class="card-text opacity-90 mb-4">All systems operational. Your storage infrastructure is running smoothly with no detected issues.</p>
<div class="d-flex gap-4">
<div>
<div class="h3 fw-bold mb-0">99.9%</div>
<div class="h3 fw-bold mb-0">{{ app.uptime_days }}d</div>
<small class="opacity-75">Uptime</small>
</div>
<div>


@@ -38,7 +38,7 @@ def test_unicode_bucket_and_object_names(tmp_path: Path):
assert storage.get_object_path("unicode-test", key).exists()
# Verify listing
objects = storage.list_objects("unicode-test")
objects = storage.list_objects_all("unicode-test")
assert any(o.key == key for o in objects)
def test_special_characters_in_metadata(tmp_path: Path):


@@ -220,7 +220,7 @@ def test_bucket_config_filename_allowed(tmp_path):
storage.create_bucket("demo")
storage.put_object("demo", ".bucket.json", io.BytesIO(b"{}"))
objects = storage.list_objects("demo")
objects = storage.list_objects_all("demo")
assert any(meta.key == ".bucket.json" for meta in objects)


@@ -62,7 +62,7 @@ def test_bulk_delete_json_route(tmp_path: Path):
assert set(payload["deleted"]) == {"first.txt", "missing.txt"}
assert payload["errors"] == []
listing = storage.list_objects("demo")
listing = storage.list_objects_all("demo")
assert {meta.key for meta in listing} == {"second.txt"}
@@ -92,5 +92,5 @@ def test_bulk_delete_validation(tmp_path: Path):
assert limit_response.status_code == 400
assert limit_response.get_json()["status"] == "error"
still_there = storage.list_objects("demo")
still_there = storage.list_objects_all("demo")
assert {meta.key for meta in still_there} == {"keep.txt"}

tests/test_ui_pagination.py Normal file

@@ -0,0 +1,183 @@
"""Tests for UI pagination of bucket objects."""
import json
from io import BytesIO
from pathlib import Path
import pytest
from app import create_app
def _make_app(tmp_path: Path):
"""Create an app for testing."""
storage_root = tmp_path / "data"
iam_config = tmp_path / "iam.json"
bucket_policies = tmp_path / "bucket_policies.json"
iam_payload = {
"users": [
{
"access_key": "test",
"secret_key": "secret",
"display_name": "Test User",
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy"]}],
},
]
}
iam_config.write_text(json.dumps(iam_payload))
flask_app = create_app(
{
"TESTING": True,
"WTF_CSRF_ENABLED": False,
"STORAGE_ROOT": storage_root,
"IAM_CONFIG": iam_config,
"BUCKET_POLICY_PATH": bucket_policies,
}
)
return flask_app
class TestPaginatedObjectListing:
"""Test paginated object listing API."""
def test_objects_api_returns_paginated_results(self, tmp_path):
"""Objects API should return paginated results."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create 10 test objects
for i in range(10):
storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
with app.test_client() as client:
# Login first
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Request first page of 3 objects
resp = client.get("/ui/buckets/test-bucket/objects?max_keys=3")
assert resp.status_code == 200
data = resp.get_json()
assert len(data["objects"]) == 3
assert data["is_truncated"] is True
assert data["next_continuation_token"] is not None
assert data["total_count"] == 10
def test_objects_api_pagination_continuation(self, tmp_path):
"""Objects API should support continuation tokens."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create 5 test objects
for i in range(5):
storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Get first page
resp = client.get("/ui/buckets/test-bucket/objects?max_keys=2")
assert resp.status_code == 200
data = resp.get_json()
first_page_keys = [obj["key"] for obj in data["objects"]]
assert len(first_page_keys) == 2
assert data["is_truncated"] is True
# Get second page
token = data["next_continuation_token"]
resp = client.get(f"/ui/buckets/test-bucket/objects?max_keys=2&continuation_token={token}")
assert resp.status_code == 200
data = resp.get_json()
second_page_keys = [obj["key"] for obj in data["objects"]]
assert len(second_page_keys) == 2
# No overlap between pages
assert set(first_page_keys).isdisjoint(set(second_page_keys))
def test_objects_api_prefix_filter(self, tmp_path):
"""Objects API should support prefix filtering."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create objects with different prefixes
storage.put_object("test-bucket", "logs/access.log", BytesIO(b"log"))
storage.put_object("test-bucket", "logs/error.log", BytesIO(b"log"))
storage.put_object("test-bucket", "data/file.txt", BytesIO(b"data"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Filter by prefix
resp = client.get("/ui/buckets/test-bucket/objects?prefix=logs/")
assert resp.status_code == 200
data = resp.get_json()
keys = [obj["key"] for obj in data["objects"]]
assert all(k.startswith("logs/") for k in keys)
assert len(keys) == 2
def test_objects_api_requires_authentication(self, tmp_path):
"""Objects API should require login."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
with app.test_client() as client:
# Don't login
resp = client.get("/ui/buckets/test-bucket/objects")
# Should redirect to login
assert resp.status_code == 302
assert "/ui/login" in resp.headers.get("Location", "")
def test_objects_api_returns_object_metadata(self, tmp_path):
"""Objects API should return complete object metadata."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
storage.put_object("test-bucket", "test.txt", BytesIO(b"test content"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
resp = client.get("/ui/buckets/test-bucket/objects")
assert resp.status_code == 200
data = resp.get_json()
assert len(data["objects"]) == 1
obj = data["objects"][0]
# Check all expected fields
assert obj["key"] == "test.txt"
assert obj["size"] == 12 # len("test content")
assert "last_modified" in obj
assert "last_modified_display" in obj
assert "etag" in obj
assert "preview_url" in obj
assert "download_url" in obj
assert "delete_endpoint" in obj
def test_bucket_detail_page_loads_without_objects(self, tmp_path):
"""Bucket detail page should load even with many objects."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create many objects
for i in range(100):
storage.put_object("test-bucket", f"file{i:03d}.txt", BytesIO(b"x"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# The page should load quickly (objects loaded via JS)
resp = client.get("/ui/buckets/test-bucket")
assert resp.status_code == 200
html = resp.data.decode("utf-8")
# Should have the JavaScript loading infrastructure
assert "loadObjects" in html or "objectsApiUrl" in html


@@ -70,8 +70,12 @@ def test_ui_bucket_policy_enforcement_toggle(tmp_path: Path, enforce: bool):
assert b"Access denied by bucket policy" in response.data
else:
assert response.status_code == 200
assert b"vid.mp4" in response.data
assert b"Access denied by bucket policy" not in response.data
# Objects are now loaded via async API - check the objects endpoint
objects_response = client.get("/ui/buckets/testbucket/objects")
assert objects_response.status_code == 200
data = objects_response.get_json()
assert any(obj["key"] == "vid.mp4" for obj in data["objects"])
def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
@@ -109,5 +113,9 @@ def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
response = client.get("/ui/buckets/testbucket", follow_redirects=True)
assert response.status_code == 200
assert b"vid.mp4" in response.data
assert b"Access denied by bucket policy" not in response.data
# Objects are now loaded via async API - check the objects endpoint
objects_response = client.get("/ui/buckets/testbucket/objects")
assert objects_response.status_code == 200
data = objects_response.get_json()
assert any(obj["key"] == "vid.mp4" for obj in data["objects"])