14 Commits

SHA1 Message Date
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main (Reviewed-on: #6) 2025-12-13 15:41:03 +00:00
563bb8fa6a Fix incorrect STORAGE_ROOT setup; Add installation scripts 2025-12-13 22:26:43 +08:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main (Reviewed-on: #5) 2025-12-13 08:22:43 +00:00
5ccf53b688 Add app uptime and version status in Metrics dashboard 2025-12-13 16:18:38 +08:00
4d4256830a Update docs; Remove unnecessary hardcoded metrics details 2025-12-13 15:57:13 +08:00
137e3b7b68 Configure CORS default settings 2025-12-13 15:33:40 +08:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main (Reviewed-on: #4) 2025-12-03 04:14:57 +00:00
114e684cb8 Add logging to file missing 2025-12-03 12:11:42 +08:00
5d161c1d92 Fix presigned URL encoding issue 2025-12-03 12:08:02 +08:00
f160827b41 Update requirements.txt to the latest versions 2025-12-03 11:53:25 +08:00
9368715b16 Add bucket quota; Versioned objects now count towards the object storage and size count usage 2025-12-03 11:48:08 +08:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main (Reviewed-on: #3) 2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main (Reviewed-on: #2) 2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main (Reviewed-on: #1) 2025-11-22 12:31:27 +00:00
17 changed files with 1899 additions and 85 deletions

View File

@@ -86,7 +86,7 @@ Presigned URLs follow the AWS CLI playbook:
| `AWS_REGION` | `us-east-1` | Region used in Signature V4 scope |
| `AWS_SERVICE` | `s3` | Service used in Signature V4 scope |
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`. Existing installs can keep their environment variables, but the defaults now match MinIO's `data/.system` pattern for easier bind-mounting.
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`.
## API Cheatsheet (IAM headers required)

View File

@@ -185,14 +185,12 @@ def create_ui_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
def _configure_cors(app: Flask) -> None:
origins = app.config.get("CORS_ORIGINS", ["*"])
methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS"])
allow_headers = app.config.get(
"CORS_ALLOW_HEADERS",
["Content-Type", "X-Access-Key", "X-Secret-Key", "X-Amz-Date", "X-Amz-SignedHeaders"],
)
methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
allow_headers = app.config.get("CORS_ALLOW_HEADERS", ["*"])
expose_headers = app.config.get("CORS_EXPOSE_HEADERS", ["*"])
CORS(
app,
resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers}},
resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers, "expose_headers": expose_headers}},
supports_credentials=True,
)

View File

@@ -50,6 +50,7 @@ class AppConfig:
aws_service: str
ui_enforce_bucket_policies: bool
log_level: str
log_to_file: bool
log_path: Path
log_max_bytes: int
log_backup_count: int
@@ -58,6 +59,7 @@ class AppConfig:
cors_origins: list[str]
cors_methods: list[str]
cors_allow_headers: list[str]
cors_expose_headers: list[str]
session_lifetime_days: int
auth_max_attempts: int
auth_lockout_minutes: int
@@ -109,19 +111,19 @@ class AppConfig:
iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
bucket_policy_override = "BUCKET_POLICY_PATH" in overrides or "BUCKET_POLICY_PATH" in os.environ
default_iam_path = PROJECT_ROOT / "data" / ".myfsio.sys" / "config" / "iam.json"
default_bucket_policy_path = PROJECT_ROOT / "data" / ".myfsio.sys" / "config" / "bucket_policies.json"
default_iam_path = storage_root / ".myfsio.sys" / "config" / "iam.json"
default_bucket_policy_path = storage_root / ".myfsio.sys" / "config" / "bucket_policies.json"
iam_config_path = Path(_get("IAM_CONFIG", default_iam_path)).resolve()
bucket_policy_path = Path(_get("BUCKET_POLICY_PATH", default_bucket_policy_path)).resolve()
iam_config_path = _prepare_config_file(
iam_config_path,
legacy_path=None if iam_env_override else PROJECT_ROOT / "data" / "iam.json",
legacy_path=None if iam_env_override else storage_root / "iam.json",
)
bucket_policy_path = _prepare_config_file(
bucket_policy_path,
legacy_path=None if bucket_policy_override else PROJECT_ROOT / "data" / "bucket_policies.json",
legacy_path=None if bucket_policy_override else storage_root / "bucket_policies.json",
)
api_base_url = _get("API_BASE_URL", None)
if api_base_url:
@@ -131,7 +133,8 @@ class AppConfig:
aws_service = str(_get("AWS_SERVICE", "s3"))
enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
log_level = str(_get("LOG_LEVEL", "INFO")).upper()
log_dir = Path(_get("LOG_DIR", PROJECT_ROOT / "logs")).resolve()
log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
log_dir = Path(_get("LOG_DIR", storage_root.parent / "logs")).resolve()
log_dir.mkdir(parents=True, exist_ok=True)
log_path = log_dir / str(_get("LOG_FILE", "app.log"))
log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
@@ -146,18 +149,9 @@ class AppConfig:
return parts or default
cors_origins = _csv(str(_get("CORS_ORIGINS", "*")), ["*"])
cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS")), ["GET", "PUT", "POST", "DELETE", "OPTIONS"])
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "Content-Type,X-Access-Key,X-Secret-Key,X-Amz-Algorithm,X-Amz-Credential,X-Amz-Date,X-Amz-Expires,X-Amz-SignedHeaders,X-Amz-Signature")), [
"Content-Type",
"X-Access-Key",
"X-Secret-Key",
"X-Amz-Algorithm",
"X-Amz-Credential",
"X-Amz-Date",
"X-Amz-Expires",
"X-Amz-SignedHeaders",
"X-Amz-Signature",
])
cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD")), ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60)) # Default 60 seconds
@@ -180,6 +174,7 @@ class AppConfig:
aws_service=aws_service,
ui_enforce_bucket_policies=enforce_ui_policies,
log_level=log_level,
log_to_file=log_to_file,
log_path=log_path,
log_max_bytes=log_max_bytes,
log_backup_count=log_backup_count,
@@ -188,6 +183,7 @@ class AppConfig:
cors_origins=cors_origins,
cors_methods=cors_methods,
cors_allow_headers=cors_allow_headers,
cors_expose_headers=cors_expose_headers,
session_lifetime_days=session_lifetime_days,
auth_max_attempts=auth_max_attempts,
auth_lockout_minutes=auth_lockout_minutes,
@@ -202,6 +198,102 @@ class AppConfig:
kms_keys_path=kms_keys_path,
default_encryption_algorithm=default_encryption_algorithm)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
Call this at startup to detect potential misconfigurations before
the application fully commits to running.
"""
issues = []
# Check if storage_root is writable
try:
test_file = self.storage_root / ".write_test"
test_file.touch()
test_file.unlink()
except (OSError, PermissionError) as e:
issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")
# Check if storage_root looks like a temp directory
storage_str = str(self.storage_root).lower()
if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")
# Check if IAM config path is under storage_root
try:
self.iam_config_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")
# Check if bucket policy path is under storage_root
try:
self.bucket_policy_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")
# Check if log path is writable
try:
self.log_path.parent.mkdir(parents=True, exist_ok=True)
test_log = self.log_path.parent / ".write_test"
test_log.touch()
test_log.unlink()
except (OSError, PermissionError) as e:
issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")
# Check log path location
log_str = str(self.log_path).lower()
if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")
# Check if encryption keys path is under storage_root (when encryption is enabled)
if self.encryption_enabled:
try:
self.encryption_master_key_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
# Check if KMS keys path is under storage_root (when KMS is enabled)
if self.kms_enabled:
try:
self.kms_keys_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
# Warn about production settings
if self.secret_key == "dev-secret-key":
issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")
if "*" in self.cors_origins:
issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")
return issues
def print_startup_summary(self) -> None:
"""Print a summary of the configuration at startup."""
print("\n" + "=" * 60)
print("MyFSIO Configuration Summary")
print("=" * 60)
print(f" STORAGE_ROOT: {self.storage_root}")
print(f" IAM_CONFIG: {self.iam_config_path}")
print(f" BUCKET_POLICY: {self.bucket_policy_path}")
print(f" LOG_PATH: {self.log_path}")
if self.api_base_url:
print(f" API_BASE_URL: {self.api_base_url}")
if self.encryption_enabled:
print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
if self.kms_enabled:
print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
print("=" * 60)
issues = self.validate_and_report()
if issues:
print("\nConfiguration Issues Detected:")
for issue in issues:
print(f"{issue}")
print()
else:
print(" ✓ Configuration validated successfully\n")
def to_flask_config(self) -> Dict[str, Any]:
return {
"STORAGE_ROOT": str(self.storage_root),
@@ -222,6 +314,7 @@ class AppConfig:
"MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
"BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
"LOG_LEVEL": self.log_level,
"LOG_TO_FILE": self.log_to_file,
"LOG_FILE": str(self.log_path),
"LOG_MAX_BYTES": self.log_max_bytes,
"LOG_BACKUP_COUNT": self.log_backup_count,
@@ -230,6 +323,7 @@ class AppConfig:
"CORS_ORIGINS": self.cors_origins,
"CORS_METHODS": self.cors_methods,
"CORS_ALLOW_HEADERS": self.cors_allow_headers,
"CORS_EXPOSE_HEADERS": self.cors_expose_headers,
"SESSION_LIFETIME_DAYS": self.session_lifetime_days,
"ENCRYPTION_ENABLED": self.encryption_enabled,
"ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),

View File

@@ -266,5 +266,11 @@ class EncryptedObjectStorage:
def list_multipart_parts(self, bucket_name: str, upload_id: str):
return self.storage.list_multipart_parts(bucket_name, upload_id)
def get_bucket_quota(self, bucket_name: str):
return self.storage.get_bucket_quota(bucket_name)
def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
def _compute_etag(self, path: Path) -> str:
return self.storage._compute_etag(path)

View File

@@ -129,6 +129,25 @@ class EntityTooLargeError(AppError):
status_code: int = 413
@dataclass
class QuotaExceededAppError(AppError):
"""Bucket quota exceeded."""
code: str = "QuotaExceeded"
message: str = "The bucket quota has been exceeded"
status_code: int = 403
quota: Optional[Dict[str, Any]] = None
usage: Optional[Dict[str, int]] = None
def __post_init__(self):
if self.quota or self.usage:
self.details = {}
if self.quota:
self.details["quota"] = self.quota
if self.usage:
self.details["usage"] = self.usage
super().__post_init__()
def handle_app_error(error: AppError) -> Response:
"""Handle application errors with appropriate response format."""
log_extra = {"error_code": error.code}
@@ -163,5 +182,6 @@ def register_error_handlers(app):
ObjectNotFoundError, InvalidObjectKeyError,
AccessDeniedError, InvalidCredentialsError,
MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
QuotaExceededAppError,
]:
app.register_error_handler(error_class, handle_app_error)

View File

@@ -18,7 +18,7 @@ from .bucket_policies import BucketPolicyStore
from .extensions import limiter
from .iam import IamError, Principal
from .replication import ReplicationManager
from .storage import ObjectStorage, StorageError
from .storage import ObjectStorage, StorageError, QuotaExceededError
s3_api_bp = Blueprint("s3_api", __name__)
@@ -803,6 +803,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
"acl": _bucket_acl_handler,
"versions": _bucket_list_versions_handler,
"lifecycle": _bucket_lifecycle_handler,
"quota": _bucket_quota_handler,
}
requested = [key for key in handlers if key in request.args]
if not requested:
@@ -1400,6 +1401,87 @@ def _parse_lifecycle_config(payload: bytes) -> list:
return rules
def _bucket_quota_handler(bucket_name: str) -> Response:
"""Handle bucket quota configuration (GET/PUT/DELETE /<bucket>?quota)."""
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
if not storage.bucket_exists(bucket_name):
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
if request.method == "GET":
quota = storage.get_bucket_quota(bucket_name)
if not quota:
return _error_response("NoSuchQuotaConfiguration", "No quota configuration found", 404)
# Return as JSON for simplicity (not a standard S3 API)
stats = storage.bucket_stats(bucket_name)
return jsonify({
"quota": quota,
"usage": {
"bytes": stats.get("bytes", 0),
"objects": stats.get("objects", 0),
}
})
if request.method == "DELETE":
try:
storage.set_bucket_quota(bucket_name, max_size_bytes=None, max_objects=None)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
current_app.logger.info("Bucket quota deleted", extra={"bucket": bucket_name})
return Response(status=204)
# PUT
payload = request.get_json(silent=True)
if not payload:
return _error_response("MalformedRequest", "Request body must be JSON with quota limits", 400)
max_size_bytes = payload.get("max_size_bytes")
max_objects = payload.get("max_objects")
if max_size_bytes is None and max_objects is None:
return _error_response("InvalidArgument", "At least one of max_size_bytes or max_objects is required", 400)
# Validate types
if max_size_bytes is not None:
try:
max_size_bytes = int(max_size_bytes)
if max_size_bytes < 0:
raise ValueError("must be non-negative")
except (TypeError, ValueError) as exc:
return _error_response("InvalidArgument", f"max_size_bytes {exc}", 400)
if max_objects is not None:
try:
max_objects = int(max_objects)
if max_objects < 0:
raise ValueError("must be non-negative")
except (TypeError, ValueError) as exc:
return _error_response("InvalidArgument", f"max_objects {exc}", 400)
try:
storage.set_bucket_quota(bucket_name, max_size_bytes=max_size_bytes, max_objects=max_objects)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
current_app.logger.info(
"Bucket quota updated",
extra={"bucket": bucket_name, "max_size_bytes": max_size_bytes, "max_objects": max_objects}
)
return Response(status=204)
def _bulk_delete_handler(bucket_name: str) -> Response:
principal, error = _require_principal()
if error:
@@ -1749,6 +1831,8 @@ def object_handler(bucket_name: str, object_key: str):
stream,
metadata=metadata or None,
)
except QuotaExceededError as exc:
return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
message = str(exc)
if "Bucket" in message:
@@ -2256,6 +2340,8 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
try:
meta = _storage().complete_multipart_upload(bucket_name, upload_id, parts)
except QuotaExceededError as exc:
return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
if "NoSuchBucket" in str(exc):
return _error_response("NoSuchBucket", str(exc), 404)

View File

@@ -75,6 +75,15 @@ class StorageError(RuntimeError):
"""Raised when the storage layer encounters an unrecoverable problem."""
class QuotaExceededError(StorageError):
"""Raised when an operation would exceed bucket quota limits."""
def __init__(self, message: str, quota: Dict[str, Any], usage: Dict[str, int]):
super().__init__(message)
self.quota = quota
self.usage = usage
@dataclass
class ObjectMeta:
key: str
@@ -169,16 +178,38 @@ class ObjectStorage:
object_count = 0
total_bytes = 0
version_count = 0
version_bytes = 0
# Count current objects in the bucket folder
for path in bucket_path.rglob("*"):
if path.is_file():
rel = path.relative_to(bucket_path)
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
if not rel.parts:
continue
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
top_folder = rel.parts[0]
if top_folder not in self.INTERNAL_FOLDERS:
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
stats = {"objects": object_count, "bytes": total_bytes}
# Count archived versions in the system folder
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
for path in versions_root.rglob("*.bin"):
if path.is_file():
stat = path.stat()
version_count += 1
version_bytes += stat.st_size
stats = {
"objects": object_count,
"bytes": total_bytes,
"version_count": version_count,
"version_bytes": version_bytes,
"total_objects": object_count + version_count, # All objects including versions
"total_bytes": total_bytes + version_bytes, # All storage including versions
}
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
@@ -243,6 +274,7 @@ class ObjectStorage:
stream: BinaryIO,
*,
metadata: Optional[Dict[str, str]] = None,
enforce_quota: bool = True,
) -> ObjectMeta:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
@@ -253,12 +285,52 @@ class ObjectStorage:
destination = bucket_path / safe_key
destination.parent.mkdir(parents=True, exist_ok=True)
if self._is_versioning_enabled(bucket_path) and destination.exists():
# Check if this is an overwrite (won't add to object count)
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
if self._is_versioning_enabled(bucket_path) and is_overwrite:
self._archive_current_version(bucket_id, safe_key, reason="overwrite")
checksum = hashlib.md5()
with destination.open("wb") as target:
shutil.copyfileobj(_HashingReader(stream, checksum), target)
# Write to temp file first to get actual size
tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
tmp_dir.mkdir(parents=True, exist_ok=True)
tmp_path = tmp_dir / f"{uuid.uuid4().hex}.tmp"
try:
checksum = hashlib.md5()
with tmp_path.open("wb") as target:
shutil.copyfileobj(_HashingReader(stream, checksum), target)
new_size = tmp_path.stat().st_size
# Check quota before finalizing
if enforce_quota:
# Calculate net change (new size minus size being replaced)
size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
additional_objects=object_delta,
)
if not quota_check["allowed"]:
raise QuotaExceededError(
quota_check["message"] or "Quota exceeded",
quota_check["quota"],
quota_check["usage"],
)
# Move to final destination
shutil.move(str(tmp_path), str(destination))
finally:
# Clean up temp file if it still exists
try:
tmp_path.unlink(missing_ok=True)
except OSError:
pass
stat = destination.stat()
if metadata:
@@ -424,6 +496,124 @@ class ObjectStorage:
bucket_path = self._require_bucket_path(bucket_name)
self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules)
def get_bucket_quota(self, bucket_name: str) -> Dict[str, Any]:
"""Get quota configuration for bucket.
Returns:
Dict with 'max_bytes' and 'max_objects' (None if unlimited).
"""
bucket_path = self._require_bucket_path(bucket_name)
config = self._read_bucket_config(bucket_path.name)
quota = config.get("quota")
if isinstance(quota, dict):
return {
"max_bytes": quota.get("max_bytes"),
"max_objects": quota.get("max_objects"),
}
return {"max_bytes": None, "max_objects": None}
def set_bucket_quota(
self,
bucket_name: str,
*,
max_bytes: Optional[int] = None,
max_objects: Optional[int] = None,
) -> None:
"""Set quota limits for a bucket.
Args:
bucket_name: Name of the bucket
max_bytes: Maximum total size in bytes (None to remove limit)
max_objects: Maximum number of objects (None to remove limit)
"""
bucket_path = self._require_bucket_path(bucket_name)
if max_bytes is None and max_objects is None:
# Remove quota entirely
self._set_bucket_config_entry(bucket_path.name, "quota", None)
return
quota: Dict[str, Any] = {}
if max_bytes is not None:
if max_bytes < 0:
raise StorageError("max_bytes must be non-negative")
quota["max_bytes"] = max_bytes
if max_objects is not None:
if max_objects < 0:
raise StorageError("max_objects must be non-negative")
quota["max_objects"] = max_objects
self._set_bucket_config_entry(bucket_path.name, "quota", quota)
def check_quota(
self,
bucket_name: str,
additional_bytes: int = 0,
additional_objects: int = 0,
) -> Dict[str, Any]:
"""Check if an operation would exceed bucket quota.
Args:
bucket_name: Name of the bucket
additional_bytes: Bytes that would be added
additional_objects: Objects that would be added
Returns:
Dict with 'allowed' (bool), 'quota' (current limits),
'usage' (current usage), and 'message' (if not allowed).
"""
quota = self.get_bucket_quota(bucket_name)
if not quota:
return {
"allowed": True,
"quota": None,
"usage": None,
"message": None,
}
# Get current stats (uses cache when available)
stats = self.bucket_stats(bucket_name)
# Use totals which include versions for quota enforcement
current_bytes = stats.get("total_bytes", stats.get("bytes", 0))
current_objects = stats.get("total_objects", stats.get("objects", 0))
result = {
"allowed": True,
"quota": quota,
"usage": {
"bytes": current_bytes,
"objects": current_objects,
"version_count": stats.get("version_count", 0),
"version_bytes": stats.get("version_bytes", 0),
},
"message": None,
}
max_bytes_limit = quota.get("max_bytes")
max_objects = quota.get("max_objects")
if max_bytes_limit is not None:
projected_bytes = current_bytes + additional_bytes
if projected_bytes > max_bytes_limit:
result["allowed"] = False
result["message"] = (
f"Quota exceeded: adding {additional_bytes} bytes would result in "
f"{projected_bytes} bytes, exceeding limit of {max_bytes_limit} bytes"
)
return result
if max_objects is not None:
projected_objects = current_objects + additional_objects
if projected_objects > max_objects:
result["allowed"] = False
result["message"] = (
f"Quota exceeded: adding {additional_objects} objects would result in "
f"{projected_objects} objects, exceeding limit of {max_objects} objects"
)
return result
return result
def get_object_tags(self, bucket_name: str, object_key: str) -> List[Dict[str, str]]:
"""Get tags for an object."""
bucket_path = self._bucket_path(bucket_name)
@@ -540,6 +730,7 @@ class ObjectStorage:
else:
self._delete_metadata(bucket_id, safe_key)
stat = destination.stat()
self._invalidate_bucket_stats_cache(bucket_id)
return ObjectMeta(
key=safe_key.as_posix(),
size=stat.st_size,
@@ -688,6 +879,7 @@ class ObjectStorage:
bucket_name: str,
upload_id: str,
ordered_parts: List[Dict[str, Any]],
enforce_quota: bool = True,
) -> ObjectMeta:
if not ordered_parts:
raise StorageError("parts list required")
@@ -698,6 +890,7 @@ class ObjectStorage:
if not parts_map:
raise StorageError("No uploaded parts found")
validated: List[tuple[int, Dict[str, Any]]] = []
total_size = 0
for part in ordered_parts:
raw_number = part.get("part_number")
if raw_number is None:
@@ -717,10 +910,33 @@ class ObjectStorage:
if supplied_etag and record.get("etag") and supplied_etag.strip('"') != record["etag"]:
raise StorageError(f"ETag mismatch for part {number}")
validated.append((number, record))
total_size += record.get("size", 0)
validated.sort(key=lambda entry: entry[0])
safe_key = self._sanitize_object_key(manifest["object_key"])
destination = bucket_path / safe_key
# Check if this is an overwrite
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
# Check quota before writing
if enforce_quota:
size_delta = total_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
additional_objects=object_delta,
)
if not quota_check["allowed"]:
raise QuotaExceededError(
quota_check["message"] or "Quota exceeded",
quota_check["quota"],
quota_check["usage"],
)
destination.parent.mkdir(parents=True, exist_ok=True)
lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"

app/ui.py (107 changed lines)

@@ -6,7 +6,7 @@ import uuid
import psutil
import shutil
from typing import Any
from urllib.parse import urlparse
from urllib.parse import quote, urlparse
import boto3
import requests
@@ -260,9 +260,9 @@ def buckets_overview():
visible_buckets.append({
"meta": bucket,
"summary": {
"objects": stats["objects"],
"total_bytes": stats["bytes"],
"human_size": _format_bytes(stats["bytes"]),
"objects": stats["total_objects"],
"total_bytes": stats["total_bytes"],
"human_size": _format_bytes(stats["total_bytes"]),
},
"access_label": access_label,
"access_badge": access_badge,
@@ -372,6 +372,16 @@ def bucket_detail(bucket_name: str):
encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
can_manage_encryption = can_manage_versioning # Same as other bucket properties
# Quota settings (admin only)
bucket_quota = storage.get_bucket_quota(bucket_name)
bucket_stats = storage.bucket_stats(bucket_name)
can_manage_quota = False
try:
_iam().authorize(principal, None, "iam:list_users")
can_manage_quota = True
except IamError:
pass
return render_template(
"bucket_detail.html",
bucket_name=bucket_name,
@@ -392,6 +402,9 @@ def bucket_detail(bucket_name: str):
kms_keys=kms_keys,
kms_enabled=kms_enabled,
encryption_enabled=encryption_enabled,
bucket_quota=bucket_quota,
bucket_stats=bucket_stats,
can_manage_quota=can_manage_quota,
)
@@ -783,7 +796,8 @@ def object_presign(bucket_name: str, object_key: str):
api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
api_base = api_base.rstrip("/")
url = f"{api_base}/presign/{bucket_name}/{object_key}"
encoded_key = quote(object_key, safe="")
url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
# Use API base URL for forwarded headers so presigned URLs point to API, not UI
parsed_api = urlparse(api_base)
@@ -925,6 +939,71 @@ def update_bucket_versioning(bucket_name: str):
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
@ui_bp.post("/buckets/<bucket_name>/quota")
def update_bucket_quota(bucket_name: str):
"""Update bucket quota configuration (admin only)."""
principal = _current_principal()
# Quota management is admin-only
is_admin = False
try:
_iam().authorize(principal, None, "iam:list_users")
is_admin = True
except IamError:
pass
if not is_admin:
flash("Only administrators can manage bucket quotas", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
action = request.form.get("action", "set")
if action == "remove":
try:
_storage().set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
flash("Bucket quota removed", "info")
except StorageError as exc:
flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
# Parse quota values
max_mb_str = request.form.get("max_mb", "").strip()
max_objects_str = request.form.get("max_objects", "").strip()
max_bytes = None
max_objects = None
if max_mb_str:
try:
max_mb = int(max_mb_str)
if max_mb < 1:
raise ValueError("Size must be at least 1 MB")
max_bytes = max_mb * 1024 * 1024 # Convert MB to bytes
except ValueError as exc:
flash(f"Invalid size value: {exc}", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
if max_objects_str:
try:
max_objects = int(max_objects_str)
if max_objects < 0:
raise ValueError("Object count must be non-negative")
except ValueError as exc:
flash(f"Invalid object count: {exc}", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
try:
_storage().set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
if max_bytes is None and max_objects is None:
flash("Bucket quota removed", "info")
else:
flash("Bucket quota updated", "success")
except StorageError as exc:
flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
@ui_bp.post("/buckets/<bucket_name>/encryption")
def update_bucket_encryption(bucket_name: str):
"""Update bucket default encryption configuration."""
@@ -1426,6 +1505,9 @@ def metrics_dashboard():
flash("Access denied: Metrics require admin permissions", "danger")
return redirect(url_for("ui.buckets_overview"))
from app.version import APP_VERSION
import time
cpu_percent = psutil.cpu_percent(interval=0.1)
memory = psutil.virtual_memory()
@@ -1438,13 +1520,21 @@ def metrics_dashboard():
total_objects = 0
total_bytes_used = 0
total_versions = 0
# Note: Uses cached stats from storage layer to improve performance
cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60)
for bucket in buckets:
stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl)
total_objects += stats["objects"]
total_bytes_used += stats["bytes"]
# Use totals which include archived versions
total_objects += stats.get("total_objects", stats.get("objects", 0))
total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
total_versions += stats.get("version_count", 0)
# Calculate system uptime
boot_time = psutil.boot_time()
uptime_seconds = time.time() - boot_time
uptime_days = int(uptime_seconds / 86400)
return render_template(
"metrics.html",
@@ -1465,8 +1555,11 @@ def metrics_dashboard():
app={
"buckets": total_buckets,
"objects": total_objects,
"versions": total_versions,
"storage_used": _format_bytes(total_bytes_used),
"storage_raw": total_bytes_used,
"version": APP_VERSION,
"uptime_days": uptime_days,
}
)

View File

@@ -1,7 +1,7 @@
"""Central location for the application version string."""
from __future__ import annotations
APP_VERSION = "0.1.3"
APP_VERSION = "0.1.5"
def get_version() -> str:

docs.md (558 changed lines)

@@ -33,6 +33,63 @@ python run.py --mode api # API only (port 5000)
python run.py --mode ui # UI only (port 5100)
```
### Configuration validation
Validate your configuration before deploying:
```bash
# Show configuration summary
python run.py --show-config
./myfsio --show-config
# Validate and check for issues (exits with code 1 if critical issues found)
python run.py --check-config
./myfsio --check-config
```
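The same checks can be invoked from Python, which is handy in a deployment pipeline that imports the app instead of shelling out. A minimal sketch mirroring what `--check-config` does:

```python
# Sketch: run the startup validation programmatically (same checks as --check-config).
from app.config import AppConfig

config = AppConfig.from_env()
issues = config.validate_and_report()
for issue in issues:
    print(issue)

# Treat CRITICAL findings as fatal, warnings and info as advisory.
critical = [i for i in issues if i.startswith("CRITICAL:")]
raise SystemExit(1 if critical else 0)
```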
### Linux Installation (Recommended for Production)
For production deployments on Linux, use the provided installation script:
```bash
# Download the binary and install script
# Then run the installer with sudo:
sudo ./scripts/install.sh --binary ./myfsio
# Or with custom paths:
sudo ./scripts/install.sh \
--binary ./myfsio \
--install-dir /opt/myfsio \
--data-dir /mnt/storage/myfsio \
--log-dir /var/log/myfsio \
--api-url https://s3.example.com \
--user myfsio
# Non-interactive mode (for automation):
sudo ./scripts/install.sh --binary ./myfsio -y
```
The installer will:
1. Create a dedicated system user
2. Set up directories with proper permissions
3. Generate a secure `SECRET_KEY`
4. Create an environment file at `/opt/myfsio/myfsio.env`
5. Install and configure a systemd service
After installation:
```bash
sudo systemctl start myfsio # Start the service
sudo systemctl enable myfsio # Enable on boot
sudo systemctl status myfsio # Check status
sudo journalctl -u myfsio -f # View logs
```
To uninstall:
```bash
sudo ./scripts/uninstall.sh # Full removal
sudo ./scripts/uninstall.sh --keep-data # Keep data directory
```
### Docker quickstart
The repo now ships a `Dockerfile` so you can run both services in one container:
@@ -69,23 +126,97 @@ The repo now tracks a human-friendly release string inside `app/version.py` (see
## 3. Configuration Reference
All configuration is done via environment variables. The tables below list every supported variable.
### Core Settings
| Variable | Default | Notes |
| --- | --- | --- |
| `STORAGE_ROOT` | `<repo>/data` | Filesystem home for all buckets/objects. |
| `MAX_UPLOAD_SIZE` | `1073741824` | Bytes. Caps incoming uploads in both API + UI. |
| `MAX_UPLOAD_SIZE` | `1073741824` (1 GiB) | Bytes. Caps incoming uploads in both API + UI. |
| `UI_PAGE_SIZE` | `100` | `MaxKeys` hint shown in listings. |
| `SECRET_KEY` | `dev-secret-key` | Flask session key for UI auth. |
| `IAM_CONFIG` | `<repo>/data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
| `BUCKET_POLICY_PATH` | `<repo>/data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
| `API_BASE_URL` | `None` | Used by the UI to hit API endpoints (presign/policy). If unset, the UI will auto-detect the host or use `X-Forwarded-*` headers. |
| `SECRET_KEY` | Auto-generated | Flask session key. Auto-generates and persists if not set. **Set explicitly in production.** |
| `API_BASE_URL` | `None` | Public URL for presigned URLs. Required behind proxies. |
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption support. |
| `KMS_ENABLED` | `false` | Enable KMS key management for encryption. |
| `KMS_KEYS_PATH` | `data/kms_keys.json` | Path to store KMS key metadata. |
| `ENCRYPTION_MASTER_KEY_PATH` | `data/master.key` | Path to the master encryption key file. |
Set env vars (or pass overrides to the app factories) to point the servers at custom paths.
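For programmatic setups (tests, embedded deployments) the same settings can be supplied as an override mapping instead of environment variables. A minimal sketch, assuming `create_api_app` accepts the same style of override dict that `create_ui_app(test_config=...)` takes in the code shown earlier in this changeset:

```python
# Sketch only: build both apps with explicit overrides instead of env vars.
# Override keys mirror the environment variable names documented above.
from app import create_api_app, create_ui_app

overrides = {
    "STORAGE_ROOT": "/mnt/storage/myfsio",
    "API_BASE_URL": "https://s3.example.com",
    "LOG_LEVEL": "DEBUG",
}

api_app = create_api_app(overrides)   # assumption: accepts an override mapping
ui_app = create_ui_app(overrides)     # test_config parameter shown in the diff above
```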
### IAM & Security
| Variable | Default | Notes |
| --- | --- | --- |
| `IAM_CONFIG` | `data/.myfsio.sys/config/iam.json` | Stores users, secrets, and inline policies. |
| `BUCKET_POLICY_PATH` | `data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store (auto hot-reload). |
| `AUTH_MAX_ATTEMPTS` | `5` | Failed login attempts before lockout. |
| `AUTH_LOCKOUT_MINUTES` | `15` | Lockout duration after max failed attempts. |
| `SESSION_LIFETIME_DAYS` | `30` | How long UI sessions remain valid. |
| `SECRET_TTL_SECONDS` | `300` | TTL for ephemeral secrets (presigned URLs). |
| `UI_ENFORCE_BUCKET_POLICIES` | `false` | Whether the UI should enforce bucket policies. |
### CORS (Cross-Origin Resource Sharing)
| Variable | Default | Notes |
| --- | --- | --- |
| `CORS_ORIGINS` | `*` | Comma-separated allowed origins. Use specific domains in production. |
| `CORS_METHODS` | `GET,PUT,POST,DELETE,OPTIONS,HEAD` | Allowed HTTP methods. |
| `CORS_ALLOW_HEADERS` | `*` | Allowed request headers. |
| `CORS_EXPOSE_HEADERS` | `*` | Response headers visible to browsers (e.g., `ETag`). |
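To confirm the CORS settings are taking effect, issue a browser-style preflight request and inspect the `Access-Control-*` response headers. A small sketch; the bucket name and origin are placeholders:

```python
# Sketch: simulate a browser preflight against the API and print the CORS headers.
import requests

resp = requests.options(
    "http://localhost:5000/my-bucket",
    headers={
        "Origin": "https://app.example.com",
        "Access-Control-Request-Method": "PUT",
        "Access-Control-Request-Headers": "Content-Type,X-Access-Key,X-Secret-Key",
    },
    timeout=5,
)
print(resp.status_code)
for name in ("Access-Control-Allow-Origin", "Access-Control-Allow-Methods",
             "Access-Control-Allow-Headers", "Access-Control-Expose-Headers"):
    print(f"{name}: {resp.headers.get(name)}")
```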
### Rate Limiting
| Variable | Default | Notes |
| --- | --- | --- |
| `RATE_LIMIT_DEFAULT` | `200 per minute` | Default rate limit for API endpoints. |
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
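Requests over the limit are rejected by Flask-Limiter, which responds with HTTP 429 by default, so bulk tooling should back off and retry. A minimal client-side sketch:

```python
# Sketch: retry with exponential backoff when the API rate limit (HTTP 429) is hit.
import time
import requests

def get_with_backoff(url: str, headers: dict, attempts: int = 5) -> requests.Response:
    for attempt in range(attempts):
        resp = requests.get(url, headers=headers, timeout=10)
        if resp.status_code != 429:
            return resp
        time.sleep(2 ** attempt)  # wait 1s, 2s, 4s, ... between retries
    return resp
```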
### Logging
| Variable | Default | Notes |
| --- | --- | --- |
| `LOG_LEVEL` | `INFO` | Log verbosity: `DEBUG`, `INFO`, `WARNING`, `ERROR`. |
| `LOG_TO_FILE` | `true` | Enable file logging. |
| `LOG_DIR` | `<repo>/logs` | Directory for log files. |
| `LOG_FILE` | `app.log` | Log filename. |
| `LOG_MAX_BYTES` | `5242880` (5 MB) | Max log file size before rotation. |
| `LOG_BACKUP_COUNT` | `3` | Number of rotated log files to keep. |
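For reference, these defaults describe a standard size-based rotation policy. The sketch below illustrates the equivalent stdlib behaviour; it is not a copy of the application's logging setup:

```python
# Illustration: LOG_MAX_BYTES / LOG_BACKUP_COUNT map onto a RotatingFileHandler.
import logging
from logging.handlers import RotatingFileHandler
from pathlib import Path

Path("logs").mkdir(exist_ok=True)                       # LOG_DIR
handler = RotatingFileHandler(
    "logs/app.log",                                     # LOG_FILE
    maxBytes=5 * 1024 * 1024,                           # LOG_MAX_BYTES (5 MB)
    backupCount=3,                                      # LOG_BACKUP_COUNT
)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))

logger = logging.getLogger("myfsio.example")
logger.setLevel(logging.INFO)                           # LOG_LEVEL
logger.addHandler(handler)
logger.info("log rotation configured")
```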
### Encryption
| Variable | Default | Notes |
| --- | --- | --- |
| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption support. |
| `ENCRYPTION_MASTER_KEY_PATH` | `data/.myfsio.sys/keys/master.key` | Path to the master encryption key file. |
| `DEFAULT_ENCRYPTION_ALGORITHM` | `AES256` | Default algorithm for new encrypted objects. |
| `KMS_ENABLED` | `false` | Enable KMS key management for encryption. |
| `KMS_KEYS_PATH` | `data/.myfsio.sys/keys/kms_keys.json` | Path to store KMS key metadata. |
### Performance Tuning
| Variable | Default | Notes |
| --- | --- | --- |
| `STREAM_CHUNK_SIZE` | `65536` (64 KB) | Chunk size for streaming large files. |
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
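When uploading through boto3 (the test suite already exercises a boto3 client against a live server), keep the client's multipart settings at or above `MULTIPART_MIN_PART_SIZE`. A sketch with placeholder endpoint and credentials:

```python
# Sketch: align boto3 multipart uploads with the server's 5 MB minimum part size.
import boto3
from boto3.s3.transfer import TransferConfig

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",   # MyFSIO API
    aws_access_key_id="<access-key>",
    aws_secret_access_key="<secret-key>",
    region_name="us-east-1",                # AWS_REGION default
)

config = TransferConfig(
    multipart_threshold=8 * 1024 * 1024,    # switch to multipart above 8 MB
    multipart_chunksize=8 * 1024 * 1024,    # parts stay above MULTIPART_MIN_PART_SIZE
)
s3.upload_file("backup.tar.gz", "my-bucket", "backups/backup.tar.gz", Config=config)
```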
### Server Settings
| Variable | Default | Notes |
| --- | --- | --- |
| `APP_HOST` | `0.0.0.0` | Network interface to bind to. |
| `APP_PORT` | `5000` | API server port (UI uses 5100). |
| `FLASK_DEBUG` | `0` | Enable Flask debug mode. **Never enable in production.** |
### Production Checklist
Before deploying to production, ensure you:
1. **Set `SECRET_KEY`** - Use a strong, unique value (e.g., `openssl rand -base64 32`)
2. **Restrict CORS** - Set `CORS_ORIGINS` to your specific domains instead of `*`
3. **Configure `API_BASE_URL`** - Required for correct presigned URLs behind proxies
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
### Proxy Configuration
@@ -95,6 +226,333 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
## 4. Upgrading and Updates
### Version Checking
The application version is tracked in `app/version.py` and exposed via:
- **Health endpoint:** `GET /healthz` returns JSON with `version` field
- **Metrics dashboard:** Navigate to `/ui/metrics` to see the running version in the System Status card
To check your current version:
```bash
# API health endpoint
curl http://localhost:5000/healthz
# Or inspect version.py directly
cat app/version.py | grep APP_VERSION
```
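The same check works programmatically, since `/healthz` returns JSON that includes the `version` field:

```python
# Sketch: read the running version from the health endpoint.
import requests

health = requests.get("http://localhost:5000/healthz", timeout=5).json()
print("MyFSIO version:", health.get("version"))
```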
### Pre-Update Backup Procedures
**Always back up before upgrading to prevent data loss:**
```bash
# 1. Stop the application
# Ctrl+C if running in terminal, or:
docker stop myfsio # if using Docker
# 2. Backup configuration files (CRITICAL)
mkdir -p backups/$(date +%Y%m%d_%H%M%S)
cp -r data/.myfsio.sys/config backups/$(date +%Y%m%d_%H%M%S)/
# 3. Backup all data (optional but recommended)
tar -czf backups/data_$(date +%Y%m%d_%H%M%S).tar.gz data/
# 4. Backup logs for audit trail
cp -r logs backups/$(date +%Y%m%d_%H%M%S)/
```
**Windows PowerShell:**
```powershell
# Create timestamped backup
$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
New-Item -ItemType Directory -Path "backups\$timestamp" -Force
# Backup configs
Copy-Item -Recurse "data\.myfsio.sys\config" "backups\$timestamp\"
# Backup entire data directory
Compress-Archive -Path "data\" -DestinationPath "backups\data_$timestamp.zip"
```
**Critical files to back up:**
- `data/.myfsio.sys/config/iam.json` User accounts and access keys
- `data/.myfsio.sys/config/bucket_policies.json` Bucket access policies
- `data/.myfsio.sys/config/kms_keys.json` Encryption keys (if using KMS)
- `data/.myfsio.sys/config/secret_store.json` Application secrets
### Update Procedures
#### Source Installation Updates
```bash
# 1. Backup (see above)
# 2. Pull latest code
git fetch origin
git checkout main # or your target branch/tag
git pull
# 3. Check for dependency changes
pip install -r requirements.txt
# 4. Review CHANGELOG/release notes for breaking changes
cat CHANGELOG.md # if available
# 5. Run migration scripts (if any)
# python scripts/migrate_vX_to_vY.py # example
# 6. Restart application
python run.py
```
#### Docker Updates
```bash
# 1. Backup (see above)
# 2. Pull/rebuild image
docker pull yourregistry/myfsio:latest
# OR rebuild from source:
docker build -t myfsio:latest .
# 3. Stop and remove old container
docker stop myfsio
docker rm myfsio
# 4. Start new container with same volumes
docker run -d \
--name myfsio \
-p 5000:5000 -p 5100:5100 \
-v "$(pwd)/data:/app/data" \
-v "$(pwd)/logs:/app/logs" \
-e SECRET_KEY="your-secret" \
myfsio:latest
# 5. Verify health
curl http://localhost:5000/healthz
```
### Version Compatibility Checks
Before upgrading across major versions, verify compatibility:
| From Version | To Version | Breaking Changes | Migration Required |
|--------------|------------|------------------|-------------------|
| 0.1.x | 0.2.x | None expected | No |
| < 0.1.0 | >= 0.1.0 | New IAM config format | Yes - run migration script |
**Automatic compatibility detection:**
The application will log warnings on startup if config files need migration:
```
WARNING: IAM config format is outdated (v1). Please run: python scripts/migrate_iam.py
```
**Manual compatibility check:**
```bash
# Compare version schemas
python -c "from app.version import APP_VERSION; print(f'Running: {APP_VERSION}')"
python scripts/check_compatibility.py data/.myfsio.sys/config/
```
### Migration Steps for Breaking Changes
When release notes indicate breaking changes, follow these steps:
#### Config Format Migrations
```bash
# 1. Backup first (critical!)
cp data/.myfsio.sys/config/iam.json data/.myfsio.sys/config/iam.json.backup
# 2. Run provided migration script
python scripts/migrate_iam_v1_to_v2.py
# 3. Validate migration
python scripts/validate_config.py
# 4. Test with read-only mode first (if available)
# python run.py --read-only
# 5. Restart normally
python run.py
```
#### Database/Storage Schema Changes
If object metadata format changes:
```bash
# 1. Run storage migration script
python scripts/migrate_storage.py --dry-run # preview changes
# 2. Apply migration
python scripts/migrate_storage.py --apply
# 3. Verify integrity
python scripts/verify_storage.py
```
#### IAM Policy Updates
If IAM action names change (e.g., `s3:Get` to `s3:GetObject`):
```bash
# Migration script will update all policies
python scripts/migrate_policies.py \
--input data/.myfsio.sys/config/iam.json \
--backup data/.myfsio.sys/config/iam.json.v1
# Review changes before committing
python scripts/diff_policies.py \
data/.myfsio.sys/config/iam.json.v1 \
data/.myfsio.sys/config/iam.json
```
### Rollback Procedures
If an update causes issues, roll back to the previous version:
#### Quick Rollback (Source)
```bash
# 1. Stop application
# Ctrl+C or kill process
# 2. Revert code
git checkout <previous-version-tag>
# OR
git reset --hard HEAD~1
# 3. Restore configs from backup
cp backups/20241213_103000/config/* data/.myfsio.sys/config/
# 4. Downgrade dependencies if needed
pip install -r requirements.txt
# 5. Restart
python run.py
```
#### Docker Rollback
```bash
# 1. Stop current container
docker stop myfsio
docker rm myfsio
# 2. Start previous version
docker run -d \
--name myfsio \
-p 5000:5000 -p 5100:5100 \
-v "$(pwd)/data:/app/data" \
-v "$(pwd)/logs:/app/logs" \
-e SECRET_KEY="your-secret" \
myfsio:0.1.3 # specify previous version tag
# 3. Verify
curl http://localhost:5000/healthz
```
#### Emergency Config Restore
If only config is corrupted but code is fine:
```bash
# Stop app
# Restore from latest backup
cp backups/20241213_103000/config/iam.json data/.myfsio.sys/config/
cp backups/20241213_103000/config/bucket_policies.json data/.myfsio.sys/config/
# Restart app
python run.py
```
### Blue-Green Deployment (Zero Downtime)
For production environments requiring zero downtime:
```bash
# 1. Run new version on different port (e.g., 5001/5101)
APP_PORT=5001 UI_PORT=5101 python run.py &
# 2. Health check new instance
curl http://localhost:5001/healthz
# 3. Update load balancer to route to new ports
# 4. Monitor for issues
# 5. Gracefully stop old instance
kill -SIGTERM <old-pid>
```
### Post-Update Verification
After any update, verify functionality:
```bash
# 1. Health check
curl http://localhost:5000/healthz
# 2. Login to UI
open http://localhost:5100/ui
# 3. Test IAM authentication
curl -H "X-Amz-Security-Token: <your-access-key>:<your-secret>" \
http://localhost:5000/
# 4. Test presigned URL generation
# Via UI or API
# 5. Check logs for errors
tail -n 100 logs/app.log
```
### Automated Update Scripts
Create a custom update script for your environment:
```bash
#!/bin/bash
# update.sh - Automated update with rollback capability
set -e # Exit on error
VERSION_NEW="$1"
BACKUP_DIR="backups/$(date +%Y%m%d_%H%M%S)"
echo "Creating backup..."
mkdir -p "$BACKUP_DIR"
cp -r data/.myfsio.sys/config "$BACKUP_DIR/"
echo "Updating to version $VERSION_NEW..."
git fetch origin
git checkout "v$VERSION_NEW"
pip install -r requirements.txt
echo "Starting application..."
python run.py &
APP_PID=$!
# Wait and health check
sleep 5
if curl -f http://localhost:5000/healthz; then
echo "Update successful!"
else
echo "Health check failed, rolling back..."
kill $APP_PID
git checkout -
cp -r "$BACKUP_DIR/config/*" data/.myfsio.sys/config/
python run.py &
exit 1
fi
```
## 5. Authentication & IAM
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
@@ -340,7 +798,71 @@ To verify an object is encrypted:
2. Look for `.meta` files containing encryption metadata
3. Download via the API/UI - the object should be automatically decrypted
## 8. Site Replication
## 8. Bucket Quotas
MyFSIO supports **storage quotas** to limit how much data a bucket can hold. Quotas are enforced on uploads and multipart completions.
### Quota Types
| Limit | Description |
|-------|-------------|
| **Max Size (MB)** | Maximum total storage in megabytes (includes current objects + archived versions) |
| **Max Objects** | Maximum number of objects (includes current objects + archived versions) |
### Managing Quotas (Admin Only)
Quota management is restricted to administrators (users with `iam:*` or `iam:list_users` permissions).
#### Via UI
1. Navigate to your bucket in the UI
2. Click the **Properties** tab
3. Find the **Storage Quota** card
4. Enter limits:
- **Max Size (MB)**: Leave empty for unlimited
- **Max Objects**: Leave empty for unlimited
5. Click **Update Quota**
To remove a quota, click **Remove Quota**.
#### Via API
```bash
# Set quota (max 100 MB, max 1000 objects)
curl -X PUT "http://localhost:5000/<bucket>?quota" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{"max_size_bytes": 104857600, "max_objects": 1000}'
# Get current quota and usage
curl "http://localhost:5000/<bucket>?quota" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Remove quota
curl -X DELETE "http://localhost:5000/<bucket>?quota" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
### Quota Behavior
- **Version Counting**: When versioning is enabled, archived versions count toward the quota
- **Enforcement Points**: Quotas are checked during `PUT` object and `CompleteMultipartUpload` operations
- **Error Response**: When quota is exceeded, the API returns `HTTP 403` with error code `QuotaExceeded` (a client-side handling sketch follows the example below)
- **Visibility**: All users can view quota usage in the bucket detail page, but only admins can modify quotas
### Example Error
```xml
<Error>
<Code>QuotaExceeded</Code>
<Message>Bucket quota exceeded: storage limit reached</Message>
<BucketName>my-bucket</BucketName>
</Error>
```
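Client code can detect the rejection above by status code and error code. A sketch with placeholder host, bucket name, and credentials:

```python
# Sketch: set a 1 MiB / 5-object quota, then detect a quota rejection on upload.
import requests

BASE = "http://localhost:5000"
AUTH = {"X-Access-Key": "<access-key>", "X-Secret-Key": "<secret-key>"}  # admin credentials

# Apply the quota (admin only)
requests.put(
    f"{BASE}/my-bucket?quota",
    headers={**AUTH, "Content-Type": "application/json"},
    json={"max_size_bytes": 1024 * 1024, "max_objects": 5},
    timeout=10,
)

# An upload larger than the quota should come back as QuotaExceeded (HTTP 403)
resp = requests.put(f"{BASE}/my-bucket/big.bin", headers=AUTH,
                    data=b"x" * (2 * 1024 * 1024), timeout=30)
if resp.status_code == 403 and "QuotaExceeded" in resp.text:
    print("Upload rejected by bucket quota:", resp.text)
```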
## 9. Site Replication
### Permission Model
@@ -477,7 +999,7 @@ To set up two-way replication (Server A ↔ Server B):
**Note**: Deleting a bucket will automatically remove its associated replication configuration.
## 9. Running Tests
## 11. Running Tests
```bash
pytest -q
@@ -487,7 +1009,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server
The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.
## 10. Troubleshooting
## 12. Troubleshooting
| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -496,7 +1018,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |
## 11. API Matrix
## 13. API Matrix
```
GET / # List buckets
@@ -510,10 +1032,6 @@ POST /presign/<bucket>/<key> # Generate SigV4 URL
GET /bucket-policy/<bucket> # Fetch policy
PUT /bucket-policy/<bucket> # Upsert policy
DELETE /bucket-policy/<bucket> # Delete policy
GET /<bucket>?quota # Get bucket quota
PUT /<bucket>?quota # Set bucket quota (admin only)
DELETE /<bucket>?quota # Remove bucket quota (admin only)
```
## 14. Next Steps
- Tailor IAM + policy JSON files for team-ready presets.
- Wrap `run_api.py` with gunicorn or another WSGI server for long-running workloads.
- Extend `bucket_policies.json` to cover Deny statements that simulate production security controls.

View File

@@ -1,10 +1,10 @@
Flask>=3.0.2
Flask-Limiter>=3.5.0
Flask-Cors>=4.0.0
Flask-WTF>=1.2.1
pytest>=7.4
requests>=2.31
boto3>=1.34
waitress>=2.1.2
psutil>=5.9.0
cryptography>=41.0.0
Flask>=3.1.2
Flask-Limiter>=4.1.0
Flask-Cors>=6.0.1
Flask-WTF>=1.2.2
pytest>=9.0.1
requests>=2.32.5
boto3>=1.42.1
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3

run.py (37 changed lines)

@@ -8,6 +8,7 @@ import warnings
from multiprocessing import Process
from app import create_api_app, create_ui_app
from app.config import AppConfig
def _server_host() -> str:
@@ -55,12 +56,48 @@ if __name__ == "__main__":
parser.add_argument("--ui-port", type=int, default=5100)
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
args = parser.parse_args()
# Handle config check/show modes
if args.check_config or args.show_config:
config = AppConfig.from_env()
config.print_startup_summary()
if args.check_config:
issues = config.validate_and_report()
critical = [i for i in issues if i.startswith("CRITICAL:")]
sys.exit(1 if critical else 0)
sys.exit(0)
# Default to production mode when running as compiled binary
# unless --dev is explicitly passed
prod_mode = args.prod or (_is_frozen() and not args.dev)
# Validate configuration before starting
config = AppConfig.from_env()
# Show startup summary only on first run (when marker file doesn't exist)
first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
is_first_run = not first_run_marker.exists()
if is_first_run:
config.print_startup_summary()
# Check for critical issues that should prevent startup
issues = config.validate_and_report()
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
if critical_issues:
print("ABORTING: Critical configuration issues detected. Fix them before starting.")
sys.exit(1)
# Create the marker file to indicate successful first run
try:
first_run_marker.parent.mkdir(parents=True, exist_ok=True)
first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
except OSError:
pass # Non-critical, just skip marker creation
if prod_mode:
print("Running in production mode (Waitress)")
else:

scripts/install.sh (new file, 292 lines)

@@ -0,0 +1,292 @@
#!/bin/bash
#
# MyFSIO Installation Script
# This script sets up MyFSIO for production use on Linux systems.
#
# Usage:
# curl -fsSL https://example.com/install.sh | bash
# OR
# ./install.sh [OPTIONS]
#
# Options:
# --install-dir DIR Installation directory (default: /opt/myfsio)
# --data-dir DIR Data directory (default: /var/lib/myfsio)
# --log-dir DIR Log directory (default: /var/log/myfsio)
# --user USER System user to run as (default: myfsio)
# --port PORT API port (default: 5000)
# --ui-port PORT UI port (default: 5100)
# --api-url URL Public API URL (for presigned URLs behind proxy)
# --no-systemd Skip systemd service creation
# --binary PATH Path to myfsio binary (will download if not provided)
# -y, --yes Skip confirmation prompts
#
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
INSTALL_DIR="/opt/myfsio"
DATA_DIR="/var/lib/myfsio"
LOG_DIR="/var/log/myfsio"
SERVICE_USER="myfsio"
API_PORT="5000"
UI_PORT="5100"
API_URL=""
SKIP_SYSTEMD=false
BINARY_PATH=""
AUTO_YES=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
--data-dir)
DATA_DIR="$2"
shift 2
;;
--log-dir)
LOG_DIR="$2"
shift 2
;;
--user)
SERVICE_USER="$2"
shift 2
;;
--port)
API_PORT="$2"
shift 2
;;
--ui-port)
UI_PORT="$2"
shift 2
;;
--api-url)
API_URL="$2"
shift 2
;;
--no-systemd)
SKIP_SYSTEMD=true
shift
;;
--binary)
BINARY_PATH="$2"
shift 2
;;
-y|--yes)
AUTO_YES=true
shift
;;
-h|--help)
head -30 "$0" | tail -25
exit 0
;;
*)
echo -e "${RED}Unknown option: $1${NC}"
exit 1
;;
esac
done
echo -e "${BLUE}"
echo "╔══════════════════════════════════════════════════════════╗"
echo "║ MyFSIO Installation ║"
echo "║ S3-Compatible Object Storage ║"
echo "╚══════════════════════════════════════════════════════════╝"
echo -e "${NC}"
# Check if running as root
if [[ $EUID -ne 0 ]]; then
echo -e "${RED}Error: This script must be run as root (use sudo)${NC}"
exit 1
fi
# Display configuration
echo -e "${YELLOW}Installation Configuration:${NC}"
echo " Install directory: $INSTALL_DIR"
echo " Data directory: $DATA_DIR"
echo " Log directory: $LOG_DIR"
echo " Service user: $SERVICE_USER"
echo " API port: $API_PORT"
echo " UI port: $UI_PORT"
if [[ -n "$API_URL" ]]; then
echo " Public API URL: $API_URL"
fi
if [[ -n "$BINARY_PATH" ]]; then
echo " Binary path: $BINARY_PATH"
fi
echo ""
# Confirm installation
if [[ "$AUTO_YES" != true ]]; then
read -p "Proceed with installation? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Installation cancelled."
exit 0
fi
fi
echo ""
echo -e "${GREEN}[1/7]${NC} Creating system user..."
if id "$SERVICE_USER" &>/dev/null; then
echo " User '$SERVICE_USER' already exists"
else
useradd --system --no-create-home --shell /usr/sbin/nologin "$SERVICE_USER"
echo " Created user '$SERVICE_USER'"
fi
echo -e "${GREEN}[2/7]${NC} Creating directories..."
mkdir -p "$INSTALL_DIR"
mkdir -p "$DATA_DIR"
mkdir -p "$LOG_DIR"
echo " Created $INSTALL_DIR"
echo " Created $DATA_DIR"
echo " Created $LOG_DIR"
echo -e "${GREEN}[3/7]${NC} Installing binary..."
if [[ -n "$BINARY_PATH" ]]; then
if [[ -f "$BINARY_PATH" ]]; then
cp "$BINARY_PATH" "$INSTALL_DIR/myfsio"
echo " Copied binary from $BINARY_PATH"
else
echo -e "${RED}Error: Binary not found at $BINARY_PATH${NC}"
exit 1
fi
elif [[ -f "./myfsio" ]]; then
cp "./myfsio" "$INSTALL_DIR/myfsio"
echo " Copied binary from ./myfsio"
else
echo -e "${RED}Error: No binary provided. Use --binary PATH or place 'myfsio' in current directory${NC}"
exit 1
fi
chmod +x "$INSTALL_DIR/myfsio"
echo -e "${GREEN}[4/7]${NC} Generating secret key..."
SECRET_KEY=$(openssl rand -base64 32)
echo " Generated secure SECRET_KEY"
echo -e "${GREEN}[5/7]${NC} Creating environment file..."
cat > "$INSTALL_DIR/myfsio.env" << EOF
# MyFSIO Configuration
# Generated by install.sh on $(date)
# Storage paths
STORAGE_ROOT=$DATA_DIR
LOG_DIR=$LOG_DIR
# Network
APP_HOST=0.0.0.0
APP_PORT=$API_PORT
# Security - CHANGE IN PRODUCTION
SECRET_KEY=$SECRET_KEY
CORS_ORIGINS=*
# Public URL (set this if behind a reverse proxy)
$(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)
# Logging
LOG_LEVEL=INFO
LOG_TO_FILE=true
# Rate limiting
RATE_LIMIT_DEFAULT=200 per minute
# Optional: Encryption (uncomment to enable)
# ENCRYPTION_ENABLED=true
# KMS_ENABLED=true
EOF
chmod 600 "$INSTALL_DIR/myfsio.env"
echo " Created $INSTALL_DIR/myfsio.env"
echo -e "${GREEN}[6/7]${NC} Setting permissions..."
chown -R "$SERVICE_USER:$SERVICE_USER" "$INSTALL_DIR"
chown -R "$SERVICE_USER:$SERVICE_USER" "$DATA_DIR"
chown -R "$SERVICE_USER:$SERVICE_USER" "$LOG_DIR"
echo " Set ownership to $SERVICE_USER"
if [[ "$SKIP_SYSTEMD" != true ]]; then
echo -e "${GREEN}[7/7]${NC} Creating systemd service..."
cat > /etc/systemd/system/myfsio.service << EOF
[Unit]
Description=MyFSIO S3-Compatible Storage
Documentation=https://github.com/yourusername/myfsio
After=network.target
[Service]
Type=simple
User=$SERVICE_USER
Group=$SERVICE_USER
WorkingDirectory=$INSTALL_DIR
EnvironmentFile=$INSTALL_DIR/myfsio.env
ExecStart=$INSTALL_DIR/myfsio
Restart=on-failure
RestartSec=5
# Security hardening
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
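# ProtectSystem=strict mounts the file system read-only for this service; ReadWritePaths below re-enables writes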
ReadWritePaths=$DATA_DIR $LOG_DIR
PrivateTmp=true
# Resource limits (adjust as needed)
# LimitNOFILE=65535
# MemoryMax=2G
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
echo " Created /etc/systemd/system/myfsio.service"
else
echo -e "${GREEN}[7/7]${NC} Skipping systemd service (--no-systemd)"
fi
echo ""
echo -e "${GREEN}╔══════════════════════════════════════════════════════════╗${NC}"
echo -e "${GREEN}║ Installation Complete! ║${NC}"
echo -e "${GREEN}╚══════════════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${YELLOW}Next steps:${NC}"
echo ""
echo " 1. Review configuration:"
echo " ${BLUE}cat $INSTALL_DIR/myfsio.env${NC}"
echo ""
echo " 2. Start the service:"
echo " ${BLUE}sudo systemctl start myfsio${NC}"
echo ""
echo " 3. Enable on boot:"
echo " ${BLUE}sudo systemctl enable myfsio${NC}"
echo ""
echo " 4. Check status:"
echo " ${BLUE}sudo systemctl status myfsio${NC}"
echo ""
echo " 5. View logs:"
echo " ${BLUE}sudo journalctl -u myfsio -f${NC}"
echo " ${BLUE}tail -f $LOG_DIR/app.log${NC}"
echo ""
echo -e "${YELLOW}Access:${NC}"
echo " API: http://$(hostname -I | awk '{print $1}'):$API_PORT"
echo " UI: http://$(hostname -I | awk '{print $1}'):$UI_PORT/ui"
echo ""
echo -e "${YELLOW}Default credentials:${NC}"
echo " Username: localadmin"
echo " Password: localadmin"
echo -e " ${RED}⚠ Change these immediately after first login!${NC}"
echo ""
echo -e "${YELLOW}Configuration files:${NC}"
echo " Environment: $INSTALL_DIR/myfsio.env"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo ""
174
scripts/uninstall.sh Normal file
View File
@@ -0,0 +1,174 @@
#!/bin/bash
#
# MyFSIO Uninstall Script
# This script removes MyFSIO from your system.
#
# Usage:
# ./uninstall.sh [OPTIONS]
#
# Options:
# --keep-data Don't remove data directory
# --keep-logs Don't remove log directory
# --install-dir DIR Installation directory (default: /opt/myfsio)
# --data-dir DIR Data directory (default: /var/lib/myfsio)
# --log-dir DIR Log directory (default: /var/log/myfsio)
# --user USER System user (default: myfsio)
# -y, --yes Skip confirmation prompts
#
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Default values
INSTALL_DIR="/opt/myfsio"
DATA_DIR="/var/lib/myfsio"
LOG_DIR="/var/log/myfsio"
SERVICE_USER="myfsio"
KEEP_DATA=false
KEEP_LOGS=false
AUTO_YES=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--keep-data)
KEEP_DATA=true
shift
;;
--keep-logs)
KEEP_LOGS=true
shift
;;
--install-dir)
INSTALL_DIR="$2"
shift 2
;;
--data-dir)
DATA_DIR="$2"
shift 2
;;
--log-dir)
LOG_DIR="$2"
shift 2
;;
--user)
SERVICE_USER="$2"
shift 2
;;
-y|--yes)
AUTO_YES=true
shift
;;
-h|--help)
head -20 "$0" | tail -15
exit 0
;;
*)
echo -e "${RED}Unknown option: $1${NC}"
exit 1
;;
esac
done
echo -e "${RED}"
echo "╔══════════════════════════════════════════════════════════╗"
echo "║ MyFSIO Uninstallation ║"
echo "╚══════════════════════════════════════════════════════════╝"
echo -e "${NC}"
# Check if running as root
if [[ $EUID -ne 0 ]]; then
echo -e "${RED}Error: This script must be run as root (use sudo)${NC}"
exit 1
fi
echo -e "${YELLOW}The following will be removed:${NC}"
echo " Install directory: $INSTALL_DIR"
if [[ "$KEEP_DATA" != true ]]; then
echo -e " Data directory: $DATA_DIR ${RED}(ALL YOUR DATA!)${NC}"
else
echo " Data directory: $DATA_DIR (KEPT)"
fi
if [[ "$KEEP_LOGS" != true ]]; then
echo " Log directory: $LOG_DIR"
else
echo " Log directory: $LOG_DIR (KEPT)"
fi
echo " Systemd service: /etc/systemd/system/myfsio.service"
echo " System user: $SERVICE_USER"
echo ""
if [[ "$AUTO_YES" != true ]]; then
echo -e "${RED}WARNING: This action cannot be undone!${NC}"
read -p "Are you sure you want to uninstall MyFSIO? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Uninstallation cancelled."
exit 0
fi
fi
echo ""
echo -e "${GREEN}[1/5]${NC} Stopping service..."
if systemctl is-active --quiet myfsio 2>/dev/null; then
systemctl stop myfsio
echo " Stopped myfsio service"
else
echo " Service not running"
fi
echo -e "${GREEN}[2/5]${NC} Disabling service..."
if systemctl is-enabled --quiet myfsio 2>/dev/null; then
systemctl disable myfsio
echo " Disabled myfsio service"
else
echo " Service not enabled"
fi
echo -e "${GREEN}[3/5]${NC} Removing systemd service..."
if [[ -f /etc/systemd/system/myfsio.service ]]; then
rm -f /etc/systemd/system/myfsio.service
systemctl daemon-reload
echo " Removed /etc/systemd/system/myfsio.service"
else
echo " Service file not found"
fi
echo -e "${GREEN}[4/5]${NC} Removing directories..."
if [[ -d "$INSTALL_DIR" ]]; then
rm -rf "$INSTALL_DIR"
echo " Removed $INSTALL_DIR"
fi
if [[ "$KEEP_DATA" != true ]] && [[ -d "$DATA_DIR" ]]; then
rm -rf "$DATA_DIR"
echo " Removed $DATA_DIR"
elif [[ "$KEEP_DATA" == true ]]; then
echo " Kept $DATA_DIR"
fi
if [[ "$KEEP_LOGS" != true ]] && [[ -d "$LOG_DIR" ]]; then
rm -rf "$LOG_DIR"
echo " Removed $LOG_DIR"
elif [[ "$KEEP_LOGS" == true ]]; then
echo " Kept $LOG_DIR"
fi
echo -e "${GREEN}[5/5]${NC} Removing system user..."
if id "$SERVICE_USER" &>/dev/null; then
userdel "$SERVICE_USER" 2>/dev/null || true
echo " Removed user '$SERVICE_USER'"
else
echo " User not found"
fi
echo ""
echo -e "${GREEN}MyFSIO has been uninstalled.${NC}"
if [[ "$KEEP_DATA" == true ]]; then
echo -e "${YELLOW}Data preserved at: $DATA_DIR${NC}"
fi
View File
@@ -730,6 +730,158 @@
</div>
</div>
{% endif %}
<!-- Storage Quota Card -->
<div class="card shadow-sm mt-4" id="bucket-quota-card">
<div class="card-header d-flex align-items-center">
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary me-2" viewBox="0 0 16 16">
<path d="M1 2.5A1.5 1.5 0 0 1 2.5 1h3A1.5 1.5 0 0 1 7 2.5v3A1.5 1.5 0 0 1 5.5 7h-3A1.5 1.5 0 0 1 1 5.5v-3zM2.5 2a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3zm6.5.5A1.5 1.5 0 0 1 10.5 1h3A1.5 1.5 0 0 1 15 2.5v3A1.5 1.5 0 0 1 13.5 7h-3A1.5 1.5 0 0 1 9 5.5v-3zm1.5-.5a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3zM1 10.5A1.5 1.5 0 0 1 2.5 9h3A1.5 1.5 0 0 1 7 10.5v3A1.5 1.5 0 0 1 5.5 15h-3A1.5 1.5 0 0 1 1 13.5v-3zm1.5-.5a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3zm6.5.5A1.5 1.5 0 0 1 10.5 9h3a1.5 1.5 0 0 1 1.5 1.5v3a1.5 1.5 0 0 1-1.5 1.5h-3A1.5 1.5 0 0 1 9 13.5v-3zm1.5-.5a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
</svg>
<span class="fw-semibold">Storage Quota</span>
</div>
<div class="card-body">
{% set max_bytes = bucket_quota.get('max_bytes') %}
{% set max_objects = bucket_quota.get('max_objects') %}
{% set has_quota = max_bytes is not none or max_objects is not none %}
{% set current_objects = bucket_stats.get('objects', 0) %}
{% set version_count = bucket_stats.get('version_count', 0) %}
{% set total_objects = bucket_stats.get('total_objects', current_objects) %}
{% set current_bytes = bucket_stats.get('bytes', 0) %}
{% set version_bytes = bucket_stats.get('version_bytes', 0) %}
{% set total_bytes = bucket_stats.get('total_bytes', current_bytes) %}
<!-- Current Usage Display -->
<div class="mb-4">
<h6 class="small fw-semibold mb-3">Current Usage</h6>
<div class="row g-3">
<div class="col-6">
<div class="border rounded p-3 text-center">
<div class="fs-4 fw-bold text-primary">{{ total_objects }}</div>
<div class="small text-muted">Total Objects</div>
{% if max_objects is not none %}
<div class="progress mt-2" style="height: 4px;">
{% set obj_pct = (total_objects / max_objects * 100) | int if max_objects > 0 else 0 %}
<div class="progress-bar {% if obj_pct >= 90 %}bg-danger{% elif obj_pct >= 75 %}bg-warning{% else %}bg-success{% endif %}" style="width: {{ [obj_pct, 100] | min }}%"></div>
</div>
<div class="small text-muted mt-1">{{ obj_pct }}% of {{ max_objects }} limit</div>
{% else %}
<div class="small text-muted mt-2">No limit</div>
{% endif %}
{% if version_count > 0 %}
<div class="small text-muted mt-1">
<span class="text-body-secondary">({{ current_objects }} current + {{ version_count }} versions)</span>
</div>
{% endif %}
</div>
</div>
<div class="col-6">
<div class="border rounded p-3 text-center">
<div class="fs-4 fw-bold text-primary">{{ total_bytes | filesizeformat }}</div>
<div class="small text-muted">Total Storage</div>
{% if max_bytes is not none %}
<div class="progress mt-2" style="height: 4px;">
{% set bytes_pct = (total_bytes / max_bytes * 100) | int if max_bytes > 0 else 0 %}
<div class="progress-bar {% if bytes_pct >= 90 %}bg-danger{% elif bytes_pct >= 75 %}bg-warning{% else %}bg-success{% endif %}" style="width: {{ [bytes_pct, 100] | min }}%"></div>
</div>
<div class="small text-muted mt-1">{{ bytes_pct }}% of {{ max_bytes | filesizeformat }} limit</div>
{% else %}
<div class="small text-muted mt-2">No limit</div>
{% endif %}
{% if version_bytes > 0 %}
<div class="small text-muted mt-1">
<span class="text-body-secondary">({{ current_bytes | filesizeformat }} current + {{ version_bytes | filesizeformat }} versions)</span>
</div>
{% endif %}
</div>
</div>
</div>
</div>
{% if has_quota %}
<!-- Quota Enabled State -->
<div class="alert alert-info d-flex align-items-start mb-4" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
</svg>
<div>
<strong>Storage quota enabled</strong>
<p class="mb-0 small">
{% if max_bytes is not none and max_objects is not none %}
Limited to {{ max_bytes | filesizeformat }} and {{ max_objects }} objects.
{% elif max_bytes is not none %}
Limited to {{ max_bytes | filesizeformat }} storage.
{% else %}
Limited to {{ max_objects }} objects.
{% endif %}
</p>
</div>
</div>
{% else %}
<!-- Quota Disabled State -->
<div class="alert alert-secondary d-flex align-items-start mb-4" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
</svg>
<div>
<strong>No storage quota</strong>
<p class="mb-0 small">This bucket has no storage or object count limits. Set limits below to control usage.</p>
</div>
</div>
{% endif %}
{% if can_manage_quota %}
<form method="post" action="{{ url_for('ui.update_bucket_quota', bucket_name=bucket_name) }}" id="quotaForm">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
<!-- Max Storage -->
<div class="mb-3">
<label for="max_mb" class="form-label fw-medium">Maximum Storage Size</label>
<div class="input-group">
<input type="number" class="form-control" id="max_mb" name="max_mb"
value="{{ (max_bytes / 1048576) | int if max_bytes is not none else '' }}"
min="1" step="1" placeholder="Unlimited">
<span class="input-group-text">MB</span>
</div>
<div class="form-text">Minimum 1 MB. Leave empty for unlimited.</div>
</div>
<!-- Max Objects -->
<div class="mb-4">
<label for="max_objects" class="form-label fw-medium">Maximum Object Count</label>
<input type="number" class="form-control" id="max_objects" name="max_objects"
value="{{ max_objects if max_objects is not none else '' }}"
min="0" step="1" placeholder="Unlimited">
<div class="form-text">Maximum number of objects allowed. Leave empty for unlimited.</div>
</div>
<div class="d-flex gap-2 flex-wrap">
<button class="btn btn-primary" type="submit">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M12.736 3.97a.733.733 0 0 1 1.047 0c.286.289.29.756.01 1.05L7.88 12.01a.733.733 0 0 1-1.065.02L3.217 8.384a.757.757 0 0 1 0-1.06.733.733 0 0 1 1.047 0l3.052 3.093 5.4-6.425a.247.247 0 0 1 .02-.022Z"/>
</svg>
Save Quota Settings
</button>
{% if has_quota %}
<button type="submit" class="btn btn-outline-danger" id="removeQuotaBtn" name="action" value="remove">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
</svg>
Remove Quota
</button>
{% endif %}
</div>
</form>
{% else %}
<div class="text-center py-3">
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="text-muted mb-2" viewBox="0 0 16 16">
<path d="M8 1a2 2 0 0 1 2 2v4H6V3a2 2 0 0 1 2-2zm3 6V3a3 3 0 0 0-6 0v4a2 2 0 0 0-2 2v5a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V9a2 2 0 0 0-2-2z"/>
</svg>
<p class="text-muted mb-0 small">You do not have permission to modify quota settings for this bucket.</p>
</div>
{% endif %}
</div>
</div>
</div>
<!-- Sidebar -->
View File
@@ -55,8 +55,8 @@ python run.py --mode ui
<tbody>
<tr>
<td><code>API_BASE_URL</code></td>
<td><code>http://127.0.0.1:5000</code></td>
<td>The public URL of the API. <strong>Required</strong> if running behind a proxy or if the UI and API are on different domains. Ensures presigned URLs are generated correctly.</td>
<td><code>None</code></td>
<td>The public URL of the API. <strong>Required</strong> if running behind a proxy. Ensures presigned URLs are generated correctly.</td>
</tr>
<tr>
<td><code>STORAGE_ROOT</code></td>
@@ -65,13 +65,13 @@ python run.py --mode ui
</tr>
<tr>
<td><code>MAX_UPLOAD_SIZE</code></td>
<td><code>5 GB</code></td>
<td>Max request body size.</td>
<td><code>1 GB</code></td>
<td>Max request body size in bytes.</td>
</tr>
<tr>
<td><code>SECRET_KEY</code></td>
<td>(Random)</td>
<td>Flask session key. Set this in production.</td>
<td>(Auto-generated)</td>
<td>Flask session key. Auto-generates if not set. <strong>Set explicitly in production.</strong></td>
</tr>
<tr>
<td><code>APP_HOST</code></td>
@@ -81,7 +81,51 @@ python run.py --mode ui
<tr>
<td><code>APP_PORT</code></td>
<td><code>5000</code></td>
<td>Listen port.</td>
<td>Listen port (UI uses 5100).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">CORS Settings</td>
</tr>
<tr>
<td><code>CORS_ORIGINS</code></td>
<td><code>*</code></td>
<td>Allowed origins. <strong>Restrict in production.</strong></td>
</tr>
<tr>
<td><code>CORS_METHODS</code></td>
<td><code>GET,PUT,POST,DELETE,OPTIONS,HEAD</code></td>
<td>Allowed HTTP methods.</td>
</tr>
<tr>
<td><code>CORS_ALLOW_HEADERS</code></td>
<td><code>*</code></td>
<td>Allowed request headers.</td>
</tr>
<tr>
<td><code>CORS_EXPOSE_HEADERS</code></td>
<td><code>*</code></td>
<td>Response headers visible to browsers (e.g., <code>ETag</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Security Settings</td>
</tr>
<tr>
<td><code>AUTH_MAX_ATTEMPTS</code></td>
<td><code>5</code></td>
<td>Failed login attempts before lockout.</td>
</tr>
<tr>
<td><code>AUTH_LOCKOUT_MINUTES</code></td>
<td><code>15</code></td>
<td>Lockout duration after max failed attempts.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_DEFAULT</code></td>
<td><code>200 per minute</code></td>
<td>Default API rate limit.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Encryption Settings</td>
</tr>
<tr>
<td><code>ENCRYPTION_ENABLED</code></td>
@@ -93,9 +137,25 @@ python run.py --mode ui
<td><code>false</code></td>
<td>Enable KMS key management for encryption.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Logging Settings</td>
</tr>
<tr>
<td><code>LOG_LEVEL</code></td>
<td><code>INFO</code></td>
<td>Log verbosity: DEBUG, INFO, WARNING, ERROR.</td>
</tr>
<tr>
<td><code>LOG_TO_FILE</code></td>
<td><code>true</code></td>
<td>Enable file logging.</td>
</tr>
</tbody>
</table>
</div>
<div class="alert alert-warning mt-3 mb-0 small">
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code>, restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via a reverse proxy, and use the <code>--prod</code> flag.
</div>
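<p class="small text-muted mt-3 mb-2">As a starting point, a production <code>myfsio.env</code> might look like the sketch below; the domains and secret are placeholders and every value should be adapted to your deployment.</p>
<pre class="mb-0"><code class="language-bash"># myfsio.env - illustrative production values
STORAGE_ROOT=/var/lib/myfsio
# Public URL served by the reverse proxy (used for presigned URLs)
API_BASE_URL=https://s3.example.com
# Restrict CORS to the origins that actually need access
CORS_ORIGINS=https://app.example.com
# Generate with: openssl rand -base64 32
SECRET_KEY=change-me
LOG_LEVEL=INFO
LOG_TO_FILE=true</code></pre>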
</div>
</article>
<article id="background" class="card shadow-sm docs-section">
@@ -140,7 +200,7 @@ WorkingDirectory=/opt/myfsio
ExecStart=/opt/myfsio/myfsio
Restart=on-failure
RestartSec=5
Environment=MYFSIO_DATA_DIR=/var/lib/myfsio
Environment=STORAGE_ROOT=/var/lib/myfsio
Environment=API_BASE_URL=https://s3.example.com
[Install]
@@ -418,10 +478,78 @@ s3.complete_multipart_upload(
</p>
</div>
</article>
<article id="quotas" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">10</span>
<h2 class="h4 mb-0">Bucket Quotas</h2>
</div>
<p class="text-muted">Limit how much data a bucket can hold using storage quotas. Quotas are enforced on uploads and multipart completions.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Quota Types</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Limit</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Max Size (MB)</strong></td>
<td>Maximum total storage in megabytes (includes current objects + archived versions)</td>
</tr>
<tr>
<td><strong>Max Objects</strong></td>
<td>Maximum number of objects (includes current objects + archived versions)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Managing Quotas (Admin Only)</h3>
<p class="small text-muted">Quota management is restricted to administrators (users with <code>iam:*</code> permissions).</p>
<ol class="docs-steps mb-3">
<li>Navigate to your bucket → <strong>Properties</strong> tab → <strong>Storage Quota</strong> card.</li>
<li>Enter limits: <strong>Maximum Storage Size</strong> (MB) and/or <strong>Maximum Object Count</strong>. Leave empty for unlimited.</li>
<li>Click <strong>Save Quota Settings</strong> to save, or <strong>Remove Quota</strong> to clear limits.</li>
</ol>
<h3 class="h6 text-uppercase text-muted mt-4">API Usage</h3>
<pre class="mb-3"><code class="language-bash"># Set quota (max 100MB, max 1000 objects)
curl -X PUT "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"max_bytes": 104857600, "max_objects": 1000}'
# Get current quota
curl "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Remove quota
curl -X PUT "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"max_bytes": null, "max_objects": null}'</code></pre>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Version Counting:</strong> When versioning is enabled, archived versions count toward the quota. The quota is checked against total storage, not just current objects. For example, a 10 MB object with two archived 10 MB versions counts as 30 MB and three objects against the limits.
</div>
</div>
</div>
</div>
</article>
<article id="encryption" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">09</span>
<span class="docs-section-kicker">11</span>
<h2 class="h4 mb-0">Encryption</h2>
</div>
<p class="text-muted">Protect data at rest with server-side encryption using AES-256-GCM. Objects are encrypted before being written to disk and decrypted transparently on read.</p>
@@ -515,7 +643,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">10</span>
<span class="docs-section-kicker">12</span>
<h2 class="h4 mb-0">Troubleshooting &amp; tips</h2>
</div>
<div class="table-responsive">
@@ -572,6 +700,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<li><a href="#api">REST endpoints</a></li>
<li><a href="#examples">API Examples</a></li>
<li><a href="#replication">Site Replication</a></li>
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
</ul>
View File
@@ -126,7 +126,6 @@
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">System Overview</h5>
<span class="badge bg-primary-subtle text-primary">Live</span>
</div>
<div class="card-body p-4">
<div class="table-responsive">
@@ -233,14 +232,14 @@
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-check-circle-fill me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
</svg>
Healthy
v{{ app.version }}
</span>
</div>
<h4 class="card-title fw-bold mb-3">System Status</h4>
<p class="card-text opacity-90 mb-4">All systems operational. Your storage infrastructure is running smoothly with no detected issues.</p>
<div class="d-flex gap-4">
<div>
<div class="h3 fw-bold mb-0">99.9%</div>
<div class="h3 fw-bold mb-0">{{ app.uptime_days }}d</div>
<small class="opacity-75">Uptime</small>
</div>
<div>