Release V0.1.2 #3
app/errors.py (new file, 167 lines)
```python
"""Standardized error handling for API and UI responses."""

from __future__ import annotations

import logging
from dataclasses import dataclass, field
from typing import Optional, Dict, Any
from xml.etree.ElementTree import Element, SubElement, tostring

from flask import Response, jsonify, request, flash, redirect, url_for, g

logger = logging.getLogger(__name__)


@dataclass
class AppError(Exception):
    """Base application error with multi-format response support."""

    code: str
    message: str
    status_code: int = 500
    details: Optional[Dict[str, Any]] = field(default=None)

    def __post_init__(self):
        super().__init__(self.message)

    def to_xml_response(self) -> Response:
        """Convert to S3 API XML error response."""
        error = Element("Error")
        SubElement(error, "Code").text = self.code
        SubElement(error, "Message").text = self.message
        request_id = getattr(g, 'request_id', None) if g else None
        SubElement(error, "RequestId").text = request_id or "unknown"
        xml_bytes = tostring(error, encoding="utf-8")
        return Response(xml_bytes, status=self.status_code, mimetype="application/xml")

    def to_json_response(self) -> tuple[Response, int]:
        """Convert to JSON error response for UI AJAX calls."""
        payload: Dict[str, Any] = {
            "success": False,
            "error": {
                "code": self.code,
                "message": self.message,
            },
        }
        if self.details:
            payload["error"]["details"] = self.details
        return jsonify(payload), self.status_code

    def to_flash_message(self) -> str:
        """Convert to user-friendly flash message."""
        return self.message


@dataclass
class BucketNotFoundError(AppError):
    """Bucket does not exist."""

    code: str = "NoSuchBucket"
    message: str = "The specified bucket does not exist"
    status_code: int = 404


@dataclass
class BucketAlreadyExistsError(AppError):
    """Bucket already exists."""

    code: str = "BucketAlreadyExists"
    message: str = "The requested bucket name is not available"
    status_code: int = 409


@dataclass
class BucketNotEmptyError(AppError):
    """Bucket is not empty."""

    code: str = "BucketNotEmpty"
    message: str = "The bucket you tried to delete is not empty"
    status_code: int = 409


@dataclass
class ObjectNotFoundError(AppError):
    """Object does not exist."""

    code: str = "NoSuchKey"
    message: str = "The specified key does not exist"
    status_code: int = 404


@dataclass
class InvalidObjectKeyError(AppError):
    """Invalid object key."""

    code: str = "InvalidKey"
    message: str = "The specified key is not valid"
    status_code: int = 400


@dataclass
class AccessDeniedError(AppError):
    """Access denied."""

    code: str = "AccessDenied"
    message: str = "Access Denied"
    status_code: int = 403


@dataclass
class InvalidCredentialsError(AppError):
    """Invalid credentials."""

    code: str = "InvalidAccessKeyId"
    message: str = "The access key ID you provided does not exist"
    status_code: int = 403


@dataclass
class MalformedRequestError(AppError):
    """Malformed request."""

    code: str = "MalformedXML"
    message: str = "The XML you provided was not well-formed"
    status_code: int = 400


@dataclass
class InvalidArgumentError(AppError):
    """Invalid argument."""

    code: str = "InvalidArgument"
    message: str = "Invalid argument"
    status_code: int = 400


@dataclass
class EntityTooLargeError(AppError):
    """Entity too large."""

    code: str = "EntityTooLarge"
    message: str = "Your proposed upload exceeds the maximum allowed size"
    status_code: int = 413


def handle_app_error(error: AppError) -> Response:
    """Handle application errors with appropriate response format."""
    log_extra = {"error_code": error.code}
    if error.details:
        log_extra["details"] = error.details

    logger.error(f"{error.code}: {error.message}", extra=log_extra)

    if request.path.startswith('/ui'):
        wants_json = (
            request.is_json or
            request.headers.get('X-Requested-With') == 'XMLHttpRequest' or
            'application/json' in request.accept_mimetypes.values()
        )
        if wants_json:
            return error.to_json_response()
        flash(error.to_flash_message(), 'danger')
        referrer = request.referrer
        if referrer and request.host in referrer:
            return redirect(referrer)
        return redirect(url_for('ui.buckets_overview'))
    else:
        return error.to_xml_response()


def register_error_handlers(app):
    """Register error handlers with a Flask app."""
    app.register_error_handler(AppError, handle_app_error)

    for error_class in [
        BucketNotFoundError, BucketAlreadyExistsError, BucketNotEmptyError,
        ObjectNotFoundError, InvalidObjectKeyError,
        AccessDeniedError, InvalidCredentialsError,
        MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
    ]:
        app.register_error_handler(error_class, handle_app_error)
```
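For orientation, a minimal sketch of how these errors are meant to be raised and rendered; the route and the existence check are illustrative, not part of this release:

```python
# Hypothetical route: AppError subclasses bubble up to handle_app_error,
# which renders XML for S3 API paths and flash/JSON for paths under /ui.
from flask import Flask
from app.errors import BucketNotFoundError, register_error_handlers

app = Flask(__name__)
register_error_handlers(app)

@app.get("/<bucket_name>")
def head_bucket(bucket_name: str):
    if bucket_name != "demo":        # placeholder existence check
        raise BucketNotFoundError()  # -> 404 XML with <Code>NoSuchBucket</Code>
    return "ok"
```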
```diff
@@ -258,9 +258,19 @@ class ReplicationManager:
         self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)
 
     def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
+        if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
+            logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
+            return
+
+        try:
+            from .storage import ObjectStorage
+            ObjectStorage._sanitize_object_key(object_key)
+        except StorageError as e:
+            logger.error(f"Object key validation failed in replication: {e}")
+            return
+
         file_size = 0
         try:
-            # Using boto3 to upload
             config = Config(user_agent_extra=REPLICATION_USER_AGENT)
             s3 = boto3.client(
                 "s3",
```
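For reference, the kinds of keys the new guard screens out before any boto3 work happens (illustrative values):

```python
# Mirrors the guard in _replicate_task: reject traversal and absolute keys.
def is_suspicious(object_key: str) -> bool:
    return ".." in object_key or object_key.startswith(("/", "\\"))

assert is_suspicious("../etc/passwd")
assert is_suspicious("/abs/key") and is_suspicious("\\abs\\key")
assert not is_suspicious("reports/2024/q1.csv")
```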
app/s3_api.py (264 lines)
```diff
@@ -800,6 +800,8 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
         "encryption": _bucket_encryption_handler,
         "location": _bucket_location_handler,
         "acl": _bucket_acl_handler,
+        "versions": _bucket_list_versions_handler,
+        "lifecycle": _bucket_lifecycle_handler,
     }
     requested = [key for key in handlers if key in request.args]
     if not requested:
```
```diff
@@ -1135,6 +1137,268 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
     return _xml_response(root)
 
 
+def _bucket_list_versions_handler(bucket_name: str) -> Response:
+    """Handle ListObjectVersions (GET /<bucket>?versions)."""
+    if request.method != "GET":
+        return _method_not_allowed(["GET"])
+
+    principal, error = _require_principal()
+    try:
+        _authorize_action(principal, bucket_name, "list")
+    except IamError as exc:
+        if error:
+            return error
+        return _error_response("AccessDenied", str(exc), 403)
+
+    storage = _storage()
+
+    try:
+        objects = storage.list_objects(bucket_name)
+    except StorageError as exc:
+        return _error_response("NoSuchBucket", str(exc), 404)
+
+    prefix = request.args.get("prefix", "")
+    delimiter = request.args.get("delimiter", "")
+    max_keys = min(int(request.args.get("max-keys", 1000)), 1000)
+    key_marker = request.args.get("key-marker", "")
+
+    if prefix:
+        objects = [obj for obj in objects if obj.key.startswith(prefix)]
+
+    if key_marker:
+        objects = [obj for obj in objects if obj.key > key_marker]
+
+    # Build XML response
+    root = Element("ListVersionsResult", xmlns="http://s3.amazonaws.com/doc/2006-03-01/")
+    SubElement(root, "Name").text = bucket_name
+    SubElement(root, "Prefix").text = prefix
+    SubElement(root, "KeyMarker").text = key_marker
+    SubElement(root, "MaxKeys").text = str(max_keys)
+    if delimiter:
+        SubElement(root, "Delimiter").text = delimiter
+
+    version_count = 0
+    is_truncated = False
+    next_key_marker = ""
+
+    for obj in objects:
+        if version_count >= max_keys:
+            is_truncated = True
+            break
+
+        # Current version
+        version = SubElement(root, "Version")
+        SubElement(version, "Key").text = obj.key
+        SubElement(version, "VersionId").text = "null"  # Current version ID
+        SubElement(version, "IsLatest").text = "true"
+        SubElement(version, "LastModified").text = obj.last_modified.strftime("%Y-%m-%dT%H:%M:%S.000Z")
+        SubElement(version, "ETag").text = f'"{obj.etag}"'
+        SubElement(version, "Size").text = str(obj.size)
+        SubElement(version, "StorageClass").text = "STANDARD"
+
+        owner = SubElement(version, "Owner")
+        SubElement(owner, "ID").text = "local-owner"
+        SubElement(owner, "DisplayName").text = "Local Owner"
+
+        version_count += 1
+        next_key_marker = obj.key
+
+        # Get historical versions
+        try:
+            versions = storage.list_object_versions(bucket_name, obj.key)
+            for v in versions:
+                if version_count >= max_keys:
+                    is_truncated = True
+                    break
+
+                ver_elem = SubElement(root, "Version")
+                SubElement(ver_elem, "Key").text = obj.key
+                SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown")
+                SubElement(ver_elem, "IsLatest").text = "false"
+                SubElement(ver_elem, "LastModified").text = v.get("archived_at", "")
+                SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"'
+                SubElement(ver_elem, "Size").text = str(v.get("size", 0))
+                SubElement(ver_elem, "StorageClass").text = "STANDARD"
+
+                owner = SubElement(ver_elem, "Owner")
+                SubElement(owner, "ID").text = "local-owner"
+                SubElement(owner, "DisplayName").text = "Local Owner"
+
+                version_count += 1
+        except StorageError:
+            pass
+
+    SubElement(root, "IsTruncated").text = "true" if is_truncated else "false"
+    if is_truncated and next_key_marker:
+        SubElement(root, "NextKeyMarker").text = next_key_marker
+
+    return _xml_response(root)
```
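Client-side, the new subresource can be exercised with boto3; the endpoint URL and the seeded localadmin credentials below are assumptions based on the project defaults documented later in this release:

```python
# Hypothetical call against the new GET /<bucket>?versions route.
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",
    aws_access_key_id="localadmin",
    aws_secret_access_key="localadmin",
)
resp = s3.list_object_versions(Bucket="demo", Prefix="reports/", MaxKeys=100)
for version in resp.get("Versions", []):
    print(version["Key"], version["VersionId"], version["IsLatest"])
```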
```diff
+def _bucket_lifecycle_handler(bucket_name: str) -> Response:
+    """Handle bucket lifecycle configuration (GET/PUT/DELETE /<bucket>?lifecycle)."""
+    if request.method not in {"GET", "PUT", "DELETE"}:
+        return _method_not_allowed(["GET", "PUT", "DELETE"])
+
+    principal, error = _require_principal()
+    if error:
+        return error
+    try:
+        _authorize_action(principal, bucket_name, "policy")
+    except IamError as exc:
+        return _error_response("AccessDenied", str(exc), 403)
+
+    storage = _storage()
+
+    if not storage.bucket_exists(bucket_name):
+        return _error_response("NoSuchBucket", "Bucket does not exist", 404)
+
+    if request.method == "GET":
+        config = storage.get_bucket_lifecycle(bucket_name)
+        if not config:
+            return _error_response("NoSuchLifecycleConfiguration", "The lifecycle configuration does not exist", 404)
+        return _xml_response(_render_lifecycle_config(config))
+
+    if request.method == "DELETE":
+        storage.set_bucket_lifecycle(bucket_name, None)
+        current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name})
+        return Response(status=204)
+
+    # PUT
+    payload = request.get_data(cache=False) or b""
+    if not payload.strip():
+        return _error_response("MalformedXML", "Request body is required", 400)
+    try:
+        config = _parse_lifecycle_config(payload)
+        storage.set_bucket_lifecycle(bucket_name, config)
+    except ValueError as exc:
+        return _error_response("MalformedXML", str(exc), 400)
+    except StorageError as exc:
+        return _error_response("NoSuchBucket", str(exc), 404)
+
+    current_app.logger.info("Bucket lifecycle updated", extra={"bucket": bucket_name})
+    return Response(status=200)
```
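These routes map onto the standard boto3 lifecycle calls; a sketch under the same endpoint and credential assumptions as above:

```python
# Hypothetical round trip over PUT/GET/DELETE /<bucket>?lifecycle.
import boto3

s3 = boto3.client("s3", endpoint_url="http://localhost:5000",
                  aws_access_key_id="localadmin",
                  aws_secret_access_key="localadmin")
s3.put_bucket_lifecycle_configuration(
    Bucket="demo",
    LifecycleConfiguration={"Rules": [
        {"ID": "expire-tmp", "Filter": {"Prefix": "tmp/"},
         "Status": "Enabled", "Expiration": {"Days": 7}},
    ]},
)
print(s3.get_bucket_lifecycle_configuration(Bucket="demo")["Rules"])
s3.delete_bucket_lifecycle(Bucket="demo")  # handler responds with 204
```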
```diff
+def _render_lifecycle_config(config: list) -> Element:
+    """Render lifecycle configuration to XML."""
+    root = Element("LifecycleConfiguration", xmlns="http://s3.amazonaws.com/doc/2006-03-01/")
+    for rule in config:
+        rule_el = SubElement(root, "Rule")
+        SubElement(rule_el, "ID").text = rule.get("ID", "")
+
+        # Filter
+        filter_el = SubElement(rule_el, "Filter")
+        if rule.get("Prefix"):
+            SubElement(filter_el, "Prefix").text = rule.get("Prefix", "")
+
+        SubElement(rule_el, "Status").text = rule.get("Status", "Enabled")
+
+        # Expiration
+        if "Expiration" in rule:
+            exp = rule["Expiration"]
+            exp_el = SubElement(rule_el, "Expiration")
+            if "Days" in exp:
+                SubElement(exp_el, "Days").text = str(exp["Days"])
+            if "Date" in exp:
+                SubElement(exp_el, "Date").text = exp["Date"]
+            if exp.get("ExpiredObjectDeleteMarker"):
+                SubElement(exp_el, "ExpiredObjectDeleteMarker").text = "true"
+
+        # NoncurrentVersionExpiration
+        if "NoncurrentVersionExpiration" in rule:
+            nve = rule["NoncurrentVersionExpiration"]
+            nve_el = SubElement(rule_el, "NoncurrentVersionExpiration")
+            if "NoncurrentDays" in nve:
+                SubElement(nve_el, "NoncurrentDays").text = str(nve["NoncurrentDays"])
+
+        # AbortIncompleteMultipartUpload
+        if "AbortIncompleteMultipartUpload" in rule:
+            aimu = rule["AbortIncompleteMultipartUpload"]
+            aimu_el = SubElement(rule_el, "AbortIncompleteMultipartUpload")
+            if "DaysAfterInitiation" in aimu:
+                SubElement(aimu_el, "DaysAfterInitiation").text = str(aimu["DaysAfterInitiation"])
+
+    return root
```
```diff
+def _parse_lifecycle_config(payload: bytes) -> list:
+    """Parse lifecycle configuration from XML."""
+    try:
+        root = fromstring(payload)
+    except ParseError as exc:
+        raise ValueError(f"Unable to parse XML document: {exc}") from exc
+
+    if _strip_ns(root.tag) != "LifecycleConfiguration":
+        raise ValueError("Root element must be LifecycleConfiguration")
+
+    rules = []
+    # The {*} wildcard matches the tag in any (or no) namespace, so no
+    # separate un-namespaced fallback lookup is needed. Note that a bare
+    # `find(...) or find(...)` would be buggy here: childless Elements are
+    # falsy, which would silently discard matched leaf elements.
+    for rule_el in root.findall("{*}Rule"):
+        rule: dict = {}
+
+        # ID
+        id_el = rule_el.find("{*}ID")
+        if id_el is not None and id_el.text:
+            rule["ID"] = id_el.text.strip()
+
+        # Filter/Prefix
+        filter_el = rule_el.find("{*}Filter")
+        if filter_el is not None:
+            prefix_el = filter_el.find("{*}Prefix")
+            if prefix_el is not None and prefix_el.text:
+                rule["Prefix"] = prefix_el.text
+
+        # Legacy Prefix (outside Filter)
+        if "Prefix" not in rule:
+            prefix_el = rule_el.find("{*}Prefix")
+            if prefix_el is not None:
+                rule["Prefix"] = prefix_el.text or ""
+
+        # Status
+        status_el = rule_el.find("{*}Status")
+        rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"
+
+        # Expiration
+        exp_el = rule_el.find("{*}Expiration")
+        if exp_el is not None:
+            expiration: dict = {}
+            days_el = exp_el.find("{*}Days")
+            if days_el is not None and days_el.text:
+                expiration["Days"] = int(days_el.text.strip())
+            date_el = exp_el.find("{*}Date")
+            if date_el is not None and date_el.text:
+                expiration["Date"] = date_el.text.strip()
+            eodm_el = exp_el.find("{*}ExpiredObjectDeleteMarker")
+            if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}:
+                expiration["ExpiredObjectDeleteMarker"] = True
+            if expiration:
+                rule["Expiration"] = expiration
+
+        # NoncurrentVersionExpiration
+        nve_el = rule_el.find("{*}NoncurrentVersionExpiration")
+        if nve_el is not None:
+            nve: dict = {}
+            days_el = nve_el.find("{*}NoncurrentDays")
+            if days_el is not None and days_el.text:
+                nve["NoncurrentDays"] = int(days_el.text.strip())
+            if nve:
+                rule["NoncurrentVersionExpiration"] = nve
+
+        # AbortIncompleteMultipartUpload
+        aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload")
+        if aimu_el is not None:
+            aimu: dict = {}
+            days_el = aimu_el.find("{*}DaysAfterInitiation")
+            if days_el is not None and days_el.text:
+                aimu["DaysAfterInitiation"] = int(days_el.text.strip())
+            if aimu:
+                rule["AbortIncompleteMultipartUpload"] = aimu
+
+        rules.append(rule)
+
+    return rules
 
 
 def _bulk_delete_handler(bucket_name: str) -> Response:
     principal, error = _require_principal()
     if error:
```
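A round trip through the two helpers above, using a minimal document the parser accepts (rule values are illustrative):

```python
payload = b"""<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-tmp</ID>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Status>Enabled</Status>
    <Expiration><Days>7</Days></Expiration>
  </Rule>
</LifecycleConfiguration>"""

rules = _parse_lifecycle_config(payload)
# [{'ID': 'expire-tmp', 'Prefix': 'tmp/', 'Status': 'Enabled',
#   'Expiration': {'Days': 7}}]
root = _render_lifecycle_config(rules)  # back to an Element tree for GET responses
```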
app/storage.py

```diff
@@ -116,7 +116,6 @@ class ObjectStorage:
         self.root.mkdir(parents=True, exist_ok=True)
         self._ensure_system_roots()
 
-    # ---------------------- Bucket helpers ----------------------
     def list_buckets(self) -> List[BucketMeta]:
         buckets: List[BucketMeta] = []
         for bucket in sorted(self.root.iterdir()):
@@ -160,17 +159,14 @@ class ObjectStorage:
         if not bucket_path.exists():
             raise StorageError("Bucket does not exist")
 
-        # Try to read from cache
         cache_path = self._system_bucket_root(bucket_name) / "stats.json"
         if cache_path.exists():
             try:
-                # Check if cache is fresh
                 if time.time() - cache_path.stat().st_mtime < cache_ttl:
                     return json.loads(cache_path.read_text(encoding="utf-8"))
             except (OSError, json.JSONDecodeError):
                 pass
 
-        # Calculate fresh stats
         object_count = 0
         total_bytes = 0
         for path in bucket_path.rglob("*"):
@@ -184,7 +180,6 @@ class ObjectStorage:
 
         stats = {"objects": object_count, "bytes": total_bytes}
 
-        # Write to cache
         try:
             cache_path.parent.mkdir(parents=True, exist_ok=True)
             cache_path.write_text(json.dumps(stats), encoding="utf-8")
@@ -215,7 +210,6 @@ class ObjectStorage:
         self._remove_tree(self._system_bucket_root(bucket_path.name))
         self._remove_tree(self._multipart_bucket_root(bucket_path.name))
 
-    # ---------------------- Object helpers ----------------------
     def list_objects(self, bucket_name: str) -> List[ObjectMeta]:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
@@ -272,7 +266,6 @@ class ObjectStorage:
         else:
             self._delete_metadata(bucket_id, safe_key)
 
-        # Invalidate bucket stats cache
         self._invalidate_bucket_stats_cache(bucket_id)
 
         return ObjectMeta(
@@ -309,7 +302,6 @@ class ObjectStorage:
         self._safe_unlink(path)
         self._delete_metadata(bucket_id, rel)
 
-        # Invalidate bucket stats cache
         self._invalidate_bucket_stats_cache(bucket_id)
 
         for parent in path.parents:
@@ -345,7 +337,6 @@ class ObjectStorage:
         if parent.exists() and not any(parent.iterdir()):
             parent.rmdir()
 
-    # ---------------------- Versioning helpers ----------------------
     def is_versioning_enabled(self, bucket_name: str) -> bool:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
@@ -358,7 +349,6 @@ class ObjectStorage:
         config["versioning_enabled"] = bool(enabled)
         self._write_bucket_config(bucket_path.name, config)
 
-    # ---------------------- Bucket configuration helpers ----------------------
     def get_bucket_tags(self, bucket_name: str) -> List[Dict[str, str]]:
         bucket_path = self._require_bucket_path(bucket_name)
         config = self._read_bucket_config(bucket_path.name)
@@ -411,7 +401,18 @@ class ObjectStorage:
         bucket_path = self._require_bucket_path(bucket_name)
         self._set_bucket_config_entry(bucket_path.name, "encryption", config_payload or None)
 
-    # ---------------------- Object tagging helpers ----------------------
+    def get_bucket_lifecycle(self, bucket_name: str) -> Optional[List[Dict[str, Any]]]:
+        """Get lifecycle configuration for bucket."""
+        bucket_path = self._require_bucket_path(bucket_name)
+        config = self._read_bucket_config(bucket_path.name)
+        lifecycle = config.get("lifecycle")
+        return lifecycle if isinstance(lifecycle, list) else None
+
+    def set_bucket_lifecycle(self, bucket_name: str, rules: Optional[List[Dict[str, Any]]]) -> None:
+        """Set lifecycle configuration for bucket."""
+        bucket_path = self._require_bucket_path(bucket_name)
+        self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules)
+
     def get_object_tags(self, bucket_name: str, object_key: str) -> List[Dict[str, str]]:
         """Get tags for an object."""
         bucket_path = self._bucket_path(bucket_name)
@@ -422,7 +423,6 @@ class ObjectStorage:
         if not object_path.exists():
             raise StorageError("Object does not exist")
 
-        # Tags are stored in the metadata file alongside user metadata
         for meta_file in (self._metadata_file(bucket_path.name, safe_key), self._legacy_metadata_file(bucket_path.name, safe_key)):
             if not meta_file.exists():
                 continue
@@ -448,7 +448,6 @@ class ObjectStorage:
 
         meta_file = self._metadata_file(bucket_path.name, safe_key)
 
-        # Read existing metadata
         existing_payload: Dict[str, Any] = {}
         if meta_file.exists():
             try:
@@ -456,19 +455,16 @@ class ObjectStorage:
             except (OSError, json.JSONDecodeError):
                 pass
 
-        # Update tags
         if tags:
             existing_payload["tags"] = tags
         else:
             existing_payload.pop("tags", None)
 
-        # Write back if there's anything to store, otherwise delete
         if existing_payload.get("metadata") or existing_payload.get("tags"):
             meta_file.parent.mkdir(parents=True, exist_ok=True)
             meta_file.write_text(json.dumps(existing_payload), encoding="utf-8")
         elif meta_file.exists():
             meta_file.unlink()
-            # Clean up empty parent directories
             parent = meta_file.parent
             meta_root = self._bucket_meta_root(bucket_path.name)
             while parent != meta_root and parent.exists() and not any(parent.iterdir()):
@@ -603,7 +599,6 @@ class ObjectStorage:
         record.pop("_latest_sort", None)
         return sorted(aggregated.values(), key=lambda item: item["key"])
 
-    # ---------------------- Multipart helpers ----------------------
     def initiate_multipart_upload(
         self,
         bucket_name: str,
@@ -695,7 +690,6 @@ class ObjectStorage:
         destination = bucket_path / safe_key
         destination.parent.mkdir(parents=True, exist_ok=True)
 
-        # Use a lock file to prevent concurrent writes to the same destination
         lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
         lock_file_path.parent.mkdir(parents=True, exist_ok=True)
 
@@ -726,7 +720,6 @@ class ObjectStorage:
         except BlockingIOError:
             raise StorageError("Another upload to this key is in progress")
         finally:
-            # Clean up lock file
             try:
                 lock_file_path.unlink(missing_ok=True)
             except OSError:
@@ -734,7 +727,6 @@ class ObjectStorage:
 
         shutil.rmtree(upload_root, ignore_errors=True)
 
-        # Invalidate bucket stats cache
         self._invalidate_bucket_stats_cache(bucket_id)
 
         stat = destination.stat()
@@ -783,7 +775,6 @@ class ObjectStorage:
         parts.sort(key=lambda x: x["PartNumber"])
         return parts
 
-    # ---------------------- internal helpers ----------------------
     def _bucket_path(self, bucket_name: str) -> Path:
         safe_name = self._sanitize_bucket_name(bucket_name)
         return self.root / safe_name
```
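The storage-level accessors are plain config passthroughs; a sketch assuming ObjectStorage is constructed with a data-root path (the constructor signature is not shown in this diff):

```python
from pathlib import Path
from app.storage import ObjectStorage

storage = ObjectStorage(Path("data"))  # assumed constructor
rules = [{"ID": "expire-tmp", "Prefix": "tmp/", "Status": "Enabled",
          "Expiration": {"Days": 7}}]
storage.set_bucket_lifecycle("demo", rules)   # persisted under the bucket config entry
assert storage.get_bucket_lifecycle("demo") == rules
storage.set_bucket_lifecycle("demo", None)    # clears the configuration
```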
run.py (20 lines)

```diff
@@ -3,6 +3,7 @@ from __future__ import annotations
 
 import argparse
 import os
+import sys
 import warnings
 from multiprocessing import Process
 
@@ -18,6 +19,11 @@ def _is_debug_enabled() -> bool:
     return os.getenv("FLASK_DEBUG", "0").lower() in ("1", "true", "yes")
 
 
+def _is_frozen() -> bool:
+    """Check if running as a compiled binary (PyInstaller/Nuitka)."""
+    return getattr(sys, 'frozen', False) or '__compiled__' in globals()
+
+
 def serve_api(port: int, prod: bool = False) -> None:
     app = create_api_app()
     if prod:
@@ -48,18 +54,28 @@ if __name__ == "__main__":
     parser.add_argument("--api-port", type=int, default=5000)
     parser.add_argument("--ui-port", type=int, default=5100)
     parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
+    parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
     args = parser.parse_args()
 
+    # Default to production mode when running as a compiled binary,
+    # unless --dev is explicitly passed.
+    prod_mode = args.prod or (_is_frozen() and not args.dev)
+
+    if prod_mode:
+        print("Running in production mode (Waitress)")
+    else:
+        print("Running in development mode (Flask dev server)")
+
     if args.mode in {"api", "both"}:
         print(f"Starting API server on port {args.api_port}...")
-        api_proc = Process(target=serve_api, args=(args.api_port, args.prod), daemon=True)
+        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode), daemon=True)
         api_proc.start()
     else:
        api_proc = None
 
     if args.mode in {"ui", "both"}:
         print(f"Starting UI server on port {args.ui_port}...")
-        serve_ui(args.ui_port, args.prod)
+        serve_ui(args.ui_port, prod_mode)
     elif api_proc:
         try:
             api_proc.join()
```
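In practice the new flag matrix looks like this (the myfsio binary name follows the docs below):

```bash
# Development run (Flask dev server) on the default ports
python run.py --mode both

# Explicit production mode (Waitress), even when running from source
python run.py --prod --api-port 5000 --ui-port 5100

# A compiled binary defaults to production; --dev forces the dev server
./myfsio --dev
```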
```diff
@@ -3,7 +3,7 @@
 <head>
   <meta charset="utf-8" />
   <meta name="viewport" content="width=device-width, initial-scale=1" />
-  <meta name="csrf-token" content="{{ csrf_token() }}" />
+  {% if principal %}<meta name="csrf-token" content="{{ csrf_token() }}" />{% endif %}
   <title>MyFSIO Console</title>
   <link rel="icon" type="image/png" href="{{ url_for('static', filename='images/MyFISO.png') }}" />
   <link rel="icon" type="image/x-icon" href="{{ url_for('static', filename='images/MyFISO.ico') }}" />
```
```diff
@@ -88,10 +88,67 @@ python run.py --mode ui
           </div>
         </div>
       </article>
-      <article id="auth" class="card shadow-sm docs-section">
+      <article id="background" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
             <span class="docs-section-kicker">02</span>
+            <h2 class="h4 mb-0">Running in background</h2>
+          </div>
+          <p class="text-muted">For production or server deployments, run MyFSIO as a background service so it persists after you close the terminal.</p>
+
+          <h3 class="h6 text-uppercase text-muted mt-4">Quick Start (nohup)</h3>
+          <p class="text-muted small">Simplest way to run in the background; survives terminal close:</p>
+          <pre class="mb-3"><code class="language-bash"># Using Python
+nohup python run.py --prod > /dev/null 2>&1 &
+
+# Using compiled binary
+nohup ./myfsio > /dev/null 2>&1 &
+
+# Check if running
+ps aux | grep myfsio</code></pre>
+
+          <h3 class="h6 text-uppercase text-muted mt-4">Screen / Tmux</h3>
+          <p class="text-muted small">Attach/detach from a persistent session:</p>
+          <pre class="mb-3"><code class="language-bash"># Start in a detached screen session
+screen -dmS myfsio ./myfsio
+
+# Attach to view logs
+screen -r myfsio
+
+# Detach: press Ctrl+A, then D</code></pre>
+
+          <h3 class="h6 text-uppercase text-muted mt-4">Systemd (Recommended for Production)</h3>
+          <p class="text-muted small">Create <code>/etc/systemd/system/myfsio.service</code>:</p>
+          <pre class="mb-3"><code class="language-ini">[Unit]
+Description=MyFSIO S3-Compatible Storage
+After=network.target
+
+[Service]
+Type=simple
+User=myfsio
+WorkingDirectory=/opt/myfsio
+ExecStart=/opt/myfsio/myfsio
+Restart=on-failure
+RestartSec=5
+Environment=MYFSIO_DATA_DIR=/var/lib/myfsio
+Environment=API_BASE_URL=https://s3.example.com
+
+[Install]
+WantedBy=multi-user.target</code></pre>
+          <p class="text-muted small">Then enable and start:</p>
+          <pre class="mb-0"><code class="language-bash">sudo systemctl daemon-reload
+sudo systemctl enable myfsio
+sudo systemctl start myfsio
+
+# Check status
+sudo systemctl status myfsio
+sudo journalctl -u myfsio -f  # View logs</code></pre>
+        </div>
+      </article>
+      <article id="auth" class="card shadow-sm docs-section">
+        <div class="card-body">
+          <div class="d-flex align-items-center gap-2 mb-3">
+            <span class="docs-section-kicker">03</span>
             <h2 class="h4 mb-0">Authenticate & manage IAM</h2>
           </div>
           <p class="text-muted">MyFSIO seeds <code>data/.myfsio.sys/config/iam.json</code> with <code>localadmin/localadmin</code>. Sign in once, rotate it, then grant least-privilege access to teammates and tools.</p>
@@ -109,7 +166,7 @@ python run.py --mode ui
       <article id="console" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">03</span>
+            <span class="docs-section-kicker">04</span>
             <h2 class="h4 mb-0">Use the console effectively</h2>
           </div>
           <p class="text-muted">Each workspace models an S3 workflow so you can administer buckets end-to-end.</p>
@@ -148,7 +205,7 @@ python run.py --mode ui
       <article id="automation" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">04</span>
+            <span class="docs-section-kicker">05</span>
             <h2 class="h4 mb-0">Automate with CLI & tools</h2>
           </div>
           <p class="text-muted">Point standard S3 clients at {{ api_base }} and reuse the same IAM credentials.</p>
@@ -201,7 +258,7 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
       <article id="api" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">05</span>
+            <span class="docs-section-kicker">06</span>
             <h2 class="h4 mb-0">Key REST endpoints</h2>
           </div>
           <div class="table-responsive">
@@ -268,7 +325,7 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
       <article id="examples" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">06</span>
+            <span class="docs-section-kicker">07</span>
             <h2 class="h4 mb-0">API Examples</h2>
           </div>
           <p class="text-muted">Common operations using boto3.</p>
@@ -307,7 +364,7 @@ s3.complete_multipart_upload(
       <article id="replication" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">07</span>
+            <span class="docs-section-kicker">08</span>
             <h2 class="h4 mb-0">Site Replication</h2>
           </div>
           <p class="text-muted">Automatically copy new objects to another MyFSIO instance or S3-compatible service for backup or disaster recovery.</p>
@@ -354,7 +411,7 @@ s3.complete_multipart_upload(
       <article id="troubleshooting" class="card shadow-sm docs-section">
         <div class="card-body">
           <div class="d-flex align-items-center gap-2 mb-3">
-            <span class="docs-section-kicker">08</span>
+            <span class="docs-section-kicker">09</span>
             <h2 class="h4 mb-0">Troubleshooting & tips</h2>
           </div>
           <div class="table-responsive">
@@ -404,10 +461,12 @@ s3.complete_multipart_upload(
           <h3 class="h6 text-uppercase text-muted mb-3">On this page</h3>
           <ul class="list-unstyled docs-toc mb-4">
             <li><a href="#setup">Set up & run</a></li>
+            <li><a href="#background">Running in background</a></li>
             <li><a href="#auth">Authentication & IAM</a></li>
             <li><a href="#console">Console tour</a></li>
             <li><a href="#automation">Automation / CLI</a></li>
             <li><a href="#api">REST endpoints</a></li>
+            <li><a href="#examples">API Examples</a></li>
             <li><a href="#replication">Site Replication</a></li>
             <li><a href="#troubleshooting">Troubleshooting</a></li>
           </ul>
```