Compare commits
10 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 66b7677d2c | |||
| 5003514a3d | |||
| 4d90ead816 | |||
| 20a314e030 | |||
| b37a51ed1d | |||
| d8232340c3 | |||
| a356bb0c4e | |||
| 1c328ee3af | |||
| 5bf7962c04 | |||
| e06f653606 |
@@ -115,7 +115,7 @@ def create_app(
|
||||
|
||||
storage = ObjectStorage(
|
||||
Path(app.config["STORAGE_ROOT"]),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
|
||||
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
|
||||
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
|
||||
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
|
||||
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
|
||||
|
||||
@@ -241,7 +241,7 @@ class AppConfig:
|
||||
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
|
||||
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
|
||||
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 5))
|
||||
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))
|
||||
|
||||
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
|
||||
|
||||
@@ -189,7 +189,13 @@ class EncryptedObjectStorage:
|
||||
|
||||
def list_objects(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects(bucket_name, **kwargs)
|
||||
|
||||
|
||||
def list_objects_shallow(self, bucket_name: str, **kwargs):
|
||||
return self.storage.list_objects_shallow(bucket_name, **kwargs)
|
||||
|
||||
def search_objects(self, bucket_name: str, query: str, **kwargs):
|
||||
return self.storage.search_objects(bucket_name, query, **kwargs)
|
||||
|
||||
def list_objects_all(self, bucket_name: str):
|
||||
return self.storage.list_objects_all(bucket_name)
|
||||
|
||||
|
||||
@@ -2671,54 +2671,43 @@ def bucket_handler(bucket_name: str) -> Response:
|
||||
else:
|
||||
effective_start = marker
|
||||
|
||||
fetch_keys = max_keys * 10 if delimiter else max_keys
|
||||
try:
|
||||
list_result = storage.list_objects(
|
||||
bucket_name,
|
||||
max_keys=fetch_keys,
|
||||
continuation_token=effective_start or None,
|
||||
prefix=prefix or None,
|
||||
)
|
||||
objects = list_result.objects
|
||||
if delimiter:
|
||||
shallow_result = storage.list_objects_shallow(
|
||||
bucket_name,
|
||||
prefix=prefix,
|
||||
delimiter=delimiter,
|
||||
max_keys=max_keys,
|
||||
continuation_token=effective_start or None,
|
||||
)
|
||||
objects = shallow_result.objects
|
||||
common_prefixes = shallow_result.common_prefixes
|
||||
is_truncated = shallow_result.is_truncated
|
||||
|
||||
next_marker = shallow_result.next_continuation_token or ""
|
||||
next_continuation_token = ""
|
||||
if is_truncated and next_marker and list_type == "2":
|
||||
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
|
||||
else:
|
||||
list_result = storage.list_objects(
|
||||
bucket_name,
|
||||
max_keys=max_keys,
|
||||
continuation_token=effective_start or None,
|
||||
prefix=prefix or None,
|
||||
)
|
||||
objects = list_result.objects
|
||||
common_prefixes = []
|
||||
is_truncated = list_result.is_truncated
|
||||
|
||||
next_marker = ""
|
||||
next_continuation_token = ""
|
||||
if is_truncated:
|
||||
if objects:
|
||||
next_marker = objects[-1].key
|
||||
if list_type == "2" and next_marker:
|
||||
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
|
||||
except StorageError as exc:
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
|
||||
common_prefixes: list[str] = []
|
||||
filtered_objects: list = []
|
||||
if delimiter:
|
||||
seen_prefixes: set[str] = set()
|
||||
for obj in objects:
|
||||
key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
|
||||
if delimiter in key_after_prefix:
|
||||
common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter
|
||||
if common_prefix not in seen_prefixes:
|
||||
seen_prefixes.add(common_prefix)
|
||||
common_prefixes.append(common_prefix)
|
||||
else:
|
||||
filtered_objects.append(obj)
|
||||
objects = filtered_objects
|
||||
common_prefixes = sorted(common_prefixes)
|
||||
|
||||
total_items = len(objects) + len(common_prefixes)
|
||||
is_truncated = total_items > max_keys or list_result.is_truncated
|
||||
|
||||
if len(objects) >= max_keys:
|
||||
objects = objects[:max_keys]
|
||||
common_prefixes = []
|
||||
else:
|
||||
remaining = max_keys - len(objects)
|
||||
common_prefixes = common_prefixes[:remaining]
|
||||
|
||||
next_marker = ""
|
||||
next_continuation_token = ""
|
||||
if is_truncated:
|
||||
if objects:
|
||||
next_marker = objects[-1].key
|
||||
elif common_prefixes:
|
||||
next_marker = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
|
||||
|
||||
if list_type == "2" and next_marker:
|
||||
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
|
||||
|
||||
if list_type == "2":
|
||||
root = Element("ListBucketResult")
|
||||
|
||||
@@ -245,6 +245,7 @@ def stream_objects_ndjson(
|
||||
url_templates: dict[str, str],
|
||||
display_tz: str = "UTC",
|
||||
versioning_enabled: bool = False,
|
||||
delimiter: Optional[str] = None,
|
||||
) -> Generator[str, None, None]:
|
||||
meta_line = json.dumps({
|
||||
"type": "meta",
|
||||
@@ -258,11 +259,20 @@ def stream_objects_ndjson(
|
||||
kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
|
||||
if prefix:
|
||||
kwargs["Prefix"] = prefix
|
||||
if delimiter:
|
||||
kwargs["Delimiter"] = delimiter
|
||||
|
||||
running_count = 0
|
||||
try:
|
||||
paginator = client.get_paginator("list_objects_v2")
|
||||
for page in paginator.paginate(**kwargs):
|
||||
for obj in page.get("Contents", []):
|
||||
for cp in page.get("CommonPrefixes", []):
|
||||
yield json.dumps({
|
||||
"type": "folder",
|
||||
"prefix": cp["Prefix"],
|
||||
}) + "\n"
|
||||
page_contents = page.get("Contents", [])
|
||||
for obj in page_contents:
|
||||
last_mod = obj["LastModified"]
|
||||
yield json.dumps({
|
||||
"type": "object",
|
||||
@@ -273,6 +283,8 @@ def stream_objects_ndjson(
|
||||
"last_modified_iso": format_datetime_iso(last_mod, display_tz),
|
||||
"etag": obj.get("ETag", "").strip('"'),
|
||||
}) + "\n"
|
||||
running_count += len(page_contents)
|
||||
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
|
||||
except ClientError as exc:
|
||||
error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
|
||||
yield json.dumps({"type": "error", "error": error_msg}) + "\n"
|
||||
|
||||
399
app/storage.py
399
app/storage.py
@@ -154,6 +154,15 @@ class ListObjectsResult:
|
||||
total_count: Optional[int] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ShallowListResult:
|
||||
"""Result for delimiter-aware directory-level listing."""
|
||||
objects: List[ObjectMeta]
|
||||
common_prefixes: List[str]
|
||||
is_truncated: bool
|
||||
next_continuation_token: Optional[str]
|
||||
|
||||
|
||||
def _utcnow() -> datetime:
|
||||
return datetime.now(timezone.utc)
|
||||
|
||||
@@ -279,25 +288,41 @@ class ObjectStorage:
|
||||
version_count = 0
|
||||
version_bytes = 0
|
||||
|
||||
internal = self.INTERNAL_FOLDERS
|
||||
bucket_str = str(bucket_path)
|
||||
|
||||
try:
|
||||
for path in bucket_path.rglob("*"):
|
||||
if path.is_file():
|
||||
rel = path.relative_to(bucket_path)
|
||||
if not rel.parts:
|
||||
continue
|
||||
top_folder = rel.parts[0]
|
||||
if top_folder not in self.INTERNAL_FOLDERS:
|
||||
stat = path.stat()
|
||||
object_count += 1
|
||||
total_bytes += stat.st_size
|
||||
stack = [bucket_str]
|
||||
while stack:
|
||||
current = stack.pop()
|
||||
try:
|
||||
with os.scandir(current) as it:
|
||||
for entry in it:
|
||||
if current == bucket_str and entry.name in internal:
|
||||
continue
|
||||
if entry.is_dir(follow_symlinks=False):
|
||||
stack.append(entry.path)
|
||||
elif entry.is_file(follow_symlinks=False):
|
||||
object_count += 1
|
||||
total_bytes += entry.stat(follow_symlinks=False).st_size
|
||||
except PermissionError:
|
||||
continue
|
||||
|
||||
versions_root = self._bucket_versions_root(bucket_name)
|
||||
if versions_root.exists():
|
||||
for path in versions_root.rglob("*.bin"):
|
||||
if path.is_file():
|
||||
stat = path.stat()
|
||||
version_count += 1
|
||||
version_bytes += stat.st_size
|
||||
v_stack = [str(versions_root)]
|
||||
while v_stack:
|
||||
v_current = v_stack.pop()
|
||||
try:
|
||||
with os.scandir(v_current) as it:
|
||||
for entry in it:
|
||||
if entry.is_dir(follow_symlinks=False):
|
||||
v_stack.append(entry.path)
|
||||
elif entry.is_file(follow_symlinks=False) and entry.name.endswith(".bin"):
|
||||
version_count += 1
|
||||
version_bytes += entry.stat(follow_symlinks=False).st_size
|
||||
except PermissionError:
|
||||
continue
|
||||
except OSError:
|
||||
if cached_stats is not None:
|
||||
return cached_stats
|
||||
@@ -377,9 +402,18 @@ class ObjectStorage:
|
||||
raise StorageError("Bucket contains archived object versions")
|
||||
if has_multipart:
|
||||
raise StorageError("Bucket has active multipart uploads")
|
||||
bucket_id = bucket_path.name
|
||||
self._remove_tree(bucket_path)
|
||||
self._remove_tree(self._system_bucket_root(bucket_path.name))
|
||||
self._remove_tree(self._multipart_bucket_root(bucket_path.name))
|
||||
self._remove_tree(self._system_bucket_root(bucket_id))
|
||||
self._remove_tree(self._multipart_bucket_root(bucket_id))
|
||||
self._bucket_config_cache.pop(bucket_id, None)
|
||||
with self._cache_lock:
|
||||
self._object_cache.pop(bucket_id, None)
|
||||
self._cache_version.pop(bucket_id, None)
|
||||
self._sorted_key_cache.pop(bucket_id, None)
|
||||
stale = [k for k in self._meta_read_cache if k[0] == bucket_id]
|
||||
for k in stale:
|
||||
del self._meta_read_cache[k]
|
||||
|
||||
def list_objects(
|
||||
self,
|
||||
@@ -462,6 +496,279 @@ class ObjectStorage:
|
||||
result = self.list_objects(bucket_name, max_keys=100000)
|
||||
return result.objects
|
||||
|
||||
def list_objects_shallow(
|
||||
self,
|
||||
bucket_name: str,
|
||||
*,
|
||||
prefix: str = "",
|
||||
delimiter: str = "/",
|
||||
max_keys: int = 1000,
|
||||
continuation_token: Optional[str] = None,
|
||||
) -> ShallowListResult:
|
||||
import bisect
|
||||
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
|
||||
if delimiter != "/" or (prefix and not prefix.endswith(delimiter)):
|
||||
return self._shallow_via_full_scan(
|
||||
bucket_name, prefix=prefix, delimiter=delimiter,
|
||||
max_keys=max_keys, continuation_token=continuation_token,
|
||||
)
|
||||
|
||||
target_dir = bucket_path
|
||||
if prefix:
|
||||
safe_prefix_path = Path(prefix.rstrip("/"))
|
||||
if ".." in safe_prefix_path.parts:
|
||||
return ShallowListResult(
|
||||
objects=[], common_prefixes=[],
|
||||
is_truncated=False, next_continuation_token=None,
|
||||
)
|
||||
target_dir = bucket_path / safe_prefix_path
|
||||
try:
|
||||
resolved = target_dir.resolve()
|
||||
bucket_resolved = bucket_path.resolve()
|
||||
if not str(resolved).startswith(str(bucket_resolved) + os.sep) and resolved != bucket_resolved:
|
||||
return ShallowListResult(
|
||||
objects=[], common_prefixes=[],
|
||||
is_truncated=False, next_continuation_token=None,
|
||||
)
|
||||
except (OSError, ValueError):
|
||||
return ShallowListResult(
|
||||
objects=[], common_prefixes=[],
|
||||
is_truncated=False, next_continuation_token=None,
|
||||
)
|
||||
|
||||
if not target_dir.exists() or not target_dir.is_dir():
|
||||
return ShallowListResult(
|
||||
objects=[], common_prefixes=[],
|
||||
is_truncated=False, next_continuation_token=None,
|
||||
)
|
||||
|
||||
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
|
||||
meta_cache: Dict[str, str] = {}
|
||||
if etag_index_path.exists():
|
||||
try:
|
||||
with open(etag_index_path, 'r', encoding='utf-8') as f:
|
||||
meta_cache = json.load(f)
|
||||
except (OSError, json.JSONDecodeError):
|
||||
pass
|
||||
|
||||
entries_files: list[tuple[str, int, float, Optional[str]]] = []
|
||||
entries_dirs: list[str] = []
|
||||
|
||||
try:
|
||||
with os.scandir(str(target_dir)) as it:
|
||||
for entry in it:
|
||||
name = entry.name
|
||||
if name in self.INTERNAL_FOLDERS:
|
||||
continue
|
||||
if entry.is_dir(follow_symlinks=False):
|
||||
cp = prefix + name + delimiter
|
||||
entries_dirs.append(cp)
|
||||
elif entry.is_file(follow_symlinks=False):
|
||||
key = prefix + name
|
||||
try:
|
||||
st = entry.stat()
|
||||
etag = meta_cache.get(key)
|
||||
entries_files.append((key, st.st_size, st.st_mtime, etag))
|
||||
except OSError:
|
||||
pass
|
||||
except OSError:
|
||||
return ShallowListResult(
|
||||
objects=[], common_prefixes=[],
|
||||
is_truncated=False, next_continuation_token=None,
|
||||
)
|
||||
|
||||
entries_dirs.sort()
|
||||
entries_files.sort(key=lambda x: x[0])
|
||||
|
||||
all_items: list[tuple[str, bool]] = []
|
||||
fi, di = 0, 0
|
||||
while fi < len(entries_files) and di < len(entries_dirs):
|
||||
if entries_files[fi][0] <= entries_dirs[di]:
|
||||
all_items.append((entries_files[fi][0], False))
|
||||
fi += 1
|
||||
else:
|
||||
all_items.append((entries_dirs[di], True))
|
||||
di += 1
|
||||
while fi < len(entries_files):
|
||||
all_items.append((entries_files[fi][0], False))
|
||||
fi += 1
|
||||
while di < len(entries_dirs):
|
||||
all_items.append((entries_dirs[di], True))
|
||||
di += 1
|
||||
|
||||
files_map = {e[0]: e for e in entries_files}
|
||||
|
||||
start_index = 0
|
||||
if continuation_token:
|
||||
all_keys = [item[0] for item in all_items]
|
||||
start_index = bisect.bisect_right(all_keys, continuation_token)
|
||||
|
||||
selected = all_items[start_index:start_index + max_keys]
|
||||
is_truncated = (start_index + max_keys) < len(all_items)
|
||||
|
||||
result_objects: list[ObjectMeta] = []
|
||||
result_prefixes: list[str] = []
|
||||
for item_key, is_dir in selected:
|
||||
if is_dir:
|
||||
result_prefixes.append(item_key)
|
||||
else:
|
||||
fdata = files_map[item_key]
|
||||
result_objects.append(ObjectMeta(
|
||||
key=fdata[0],
|
||||
size=fdata[1],
|
||||
last_modified=datetime.fromtimestamp(fdata[2], timezone.utc),
|
||||
etag=fdata[3],
|
||||
metadata=None,
|
||||
))
|
||||
|
||||
next_token = None
|
||||
if is_truncated and selected:
|
||||
next_token = selected[-1][0]
|
||||
|
||||
return ShallowListResult(
|
||||
objects=result_objects,
|
||||
common_prefixes=result_prefixes,
|
||||
is_truncated=is_truncated,
|
||||
next_continuation_token=next_token,
|
||||
)
|
||||
|
||||
def _shallow_via_full_scan(
|
||||
self,
|
||||
bucket_name: str,
|
||||
*,
|
||||
prefix: str = "",
|
||||
delimiter: str = "/",
|
||||
max_keys: int = 1000,
|
||||
continuation_token: Optional[str] = None,
|
||||
) -> ShallowListResult:
|
||||
list_result = self.list_objects(
|
||||
bucket_name,
|
||||
max_keys=max_keys * 10,
|
||||
continuation_token=continuation_token,
|
||||
prefix=prefix or None,
|
||||
)
|
||||
|
||||
common_prefixes: list[str] = []
|
||||
filtered_objects: list[ObjectMeta] = []
|
||||
seen_prefixes: set[str] = set()
|
||||
|
||||
for obj in list_result.objects:
|
||||
key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
|
||||
if delimiter in key_after_prefix:
|
||||
cp = prefix + key_after_prefix.split(delimiter)[0] + delimiter
|
||||
if cp not in seen_prefixes:
|
||||
seen_prefixes.add(cp)
|
||||
common_prefixes.append(cp)
|
||||
else:
|
||||
filtered_objects.append(obj)
|
||||
|
||||
common_prefixes.sort()
|
||||
total_items = len(filtered_objects) + len(common_prefixes)
|
||||
is_truncated = total_items > max_keys or list_result.is_truncated
|
||||
|
||||
if len(filtered_objects) >= max_keys:
|
||||
filtered_objects = filtered_objects[:max_keys]
|
||||
common_prefixes = []
|
||||
else:
|
||||
remaining = max_keys - len(filtered_objects)
|
||||
common_prefixes = common_prefixes[:remaining]
|
||||
|
||||
next_token = None
|
||||
if is_truncated:
|
||||
if filtered_objects:
|
||||
next_token = filtered_objects[-1].key
|
||||
elif common_prefixes:
|
||||
next_token = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
|
||||
|
||||
return ShallowListResult(
|
||||
objects=filtered_objects,
|
||||
common_prefixes=common_prefixes,
|
||||
is_truncated=is_truncated,
|
||||
next_continuation_token=next_token,
|
||||
)
|
||||
|
||||
def search_objects(
|
||||
self,
|
||||
bucket_name: str,
|
||||
query: str,
|
||||
*,
|
||||
prefix: str = "",
|
||||
limit: int = 500,
|
||||
) -> Dict[str, Any]:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.is_dir():
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
|
||||
if prefix:
|
||||
search_root = bucket_path / prefix.replace("/", os.sep)
|
||||
if not search_root.is_dir():
|
||||
return {"results": [], "truncated": False}
|
||||
resolved = search_root.resolve()
|
||||
if not str(resolved).startswith(str(bucket_path.resolve())):
|
||||
return {"results": [], "truncated": False}
|
||||
else:
|
||||
search_root = bucket_path
|
||||
|
||||
query_lower = query.lower()
|
||||
results: list[Dict[str, Any]] = []
|
||||
internal = self.INTERNAL_FOLDERS
|
||||
bucket_str = str(bucket_path)
|
||||
bucket_len = len(bucket_str) + 1
|
||||
meta_root = self._bucket_meta_root(bucket_name)
|
||||
scan_limit = limit * 4
|
||||
|
||||
matched = 0
|
||||
scanned = 0
|
||||
search_str = str(search_root)
|
||||
stack = [search_str]
|
||||
while stack:
|
||||
current = stack.pop()
|
||||
try:
|
||||
with os.scandir(current) as it:
|
||||
for entry in it:
|
||||
if current == bucket_str and entry.name in internal:
|
||||
continue
|
||||
if entry.is_dir(follow_symlinks=False):
|
||||
stack.append(entry.path)
|
||||
elif entry.is_file(follow_symlinks=False):
|
||||
scanned += 1
|
||||
key = entry.path[bucket_len:].replace(os.sep, "/")
|
||||
if query_lower in key.lower():
|
||||
st = entry.stat(follow_symlinks=False)
|
||||
meta_path = meta_root / (key + ".meta.json")
|
||||
last_modified = ""
|
||||
try:
|
||||
if meta_path.exists():
|
||||
md = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
last_modified = md.get("last_modified", "")
|
||||
except (OSError, json.JSONDecodeError):
|
||||
pass
|
||||
if not last_modified:
|
||||
last_modified = datetime.fromtimestamp(
|
||||
st.st_mtime, tz=timezone.utc
|
||||
).strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
||||
results.append({
|
||||
"key": key,
|
||||
"size": st.st_size,
|
||||
"last_modified": last_modified,
|
||||
})
|
||||
matched += 1
|
||||
if matched >= scan_limit:
|
||||
break
|
||||
except PermissionError:
|
||||
continue
|
||||
if matched >= scan_limit:
|
||||
break
|
||||
|
||||
results.sort(key=lambda r: r["key"])
|
||||
truncated = len(results) > limit
|
||||
return {"results": results[:limit], "truncated": truncated}
|
||||
|
||||
def put_object(
|
||||
self,
|
||||
bucket_name: str,
|
||||
@@ -962,13 +1269,19 @@ class ObjectStorage:
|
||||
version_bytes_delta=archived_version_size,
|
||||
version_count_delta=1 if archived_version_size > 0 else 0,
|
||||
)
|
||||
return ObjectMeta(
|
||||
etag = self._compute_etag(destination)
|
||||
internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
|
||||
combined_meta = {**internal_meta, **(metadata or {})}
|
||||
self._write_metadata(bucket_id, safe_key, combined_meta)
|
||||
obj_meta = ObjectMeta(
|
||||
key=safe_key.as_posix(),
|
||||
size=stat.st_size,
|
||||
last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
|
||||
etag=self._compute_etag(destination),
|
||||
etag=etag,
|
||||
metadata=metadata or None,
|
||||
)
|
||||
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), obj_meta)
|
||||
return obj_meta
|
||||
|
||||
def delete_object_version(self, bucket_name: str, object_key: str, version_id: str) -> None:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
@@ -1766,11 +2079,6 @@ class ObjectStorage:
|
||||
return 0
|
||||
|
||||
def _update_object_cache_entry(self, bucket_id: str, key: str, meta: Optional[ObjectMeta]) -> None:
|
||||
"""Update a single entry in the object cache instead of invalidating the whole cache.
|
||||
|
||||
This is a performance optimization - lazy update instead of full invalidation.
|
||||
Cross-process invalidation is handled by checking stats.json mtime.
|
||||
"""
|
||||
with self._cache_lock:
|
||||
cached = self._object_cache.get(bucket_id)
|
||||
if cached:
|
||||
@@ -1782,6 +2090,25 @@ class ObjectStorage:
|
||||
self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1
|
||||
self._sorted_key_cache.pop(bucket_id, None)
|
||||
|
||||
self._update_etag_index(bucket_id, key, meta.etag if meta else None)
|
||||
|
||||
def _update_etag_index(self, bucket_id: str, key: str, etag: Optional[str]) -> None:
|
||||
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
|
||||
try:
|
||||
index: Dict[str, str] = {}
|
||||
if etag_index_path.exists():
|
||||
with open(etag_index_path, 'r', encoding='utf-8') as f:
|
||||
index = json.load(f)
|
||||
if etag is None:
|
||||
index.pop(key, None)
|
||||
else:
|
||||
index[key] = etag
|
||||
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(etag_index_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(index, f)
|
||||
except (OSError, json.JSONDecodeError):
|
||||
pass
|
||||
|
||||
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
|
||||
"""Pre-warm the object cache for specified buckets or all buckets.
|
||||
|
||||
@@ -1832,30 +2159,40 @@ class ObjectStorage:
|
||||
|
||||
def _read_bucket_config(self, bucket_name: str) -> dict[str, Any]:
|
||||
now = time.time()
|
||||
config_path = self._bucket_config_path(bucket_name)
|
||||
cached = self._bucket_config_cache.get(bucket_name)
|
||||
if cached:
|
||||
config, cached_time = cached
|
||||
config, cached_time, cached_mtime = cached
|
||||
if now - cached_time < self._bucket_config_cache_ttl:
|
||||
return config.copy()
|
||||
try:
|
||||
current_mtime = config_path.stat().st_mtime if config_path.exists() else 0.0
|
||||
except OSError:
|
||||
current_mtime = 0.0
|
||||
if current_mtime == cached_mtime:
|
||||
return config.copy()
|
||||
|
||||
config_path = self._bucket_config_path(bucket_name)
|
||||
if not config_path.exists():
|
||||
self._bucket_config_cache[bucket_name] = ({}, now)
|
||||
self._bucket_config_cache[bucket_name] = ({}, now, 0.0)
|
||||
return {}
|
||||
try:
|
||||
data = json.loads(config_path.read_text(encoding="utf-8"))
|
||||
config = data if isinstance(data, dict) else {}
|
||||
self._bucket_config_cache[bucket_name] = (config, now)
|
||||
mtime = config_path.stat().st_mtime
|
||||
self._bucket_config_cache[bucket_name] = (config, now, mtime)
|
||||
return config.copy()
|
||||
except (OSError, json.JSONDecodeError):
|
||||
self._bucket_config_cache[bucket_name] = ({}, now)
|
||||
self._bucket_config_cache[bucket_name] = ({}, now, 0.0)
|
||||
return {}
|
||||
|
||||
def _write_bucket_config(self, bucket_name: str, payload: dict[str, Any]) -> None:
|
||||
config_path = self._bucket_config_path(bucket_name)
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
config_path.write_text(json.dumps(payload), encoding="utf-8")
|
||||
self._bucket_config_cache[bucket_name] = (payload.copy(), time.time())
|
||||
try:
|
||||
mtime = config_path.stat().st_mtime
|
||||
except OSError:
|
||||
mtime = 0.0
|
||||
self._bucket_config_cache[bucket_name] = (payload.copy(), time.time(), mtime)
|
||||
|
||||
def _set_bucket_config_entry(self, bucket_name: str, key: str, value: Any | None) -> None:
|
||||
config = self._read_bucket_config(bucket_name)
|
||||
|
||||
33
app/ui.py
33
app/ui.py
@@ -616,6 +616,7 @@ def stream_bucket_objects(bucket_name: str):
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
prefix = request.args.get("prefix") or None
|
||||
delimiter = request.args.get("delimiter") or None
|
||||
|
||||
try:
|
||||
client = get_session_s3_client()
|
||||
@@ -629,6 +630,7 @@ def stream_bucket_objects(bucket_name: str):
|
||||
return Response(
|
||||
stream_objects_ndjson(
|
||||
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
|
||||
delimiter=delimiter,
|
||||
),
|
||||
mimetype='application/x-ndjson',
|
||||
headers={
|
||||
@@ -639,6 +641,33 @@ def stream_bucket_objects(bucket_name: str):
|
||||
)
|
||||
|
||||
|
||||
@ui_bp.get("/buckets/<bucket_name>/objects/search")
|
||||
@limiter.limit("30 per minute")
|
||||
def search_bucket_objects(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "list")
|
||||
except IamError as exc:
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
query = request.args.get("q", "").strip()
|
||||
if not query:
|
||||
return jsonify({"results": [], "truncated": False})
|
||||
|
||||
try:
|
||||
limit = max(1, min(int(request.args.get("limit", 500)), 1000))
|
||||
except (ValueError, TypeError):
|
||||
limit = 500
|
||||
|
||||
prefix = request.args.get("prefix", "").strip()
|
||||
|
||||
storage = _storage()
|
||||
try:
|
||||
return jsonify(storage.search_objects(bucket_name, query, prefix=prefix, limit=limit))
|
||||
except StorageError as exc:
|
||||
return jsonify({"error": str(exc)}), 404
|
||||
|
||||
|
||||
@ui_bp.post("/buckets/<bucket_name>/upload")
|
||||
@limiter.limit("30 per minute")
|
||||
def upload_object(bucket_name: str):
|
||||
@@ -1301,12 +1330,14 @@ def object_versions(bucket_name: str, object_key: str):
|
||||
for v in resp.get("Versions", []):
|
||||
if v.get("Key") != object_key:
|
||||
continue
|
||||
if v.get("IsLatest", False):
|
||||
continue
|
||||
versions.append({
|
||||
"version_id": v.get("VersionId", ""),
|
||||
"last_modified": v["LastModified"].isoformat() if v.get("LastModified") else None,
|
||||
"size": v.get("Size", 0),
|
||||
"etag": v.get("ETag", "").strip('"'),
|
||||
"is_latest": v.get("IsLatest", False),
|
||||
"is_latest": False,
|
||||
})
|
||||
return jsonify({"versions": versions})
|
||||
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
APP_VERSION = "0.3.0"
|
||||
APP_VERSION = "0.3.2"
|
||||
|
||||
|
||||
def get_version() -> str:
|
||||
|
||||
69
docs.md
69
docs.md
@@ -139,6 +139,7 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. |
|
||||
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
|
||||
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
|
||||
| `DISPLAY_TIMEZONE` | `UTC` | Timezone for timestamps in the web UI (e.g., `US/Eastern`, `Asia/Tokyo`). |
|
||||
|
||||
### IAM & Security
|
||||
|
||||
@@ -170,6 +171,7 @@ All configuration is done via environment variables. The table below lists every
|
||||
| `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/<bucket>`). |
|
||||
| `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `/<bucket>/<key>`). |
|
||||
| `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). |
|
||||
| `RATE_LIMIT_ADMIN` | `60 per minute` | Rate limit for admin API endpoints (`/admin/*`). |
|
||||
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
|
||||
|
||||
### Server Configuration
|
||||
@@ -256,6 +258,12 @@ Once enabled, configure lifecycle rules via:
|
||||
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
|
||||
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
|
||||
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
|
||||
| `BULK_DOWNLOAD_MAX_BYTES` | `1073741824` (1 GiB) | Maximum total size for bulk ZIP downloads. |
|
||||
| `OBJECT_CACHE_TTL` | `60` | Seconds to cache object metadata. |
|
||||
|
||||
#### Gzip Compression
|
||||
|
||||
API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-compressed when the client sends `Accept-Encoding: gzip`. Compression activates for responses larger than 500 bytes and is handled by a WSGI middleware (`app/compression.py`). Binary object downloads and streaming responses are never compressed. No configuration is needed.
|
||||
|
||||
### Server Settings
|
||||
|
||||
@@ -285,6 +293,12 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
|
||||
|
||||
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
|
||||
|
||||
| Variable | Default | Notes |
|
||||
| --- | --- | --- |
|
||||
| `NUM_TRUSTED_PROXIES` | `1` | Number of trusted reverse proxies for `X-Forwarded-*` header processing. |
|
||||
| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. |
|
||||
| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** |
|
||||
|
||||
## 4. Upgrading and Updates
|
||||
|
||||
### Version Checking
|
||||
@@ -912,7 +926,7 @@ Objects with forward slashes (`/`) in their keys are displayed as a folder hiera
|
||||
|
||||
- Select multiple objects using checkboxes
|
||||
- **Bulk Delete**: Delete multiple objects at once
|
||||
- **Bulk Download**: Download selected objects as individual files
|
||||
- **Bulk Download**: Download selected objects as a single ZIP archive (up to `BULK_DOWNLOAD_MAX_BYTES`, default 1 GiB)
|
||||
|
||||
#### Search & Filter
|
||||
|
||||
@@ -985,6 +999,7 @@ MyFSIO supports **server-side encryption at rest** to protect your data. When en
|
||||
|------|-------------|
|
||||
| **AES-256 (SSE-S3)** | Server-managed encryption using a local master key |
|
||||
| **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS |
|
||||
| **SSE-C** | Server-side encryption with customer-provided keys (per-request) |
|
||||
|
||||
### Enabling Encryption
|
||||
|
||||
@@ -1083,6 +1098,44 @@ encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key)
|
||||
decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key)
|
||||
```
|
||||
|
||||
### SSE-C (Customer-Provided Keys)
|
||||
|
||||
With SSE-C, you provide your own 256-bit AES encryption key with each request. The server encrypts/decrypts using your key but never stores it. You must supply the same key for both upload and download.
|
||||
|
||||
**Required headers:**
|
||||
|
||||
| Header | Value |
|
||||
|--------|-------|
|
||||
| `x-amz-server-side-encryption-customer-algorithm` | `AES256` |
|
||||
| `x-amz-server-side-encryption-customer-key` | Base64-encoded 256-bit key |
|
||||
| `x-amz-server-side-encryption-customer-key-MD5` | Base64-encoded MD5 of the key |
|
||||
|
||||
```bash
|
||||
# Generate a 256-bit key
|
||||
KEY=$(openssl rand -base64 32)
|
||||
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
|
||||
|
||||
# Upload with SSE-C
|
||||
curl -X PUT "http://localhost:5000/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
|
||||
--data-binary @secret.txt
|
||||
|
||||
# Download with SSE-C (same key required)
|
||||
curl "http://localhost:5000/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
- SSE-C does not require `ENCRYPTION_ENABLED` or `KMS_ENABLED` — the key is provided per-request
|
||||
- If you lose your key, the data is irrecoverable
|
||||
- The MD5 header is optional but recommended for integrity verification
|
||||
|
||||
### Important Notes
|
||||
|
||||
- **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted
|
||||
@@ -1959,6 +2012,20 @@ curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
|
||||
-H "x-amz-meta-newkey: newvalue"
|
||||
```
|
||||
|
||||
### MoveObject (UI)
|
||||
|
||||
Move an object to a different key or bucket. This is a UI-only convenience operation that performs a copy followed by a delete of the source. Requires `read` and `delete` on the source, and `write` on the destination.
|
||||
|
||||
```bash
|
||||
# Move via UI API
|
||||
curl -X POST "http://localhost:5100/ui/buckets/my-bucket/objects/old-path/file.txt/move" \
|
||||
-H "Content-Type: application/json" \
|
||||
--cookie "session=..." \
|
||||
-d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'
|
||||
```
|
||||
|
||||
The move is atomic from the caller's perspective: if the copy succeeds but the delete fails, the object exists in both locations (no data loss).
|
||||
|
||||
### UploadPartCopy
|
||||
|
||||
Copy data from an existing object into a multipart upload part:
|
||||
|
||||
@@ -137,11 +137,11 @@
|
||||
const versionPanel = document.getElementById('version-panel');
|
||||
const versionList = document.getElementById('version-list');
|
||||
const refreshVersionsButton = document.getElementById('refreshVersionsButton');
|
||||
const archivedCard = document.getElementById('archived-objects-card');
|
||||
const archivedBody = archivedCard?.querySelector('[data-archived-body]');
|
||||
const archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
|
||||
const archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
|
||||
const archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
|
||||
let archivedCard = document.getElementById('archived-objects-card');
|
||||
let archivedBody = archivedCard?.querySelector('[data-archived-body]');
|
||||
let archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
|
||||
let archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
|
||||
let archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
|
||||
let versioningEnabled = objectsContainer?.dataset.versioning === 'true';
|
||||
const versionsCache = new Map();
|
||||
let activeRow = null;
|
||||
@@ -167,6 +167,8 @@
|
||||
let pageSize = 5000;
|
||||
let currentPrefix = '';
|
||||
let allObjects = [];
|
||||
let streamFolders = [];
|
||||
let useDelimiterMode = true;
|
||||
let urlTemplates = null;
|
||||
let streamAbortController = null;
|
||||
let useStreaming = !!objectsStreamUrl;
|
||||
@@ -186,7 +188,7 @@
|
||||
let renderedRange = { start: 0, end: 0 };
|
||||
|
||||
let memoizedVisibleItems = null;
|
||||
let memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
|
||||
let memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
|
||||
const createObjectRow = (obj, displayKey = null) => {
|
||||
const tr = document.createElement('tr');
|
||||
@@ -319,10 +321,13 @@
|
||||
`;
|
||||
};
|
||||
|
||||
const bucketTotalObjects = objectsContainer ? parseInt(objectsContainer.dataset.bucketTotalObjects || '0', 10) : 0;
|
||||
|
||||
const updateObjectCountBadge = () => {
|
||||
if (!objectCountBadge) return;
|
||||
if (totalObjectCount === 0) {
|
||||
objectCountBadge.textContent = '0 objects';
|
||||
if (useDelimiterMode) {
|
||||
const total = bucketTotalObjects || totalObjectCount;
|
||||
objectCountBadge.textContent = `${total.toLocaleString()} object${total !== 1 ? 's' : ''}`;
|
||||
} else {
|
||||
objectCountBadge.textContent = `${totalObjectCount.toLocaleString()} object${totalObjectCount !== 1 ? 's' : ''}`;
|
||||
}
|
||||
@@ -349,6 +354,7 @@
|
||||
const computeVisibleItems = (forceRecompute = false) => {
|
||||
const currentInputs = {
|
||||
objectCount: allObjects.length,
|
||||
folderCount: streamFolders.length,
|
||||
prefix: currentPrefix,
|
||||
filterTerm: currentFilterTerm,
|
||||
sortField: currentSortField,
|
||||
@@ -358,6 +364,7 @@
|
||||
if (!forceRecompute &&
|
||||
memoizedVisibleItems !== null &&
|
||||
memoizedInputs.objectCount === currentInputs.objectCount &&
|
||||
memoizedInputs.folderCount === currentInputs.folderCount &&
|
||||
memoizedInputs.prefix === currentInputs.prefix &&
|
||||
memoizedInputs.filterTerm === currentInputs.filterTerm &&
|
||||
memoizedInputs.sortField === currentInputs.sortField &&
|
||||
@@ -366,36 +373,49 @@
|
||||
}
|
||||
|
||||
const items = [];
|
||||
const folders = new Set();
|
||||
|
||||
allObjects.forEach(obj => {
|
||||
if (!obj.key.startsWith(currentPrefix)) return;
|
||||
if (searchResults !== null) {
|
||||
searchResults.forEach(obj => {
|
||||
items.push({ type: 'file', data: obj, displayKey: obj.key });
|
||||
});
|
||||
} else if (useDelimiterMode && streamFolders.length > 0) {
|
||||
streamFolders.forEach(folderPath => {
|
||||
const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, '');
|
||||
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
|
||||
});
|
||||
allObjects.forEach(obj => {
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
if (!remainder) return;
|
||||
items.push({ type: 'file', data: obj, displayKey: remainder });
|
||||
});
|
||||
} else {
|
||||
const folders = new Set();
|
||||
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
allObjects.forEach(obj => {
|
||||
if (!obj.key.startsWith(currentPrefix)) return;
|
||||
|
||||
if (!remainder) return;
|
||||
const remainder = obj.key.slice(currentPrefix.length);
|
||||
|
||||
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
|
||||
const slashIndex = remainder.indexOf('/');
|
||||
if (!remainder) return;
|
||||
|
||||
if (slashIndex === -1 && !isFolderMarker) {
|
||||
if (!currentFilterTerm || remainder.toLowerCase().includes(currentFilterTerm)) {
|
||||
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
|
||||
const slashIndex = remainder.indexOf('/');
|
||||
|
||||
if (slashIndex === -1 && !isFolderMarker) {
|
||||
items.push({ type: 'file', data: obj, displayKey: remainder });
|
||||
}
|
||||
} else {
|
||||
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
|
||||
? slashIndex
|
||||
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
|
||||
const folderName = remainder.slice(0, effectiveSlashIndex);
|
||||
const folderPath = currentPrefix + folderName + '/';
|
||||
if (!folders.has(folderPath)) {
|
||||
folders.add(folderPath);
|
||||
if (!currentFilterTerm || folderName.toLowerCase().includes(currentFilterTerm)) {
|
||||
} else {
|
||||
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
|
||||
? slashIndex
|
||||
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
|
||||
const folderName = remainder.slice(0, effectiveSlashIndex);
|
||||
const folderPath = currentPrefix + folderName + '/';
|
||||
if (!folders.has(folderPath)) {
|
||||
folders.add(folderPath);
|
||||
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
items.sort((a, b) => {
|
||||
if (a.type === 'folder' && b.type === 'file') return -1;
|
||||
@@ -471,7 +491,7 @@
|
||||
renderedRange = { start: -1, end: -1 };
|
||||
|
||||
if (visibleItems.length === 0) {
|
||||
if (allObjects.length === 0 && !hasMoreObjects) {
|
||||
if (allObjects.length === 0 && streamFolders.length === 0 && !hasMoreObjects) {
|
||||
showEmptyState();
|
||||
} else {
|
||||
objectsTableBody.innerHTML = `
|
||||
@@ -500,15 +520,7 @@
|
||||
const updateFolderViewStatus = () => {
|
||||
const folderViewStatusEl = document.getElementById('folder-view-status');
|
||||
if (!folderViewStatusEl) return;
|
||||
|
||||
if (currentPrefix) {
|
||||
const folderCount = visibleItems.filter(i => i.type === 'folder').length;
|
||||
const fileCount = visibleItems.filter(i => i.type === 'file').length;
|
||||
folderViewStatusEl.innerHTML = `<span class="text-muted">${folderCount} folder${folderCount !== 1 ? 's' : ''}, ${fileCount} file${fileCount !== 1 ? 's' : ''} in this view</span>`;
|
||||
folderViewStatusEl.classList.remove('d-none');
|
||||
} else {
|
||||
folderViewStatusEl.classList.add('d-none');
|
||||
}
|
||||
folderViewStatusEl.classList.add('d-none');
|
||||
};
|
||||
|
||||
const processStreamObject = (obj) => {
|
||||
@@ -536,21 +548,30 @@
|
||||
let lastStreamRenderTime = 0;
|
||||
const STREAM_RENDER_THROTTLE_MS = 500;
|
||||
|
||||
const buildBottomStatusText = (complete) => {
|
||||
if (!complete) {
|
||||
const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
|
||||
return `${loadedObjectCount.toLocaleString()}${countText} loading...`;
|
||||
}
|
||||
const parts = [];
|
||||
if (useDelimiterMode && streamFolders.length > 0) {
|
||||
parts.push(`${streamFolders.length.toLocaleString()} folder${streamFolders.length !== 1 ? 's' : ''}`);
|
||||
}
|
||||
parts.push(`${loadedObjectCount.toLocaleString()} object${loadedObjectCount !== 1 ? 's' : ''}`);
|
||||
return parts.join(', ');
|
||||
};
|
||||
|
||||
const flushPendingStreamObjects = () => {
|
||||
if (pendingStreamObjects.length === 0) return;
|
||||
const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
|
||||
batch.forEach(obj => {
|
||||
loadedObjectCount++;
|
||||
allObjects.push(obj);
|
||||
});
|
||||
if (pendingStreamObjects.length > 0) {
|
||||
const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
|
||||
batch.forEach(obj => {
|
||||
loadedObjectCount++;
|
||||
allObjects.push(obj);
|
||||
});
|
||||
}
|
||||
updateObjectCountBadge();
|
||||
if (loadMoreStatus) {
|
||||
if (streamingComplete) {
|
||||
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
|
||||
} else {
|
||||
const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
|
||||
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()}${countText} loading...`;
|
||||
}
|
||||
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
|
||||
}
|
||||
if (objectsLoadingRow && objectsLoadingRow.parentNode) {
|
||||
const loadingText = objectsLoadingRow.querySelector('p');
|
||||
@@ -585,8 +606,9 @@
|
||||
loadedObjectCount = 0;
|
||||
totalObjectCount = 0;
|
||||
allObjects = [];
|
||||
streamFolders = [];
|
||||
memoizedVisibleItems = null;
|
||||
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
|
||||
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
pendingStreamObjects = [];
|
||||
lastStreamRenderTime = 0;
|
||||
|
||||
@@ -595,6 +617,7 @@
|
||||
try {
|
||||
const params = new URLSearchParams();
|
||||
if (currentPrefix) params.set('prefix', currentPrefix);
|
||||
if (useDelimiterMode) params.set('delimiter', '/');
|
||||
|
||||
const response = await fetch(`${objectsStreamUrl}?${params}`, {
|
||||
signal: streamAbortController.signal
|
||||
@@ -639,6 +662,10 @@
|
||||
if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
|
||||
}
|
||||
break;
|
||||
case 'folder':
|
||||
streamFolders.push(msg.prefix);
|
||||
scheduleStreamRender();
|
||||
break;
|
||||
case 'object':
|
||||
pendingStreamObjects.push(processStreamObject(msg));
|
||||
if (pendingStreamObjects.length >= STREAM_RENDER_BATCH) {
|
||||
@@ -682,7 +709,7 @@
|
||||
}
|
||||
|
||||
if (loadMoreStatus) {
|
||||
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
|
||||
loadMoreStatus.textContent = buildBottomStatusText(true);
|
||||
}
|
||||
refreshVirtualList();
|
||||
renderBreadcrumb(currentPrefix);
|
||||
@@ -710,8 +737,9 @@
|
||||
loadedObjectCount = 0;
|
||||
totalObjectCount = 0;
|
||||
allObjects = [];
|
||||
streamFolders = [];
|
||||
memoizedVisibleItems = null;
|
||||
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
|
||||
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
}
|
||||
|
||||
if (append && loadMoreSpinner) {
|
||||
@@ -913,7 +941,7 @@
|
||||
});
|
||||
}
|
||||
|
||||
const hasFolders = () => allObjects.some(obj => obj.key.includes('/'));
|
||||
const hasFolders = () => streamFolders.length > 0 || allObjects.some(obj => obj.key.includes('/'));
|
||||
|
||||
const getFoldersAtPrefix = (prefix) => {
|
||||
const folders = new Set();
|
||||
@@ -940,6 +968,9 @@
|
||||
};
|
||||
|
||||
const countObjectsInFolder = (folderPrefix) => {
|
||||
if (useDelimiterMode) {
|
||||
return { count: 0, mayHaveMore: true };
|
||||
}
|
||||
const count = allObjects.filter(obj => obj.key.startsWith(folderPrefix)).length;
|
||||
return { count, mayHaveMore: hasMoreObjects };
|
||||
};
|
||||
@@ -1018,7 +1049,13 @@
|
||||
const createFolderRow = (folderPath, displayName = null) => {
|
||||
const folderName = displayName || folderPath.slice(currentPrefix.length).replace(/\/$/, '');
|
||||
const { count: objectCount, mayHaveMore } = countObjectsInFolder(folderPath);
|
||||
const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
|
||||
let countLine = '';
|
||||
if (useDelimiterMode) {
|
||||
countLine = '';
|
||||
} else {
|
||||
const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
|
||||
countLine = `<div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>`;
|
||||
}
|
||||
|
||||
const tr = document.createElement('tr');
|
||||
tr.className = 'folder-row';
|
||||
@@ -1036,7 +1073,7 @@
|
||||
</svg>
|
||||
<span>${escapeHtml(folderName)}/</span>
|
||||
</div>
|
||||
<div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>
|
||||
${countLine}
|
||||
</td>
|
||||
<td class="text-end text-nowrap">
|
||||
<span class="text-muted small">—</span>
|
||||
@@ -1537,7 +1574,7 @@
|
||||
|
||||
const confirmVersionRestore = (row, version, label = null, onConfirm) => {
|
||||
if (!version) return;
|
||||
const timestamp = version.archived_at ? new Date(version.archived_at).toLocaleString() : version.version_id;
|
||||
const timestamp = (version.archived_at || version.last_modified) ? new Date(version.archived_at || version.last_modified).toLocaleString() : version.version_id;
|
||||
const sizeLabel = formatBytes(Number(version.size) || 0);
|
||||
const reasonLabel = describeVersionReason(version.reason);
|
||||
const targetLabel = label || row?.dataset.key || 'this object';
|
||||
@@ -1610,7 +1647,7 @@
|
||||
|
||||
const latestCell = document.createElement('td');
|
||||
if (item.latest) {
|
||||
const ts = item.latest.archived_at ? new Date(item.latest.archived_at).toLocaleString() : item.latest.version_id;
|
||||
const ts = (item.latest.archived_at || item.latest.last_modified) ? new Date(item.latest.archived_at || item.latest.last_modified).toLocaleString() : item.latest.version_id;
|
||||
const sizeLabel = formatBytes(Number(item.latest.size) || 0);
|
||||
latestCell.innerHTML = `<div class="small">${ts}</div><div class="text-muted small">${sizeLabel} · ${describeVersionReason(item.latest.reason)}</div>`;
|
||||
} else {
|
||||
@@ -1737,6 +1774,15 @@
|
||||
loadArchivedObjects();
|
||||
}
|
||||
|
||||
const propertiesTab = document.getElementById('properties-tab');
|
||||
if (propertiesTab) {
|
||||
propertiesTab.addEventListener('shown.bs.tab', () => {
|
||||
if (archivedCard && archivedEndpoint) {
|
||||
loadArchivedObjects();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function restoreVersion(row, version) {
|
||||
if (!row || !version?.version_id) return;
|
||||
const template = row.dataset.restoreTemplate;
|
||||
@@ -1785,7 +1831,7 @@
|
||||
badge.textContent = `#${versionNumber}`;
|
||||
const title = document.createElement('div');
|
||||
title.className = 'fw-semibold small';
|
||||
const timestamp = entry.archived_at ? new Date(entry.archived_at).toLocaleString() : entry.version_id;
|
||||
const timestamp = (entry.archived_at || entry.last_modified) ? new Date(entry.archived_at || entry.last_modified).toLocaleString() : entry.version_id;
|
||||
title.textContent = timestamp;
|
||||
heading.appendChild(badge);
|
||||
heading.appendChild(title);
|
||||
@@ -2044,8 +2090,63 @@
|
||||
}
|
||||
};
|
||||
|
||||
let searchDebounceTimer = null;
|
||||
let searchAbortController = null;
|
||||
let searchResults = null;
|
||||
|
||||
const performServerSearch = async (term) => {
|
||||
if (searchAbortController) searchAbortController.abort();
|
||||
searchAbortController = new AbortController();
|
||||
|
||||
try {
|
||||
const params = new URLSearchParams({ q: term, limit: '500' });
|
||||
if (currentPrefix) params.set('prefix', currentPrefix);
|
||||
const searchUrl = objectsStreamUrl.replace('/stream', '/search');
|
||||
const response = await fetch(`${searchUrl}?${params}`, {
|
||||
signal: searchAbortController.signal
|
||||
});
|
||||
if (!response.ok) throw new Error(`HTTP ${response.status}`);
|
||||
const data = await response.json();
|
||||
searchResults = (data.results || []).map(obj => processStreamObject(obj));
|
||||
memoizedVisibleItems = null;
|
||||
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
refreshVirtualList();
|
||||
if (loadMoreStatus) {
|
||||
const countText = searchResults.length.toLocaleString();
|
||||
const truncated = data.truncated ? '+' : '';
|
||||
loadMoreStatus.textContent = `${countText}${truncated} result${searchResults.length !== 1 ? 's' : ''}`;
|
||||
}
|
||||
} catch (e) {
|
||||
if (e.name === 'AbortError') return;
|
||||
if (loadMoreStatus) {
|
||||
loadMoreStatus.textContent = 'Search failed';
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
document.getElementById('object-search')?.addEventListener('input', (event) => {
|
||||
currentFilterTerm = event.target.value.toLowerCase();
|
||||
const newTerm = event.target.value.toLowerCase();
|
||||
const wasFiltering = currentFilterTerm.length > 0;
|
||||
const isFiltering = newTerm.length > 0;
|
||||
currentFilterTerm = newTerm;
|
||||
|
||||
clearTimeout(searchDebounceTimer);
|
||||
|
||||
if (isFiltering) {
|
||||
searchDebounceTimer = setTimeout(() => performServerSearch(newTerm), 300);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!isFiltering && wasFiltering) {
|
||||
if (searchAbortController) searchAbortController.abort();
|
||||
searchResults = null;
|
||||
memoizedVisibleItems = null;
|
||||
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
if (loadMoreStatus) {
|
||||
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
|
||||
}
|
||||
}
|
||||
|
||||
updateFilterWarning();
|
||||
refreshVirtualList();
|
||||
});
|
||||
@@ -2086,7 +2187,18 @@
|
||||
var searchInput = document.getElementById('object-search');
|
||||
if (searchInput && document.activeElement === searchInput) {
|
||||
searchInput.value = '';
|
||||
const wasFiltering = currentFilterTerm.length > 0;
|
||||
currentFilterTerm = '';
|
||||
if (wasFiltering) {
|
||||
clearTimeout(searchDebounceTimer);
|
||||
if (searchAbortController) searchAbortController.abort();
|
||||
searchResults = null;
|
||||
memoizedVisibleItems = null;
|
||||
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
|
||||
if (loadMoreStatus) {
|
||||
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
|
||||
}
|
||||
}
|
||||
refreshVirtualList();
|
||||
searchInput.blur();
|
||||
}
|
||||
@@ -2816,7 +2928,16 @@
|
||||
uploadFileInput.value = '';
|
||||
}
|
||||
|
||||
loadObjects(false);
|
||||
const previousKey = activeRow?.dataset.key || null;
|
||||
loadObjects(false).then(() => {
|
||||
if (previousKey) {
|
||||
const newRow = document.querySelector(`[data-object-row][data-key="${CSS.escape(previousKey)}"]`);
|
||||
if (newRow) {
|
||||
selectRow(newRow);
|
||||
if (versioningEnabled) loadObjectVersions(newRow, { force: true });
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const successCount = uploadSuccessFiles.length;
|
||||
const errorCount = uploadErrorFiles.length;
|
||||
@@ -4154,6 +4275,47 @@
|
||||
var archivedCardEl = document.getElementById('archived-objects-card');
|
||||
if (archivedCardEl) {
|
||||
archivedCardEl.style.display = enabled ? '' : 'none';
|
||||
} else if (enabled) {
|
||||
var endpoint = window.BucketDetailConfig?.endpoints?.archivedObjects || '';
|
||||
if (endpoint) {
|
||||
var html = '<div class="card shadow-sm mt-4" id="archived-objects-card" data-archived-endpoint="' + endpoint + '">' +
|
||||
'<div class="card-header d-flex justify-content-between align-items-center flex-wrap gap-2">' +
|
||||
'<div class="d-flex align-items-center">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-warning me-2" viewBox="0 0 16 16">' +
|
||||
'<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
|
||||
'</svg><span class="fw-semibold">Archived Objects</span></div>' +
|
||||
'<div class="d-flex align-items-center gap-2">' +
|
||||
'<span class="badge text-bg-secondary" data-archived-count>0 items</span>' +
|
||||
'<button class="btn btn-outline-secondary btn-sm" type="button" data-archived-refresh>' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
|
||||
'<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>' +
|
||||
'<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>' +
|
||||
'</svg>Refresh</button></div></div>' +
|
||||
'<div class="card-body">' +
|
||||
'<p class="text-muted small mb-3">Objects that have been deleted while versioning is enabled. Their previous versions remain available until you restore or purge them.</p>' +
|
||||
'<div class="table-responsive"><table class="table table-sm table-hover align-middle mb-0">' +
|
||||
'<thead class="table-light"><tr>' +
|
||||
'<th scope="col"><svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 text-muted" viewBox="0 0 16 16">' +
|
||||
'<path d="M4 0h5.293A1 1 0 0 1 10 .293L13.707 4a1 1 0 0 1 .293.707V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2zm5.5 1.5v2a1 1 0 0 0 1 1h2l-3-3z"/>' +
|
||||
'</svg>Key</th>' +
|
||||
'<th scope="col">Latest Version</th>' +
|
||||
'<th scope="col" class="text-center">Versions</th>' +
|
||||
'<th scope="col" class="text-end">Actions</th>' +
|
||||
'</tr></thead>' +
|
||||
'<tbody data-archived-body><tr><td colspan="4" class="text-center text-muted py-4">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="mb-2 d-block mx-auto" viewBox="0 0 16 16">' +
|
||||
'<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
|
||||
'</svg>No archived objects</td></tr></tbody>' +
|
||||
'</table></div></div></div>';
|
||||
card.insertAdjacentHTML('afterend', html);
|
||||
archivedCard = document.getElementById('archived-objects-card');
|
||||
archivedBody = archivedCard.querySelector('[data-archived-body]');
|
||||
archivedCountBadge = archivedCard.querySelector('[data-archived-count]');
|
||||
archivedRefreshButton = archivedCard.querySelector('[data-archived-refresh]');
|
||||
archivedEndpoint = endpoint;
|
||||
archivedRefreshButton.addEventListener('click', function() { loadArchivedObjects(); });
|
||||
loadArchivedObjects();
|
||||
}
|
||||
}
|
||||
|
||||
var dropZone = document.getElementById('objects-drop-zone');
|
||||
@@ -4161,6 +4323,15 @@
|
||||
dropZone.setAttribute('data-versioning', enabled ? 'true' : 'false');
|
||||
}
|
||||
|
||||
var bulkPurgeWrap = document.getElementById('bulkDeletePurgeWrap');
|
||||
if (bulkPurgeWrap) {
|
||||
bulkPurgeWrap.classList.toggle('d-none', !enabled);
|
||||
}
|
||||
var singleDeleteVerWrap = document.getElementById('deleteObjectVersioningWrap');
|
||||
if (singleDeleteVerWrap) {
|
||||
singleDeleteVerWrap.classList.toggle('d-none', !enabled);
|
||||
}
|
||||
|
||||
if (!enabled) {
|
||||
var newForm = document.getElementById('enableVersioningForm');
|
||||
if (newForm) {
|
||||
|
||||
@@ -171,6 +171,7 @@
|
||||
data-bulk-download-endpoint="{{ url_for('ui.bulk_download_objects', bucket_name=bucket_name) }}"
|
||||
data-folders-url="{{ folders_url }}"
|
||||
data-buckets-for-copy-url="{{ buckets_for_copy_url }}"
|
||||
data-bucket-total-objects="{{ bucket_stats.get('objects', 0) }}"
|
||||
>
|
||||
<table class="table table-hover align-middle mb-0" id="objects-table" style="table-layout: fixed;">
|
||||
<thead class="table-light">
|
||||
@@ -2272,13 +2273,11 @@
|
||||
</div>
|
||||
<ul class="list-group mb-3" id="bulkDeleteList" style="max-height: 200px; overflow-y: auto;"></ul>
|
||||
<div class="text-muted small" id="bulkDeleteStatus"></div>
|
||||
{% if versioning_enabled %}
|
||||
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3">
|
||||
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3 {% if not versioning_enabled %}d-none{% endif %}" id="bulkDeletePurgeWrap">
|
||||
<input class="form-check-input" type="checkbox" id="bulkDeletePurge" />
|
||||
<label class="form-check-label" for="bulkDeletePurge">Also delete archived versions</label>
|
||||
<div class="form-text">Removes any archived versions stored in the archive.</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
@@ -2316,7 +2315,7 @@
|
||||
<div class="p-3 bg-body-tertiary rounded-3 mb-3">
|
||||
<code id="deleteObjectKey" class="d-block text-break"></code>
|
||||
</div>
|
||||
{% if versioning_enabled %}
|
||||
<div id="deleteObjectVersioningWrap" class="{% if not versioning_enabled %}d-none{% endif %}">
|
||||
<div class="alert alert-warning d-flex align-items-start small mb-3" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||
@@ -2328,7 +2327,7 @@
|
||||
<label class="form-check-label" for="deletePurgeVersions">Also delete all archived versions</label>
|
||||
<div class="form-text mb-0">Removes the live object and every stored version.</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
@@ -2771,7 +2770,8 @@
|
||||
window.BucketDetailConfig = {
|
||||
endpoints: {
|
||||
versioning: "{{ url_for('ui.update_bucket_versioning', bucket_name=bucket_name) }}",
|
||||
bucketsOverview: "{{ url_for('ui.buckets_overview') }}"
|
||||
bucketsOverview: "{{ url_for('ui.buckets_overview') }}",
|
||||
archivedObjects: "{{ url_for('ui.archived_objects', bucket_name=bucket_name) }}"
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -52,6 +52,11 @@
|
||||
<li><a href="#acls">Access Control Lists</a></li>
|
||||
<li><a href="#tagging">Object & Bucket Tagging</a></li>
|
||||
<li><a href="#website-hosting">Static Website Hosting</a></li>
|
||||
<li><a href="#cors-config">CORS Configuration</a></li>
|
||||
<li><a href="#post-object">PostObject (Form Upload)</a></li>
|
||||
<li><a href="#list-objects-v2">List Objects API v2</a></li>
|
||||
<li><a href="#upgrading">Upgrading & Updates</a></li>
|
||||
<li><a href="#api-matrix">Full API Reference</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -126,6 +131,11 @@ python run.py --mode ui
|
||||
<td><code>5000</code></td>
|
||||
<td>Listen port (UI uses 5100).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>DISPLAY_TIMEZONE</code></td>
|
||||
<td><code>UTC</code></td>
|
||||
<td>Timezone for UI timestamps (e.g., <code>US/Eastern</code>, <code>Asia/Tokyo</code>).</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">CORS Settings</td>
|
||||
</tr>
|
||||
@@ -187,6 +197,11 @@ python run.py --mode ui
|
||||
<td><code>100 per minute</code></td>
|
||||
<td>Rate limit for HEAD requests.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>RATE_LIMIT_ADMIN</code></td>
|
||||
<td><code>60 per minute</code></td>
|
||||
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Server Settings</td>
|
||||
</tr>
|
||||
@@ -338,6 +353,24 @@ python run.py --mode ui
|
||||
<td><code>604800</code></td>
|
||||
<td>Maximum presigned URL expiry time (7 days).</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Proxy & Network Settings</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>NUM_TRUSTED_PROXIES</code></td>
|
||||
<td><code>1</code></td>
|
||||
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
|
||||
<td>(empty)</td>
|
||||
<td>Comma-separated whitelist of safe redirect targets.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>ALLOW_INTERNAL_ENDPOINTS</code></td>
|
||||
<td><code>false</code></td>
|
||||
<td>Allow connections to internal/private IPs (webhooks, replication).</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Storage Limits</td>
|
||||
</tr>
|
||||
@@ -366,6 +399,16 @@ python run.py --mode ui
|
||||
<td><code>50</code></td>
|
||||
<td>Max lifecycle history records per bucket.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>OBJECT_CACHE_TTL</code></td>
|
||||
<td><code>60</code></td>
|
||||
<td>Seconds to cache object metadata.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>BULK_DOWNLOAD_MAX_BYTES</code></td>
|
||||
<td><code>1 GB</code></td>
|
||||
<td>Max total size for bulk ZIP downloads.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>ENCRYPTION_CHUNK_SIZE_BYTES</code></td>
|
||||
<td><code>65536</code></td>
|
||||
@@ -491,7 +534,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
|
||||
<ul>
|
||||
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders.</li>
|
||||
<li>Infinite scroll loads more objects automatically. Choose batch size (50–250) from the footer dropdown.</li>
|
||||
<li>Bulk select objects for multi-delete or multi-download. Filter by name using the search box.</li>
|
||||
<li>Bulk select objects for multi-delete or multi-download (ZIP archive, up to 1 GiB). Filter by name using the search box.</li>
|
||||
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
|
||||
</ul>
|
||||
</div>
|
||||
@@ -613,15 +656,75 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
|
||||
<td><code>/<bucket>/<key></code></td>
|
||||
<td>Delete an object.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>HEAD</td>
|
||||
<td><code>/<bucket></code></td>
|
||||
<td>Check if a bucket exists.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>HEAD</td>
|
||||
<td><code>/<bucket>/<key></code></td>
|
||||
<td>Get object metadata without downloading.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>POST</td>
|
||||
<td><code>/<bucket>?delete</code></td>
|
||||
<td>Bulk delete objects (XML body).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>?policy</code></td>
|
||||
<td>Bucket policy management.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT</td>
|
||||
<td><code>/<bucket>?versioning</code></td>
|
||||
<td>Versioning status.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>?lifecycle</code></td>
|
||||
<td>Lifecycle rules.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>?cors</code></td>
|
||||
<td>CORS configuration.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>?encryption</code></td>
|
||||
<td>Default encryption.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT</td>
|
||||
<td><code>/<bucket>?acl</code></td>
|
||||
<td>Bucket ACL.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>?tagging</code></td>
|
||||
<td>Bucket tags.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/<bucket>/<key>?tagging</code></td>
|
||||
<td>Object tags.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>POST</td>
|
||||
<td><code>/<bucket>/<key>?uploads</code></td>
|
||||
<td>Initiate multipart upload.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>POST</td>
|
||||
<td><code>/<bucket>/<key>?select</code></td>
|
||||
<td>SQL query (SelectObjectContent).</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<p class="small text-muted mt-3 mb-0">All responses include <code>X-Request-Id</code> for tracing. See the <a href="#api-matrix">Full API Reference</a> for the complete endpoint list. Logs land in <code>logs/api.log</code> and <code>logs/ui.log</code>.</p>
|
||||
</div>
|
||||
</article>
|
||||
<article id="examples" class="card shadow-sm docs-section">
|
||||
@@ -1311,6 +1414,10 @@ curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \
|
||||
<td><strong>KMS (SSE-KMS)</strong></td>
|
||||
<td>Encryption using customer-managed keys via the built-in KMS</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>SSE-C</strong></td>
|
||||
<td>Server-side encryption with customer-provided keys (per-request)</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -1377,6 +1484,54 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
|
||||
<p class="small text-muted mb-0">
|
||||
<strong>Envelope Encryption:</strong> Each object is encrypted with a unique Data Encryption Key (DEK). The DEK is then encrypted (wrapped) by the master key or KMS key and stored alongside the ciphertext. On read, the DEK is unwrapped and used to decrypt the object transparently.
|
||||
</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">SSE-C (Customer-Provided Keys)</h3>
|
||||
<p class="small text-muted">With SSE-C, you supply your own 256-bit AES key with each request. The server encrypts/decrypts using your key but never stores it. You must provide the same key for both upload and download.</p>
|
||||
<div class="table-responsive mb-3">
|
||||
<table class="table table-sm table-bordered small">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Header</th>
|
||||
<th>Value</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><code>x-amz-server-side-encryption-customer-algorithm</code></td>
|
||||
<td><code>AES256</code></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>x-amz-server-side-encryption-customer-key</code></td>
|
||||
<td>Base64-encoded 256-bit key</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>x-amz-server-side-encryption-customer-key-MD5</code></td>
|
||||
<td>Base64-encoded MD5 of the key</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<pre class="mb-3"><code class="language-bash"># Generate a 256-bit key
|
||||
KEY=$(openssl rand -base64 32)
|
||||
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
|
||||
|
||||
# Upload with SSE-C
|
||||
curl -X PUT "{{ api_base }}/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
|
||||
--data-binary @secret.txt
|
||||
|
||||
# Download with SSE-C (same key required)
|
||||
curl "{{ api_base }}/my-bucket/secret.txt" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
|
||||
-H "x-amz-server-side-encryption-customer-key: $KEY" \
|
||||
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"</code></pre>
|
||||
<div class="alert alert-light border mb-0 small">
|
||||
<strong>Note:</strong> SSE-C does not require <code>ENCRYPTION_ENABLED</code> or <code>KMS_ENABLED</code>. If you lose your key, the data is irrecoverable.
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="lifecycle" class="card shadow-sm docs-section">
|
||||
@@ -1926,7 +2081,7 @@ curl -X POST "{{ api_base }}/<bucket>/data.csv?select" \
|
||||
<span class="docs-section-kicker">22</span>
|
||||
<h2 class="h4 mb-0">Advanced S3 Operations</h2>
|
||||
</div>
|
||||
<p class="text-muted">Copy, move, and partially download objects using advanced S3 operations.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">CopyObject</h3>
|
||||
<pre class="mb-3"><code class="language-bash"># Copy within same bucket
|
||||
@@ -1941,6 +2096,13 @@ curl -X PUT "{{ api_base }}/<bucket>/file.txt" \
|
||||
-H "x-amz-metadata-directive: REPLACE" \
|
||||
-H "x-amz-meta-newkey: newvalue"</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">MoveObject (UI)</h3>
|
||||
<p class="small text-muted">Move an object to a different key or bucket via the UI. Performs a copy then deletes the source. Requires <code>read</code>+<code>delete</code> on source and <code>write</code> on destination.</p>
|
||||
<pre class="mb-3"><code class="language-bash"># Move via UI API (session-authenticated)
|
||||
curl -X POST "http://localhost:5100/ui/buckets/<bucket>/objects/<key>/move" \
|
||||
-H "Content-Type: application/json" --cookie "session=..." \
|
||||
-d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">UploadPartCopy</h3>
|
||||
<p class="small text-muted">Copy data from an existing object into a multipart upload part:</p>
|
||||
<pre class="mb-3"><code class="language-bash"># Copy bytes 0-10485759 from source as part 1
|
||||
@@ -2193,6 +2355,274 @@ server {
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="cors-config" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">26</span>
|
||||
<h2 class="h4 mb-0">CORS Configuration</h2>
|
||||
</div>
|
||||
<p class="text-muted">Configure per-bucket Cross-Origin Resource Sharing rules to control which origins can access your bucket from a browser.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Setting CORS Rules</h3>
|
||||
<pre class="mb-3"><code class="language-bash"># Set CORS configuration
|
||||
curl -X PUT "{{ api_base }}/<bucket>?cors" \
|
||||
-H "Content-Type: application/xml" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-d '<CORSConfiguration>
|
||||
<CORSRule>
|
||||
<AllowedOrigin>https://example.com</AllowedOrigin>
|
||||
<AllowedMethod>GET</AllowedMethod>
|
||||
<AllowedMethod>PUT</AllowedMethod>
|
||||
<AllowedHeader>*</AllowedHeader>
|
||||
<ExposeHeader>ETag</ExposeHeader>
|
||||
<MaxAgeSeconds>3600</MaxAgeSeconds>
|
||||
</CORSRule>
|
||||
</CORSConfiguration>'
|
||||
|
||||
# Get CORS configuration
|
||||
curl "{{ api_base }}/<bucket>?cors" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||
|
||||
# Delete CORS configuration
|
||||
curl -X DELETE "{{ api_base }}/<bucket>?cors" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Rule Fields</h3>
|
||||
<div class="table-responsive mb-0">
|
||||
<table class="table table-sm table-bordered small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Field</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><code>AllowedOrigin</code></td>
|
||||
<td>Origins allowed to make requests (supports <code>*</code> wildcard)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>AllowedMethod</code></td>
|
||||
<td>HTTP methods: <code>GET</code>, <code>PUT</code>, <code>POST</code>, <code>DELETE</code>, <code>HEAD</code></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>AllowedHeader</code></td>
|
||||
<td>Request headers allowed in preflight (supports <code>*</code>)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>ExposeHeader</code></td>
|
||||
<td>Response headers visible to the browser (e.g., <code>ETag</code>, <code>x-amz-request-id</code>)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>MaxAgeSeconds</code></td>
|
||||
<td>How long the browser caches preflight results</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="post-object" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">27</span>
|
||||
<h2 class="h4 mb-0">PostObject (HTML Form Upload)</h2>
|
||||
</div>
|
||||
<p class="text-muted">Upload objects directly from an HTML form using browser-based POST uploads with policy-based authorization.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Form Fields</h3>
|
||||
<div class="table-responsive mb-3">
|
||||
<table class="table table-sm table-bordered small">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Field</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><code>key</code></td><td>Object key (supports <code>${filename}</code> variable)</td></tr>
|
||||
<tr><td><code>file</code></td><td>The file to upload</td></tr>
|
||||
<tr><td><code>policy</code></td><td>Base64-encoded policy document (JSON)</td></tr>
|
||||
<tr><td><code>x-amz-signature</code></td><td>HMAC-SHA256 signature of the policy</td></tr>
|
||||
<tr><td><code>x-amz-credential</code></td><td>Access key / date / region / s3 / aws4_request</td></tr>
|
||||
<tr><td><code>x-amz-algorithm</code></td><td><code>AWS4-HMAC-SHA256</code></td></tr>
|
||||
<tr><td><code>x-amz-date</code></td><td>ISO 8601 date (e.g., <code>20250101T000000Z</code>)</td></tr>
|
||||
<tr><td><code>Content-Type</code></td><td>MIME type of the uploaded file</td></tr>
|
||||
<tr><td><code>x-amz-meta-*</code></td><td>Custom metadata headers</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Simple Upload (No Signing)</h3>
|
||||
<pre class="mb-3"><code class="language-html"><form action="{{ api_base }}/my-bucket" method="POST" enctype="multipart/form-data">
|
||||
<input type="hidden" name="key" value="uploads/${filename}">
|
||||
<input type="file" name="file">
|
||||
<button type="submit">Upload</button>
|
||||
</form></code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Signed Upload (With Policy)</h3>
|
||||
<p class="small text-muted mb-0">For authenticated uploads, include a base64-encoded policy and SigV4 signature fields. The policy constrains allowed keys, content types, and size limits. See docs.md Section 20 for full signing examples.</p>
|
||||
</div>
|
||||
</article>
|
||||
<article id="list-objects-v2" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">28</span>
|
||||
<h2 class="h4 mb-0">List Objects API v2</h2>
|
||||
</div>
|
||||
<p class="text-muted">Use the v2 list API for improved pagination with continuation tokens instead of markers.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Usage</h3>
|
||||
<pre class="mb-3"><code class="language-bash"># List with v2 API
|
||||
curl "{{ api_base }}/<bucket>?list-type=2&prefix=logs/&delimiter=/&max-keys=100" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||
|
||||
# Paginate with continuation token
|
||||
curl "{{ api_base }}/<bucket>?list-type=2&continuation-token=<token>" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||
|
||||
# Start listing after a specific key
|
||||
curl "{{ api_base }}/<bucket>?list-type=2&start-after=photos/2025/" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Query Parameters</h3>
|
||||
<div class="table-responsive mb-0">
|
||||
<table class="table table-sm table-bordered small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Parameter</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td><code>list-type=2</code></td><td>Enables v2 API (required)</td></tr>
|
||||
<tr><td><code>prefix</code></td><td>Filter to keys starting with this prefix</td></tr>
|
||||
<tr><td><code>delimiter</code></td><td>Group keys by delimiter (typically <code>/</code> for folders)</td></tr>
|
||||
<tr><td><code>max-keys</code></td><td>Maximum objects to return (default 1000)</td></tr>
|
||||
<tr><td><code>continuation-token</code></td><td>Token from previous response for pagination</td></tr>
|
||||
<tr><td><code>start-after</code></td><td>Start listing after this key (first page only)</td></tr>
|
||||
<tr><td><code>fetch-owner</code></td><td>Include owner info in response</td></tr>
|
||||
<tr><td><code>encoding-type</code></td><td>Set to <code>url</code> to URL-encode keys in response</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="upgrading" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">29</span>
|
||||
<h2 class="h4 mb-0">Upgrading & Updates</h2>
|
||||
</div>
|
||||
<p class="text-muted">How to safely update MyFSIO to a new version.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Pre-Update Backup</h3>
|
||||
<p class="small text-muted">Always back up before updating:</p>
|
||||
<pre class="mb-3"><code class="language-bash"># Back up configuration
|
||||
cp -r data/.myfsio.sys/config/ config-backup/
|
||||
|
||||
# Back up data (optional, for critical deployments)
|
||||
tar czf myfsio-backup-$(date +%Y%m%d).tar.gz data/
|
||||
|
||||
# Back up logs
|
||||
cp -r logs/ logs-backup/</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Update Procedure</h3>
|
||||
<ol class="docs-steps mb-3">
|
||||
<li><strong>Stop the service:</strong> <code>sudo systemctl stop myfsio</code> (or kill the process)</li>
|
||||
<li><strong>Pull new version:</strong> <code>git pull origin main</code> or download the new binary</li>
|
||||
<li><strong>Install dependencies:</strong> <code>pip install -r requirements.txt</code></li>
|
||||
<li><strong>Validate config:</strong> <code>python run.py --check-config</code></li>
|
||||
<li><strong>Start the service:</strong> <code>sudo systemctl start myfsio</code></li>
|
||||
<li><strong>Verify:</strong> <code>curl http://localhost:5000/myfsio/health</code></li>
|
||||
</ol>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Rollback</h3>
|
||||
<p class="small text-muted mb-0">If something goes wrong, stop the service, restore the backed-up config and data directories, then restart with the previous binary or code version. See <code>docs.md</code> Section 4 for detailed rollback procedures including blue-green deployment strategies.</p>
|
||||
</div>
|
||||
</article>
|
||||
<article id="api-matrix" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">30</span>
|
||||
<h2 class="h4 mb-0">Full API Reference</h2>
|
||||
</div>
|
||||
<p class="text-muted">Complete list of all S3-compatible, admin, and KMS endpoints.</p>
|
||||
<pre class="mb-0"><code class="language-text"># Service
|
||||
GET /myfsio/health # Health check
|
||||
|
||||
# Bucket Operations
|
||||
GET / # List buckets
|
||||
PUT /<bucket> # Create bucket
|
||||
DELETE /<bucket> # Delete bucket
|
||||
GET /<bucket> # List objects (?list-type=2)
|
||||
HEAD /<bucket> # Check bucket exists
|
||||
POST /<bucket> # POST object / form upload
|
||||
POST /<bucket>?delete # Bulk delete
|
||||
|
||||
# Bucket Configuration
|
||||
GET|PUT|DELETE /<bucket>?policy # Bucket policy
|
||||
GET|PUT /<bucket>?quota # Bucket quota
|
||||
GET|PUT /<bucket>?versioning # Versioning
|
||||
GET|PUT|DELETE /<bucket>?lifecycle # Lifecycle rules
|
||||
GET|PUT|DELETE /<bucket>?cors # CORS config
|
||||
GET|PUT|DELETE /<bucket>?encryption # Default encryption
|
||||
GET|PUT /<bucket>?acl # Bucket ACL
|
||||
GET|PUT|DELETE /<bucket>?tagging # Bucket tags
|
||||
GET|PUT|DELETE /<bucket>?replication # Replication rules
|
||||
GET|PUT /<bucket>?logging # Access logging
|
||||
GET|PUT /<bucket>?notification # Event notifications
|
||||
GET|PUT /<bucket>?object-lock # Object lock config
|
||||
GET|PUT|DELETE /<bucket>?website # Static website
|
||||
GET /<bucket>?uploads # List multipart uploads
|
||||
GET /<bucket>?versions # List object versions
|
||||
GET /<bucket>?location # Bucket region
|
||||
|
||||
# Object Operations
|
||||
PUT /<bucket>/<key> # Upload object
|
||||
GET /<bucket>/<key> # Download (Range supported)
|
||||
DELETE /<bucket>/<key> # Delete object
|
||||
HEAD /<bucket>/<key> # Object metadata
|
||||
POST /<bucket>/<key>?select # SQL query (SelectObjectContent)
|
||||
|
||||
# Object Configuration
|
||||
GET|PUT|DELETE /<bucket>/<key>?tagging # Object tags
|
||||
GET|PUT /<bucket>/<key>?acl # Object ACL
|
||||
GET|PUT /<bucket>/<key>?retention # Object retention
|
||||
GET|PUT /<bucket>/<key>?legal-hold # Legal hold
|
||||
|
||||
# Multipart Upload
|
||||
POST /<bucket>/<key>?uploads # Initiate
|
||||
PUT /<bucket>/<key>?uploadId=X&partNumber=N # Upload part
|
||||
POST /<bucket>/<key>?uploadId=X # Complete
|
||||
DELETE /<bucket>/<key>?uploadId=X # Abort
|
||||
GET /<bucket>/<key>?uploadId=X # List parts
|
||||
|
||||
# Copy (via x-amz-copy-source header)
|
||||
PUT /<bucket>/<key> # CopyObject
|
||||
PUT /<bucket>/<key>?uploadId&partNumber # UploadPartCopy
|
||||
|
||||
# Admin API
|
||||
GET|PUT /admin/site # Local site config
|
||||
GET /admin/sites # List peers
|
||||
POST /admin/sites # Register peer
|
||||
GET|PUT|DELETE /admin/sites/<id> # Manage peer
|
||||
GET /admin/sites/<id>/health # Peer health
|
||||
GET /admin/topology # Cluster topology
|
||||
GET|POST|PUT|DELETE /admin/website-domains # Domain mappings
|
||||
|
||||
# KMS API
|
||||
GET|POST /kms/keys # List / Create keys
|
||||
GET|DELETE /kms/keys/<id> # Get / Delete key
|
||||
POST /kms/keys/<id>/enable # Enable key
|
||||
POST /kms/keys/<id>/disable # Disable key
|
||||
POST /kms/keys/<id>/rotate # Rotate key
|
||||
POST /kms/encrypt # Encrypt data
|
||||
POST /kms/decrypt # Decrypt data
|
||||
POST /kms/generate-data-key # Generate data key
|
||||
POST /kms/generate-random # Generate random bytes</code></pre>
|
||||
</div>
|
||||
</article>
|
||||
</div>
|
||||
<div class="col-xl-4 docs-sidebar-col">
|
||||
<aside class="card shadow-sm docs-sidebar">
|
||||
@@ -2224,6 +2654,11 @@ server {
|
||||
<li><a href="#acls">Access Control Lists</a></li>
|
||||
<li><a href="#tagging">Object & Bucket Tagging</a></li>
|
||||
<li><a href="#website-hosting">Static Website Hosting</a></li>
|
||||
<li><a href="#cors-config">CORS Configuration</a></li>
|
||||
<li><a href="#post-object">PostObject (Form Upload)</a></li>
|
||||
<li><a href="#list-objects-v2">List Objects API v2</a></li>
|
||||
<li><a href="#upgrading">Upgrading & Updates</a></li>
|
||||
<li><a href="#api-matrix">Full API Reference</a></li>
|
||||
</ul>
|
||||
<div class="docs-sidebar-callouts">
|
||||
<div>
|
||||
|
||||
Reference in New Issue
Block a user