21 Commits

Author SHA1 Message Date
0462a7b62e MyFSIO v0.3.0 Release
Reviewed-on: #22
2026-02-22 10:22:35 +00:00
52660570c1 Merge pull request 'MyFSIO v0.2.9 Release' (#21) from next into main
Reviewed-on: #21
2026-02-15 14:24:14 +00:00
35f61313e0 MyFSIO v0.2.8 Release
Reviewed-on: #20
2026-02-10 14:16:22 +00:00
c470cfb576 MyFSIO v0.2.7 Release
Reviewed-on: #19
2026-02-09 12:22:37 +00:00
d96955deee MyFSIO v0.2.6 Release
Reviewed-on: #18
2026-02-05 16:18:03 +00:00
85181f0be6 Merge pull request 'MyFSIO v0.2.5 Release' (#17) from next into main
Reviewed-on: #17
2026-02-02 05:32:02 +00:00
d5ca7a8be1 Merge pull request 'MyFSIO v0.2.4 Release' (#16) from next into main
Reviewed-on: #16
2026-02-01 10:27:11 +00:00
476dc79e42 MyFSIO v0.2.3 Release
Reviewed-on: #15
2026-01-25 06:05:53 +00:00
bb6590fc5e Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main
Reviewed-on: #14
2026-01-19 07:12:15 +00:00
899db3421b Merge pull request 'MyFSIO v0.2.1 Release' (#13) from next into main
Reviewed-on: #13
2026-01-12 08:03:29 +00:00
caf01d6ada Merge pull request 'MyFSIO v0.2.0 Release' (#12) from next into main
Reviewed-on: #12
2026-01-05 15:48:03 +00:00
bb366cb4cd Merge pull request 'MyFSIO v0.1.9 Release' (#10) from next into main
Reviewed-on: #10
2025-12-29 06:49:48 +00:00
a2745ff2ee Merge pull request 'MyFSIO v0.1.8 Release' (#9) from next into main
Reviewed-on: #9
2025-12-23 06:01:32 +00:00
28cb656d94 Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main
Reviewed-on: #8
2025-12-22 03:10:35 +00:00
3c44152fc6 Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main
Reviewed-on: #7
2025-12-21 06:30:21 +00:00
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main
Reviewed-on: #6
2025-12-13 15:41:03 +00:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main
Reviewed-on: #5
2025-12-13 08:22:43 +00:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main
Reviewed-on: #4
2025-12-03 04:14:57 +00:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main
Reviewed-on: #3
2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main
Reviewed-on: #2
2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main
Reviewed-on: #1
2025-11-22 12:31:27 +00:00
14 changed files with 224 additions and 2188 deletions

View File

@@ -115,7 +115,7 @@ def create_app(
storage = ObjectStorage(
Path(app.config["STORAGE_ROOT"]),
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),

View File

@@ -241,7 +241,7 @@ class AppConfig:
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))
object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 5))
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"

View File

@@ -189,13 +189,7 @@ class EncryptedObjectStorage:
def list_objects(self, bucket_name: str, **kwargs):
return self.storage.list_objects(bucket_name, **kwargs)
def list_objects_shallow(self, bucket_name: str, **kwargs):
return self.storage.list_objects_shallow(bucket_name, **kwargs)
def search_objects(self, bucket_name: str, query: str, **kwargs):
return self.storage.search_objects(bucket_name, query, **kwargs)
def list_objects_all(self, bucket_name: str):
return self.storage.list_objects_all(bucket_name)

View File

@@ -293,7 +293,9 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
raise IamError("Required headers not signed")
canonical_uri = _get_canonical_uri(req)
payload_hash = req.headers.get("X-Amz-Content-Sha256") or "UNSIGNED-PAYLOAD"
payload_hash = req.headers.get("X-Amz-Content-Sha256")
if not payload_hash:
payload_hash = hashlib.sha256(req.get_data()).hexdigest()
if _HAS_RUST:
query_params = list(req.args.items(multi=True))
@@ -303,10 +305,16 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
header_values, payload_hash, amz_date, date_stamp, region,
service, secret_key, signature,
):
if current_app.config.get("DEBUG_SIGV4"):
logger.warning("SigV4 signature mismatch for %s %s", req.method, req.path)
raise IamError("SignatureDoesNotMatch")
else:
method = req.method
query_args = sorted(req.args.items(multi=True), key=lambda x: (x[0], x[1]))
query_args = []
for key, value in req.args.items(multi=True):
query_args.append((key, value))
query_args.sort(key=lambda x: (x[0], x[1]))
canonical_query_parts = []
for k, v in query_args:
canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
@@ -331,6 +339,8 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
string_to_sign = f"AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n{hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()}"
calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
if not hmac.compare_digest(calculated_signature, signature):
if current_app.config.get("DEBUG_SIGV4"):
logger.warning("SigV4 signature mismatch for %s %s", method, req.path)
raise IamError("SignatureDoesNotMatch")
session_token = req.headers.get("X-Amz-Security-Token")
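For reference, the `signing_key` consumed by the pure-Python branch above follows the standard AWS SigV4 derivation chain. The helper itself is not shown in this diff; a minimal sketch (function name hypothetical):

```python
import hashlib
import hmac

def _derive_sigv4_key(secret_key: str, date_stamp: str, region: str, service: str) -> bytes:
    """Standard AWS SigV4 key derivation: a four-step HMAC-SHA256 chain."""
    k_date = hmac.new(("AWS4" + secret_key).encode("utf-8"),
                      date_stamp.encode("utf-8"), hashlib.sha256).digest()
    k_region = hmac.new(k_date, region.encode("utf-8"), hashlib.sha256).digest()
    k_service = hmac.new(k_region, service.encode("utf-8"), hashlib.sha256).digest()
    return hmac.new(k_service, b"aws4_request", hashlib.sha256).digest()
```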
@@ -672,7 +682,7 @@ def _extract_request_metadata() -> Dict[str, str]:
for header, value in request.headers.items():
if header.lower().startswith("x-amz-meta-"):
key = header[11:]
if key and not (key.startswith("__") and key.endswith("__")):
if key:
metadata[key] = value
return metadata
@@ -1029,8 +1039,6 @@ def _apply_object_headers(
response.headers["ETag"] = f'"{etag}"'
response.headers["Accept-Ranges"] = "bytes"
for key, value in (metadata or {}).items():
if key.startswith("__") and key.endswith("__"):
continue
safe_value = _sanitize_header_value(str(value))
response.headers[f"X-Amz-Meta-{key}"] = safe_value
@@ -2459,7 +2467,7 @@ def _post_object(bucket_name: str) -> Response:
for field_name, value in request.form.items():
if field_name.lower().startswith("x-amz-meta-"):
key = field_name[11:]
if key and not (key.startswith("__") and key.endswith("__")):
if key:
metadata[key] = value
try:
meta = storage.put_object(bucket_name, object_key, file.stream, metadata=metadata or None)
@@ -2663,43 +2671,54 @@ def bucket_handler(bucket_name: str) -> Response:
else:
effective_start = marker
fetch_keys = max_keys * 10 if delimiter else max_keys
try:
if delimiter:
shallow_result = storage.list_objects_shallow(
bucket_name,
prefix=prefix,
delimiter=delimiter,
max_keys=max_keys,
continuation_token=effective_start or None,
)
objects = shallow_result.objects
common_prefixes = shallow_result.common_prefixes
is_truncated = shallow_result.is_truncated
next_marker = shallow_result.next_continuation_token or ""
next_continuation_token = ""
if is_truncated and next_marker and list_type == "2":
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
else:
list_result = storage.list_objects(
bucket_name,
max_keys=max_keys,
continuation_token=effective_start or None,
prefix=prefix or None,
)
objects = list_result.objects
common_prefixes = []
is_truncated = list_result.is_truncated
next_marker = ""
next_continuation_token = ""
if is_truncated:
if objects:
next_marker = objects[-1].key
if list_type == "2" and next_marker:
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
list_result = storage.list_objects(
bucket_name,
max_keys=fetch_keys,
continuation_token=effective_start or None,
prefix=prefix or None,
)
objects = list_result.objects
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
common_prefixes: list[str] = []
filtered_objects: list = []
if delimiter:
seen_prefixes: set[str] = set()
for obj in objects:
key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
if delimiter in key_after_prefix:
common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter
if common_prefix not in seen_prefixes:
seen_prefixes.add(common_prefix)
common_prefixes.append(common_prefix)
else:
filtered_objects.append(obj)
objects = filtered_objects
common_prefixes = sorted(common_prefixes)
total_items = len(objects) + len(common_prefixes)
is_truncated = total_items > max_keys or list_result.is_truncated
if len(objects) >= max_keys:
objects = objects[:max_keys]
common_prefixes = []
else:
remaining = max_keys - len(objects)
common_prefixes = common_prefixes[:remaining]
next_marker = ""
next_continuation_token = ""
if is_truncated:
if objects:
next_marker = objects[-1].key
elif common_prefixes:
next_marker = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
if list_type == "2" and next_marker:
next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
if list_type == "2":
root = Element("ListBucketResult")
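For orientation, the delimiter grouping in the handler above implements standard S3 ListObjects semantics: everything between the prefix and the first delimiter collapses into a common prefix. A tiny illustration with hypothetical keys:

```python
# Keys in the bucket, listed with prefix="" and delimiter="/":
keys = ["docs/a.txt", "docs/sub/b.txt", "readme.md"]

# Keys containing the delimiter after the prefix collapse into one
# CommonPrefix; keys without it are returned as objects.
# -> objects:         ["readme.md"]
# -> common prefixes: ["docs/"]   (covers both docs/a.txt and docs/sub/b.txt)
```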
@@ -3437,8 +3456,8 @@ def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response:
if validation_error:
return _error_response("InvalidArgument", validation_error, 400)
else:
metadata = {k: v for k, v in source_metadata.items() if not (k.startswith("__") and k.endswith("__"))}
metadata = source_metadata
try:
with source_path.open("rb") as stream:
meta = storage.put_object(

View File

@@ -245,7 +245,6 @@ def stream_objects_ndjson(
url_templates: dict[str, str],
display_tz: str = "UTC",
versioning_enabled: bool = False,
delimiter: Optional[str] = None,
) -> Generator[str, None, None]:
meta_line = json.dumps({
"type": "meta",
@@ -259,20 +258,11 @@ def stream_objects_ndjson(
kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
if prefix:
kwargs["Prefix"] = prefix
if delimiter:
kwargs["Delimiter"] = delimiter
running_count = 0
try:
paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(**kwargs):
for cp in page.get("CommonPrefixes", []):
yield json.dumps({
"type": "folder",
"prefix": cp["Prefix"],
}) + "\n"
page_contents = page.get("Contents", [])
for obj in page_contents:
for obj in page.get("Contents", []):
last_mod = obj["LastModified"]
yield json.dumps({
"type": "object",
@@ -283,8 +273,6 @@ def stream_objects_ndjson(
"last_modified_iso": format_datetime_iso(last_mod, display_tz),
"etag": obj.get("ETag", "").strip('"'),
}) + "\n"
running_count += len(page_contents)
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
except ClientError as exc:
error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
yield json.dumps({"type": "error", "error": error_msg}) + "\n"
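A client consuming this stream reads one JSON document per line and dispatches on the `type` field. A sketch using `requests` (the endpoint URL and session cookie are placeholders, not confirmed by this diff):

```python
import json
import requests

# Stream the listing; each NDJSON line is a self-contained JSON document.
resp = requests.get(
    "http://localhost:5100/ui/buckets/my-bucket/objects/stream",  # hypothetical path
    cookies={"session": "..."},
    stream=True,
)
for line in resp.iter_lines():
    if not line:
        continue
    event = json.loads(line)
    if event["type"] == "object":
        print(event["key"], event["size"])
    elif event["type"] == "error":
        raise RuntimeError(event["error"])
    # "meta" (and, before this release, "folder"/"count") events carry
    # listing context rather than object rows.
```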

View File

@@ -16,7 +16,7 @@ from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path, PurePosixPath
from pathlib import Path
from typing import Any, BinaryIO, Dict, Generator, List, Optional
try:
@@ -154,15 +154,6 @@ class ListObjectsResult:
total_count: Optional[int] = None
@dataclass
class ShallowListResult:
"""Result for delimiter-aware directory-level listing."""
objects: List[ObjectMeta]
common_prefixes: List[str]
is_truncated: bool
next_continuation_token: Optional[str]
def _utcnow() -> datetime:
return datetime.now(timezone.utc)
@@ -288,47 +279,25 @@ class ObjectStorage:
version_count = 0
version_bytes = 0
internal = self.INTERNAL_FOLDERS
bucket_str = str(bucket_path)
try:
if _HAS_RUST:
versions_root = str(self._bucket_versions_root(bucket_name))
object_count, total_bytes, version_count, version_bytes = _rc.bucket_stats_scan(
bucket_str, versions_root
)
else:
stack = [bucket_str]
while stack:
current = stack.pop()
try:
with os.scandir(current) as it:
for entry in it:
if current == bucket_str and entry.name in internal:
continue
if entry.is_dir(follow_symlinks=False):
stack.append(entry.path)
elif entry.is_file(follow_symlinks=False):
object_count += 1
total_bytes += entry.stat(follow_symlinks=False).st_size
except PermissionError:
for path in bucket_path.rglob("*"):
if path.is_file():
rel = path.relative_to(bucket_path)
if not rel.parts:
continue
top_folder = rel.parts[0]
if top_folder not in self.INTERNAL_FOLDERS:
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
v_stack = [str(versions_root)]
while v_stack:
v_current = v_stack.pop()
try:
with os.scandir(v_current) as it:
for entry in it:
if entry.is_dir(follow_symlinks=False):
v_stack.append(entry.path)
elif entry.is_file(follow_symlinks=False) and entry.name.endswith(".bin"):
version_count += 1
version_bytes += entry.stat(follow_symlinks=False).st_size
except PermissionError:
continue
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
for path in versions_root.rglob("*.bin"):
if path.is_file():
stat = path.stat()
version_count += 1
version_bytes += stat.st_size
except OSError:
if cached_stats is not None:
return cached_stats
@@ -408,18 +377,9 @@ class ObjectStorage:
raise StorageError("Bucket contains archived object versions")
if has_multipart:
raise StorageError("Bucket has active multipart uploads")
bucket_id = bucket_path.name
self._remove_tree(bucket_path)
self._remove_tree(self._system_bucket_root(bucket_id))
self._remove_tree(self._multipart_bucket_root(bucket_id))
self._bucket_config_cache.pop(bucket_id, None)
with self._cache_lock:
self._object_cache.pop(bucket_id, None)
self._cache_version.pop(bucket_id, None)
self._sorted_key_cache.pop(bucket_id, None)
stale = [k for k in self._meta_read_cache if k[0] == bucket_id]
for k in stale:
del self._meta_read_cache[k]
self._remove_tree(self._system_bucket_root(bucket_path.name))
self._remove_tree(self._multipart_bucket_root(bucket_path.name))
def list_objects(
self,
@@ -502,317 +462,6 @@ class ObjectStorage:
result = self.list_objects(bucket_name, max_keys=100000)
return result.objects
def list_objects_shallow(
self,
bucket_name: str,
*,
prefix: str = "",
delimiter: str = "/",
max_keys: int = 1000,
continuation_token: Optional[str] = None,
) -> ShallowListResult:
import bisect
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
raise BucketNotFoundError("Bucket does not exist")
bucket_id = bucket_path.name
if delimiter != "/" or (prefix and not prefix.endswith(delimiter)):
return self._shallow_via_full_scan(
bucket_name, prefix=prefix, delimiter=delimiter,
max_keys=max_keys, continuation_token=continuation_token,
)
target_dir = bucket_path
if prefix:
safe_prefix_path = Path(prefix.rstrip("/"))
if ".." in safe_prefix_path.parts:
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
target_dir = bucket_path / safe_prefix_path
try:
resolved = target_dir.resolve()
bucket_resolved = bucket_path.resolve()
if not str(resolved).startswith(str(bucket_resolved) + os.sep) and resolved != bucket_resolved:
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
except (OSError, ValueError):
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
if not target_dir.exists() or not target_dir.is_dir():
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
if etag_index_path.exists():
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
pass
entries_files: list[tuple[str, int, float, Optional[str]]] = []
entries_dirs: list[str] = []
if _HAS_RUST:
try:
raw = _rc.shallow_scan(str(target_dir), prefix, json.dumps(meta_cache))
entries_files = []
for key, size, mtime, etag in raw["files"]:
if etag is None:
safe_key = PurePosixPath(key)
meta = self._read_metadata(bucket_id, Path(safe_key))
etag = meta.get("__etag__") if meta else None
entries_files.append((key, size, mtime, etag))
entries_dirs = raw["dirs"]
all_items = raw["merged_keys"]
except OSError:
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
else:
try:
with os.scandir(str(target_dir)) as it:
for entry in it:
name = entry.name
if name in self.INTERNAL_FOLDERS:
continue
if entry.is_dir(follow_symlinks=False):
cp = prefix + name + delimiter
entries_dirs.append(cp)
elif entry.is_file(follow_symlinks=False):
key = prefix + name
try:
st = entry.stat()
etag = meta_cache.get(key)
if etag is None:
safe_key = PurePosixPath(key)
meta = self._read_metadata(bucket_id, Path(safe_key))
etag = meta.get("__etag__") if meta else None
entries_files.append((key, st.st_size, st.st_mtime, etag))
except OSError:
pass
except OSError:
return ShallowListResult(
objects=[], common_prefixes=[],
is_truncated=False, next_continuation_token=None,
)
entries_dirs.sort()
entries_files.sort(key=lambda x: x[0])
all_items: list[tuple[str, bool]] = []
fi, di = 0, 0
while fi < len(entries_files) and di < len(entries_dirs):
if entries_files[fi][0] <= entries_dirs[di]:
all_items.append((entries_files[fi][0], False))
fi += 1
else:
all_items.append((entries_dirs[di], True))
di += 1
while fi < len(entries_files):
all_items.append((entries_files[fi][0], False))
fi += 1
while di < len(entries_dirs):
all_items.append((entries_dirs[di], True))
di += 1
files_map = {e[0]: e for e in entries_files}
start_index = 0
if continuation_token:
all_keys = [item[0] for item in all_items]
start_index = bisect.bisect_right(all_keys, continuation_token)
selected = all_items[start_index:start_index + max_keys]
is_truncated = (start_index + max_keys) < len(all_items)
result_objects: list[ObjectMeta] = []
result_prefixes: list[str] = []
for item_key, is_dir in selected:
if is_dir:
result_prefixes.append(item_key)
else:
fdata = files_map[item_key]
result_objects.append(ObjectMeta(
key=fdata[0],
size=fdata[1],
last_modified=datetime.fromtimestamp(fdata[2], timezone.utc),
etag=fdata[3],
metadata=None,
))
next_token = None
if is_truncated and selected:
next_token = selected[-1][0]
return ShallowListResult(
objects=result_objects,
common_prefixes=result_prefixes,
is_truncated=is_truncated,
next_continuation_token=next_token,
)
def _shallow_via_full_scan(
self,
bucket_name: str,
*,
prefix: str = "",
delimiter: str = "/",
max_keys: int = 1000,
continuation_token: Optional[str] = None,
) -> ShallowListResult:
list_result = self.list_objects(
bucket_name,
max_keys=max_keys * 10,
continuation_token=continuation_token,
prefix=prefix or None,
)
common_prefixes: list[str] = []
filtered_objects: list[ObjectMeta] = []
seen_prefixes: set[str] = set()
for obj in list_result.objects:
key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
if delimiter in key_after_prefix:
cp = prefix + key_after_prefix.split(delimiter)[0] + delimiter
if cp not in seen_prefixes:
seen_prefixes.add(cp)
common_prefixes.append(cp)
else:
filtered_objects.append(obj)
common_prefixes.sort()
total_items = len(filtered_objects) + len(common_prefixes)
is_truncated = total_items > max_keys or list_result.is_truncated
if len(filtered_objects) >= max_keys:
filtered_objects = filtered_objects[:max_keys]
common_prefixes = []
else:
remaining = max_keys - len(filtered_objects)
common_prefixes = common_prefixes[:remaining]
next_token = None
if is_truncated:
if filtered_objects:
next_token = filtered_objects[-1].key
elif common_prefixes:
next_token = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
return ShallowListResult(
objects=filtered_objects,
common_prefixes=common_prefixes,
is_truncated=is_truncated,
next_continuation_token=next_token,
)
def search_objects(
self,
bucket_name: str,
query: str,
*,
prefix: str = "",
limit: int = 500,
) -> Dict[str, Any]:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.is_dir():
raise BucketNotFoundError("Bucket does not exist")
if prefix:
search_root = bucket_path / prefix.replace("/", os.sep)
if not search_root.is_dir():
return {"results": [], "truncated": False}
resolved = search_root.resolve()
if not str(resolved).startswith(str(bucket_path.resolve())):
return {"results": [], "truncated": False}
else:
search_root = bucket_path
if _HAS_RUST:
raw = _rc.search_objects_scan(
str(bucket_path), str(search_root), query, limit
)
results = [
{
"key": k,
"size": s,
"last_modified": datetime.fromtimestamp(
m, tz=timezone.utc
).strftime("%Y-%m-%dT%H:%M:%S.000Z"),
}
for k, s, m in raw["results"]
]
return {"results": results, "truncated": raw["truncated"]}
query_lower = query.lower()
results: list[Dict[str, Any]] = []
internal = self.INTERNAL_FOLDERS
bucket_str = str(bucket_path)
bucket_len = len(bucket_str) + 1
meta_root = self._bucket_meta_root(bucket_name)
scan_limit = limit * 4
matched = 0
scanned = 0
search_str = str(search_root)
stack = [search_str]
while stack:
current = stack.pop()
try:
with os.scandir(current) as it:
for entry in it:
if current == bucket_str and entry.name in internal:
continue
if entry.is_dir(follow_symlinks=False):
stack.append(entry.path)
elif entry.is_file(follow_symlinks=False):
scanned += 1
key = entry.path[bucket_len:].replace(os.sep, "/")
if query_lower in key.lower():
st = entry.stat(follow_symlinks=False)
meta_path = meta_root / (key + ".meta.json")
last_modified = ""
try:
if meta_path.exists():
md = json.loads(meta_path.read_text(encoding="utf-8"))
last_modified = md.get("last_modified", "")
except (OSError, json.JSONDecodeError):
pass
if not last_modified:
last_modified = datetime.fromtimestamp(
st.st_mtime, tz=timezone.utc
).strftime("%Y-%m-%dT%H:%M:%S.000Z")
results.append({
"key": key,
"size": st.st_size,
"last_modified": last_modified,
})
matched += 1
if matched >= scan_limit:
break
except PermissionError:
continue
if matched >= scan_limit:
break
results.sort(key=lambda r: r["key"])
truncated = len(results) > limit
return {"results": results[:limit], "truncated": truncated}
def put_object(
self,
bucket_name: str,
@@ -844,9 +493,10 @@ class ObjectStorage:
tmp_path = tmp_dir / f"{uuid.uuid4().hex}.tmp"
try:
checksum = hashlib.md5()
with tmp_path.open("wb") as target:
shutil.copyfileobj(stream, target)
shutil.copyfileobj(_HashingReader(stream, checksum), target)
new_size = tmp_path.stat().st_size
size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1
@@ -864,20 +514,8 @@ class ObjectStorage:
quota_check["usage"],
)
if _HAS_RUST:
etag = _rc.md5_file(str(tmp_path))
else:
checksum = hashlib.md5()
with tmp_path.open("rb") as f:
while True:
chunk = f.read(1048576)
if not chunk:
break
checksum.update(chunk)
etag = checksum.hexdigest()
shutil.move(str(tmp_path), str(destination))
finally:
try:
tmp_path.unlink(missing_ok=True)
@@ -885,6 +523,7 @@ class ObjectStorage:
pass
stat = destination.stat()
etag = checksum.hexdigest()
internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
combined_meta = {**internal_meta, **(metadata or {})}
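The new code computes the MD5 during the single `copyfileobj` pass by wrapping the incoming stream in a `_HashingReader`, instead of re-reading the temp file afterwards. The wrapper itself is not shown in this diff; a minimal sketch of what such a class needs:

```python
import hashlib
import shutil

class _HashingReader:
    """File-like adapter that feeds every chunk read through a hash object.

    shutil.copyfileobj only ever calls read(size), so that is the whole
    interface. (Sketch; the real class in MyFSIO may differ.)
    """

    def __init__(self, stream, hasher):
        self._stream = stream
        self._hasher = hasher

    def read(self, size=-1):
        chunk = self._stream.read(size)
        if chunk:
            self._hasher.update(chunk)
        return chunk

# Usage mirroring the diff:
#   checksum = hashlib.md5()
#   with tmp_path.open("wb") as target:
#       shutil.copyfileobj(_HashingReader(stream, checksum), target)
#   etag = checksum.hexdigest()
```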
@@ -1323,19 +962,13 @@ class ObjectStorage:
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
etag = self._compute_etag(destination)
internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
combined_meta = {**internal_meta, **(metadata or {})}
self._write_metadata(bucket_id, safe_key, combined_meta)
obj_meta = ObjectMeta(
return ObjectMeta(
key=safe_key.as_posix(),
size=stat.st_size,
last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
etag=etag,
etag=self._compute_etag(destination),
metadata=metadata or None,
)
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), obj_meta)
return obj_meta
def delete_object_version(self, bucket_name: str, object_key: str, version_id: str) -> None:
bucket_path = self._bucket_path(bucket_name)
@@ -1475,24 +1108,14 @@ class ObjectStorage:
if not upload_root.exists():
raise StorageError("Multipart upload not found")
checksum = hashlib.md5()
part_filename = f"part-{part_number:05d}.part"
part_path = upload_root / part_filename
temp_path = upload_root / f".{part_filename}.tmp"
try:
with temp_path.open("wb") as target:
shutil.copyfileobj(stream, target)
if _HAS_RUST:
part_etag = _rc.md5_file(str(temp_path))
else:
checksum = hashlib.md5()
with temp_path.open("rb") as f:
while True:
chunk = f.read(1048576)
if not chunk:
break
checksum.update(chunk)
part_etag = checksum.hexdigest()
shutil.copyfileobj(_HashingReader(stream, checksum), target)
temp_path.replace(part_path)
except OSError:
try:
@@ -1502,7 +1125,7 @@ class ObjectStorage:
raise
record = {
"etag": part_etag,
"etag": checksum.hexdigest(),
"size": part_path.stat().st_size,
"filename": part_filename,
}
@@ -1902,41 +1525,21 @@ class ObjectStorage:
return list(self._build_object_cache(bucket_path).keys())
def _build_object_cache(self, bucket_path: Path) -> Dict[str, ObjectMeta]:
"""Build a complete object metadata cache for a bucket.
Uses os.scandir for fast directory walking and a persistent etag index.
"""
from concurrent.futures import ThreadPoolExecutor
bucket_id = bucket_path.name
objects: Dict[str, ObjectMeta] = {}
bucket_str = str(bucket_path)
bucket_len = len(bucket_str) + 1
if _HAS_RUST:
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
raw = _rc.build_object_cache(
bucket_str,
str(self._bucket_meta_root(bucket_id)),
str(etag_index_path),
)
if raw["etag_cache_changed"] and raw["etag_cache"]:
try:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(raw["etag_cache"], f)
except OSError:
pass
for key, size, mtime, etag in raw["objects"]:
objects[key] = ObjectMeta(
key=key,
size=size,
last_modified=datetime.fromtimestamp(mtime, timezone.utc),
etag=etag,
metadata=None,
)
return objects
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
index_mtime: float = 0
if etag_index_path.exists():
try:
index_mtime = etag_index_path.stat().st_mtime
@@ -1944,10 +1547,10 @@ class ObjectStorage:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
meta_cache = {}
meta_root = self._bucket_meta_root(bucket_id)
needs_rebuild = False
if meta_root.exists() and index_mtime > 0:
def check_newer(dir_path: str) -> bool:
try:
@@ -1965,7 +1568,7 @@ class ObjectStorage:
needs_rebuild = check_newer(str(meta_root))
elif not meta_cache:
needs_rebuild = True
if needs_rebuild and meta_root.exists():
meta_str = str(meta_root)
meta_len = len(meta_str) + 1
@@ -2046,7 +1649,7 @@ class ObjectStorage:
json.dump(meta_cache, f)
except OSError:
pass
def scan_dir(dir_path: str) -> None:
try:
with os.scandir(dir_path) as it:
@@ -2061,11 +1664,11 @@ class ObjectStorage:
first_part = rel.split(os.sep)[0] if os.sep in rel else rel
if first_part in self.INTERNAL_FOLDERS:
continue
key = rel.replace(os.sep, '/')
try:
stat = entry.stat()
etag = meta_cache.get(key)
objects[key] = ObjectMeta(
@@ -2073,13 +1676,13 @@ class ObjectStorage:
size=stat.st_size,
last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
etag=etag,
metadata=None,
metadata=None,
)
except OSError:
pass
except OSError:
pass
scan_dir(bucket_str)
return objects
@@ -2163,6 +1766,11 @@ class ObjectStorage:
return 0
def _update_object_cache_entry(self, bucket_id: str, key: str, meta: Optional[ObjectMeta]) -> None:
"""Update a single entry in the object cache instead of invalidating the whole cache.
This is a performance optimization - lazy update instead of full invalidation.
Cross-process invalidation is handled by checking stats.json mtime.
"""
with self._cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
@@ -2174,24 +1782,6 @@ class ObjectStorage:
self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1
self._sorted_key_cache.pop(bucket_id, None)
self._update_etag_index(bucket_id, key, meta.etag if meta else None)
def _update_etag_index(self, bucket_id: str, key: str, etag: Optional[str]) -> None:
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
if not etag_index_path.exists():
return
try:
with open(etag_index_path, 'r', encoding='utf-8') as f:
index = json.load(f)
if etag is None:
index.pop(key, None)
else:
index[key] = etag
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(index, f)
except (OSError, json.JSONDecodeError):
pass
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
"""Pre-warm the object cache for specified buckets or all buckets.
@@ -2242,40 +1832,30 @@ class ObjectStorage:
def _read_bucket_config(self, bucket_name: str) -> dict[str, Any]:
now = time.time()
config_path = self._bucket_config_path(bucket_name)
cached = self._bucket_config_cache.get(bucket_name)
if cached:
config, cached_time, cached_mtime = cached
config, cached_time = cached
if now - cached_time < self._bucket_config_cache_ttl:
try:
current_mtime = config_path.stat().st_mtime if config_path.exists() else 0.0
except OSError:
current_mtime = 0.0
if current_mtime == cached_mtime:
return config.copy()
return config.copy()
config_path = self._bucket_config_path(bucket_name)
if not config_path.exists():
self._bucket_config_cache[bucket_name] = ({}, now, 0.0)
self._bucket_config_cache[bucket_name] = ({}, now)
return {}
try:
data = json.loads(config_path.read_text(encoding="utf-8"))
config = data if isinstance(data, dict) else {}
mtime = config_path.stat().st_mtime
self._bucket_config_cache[bucket_name] = (config, now, mtime)
self._bucket_config_cache[bucket_name] = (config, now)
return config.copy()
except (OSError, json.JSONDecodeError):
self._bucket_config_cache[bucket_name] = ({}, now, 0.0)
self._bucket_config_cache[bucket_name] = ({}, now)
return {}
def _write_bucket_config(self, bucket_name: str, payload: dict[str, Any]) -> None:
config_path = self._bucket_config_path(bucket_name)
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps(payload), encoding="utf-8")
try:
mtime = config_path.stat().st_mtime
except OSError:
mtime = 0.0
self._bucket_config_cache[bucket_name] = (payload.copy(), time.time(), mtime)
self._bucket_config_cache[bucket_name] = (payload.copy(), time.time())
def _set_bucket_config_entry(self, bucket_name: str, key: str, value: Any | None) -> None:
config = self._read_bucket_config(bucket_name)
@@ -2364,18 +1944,15 @@ class ObjectStorage:
index_path, entry_name = self._index_file_for_key(bucket_name, key)
lock = self._get_meta_index_lock(str(index_path))
with lock:
if _HAS_RUST:
_rc.write_index_entry(str(index_path), entry_name, json.dumps(entry))
else:
index_path.parent.mkdir(parents=True, exist_ok=True)
index_data: Dict[str, Any] = {}
if index_path.exists():
try:
index_data = json.loads(index_path.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
pass
index_data[entry_name] = entry
index_path.write_text(json.dumps(index_data), encoding="utf-8")
index_path.parent.mkdir(parents=True, exist_ok=True)
index_data: Dict[str, Any] = {}
if index_path.exists():
try:
index_data = json.loads(index_path.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
pass
index_data[entry_name] = entry
index_path.write_text(json.dumps(index_data), encoding="utf-8")
self._invalidate_meta_read_cache(bucket_name, key)
def _delete_index_entry(self, bucket_name: str, key: Path) -> None:
@@ -2385,23 +1962,20 @@ class ObjectStorage:
return
lock = self._get_meta_index_lock(str(index_path))
with lock:
if _HAS_RUST:
_rc.delete_index_entry(str(index_path), entry_name)
else:
try:
index_data = json.loads(index_path.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
self._invalidate_meta_read_cache(bucket_name, key)
return
if entry_name in index_data:
del index_data[entry_name]
if index_data:
index_path.write_text(json.dumps(index_data), encoding="utf-8")
else:
try:
index_path.unlink()
except OSError:
pass
try:
index_data = json.loads(index_path.read_text(encoding="utf-8"))
except (OSError, json.JSONDecodeError):
self._invalidate_meta_read_cache(bucket_name, key)
return
if entry_name in index_data:
del index_data[entry_name]
if index_data:
index_path.write_text(json.dumps(index_data), encoding="utf-8")
else:
try:
index_path.unlink()
except OSError:
pass
self._invalidate_meta_read_cache(bucket_name, key)
def _normalize_metadata(self, metadata: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
@@ -2499,24 +2073,15 @@ class ObjectStorage:
continue
def _check_bucket_contents(self, bucket_path: Path) -> tuple[bool, bool, bool]:
bucket_name = bucket_path.name
if _HAS_RUST:
return _rc.check_bucket_contents(
str(bucket_path),
[
str(self._bucket_versions_root(bucket_name)),
str(self._legacy_versions_root(bucket_name)),
],
[
str(self._multipart_bucket_root(bucket_name)),
str(self._legacy_multipart_bucket_root(bucket_name)),
],
)
"""Check bucket for objects, versions, and multipart uploads in a single pass.
Returns (has_visible_objects, has_archived_versions, has_active_multipart_uploads).
Uses early exit when all three are found.
"""
has_objects = False
has_versions = False
has_multipart = False
bucket_name = bucket_path.name
for path in bucket_path.rglob("*"):
if has_objects:

View File

@@ -616,7 +616,6 @@ def stream_bucket_objects(bucket_name: str):
return jsonify({"error": str(exc)}), 403
prefix = request.args.get("prefix") or None
delimiter = request.args.get("delimiter") or None
try:
client = get_session_s3_client()
@@ -630,7 +629,6 @@ def stream_bucket_objects(bucket_name: str):
return Response(
stream_objects_ndjson(
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
delimiter=delimiter,
),
mimetype='application/x-ndjson',
headers={
@@ -641,33 +639,6 @@ def stream_bucket_objects(bucket_name: str):
)
@ui_bp.get("/buckets/<bucket_name>/objects/search")
@limiter.limit("30 per minute")
def search_bucket_objects(bucket_name: str):
principal = _current_principal()
try:
_authorize_ui(principal, bucket_name, "list")
except IamError as exc:
return jsonify({"error": str(exc)}), 403
query = request.args.get("q", "").strip()
if not query:
return jsonify({"results": [], "truncated": False})
try:
limit = max(1, min(int(request.args.get("limit", 500)), 1000))
except (ValueError, TypeError):
limit = 500
prefix = request.args.get("prefix", "").strip()
storage = _storage()
try:
return jsonify(storage.search_objects(bucket_name, query, prefix=prefix, limit=limit))
except StorageError as exc:
return jsonify({"error": str(exc)}), 404
@ui_bp.post("/buckets/<bucket_name>/upload")
@limiter.limit("30 per minute")
def upload_object(bucket_name: str):
@@ -1330,14 +1301,12 @@ def object_versions(bucket_name: str, object_key: str):
for v in resp.get("Versions", []):
if v.get("Key") != object_key:
continue
if v.get("IsLatest", False):
continue
versions.append({
"version_id": v.get("VersionId", ""),
"last_modified": v["LastModified"].isoformat() if v.get("LastModified") else None,
"size": v.get("Size", 0),
"etag": v.get("ETag", "").strip('"'),
"is_latest": False,
"is_latest": v.get("IsLatest", False),
})
return jsonify({"versions": versions})
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.3.4"
APP_VERSION = "0.3.0"
def get_version() -> str:

docs.md
View File

@@ -139,7 +139,6 @@ All configuration is done via environment variables. The table below lists every
| `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. |
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
| `DISPLAY_TIMEZONE` | `UTC` | Timezone for timestamps in the web UI (e.g., `US/Eastern`, `Asia/Tokyo`). |
### IAM & Security
@@ -171,7 +170,6 @@ All configuration is done via environment variables. The table below lists every
| `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/<bucket>`). |
| `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `/<bucket>/<key>`). |
| `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). |
| `RATE_LIMIT_ADMIN` | `60 per minute` | Rate limit for admin API endpoints (`/admin/*`). |
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
### Server Configuration
@@ -258,12 +256,6 @@ Once enabled, configure lifecycle rules via:
| `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. |
| `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. |
| `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. |
| `BULK_DOWNLOAD_MAX_BYTES` | `1073741824` (1 GiB) | Maximum total size for bulk ZIP downloads. |
| `OBJECT_CACHE_TTL` | `60` | Seconds to cache object metadata. |
#### Gzip Compression
API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-compressed when the client sends `Accept-Encoding: gzip`. Compression activates for responses larger than 500 bytes and is handled by a WSGI middleware (`app/compression.py`). Binary object downloads and streaming responses are never compressed. No configuration is needed.
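A middleware of the kind described wraps the WSGI app and gzips eligible responses. A simplified sketch of the pattern (the real `app/compression.py` may stream rather than buffer, and handle more content types):

```python
import gzip

class GzipMiddleware:
    """Simplified WSGI gzip middleware sketch: buffers the body, compresses
    compressible content types above a size threshold."""

    COMPRESSIBLE = ("application/json", "application/xml", "text/html",
                    "text/css", "application/javascript")
    MIN_SIZE = 500  # bytes, per the docs above

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        if "gzip" not in environ.get("HTTP_ACCEPT_ENCODING", ""):
            return self.app(environ, start_response)

        captured = {}

        def capture(status, headers, exc_info=None):
            captured["status"], captured["headers"] = status, headers

        body = b"".join(self.app(environ, capture))
        headers = dict(captured["headers"])
        ctype = headers.get("Content-Type", "").split(";")[0]
        if ctype not in self.COMPRESSIBLE or len(body) <= self.MIN_SIZE:
            start_response(captured["status"], captured["headers"])
            return [body]

        compressed = gzip.compress(body)
        headers["Content-Encoding"] = "gzip"
        headers["Content-Length"] = str(len(compressed))
        start_response(captured["status"], list(headers.items()))
        return [compressed]
```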
### Server Settings
@@ -293,12 +285,6 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure
The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint.
| Variable | Default | Notes |
| --- | --- | --- |
| `NUM_TRUSTED_PROXIES` | `1` | Number of trusted reverse proxies for `X-Forwarded-*` header processing. |
| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. |
| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** |
## 4. Upgrading and Updates
### Version Checking
@@ -926,7 +912,7 @@ Objects with forward slashes (`/`) in their keys are displayed as a folder hiera
- Select multiple objects using checkboxes
- **Bulk Delete**: Delete multiple objects at once
- **Bulk Download**: Download selected objects as a single ZIP archive (up to `BULK_DOWNLOAD_MAX_BYTES`, default 1 GiB)
- **Bulk Download**: Download selected objects as individual files
#### Search & Filter
@@ -999,7 +985,6 @@ MyFSIO supports **server-side encryption at rest** to protect your data. When en
|------|-------------|
| **AES-256 (SSE-S3)** | Server-managed encryption using a local master key |
| **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS |
| **SSE-C** | Server-side encryption with customer-provided keys (per-request) |
### Enabling Encryption
@@ -1098,44 +1083,6 @@ encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key)
decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key)
```
### SSE-C (Customer-Provided Keys)
With SSE-C, you provide your own 256-bit AES encryption key with each request. The server encrypts/decrypts using your key but never stores it. You must supply the same key for both upload and download.
**Required headers:**
| Header | Value |
|--------|-------|
| `x-amz-server-side-encryption-customer-algorithm` | `AES256` |
| `x-amz-server-side-encryption-customer-key` | Base64-encoded 256-bit key |
| `x-amz-server-side-encryption-customer-key-MD5` | Base64-encoded MD5 of the key |
```bash
# Generate a 256-bit key
KEY=$(openssl rand -base64 32)
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
# Upload with SSE-C
curl -X PUT "http://localhost:5000/my-bucket/secret.txt" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
-H "x-amz-server-side-encryption-customer-key: $KEY" \
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
--data-binary @secret.txt
# Download with SSE-C (same key required)
curl "http://localhost:5000/my-bucket/secret.txt" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
-H "x-amz-server-side-encryption-customer-key: $KEY" \
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"
```
**Key points:**
- SSE-C does not require `ENCRYPTION_ENABLED` or `KMS_ENABLED` — the key is provided per-request
- If you lose your key, the data is irrecoverable
- The MD5 header is optional but recommended for integrity verification
### Important Notes
- **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted
@@ -2012,20 +1959,6 @@ curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
-H "x-amz-meta-newkey: newvalue"
```
### MoveObject (UI)
Move an object to a different key or bucket. This is a UI-only convenience operation that performs a copy followed by a delete of the source. Requires `read` and `delete` on the source, and `write` on the destination.
```bash
# Move via UI API
curl -X POST "http://localhost:5100/ui/buckets/my-bucket/objects/old-path/file.txt/move" \
-H "Content-Type: application/json" \
--cookie "session=..." \
-d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'
```
The move is non-destructive from the caller's perspective: if the copy succeeds but the delete of the source fails, the object exists in both locations (no data loss).
### UploadPartCopy
Copy data from an existing object into a multipart upload part:
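The docs excerpt is cut off here. For illustration only, the standard S3 call sequence looks like this with boto3 (a sketch against the MyFSIO endpoint; bucket and key names are hypothetical):

```python
import boto3

s3 = boto3.client("s3", endpoint_url="http://localhost:5000")  # MyFSIO API

upload = s3.create_multipart_upload(Bucket="my-bucket", Key="big-file.bin")
part = s3.upload_part_copy(
    Bucket="my-bucket",
    Key="big-file.bin",
    UploadId=upload["UploadId"],
    PartNumber=1,
    CopySource={"Bucket": "source-bucket", "Key": "existing-object.bin"},
)
etag = part["CopyPartResult"]["ETag"]  # used later in CompleteMultipartUpload
```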

View File

@@ -1,7 +1,6 @@
mod hashing;
mod metadata;
mod sigv4;
mod storage;
mod validation;
use pyo3::prelude::*;
@@ -30,14 +29,6 @@ mod myfsio_core {
m.add_function(wrap_pyfunction!(metadata::read_index_entry, m)?)?;
m.add_function(wrap_pyfunction!(storage::write_index_entry, m)?)?;
m.add_function(wrap_pyfunction!(storage::delete_index_entry, m)?)?;
m.add_function(wrap_pyfunction!(storage::check_bucket_contents, m)?)?;
m.add_function(wrap_pyfunction!(storage::shallow_scan, m)?)?;
m.add_function(wrap_pyfunction!(storage::bucket_stats_scan, m)?)?;
m.add_function(wrap_pyfunction!(storage::search_objects_scan, m)?)?;
m.add_function(wrap_pyfunction!(storage::build_object_cache, m)?)?;
Ok(())
}
}

View File

@@ -1,817 +0,0 @@
use pyo3::exceptions::PyIOError;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyList, PyString, PyTuple};
use serde_json::Value;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::time::SystemTime;
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
fn system_time_to_epoch(t: SystemTime) -> f64 {
t.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_secs_f64())
.unwrap_or(0.0)
}
fn extract_etag_from_meta_bytes(content: &[u8]) -> Option<String> {
let marker = b"\"__etag__\"";
let idx = content.windows(marker.len()).position(|w| w == marker)?;
let after = &content[idx + marker.len()..];
let start = after.iter().position(|&b| b == b'"')? + 1;
let rest = &after[start..];
let end = rest.iter().position(|&b| b == b'"')?;
std::str::from_utf8(&rest[..end]).ok().map(|s| s.to_owned())
}
fn has_any_file(root: &str) -> bool {
let root_path = Path::new(root);
if !root_path.is_dir() {
return false;
}
let mut stack = vec![root_path.to_path_buf()];
while let Some(current) = stack.pop() {
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_file() {
return true;
}
if ft.is_dir() && !ft.is_symlink() {
stack.push(entry.path());
}
}
}
false
}
#[pyfunction]
pub fn write_index_entry(
py: Python<'_>,
path: &str,
entry_name: &str,
entry_data_json: &str,
) -> PyResult<()> {
let path_owned = path.to_owned();
let entry_owned = entry_name.to_owned();
let data_owned = entry_data_json.to_owned();
py.detach(move || -> PyResult<()> {
let entry_value: Value = serde_json::from_str(&data_owned)
.map_err(|e| PyIOError::new_err(format!("Failed to parse entry data: {}", e)))?;
if let Some(parent) = Path::new(&path_owned).parent() {
let _ = fs::create_dir_all(parent);
}
let mut index_data: serde_json::Map<String, Value> = match fs::read_to_string(&path_owned)
{
Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
Err(_) => serde_json::Map::new(),
};
index_data.insert(entry_owned, entry_value);
let serialized = serde_json::to_string(&Value::Object(index_data))
.map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;
fs::write(&path_owned, serialized)
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
Ok(())
})
}
#[pyfunction]
pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyResult<bool> {
let path_owned = path.to_owned();
let entry_owned = entry_name.to_owned();
py.detach(move || -> PyResult<bool> {
let content = match fs::read_to_string(&path_owned) {
Ok(c) => c,
Err(_) => return Ok(false),
};
let mut index_data: serde_json::Map<String, Value> =
match serde_json::from_str(&content) {
Ok(v) => v,
Err(_) => return Ok(false),
};
if index_data.remove(&entry_owned).is_none() {
return Ok(false);
}
if index_data.is_empty() {
let _ = fs::remove_file(&path_owned);
return Ok(true);
}
let serialized = serde_json::to_string(&Value::Object(index_data))
.map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;
fs::write(&path_owned, serialized)
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
Ok(false)
})
}
#[pyfunction]
pub fn check_bucket_contents(
py: Python<'_>,
bucket_path: &str,
version_roots: Vec<String>,
multipart_roots: Vec<String>,
) -> PyResult<(bool, bool, bool)> {
let bucket_owned = bucket_path.to_owned();
py.detach(move || -> PyResult<(bool, bool, bool)> {
let mut has_objects = false;
let bucket_p = Path::new(&bucket_owned);
if bucket_p.is_dir() {
let mut stack = vec![bucket_p.to_path_buf()];
'obj_scan: while let Some(current) = stack.pop() {
let is_root = current == bucket_p;
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if is_root {
if let Some(name) = entry.file_name().to_str() {
if INTERNAL_FOLDERS.contains(&name) {
continue;
}
}
}
if ft.is_file() && !ft.is_symlink() {
has_objects = true;
break 'obj_scan;
}
if ft.is_dir() && !ft.is_symlink() {
stack.push(entry.path());
}
}
}
}
let mut has_versions = false;
for root in &version_roots {
if has_versions {
break;
}
has_versions = has_any_file(root);
}
let mut has_multipart = false;
for root in &multipart_roots {
if has_multipart {
break;
}
has_multipart = has_any_file(root);
}
Ok((has_objects, has_versions, has_multipart))
})
}
#[pyfunction]
pub fn shallow_scan(
py: Python<'_>,
target_dir: &str,
prefix: &str,
meta_cache_json: &str,
) -> PyResult<Py<PyAny>> {
let target_owned = target_dir.to_owned();
let prefix_owned = prefix.to_owned();
let cache_owned = meta_cache_json.to_owned();
let result: (
Vec<(String, u64, f64, Option<String>)>,
Vec<String>,
Vec<(String, bool)>,
) = py.detach(move || -> PyResult<(
Vec<(String, u64, f64, Option<String>)>,
Vec<String>,
Vec<(String, bool)>,
)> {
let meta_cache: HashMap<String, String> =
serde_json::from_str(&cache_owned).unwrap_or_default();
let mut files: Vec<(String, u64, f64, Option<String>)> = Vec::new();
let mut dirs: Vec<String> = Vec::new();
let entries = match fs::read_dir(&target_owned) {
Ok(e) => e,
Err(_) => return Ok((files, dirs, Vec::new())),
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let name = match entry.file_name().into_string() {
Ok(n) => n,
Err(_) => continue,
};
if INTERNAL_FOLDERS.contains(&name.as_str()) {
continue;
}
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
let cp = format!("{}{}/", prefix_owned, name);
dirs.push(cp);
} else if ft.is_file() && !ft.is_symlink() {
let key = format!("{}{}", prefix_owned, name);
let md = match entry.metadata() {
Ok(m) => m,
Err(_) => continue,
};
let size = md.len();
let mtime = md
.modified()
.map(system_time_to_epoch)
.unwrap_or(0.0);
let etag = meta_cache.get(&key).cloned();
files.push((key, size, mtime, etag));
}
}
files.sort_by(|a, b| a.0.cmp(&b.0));
dirs.sort();
let mut merged: Vec<(String, bool)> = Vec::with_capacity(files.len() + dirs.len());
let mut fi = 0;
let mut di = 0;
while fi < files.len() && di < dirs.len() {
if files[fi].0 <= dirs[di] {
merged.push((files[fi].0.clone(), false));
fi += 1;
} else {
merged.push((dirs[di].clone(), true));
di += 1;
}
}
while fi < files.len() {
merged.push((files[fi].0.clone(), false));
fi += 1;
}
while di < dirs.len() {
merged.push((dirs[di].clone(), true));
di += 1;
}
Ok((files, dirs, merged))
})?;
let (files, dirs, merged) = result;
let dict = PyDict::new(py);
let files_list = PyList::empty(py);
for (key, size, mtime, etag) in &files {
let etag_py: Py<PyAny> = match etag {
Some(e) => PyString::new(py, e).into_any().unbind(),
None => py.None(),
};
let tuple = PyTuple::new(py, &[
PyString::new(py, key).into_any().unbind(),
size.into_pyobject(py)?.into_any().unbind(),
mtime.into_pyobject(py)?.into_any().unbind(),
etag_py,
])?;
files_list.append(tuple)?;
}
dict.set_item("files", files_list)?;
let dirs_list = PyList::empty(py);
for d in &dirs {
dirs_list.append(PyString::new(py, d))?;
}
dict.set_item("dirs", dirs_list)?;
let merged_list = PyList::empty(py);
for (key, is_dir) in &merged {
let bool_obj: Py<PyAny> = if *is_dir {
true.into_pyobject(py)?.to_owned().into_any().unbind()
} else {
false.into_pyobject(py)?.to_owned().into_any().unbind()
};
let tuple = PyTuple::new(py, &[
PyString::new(py, key).into_any().unbind(),
bool_obj,
])?;
merged_list.append(tuple)?;
}
dict.set_item("merged_keys", merged_list)?;
Ok(dict.into_any().unbind())
}
#[pyfunction]
pub fn bucket_stats_scan(
py: Python<'_>,
bucket_path: &str,
versions_root: &str,
) -> PyResult<(u64, u64, u64, u64)> {
let bucket_owned = bucket_path.to_owned();
let versions_owned = versions_root.to_owned();
py.detach(move || -> PyResult<(u64, u64, u64, u64)> {
let mut object_count: u64 = 0;
let mut total_bytes: u64 = 0;
let bucket_p = Path::new(&bucket_owned);
if bucket_p.is_dir() {
let mut stack = vec![bucket_p.to_path_buf()];
while let Some(current) = stack.pop() {
let is_root = current == bucket_p;
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
if is_root {
if let Some(name) = entry.file_name().to_str() {
if INTERNAL_FOLDERS.contains(&name) {
continue;
}
}
}
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
stack.push(entry.path());
} else if ft.is_file() && !ft.is_symlink() {
object_count += 1;
if let Ok(md) = entry.metadata() {
total_bytes += md.len();
}
}
}
}
}
let mut version_count: u64 = 0;
let mut version_bytes: u64 = 0;
let versions_p = Path::new(&versions_owned);
if versions_p.is_dir() {
let mut stack = vec![versions_p.to_path_buf()];
while let Some(current) = stack.pop() {
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
stack.push(entry.path());
} else if ft.is_file() && !ft.is_symlink() {
if let Some(name) = entry.file_name().to_str() {
if name.ends_with(".bin") {
version_count += 1;
if let Ok(md) = entry.metadata() {
version_bytes += md.len();
}
}
}
}
}
}
}
Ok((object_count, total_bytes, version_count, version_bytes))
})
}
#[pyfunction]
#[pyo3(signature = (bucket_path, search_root, query, limit))]
pub fn search_objects_scan(
py: Python<'_>,
bucket_path: &str,
search_root: &str,
query: &str,
limit: usize,
) -> PyResult<Py<PyAny>> {
let bucket_owned = bucket_path.to_owned();
let search_owned = search_root.to_owned();
let query_owned = query.to_owned();
let result: (Vec<(String, u64, f64)>, bool) = py.detach(
move || -> PyResult<(Vec<(String, u64, f64)>, bool)> {
let query_lower = query_owned.to_lowercase();
let bucket_len = bucket_owned.len() + 1;
let scan_limit = limit * 4;
let mut matched: usize = 0;
let mut results: Vec<(String, u64, f64)> = Vec::new();
let search_p = Path::new(&search_owned);
if !search_p.is_dir() {
return Ok((results, false));
}
let bucket_p = Path::new(&bucket_owned);
let mut stack = vec![search_p.to_path_buf()];
'scan: while let Some(current) = stack.pop() {
let is_bucket_root = current == bucket_p;
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
if is_bucket_root {
if let Some(name) = entry.file_name().to_str() {
if INTERNAL_FOLDERS.contains(&name) {
continue;
}
}
}
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
stack.push(entry.path());
} else if ft.is_file() && !ft.is_symlink() {
let full_path = entry.path();
let full_str = full_path.to_string_lossy();
if full_str.len() <= bucket_len {
continue;
}
let key = full_str[bucket_len..].replace('\\', "/");
if key.to_lowercase().contains(&query_lower) {
if let Ok(md) = entry.metadata() {
let size = md.len();
let mtime = md
.modified()
.map(system_time_to_epoch)
.unwrap_or(0.0);
results.push((key, size, mtime));
matched += 1;
}
}
if matched >= scan_limit {
break 'scan;
}
}
}
}
results.sort_by(|a, b| a.0.cmp(&b.0));
let truncated = results.len() > limit;
results.truncate(limit);
Ok((results, truncated))
},
)?;
let (results, truncated) = result;
let dict = PyDict::new(py);
let results_list = PyList::empty(py);
for (key, size, mtime) in &results {
let tuple = PyTuple::new(py, &[
PyString::new(py, key).into_any().unbind(),
size.into_pyobject(py)?.into_any().unbind(),
mtime.into_pyobject(py)?.into_any().unbind(),
])?;
results_list.append(tuple)?;
}
dict.set_item("results", results_list)?;
dict.set_item("truncated", truncated)?;
Ok(dict.into_any().unbind())
}
#[pyfunction]
pub fn build_object_cache(
py: Python<'_>,
bucket_path: &str,
meta_root: &str,
etag_index_path: &str,
) -> PyResult<Py<PyAny>> {
let bucket_owned = bucket_path.to_owned();
let meta_owned = meta_root.to_owned();
let index_path_owned = etag_index_path.to_owned();
let result: (HashMap<String, String>, Vec<(String, u64, f64, Option<String>)>, bool) =
py.detach(move || -> PyResult<(
HashMap<String, String>,
Vec<(String, u64, f64, Option<String>)>,
bool,
)> {
let mut meta_cache: HashMap<String, String> = HashMap::new();
let mut index_mtime: f64 = 0.0;
let mut etag_cache_changed = false;
let index_p = Path::new(&index_path_owned);
if index_p.is_file() {
if let Ok(md) = fs::metadata(&index_path_owned) {
index_mtime = md
.modified()
.map(system_time_to_epoch)
.unwrap_or(0.0);
}
if let Ok(content) = fs::read_to_string(&index_path_owned) {
if let Ok(parsed) = serde_json::from_str::<HashMap<String, String>>(&content) {
meta_cache = parsed;
}
}
}
let meta_p = Path::new(&meta_owned);
let mut needs_rebuild = false;
if meta_p.is_dir() && index_mtime > 0.0 {
fn check_newer(dir: &Path, index_mtime: f64) -> bool {
let entries = match fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return false,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
if check_newer(&entry.path(), index_mtime) {
return true;
}
} else if ft.is_file() {
if let Some(name) = entry.file_name().to_str() {
if name.ends_with(".meta.json") || name == "_index.json" {
if let Ok(md) = entry.metadata() {
let mt = md
.modified()
.map(system_time_to_epoch)
.unwrap_or(0.0);
if mt > index_mtime {
return true;
}
}
}
}
}
}
false
}
needs_rebuild = check_newer(meta_p, index_mtime);
} else if meta_cache.is_empty() {
needs_rebuild = true;
}
if needs_rebuild && meta_p.is_dir() {
let meta_str = meta_owned.clone();
let meta_len = meta_str.len() + 1;
let mut index_files: Vec<String> = Vec::new();
let mut legacy_meta_files: Vec<(String, String)> = Vec::new();
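                // Collect per-directory _index.json files plus legacy per-object .meta.json files.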
fn collect_meta(
dir: &Path,
meta_len: usize,
index_files: &mut Vec<String>,
legacy_meta_files: &mut Vec<(String, String)>,
) {
let entries = match fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
collect_meta(&entry.path(), meta_len, index_files, legacy_meta_files);
} else if ft.is_file() {
if let Some(name) = entry.file_name().to_str() {
let full = entry.path().to_string_lossy().to_string();
if name == "_index.json" {
index_files.push(full);
} else if name.ends_with(".meta.json") {
if full.len() > meta_len {
let rel = &full[meta_len..];
let key = if rel.len() > 10 {
rel[..rel.len() - 10].replace('\\', "/")
} else {
continue;
};
legacy_meta_files.push((key, full));
}
}
}
}
}
}
collect_meta(
meta_p,
meta_len,
&mut index_files,
&mut legacy_meta_files,
);
meta_cache.clear();
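                // Prefer ETags from _index.json entries; fall back to legacy .meta.json files below.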
for idx_path in &index_files {
if let Ok(content) = fs::read_to_string(idx_path) {
if let Ok(idx_data) = serde_json::from_str::<HashMap<String, Value>>(&content) {
let rel_dir = if idx_path.len() > meta_len {
let r = &idx_path[meta_len..];
r.replace('\\', "/")
} else {
String::new()
};
let dir_prefix = if rel_dir.ends_with("/_index.json") {
&rel_dir[..rel_dir.len() - "/_index.json".len()]
} else {
""
};
for (entry_name, entry_data) in &idx_data {
let key = if dir_prefix.is_empty() {
entry_name.clone()
} else {
format!("{}/{}", dir_prefix, entry_name)
};
if let Some(meta_obj) = entry_data.get("metadata") {
if let Some(etag) = meta_obj.get("__etag__") {
if let Some(etag_str) = etag.as_str() {
meta_cache.insert(key, etag_str.to_owned());
}
}
}
}
}
}
}
for (key, path) in &legacy_meta_files {
if meta_cache.contains_key(key) {
continue;
}
if let Ok(content) = fs::read(path) {
if let Some(etag) = extract_etag_from_meta_bytes(&content) {
meta_cache.insert(key.clone(), etag);
}
}
}
etag_cache_changed = true;
}
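            // Walk the bucket tree iteratively, skipping internal folders and symlinks.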
let bucket_p = Path::new(&bucket_owned);
let bucket_len = bucket_owned.len() + 1;
let mut objects: Vec<(String, u64, f64, Option<String>)> = Vec::new();
if bucket_p.is_dir() {
let mut stack = vec![bucket_p.to_path_buf()];
while let Some(current) = stack.pop() {
let entries = match fs::read_dir(&current) {
Ok(e) => e,
Err(_) => continue,
};
for entry_result in entries {
let entry = match entry_result {
Ok(e) => e,
Err(_) => continue,
};
let ft = match entry.file_type() {
Ok(ft) => ft,
Err(_) => continue,
};
if ft.is_dir() && !ft.is_symlink() {
let full = entry.path();
let full_str = full.to_string_lossy();
if full_str.len() > bucket_len {
let first_part: &str = if let Some(sep_pos) =
full_str[bucket_len..].find(|c: char| c == '\\' || c == '/')
{
&full_str[bucket_len..bucket_len + sep_pos]
} else {
&full_str[bucket_len..]
};
if INTERNAL_FOLDERS.contains(&first_part) {
continue;
}
} else if let Some(name) = entry.file_name().to_str() {
if INTERNAL_FOLDERS.contains(&name) {
continue;
}
}
stack.push(full);
} else if ft.is_file() && !ft.is_symlink() {
let full = entry.path();
let full_str = full.to_string_lossy();
if full_str.len() <= bucket_len {
continue;
}
let rel = &full_str[bucket_len..];
let first_part: &str =
if let Some(sep_pos) = rel.find(|c: char| c == '\\' || c == '/') {
&rel[..sep_pos]
} else {
rel
};
if INTERNAL_FOLDERS.contains(&first_part) {
continue;
}
let key = rel.replace('\\', "/");
if let Ok(md) = entry.metadata() {
let size = md.len();
let mtime = md
.modified()
.map(system_time_to_epoch)
.unwrap_or(0.0);
let etag = meta_cache.get(&key).cloned();
objects.push((key, size, mtime, etag));
}
}
}
}
}
Ok((meta_cache, objects, etag_cache_changed))
})?;
let (meta_cache, objects, etag_cache_changed) = result;
let dict = PyDict::new(py);
let cache_dict = PyDict::new(py);
for (k, v) in &meta_cache {
cache_dict.set_item(k, v)?;
}
dict.set_item("etag_cache", cache_dict)?;
let objects_list = PyList::empty(py);
for (key, size, mtime, etag) in &objects {
let etag_py: Py<PyAny> = match etag {
Some(e) => PyString::new(py, e).into_any().unbind(),
None => py.None(),
};
let tuple = PyTuple::new(py, &[
PyString::new(py, key).into_any().unbind(),
size.into_pyobject(py)?.into_any().unbind(),
mtime.into_pyobject(py)?.into_any().unbind(),
etag_py,
])?;
objects_list.append(tuple)?;
}
dict.set_item("objects", objects_list)?;
dict.set_item("etag_cache_changed", etag_cache_changed)?;
Ok(dict.into_any().unbind())
}
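A minimal sketch of how build_object_cache might be driven from the Python side (the module name myfsio_native, the on-disk paths, and the save_etag_index helper are assumptions; the returned dict shape follows the code above):

# Hypothetical caller; module name and paths are illustrative, not MyFSIO's actual wiring.
import myfsio_native

cache = myfsio_native.build_object_cache(
    "data/my-bucket",                                   # bucket_path
    "data/.myfsio.sys/meta/my-bucket",                  # meta_root (assumed layout)
    "data/.myfsio.sys/meta/my-bucket/_etag_index.json", # etag_index_path (assumed name)
)
for key, size, mtime, etag in cache["objects"]:
    print(key, size, etag or "-")
if cache["etag_cache_changed"]:
    save_etag_index(cache["etag_cache"])  # hypothetical persistence helper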

View File

@@ -137,11 +137,11 @@
const versionPanel = document.getElementById('version-panel');
const versionList = document.getElementById('version-list');
const refreshVersionsButton = document.getElementById('refreshVersionsButton');
let archivedCard = document.getElementById('archived-objects-card');
let archivedBody = archivedCard?.querySelector('[data-archived-body]');
let archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
let archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
let archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
const archivedCard = document.getElementById('archived-objects-card');
const archivedBody = archivedCard?.querySelector('[data-archived-body]');
const archivedCountBadge = archivedCard?.querySelector('[data-archived-count]');
const archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]');
const archivedEndpoint = archivedCard?.dataset.archivedEndpoint;
let versioningEnabled = objectsContainer?.dataset.versioning === 'true';
const versionsCache = new Map();
let activeRow = null;
@@ -167,8 +167,6 @@
let pageSize = 5000;
let currentPrefix = '';
let allObjects = [];
let streamFolders = [];
let useDelimiterMode = true;
let urlTemplates = null;
let streamAbortController = null;
let useStreaming = !!objectsStreamUrl;
@@ -188,7 +186,7 @@
let renderedRange = { start: 0, end: 0 };
let memoizedVisibleItems = null;
let memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
let memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
const createObjectRow = (obj, displayKey = null) => {
const tr = document.createElement('tr');
@@ -321,13 +319,10 @@
`;
};
const bucketTotalObjects = objectsContainer ? parseInt(objectsContainer.dataset.bucketTotalObjects || '0', 10) : 0;
const updateObjectCountBadge = () => {
if (!objectCountBadge) return;
if (useDelimiterMode) {
const total = bucketTotalObjects || totalObjectCount;
objectCountBadge.textContent = `${total.toLocaleString()} object${total !== 1 ? 's' : ''}`;
if (totalObjectCount === 0) {
objectCountBadge.textContent = '0 objects';
} else {
objectCountBadge.textContent = `${totalObjectCount.toLocaleString()} object${totalObjectCount !== 1 ? 's' : ''}`;
}
@@ -354,7 +349,6 @@
const computeVisibleItems = (forceRecompute = false) => {
const currentInputs = {
objectCount: allObjects.length,
folderCount: streamFolders.length,
prefix: currentPrefix,
filterTerm: currentFilterTerm,
sortField: currentSortField,
@@ -364,7 +358,6 @@
if (!forceRecompute &&
memoizedVisibleItems !== null &&
memoizedInputs.objectCount === currentInputs.objectCount &&
memoizedInputs.folderCount === currentInputs.folderCount &&
memoizedInputs.prefix === currentInputs.prefix &&
memoizedInputs.filterTerm === currentInputs.filterTerm &&
memoizedInputs.sortField === currentInputs.sortField &&
@@ -373,49 +366,36 @@
}
const items = [];
const folders = new Set();
if (searchResults !== null) {
searchResults.forEach(obj => {
items.push({ type: 'file', data: obj, displayKey: obj.key });
});
} else if (useDelimiterMode && streamFolders.length > 0) {
streamFolders.forEach(folderPath => {
const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, '');
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
});
allObjects.forEach(obj => {
const remainder = obj.key.slice(currentPrefix.length);
if (!remainder) return;
items.push({ type: 'file', data: obj, displayKey: remainder });
});
} else {
const folders = new Set();
allObjects.forEach(obj => {
if (!obj.key.startsWith(currentPrefix)) return;
allObjects.forEach(obj => {
if (!obj.key.startsWith(currentPrefix)) return;
const remainder = obj.key.slice(currentPrefix.length);
const remainder = obj.key.slice(currentPrefix.length);
if (!remainder) return;
if (!remainder) return;
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
const slashIndex = remainder.indexOf('/');
const isFolderMarker = obj.key.endsWith('/') && obj.size === 0;
const slashIndex = remainder.indexOf('/');
if (slashIndex === -1 && !isFolderMarker) {
if (slashIndex === -1 && !isFolderMarker) {
if (!currentFilterTerm || remainder.toLowerCase().includes(currentFilterTerm)) {
items.push({ type: 'file', data: obj, displayKey: remainder });
} else {
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
? slashIndex
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
const folderName = remainder.slice(0, effectiveSlashIndex);
const folderPath = currentPrefix + folderName + '/';
if (!folders.has(folderPath)) {
folders.add(folderPath);
}
} else {
const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1
? slashIndex
: (slashIndex === -1 ? remainder.length - 1 : slashIndex);
const folderName = remainder.slice(0, effectiveSlashIndex);
const folderPath = currentPrefix + folderName + '/';
if (!folders.has(folderPath)) {
folders.add(folderPath);
if (!currentFilterTerm || folderName.toLowerCase().includes(currentFilterTerm)) {
items.push({ type: 'folder', path: folderPath, displayKey: folderName });
}
}
});
}
}
});
items.sort((a, b) => {
if (a.type === 'folder' && b.type === 'file') return -1;
@@ -491,7 +471,7 @@
renderedRange = { start: -1, end: -1 };
if (visibleItems.length === 0) {
if (allObjects.length === 0 && streamFolders.length === 0 && !hasMoreObjects) {
if (allObjects.length === 0 && !hasMoreObjects) {
showEmptyState();
} else {
objectsTableBody.innerHTML = `
@@ -520,7 +500,15 @@
const updateFolderViewStatus = () => {
const folderViewStatusEl = document.getElementById('folder-view-status');
if (!folderViewStatusEl) return;
folderViewStatusEl.classList.add('d-none');
if (currentPrefix) {
const folderCount = visibleItems.filter(i => i.type === 'folder').length;
const fileCount = visibleItems.filter(i => i.type === 'file').length;
folderViewStatusEl.innerHTML = `<span class="text-muted">${folderCount} folder${folderCount !== 1 ? 's' : ''}, ${fileCount} file${fileCount !== 1 ? 's' : ''} in this view</span>`;
folderViewStatusEl.classList.remove('d-none');
} else {
folderViewStatusEl.classList.add('d-none');
}
};
const processStreamObject = (obj) => {
@@ -548,30 +536,21 @@
let lastStreamRenderTime = 0;
const STREAM_RENDER_THROTTLE_MS = 500;
const buildBottomStatusText = (complete) => {
if (!complete) {
const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
return `${loadedObjectCount.toLocaleString()}${countText} loading...`;
}
const parts = [];
if (useDelimiterMode && streamFolders.length > 0) {
parts.push(`${streamFolders.length.toLocaleString()} folder${streamFolders.length !== 1 ? 's' : ''}`);
}
parts.push(`${loadedObjectCount.toLocaleString()} object${loadedObjectCount !== 1 ? 's' : ''}`);
return parts.join(', ');
};
const flushPendingStreamObjects = () => {
if (pendingStreamObjects.length > 0) {
const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
batch.forEach(obj => {
loadedObjectCount++;
allObjects.push(obj);
});
}
if (pendingStreamObjects.length === 0) return;
const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
batch.forEach(obj => {
loadedObjectCount++;
allObjects.push(obj);
});
updateObjectCountBadge();
if (loadMoreStatus) {
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
if (streamingComplete) {
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
} else {
const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()}${countText} loading...`;
}
}
if (objectsLoadingRow && objectsLoadingRow.parentNode) {
const loadingText = objectsLoadingRow.querySelector('p');
@@ -606,9 +585,8 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
streamFolders = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
pendingStreamObjects = [];
lastStreamRenderTime = 0;
@@ -617,7 +595,6 @@
try {
const params = new URLSearchParams();
if (currentPrefix) params.set('prefix', currentPrefix);
if (useDelimiterMode) params.set('delimiter', '/');
const response = await fetch(`${objectsStreamUrl}?${params}`, {
signal: streamAbortController.signal
@@ -662,10 +639,6 @@
if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
}
break;
case 'folder':
streamFolders.push(msg.prefix);
scheduleStreamRender();
break;
case 'object':
pendingStreamObjects.push(processStreamObject(msg));
if (pendingStreamObjects.length >= STREAM_RENDER_BATCH) {
@@ -709,7 +682,7 @@
}
if (loadMoreStatus) {
loadMoreStatus.textContent = buildBottomStatusText(true);
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
}
refreshVirtualList();
renderBreadcrumb(currentPrefix);
@@ -737,9 +710,8 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
streamFolders = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
}
if (append && loadMoreSpinner) {
@@ -941,7 +913,7 @@
});
}
const hasFolders = () => streamFolders.length > 0 || allObjects.some(obj => obj.key.includes('/'));
const hasFolders = () => allObjects.some(obj => obj.key.includes('/'));
const getFoldersAtPrefix = (prefix) => {
const folders = new Set();
@@ -968,9 +940,6 @@
};
const countObjectsInFolder = (folderPrefix) => {
if (useDelimiterMode) {
return { count: 0, mayHaveMore: true };
}
const count = allObjects.filter(obj => obj.key.startsWith(folderPrefix)).length;
return { count, mayHaveMore: hasMoreObjects };
};
@@ -1049,13 +1018,7 @@
const createFolderRow = (folderPath, displayName = null) => {
const folderName = displayName || folderPath.slice(currentPrefix.length).replace(/\/$/, '');
const { count: objectCount, mayHaveMore } = countObjectsInFolder(folderPath);
let countLine = '';
if (useDelimiterMode) {
countLine = '';
} else {
const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
countLine = `<div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>`;
}
const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
const tr = document.createElement('tr');
tr.className = 'folder-row';
@@ -1073,7 +1036,7 @@
</svg>
<span>${escapeHtml(folderName)}/</span>
</div>
${countLine}
<div class="text-muted small ms-4 ps-2">${countDisplay} object${objectCount !== 1 ? 's' : ''}</div>
</td>
<td class="text-end text-nowrap">
<span class="text-muted small">—</span>
@@ -1574,7 +1537,7 @@
const confirmVersionRestore = (row, version, label = null, onConfirm) => {
if (!version) return;
const timestamp = (version.archived_at || version.last_modified) ? new Date(version.archived_at || version.last_modified).toLocaleString() : version.version_id;
const timestamp = version.archived_at ? new Date(version.archived_at).toLocaleString() : version.version_id;
const sizeLabel = formatBytes(Number(version.size) || 0);
const reasonLabel = describeVersionReason(version.reason);
const targetLabel = label || row?.dataset.key || 'this object';
@@ -1647,7 +1610,7 @@
const latestCell = document.createElement('td');
if (item.latest) {
const ts = (item.latest.archived_at || item.latest.last_modified) ? new Date(item.latest.archived_at || item.latest.last_modified).toLocaleString() : item.latest.version_id;
const ts = item.latest.archived_at ? new Date(item.latest.archived_at).toLocaleString() : item.latest.version_id;
const sizeLabel = formatBytes(Number(item.latest.size) || 0);
latestCell.innerHTML = `<div class="small">${ts}</div><div class="text-muted small">${sizeLabel} · ${describeVersionReason(item.latest.reason)}</div>`;
} else {
@@ -1774,15 +1737,6 @@
loadArchivedObjects();
}
const propertiesTab = document.getElementById('properties-tab');
if (propertiesTab) {
propertiesTab.addEventListener('shown.bs.tab', () => {
if (archivedCard && archivedEndpoint) {
loadArchivedObjects();
}
});
}
async function restoreVersion(row, version) {
if (!row || !version?.version_id) return;
const template = row.dataset.restoreTemplate;
@@ -1831,7 +1785,7 @@
badge.textContent = `#${versionNumber}`;
const title = document.createElement('div');
title.className = 'fw-semibold small';
const timestamp = (entry.archived_at || entry.last_modified) ? new Date(entry.archived_at || entry.last_modified).toLocaleString() : entry.version_id;
const timestamp = entry.archived_at ? new Date(entry.archived_at).toLocaleString() : entry.version_id;
title.textContent = timestamp;
heading.appendChild(badge);
heading.appendChild(title);
@@ -2090,63 +2044,8 @@
}
};
let searchDebounceTimer = null;
let searchAbortController = null;
let searchResults = null;
const performServerSearch = async (term) => {
if (searchAbortController) searchAbortController.abort();
searchAbortController = new AbortController();
try {
const params = new URLSearchParams({ q: term, limit: '500' });
if (currentPrefix) params.set('prefix', currentPrefix);
const searchUrl = objectsStreamUrl.replace('/stream', '/search');
const response = await fetch(`${searchUrl}?${params}`, {
signal: searchAbortController.signal
});
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const data = await response.json();
searchResults = (data.results || []).map(obj => processStreamObject(obj));
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
refreshVirtualList();
if (loadMoreStatus) {
const countText = searchResults.length.toLocaleString();
const truncated = data.truncated ? '+' : '';
loadMoreStatus.textContent = `${countText}${truncated} result${searchResults.length !== 1 ? 's' : ''}`;
}
} catch (e) {
if (e.name === 'AbortError') return;
if (loadMoreStatus) {
loadMoreStatus.textContent = 'Search failed';
}
}
};
document.getElementById('object-search')?.addEventListener('input', (event) => {
const newTerm = event.target.value.toLowerCase();
const wasFiltering = currentFilterTerm.length > 0;
const isFiltering = newTerm.length > 0;
currentFilterTerm = newTerm;
clearTimeout(searchDebounceTimer);
if (isFiltering) {
searchDebounceTimer = setTimeout(() => performServerSearch(newTerm), 300);
return;
}
if (!isFiltering && wasFiltering) {
if (searchAbortController) searchAbortController.abort();
searchResults = null;
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
if (loadMoreStatus) {
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
}
}
currentFilterTerm = event.target.value.toLowerCase();
updateFilterWarning();
refreshVirtualList();
});
@@ -2187,18 +2086,7 @@
var searchInput = document.getElementById('object-search');
if (searchInput && document.activeElement === searchInput) {
searchInput.value = '';
const wasFiltering = currentFilterTerm.length > 0;
currentFilterTerm = '';
if (wasFiltering) {
clearTimeout(searchDebounceTimer);
if (searchAbortController) searchAbortController.abort();
searchResults = null;
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
if (loadMoreStatus) {
loadMoreStatus.textContent = buildBottomStatusText(streamingComplete);
}
}
refreshVirtualList();
searchInput.blur();
}
@@ -2928,16 +2816,7 @@
uploadFileInput.value = '';
}
const previousKey = activeRow?.dataset.key || null;
loadObjects(false).then(() => {
if (previousKey) {
const newRow = document.querySelector(`[data-object-row][data-key="${CSS.escape(previousKey)}"]`);
if (newRow) {
selectRow(newRow);
if (versioningEnabled) loadObjectVersions(newRow, { force: true });
}
}
});
loadObjects(false);
const successCount = uploadSuccessFiles.length;
const errorCount = uploadErrorFiles.length;
@@ -4275,47 +4154,6 @@
var archivedCardEl = document.getElementById('archived-objects-card');
if (archivedCardEl) {
archivedCardEl.style.display = enabled ? '' : 'none';
} else if (enabled) {
var endpoint = window.BucketDetailConfig?.endpoints?.archivedObjects || '';
if (endpoint) {
var html = '<div class="card shadow-sm mt-4" id="archived-objects-card" data-archived-endpoint="' + endpoint + '">' +
'<div class="card-header d-flex justify-content-between align-items-center flex-wrap gap-2">' +
'<div class="d-flex align-items-center">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-warning me-2" viewBox="0 0 16 16">' +
'<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
'</svg><span class="fw-semibold">Archived Objects</span></div>' +
'<div class="d-flex align-items-center gap-2">' +
'<span class="badge text-bg-secondary" data-archived-count>0 items</span>' +
'<button class="btn btn-outline-secondary btn-sm" type="button" data-archived-refresh>' +
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
'<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>' +
'<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>' +
'</svg>Refresh</button></div></div>' +
'<div class="card-body">' +
'<p class="text-muted small mb-3">Objects that have been deleted while versioning is enabled. Their previous versions remain available until you restore or purge them.</p>' +
'<div class="table-responsive"><table class="table table-sm table-hover align-middle mb-0">' +
'<thead class="table-light"><tr>' +
'<th scope="col"><svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 text-muted" viewBox="0 0 16 16">' +
'<path d="M4 0h5.293A1 1 0 0 1 10 .293L13.707 4a1 1 0 0 1 .293.707V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2zm5.5 1.5v2a1 1 0 0 0 1 1h2l-3-3z"/>' +
'</svg>Key</th>' +
'<th scope="col">Latest Version</th>' +
'<th scope="col" class="text-center">Versions</th>' +
'<th scope="col" class="text-end">Actions</th>' +
'</tr></thead>' +
'<tbody data-archived-body><tr><td colspan="4" class="text-center text-muted py-4">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="mb-2 d-block mx-auto" viewBox="0 0 16 16">' +
'<path d="M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v2a1 1 0 0 1-1 1v7.5a2.5 2.5 0 0 1-2.5 2.5h-9A2.5 2.5 0 0 1 1 12.5V5a1 1 0 0 1-1-1V2zm2 3v7.5A1.5 1.5 0 0 0 3.5 14h9a1.5 1.5 0 0 0 1.5-1.5V5H2zm13-3H1v2h14V2zM5 7.5a.5.5 0 0 1 .5-.5h5a.5.5 0 0 1 0 1h-5a.5.5 0 0 1-.5-.5z"/>' +
'</svg>No archived objects</td></tr></tbody>' +
'</table></div></div></div>';
card.insertAdjacentHTML('afterend', html);
archivedCard = document.getElementById('archived-objects-card');
archivedBody = archivedCard.querySelector('[data-archived-body]');
archivedCountBadge = archivedCard.querySelector('[data-archived-count]');
archivedRefreshButton = archivedCard.querySelector('[data-archived-refresh]');
archivedEndpoint = endpoint;
archivedRefreshButton.addEventListener('click', function() { loadArchivedObjects(); });
loadArchivedObjects();
}
}
var dropZone = document.getElementById('objects-drop-zone');
@@ -4323,15 +4161,6 @@
dropZone.setAttribute('data-versioning', enabled ? 'true' : 'false');
}
var bulkPurgeWrap = document.getElementById('bulkDeletePurgeWrap');
if (bulkPurgeWrap) {
bulkPurgeWrap.classList.toggle('d-none', !enabled);
}
var singleDeleteVerWrap = document.getElementById('deleteObjectVersioningWrap');
if (singleDeleteVerWrap) {
singleDeleteVerWrap.classList.toggle('d-none', !enabled);
}
if (!enabled) {
var newForm = document.getElementById('enableVersioningForm');
if (newForm) {

View File

@@ -171,7 +171,6 @@
data-bulk-download-endpoint="{{ url_for('ui.bulk_download_objects', bucket_name=bucket_name) }}"
data-folders-url="{{ folders_url }}"
data-buckets-for-copy-url="{{ buckets_for_copy_url }}"
data-bucket-total-objects="{{ bucket_stats.get('objects', 0) }}"
>
<table class="table table-hover align-middle mb-0" id="objects-table" style="table-layout: fixed;">
<thead class="table-light">
@@ -2273,11 +2272,13 @@
</div>
<ul class="list-group mb-3" id="bulkDeleteList" style="max-height: 200px; overflow-y: auto;"></ul>
<div class="text-muted small" id="bulkDeleteStatus"></div>
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3 {% if not versioning_enabled %}d-none{% endif %}" id="bulkDeletePurgeWrap">
{% if versioning_enabled %}
<div class="form-check mt-3 p-3 bg-body-tertiary rounded-3">
<input class="form-check-input" type="checkbox" id="bulkDeletePurge" />
<label class="form-check-label" for="bulkDeletePurge">Also delete archived versions</label>
<div class="form-text">Removes any archived versions stored in the archive.</div>
</div>
{% endif %}
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
@@ -2315,7 +2316,7 @@
<div class="p-3 bg-body-tertiary rounded-3 mb-3">
<code id="deleteObjectKey" class="d-block text-break"></code>
</div>
<div id="deleteObjectVersioningWrap" class="{% if not versioning_enabled %}d-none{% endif %}">
{% if versioning_enabled %}
<div class="alert alert-warning d-flex align-items-start small mb-3" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
@@ -2327,7 +2328,7 @@
<label class="form-check-label" for="deletePurgeVersions">Also delete all archived versions</label>
<div class="form-text mb-0">Removes the live object and every stored version.</div>
</div>
</div>
{% endif %}
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
@@ -2770,8 +2771,7 @@
window.BucketDetailConfig = {
endpoints: {
versioning: "{{ url_for('ui.update_bucket_versioning', bucket_name=bucket_name) }}",
bucketsOverview: "{{ url_for('ui.buckets_overview') }}",
archivedObjects: "{{ url_for('ui.archived_objects', bucket_name=bucket_name) }}"
bucketsOverview: "{{ url_for('ui.buckets_overview') }}"
}
};

View File

@@ -52,11 +52,6 @@
<li><a href="#acls">Access Control Lists</a></li>
<li><a href="#tagging">Object &amp; Bucket Tagging</a></li>
<li><a href="#website-hosting">Static Website Hosting</a></li>
<li><a href="#cors-config">CORS Configuration</a></li>
<li><a href="#post-object">PostObject (Form Upload)</a></li>
<li><a href="#list-objects-v2">List Objects API v2</a></li>
<li><a href="#upgrading">Upgrading &amp; Updates</a></li>
<li><a href="#api-matrix">Full API Reference</a></li>
</ul>
</div>
</div>
@@ -131,11 +126,6 @@ python run.py --mode ui
<td><code>5000</code></td>
<td>Listen port (UI uses 5100).</td>
</tr>
<tr>
<td><code>DISPLAY_TIMEZONE</code></td>
<td><code>UTC</code></td>
<td>Timezone for UI timestamps (e.g., <code>US/Eastern</code>, <code>Asia/Tokyo</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">CORS Settings</td>
</tr>
@@ -197,11 +187,6 @@ python run.py --mode ui
<td><code>100 per minute</code></td>
<td>Rate limit for HEAD requests.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_ADMIN</code></td>
<td><code>60 per minute</code></td>
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Server Settings</td>
</tr>
@@ -353,24 +338,6 @@ python run.py --mode ui
<td><code>604800</code></td>
<td>Maximum presigned URL expiry time (7 days).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Proxy &amp; Network Settings</td>
</tr>
<tr>
<td><code>NUM_TRUSTED_PROXIES</code></td>
<td><code>1</code></td>
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers.</td>
</tr>
<tr>
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
<td>(empty)</td>
<td>Comma-separated whitelist of safe redirect targets.</td>
</tr>
<tr>
<td><code>ALLOW_INTERNAL_ENDPOINTS</code></td>
<td><code>false</code></td>
<td>Allow connections to internal/private IPs (webhooks, replication).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Storage Limits</td>
</tr>
@@ -399,16 +366,6 @@ python run.py --mode ui
<td><code>50</code></td>
<td>Max lifecycle history records per bucket.</td>
</tr>
<tr>
<td><code>OBJECT_CACHE_TTL</code></td>
<td><code>60</code></td>
<td>Seconds to cache object metadata.</td>
</tr>
<tr>
<td><code>BULK_DOWNLOAD_MAX_BYTES</code></td>
<td><code>1 GB</code></td>
<td>Max total size for bulk ZIP downloads.</td>
</tr>
<tr>
<td><code>ENCRYPTION_CHUNK_SIZE_BYTES</code></td>
<td><code>65536</code></td>
@@ -534,7 +491,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<ul>
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders.</li>
<li>Infinite scroll loads more objects automatically. Choose a batch size (50–250) from the footer dropdown.</li>
<li>Bulk select objects for multi-delete or multi-download (ZIP archive, up to 1 GiB). Filter by name using the search box.</li>
<li>Bulk select objects for multi-delete or multi-download. Filter by name using the search box.</li>
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
</ul>
</div>
@@ -656,75 +613,15 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
<td><code>/&lt;bucket&gt;/&lt;key&gt;</code></td>
<td>Delete an object.</td>
</tr>
<tr>
<td>HEAD</td>
<td><code>/&lt;bucket&gt;</code></td>
<td>Check if a bucket exists.</td>
</tr>
<tr>
<td>HEAD</td>
<td><code>/&lt;bucket&gt;/&lt;key&gt;</code></td>
<td>Get object metadata without downloading.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/&lt;bucket&gt;?delete</code></td>
<td>Bulk delete objects (XML body).</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;?policy</code></td>
<td>Bucket policy management.</td>
</tr>
<tr>
<td>GET/PUT</td>
<td><code>/&lt;bucket&gt;?versioning</code></td>
<td>Versioning status.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;?lifecycle</code></td>
<td>Lifecycle rules.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;?cors</code></td>
<td>CORS configuration.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;?encryption</code></td>
<td>Default encryption.</td>
</tr>
<tr>
<td>GET/PUT</td>
<td><code>/&lt;bucket&gt;?acl</code></td>
<td>Bucket ACL.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;?tagging</code></td>
<td>Bucket tags.</td>
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/&lt;bucket&gt;/&lt;key&gt;?tagging</code></td>
<td>Object tags.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/&lt;bucket&gt;/&lt;key&gt;?uploads</code></td>
<td>Initiate multipart upload.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/&lt;bucket&gt;/&lt;key&gt;?select</code></td>
<td>SQL query (SelectObjectContent).</td>
<td>Fetch, upsert, or remove a bucket policy (S3-compatible).</td>
</tr>
</tbody>
</table>
</div>
<p class="small text-muted mt-3 mb-0">All responses include <code>X-Request-Id</code> for tracing. See the <a href="#api-matrix">Full API Reference</a> for the complete endpoint list. Logs land in <code>logs/api.log</code> and <code>logs/ui.log</code>.</p>
<p class="small text-muted mt-3 mb-0">All responses include <code>X-Request-Id</code> for tracing. Logs land in <code>logs/api.log</code> and <code>logs/ui.log</code>.</p>
</div>
</article>
<article id="examples" class="card shadow-sm docs-section">
@@ -1414,10 +1311,6 @@ curl -X PUT "{{ api_base }}/bucket/&lt;bucket&gt;?quota" \
<td><strong>KMS (SSE-KMS)</strong></td>
<td>Encryption using customer-managed keys via the built-in KMS</td>
</tr>
<tr>
<td><strong>SSE-C</strong></td>
<td>Server-side encryption with customer-provided keys (per-request)</td>
</tr>
</tbody>
</table>
</div>
@@ -1484,54 +1377,6 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<p class="small text-muted mb-0">
<strong>Envelope Encryption:</strong> Each object is encrypted with a unique Data Encryption Key (DEK). The DEK is then encrypted (wrapped) by the master key or KMS key and stored alongside the ciphertext. On read, the DEK is unwrapped and used to decrypt the object transparently.
</p>
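<p class="small text-muted">The sketch below illustrates that flow in Python (illustrative only, not MyFSIO's internal code; the use of AES-256-GCM and the helper names are assumptions):</p>
<pre class="mb-3"><code class="language-python">import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM  # pip install cryptography

def encrypt_object(plaintext: bytes, wrap_dek) -> dict:
    """Encrypt with a fresh per-object DEK; store only the wrapped DEK."""
    dek = AESGCM.generate_key(bit_length=256)
    nonce = os.urandom(12)
    return {
        "ciphertext": AESGCM(dek).encrypt(nonce, plaintext, None),
        "nonce": nonce,
        "wrapped_dek": wrap_dek(dek),  # master key or KMS wraps the DEK
    }

def decrypt_object(blob: dict, unwrap_dek) -> bytes:
    dek = unwrap_dek(blob["wrapped_dek"])  # unwrap the DEK, then decrypt
    return AESGCM(dek).decrypt(blob["nonce"], blob["ciphertext"], None)</code></pre>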
<h3 class="h6 text-uppercase text-muted mt-4">SSE-C (Customer-Provided Keys)</h3>
<p class="small text-muted">With SSE-C, you supply your own 256-bit AES key with each request. The server encrypts/decrypts using your key but never stores it. You must provide the same key for both upload and download.</p>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Header</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>x-amz-server-side-encryption-customer-algorithm</code></td>
<td><code>AES256</code></td>
</tr>
<tr>
<td><code>x-amz-server-side-encryption-customer-key</code></td>
<td>Base64-encoded 256-bit key</td>
</tr>
<tr>
<td><code>x-amz-server-side-encryption-customer-key-MD5</code></td>
<td>Base64-encoded MD5 of the key</td>
</tr>
</tbody>
</table>
</div>
<pre class="mb-3"><code class="language-bash"># Generate a 256-bit key
KEY=$(openssl rand -base64 32)
KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
# Upload with SSE-C
curl -X PUT "{{ api_base }}/my-bucket/secret.txt" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
-H "x-amz-server-side-encryption-customer-key: $KEY" \
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
--data-binary @secret.txt
# Download with SSE-C (same key required)
curl "{{ api_base }}/my-bucket/secret.txt" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-H "x-amz-server-side-encryption-customer-algorithm: AES256" \
-H "x-amz-server-side-encryption-customer-key: $KEY" \
-H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"</code></pre>
<div class="alert alert-light border mb-0 small">
<strong>Note:</strong> SSE-C does not require <code>ENCRYPTION_ENABLED</code> or <code>KMS_ENABLED</code>. If you lose your key, the data is irrecoverable.
</div>
</div>
</article>
<article id="lifecycle" class="card shadow-sm docs-section">
@@ -2081,7 +1926,7 @@ curl -X POST "{{ api_base }}/&lt;bucket&gt;/data.csv?select" \
<span class="docs-section-kicker">22</span>
<h2 class="h4 mb-0">Advanced S3 Operations</h2>
</div>
<p class="text-muted">Copy, move, and partially download objects using advanced S3 operations.</p>
<p class="text-muted">Copy objects, upload part copies, and use range requests for partial downloads.</p>
<h3 class="h6 text-uppercase text-muted mt-4">CopyObject</h3>
<pre class="mb-3"><code class="language-bash"># Copy within same bucket
@@ -2096,13 +1941,6 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;/file.txt" \
-H "x-amz-metadata-directive: REPLACE" \
-H "x-amz-meta-newkey: newvalue"</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">MoveObject (UI)</h3>
<p class="small text-muted">Move an object to a different key or bucket via the UI. Performs a copy then deletes the source. Requires <code>read</code>+<code>delete</code> on source and <code>write</code> on destination.</p>
<pre class="mb-3"><code class="language-bash"># Move via UI API (session-authenticated)
curl -X POST "http://localhost:5100/ui/buckets/&lt;bucket&gt;/objects/&lt;key&gt;/move" \
-H "Content-Type: application/json" --cookie "session=..." \
-d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">UploadPartCopy</h3>
<p class="small text-muted">Copy data from an existing object into a multipart upload part:</p>
<pre class="mb-3"><code class="language-bash"># Copy bytes 0-10485759 from source as part 1
@@ -2355,274 +2193,6 @@ server {
</div>
</div>
</article>
<article id="cors-config" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">26</span>
<h2 class="h4 mb-0">CORS Configuration</h2>
</div>
<p class="text-muted">Configure per-bucket Cross-Origin Resource Sharing rules to control which origins can access your bucket from a browser.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Setting CORS Rules</h3>
<pre class="mb-3"><code class="language-bash"># Set CORS configuration
curl -X PUT "{{ api_base }}/&lt;bucket&gt;?cors" \
-H "Content-Type: application/xml" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '&lt;CORSConfiguration&gt;
&lt;CORSRule&gt;
&lt;AllowedOrigin&gt;https://example.com&lt;/AllowedOrigin&gt;
&lt;AllowedMethod&gt;GET&lt;/AllowedMethod&gt;
&lt;AllowedMethod&gt;PUT&lt;/AllowedMethod&gt;
&lt;AllowedHeader&gt;*&lt;/AllowedHeader&gt;
&lt;ExposeHeader&gt;ETag&lt;/ExposeHeader&gt;
&lt;MaxAgeSeconds&gt;3600&lt;/MaxAgeSeconds&gt;
&lt;/CORSRule&gt;
&lt;/CORSConfiguration&gt;'
# Get CORS configuration
curl "{{ api_base }}/&lt;bucket&gt;?cors" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Delete CORS configuration
curl -X DELETE "{{ api_base }}/&lt;bucket&gt;?cors" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Rule Fields</h3>
<div class="table-responsive mb-0">
<table class="table table-sm table-bordered small mb-0">
<thead class="table-light">
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>AllowedOrigin</code></td>
<td>Origins allowed to make requests (supports <code>*</code> wildcard)</td>
</tr>
<tr>
<td><code>AllowedMethod</code></td>
<td>HTTP methods: <code>GET</code>, <code>PUT</code>, <code>POST</code>, <code>DELETE</code>, <code>HEAD</code></td>
</tr>
<tr>
<td><code>AllowedHeader</code></td>
<td>Request headers allowed in preflight (supports <code>*</code>)</td>
</tr>
<tr>
<td><code>ExposeHeader</code></td>
<td>Response headers visible to the browser (e.g., <code>ETag</code>, <code>x-amz-request-id</code>)</td>
</tr>
<tr>
<td><code>MaxAgeSeconds</code></td>
<td>How long the browser caches preflight results</td>
</tr>
</tbody>
</table>
</div>
</div>
</article>
<article id="post-object" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">27</span>
<h2 class="h4 mb-0">PostObject (HTML Form Upload)</h2>
</div>
<p class="text-muted">Upload objects directly from an HTML form using browser-based POST uploads with policy-based authorization.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Form Fields</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>key</code></td><td>Object key (supports <code>${filename}</code> variable)</td></tr>
<tr><td><code>file</code></td><td>The file to upload</td></tr>
<tr><td><code>policy</code></td><td>Base64-encoded policy document (JSON)</td></tr>
<tr><td><code>x-amz-signature</code></td><td>HMAC-SHA256 signature of the policy</td></tr>
<tr><td><code>x-amz-credential</code></td><td>Access key / date / region / s3 / aws4_request</td></tr>
<tr><td><code>x-amz-algorithm</code></td><td><code>AWS4-HMAC-SHA256</code></td></tr>
<tr><td><code>x-amz-date</code></td><td>ISO 8601 date (e.g., <code>20250101T000000Z</code>)</td></tr>
<tr><td><code>Content-Type</code></td><td>MIME type of the uploaded file</td></tr>
<tr><td><code>x-amz-meta-*</code></td><td>Custom metadata headers</td></tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Simple Upload (No Signing)</h3>
<pre class="mb-3"><code class="language-html">&lt;form action="{{ api_base }}/my-bucket" method="POST" enctype="multipart/form-data"&gt;
&lt;input type="hidden" name="key" value="uploads/${filename}"&gt;
&lt;input type="file" name="file"&gt;
&lt;button type="submit"&gt;Upload&lt;/button&gt;
&lt;/form&gt;</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Signed Upload (With Policy)</h3>
<p class="small text-muted mb-0">For authenticated uploads, include a base64-encoded policy and SigV4 signature fields. The policy constrains allowed keys, content types, and size limits. See docs.md Section 20 for full signing examples.</p>
</div>
</article>
<article id="list-objects-v2" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">28</span>
<h2 class="h4 mb-0">List Objects API v2</h2>
</div>
<p class="text-muted">Use the v2 list API for improved pagination with continuation tokens instead of markers.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Usage</h3>
<pre class="mb-3"><code class="language-bash"># List with v2 API
curl "{{ api_base }}/&lt;bucket&gt;?list-type=2&amp;prefix=logs/&amp;delimiter=/&amp;max-keys=100" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Paginate with continuation token
curl "{{ api_base }}/&lt;bucket&gt;?list-type=2&amp;continuation-token=&lt;token&gt;" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Start listing after a specific key
curl "{{ api_base }}/&lt;bucket&gt;?list-type=2&amp;start-after=photos/2025/" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Query Parameters</h3>
<div class="table-responsive mb-0">
<table class="table table-sm table-bordered small mb-0">
<thead class="table-light">
<tr>
<th>Parameter</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>list-type=2</code></td><td>Enables v2 API (required)</td></tr>
<tr><td><code>prefix</code></td><td>Filter to keys starting with this prefix</td></tr>
<tr><td><code>delimiter</code></td><td>Group keys by delimiter (typically <code>/</code> for folders)</td></tr>
<tr><td><code>max-keys</code></td><td>Maximum objects to return (default 1000)</td></tr>
<tr><td><code>continuation-token</code></td><td>Token from previous response for pagination</td></tr>
<tr><td><code>start-after</code></td><td>Start listing after this key (first page only)</td></tr>
<tr><td><code>fetch-owner</code></td><td>Include owner info in response</td></tr>
<tr><td><code>encoding-type</code></td><td>Set to <code>url</code> to URL-encode keys in response</td></tr>
</tbody>
</table>
</div>
</div>
</article>
<article id="upgrading" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">29</span>
<h2 class="h4 mb-0">Upgrading &amp; Updates</h2>
</div>
<p class="text-muted">How to safely update MyFSIO to a new version.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Pre-Update Backup</h3>
<p class="small text-muted">Always back up before updating:</p>
<pre class="mb-3"><code class="language-bash"># Back up configuration
cp -r data/.myfsio.sys/config/ config-backup/
# Back up data (optional, for critical deployments)
tar czf myfsio-backup-$(date +%Y%m%d).tar.gz data/
# Back up logs
cp -r logs/ logs-backup/</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Update Procedure</h3>
<ol class="docs-steps mb-3">
<li><strong>Stop the service:</strong> <code>sudo systemctl stop myfsio</code> (or kill the process)</li>
<li><strong>Pull new version:</strong> <code>git pull origin main</code> or download the new binary</li>
<li><strong>Install dependencies:</strong> <code>pip install -r requirements.txt</code></li>
<li><strong>Validate config:</strong> <code>python run.py --check-config</code></li>
<li><strong>Start the service:</strong> <code>sudo systemctl start myfsio</code></li>
<li><strong>Verify:</strong> <code>curl http://localhost:5000/myfsio/health</code></li>
</ol>
<h3 class="h6 text-uppercase text-muted mt-4">Rollback</h3>
<p class="small text-muted mb-0">If something goes wrong, stop the service, restore the backed-up config and data directories, then restart with the previous binary or code version. See <code>docs.md</code> Section 4 for detailed rollback procedures including blue-green deployment strategies.</p>
</div>
</article>
<article id="api-matrix" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">30</span>
<h2 class="h4 mb-0">Full API Reference</h2>
</div>
<p class="text-muted">Complete list of all S3-compatible, admin, and KMS endpoints.</p>
<pre class="mb-0"><code class="language-text"># Service
GET /myfsio/health # Health check
# Bucket Operations
GET / # List buckets
PUT /&lt;bucket&gt; # Create bucket
DELETE /&lt;bucket&gt; # Delete bucket
GET /&lt;bucket&gt; # List objects (?list-type=2)
HEAD /&lt;bucket&gt; # Check bucket exists
POST /&lt;bucket&gt; # POST object / form upload
POST /&lt;bucket&gt;?delete # Bulk delete
# Bucket Configuration
GET|PUT|DELETE /&lt;bucket&gt;?policy # Bucket policy
GET|PUT /&lt;bucket&gt;?quota # Bucket quota
GET|PUT /&lt;bucket&gt;?versioning # Versioning
GET|PUT|DELETE /&lt;bucket&gt;?lifecycle # Lifecycle rules
GET|PUT|DELETE /&lt;bucket&gt;?cors # CORS config
GET|PUT|DELETE /&lt;bucket&gt;?encryption # Default encryption
GET|PUT /&lt;bucket&gt;?acl # Bucket ACL
GET|PUT|DELETE /&lt;bucket&gt;?tagging # Bucket tags
GET|PUT|DELETE /&lt;bucket&gt;?replication # Replication rules
GET|PUT /&lt;bucket&gt;?logging # Access logging
GET|PUT /&lt;bucket&gt;?notification # Event notifications
GET|PUT /&lt;bucket&gt;?object-lock # Object lock config
GET|PUT|DELETE /&lt;bucket&gt;?website # Static website
GET /&lt;bucket&gt;?uploads # List multipart uploads
GET /&lt;bucket&gt;?versions # List object versions
GET /&lt;bucket&gt;?location # Bucket region
# Object Operations
PUT /&lt;bucket&gt;/&lt;key&gt; # Upload object
GET /&lt;bucket&gt;/&lt;key&gt; # Download (Range supported)
DELETE /&lt;bucket&gt;/&lt;key&gt; # Delete object
HEAD /&lt;bucket&gt;/&lt;key&gt; # Object metadata
POST /&lt;bucket&gt;/&lt;key&gt;?select # SQL query (SelectObjectContent)
# Object Configuration
GET|PUT|DELETE /&lt;bucket&gt;/&lt;key&gt;?tagging # Object tags
GET|PUT /&lt;bucket&gt;/&lt;key&gt;?acl # Object ACL
GET|PUT /&lt;bucket&gt;/&lt;key&gt;?retention # Object retention
GET|PUT /&lt;bucket&gt;/&lt;key&gt;?legal-hold # Legal hold
# Multipart Upload
POST /&lt;bucket&gt;/&lt;key&gt;?uploads # Initiate
PUT /&lt;bucket&gt;/&lt;key&gt;?uploadId=X&amp;partNumber=N # Upload part
POST /&lt;bucket&gt;/&lt;key&gt;?uploadId=X # Complete
DELETE /&lt;bucket&gt;/&lt;key&gt;?uploadId=X # Abort
GET /&lt;bucket&gt;/&lt;key&gt;?uploadId=X # List parts
# Copy (via x-amz-copy-source header)
PUT /&lt;bucket&gt;/&lt;key&gt; # CopyObject
PUT /&lt;bucket&gt;/&lt;key&gt;?uploadId&amp;partNumber # UploadPartCopy
# Admin API
GET|PUT /admin/site # Local site config
GET /admin/sites # List peers
POST /admin/sites # Register peer
GET|PUT|DELETE /admin/sites/&lt;id&gt; # Manage peer
GET /admin/sites/&lt;id&gt;/health # Peer health
GET /admin/topology # Cluster topology
GET|POST|PUT|DELETE /admin/website-domains # Domain mappings
# KMS API
GET|POST /kms/keys # List / Create keys
GET|DELETE /kms/keys/&lt;id&gt; # Get / Delete key
POST /kms/keys/&lt;id&gt;/enable # Enable key
POST /kms/keys/&lt;id&gt;/disable # Disable key
POST /kms/keys/&lt;id&gt;/rotate # Rotate key
POST /kms/encrypt # Encrypt data
POST /kms/decrypt # Decrypt data
POST /kms/generate-data-key # Generate data key
POST /kms/generate-random # Generate random bytes</code></pre>
</div>
</article>
</div>
<div class="col-xl-4 docs-sidebar-col">
<aside class="card shadow-sm docs-sidebar">
@@ -2654,11 +2224,6 @@ POST /kms/generate-random # Generate random bytes</code></pre>
<li><a href="#acls">Access Control Lists</a></li>
<li><a href="#tagging">Object &amp; Bucket Tagging</a></li>
<li><a href="#website-hosting">Static Website Hosting</a></li>
<li><a href="#cors-config">CORS Configuration</a></li>
<li><a href="#post-object">PostObject (Form Upload)</a></li>
<li><a href="#list-objects-v2">List Objects API v2</a></li>
<li><a href="#upgrading">Upgrading &amp; Updates</a></li>
<li><a href="#api-matrix">Full API Reference</a></li>
</ul>
<div class="docs-sidebar-callouts">
<div>