20 Commits

SHA1 Message Date
c470cfb576 MyFSIO v0.2.7 Release
Reviewed-on: #19
2026-02-09 12:22:37 +00:00
4ecd32a554 Fix empty UI on large bucket first load: keep loading row during streaming, add progress indicator, throttle renders 2026-02-09 19:29:50 +08:00
aa6d7c4d28 Optimize replication failure caching, batch UI auth checks, add bulk download size limit, background parent cleanup 2026-02-09 18:23:45 +08:00
6e6d6d32bf Optimize KMS: cache AESGCM instance, remove duplicate get_provider 2026-02-09 17:01:19 +08:00
54705ab9c4 Fix Content-Length mismatch on range requests (206 Partial Content) 2026-02-06 16:14:35 +08:00
d96955deee MyFSIO v0.2.6 Release
Reviewed-on: #18
2026-02-05 16:18:03 +00:00
77a46d0725 Binary run fix 2026-02-05 23:49:36 +08:00
0f750b9d89 Optimize object browser for large listings on slow networks 2026-02-05 22:56:00 +08:00
e0dee9db36 Fix UI object browser not showing objects uploaded via S3 API 2026-02-05 22:22:59 +08:00
126657c99f Further debugging of object browser object count delay 2026-02-05 21:45:02 +08:00
07fb1ac773 Fix cross-process cache invalidation on Windows using version counter instead of mtime 2026-02-05 21:32:40 +08:00
147962e1dd Further debugging of object browser object count delay 2026-02-05 21:18:35 +08:00
2643a79121 Debug object browser object count delay 2026-02-05 21:08:18 +08:00
e9a035827b Add _touch_cache_marker for UI object count delay issue 2026-02-05 20:56:42 +08:00
033b8a82be Fix error handlers for API mode; distinguish files from directories in object lookup; fix UI not showing newly uploaded objects by adding Cache-Control headers 2026-02-05 20:44:11 +08:00
e76c311231 Update install/uninstall scripts with new config options and credential capture 2026-02-05 19:21:18 +08:00
cbdf1a27c8 Pin Dockerfile Python version to 3.14.3 2026-02-05 19:11:42 +08:00
4a60cb269a Update python version in Dockerfile 2026-02-05 19:11:00 +08:00
ebe7f6222d Fix hardcoded session secret key TTL 2026-02-05 19:08:18 +08:00
70b61fd8e6 Further optimize CPU usage; improve security and performance; 4 bug fixes. 2026-02-05 17:45:34 +08:00
19 changed files with 585 additions and 202 deletions

View File

@@ -1,12 +1,10 @@
# syntax=docker/dockerfile:1.7
FROM python:3.12.12-slim
FROM python:3.14.3-slim
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1
WORKDIR /app
# Install build deps for any wheels that need compilation, then clean up
RUN apt-get update \
&& apt-get install -y --no-install-recommends build-essential \
&& rm -rf /var/lib/apt/lists/*
@@ -16,10 +14,8 @@ RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Make entrypoint executable
RUN chmod +x docker-entrypoint.sh
# Create data directory and set permissions
RUN mkdir -p /app/data \
&& useradd -m -u 1000 myfsio \
&& chown -R myfsio:myfsio /app

View File

@@ -263,11 +263,37 @@ def create_app(
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
wants_html = request.accept_mimetypes.accept_html
path = request.path or ""
if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
return render_template('500.html'), 500
error_xml = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<Error>'
'<Code>InternalError</Code>'
'<Message>An internal server error occurred</Message>'
f'<Resource>{path}</Resource>'
f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
'</Error>'
)
return error_xml, 500, {'Content-Type': 'application/xml'}
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
return render_template('csrf_error.html', reason=e.description), 400
wants_html = request.accept_mimetypes.accept_html
path = request.path or ""
if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
return render_template('csrf_error.html', reason=e.description), 400
error_xml = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<Error>'
'<Code>CSRFError</Code>'
f'<Message>{e.description}</Message>'
f'<Resource>{path}</Resource>'
f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
'</Error>'
)
return error_xml, 400, {'Content-Type': 'application/xml'}
@app.template_filter("filesizeformat")
def filesizeformat(value: int) -> str:

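This hunk teaches the error handlers to content-negotiate: browser requests to UI paths still get rendered HTML pages, while API clients receive S3-style XML error documents. A minimal, self-contained sketch of the same pattern, assuming Flask; the /boom route and inline HTML are illustrative only:

# Content-negotiated error handling, as in the diff above (sketch).
from flask import Flask, request

app = Flask(__name__)

@app.route("/boom")
def boom():
    raise RuntimeError("simulated failure")  # triggers the 500 handler

@app.errorhandler(500)
def internal_error(error):
    # Browsers on UI paths get HTML; everyone else gets S3-style XML.
    if request.accept_mimetypes.accept_html and request.path.startswith("/ui"):
        return "<h1>Something went wrong</h1>", 500
    xml = ('<?xml version="1.0" encoding="UTF-8"?><Error>'
           '<Code>InternalError</Code>'
           '<Message>An internal server error occurred</Message></Error>')
    return xml, 500, {"Content-Type": "application/xml"}

client = app.test_client()
resp = client.get("/boom", headers={"Accept": "application/xml"})
print(resp.status_code, resp.headers["Content-Type"])  # 500 application/xml
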
View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import ipaddress
import json
import logging
import re
import socket
@@ -354,6 +355,10 @@ def update_peer_site(site_id: str):
if region_error:
return _json_error("ValidationError", region_error, 400)
if "connection_id" in payload:
if payload["connection_id"] and not _connections().get(payload["connection_id"]):
return _json_error("ValidationError", f"Connection '{payload['connection_id']}' not found", 400)
peer = PeerSite(
site_id=site_id,
endpoint=payload.get("endpoint", existing.endpoint),

View File

@@ -36,10 +36,11 @@ class GzipMiddleware:
content_type = None
content_length = None
should_compress = False
is_streaming = False
exc_info_holder = [None]
def custom_start_response(status: str, headers: List[Tuple[str, str]], exc_info=None):
nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress
nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress, is_streaming
response_started = True
status_code = int(status.split(' ', 1)[0])
response_headers = list(headers)
@@ -54,6 +55,9 @@ class GzipMiddleware:
elif name_lower == 'content-encoding':
should_compress = False
return start_response(status, headers, exc_info)
elif name_lower == 'x-stream-response':
is_streaming = True
return start_response(status, headers, exc_info)
if content_type and content_type in COMPRESSIBLE_MIMES:
if content_length is None or content_length >= self.min_size:
@@ -61,7 +65,12 @@ class GzipMiddleware:
return None
response_body = b''.join(self.app(environ, custom_start_response))
app_iter = self.app(environ, custom_start_response)
if is_streaming:
return app_iter
response_body = b''.join(app_iter)
if not response_started:
return [response_body]

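The middleware change matters because a buffering gzip wrapper would otherwise join a streaming response into one byte string, stalling server-sent events until the stream ends. A minimal sketch of the bypass using the X-Stream-Response marker as in the diff (class and parameter names here are illustrative, and the write() callable of real WSGI is omitted):

import gzip

class TinyGzipMiddleware:
    def __init__(self, app, min_size=500):
        self.app = app
        self.min_size = min_size

    def __call__(self, environ, start_response):
        is_streaming = False
        captured = {}

        def sr(status, headers, exc_info=None):
            nonlocal is_streaming
            if any(k.lower() == "x-stream-response" for k, _ in headers):
                is_streaming = True
                return start_response(status, headers, exc_info)
            captured["status"], captured["headers"] = status, headers
            return None  # sketch: real WSGI would return a write() callable

        app_iter = self.app(environ, sr)
        if is_streaming:
            return app_iter  # pass chunks through untouched, no buffering
        body = b"".join(app_iter)
        headers = captured["headers"]
        if len(body) >= self.min_size:
            body = gzip.compress(body)
            headers = [(k, v) for k, v in headers
                       if k.lower() != "content-length"]
            headers += [("Content-Encoding", "gzip"),
                        ("Content-Length", str(len(body)))]
        start_response(captured["status"], headers)
        return [body]
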
View File

@@ -309,6 +309,18 @@ class IamService:
if not self._is_allowed(principal, normalized, action):
raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
self._maybe_reload()
bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
normalized_actions = {a: self._normalize_action(a) for a in actions}
results: Dict[str, bool] = {}
for original, canonical in normalized_actions.items():
if canonical not in ALLOWED_ACTIONS:
results[original] = False
else:
results[original] = self._is_allowed(principal, bucket_name, canonical)
return results
def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]
@@ -529,11 +541,13 @@ class IamService:
return candidate if candidate in ALLOWED_ACTIONS else ""
def _write_default(self) -> None:
access_key = secrets.token_hex(12)
secret_key = secrets.token_urlsafe(32)
default = {
"users": [
{
"access_key": "localadmin",
"secret_key": "localadmin",
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
@@ -542,6 +556,14 @@ class IamService:
]
}
self.config_path.write_text(json.dumps(default, indent=2))
print(f"\n{'='*60}")
print("MYFSIO FIRST RUN - ADMIN CREDENTIALS GENERATED")
print(f"{'='*60}")
print(f"Access Key: {access_key}")
print(f"Secret Key: {secret_key}")
print(f"{'='*60}")
print(f"Missed this? Check: {self.config_path}")
print(f"{'='*60}\n")
def _generate_access_key(self) -> str:
return secrets.token_hex(8)

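check_permissions batches several authorization questions into one call (and one policy-reload check) instead of a try/except per action. A toy illustration of the interface, with a stand-in DemoIam class rather than the repo's IamService:

from typing import Dict, Iterable

ALLOWED_ACTIONS = {"read", "write", "policy", "lifecycle", "cors", "replication"}

class DemoIam:
    def __init__(self, grants: Dict[str, set]):
        self.grants = grants  # bucket -> set of allowed actions

    def check_permissions(self, bucket: str, actions: Iterable[str]) -> Dict[str, bool]:
        # One pass answers every action; unknown actions are simply denied.
        allowed = self.grants.get(bucket, set()) | self.grants.get("*", set())
        return {a: (a in ALLOWED_ACTIONS and a in allowed) for a in actions}

iam = DemoIam({"photos": {"read", "lifecycle"}})
perms = iam.check_permissions("photos", ["policy", "lifecycle", "cors"])
print(perms)  # {'policy': False, 'lifecycle': True, 'cors': False}
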
View File

@@ -160,6 +160,7 @@ class KMSManager:
self.generate_data_key_max_bytes = generate_data_key_max_bytes
self._keys: Dict[str, KMSKey] = {}
self._master_key: bytes | None = None
self._master_aesgcm: AESGCM | None = None
self._loaded = False
@property
@@ -191,6 +192,7 @@ class KMSManager:
msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
self._master_aesgcm = AESGCM(self._master_key)
return self._master_key
def _load_keys(self) -> None:
@@ -231,18 +233,16 @@ class KMSManager:
_set_secure_file_permissions(self.keys_path)
def _encrypt_key_material(self, key_material: bytes) -> bytes:
"""Encrypt key material with the master key."""
aesgcm = AESGCM(self.master_key)
_ = self.master_key
nonce = secrets.token_bytes(12)
ciphertext = aesgcm.encrypt(nonce, key_material, None)
ciphertext = self._master_aesgcm.encrypt(nonce, key_material, None)
return nonce + ciphertext
def _decrypt_key_material(self, encrypted: bytes) -> bytes:
"""Decrypt key material with the master key."""
aesgcm = AESGCM(self.master_key)
_ = self.master_key
nonce = encrypted[:12]
ciphertext = encrypted[12:]
return aesgcm.decrypt(nonce, ciphertext, None)
return self._master_aesgcm.decrypt(nonce, ciphertext, None)
def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
"""Create a new KMS key."""
@@ -404,22 +404,6 @@ class KMSManager:
plaintext, _ = self.decrypt(encrypted_key, context)
return plaintext
def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
"""Get an encryption provider for a specific key."""
self._load_keys()
if key_id is None:
if not self._keys:
key = self.create_key("Default KMS Key")
key_id = key.key_id
else:
key_id = next(iter(self._keys.keys()))
if key_id not in self._keys:
raise EncryptionError(f"Key not found: {key_id}")
return KMSEncryptionProvider(self, key_id)
def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
source_context: Dict[str, str] | None = None,
destination_context: Dict[str, str] | None = None) -> bytes:

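Caching the AESGCM instance pays off because constructing AESGCM(key) re-runs the AES key schedule on every call. A standalone micro-benchmark sketch, assuming the cryptography package is installed; absolute numbers will vary by machine:

import secrets
import timeit
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)
data = b"x" * 1024

def fresh_each_call():
    # Rebuilds the cipher (and key schedule) on every encryption.
    AESGCM(key).encrypt(secrets.token_bytes(12), data, None)

cached = AESGCM(key)
def reuse_instance():
    # Reuses one cipher object, as the diff above now does.
    cached.encrypt(secrets.token_bytes(12), data, None)

print("new instance:", timeit.timeit(fresh_each_call, number=2000))
print("cached      :", timeit.timeit(reuse_instance, number=2000))
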
View File

@@ -176,11 +176,12 @@ class ReplicationFailureStore:
self.storage_root = storage_root
self.max_failures_per_bucket = max_failures_per_bucket
self._lock = threading.Lock()
self._cache: Dict[str, List[ReplicationFailure]] = {}
def _get_failures_path(self, bucket_name: str) -> Path:
return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "replication_failures.json"
def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
def _load_from_disk(self, bucket_name: str) -> List[ReplicationFailure]:
path = self._get_failures_path(bucket_name)
if not path.exists():
return []
@@ -192,7 +193,7 @@ class ReplicationFailureStore:
logger.error(f"Failed to load replication failures for {bucket_name}: {e}")
return []
def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
def _save_to_disk(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
path = self._get_failures_path(bucket_name)
path.parent.mkdir(parents=True, exist_ok=True)
data = {"failures": [f.to_dict() for f in failures[:self.max_failures_per_bucket]]}
@@ -202,6 +203,18 @@ class ReplicationFailureStore:
except OSError as e:
logger.error(f"Failed to save replication failures for {bucket_name}: {e}")
def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
if bucket_name in self._cache:
return list(self._cache[bucket_name])
failures = self._load_from_disk(bucket_name)
self._cache[bucket_name] = failures
return list(failures)
def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
trimmed = failures[:self.max_failures_per_bucket]
self._cache[bucket_name] = trimmed
self._save_to_disk(bucket_name, trimmed)
def add_failure(self, bucket_name: str, failure: ReplicationFailure) -> None:
with self._lock:
failures = self.load_failures(bucket_name)
@@ -227,6 +240,7 @@ class ReplicationFailureStore:
def clear_failures(self, bucket_name: str) -> None:
with self._lock:
self._cache.pop(bucket_name, None)
path = self._get_failures_path(bucket_name)
if path.exists():
path.unlink()

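This converts the failure store into a write-through cache: reads are served from memory after the first disk load, and saves update memory before persisting. A generic sketch of the pattern (file layout and names are illustrative, not the repo's):

import json
import threading
from pathlib import Path
from typing import Dict, List

class WriteThroughStore:
    def __init__(self, root: Path):
        self.root = root
        self._cache: Dict[str, List[dict]] = {}
        self._lock = threading.Lock()

    def _path(self, name: str) -> Path:
        return self.root / f"{name}.json"

    def load(self, name: str) -> List[dict]:
        if name in self._cache:
            return list(self._cache[name])  # copy so callers can't mutate the cache
        p = self._path(name)
        items = json.loads(p.read_text()) if p.exists() else []
        self._cache[name] = items
        return list(items)

    def save(self, name: str, items: List[dict]) -> None:
        with self._lock:
            self._cache[name] = items                       # memory first...
            self._path(name).write_text(json.dumps(items))  # ...then disk
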
View File

@@ -999,12 +999,14 @@ def _apply_object_headers(
etag: str,
) -> None:
if file_stat is not None:
response.headers["Content-Length"] = str(file_stat.st_size)
if response.status_code != 206:
response.headers["Content-Length"] = str(file_stat.st_size)
response.headers["Last-Modified"] = http_date(file_stat.st_mtime)
response.headers["ETag"] = f'"{etag}"'
response.headers["Accept-Ranges"] = "bytes"
for key, value in (metadata or {}).items():
response.headers[f"X-Amz-Meta-{key}"] = value
safe_value = _sanitize_header_value(str(value))
response.headers[f"X-Amz-Meta-{key}"] = safe_value
def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
@@ -2342,10 +2344,12 @@ def _post_object(bucket_name: str) -> Response:
success_action_redirect = request.form.get("success_action_redirect")
if success_action_redirect:
allowed_hosts = current_app.config.get("ALLOWED_REDIRECT_HOSTS", [])
if not allowed_hosts:
allowed_hosts = [request.host]
parsed = urlparse(success_action_redirect)
if parsed.scheme not in ("http", "https"):
return _error_response("InvalidArgument", "Redirect URL must use http or https", 400)
if allowed_hosts and parsed.netloc not in allowed_hosts:
if parsed.netloc not in allowed_hosts:
return _error_response("InvalidArgument", "Redirect URL host not allowed", 400)
redirect_url = f"{success_action_redirect}?bucket={bucket_name}&key={quote(object_key)}&etag={meta.etag}"
return Response(status=303, headers={"Location": redirect_url})
@@ -2773,9 +2777,14 @@ def object_handler(bucket_name: str, object_key: str):
except StorageError as exc:
return _error_response("InternalError", str(exc), 500)
else:
stat = path.stat()
file_size = stat.st_size
etag = storage._compute_etag(path)
try:
stat = path.stat()
file_size = stat.st_size
etag = storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
return _error_response("InternalError", f"Failed to access object: {exc}", 500)
if range_header:
try:
@@ -2816,13 +2825,22 @@ def object_handler(bucket_name: str, object_key: str):
except StorageError as exc:
return _error_response("InternalError", str(exc), 500)
else:
stat = path.stat()
response = Response(status=200)
etag = storage._compute_etag(path)
try:
stat = path.stat()
response = Response(status=200)
etag = storage._compute_etag(path)
except PermissionError:
return _error_response("AccessDenied", "Permission denied accessing object", 403)
except OSError as exc:
return _error_response("InternalError", f"Failed to access object: {exc}", 500)
response.headers["Content-Type"] = mimetype
logged_bytes = 0
_apply_object_headers(response, file_stat=path.stat() if not is_encrypted else None, metadata=metadata, etag=etag)
try:
file_stat = path.stat() if not is_encrypted else None
except (PermissionError, OSError):
file_stat = None
_apply_object_headers(response, file_stat=file_stat, metadata=metadata, etag=etag)
if request.method == "GET":
response_overrides = {

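The 206 fix above comes down to one rule: on a partial response, Content-Length must describe the slice actually sent, not the whole object, or strict clients abort with a length mismatch. A hedged sketch of the guard (the helper name is hypothetical):

def apply_length_headers(headers: dict, status_code: int, full_size: int,
                         range_start: int | None = None,
                         range_end: int | None = None) -> dict:
    if status_code == 206 and range_start is not None and range_end is not None:
        # Partial content: advertise only the slice being sent.
        headers["Content-Length"] = str(range_end - range_start + 1)
        headers["Content-Range"] = f"bytes {range_start}-{range_end}/{full_size}"
    else:
        headers["Content-Length"] = str(full_size)
    return headers

print(apply_length_headers({}, 206, 10_000, 0, 499))
# {'Content-Length': '500', 'Content-Range': 'bytes 0-499/10000'}
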
View File

@@ -18,6 +18,18 @@ class EphemeralSecretStore:
self._store[token] = (payload, expires_at)
return token
def peek(self, token: str | None) -> Any | None:
if not token:
return None
entry = self._store.get(token)
if not entry:
return None
payload, expires_at = entry
if expires_at < time.time():
self._store.pop(token, None)
return None
return payload
def pop(self, token: str | None) -> Any | None:
if not token:
return None

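peek() gives a non-destructive read alongside the existing one-shot pop(), which the UI uses to resolve credentials on every request without consuming the token. A self-contained sketch of such a store; the interface mirrors the diff, the internals are simplified:

import time
from typing import Any, Dict, Tuple

class TTLStore:
    def __init__(self):
        self._store: Dict[str, Tuple[Any, float]] = {}

    def remember(self, token: str, payload: Any, ttl: float) -> None:
        self._store[token] = (payload, time.time() + ttl)

    def peek(self, token: str | None) -> Any | None:
        # Non-destructive read; expired entries are pruned lazily.
        if not token or token not in self._store:
            return None
        payload, expires_at = self._store[token]
        if expires_at < time.time():
            self._store.pop(token, None)
            return None
        return payload

    def pop(self, token: str | None) -> Any | None:
        value = self.peek(token)
        if token:
            self._store.pop(token, None)
        return value

s = TTLStore()
s.remember("t1", {"access_key": "AK"}, ttl=60)
print(s.peek("t1"))  # {'access_key': 'AK'}
print(s.peek("t1"))  # still there: peek does not consume
print(s.pop("t1"))   # {'access_key': 'AK'}
print(s.peek("t1"))  # None: pop consumed it
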
View File

@@ -11,6 +11,7 @@ import time
import unicodedata
import uuid
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
@@ -177,7 +178,7 @@ class ObjectStorage:
self.root = Path(root)
self.root.mkdir(parents=True, exist_ok=True)
self._ensure_system_roots()
self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float]] = OrderedDict()
self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float, float]] = OrderedDict()
self._cache_lock = threading.Lock()
self._bucket_locks: Dict[str, threading.Lock] = {}
self._cache_version: Dict[str, int] = {}
@@ -186,6 +187,8 @@ class ObjectStorage:
self._cache_ttl = cache_ttl
self._object_cache_max_size = object_cache_max_size
self._object_key_max_length_bytes = object_key_max_length_bytes
self._sorted_key_cache: Dict[str, tuple[list[str], int]] = {}
self._cleanup_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="ParentCleanup")
def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
"""Get or create a lock for a specific bucket. Reduces global lock contention."""
@@ -243,10 +246,15 @@ class ObjectStorage:
raise BucketNotFoundError("Bucket does not exist")
cache_path = self._system_bucket_root(bucket_name) / "stats.json"
cached_stats = None
cache_fresh = False
if cache_path.exists():
try:
if time.time() - cache_path.stat().st_mtime < cache_ttl:
return json.loads(cache_path.read_text(encoding="utf-8"))
cache_fresh = time.time() - cache_path.stat().st_mtime < cache_ttl
cached_stats = json.loads(cache_path.read_text(encoding="utf-8"))
if cache_fresh:
return cached_stats
except (OSError, json.JSONDecodeError):
pass
@@ -255,40 +263,50 @@ class ObjectStorage:
version_count = 0
version_bytes = 0
for path in bucket_path.rglob("*"):
if path.is_file():
rel = path.relative_to(bucket_path)
if not rel.parts:
continue
top_folder = rel.parts[0]
if top_folder not in self.INTERNAL_FOLDERS:
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
for path in versions_root.rglob("*.bin"):
try:
for path in bucket_path.rglob("*"):
if path.is_file():
stat = path.stat()
version_count += 1
version_bytes += stat.st_size
rel = path.relative_to(bucket_path)
if not rel.parts:
continue
top_folder = rel.parts[0]
if top_folder not in self.INTERNAL_FOLDERS:
stat = path.stat()
object_count += 1
total_bytes += stat.st_size
versions_root = self._bucket_versions_root(bucket_name)
if versions_root.exists():
for path in versions_root.rglob("*.bin"):
if path.is_file():
stat = path.stat()
version_count += 1
version_bytes += stat.st_size
except OSError:
if cached_stats is not None:
return cached_stats
raise
existing_serial = 0
if cached_stats is not None:
existing_serial = cached_stats.get("_cache_serial", 0)
stats = {
"objects": object_count,
"bytes": total_bytes,
"version_count": version_count,
"version_bytes": version_bytes,
"total_objects": object_count + version_count,
"total_bytes": total_bytes + version_bytes,
"total_bytes": total_bytes + version_bytes,
"_cache_serial": existing_serial,
}
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
cache_path.write_text(json.dumps(stats), encoding="utf-8")
except OSError:
pass
return stats
def _invalidate_bucket_stats_cache(self, bucket_id: str) -> None:
@@ -299,6 +317,39 @@ class ObjectStorage:
except OSError:
pass
def _update_bucket_stats_cache(
self,
bucket_id: str,
*,
bytes_delta: int = 0,
objects_delta: int = 0,
version_bytes_delta: int = 0,
version_count_delta: int = 0,
) -> None:
"""Incrementally update cached bucket statistics instead of invalidating.
This avoids expensive full directory scans on every PUT/DELETE by
adjusting the cached values directly. Also signals cross-process cache
invalidation by incrementing _cache_serial.
"""
cache_path = self._system_bucket_root(bucket_id) / "stats.json"
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
if cache_path.exists():
data = json.loads(cache_path.read_text(encoding="utf-8"))
else:
data = {"objects": 0, "bytes": 0, "version_count": 0, "version_bytes": 0, "total_objects": 0, "total_bytes": 0, "_cache_serial": 0}
data["objects"] = max(0, data.get("objects", 0) + objects_delta)
data["bytes"] = max(0, data.get("bytes", 0) + bytes_delta)
data["version_count"] = max(0, data.get("version_count", 0) + version_count_delta)
data["version_bytes"] = max(0, data.get("version_bytes", 0) + version_bytes_delta)
data["total_objects"] = max(0, data.get("total_objects", 0) + objects_delta + version_count_delta)
data["total_bytes"] = max(0, data.get("total_bytes", 0) + bytes_delta + version_bytes_delta)
data["_cache_serial"] = data.get("_cache_serial", 0) + 1
cache_path.write_text(json.dumps(data), encoding="utf-8")
except (OSError, json.JSONDecodeError):
pass
def delete_bucket(self, bucket_name: str) -> None:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
@@ -333,22 +384,35 @@ class ObjectStorage:
Returns:
ListObjectsResult with objects, truncation status, and continuation token
"""
import bisect
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
raise BucketNotFoundError("Bucket does not exist")
bucket_id = bucket_path.name
object_cache = self._get_object_cache(bucket_id, bucket_path)
all_keys = sorted(object_cache.keys())
cache_version = self._cache_version.get(bucket_id, 0)
cached_entry = self._sorted_key_cache.get(bucket_id)
if cached_entry and cached_entry[1] == cache_version:
all_keys = cached_entry[0]
else:
all_keys = sorted(object_cache.keys())
self._sorted_key_cache[bucket_id] = (all_keys, cache_version)
if prefix:
all_keys = [k for k in all_keys if k.startswith(prefix)]
lo = bisect.bisect_left(all_keys, prefix)
hi = len(all_keys)
for i in range(lo, len(all_keys)):
if not all_keys[i].startswith(prefix):
hi = i
break
all_keys = all_keys[lo:hi]
total_count = len(all_keys)
start_index = 0
if continuation_token:
import bisect
start_index = bisect.bisect_right(all_keys, continuation_token)
if start_index >= total_count:
return ListObjectsResult(
@@ -356,8 +420,8 @@ class ObjectStorage:
is_truncated=False,
next_continuation_token=None,
total_count=total_count,
)
end_index = start_index + max_keys
keys_slice = all_keys[start_index:end_index]
is_truncated = end_index < total_count
@@ -403,7 +467,9 @@ class ObjectStorage:
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
archived_version_size = 0
if self._is_versioning_enabled(bucket_path) and is_overwrite:
archived_version_size = existing_size
self._archive_current_version(bucket_id, safe_key, reason="overwrite")
tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
@@ -416,11 +482,10 @@ class ObjectStorage:
shutil.copyfileobj(_HashingReader(stream, checksum), target)
new_size = tmp_path.stat().st_size
size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1
if enforce_quota:
size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
@@ -432,7 +497,7 @@ class ObjectStorage:
quota_check["quota"],
quota_check["usage"],
)
shutil.move(str(tmp_path), str(destination))
finally:
@@ -448,7 +513,13 @@ class ObjectStorage:
combined_meta = {**internal_meta, **(metadata or {})}
self._write_metadata(bucket_id, safe_key, combined_meta)
self._invalidate_bucket_stats_cache(bucket_id)
self._update_bucket_stats_cache(
bucket_id,
bytes_delta=size_delta,
objects_delta=object_delta,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
obj_meta = ObjectMeta(
key=safe_key.as_posix(),
@@ -463,7 +534,7 @@ class ObjectStorage:
def get_object_path(self, bucket_name: str, object_key: str) -> Path:
path = self._object_path(bucket_name, object_key)
if not path.exists():
if not path.is_file():
raise ObjectNotFoundError("Object not found")
return path
@@ -475,11 +546,14 @@ class ObjectStorage:
return self._read_metadata(bucket_path.name, safe_key) or {}
def _cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
"""Remove empty parent directories up to (but not including) stop_at.
"""Remove empty parent directories in a background thread.
On Windows/OneDrive, directories may be locked briefly after file deletion.
This method retries with a small delay to handle that case.
Running this in the background avoids blocking the request thread with retries.
"""
self._cleanup_executor.submit(self._do_cleanup_empty_parents, path, stop_at)
def _do_cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
for parent in path.parents:
if parent == stop_at:
break
@@ -487,7 +561,7 @@ class ObjectStorage:
try:
if parent.exists() and not any(parent.iterdir()):
parent.rmdir()
break
except OSError:
if attempt < 2:
time.sleep(0.1)
@@ -498,15 +572,24 @@ class ObjectStorage:
path = self._object_path(bucket_name, object_key)
if not path.exists():
return
deleted_size = path.stat().st_size
safe_key = path.relative_to(bucket_path)
bucket_id = bucket_path.name
archived_version_size = 0
if self._is_versioning_enabled(bucket_path):
archived_version_size = deleted_size
self._archive_current_version(bucket_id, safe_key, reason="delete")
rel = path.relative_to(bucket_path)
self._safe_unlink(path)
self._delete_metadata(bucket_id, rel)
self._invalidate_bucket_stats_cache(bucket_id)
self._update_bucket_stats_cache(
bucket_id,
bytes_delta=-deleted_size,
objects_delta=-1,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None)
self._cleanup_empty_parents(path, bucket_path)
@@ -828,7 +911,12 @@ class ObjectStorage:
if not isinstance(metadata, dict):
metadata = {}
destination = bucket_path / safe_key
if self._is_versioning_enabled(bucket_path) and destination.exists():
restored_size = data_path.stat().st_size
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
archived_version_size = 0
if self._is_versioning_enabled(bucket_path) and is_overwrite:
archived_version_size = existing_size
self._archive_current_version(bucket_id, safe_key, reason="restore-overwrite")
destination.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(data_path, destination)
@@ -837,7 +925,13 @@ class ObjectStorage:
else:
self._delete_metadata(bucket_id, safe_key)
stat = destination.stat()
self._invalidate_bucket_stats_cache(bucket_id)
self._update_bucket_stats_cache(
bucket_id,
bytes_delta=restored_size - existing_size,
objects_delta=0 if is_overwrite else 1,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
return ObjectMeta(
key=safe_key.as_posix(),
size=stat.st_size,
@@ -861,6 +955,7 @@ class ObjectStorage:
meta_path = legacy_version_dir / f"{version_id}.json"
if not data_path.exists() and not meta_path.exists():
raise StorageError(f"Version {version_id} not found")
deleted_version_size = data_path.stat().st_size if data_path.exists() else 0
if data_path.exists():
data_path.unlink()
if meta_path.exists():
@@ -868,6 +963,12 @@ class ObjectStorage:
parent = data_path.parent
if parent.exists() and not any(parent.iterdir()):
parent.rmdir()
if deleted_version_size > 0:
self._update_bucket_stats_cache(
bucket_id,
version_bytes_delta=-deleted_version_size,
version_count_delta=-1,
)
def list_orphaned_objects(self, bucket_name: str) -> List[Dict[str, Any]]:
bucket_path = self._bucket_path(bucket_name)
@@ -1164,14 +1265,14 @@ class ObjectStorage:
safe_key = self._sanitize_object_key(manifest["object_key"], self._object_key_max_length_bytes)
destination = bucket_path / safe_key
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
size_delta = total_size - existing_size
object_delta = 0 if is_overwrite else 1
versioning_enabled = self._is_versioning_enabled(bucket_path)
if enforce_quota:
size_delta = total_size - existing_size
object_delta = 0 if is_overwrite else 1
quota_check = self.check_quota(
bucket_name,
additional_bytes=max(0, size_delta),
@@ -1183,14 +1284,16 @@ class ObjectStorage:
quota_check["quota"],
quota_check["usage"],
)
destination.parent.mkdir(parents=True, exist_ok=True)
lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
archived_version_size = 0
try:
with _atomic_lock_file(lock_file_path):
if self._is_versioning_enabled(bucket_path) and destination.exists():
if versioning_enabled and destination.exists():
archived_version_size = destination.stat().st_size
self._archive_current_version(bucket_id, safe_key, reason="overwrite")
checksum = hashlib.md5()
with destination.open("wb") as target:
@@ -1210,7 +1313,13 @@ class ObjectStorage:
shutil.rmtree(upload_root, ignore_errors=True)
self._invalidate_bucket_stats_cache(bucket_id)
self._update_bucket_stats_cache(
bucket_id,
bytes_delta=size_delta,
objects_delta=object_delta,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
stat = destination.stat()
etag = checksum.hexdigest()
@@ -1523,38 +1632,46 @@ class ObjectStorage:
Uses LRU eviction to prevent unbounded cache growth.
Thread-safe with per-bucket locks to reduce contention.
Checks stats.json for cross-process cache invalidation.
"""
now = time.time()
current_stats_mtime = self._get_cache_marker_mtime(bucket_id)
with self._cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
objects, timestamp = cached
if now - timestamp < self._cache_ttl:
objects, timestamp, cached_stats_mtime = cached
if now - timestamp < self._cache_ttl and current_stats_mtime == cached_stats_mtime:
self._object_cache.move_to_end(bucket_id)
return objects
cache_version = self._cache_version.get(bucket_id, 0)
bucket_lock = self._get_bucket_lock(bucket_id)
with bucket_lock:
current_stats_mtime = self._get_cache_marker_mtime(bucket_id)
with self._cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
objects, timestamp = cached
if now - timestamp < self._cache_ttl:
objects, timestamp, cached_stats_mtime = cached
if now - timestamp < self._cache_ttl and current_stats_mtime == cached_stats_mtime:
self._object_cache.move_to_end(bucket_id)
return objects
objects = self._build_object_cache(bucket_path)
new_stats_mtime = self._get_cache_marker_mtime(bucket_id)
with self._cache_lock:
current_version = self._cache_version.get(bucket_id, 0)
if current_version != cache_version:
objects = self._build_object_cache(bucket_path)
new_stats_mtime = self._get_cache_marker_mtime(bucket_id)
while len(self._object_cache) >= self._object_cache_max_size:
self._object_cache.popitem(last=False)
self._object_cache[bucket_id] = (objects, time.time())
self._object_cache[bucket_id] = (objects, time.time(), new_stats_mtime)
self._object_cache.move_to_end(bucket_id)
self._cache_version[bucket_id] = current_version + 1
self._sorted_key_cache.pop(bucket_id, None)
return objects
@@ -1562,6 +1679,7 @@ class ObjectStorage:
"""Invalidate the object cache and etag index for a bucket.
Increments version counter to signal stale reads.
Cross-process invalidation is handled by checking the stats.json cache marker (serial plus object count).
"""
with self._cache_lock:
self._object_cache.pop(bucket_id, None)
@@ -1573,19 +1691,37 @@ class ObjectStorage:
except OSError:
pass
def _get_cache_marker_mtime(self, bucket_id: str) -> float:
"""Get a cache marker combining serial and object count for cross-process invalidation.
Returns a combined value that changes if either _cache_serial or object count changes.
This handles cases where the serial was reset but object count differs.
"""
stats_path = self._system_bucket_root(bucket_id) / "stats.json"
try:
data = json.loads(stats_path.read_text(encoding="utf-8"))
serial = data.get("_cache_serial", 0)
count = data.get("objects", 0)
return float(serial * 1000000 + count)
except (OSError, json.JSONDecodeError):
return 0
def _update_object_cache_entry(self, bucket_id: str, key: str, meta: Optional[ObjectMeta]) -> None:
"""Update a single entry in the object cache instead of invalidating the whole cache.
This is a performance optimization - lazy update instead of full invalidation.
Cross-process invalidation is handled by checking the stats.json cache marker (serial plus object count).
"""
with self._cache_lock:
cached = self._object_cache.get(bucket_id)
if cached:
objects, timestamp = cached
objects, timestamp, stats_mtime = cached
if meta is None:
objects.pop(key, None)
else:
objects[key] = meta
self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1
self._sorted_key_cache.pop(bucket_id, None)
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
"""Pre-warm the object cache for specified buckets or all buckets.

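Two listing optimizations appear above: a sorted-key cache keyed by the bucket's version counter, and bisect-based prefix filtering that replaces a full linear scan. The prefix trick in isolation, as a pure sketch with no repo types:

import bisect
from typing import List

def keys_with_prefix(sorted_keys: List[str], prefix: str) -> List[str]:
    # First candidate: any key carrying the prefix sorts at or after the prefix.
    lo = bisect.bisect_left(sorted_keys, prefix)
    hi = len(sorted_keys)
    for i in range(lo, len(sorted_keys)):
        if not sorted_keys[i].startswith(prefix):
            hi = i  # keys are sorted, so the first miss ends the run
            break
    return sorted_keys[lo:hi]

keys = sorted(["a/1", "a/2", "b/1", "b/2", "c/1"])
print(keys_with_prefix(keys, "b/"))  # ['b/1', 'b/2']
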
app/ui.py (112 changed lines)
View File

@@ -220,13 +220,16 @@ def _bucket_access_descriptor(policy: dict[str, Any] | None) -> tuple[str, str]:
def _current_principal():
creds = session.get("credentials")
token = session.get("cred_token")
creds = _secret_store().peek(token) if token else None
if not creds:
return None
try:
return _iam().authenticate(creds["access_key"], creds["secret_key"])
except IamError:
session.pop("credentials", None)
session.pop("cred_token", None)
if token:
_secret_store().pop(token)
return None
@@ -251,7 +254,8 @@ def _authorize_ui(principal, bucket_name: str | None, action: str, *, object_key
def _api_headers() -> dict[str, str]:
creds = session.get("credentials") or {}
token = session.get("cred_token")
creds = _secret_store().peek(token) or {}
return {
"X-Access-Key": creds.get("access_key", ""),
"X-Secret-Key": creds.get("secret_key", ""),
@@ -296,7 +300,10 @@ def login():
except IamError as exc:
flash(_friendly_error_message(exc), "danger")
return render_template("login.html")
session["credentials"] = {"access_key": access_key, "secret_key": secret_key}
creds = {"access_key": access_key, "secret_key": secret_key}
ttl = int(current_app.permanent_session_lifetime.total_seconds())
token = _secret_store().remember(creds, ttl=ttl)
session["cred_token"] = token
session.permanent = True
flash(f"Welcome back, {principal.display_name}", "success")
return redirect(url_for("ui.buckets_overview"))
@@ -305,7 +312,9 @@ def login():
@ui_bp.post("/logout")
def logout():
session.pop("credentials", None)
token = session.pop("cred_token", None)
if token:
_secret_store().pop(token)
flash("Signed out", "info")
return redirect(url_for("ui.login"))
@@ -414,57 +423,25 @@ def bucket_detail(bucket_name: str):
},
indent=2,
)
can_edit_policy = False
if principal:
try:
_iam().authorize(principal, bucket_name, "policy")
can_edit_policy = True
except IamError:
can_edit_policy = False
iam = _iam()
bucket_perms = iam.check_permissions(
principal, bucket_name, ["policy", "lifecycle", "cors", "write", "replication"],
) if principal else {}
admin_perms = iam.check_permissions(
principal, None, ["iam:list_users"],
) if principal else {}
can_manage_lifecycle = False
if principal:
try:
_iam().authorize(principal, bucket_name, "lifecycle")
can_manage_lifecycle = True
except IamError:
can_manage_lifecycle = False
can_manage_cors = False
if principal:
try:
_iam().authorize(principal, bucket_name, "cors")
can_manage_cors = True
except IamError:
can_manage_cors = False
can_edit_policy = bucket_perms.get("policy", False)
can_manage_lifecycle = bucket_perms.get("lifecycle", False)
can_manage_cors = bucket_perms.get("cors", False)
can_manage_versioning = bucket_perms.get("write", False)
can_manage_replication = bucket_perms.get("replication", False)
is_replication_admin = admin_perms.get("iam:list_users", False)
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
can_manage_versioning = False
if principal:
try:
_iam().authorize(principal, bucket_name, "write")
can_manage_versioning = True
except IamError:
can_manage_versioning = False
can_manage_replication = False
if principal:
try:
_iam().authorize(principal, bucket_name, "replication")
can_manage_replication = True
except IamError:
can_manage_replication = False
is_replication_admin = False
if principal:
try:
_iam().authorize(principal, None, "iam:list_users")
is_replication_admin = True
except IamError:
is_replication_admin = False
replication_rule = _replication().get_rule(bucket_name)
connections = _connections().list() if (is_replication_admin or replication_rule) else []
@@ -480,12 +457,7 @@ def bucket_detail(bucket_name: str):
bucket_quota = storage.get_bucket_quota(bucket_name)
bucket_stats = storage.bucket_stats(bucket_name)
can_manage_quota = False
try:
_iam().authorize(principal, None, "iam:list_users")
can_manage_quota = True
except IamError:
pass
can_manage_quota = is_replication_admin
objects_api_url = url_for("ui.list_bucket_objects", bucket_name=bucket_name)
objects_stream_url = url_for("ui.stream_bucket_objects", bucket_name=bucket_name)
@@ -542,7 +514,10 @@ def list_bucket_objects(bucket_name: str):
except IamError as exc:
return jsonify({"error": str(exc)}), 403
max_keys = min(int(request.args.get("max_keys", 1000)), 100000)
try:
max_keys = min(int(request.args.get("max_keys", 1000)), 100000)
except ValueError:
return jsonify({"error": "max_keys must be an integer"}), 400
continuation_token = request.args.get("continuation_token") or None
prefix = request.args.get("prefix") or None
@@ -582,7 +557,7 @@ def list_bucket_objects(bucket_name: str):
"etag": obj.etag,
})
return jsonify({
response = jsonify({
"objects": objects_data,
"is_truncated": result.is_truncated,
"next_continuation_token": result.next_continuation_token,
@@ -601,6 +576,8 @@ def list_bucket_objects(bucket_name: str):
"metadata": metadata_template,
},
})
response.headers["Cache-Control"] = "no-store"
return response
@ui_bp.get("/buckets/<bucket_name>/objects/stream")
@@ -697,6 +674,7 @@ def stream_bucket_objects(bucket_name: str):
headers={
'Cache-Control': 'no-cache',
'X-Accel-Buffering': 'no',
'X-Stream-Response': 'true',
}
)
@@ -988,21 +966,33 @@ def bulk_download_objects(bucket_name: str):
unique_keys = list(dict.fromkeys(cleaned))
storage = _storage()
try:
_authorize_ui(principal, bucket_name, "read")
except IamError as exc:
return jsonify({"error": str(exc)}), 403
max_total_bytes = current_app.config.get("BULK_DOWNLOAD_MAX_BYTES", 1024 * 1024 * 1024)
total_size = 0
for key in unique_keys:
try:
path = storage.get_object_path(bucket_name, key)
total_size += path.stat().st_size
except (StorageError, OSError):
continue
if total_size > max_total_bytes:
limit_mb = max_total_bytes // (1024 * 1024)
return jsonify({"error": f"Total download size exceeds {limit_mb} MB limit. Select fewer objects."}), 400
buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
for key in unique_keys:
try:
_authorize_ui(principal, bucket_name, "read", object_key=key)
metadata = storage.get_object_metadata(bucket_name, key)
is_encrypted = "x-amz-server-side-encryption" in metadata
if is_encrypted and hasattr(storage, 'get_object_data'):
data, _ = storage.get_object_data(bucket_name, key)
zf.writestr(key, data)

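The ui.py changes stop storing raw credentials in the cookie-backed session; the session now carries only an opaque token resolved through the server-side ephemeral store. The flow in miniature (names assumed, store simplified to a dict):

import secrets

SECRET_STORE = {}  # token -> credentials, lives only on the server

def login(session: dict, access_key: str, secret_key: str) -> None:
    token = secrets.token_urlsafe(32)
    SECRET_STORE[token] = {"access_key": access_key, "secret_key": secret_key}
    session["cred_token"] = token  # the cookie sees only the token

def current_credentials(session: dict) -> dict | None:
    return SECRET_STORE.get(session.get("cred_token"))

def logout(session: dict) -> None:
    token = session.pop("cred_token", None)
    SECRET_STORE.pop(token, None)  # invalidate the server-side copy too

session = {}
login(session, "AKIA-EXAMPLE", "s3cr3t")
print(current_credentials(session)["access_key"])  # AKIA-EXAMPLE
logout(session)
print(current_credentials(session))  # None
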
View File

@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.2.5"
APP_VERSION = "0.2.7"
def get_version() -> str:

View File

@@ -619,13 +619,15 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
### Getting Started
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
2. Sign into the UI using those credentials, then open **IAM**:
1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. If you miss it, check the `iam.json` file directly—credentials are stored in plaintext.
2. Sign into the UI using the generated credentials, then open **IAM**:
- **Create user**: supply a display name and optional JSON inline policy array.
- **Rotate secret**: generates a new secret key; the UI surfaces it once.
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.
> **Breaking Change (v0.2.0+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.
### Authentication
The API expects every request to include authentication headers. The UI persists them in the Flask session after login.

run.py (5 changed lines)
View File

@@ -5,6 +5,7 @@ import argparse
import os
import sys
import warnings
import multiprocessing
from multiprocessing import Process
from pathlib import Path
@@ -87,6 +88,10 @@ def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None)
if __name__ == "__main__":
multiprocessing.freeze_support()
if _is_frozen():
multiprocessing.set_start_method("spawn", force=True)
parser = argparse.ArgumentParser(description="Run the S3 clone services.")
parser.add_argument("--mode", choices=["api", "ui", "both"], default="both")
parser.add_argument("--api-port", type=int, default=5000)

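The run.py guard matters for frozen builds: a PyInstaller-style binary re-executes itself to create child processes, so freeze_support() must run first and the start method must be spawn. A sketch under those assumptions, with _is_frozen approximated here:

import multiprocessing
import sys
from multiprocessing import Process

def _is_frozen() -> bool:
    # PyInstaller and similar tools set sys.frozen on the bundled executable.
    return getattr(sys, "frozen", False)

def worker(name: str) -> None:
    print(f"{name} started")

if __name__ == "__main__":
    multiprocessing.freeze_support()  # no-op outside frozen binaries
    if _is_frozen():
        multiprocessing.set_start_method("spawn", force=True)
    p = Process(target=worker, args=("api",))
    p.start()
    p.join()
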
View File

@@ -192,31 +192,86 @@ cat > "$INSTALL_DIR/myfsio.env" << EOF
# Generated by install.sh on $(date)
# Documentation: https://go.jzwsite.com/myfsio
# Storage paths
# =============================================================================
# STORAGE PATHS
# =============================================================================
STORAGE_ROOT=$DATA_DIR
LOG_DIR=$LOG_DIR
# Network
# =============================================================================
# NETWORK
# =============================================================================
APP_HOST=0.0.0.0
APP_PORT=$API_PORT
# Security - CHANGE IN PRODUCTION
SECRET_KEY=$SECRET_KEY
CORS_ORIGINS=*
# Public URL (set this if behind a reverse proxy)
# Public URL (set this if behind a reverse proxy for presigned URLs)
$(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)
# Logging
# =============================================================================
# SECURITY
# =============================================================================
# Secret key for session signing (auto-generated if not set)
SECRET_KEY=$SECRET_KEY
# CORS settings - restrict in production
CORS_ORIGINS=*
# Brute-force protection
AUTH_MAX_ATTEMPTS=5
AUTH_LOCKOUT_MINUTES=15
# Reverse proxy settings (set to number of trusted proxies in front)
# NUM_TRUSTED_PROXIES=1
# Allow internal admin endpoints (only enable on trusted networks)
# ALLOW_INTERNAL_ENDPOINTS=false
# Allowed hosts for redirects (comma-separated, empty = restrict all)
# ALLOWED_REDIRECT_HOSTS=
# =============================================================================
# LOGGING
# =============================================================================
LOG_LEVEL=INFO
LOG_TO_FILE=true
# Rate limiting
# =============================================================================
# RATE LIMITING
# =============================================================================
RATE_LIMIT_DEFAULT=200 per minute
# RATE_LIMIT_LIST_BUCKETS=60 per minute
# RATE_LIMIT_BUCKET_OPS=120 per minute
# RATE_LIMIT_OBJECT_OPS=240 per minute
# RATE_LIMIT_ADMIN=60 per minute
# Optional: Encryption (uncomment to enable)
# =============================================================================
# SERVER TUNING (0 = auto-detect based on system resources)
# =============================================================================
# SERVER_THREADS=0
# SERVER_CONNECTION_LIMIT=0
# SERVER_BACKLOG=0
# SERVER_CHANNEL_TIMEOUT=120
# =============================================================================
# ENCRYPTION (uncomment to enable)
# =============================================================================
# ENCRYPTION_ENABLED=true
# KMS_ENABLED=true
# =============================================================================
# SITE SYNC / REPLICATION (for multi-site deployments)
# =============================================================================
# SITE_ID=site-1
# SITE_ENDPOINT=https://s3-site1.example.com
# SITE_REGION=us-east-1
# SITE_SYNC_ENABLED=false
# =============================================================================
# OPTIONAL FEATURES
# =============================================================================
# LIFECYCLE_ENABLED=false
# METRICS_HISTORY_ENABLED=false
# OPERATION_METRICS_ENABLED=false
EOF
chmod 600 "$INSTALL_DIR/myfsio.env"
echo " [OK] Created $INSTALL_DIR/myfsio.env"
@@ -308,7 +363,7 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
systemctl start myfsio
echo " [OK] Service started"
echo ""
read -p "Would you like to enable MyFSIO to start on boot? [Y/n] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Nn]$ ]]; then
@@ -316,12 +371,37 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
echo " [OK] Service enabled on boot"
fi
echo ""
sleep 2
echo " Waiting for service initialization..."
sleep 3
echo " Service Status:"
echo " ---------------"
if systemctl is-active --quiet myfsio; then
echo " [OK] MyFSIO is running"
IAM_FILE="$DATA_DIR/.myfsio.sys/config/iam.json"
if [[ -f "$IAM_FILE" ]]; then
echo ""
echo " ============================================"
echo " ADMIN CREDENTIALS (save these securely!)"
echo " ============================================"
if command -v jq &>/dev/null; then
ACCESS_KEY=$(jq -r '.users[0].access_key' "$IAM_FILE" 2>/dev/null)
SECRET_KEY=$(jq -r '.users[0].secret_key' "$IAM_FILE" 2>/dev/null)
else
ACCESS_KEY=$(grep -o '"access_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
SECRET_KEY=$(grep -o '"secret_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
fi
if [[ -n "$ACCESS_KEY" && -n "$SECRET_KEY" ]]; then
echo " Access Key: $ACCESS_KEY"
echo " Secret Key: $SECRET_KEY"
else
echo " [!] Could not parse credentials from $IAM_FILE"
echo " Check the file manually or view service logs."
fi
echo " ============================================"
fi
else
echo " [WARNING] MyFSIO may not have started correctly"
echo " Check logs with: journalctl -u myfsio -f"
@@ -346,19 +426,26 @@ echo "Access Points:"
echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$API_PORT"
echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
echo ""
echo "Default Credentials:"
echo " Username: localadmin"
echo " Password: localadmin"
echo " [!] WARNING: Change these immediately after first login!"
echo "Credentials:"
echo " Admin credentials were shown above (if service was started)."
echo " You can also find them in: $DATA_DIR/.myfsio.sys/config/iam.json"
echo ""
echo "Configuration Files:"
echo " Environment: $INSTALL_DIR/myfsio.env"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " Secret Key: $DATA_DIR/.myfsio.sys/config/.secret (auto-generated)"
echo ""
echo "Security Notes:"
echo " - Rate limiting is enabled by default (200 req/min)"
echo " - Brute-force protection: 5 attempts, 15 min lockout"
echo " - Set CORS_ORIGINS to specific domains in production"
echo " - Set NUM_TRUSTED_PROXIES if behind a reverse proxy"
echo ""
echo "Useful Commands:"
echo " Check status: sudo systemctl status myfsio"
echo " View logs: sudo journalctl -u myfsio -f"
echo " Validate config: $INSTALL_DIR/myfsio --check-config"
echo " Restart: sudo systemctl restart myfsio"
echo " Stop: sudo systemctl stop myfsio"
echo ""

View File

@@ -88,7 +88,8 @@ echo "The following items will be removed:"
echo ""
echo " Install directory: $INSTALL_DIR"
if [[ "$KEEP_DATA" != true ]]; then
echo " Data directory: $DATA_DIR (ALL YOUR DATA WILL BE DELETED!)"
echo " Data directory: $DATA_DIR"
echo " [!] ALL DATA, IAM USERS, AND ENCRYPTION KEYS WILL BE DELETED!"
else
echo " Data directory: $DATA_DIR (WILL BE KEPT)"
fi
@@ -227,8 +228,15 @@ echo ""
if [[ "$KEEP_DATA" == true ]]; then
echo "Your data has been preserved at: $DATA_DIR"
echo ""
echo "To reinstall MyFSIO with existing data, run:"
echo " curl -fsSL https://go.jzwsite.com/myfsio-install | sudo bash"
echo "Preserved files include:"
echo " - All buckets and objects"
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " - Bucket policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " - Secret key: $DATA_DIR/.myfsio.sys/config/.secret"
echo " - Encryption keys: $DATA_DIR/.myfsio.sys/keys/ (if encryption was enabled)"
echo ""
echo "To reinstall MyFSIO with existing data:"
echo " ./install.sh --data-dir $DATA_DIR"
echo ""
fi

View File

@@ -182,6 +182,9 @@
let visibleItems = [];
let renderedRange = { start: 0, end: 0 };
let memoizedVisibleItems = null;
let memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
const createObjectRow = (obj, displayKey = null) => {
const tr = document.createElement('tr');
tr.dataset.objectRow = '';
@@ -340,7 +343,21 @@
}
};
const computeVisibleItems = () => {
const computeVisibleItems = (forceRecompute = false) => {
const currentInputs = {
objectCount: allObjects.length,
prefix: currentPrefix,
filterTerm: currentFilterTerm
};
if (!forceRecompute &&
memoizedVisibleItems !== null &&
memoizedInputs.objectCount === currentInputs.objectCount &&
memoizedInputs.prefix === currentInputs.prefix &&
memoizedInputs.filterTerm === currentInputs.filterTerm) {
return memoizedVisibleItems;
}
const items = [];
const folders = new Set();
@@ -381,6 +398,8 @@
return aKey.localeCompare(bKey);
});
memoizedVisibleItems = items;
memoizedInputs = currentInputs;
return items;
};
@@ -497,6 +516,9 @@
};
};
let lastStreamRenderTime = 0;
const STREAM_RENDER_THROTTLE_MS = 500;
const flushPendingStreamObjects = () => {
if (pendingStreamObjects.length === 0) return;
const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
@@ -513,6 +535,19 @@
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()}${countText} loading...`;
}
}
if (objectsLoadingRow && objectsLoadingRow.parentNode) {
const loadingText = objectsLoadingRow.querySelector('p');
if (loadingText) {
const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
loadingText.textContent = `Loading ${loadedObjectCount.toLocaleString()}${countText} objects...`;
}
}
const now = performance.now();
if (!streamingComplete && now - lastStreamRenderTime < STREAM_RENDER_THROTTLE_MS) {
streamRenderScheduled = false;
return;
}
lastStreamRenderTime = now;
refreshVirtualList();
streamRenderScheduled = false;
};
@@ -533,7 +568,10 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
pendingStreamObjects = [];
lastStreamRenderTime = 0;
streamAbortController = new AbortController();
@@ -548,7 +586,10 @@
throw new Error(`HTTP ${response.status}`);
}
if (objectsLoadingRow) objectsLoadingRow.remove();
if (objectsLoadingRow) {
const loadingText = objectsLoadingRow.querySelector('p');
if (loadingText) loadingText.textContent = 'Receiving objects...';
}
const reader = response.body.getReader();
const decoder = new TextDecoder();
@@ -576,6 +617,10 @@
break;
case 'count':
totalObjectCount = msg.total_count || 0;
if (objectsLoadingRow) {
const loadingText = objectsLoadingRow.querySelector('p');
if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
}
break;
case 'object':
pendingStreamObjects.push(processStreamObject(msg));
@@ -609,11 +654,15 @@
} catch (e) { }
}
flushPendingStreamObjects();
streamingComplete = true;
flushPendingStreamObjects();
hasMoreObjects = false;
updateObjectCountBadge();
if (objectsLoadingRow && objectsLoadingRow.parentNode) {
objectsLoadingRow.remove();
}
if (loadMoreStatus) {
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
}
@@ -643,6 +692,8 @@
loadedObjectCount = 0;
totalObjectCount = 0;
allObjects = [];
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
}
if (append && loadMoreSpinner) {
@@ -985,13 +1036,15 @@
};
const navigateToFolder = (prefix) => {
if (streamAbortController) {
streamAbortController.abort();
streamAbortController = null;
}
currentPrefix = prefix;
if (scrollContainer) scrollContainer.scrollTop = 0;
refreshVirtualList();
renderBreadcrumb(prefix);
selectedRows.clear();
if (typeof updateBulkDeleteState === 'function') {
@@ -1001,6 +1054,9 @@
if (previewPanel) previewPanel.classList.add('d-none');
if (previewEmpty) previewEmpty.classList.remove('d-none');
activeRow = null;
isLoadingObjects = false;
loadObjects(false);
};
const renderObjectsView = () => {

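The object-browser script above throttles repaints to one per 500 ms while stream batches arrive, then forces a final repaint on completion. The same shape sketched in Python rather than JavaScript, with time.monotonic standing in for performance.now():

import time

THROTTLE_S = 0.5
_last_render = 0.0

def maybe_render(pending: list, done: bool) -> bool:
    """Absorb the pending batch; repaint only when due or when streaming ends."""
    global _last_render
    pending.clear()  # the batch is always absorbed into the model
    now = time.monotonic()
    if not done and now - _last_render < THROTTLE_S:
        return False  # data kept, repaint skipped
    _last_render = now
    return True       # caller repaints the visible list
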
View File

@@ -451,10 +451,10 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<span class="docs-section-kicker">03</span>
<h2 class="h4 mb-0">Authenticate &amp; manage IAM</h2>
</div>
<p class="text-muted">MyFSIO seeds <code>data/.myfsio.sys/config/iam.json</code> with <code>localadmin/localadmin</code>. Sign in once, rotate it, then grant least-privilege access to teammates and tools.</p>
<p class="text-muted">On first startup, MyFSIO generates random admin credentials and prints them to the console. Missed it? Check <code>data/.myfsio.sys/config/iam.json</code> directly—credentials are stored in plaintext.</p>
<div class="docs-highlight mb-3">
<ol class="mb-0">
<li>Visit <code>/ui/login</code>, enter the bootstrap credentials, and rotate them immediately from the IAM page.</li>
<li>Check the console output (or <code>iam.json</code>) for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
<li>Create additional users with descriptive display names and AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>).</li>
<li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li>
<li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly.</li>
@@ -2136,8 +2136,8 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;?tagging" \
<code class="d-block">{{ api_base }}</code>
</div>
<div>
<div class="small text-uppercase text-muted">Sample user</div>
<code class="d-block">localadmin / localadmin</code>
<div class="small text-uppercase text-muted">Initial credentials</div>
<span class="text-muted small">Generated on first run (check console)</span>
</div>
<div>
<div class="small text-uppercase text-muted">Logs</div>

View File

@@ -398,6 +398,14 @@
<option value="24" selected>Last 24 hours</option>
<option value="168">Last 7 days</option>
</select>
<select class="form-select form-select-sm" id="maxDataPoints" style="width: auto;" title="Maximum data points to display">
<option value="100">100 points</option>
<option value="250">250 points</option>
<option value="500" selected>500 points</option>
<option value="1000">1000 points</option>
<option value="2000">2000 points</option>
<option value="0">Unlimited</option>
</select>
</div>
</div>
<div class="card-body p-4">
@@ -817,8 +825,8 @@
var diskChart = null;
var historyStatus = document.getElementById('historyStatus');
var timeRangeSelect = document.getElementById('historyTimeRange');
var maxDataPointsSelect = document.getElementById('maxDataPoints');
var historyTimer = null;
var MAX_DATA_POINTS = 500;
function createChart(ctx, label, color) {
return new Chart(ctx, {
@@ -889,7 +897,8 @@
if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
return;
}
var history = data.history.slice(-MAX_DATA_POINTS);
var maxPoints = maxDataPointsSelect ? parseInt(maxDataPointsSelect.value, 10) : 500;
var history = maxPoints > 0 ? data.history.slice(-maxPoints) : data.history;
var labels = history.map(function(h) { return formatTime(h.timestamp); });
var cpuData = history.map(function(h) { return h.cpu_percent; });
var memData = history.map(function(h) { return h.memory_percent; });
@@ -927,6 +936,10 @@
timeRangeSelect.addEventListener('change', loadHistory);
}
if (maxDataPointsSelect) {
maxDataPointsSelect.addEventListener('change', loadHistory);
}
document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (historyTimer) clearInterval(historyTimer);