Further optimize CPU usage; improve security and performance; 4 bug fixes.

This commit is contained in:
2026-02-05 17:45:34 +08:00
parent a779b002d7
commit 70b61fd8e6
10 changed files with 219 additions and 67 deletions

View File

@@ -1,6 +1,7 @@
from __future__ import annotations from __future__ import annotations
import ipaddress import ipaddress
import json
import logging import logging
import re import re
import socket import socket
@@ -354,6 +355,10 @@ def update_peer_site(site_id: str):
if region_error: if region_error:
return _json_error("ValidationError", region_error, 400) return _json_error("ValidationError", region_error, 400)
if "connection_id" in payload:
if payload["connection_id"] and not _connections().get(payload["connection_id"]):
return _json_error("ValidationError", f"Connection '{payload['connection_id']}' not found", 400)
peer = PeerSite( peer = PeerSite(
site_id=site_id, site_id=site_id,
endpoint=payload.get("endpoint", existing.endpoint), endpoint=payload.get("endpoint", existing.endpoint),

View File

@@ -529,11 +529,13 @@ class IamService:
return candidate if candidate in ALLOWED_ACTIONS else "" return candidate if candidate in ALLOWED_ACTIONS else ""
def _write_default(self) -> None: def _write_default(self) -> None:
access_key = secrets.token_hex(12)
secret_key = secrets.token_urlsafe(32)
default = { default = {
"users": [ "users": [
{ {
"access_key": "localadmin", "access_key": access_key,
"secret_key": "localadmin", "secret_key": secret_key,
"display_name": "Local Admin", "display_name": "Local Admin",
"policies": [ "policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)} {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
@@ -542,6 +544,14 @@ class IamService:
] ]
} }
self.config_path.write_text(json.dumps(default, indent=2)) self.config_path.write_text(json.dumps(default, indent=2))
print(f"\n{'='*60}")
print("MYFSIO FIRST RUN - ADMIN CREDENTIALS GENERATED")
print(f"{'='*60}")
print(f"Access Key: {access_key}")
print(f"Secret Key: {secret_key}")
print(f"{'='*60}")
print(f"Missed this? Check: {self.config_path}")
print(f"{'='*60}\n")
def _generate_access_key(self) -> str: def _generate_access_key(self) -> str:
return secrets.token_hex(8) return secrets.token_hex(8)

View File

@@ -1004,7 +1004,8 @@ def _apply_object_headers(
response.headers["ETag"] = f'"{etag}"' response.headers["ETag"] = f'"{etag}"'
response.headers["Accept-Ranges"] = "bytes" response.headers["Accept-Ranges"] = "bytes"
for key, value in (metadata or {}).items(): for key, value in (metadata or {}).items():
response.headers[f"X-Amz-Meta-{key}"] = value safe_value = _sanitize_header_value(str(value))
response.headers[f"X-Amz-Meta-{key}"] = safe_value
def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None: def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
@@ -2342,10 +2343,12 @@ def _post_object(bucket_name: str) -> Response:
success_action_redirect = request.form.get("success_action_redirect") success_action_redirect = request.form.get("success_action_redirect")
if success_action_redirect: if success_action_redirect:
allowed_hosts = current_app.config.get("ALLOWED_REDIRECT_HOSTS", []) allowed_hosts = current_app.config.get("ALLOWED_REDIRECT_HOSTS", [])
if not allowed_hosts:
allowed_hosts = [request.host]
parsed = urlparse(success_action_redirect) parsed = urlparse(success_action_redirect)
if parsed.scheme not in ("http", "https"): if parsed.scheme not in ("http", "https"):
return _error_response("InvalidArgument", "Redirect URL must use http or https", 400) return _error_response("InvalidArgument", "Redirect URL must use http or https", 400)
if allowed_hosts and parsed.netloc not in allowed_hosts: if parsed.netloc not in allowed_hosts:
return _error_response("InvalidArgument", "Redirect URL host not allowed", 400) return _error_response("InvalidArgument", "Redirect URL host not allowed", 400)
redirect_url = f"{success_action_redirect}?bucket={bucket_name}&key={quote(object_key)}&etag={meta.etag}" redirect_url = f"{success_action_redirect}?bucket={bucket_name}&key={quote(object_key)}&etag={meta.etag}"
return Response(status=303, headers={"Location": redirect_url}) return Response(status=303, headers={"Location": redirect_url})

View File

@@ -18,6 +18,18 @@ class EphemeralSecretStore:
self._store[token] = (payload, expires_at) self._store[token] = (payload, expires_at)
return token return token
def peek(self, token: str | None) -> Any | None:
if not token:
return None
entry = self._store.get(token)
if not entry:
return None
payload, expires_at = entry
if expires_at < time.time():
self._store.pop(token, None)
return None
return payload
def pop(self, token: str | None) -> Any | None: def pop(self, token: str | None) -> Any | None:
if not token: if not token:
return None return None

View File

@@ -186,6 +186,7 @@ class ObjectStorage:
self._cache_ttl = cache_ttl self._cache_ttl = cache_ttl
self._object_cache_max_size = object_cache_max_size self._object_cache_max_size = object_cache_max_size
self._object_key_max_length_bytes = object_key_max_length_bytes self._object_key_max_length_bytes = object_key_max_length_bytes
self._sorted_key_cache: Dict[str, tuple[list[str], int]] = {}
def _get_bucket_lock(self, bucket_id: str) -> threading.Lock: def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
"""Get or create a lock for a specific bucket. Reduces global lock contention.""" """Get or create a lock for a specific bucket. Reduces global lock contention."""
@@ -243,10 +244,15 @@ class ObjectStorage:
raise BucketNotFoundError("Bucket does not exist") raise BucketNotFoundError("Bucket does not exist")
cache_path = self._system_bucket_root(bucket_name) / "stats.json" cache_path = self._system_bucket_root(bucket_name) / "stats.json"
cached_stats = None
cache_fresh = False
if cache_path.exists(): if cache_path.exists():
try: try:
if time.time() - cache_path.stat().st_mtime < cache_ttl: cache_fresh = time.time() - cache_path.stat().st_mtime < cache_ttl
return json.loads(cache_path.read_text(encoding="utf-8")) cached_stats = json.loads(cache_path.read_text(encoding="utf-8"))
if cache_fresh:
return cached_stats
except (OSError, json.JSONDecodeError): except (OSError, json.JSONDecodeError):
pass pass
@@ -255,6 +261,7 @@ class ObjectStorage:
version_count = 0 version_count = 0
version_bytes = 0 version_bytes = 0
try:
for path in bucket_path.rglob("*"): for path in bucket_path.rglob("*"):
if path.is_file(): if path.is_file():
rel = path.relative_to(bucket_path) rel = path.relative_to(bucket_path)
@@ -273,6 +280,10 @@ class ObjectStorage:
stat = path.stat() stat = path.stat()
version_count += 1 version_count += 1
version_bytes += stat.st_size version_bytes += stat.st_size
except OSError:
if cached_stats is not None:
return cached_stats
raise
stats = { stats = {
"objects": object_count, "objects": object_count,
@@ -299,6 +310,34 @@ class ObjectStorage:
except OSError: except OSError:
pass pass
def _update_bucket_stats_cache(
self,
bucket_id: str,
*,
bytes_delta: int = 0,
objects_delta: int = 0,
version_bytes_delta: int = 0,
version_count_delta: int = 0,
) -> None:
"""Incrementally update cached bucket statistics instead of invalidating.
This avoids expensive full directory scans on every PUT/DELETE by
adjusting the cached values directly.
"""
cache_path = self._system_bucket_root(bucket_id) / "stats.json"
try:
if cache_path.exists():
data = json.loads(cache_path.read_text(encoding="utf-8"))
data["objects"] = max(0, data.get("objects", 0) + objects_delta)
data["bytes"] = max(0, data.get("bytes", 0) + bytes_delta)
data["version_count"] = max(0, data.get("version_count", 0) + version_count_delta)
data["version_bytes"] = max(0, data.get("version_bytes", 0) + version_bytes_delta)
data["total_objects"] = max(0, data.get("total_objects", 0) + objects_delta + version_count_delta)
data["total_bytes"] = max(0, data.get("total_bytes", 0) + bytes_delta + version_bytes_delta)
cache_path.write_text(json.dumps(data), encoding="utf-8")
except (OSError, json.JSONDecodeError):
pass
def delete_bucket(self, bucket_name: str) -> None: def delete_bucket(self, bucket_name: str) -> None:
bucket_path = self._bucket_path(bucket_name) bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists(): if not bucket_path.exists():
@@ -333,6 +372,8 @@ class ObjectStorage:
Returns: Returns:
ListObjectsResult with objects, truncation status, and continuation token ListObjectsResult with objects, truncation status, and continuation token
""" """
import bisect
bucket_path = self._bucket_path(bucket_name) bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists(): if not bucket_path.exists():
raise BucketNotFoundError("Bucket does not exist") raise BucketNotFoundError("Bucket does not exist")
@@ -340,15 +381,26 @@ class ObjectStorage:
object_cache = self._get_object_cache(bucket_id, bucket_path) object_cache = self._get_object_cache(bucket_id, bucket_path)
cache_version = self._cache_version.get(bucket_id, 0)
cached_entry = self._sorted_key_cache.get(bucket_id)
if cached_entry and cached_entry[1] == cache_version:
all_keys = cached_entry[0]
else:
all_keys = sorted(object_cache.keys()) all_keys = sorted(object_cache.keys())
self._sorted_key_cache[bucket_id] = (all_keys, cache_version)
if prefix: if prefix:
all_keys = [k for k in all_keys if k.startswith(prefix)] lo = bisect.bisect_left(all_keys, prefix)
hi = len(all_keys)
for i in range(lo, len(all_keys)):
if not all_keys[i].startswith(prefix):
hi = i
break
all_keys = all_keys[lo:hi]
total_count = len(all_keys) total_count = len(all_keys)
start_index = 0 start_index = 0
if continuation_token: if continuation_token:
import bisect
start_index = bisect.bisect_right(all_keys, continuation_token) start_index = bisect.bisect_right(all_keys, continuation_token)
if start_index >= total_count: if start_index >= total_count:
return ListObjectsResult( return ListObjectsResult(
@@ -403,7 +455,9 @@ class ObjectStorage:
is_overwrite = destination.exists() is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0 existing_size = destination.stat().st_size if is_overwrite else 0
archived_version_size = 0
if self._is_versioning_enabled(bucket_path) and is_overwrite: if self._is_versioning_enabled(bucket_path) and is_overwrite:
archived_version_size = existing_size
self._archive_current_version(bucket_id, safe_key, reason="overwrite") self._archive_current_version(bucket_id, safe_key, reason="overwrite")
tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
@@ -416,11 +470,10 @@ class ObjectStorage:
shutil.copyfileobj(_HashingReader(stream, checksum), target) shutil.copyfileobj(_HashingReader(stream, checksum), target)
new_size = tmp_path.stat().st_size new_size = tmp_path.stat().st_size
if enforce_quota:
size_delta = new_size - existing_size size_delta = new_size - existing_size
object_delta = 0 if is_overwrite else 1 object_delta = 0 if is_overwrite else 1
if enforce_quota:
quota_check = self.check_quota( quota_check = self.check_quota(
bucket_name, bucket_name,
additional_bytes=max(0, size_delta), additional_bytes=max(0, size_delta),
@@ -448,7 +501,13 @@ class ObjectStorage:
combined_meta = {**internal_meta, **(metadata or {})} combined_meta = {**internal_meta, **(metadata or {})}
self._write_metadata(bucket_id, safe_key, combined_meta) self._write_metadata(bucket_id, safe_key, combined_meta)
self._invalidate_bucket_stats_cache(bucket_id) self._update_bucket_stats_cache(
bucket_id,
bytes_delta=size_delta,
objects_delta=object_delta,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
obj_meta = ObjectMeta( obj_meta = ObjectMeta(
key=safe_key.as_posix(), key=safe_key.as_posix(),
@@ -498,15 +557,24 @@ class ObjectStorage:
path = self._object_path(bucket_name, object_key) path = self._object_path(bucket_name, object_key)
if not path.exists(): if not path.exists():
return return
deleted_size = path.stat().st_size
safe_key = path.relative_to(bucket_path) safe_key = path.relative_to(bucket_path)
bucket_id = bucket_path.name bucket_id = bucket_path.name
archived_version_size = 0
if self._is_versioning_enabled(bucket_path): if self._is_versioning_enabled(bucket_path):
archived_version_size = deleted_size
self._archive_current_version(bucket_id, safe_key, reason="delete") self._archive_current_version(bucket_id, safe_key, reason="delete")
rel = path.relative_to(bucket_path) rel = path.relative_to(bucket_path)
self._safe_unlink(path) self._safe_unlink(path)
self._delete_metadata(bucket_id, rel) self._delete_metadata(bucket_id, rel)
self._invalidate_bucket_stats_cache(bucket_id) self._update_bucket_stats_cache(
bucket_id,
bytes_delta=-deleted_size,
objects_delta=-1,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None) self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None)
self._cleanup_empty_parents(path, bucket_path) self._cleanup_empty_parents(path, bucket_path)
@@ -828,7 +896,12 @@ class ObjectStorage:
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
metadata = {} metadata = {}
destination = bucket_path / safe_key destination = bucket_path / safe_key
if self._is_versioning_enabled(bucket_path) and destination.exists(): restored_size = data_path.stat().st_size
is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0
archived_version_size = 0
if self._is_versioning_enabled(bucket_path) and is_overwrite:
archived_version_size = existing_size
self._archive_current_version(bucket_id, safe_key, reason="restore-overwrite") self._archive_current_version(bucket_id, safe_key, reason="restore-overwrite")
destination.parent.mkdir(parents=True, exist_ok=True) destination.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(data_path, destination) shutil.copy2(data_path, destination)
@@ -837,7 +910,13 @@ class ObjectStorage:
else: else:
self._delete_metadata(bucket_id, safe_key) self._delete_metadata(bucket_id, safe_key)
stat = destination.stat() stat = destination.stat()
self._invalidate_bucket_stats_cache(bucket_id) self._update_bucket_stats_cache(
bucket_id,
bytes_delta=restored_size - existing_size,
objects_delta=0 if is_overwrite else 1,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
return ObjectMeta( return ObjectMeta(
key=safe_key.as_posix(), key=safe_key.as_posix(),
size=stat.st_size, size=stat.st_size,
@@ -861,6 +940,7 @@ class ObjectStorage:
meta_path = legacy_version_dir / f"{version_id}.json" meta_path = legacy_version_dir / f"{version_id}.json"
if not data_path.exists() and not meta_path.exists(): if not data_path.exists() and not meta_path.exists():
raise StorageError(f"Version {version_id} not found") raise StorageError(f"Version {version_id} not found")
deleted_version_size = data_path.stat().st_size if data_path.exists() else 0
if data_path.exists(): if data_path.exists():
data_path.unlink() data_path.unlink()
if meta_path.exists(): if meta_path.exists():
@@ -868,6 +948,12 @@ class ObjectStorage:
parent = data_path.parent parent = data_path.parent
if parent.exists() and not any(parent.iterdir()): if parent.exists() and not any(parent.iterdir()):
parent.rmdir() parent.rmdir()
if deleted_version_size > 0:
self._update_bucket_stats_cache(
bucket_id,
version_bytes_delta=-deleted_version_size,
version_count_delta=-1,
)
def list_orphaned_objects(self, bucket_name: str) -> List[Dict[str, Any]]: def list_orphaned_objects(self, bucket_name: str) -> List[Dict[str, Any]]:
bucket_path = self._bucket_path(bucket_name) bucket_path = self._bucket_path(bucket_name)
@@ -1167,11 +1253,11 @@ class ObjectStorage:
is_overwrite = destination.exists() is_overwrite = destination.exists()
existing_size = destination.stat().st_size if is_overwrite else 0 existing_size = destination.stat().st_size if is_overwrite else 0
if enforce_quota:
size_delta = total_size - existing_size size_delta = total_size - existing_size
object_delta = 0 if is_overwrite else 1 object_delta = 0 if is_overwrite else 1
versioning_enabled = self._is_versioning_enabled(bucket_path)
if enforce_quota:
quota_check = self.check_quota( quota_check = self.check_quota(
bucket_name, bucket_name,
additional_bytes=max(0, size_delta), additional_bytes=max(0, size_delta),
@@ -1188,9 +1274,11 @@ class ObjectStorage:
lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock" lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
archived_version_size = 0
try: try:
with _atomic_lock_file(lock_file_path): with _atomic_lock_file(lock_file_path):
if self._is_versioning_enabled(bucket_path) and destination.exists(): if versioning_enabled and destination.exists():
archived_version_size = destination.stat().st_size
self._archive_current_version(bucket_id, safe_key, reason="overwrite") self._archive_current_version(bucket_id, safe_key, reason="overwrite")
checksum = hashlib.md5() checksum = hashlib.md5()
with destination.open("wb") as target: with destination.open("wb") as target:
@@ -1210,7 +1298,13 @@ class ObjectStorage:
shutil.rmtree(upload_root, ignore_errors=True) shutil.rmtree(upload_root, ignore_errors=True)
self._invalidate_bucket_stats_cache(bucket_id) self._update_bucket_stats_cache(
bucket_id,
bytes_delta=size_delta,
objects_delta=object_delta,
version_bytes_delta=archived_version_size,
version_count_delta=1 if archived_version_size > 0 else 0,
)
stat = destination.stat() stat = destination.stat()
etag = checksum.hexdigest() etag = checksum.hexdigest()
@@ -1586,6 +1680,8 @@ class ObjectStorage:
objects.pop(key, None) objects.pop(key, None)
else: else:
objects[key] = meta objects[key] = meta
self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1
self._sorted_key_cache.pop(bucket_id, None)
def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None: def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
"""Pre-warm the object cache for specified buckets or all buckets. """Pre-warm the object cache for specified buckets or all buckets.

View File

@@ -220,13 +220,16 @@ def _bucket_access_descriptor(policy: dict[str, Any] | None) -> tuple[str, str]:
def _current_principal(): def _current_principal():
creds = session.get("credentials") token = session.get("cred_token")
creds = _secret_store().peek(token) if token else None
if not creds: if not creds:
return None return None
try: try:
return _iam().authenticate(creds["access_key"], creds["secret_key"]) return _iam().authenticate(creds["access_key"], creds["secret_key"])
except IamError: except IamError:
session.pop("credentials", None) session.pop("cred_token", None)
if token:
_secret_store().pop(token)
return None return None
@@ -251,7 +254,8 @@ def _authorize_ui(principal, bucket_name: str | None, action: str, *, object_key
def _api_headers() -> dict[str, str]: def _api_headers() -> dict[str, str]:
creds = session.get("credentials") or {} token = session.get("cred_token")
creds = _secret_store().peek(token) or {}
return { return {
"X-Access-Key": creds.get("access_key", ""), "X-Access-Key": creds.get("access_key", ""),
"X-Secret-Key": creds.get("secret_key", ""), "X-Secret-Key": creds.get("secret_key", ""),
@@ -296,7 +300,9 @@ def login():
except IamError as exc: except IamError as exc:
flash(_friendly_error_message(exc), "danger") flash(_friendly_error_message(exc), "danger")
return render_template("login.html") return render_template("login.html")
session["credentials"] = {"access_key": access_key, "secret_key": secret_key} creds = {"access_key": access_key, "secret_key": secret_key}
token = _secret_store().remember(creds, ttl=3600)
session["cred_token"] = token
session.permanent = True session.permanent = True
flash(f"Welcome back, {principal.display_name}", "success") flash(f"Welcome back, {principal.display_name}", "success")
return redirect(url_for("ui.buckets_overview")) return redirect(url_for("ui.buckets_overview"))
@@ -305,7 +311,9 @@ def login():
@ui_bp.post("/logout") @ui_bp.post("/logout")
def logout(): def logout():
session.pop("credentials", None) token = session.pop("cred_token", None)
if token:
_secret_store().pop(token)
flash("Signed out", "info") flash("Signed out", "info")
return redirect(url_for("ui.login")) return redirect(url_for("ui.login"))
@@ -542,7 +550,10 @@ def list_bucket_objects(bucket_name: str):
except IamError as exc: except IamError as exc:
return jsonify({"error": str(exc)}), 403 return jsonify({"error": str(exc)}), 403
try:
max_keys = min(int(request.args.get("max_keys", 1000)), 100000) max_keys = min(int(request.args.get("max_keys", 1000)), 100000)
except ValueError:
return jsonify({"error": "max_keys must be an integer"}), 400
continuation_token = request.args.get("continuation_token") or None continuation_token = request.args.get("continuation_token") or None
prefix = request.args.get("prefix") or None prefix = request.args.get("prefix") or None

View File

@@ -1,6 +1,6 @@
from __future__ import annotations from __future__ import annotations
APP_VERSION = "0.2.5" APP_VERSION = "0.2.6"
def get_version() -> str: def get_version() -> str:

View File

@@ -619,13 +619,15 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
### Getting Started ### Getting Started
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access. 1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. If you miss it, check the `iam.json` file directly—credentials are stored in plaintext.
2. Sign into the UI using those credentials, then open **IAM**: 2. Sign into the UI using the generated credentials, then open **IAM**:
- **Create user**: supply a display name and optional JSON inline policy array. - **Create user**: supply a display name and optional JSON inline policy array.
- **Rotate secret**: generates a new secret key; the UI surfaces it once. - **Rotate secret**: generates a new secret key; the UI surfaces it once.
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`). - **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions. 3. Wildcard action `iam:*` is supported for admin user definitions.
> **Breaking Change (v0.2.6+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.
### Authentication ### Authentication
The API expects every request to include authentication headers. The UI persists them in the Flask session after login. The API expects every request to include authentication headers. The UI persists them in the Flask session after login.

View File

@@ -451,10 +451,10 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<span class="docs-section-kicker">03</span> <span class="docs-section-kicker">03</span>
<h2 class="h4 mb-0">Authenticate &amp; manage IAM</h2> <h2 class="h4 mb-0">Authenticate &amp; manage IAM</h2>
</div> </div>
<p class="text-muted">MyFSIO seeds <code>data/.myfsio.sys/config/iam.json</code> with <code>localadmin/localadmin</code>. Sign in once, rotate it, then grant least-privilege access to teammates and tools.</p> <p class="text-muted">On first startup, MyFSIO generates random admin credentials and prints them to the console. Missed it? Check <code>data/.myfsio.sys/config/iam.json</code> directly—credentials are stored in plaintext.</p>
<div class="docs-highlight mb-3"> <div class="docs-highlight mb-3">
<ol class="mb-0"> <ol class="mb-0">
<li>Visit <code>/ui/login</code>, enter the bootstrap credentials, and rotate them immediately from the IAM page.</li> <li>Check the console output (or <code>iam.json</code>) for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
<li>Create additional users with descriptive display names and AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>).</li> <li>Create additional users with descriptive display names and AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>).</li>
<li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li> <li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li>
<li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly.</li> <li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly.</li>
@@ -2136,8 +2136,8 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;?tagging" \
<code class="d-block">{{ api_base }}</code> <code class="d-block">{{ api_base }}</code>
</div> </div>
<div> <div>
<div class="small text-uppercase text-muted">Sample user</div> <div class="small text-uppercase text-muted">Initial credentials</div>
<code class="d-block">localadmin / localadmin</code> <span class="text-muted small">Generated on first run (check console)</span>
</div> </div>
<div> <div>
<div class="small text-uppercase text-muted">Logs</div> <div class="small text-uppercase text-muted">Logs</div>

View File

@@ -398,6 +398,14 @@
<option value="24" selected>Last 24 hours</option> <option value="24" selected>Last 24 hours</option>
<option value="168">Last 7 days</option> <option value="168">Last 7 days</option>
</select> </select>
<select class="form-select form-select-sm" id="maxDataPoints" style="width: auto;" title="Maximum data points to display">
<option value="100">100 points</option>
<option value="250">250 points</option>
<option value="500" selected>500 points</option>
<option value="1000">1000 points</option>
<option value="2000">2000 points</option>
<option value="0">Unlimited</option>
</select>
</div> </div>
</div> </div>
<div class="card-body p-4"> <div class="card-body p-4">
@@ -817,8 +825,8 @@
var diskChart = null; var diskChart = null;
var historyStatus = document.getElementById('historyStatus'); var historyStatus = document.getElementById('historyStatus');
var timeRangeSelect = document.getElementById('historyTimeRange'); var timeRangeSelect = document.getElementById('historyTimeRange');
var maxDataPointsSelect = document.getElementById('maxDataPoints');
var historyTimer = null; var historyTimer = null;
var MAX_DATA_POINTS = 500;
function createChart(ctx, label, color) { function createChart(ctx, label, color) {
return new Chart(ctx, { return new Chart(ctx, {
@@ -889,7 +897,8 @@
if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.'; if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
return; return;
} }
var history = data.history.slice(-MAX_DATA_POINTS); var maxPoints = maxDataPointsSelect ? parseInt(maxDataPointsSelect.value, 10) : 500;
var history = maxPoints > 0 ? data.history.slice(-maxPoints) : data.history;
var labels = history.map(function(h) { return formatTime(h.timestamp); }); var labels = history.map(function(h) { return formatTime(h.timestamp); });
var cpuData = history.map(function(h) { return h.cpu_percent; }); var cpuData = history.map(function(h) { return h.cpu_percent; });
var memData = history.map(function(h) { return h.memory_percent; }); var memData = history.map(function(h) { return h.memory_percent; });
@@ -927,6 +936,10 @@
timeRangeSelect.addEventListener('change', loadHistory); timeRangeSelect.addEventListener('change', loadHistory);
} }
if (maxDataPointsSelect) {
maxDataPointsSelect.addEventListener('change', loadHistory);
}
document.addEventListener('visibilitychange', function() { document.addEventListener('visibilitychange', function() {
if (document.hidden) { if (document.hidden) {
if (historyTimer) clearInterval(historyTimer); if (historyTimer) clearInterval(historyTimer);