From e06f653606fe600fb9bb7d738fe794151216897a Mon Sep 17 00:00:00 2001 From: kqjy Date: Tue, 24 Feb 2026 17:19:12 +0800 Subject: [PATCH 1/5] Fix version panel showing 'null' instead of timestamp, exclude current version from list, auto-refresh versions after upload --- app/ui.py | 4 +++- app/version.py | 2 +- static/js/bucket-detail-main.js | 17 +++++++++++++---- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/app/ui.py b/app/ui.py index 7dfaf90..eba8a85 100644 --- a/app/ui.py +++ b/app/ui.py @@ -1301,12 +1301,14 @@ def object_versions(bucket_name: str, object_key: str): for v in resp.get("Versions", []): if v.get("Key") != object_key: continue + if v.get("IsLatest", False): + continue versions.append({ "version_id": v.get("VersionId", ""), "last_modified": v["LastModified"].isoformat() if v.get("LastModified") else None, "size": v.get("Size", 0), "etag": v.get("ETag", "").strip('"'), - "is_latest": v.get("IsLatest", False), + "is_latest": False, }) return jsonify({"versions": versions}) except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc: diff --git a/app/version.py b/app/version.py index fc8981e..b156699 100644 --- a/app/version.py +++ b/app/version.py @@ -1,6 +1,6 @@ from __future__ import annotations -APP_VERSION = "0.3.0" +APP_VERSION = "0.3.1" def get_version() -> str: diff --git a/static/js/bucket-detail-main.js b/static/js/bucket-detail-main.js index 3ff9871..8e326fb 100644 --- a/static/js/bucket-detail-main.js +++ b/static/js/bucket-detail-main.js @@ -1537,7 +1537,7 @@ const confirmVersionRestore = (row, version, label = null, onConfirm) => { if (!version) return; - const timestamp = version.archived_at ? new Date(version.archived_at).toLocaleString() : version.version_id; + const timestamp = (version.archived_at || version.last_modified) ? 
new Date(version.archived_at || version.last_modified).toLocaleString() : version.version_id; const sizeLabel = formatBytes(Number(version.size) || 0); const reasonLabel = describeVersionReason(version.reason); const targetLabel = label || row?.dataset.key || 'this object'; @@ -1610,7 +1610,7 @@ const latestCell = document.createElement('td'); if (item.latest) { - const ts = item.latest.archived_at ? new Date(item.latest.archived_at).toLocaleString() : item.latest.version_id; + const ts = (item.latest.archived_at || item.latest.last_modified) ? new Date(item.latest.archived_at || item.latest.last_modified).toLocaleString() : item.latest.version_id; const sizeLabel = formatBytes(Number(item.latest.size) || 0); latestCell.innerHTML = `
${ts}
${sizeLabel} · ${describeVersionReason(item.latest.reason)}
`; } else { @@ -1785,7 +1785,7 @@ badge.textContent = `#${versionNumber}`; const title = document.createElement('div'); title.className = 'fw-semibold small'; - const timestamp = entry.archived_at ? new Date(entry.archived_at).toLocaleString() : entry.version_id; + const timestamp = (entry.archived_at || entry.last_modified) ? new Date(entry.archived_at || entry.last_modified).toLocaleString() : entry.version_id; title.textContent = timestamp; heading.appendChild(badge); heading.appendChild(title); @@ -2816,7 +2816,16 @@ uploadFileInput.value = ''; } - loadObjects(false); + const previousKey = activeRow?.dataset.key || null; + loadObjects(false).then(() => { + if (previousKey) { + const newRow = document.querySelector(`[data-object-row][data-key="${CSS.escape(previousKey)}"]`); + if (newRow) { + selectRow(newRow); + if (versioningEnabled) loadObjectVersions(newRow, { force: true }); + } + } + }); const successCount = uploadSuccessFiles.length; const errorCount = uploadErrorFiles.length; From 5bf7962c043407b21bf283c0f96d9dd7915bf403 Mon Sep 17 00:00:00 2001 From: kqjy Date: Tue, 24 Feb 2026 20:41:39 +0800 Subject: [PATCH 2/5] Fix UI: versioning modals and object browser panel showing 'null' --- app/storage.py | 37 +++++++++++++----- static/js/bucket-detail-main.js | 69 ++++++++++++++++++++++++++++++--- templates/bucket_detail.html | 11 +++--- 3 files changed, 97 insertions(+), 20 deletions(-) diff --git a/app/storage.py b/app/storage.py index 683fd4b..47590f2 100644 --- a/app/storage.py +++ b/app/storage.py @@ -377,9 +377,18 @@ class ObjectStorage: raise StorageError("Bucket contains archived object versions") if has_multipart: raise StorageError("Bucket has active multipart uploads") + bucket_id = bucket_path.name self._remove_tree(bucket_path) - self._remove_tree(self._system_bucket_root(bucket_path.name)) - self._remove_tree(self._multipart_bucket_root(bucket_path.name)) + self._remove_tree(self._system_bucket_root(bucket_id)) + 
self._remove_tree(self._multipart_bucket_root(bucket_id)) + self._bucket_config_cache.pop(bucket_id, None) + with self._cache_lock: + self._object_cache.pop(bucket_id, None) + self._cache_version.pop(bucket_id, None) + self._sorted_key_cache.pop(bucket_id, None) + stale = [k for k in self._meta_read_cache if k[0] == bucket_id] + for k in stale: + del self._meta_read_cache[k] def list_objects( self, @@ -1832,30 +1841,40 @@ class ObjectStorage: def _read_bucket_config(self, bucket_name: str) -> dict[str, Any]: now = time.time() + config_path = self._bucket_config_path(bucket_name) cached = self._bucket_config_cache.get(bucket_name) if cached: - config, cached_time = cached + config, cached_time, cached_mtime = cached if now - cached_time < self._bucket_config_cache_ttl: - return config.copy() + try: + current_mtime = config_path.stat().st_mtime if config_path.exists() else 0.0 + except OSError: + current_mtime = 0.0 + if current_mtime == cached_mtime: + return config.copy() - config_path = self._bucket_config_path(bucket_name) if not config_path.exists(): - self._bucket_config_cache[bucket_name] = ({}, now) + self._bucket_config_cache[bucket_name] = ({}, now, 0.0) return {} try: data = json.loads(config_path.read_text(encoding="utf-8")) config = data if isinstance(data, dict) else {} - self._bucket_config_cache[bucket_name] = (config, now) + mtime = config_path.stat().st_mtime + self._bucket_config_cache[bucket_name] = (config, now, mtime) return config.copy() except (OSError, json.JSONDecodeError): - self._bucket_config_cache[bucket_name] = ({}, now) + self._bucket_config_cache[bucket_name] = ({}, now, 0.0) return {} def _write_bucket_config(self, bucket_name: str, payload: dict[str, Any]) -> None: config_path = self._bucket_config_path(bucket_name) config_path.parent.mkdir(parents=True, exist_ok=True) config_path.write_text(json.dumps(payload), encoding="utf-8") - self._bucket_config_cache[bucket_name] = (payload.copy(), time.time()) + try: + mtime = 
config_path.stat().st_mtime + except OSError: + mtime = 0.0 + self._bucket_config_cache[bucket_name] = (payload.copy(), time.time(), mtime) def _set_bucket_config_entry(self, bucket_name: str, key: str, value: Any | None) -> None: config = self._read_bucket_config(bucket_name) diff --git a/static/js/bucket-detail-main.js b/static/js/bucket-detail-main.js index 8e326fb..d194a3a 100644 --- a/static/js/bucket-detail-main.js +++ b/static/js/bucket-detail-main.js @@ -137,11 +137,11 @@ const versionPanel = document.getElementById('version-panel'); const versionList = document.getElementById('version-list'); const refreshVersionsButton = document.getElementById('refreshVersionsButton'); - const archivedCard = document.getElementById('archived-objects-card'); - const archivedBody = archivedCard?.querySelector('[data-archived-body]'); - const archivedCountBadge = archivedCard?.querySelector('[data-archived-count]'); - const archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]'); - const archivedEndpoint = archivedCard?.dataset.archivedEndpoint; + let archivedCard = document.getElementById('archived-objects-card'); + let archivedBody = archivedCard?.querySelector('[data-archived-body]'); + let archivedCountBadge = archivedCard?.querySelector('[data-archived-count]'); + let archivedRefreshButton = archivedCard?.querySelector('[data-archived-refresh]'); + let archivedEndpoint = archivedCard?.dataset.archivedEndpoint; let versioningEnabled = objectsContainer?.dataset.versioning === 'true'; const versionsCache = new Map(); let activeRow = null; @@ -1737,6 +1737,15 @@ loadArchivedObjects(); } + const propertiesTab = document.getElementById('properties-tab'); + if (propertiesTab) { + propertiesTab.addEventListener('shown.bs.tab', () => { + if (archivedCard && archivedEndpoint) { + loadArchivedObjects(); + } + }); + } + async function restoreVersion(row, version) { if (!row || !version?.version_id) return; const template = row.dataset.restoreTemplate; @@ 
-4163,6 +4172,47 @@ var archivedCardEl = document.getElementById('archived-objects-card'); if (archivedCardEl) { archivedCardEl.style.display = enabled ? '' : 'none'; + } else if (enabled) { + var endpoint = window.BucketDetailConfig?.endpoints?.archivedObjects || ''; + if (endpoint) { + var html = '
' + + '
' + + '
' + + '' + + '' + + 'Archived Objects
' + + '
' + + '0 items' + + '
' + + '
' + + '

Objects that have been deleted while versioning is enabled. Their previous versions remain available until you restore or purge them.

' + + '
' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '
' + + '' + + 'KeyLatest VersionVersionsActions
' + + '' + + '' + + 'No archived objects
'; + card.insertAdjacentHTML('afterend', html); + archivedCard = document.getElementById('archived-objects-card'); + archivedBody = archivedCard.querySelector('[data-archived-body]'); + archivedCountBadge = archivedCard.querySelector('[data-archived-count]'); + archivedRefreshButton = archivedCard.querySelector('[data-archived-refresh]'); + archivedEndpoint = endpoint; + archivedRefreshButton.addEventListener('click', function() { loadArchivedObjects(); }); + loadArchivedObjects(); + } } var dropZone = document.getElementById('objects-drop-zone'); @@ -4170,6 +4220,15 @@ dropZone.setAttribute('data-versioning', enabled ? 'true' : 'false'); } + var bulkPurgeWrap = document.getElementById('bulkDeletePurgeWrap'); + if (bulkPurgeWrap) { + bulkPurgeWrap.classList.toggle('d-none', !enabled); + } + var singleDeleteVerWrap = document.getElementById('deleteObjectVersioningWrap'); + if (singleDeleteVerWrap) { + singleDeleteVerWrap.classList.toggle('d-none', !enabled); + } + if (!enabled) { var newForm = document.getElementById('enableVersioningForm'); if (newForm) { diff --git a/templates/bucket_detail.html b/templates/bucket_detail.html index 14f3406..d4fc3ee 100644 --- a/templates/bucket_detail.html +++ b/templates/bucket_detail.html @@ -2272,13 +2272,11 @@
- {% if versioning_enabled %} -
+
Removes any archived versions stored in the archive.
- {% endif %}
-
${countDisplay} object${objectCount !== 1 ? 's' : ''}
+ ${countLine} diff --git a/templates/bucket_detail.html b/templates/bucket_detail.html index d4fc3ee..c8f9c32 100644 --- a/templates/bucket_detail.html +++ b/templates/bucket_detail.html @@ -171,6 +171,7 @@ data-bulk-download-endpoint="{{ url_for('ui.bulk_download_objects', bucket_name=bucket_name) }}" data-folders-url="{{ folders_url }}" data-buckets-for-copy-url="{{ buckets_for_copy_url }}" + data-bucket-total-objects="{{ bucket_stats.get('objects', 0) }}" > From a356bb0c4e1c2fc796e4b41bbbf2d7a85e169604 Mon Sep 17 00:00:00 2001 From: kqjy Date: Thu, 26 Feb 2026 17:11:07 +0800 Subject: [PATCH 4/5] perf: shallow listing, os.scandir stats, server-side search for large buckets --- app/encrypted_storage.py | 5 +- app/storage.py | 77 ++++++++++++++++++++++++++++ app/ui.py | 27 ++++++++++ static/js/bucket-detail-main.js | 90 ++++++++++++++++++++++++++++----- 4 files changed, 184 insertions(+), 15 deletions(-) diff --git a/app/encrypted_storage.py b/app/encrypted_storage.py index ff097bb..a0d3a58 100644 --- a/app/encrypted_storage.py +++ b/app/encrypted_storage.py @@ -192,7 +192,10 @@ class EncryptedObjectStorage: def list_objects_shallow(self, bucket_name: str, **kwargs): return self.storage.list_objects_shallow(bucket_name, **kwargs) - + + def search_objects(self, bucket_name: str, query: str, **kwargs): + return self.storage.search_objects(bucket_name, query, **kwargs) + def list_objects_all(self, bucket_name: str): return self.storage.list_objects_all(bucket_name) diff --git a/app/storage.py b/app/storage.py index 2da6aa3..e67b2ae 100644 --- a/app/storage.py +++ b/app/storage.py @@ -692,6 +692,83 @@ class ObjectStorage: next_continuation_token=next_token, ) + def search_objects( + self, + bucket_name: str, + query: str, + *, + prefix: str = "", + limit: int = 500, + ) -> Dict[str, Any]: + bucket_path = self._bucket_path(bucket_name) + if not bucket_path.is_dir(): + raise BucketNotFoundError("Bucket does not exist") + + if prefix: + search_root = bucket_path / 
prefix.replace("/", os.sep) + if not search_root.is_dir(): + return {"results": [], "truncated": False} + resolved = search_root.resolve() + if not str(resolved).startswith(str(bucket_path.resolve())): + return {"results": [], "truncated": False} + else: + search_root = bucket_path + + query_lower = query.lower() + results: list[Dict[str, Any]] = [] + internal = self.INTERNAL_FOLDERS + bucket_str = str(bucket_path) + bucket_len = len(bucket_str) + 1 + meta_root = self._bucket_meta_root(bucket_name) + scan_limit = limit * 4 + + matched = 0 + scanned = 0 + search_str = str(search_root) + stack = [search_str] + while stack: + current = stack.pop() + try: + with os.scandir(current) as it: + for entry in it: + if current == bucket_str and entry.name in internal: + continue + if entry.is_dir(follow_symlinks=False): + stack.append(entry.path) + elif entry.is_file(follow_symlinks=False): + scanned += 1 + key = entry.path[bucket_len:].replace(os.sep, "/") + if query_lower in key.lower(): + st = entry.stat(follow_symlinks=False) + meta_path = meta_root / (key + ".meta.json") + last_modified = "" + try: + if meta_path.exists(): + md = json.loads(meta_path.read_text(encoding="utf-8")) + last_modified = md.get("last_modified", "") + except (OSError, json.JSONDecodeError): + pass + if not last_modified: + last_modified = datetime.fromtimestamp( + st.st_mtime, tz=timezone.utc + ).strftime("%Y-%m-%dT%H:%M:%S.000Z") + results.append({ + "key": key, + "size": st.st_size, + "last_modified": last_modified, + }) + matched += 1 + if matched >= scan_limit: + break + except PermissionError: + continue + if matched >= scan_limit: + break + + results.sort(key=lambda r: r["key"]) + truncated = len(results) > limit + return {"results": results[:limit], "truncated": truncated} + def put_object( self, bucket_name: str, diff --git a/app/ui.py b/app/ui.py index 8da6a09..908a0c7 100644 --- a/app/ui.py +++ b/app/ui.py @@ -641,6 +641,33 @@ def stream_bucket_objects(bucket_name: str): ) 
+@ui_bp.get("/buckets//objects/search") +@limiter.limit("30 per minute") +def search_bucket_objects(bucket_name: str): + principal = _current_principal() + try: + _authorize_ui(principal, bucket_name, "list") + except IamError as exc: + return jsonify({"error": str(exc)}), 403 + + query = request.args.get("q", "").strip() + if not query: + return jsonify({"results": [], "truncated": False}) + + try: + limit = max(1, min(int(request.args.get("limit", 500)), 1000)) + except (ValueError, TypeError): + limit = 500 + + prefix = request.args.get("prefix", "").strip() + + storage = _storage() + try: + return jsonify(storage.search_objects(bucket_name, query, prefix=prefix, limit=limit)) + except StorageError as exc: + return jsonify({"error": str(exc)}), 404 + + @ui_bp.post("/buckets//upload") @limiter.limit("30 per minute") def upload_object(bucket_name: str): diff --git a/static/js/bucket-detail-main.js b/static/js/bucket-detail-main.js index 09b443c..7b5d6ff 100644 --- a/static/js/bucket-detail-main.js +++ b/static/js/bucket-detail-main.js @@ -374,19 +374,19 @@ const items = []; - if (useDelimiterMode && streamFolders.length > 0) { + if (searchResults !== null) { + searchResults.forEach(obj => { + items.push({ type: 'file', data: obj, displayKey: obj.key }); + }); + } else if (useDelimiterMode && streamFolders.length > 0) { streamFolders.forEach(folderPath => { const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, ''); - if (!currentFilterTerm || folderName.toLowerCase().includes(currentFilterTerm)) { - items.push({ type: 'folder', path: folderPath, displayKey: folderName }); - } + items.push({ type: 'folder', path: folderPath, displayKey: folderName }); }); allObjects.forEach(obj => { const remainder = obj.key.slice(currentPrefix.length); if (!remainder) return; - if (!currentFilterTerm || remainder.toLowerCase().includes(currentFilterTerm)) { - items.push({ type: 'file', data: obj, displayKey: remainder }); - } + items.push({ type: 'file', data: 
obj, displayKey: remainder }); }); } else { const folders = new Set(); @@ -402,9 +402,7 @@ const slashIndex = remainder.indexOf('/'); if (slashIndex === -1 && !isFolderMarker) { - if (!currentFilterTerm || remainder.toLowerCase().includes(currentFilterTerm)) { - items.push({ type: 'file', data: obj, displayKey: remainder }); - } + items.push({ type: 'file', data: obj, displayKey: remainder }); } else { const effectiveSlashIndex = isFolderMarker && slashIndex === remainder.length - 1 ? slashIndex @@ -413,9 +411,7 @@ const folderPath = currentPrefix + folderName + '/'; if (!folders.has(folderPath)) { folders.add(folderPath); - if (!currentFilterTerm || folderName.toLowerCase().includes(currentFilterTerm)) { - items.push({ type: 'folder', path: folderPath, displayKey: folderName }); - } + items.push({ type: 'folder', path: folderPath, displayKey: folderName }); } } }); @@ -2094,8 +2090,63 @@ } }; + let searchDebounceTimer = null; + let searchAbortController = null; + let searchResults = null; + + const performServerSearch = async (term) => { + if (searchAbortController) searchAbortController.abort(); + searchAbortController = new AbortController(); + + try { + const params = new URLSearchParams({ q: term, limit: '500' }); + if (currentPrefix) params.set('prefix', currentPrefix); + const searchUrl = objectsStreamUrl.replace('/stream', '/search'); + const response = await fetch(`${searchUrl}?${params}`, { + signal: searchAbortController.signal + }); + if (!response.ok) throw new Error(`HTTP ${response.status}`); + const data = await response.json(); + searchResults = (data.results || []).map(obj => processStreamObject(obj)); + memoizedVisibleItems = null; + memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null }; + refreshVirtualList(); + if (loadMoreStatus) { + const countText = searchResults.length.toLocaleString(); + const truncated = data.truncated ? 
'+' : ''; + loadMoreStatus.textContent = `${countText}${truncated} result${searchResults.length !== 1 ? 's' : ''}`; + } + } catch (e) { + if (e.name === 'AbortError') return; + if (loadMoreStatus) { + loadMoreStatus.textContent = 'Search failed'; + } + } + }; + document.getElementById('object-search')?.addEventListener('input', (event) => { - currentFilterTerm = event.target.value.toLowerCase(); + const newTerm = event.target.value.toLowerCase(); + const wasFiltering = currentFilterTerm.length > 0; + const isFiltering = newTerm.length > 0; + currentFilterTerm = newTerm; + + clearTimeout(searchDebounceTimer); + + if (isFiltering) { + searchDebounceTimer = setTimeout(() => performServerSearch(newTerm), 300); + return; + } + + if (!isFiltering && wasFiltering) { + if (searchAbortController) searchAbortController.abort(); + searchResults = null; + memoizedVisibleItems = null; + memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null }; + if (loadMoreStatus) { + loadMoreStatus.textContent = buildBottomStatusText(streamingComplete); + } + } + updateFilterWarning(); refreshVirtualList(); }); @@ -2136,7 +2187,18 @@ var searchInput = document.getElementById('object-search'); if (searchInput && document.activeElement === searchInput) { searchInput.value = ''; + const wasFiltering = currentFilterTerm.length > 0; currentFilterTerm = ''; + if (wasFiltering) { + clearTimeout(searchDebounceTimer); + if (searchAbortController) searchAbortController.abort(); + searchResults = null; + memoizedVisibleItems = null; + memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null }; + if (loadMoreStatus) { + loadMoreStatus.textContent = buildBottomStatusText(streamingComplete); + } + } refreshVirtualList(); searchInput.blur(); } From d8232340c3d3284dbf7788e17108b36e94d81b8e Mon Sep 17 00:00:00 2001 From: kqjy Date: Thu, 26 Feb 2026 17:38:44 +0800 Subject: [PATCH 5/5] Update docs --- docs.md | 69 ++++++- templates/docs.html | 448 
+++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 512 insertions(+), 5 deletions(-) diff --git a/docs.md b/docs.md index 90110bc..7576a62 100644 --- a/docs.md +++ b/docs.md @@ -139,6 +139,7 @@ All configuration is done via environment variables. The table below lists every | `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. | | `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. | | `AWS_SERVICE` | `s3` | Service string for SigV4. | +| `DISPLAY_TIMEZONE` | `UTC` | Timezone for timestamps in the web UI (e.g., `US/Eastern`, `Asia/Tokyo`). | ### IAM & Security @@ -170,6 +171,7 @@ All configuration is done via environment variables. The table below lists every | `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/`). | | `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `//`). | | `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). | +| `RATE_LIMIT_ADMIN` | `60 per minute` | Rate limit for admin API endpoints (`/admin/*`). | | `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. | ### Server Configuration @@ -256,6 +258,12 @@ Once enabled, configure lifecycle rules via: | `MULTIPART_MIN_PART_SIZE` | `5242880` (5 MB) | Minimum part size for multipart uploads. | | `BUCKET_STATS_CACHE_TTL` | `60` | Seconds to cache bucket statistics. | | `BULK_DELETE_MAX_KEYS` | `500` | Maximum keys per bulk delete request. | +| `BULK_DOWNLOAD_MAX_BYTES` | `1073741824` (1 GiB) | Maximum total size for bulk ZIP downloads. | +| `OBJECT_CACHE_TTL` | `60` | Seconds to cache object metadata. 
| + +#### Gzip Compression + +API responses for JSON, XML, HTML, CSS, and JavaScript are automatically gzip-compressed when the client sends `Accept-Encoding: gzip`. Compression activates for responses larger than 500 bytes and is handled by a WSGI middleware (`app/compression.py`). Binary object downloads and streaming responses are never compressed. No configuration is needed. ### Server Settings @@ -285,6 +293,12 @@ If running behind a reverse proxy (e.g., Nginx, Cloudflare, or a tunnel), ensure The application automatically trusts these headers to generate correct presigned URLs (e.g., `https://s3.example.com/...` instead of `http://127.0.0.1:5000/...`). Alternatively, you can explicitly set `API_BASE_URL` to your public endpoint. +| Variable | Default | Notes | +| --- | --- | --- | +| `NUM_TRUSTED_PROXIES` | `1` | Number of trusted reverse proxies for `X-Forwarded-*` header processing. | +| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. | +| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** | + ## 4. Upgrading and Updates ### Version Checking @@ -912,7 +926,7 @@ Objects with forward slashes (`/`) in their keys are displayed as a folder hiera - Select multiple objects using checkboxes - **Bulk Delete**: Delete multiple objects at once -- **Bulk Download**: Download selected objects as individual files +- **Bulk Download**: Download selected objects as a single ZIP archive (up to `BULK_DOWNLOAD_MAX_BYTES`, default 1 GiB) #### Search & Filter @@ -985,6 +999,7 @@ MyFSIO supports **server-side encryption at rest** to protect your data. 
When en |------|-------------| | **AES-256 (SSE-S3)** | Server-managed encryption using a local master key | | **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS | +| **SSE-C** | Server-side encryption with customer-provided keys (per-request) | ### Enabling Encryption @@ -1083,6 +1098,44 @@ encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key) decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key) ``` +### SSE-C (Customer-Provided Keys) + +With SSE-C, you provide your own 256-bit AES encryption key with each request. The server encrypts/decrypts using your key but never stores it. You must supply the same key for both upload and download. + +**Required headers:** + +| Header | Value | +|--------|-------| +| `x-amz-server-side-encryption-customer-algorithm` | `AES256` | +| `x-amz-server-side-encryption-customer-key` | Base64-encoded 256-bit key | +| `x-amz-server-side-encryption-customer-key-MD5` | Base64-encoded MD5 of the key | + +```bash +# Generate a 256-bit key +KEY=$(openssl rand -base64 32) +KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64) + +# Upload with SSE-C +curl -X PUT "http://localhost:5000/my-bucket/secret.txt" \ + -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \ + -H "x-amz-server-side-encryption-customer-algorithm: AES256" \ + -H "x-amz-server-side-encryption-customer-key: $KEY" \ + -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \ + --data-binary @secret.txt + +# Download with SSE-C (same key required) +curl "http://localhost:5000/my-bucket/secret.txt" \ + -H "X-Access-Key: ..." -H "X-Secret-Key: ..." 
\ + -H "x-amz-server-side-encryption-customer-algorithm: AES256" \ + -H "x-amz-server-side-encryption-customer-key: $KEY" \ + -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" +``` + +**Key points:** +- SSE-C does not require `ENCRYPTION_ENABLED` or `KMS_ENABLED` — the key is provided per-request +- If you lose your key, the data is irrecoverable +- The MD5 header is optional but recommended for integrity verification + ### Important Notes - **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted @@ -1959,6 +2012,20 @@ curl -X PUT "http://localhost:5000/my-bucket/file.txt" \ -H "x-amz-meta-newkey: newvalue" ``` +### MoveObject (UI) + +Move an object to a different key or bucket. This is a UI-only convenience operation that performs a copy followed by a delete of the source. Requires `read` and `delete` on the source, and `write` on the destination. + +```bash +# Move via UI API +curl -X POST "http://localhost:5100/ui/buckets/my-bucket/objects/old-path/file.txt/move" \ + -H "Content-Type: application/json" \ + --cookie "session=..." \ + -d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}' +``` + +The move is atomic from the caller's perspective: if the copy succeeds but the delete fails, the object exists in both locations (no data loss). + ### UploadPartCopy Copy data from an existing object into a multipart upload part: diff --git a/templates/docs.html b/templates/docs.html index 9ab9dcf..2faf377 100644 --- a/templates/docs.html +++ b/templates/docs.html @@ -52,6 +52,11 @@
  • Access Control Lists
  • Object & Bucket Tagging
  • Static Website Hosting
  • +
  • CORS Configuration
  • +
  • PostObject (Form Upload)
  • +
  • List Objects API v2
  • +
  • Upgrading & Updates
  • +
  • Full API Reference
  • @@ -126,6 +131,11 @@ python run.py --mode ui
    + + + + + @@ -187,6 +197,11 @@ python run.py --mode ui + + + + + @@ -338,6 +353,24 @@ python run.py --mode ui + + + + + + + + + + + + + + + + + + @@ -366,6 +399,16 @@ python run.py --mode ui + + + + + + + + + + @@ -491,7 +534,7 @@ sudo journalctl -u myfsio -f # View logs @@ -613,15 +656,75 @@ curl -X PUT {{ api_base }}/demo/notes.txt \ + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    5000 Listen port (UI uses 5100).
    DISPLAY_TIMEZONEUTCTimezone for UI timestamps (e.g., US/Eastern, Asia/Tokyo).
    CORS Settings
    100 per minute Rate limit for HEAD requests.
    RATE_LIMIT_ADMIN60 per minuteRate limit for admin API endpoints (/admin/*).
    Server Settings
    604800 Maximum presigned URL expiry time (7 days).
    Proxy & Network Settings
    NUM_TRUSTED_PROXIES1Number of trusted reverse proxies for X-Forwarded-* headers.
    ALLOWED_REDIRECT_HOSTS(empty)Comma-separated whitelist of safe redirect targets.
    ALLOW_INTERNAL_ENDPOINTSfalseAllow connections to internal/private IPs (webhooks, replication).
    Storage Limits
    50 Max lifecycle history records per bucket.
    OBJECT_CACHE_TTL60Seconds to cache object metadata.
    BULK_DOWNLOAD_MAX_BYTES1 GiBMax total size for bulk ZIP downloads.
    ENCRYPTION_CHUNK_SIZE_BYTES 65536 /<bucket>/<key> Delete an object.
    HEAD/<bucket>Check if a bucket exists.
    HEAD/<bucket>/<key>Get object metadata without downloading.
    POST/<bucket>?deleteBulk delete objects (XML body).
    GET/PUT/DELETE /<bucket>?policyFetch, upsert, or remove a bucket policy (S3-compatible).Bucket policy management.
    GET/PUT/<bucket>?versioningVersioning status.
    GET/PUT/DELETE/<bucket>?lifecycleLifecycle rules.
    GET/PUT/DELETE/<bucket>?corsCORS configuration.
    GET/PUT/DELETE/<bucket>?encryptionDefault encryption.
    GET/PUT/<bucket>?aclBucket ACL.
    GET/PUT/DELETE/<bucket>?taggingBucket tags.
    GET/PUT/DELETE/<bucket>/<key>?taggingObject tags.
    POST/<bucket>/<key>?uploadsInitiate multipart upload.
    POST/<bucket>/<key>?selectSQL query (SelectObjectContent).
    -

    All responses include X-Request-Id for tracing. Logs land in logs/api.log and logs/ui.log.

    +

    All responses include X-Request-Id for tracing. See the Full API Reference for the complete endpoint list. Logs land in logs/api.log and logs/ui.log.

    @@ -1311,6 +1414,10 @@ curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \ KMS (SSE-KMS) Encryption using customer-managed keys via the built-in KMS + + SSE-C + Server-side encryption with customer-provided keys (per-request) + @@ -1377,6 +1484,54 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \

    Envelope Encryption: Each object is encrypted with a unique Data Encryption Key (DEK). The DEK is then encrypted (wrapped) by the master key or KMS key and stored alongside the ciphertext. On read, the DEK is unwrapped and used to decrypt the object transparently.

    + +

    SSE-C (Customer-Provided Keys)

    +

    With SSE-C, you supply your own 256-bit AES key with each request. The server encrypts/decrypts using your key but never stores it. You must provide the same key for both upload and download.

    +
    + + + + + + + + + + + + + + + + + + + + + +
    HeaderValue
    x-amz-server-side-encryption-customer-algorithmAES256
    x-amz-server-side-encryption-customer-keyBase64-encoded 256-bit key
    x-amz-server-side-encryption-customer-key-MD5Base64-encoded MD5 of the key
    +
    +
    # Generate a 256-bit key
    +KEY=$(openssl rand -base64 32)
    +KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64)
    +
    +# Upload with SSE-C
    +curl -X PUT "{{ api_base }}/my-bucket/secret.txt" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
    +  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
    +  -H "x-amz-server-side-encryption-customer-key: $KEY" \
    +  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5" \
    +  --data-binary @secret.txt
    +
    +# Download with SSE-C (same key required)
    +curl "{{ api_base }}/my-bucket/secret.txt" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
    +  -H "x-amz-server-side-encryption-customer-algorithm: AES256" \
    +  -H "x-amz-server-side-encryption-customer-key: $KEY" \
    +  -H "x-amz-server-side-encryption-customer-key-MD5: $KEY_MD5"
    +
    + Note: SSE-C does not require ENCRYPTION_ENABLED or KMS_ENABLED. If you lose your key, the data is irrecoverable. +
    @@ -1926,7 +2081,7 @@ curl -X POST "{{ api_base }}/<bucket>/data.csv?select" \ 22

    Advanced S3 Operations

    -

    Copy objects, upload part copies, and use range requests for partial downloads.

    +

    Copy, move, and partially download objects using advanced S3 operations.

    CopyObject

    # Copy within same bucket
    @@ -1941,6 +2096,13 @@ curl -X PUT "{{ api_base }}/<bucket>/file.txt" \
       -H "x-amz-metadata-directive: REPLACE" \
       -H "x-amz-meta-newkey: newvalue"
    +

    MoveObject (UI)

    +

    Move an object to a different key or bucket via the UI. Performs a copy then deletes the source. Requires read+delete on source and write on destination.

    +
    # Move via UI API (session-authenticated)
    +curl -X POST "http://localhost:5100/ui/buckets/<bucket>/objects/<key>/move" \
    +  -H "Content-Type: application/json" --cookie "session=..." \
    +  -d '{"dest_bucket": "other-bucket", "dest_key": "new-path/file.txt"}'
    +

    UploadPartCopy

    Copy data from an existing object into a multipart upload part:

    # Copy bytes 0-10485759 from source as part 1
    @@ -2193,6 +2355,279 @@ server {
             
           
         
    +
    +
    +
    + 26 +

    CORS Configuration

    +
    +

    Configure per-bucket Cross-Origin Resource Sharing rules to control which origins can access your bucket from a browser.

    + +

    Setting CORS Rules

    +
    # Set CORS configuration
    +curl -X PUT "{{ api_base }}/<bucket>?cors" \
    +  -H "Content-Type: application/xml" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
    +  -d '<CORSConfiguration>
    +  <CORSRule>
    +    <AllowedOrigin>https://example.com</AllowedOrigin>
    +    <AllowedMethod>GET</AllowedMethod>
    +    <AllowedMethod>PUT</AllowedMethod>
    +    <AllowedHeader>*</AllowedHeader>
    +    <ExposeHeader>ETag</ExposeHeader>
    +    <MaxAgeSeconds>3600</MaxAgeSeconds>
    +  </CORSRule>
    +</CORSConfiguration>'
    +
    +# Get CORS configuration
    +curl "{{ api_base }}/<bucket>?cors" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
    +
    +# Delete CORS configuration
    +curl -X DELETE "{{ api_base }}/<bucket>?cors" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
    + +

    Rule Fields

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    AllowedOriginOrigins allowed to make requests (supports * wildcard)
    AllowedMethodHTTP methods: GET, PUT, POST, DELETE, HEAD
    AllowedHeaderRequest headers allowed in preflight (supports *)
    ExposeHeaderResponse headers visible to the browser (e.g., ETag, x-amz-request-id)
    MaxAgeSecondsHow long the browser caches preflight results
    +
    +
    +
    +
    +
    +
    + 27 +

    PostObject (HTML Form Upload)

    +
    +

    Upload objects directly from an HTML form using browser-based POST uploads with policy-based authorization.

    + +

    Form Fields

    +
    + + + + + + + + + + + + + + + + + + +
    FieldDescription
keyObject key (supports ${filename} variable)
    fileThe file to upload
    policyBase64-encoded policy document (JSON)
    x-amz-signatureHMAC-SHA256 signature of the policy
    x-amz-credentialAccess key / date / region / s3 / aws4_request
    x-amz-algorithmAWS4-HMAC-SHA256
    x-amz-dateISO 8601 date (e.g., 20250101T000000Z)
    Content-TypeMIME type of the uploaded file
    x-amz-meta-*Custom metadata headers
    +
    + +

    Simple Upload (No Signing)

    +
    <form action="{{ api_base }}/my-bucket" method="POST" enctype="multipart/form-data">
+  <input type="hidden" name="key" value="uploads/${filename}">
    +  <input type="file" name="file">
    +  <button type="submit">Upload</button>
    +</form>
    + +

    Signed Upload (With Policy)

    +

    For authenticated uploads, include a base64-encoded policy and SigV4 signature fields. The policy constrains allowed keys, content types, and size limits. See docs.md Section 20 for full signing examples.

    +
    +
    +
    +
    +
    + 28 +

    List Objects API v2

    +
    +

    Use the v2 list API for improved pagination with continuation tokens instead of markers.

    + +

    Usage

    +
    # List with v2 API
    +curl "{{ api_base }}/<bucket>?list-type=2&prefix=logs/&delimiter=/&max-keys=100" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
    +
    +# Paginate with continuation token
    +curl "{{ api_base }}/<bucket>?list-type=2&continuation-token=<token>" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
    +
    +# Start listing after a specific key
    +curl "{{ api_base }}/<bucket>?list-type=2&start-after=photos/2025/" \
    +  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
    + +

    Query Parameters

    +
    + + + + + + + + + + + + + + + + + +
    ParameterDescription
    list-type=2Enables v2 API (required)
    prefixFilter to keys starting with this prefix
    delimiterGroup keys by delimiter (typically / for folders)
    max-keysMaximum objects to return (default 1000)
    continuation-tokenToken from previous response for pagination
    start-afterStart listing after this key (first page only)
    fetch-ownerInclude owner info in response
    encoding-typeSet to url to URL-encode keys in response
    +
    +
    +
    +
    +
    +
    + 29 +

    Upgrading & Updates

    +
    +

    How to safely update MyFSIO to a new version.

    + +

    Pre-Update Backup

    +

    Always back up before updating:

    +
    # Back up configuration
    +cp -r data/.myfsio.sys/config/ config-backup/
    +
    +# Back up data (optional, for critical deployments)
    +tar czf myfsio-backup-$(date +%Y%m%d).tar.gz data/
    +
    +# Back up logs
    +cp -r logs/ logs-backup/
    + +

    Update Procedure

    +
      +
    1. Stop the service: sudo systemctl stop myfsio (or kill the process)
    2. +
    3. Pull new version: git pull origin main or download the new binary
    4. +
    5. Install dependencies: pip install -r requirements.txt
    6. +
    7. Validate config: python run.py --check-config
    8. +
    9. Start the service: sudo systemctl start myfsio
    10. +
    11. Verify: curl http://localhost:5000/myfsio/health
    12. +
    + +

    Docker Update

    +
    docker pull myfsio:latest
    +docker stop myfsio && docker rm myfsio
    +docker run -d --name myfsio -v myfsio-data:/app/data -p 5000:5000 -p 5100:5100 myfsio:latest
    + +

    Rollback

    +

    If something goes wrong, stop the service, restore the backed-up config and data directories, then restart with the previous binary or code version. See docs.md Section 4 for detailed rollback procedures including blue-green deployment strategies.

    +
    +
    +
    +
    +
    + 30 +

    Full API Reference

    +
    +

    Complete list of all S3-compatible, admin, and KMS endpoints.

    +
    # Service
    +GET    /myfsio/health                   # Health check
    +
    +# Bucket Operations
    +GET    /                               # List buckets
    +PUT    /<bucket>                        # Create bucket
    +DELETE /<bucket>                        # Delete bucket
    +GET    /<bucket>                        # List objects (?list-type=2)
    +HEAD   /<bucket>                        # Check bucket exists
    +POST   /<bucket>                        # POST object / form upload
    +POST   /<bucket>?delete                 # Bulk delete
    +
    +# Bucket Configuration
    +GET|PUT|DELETE /<bucket>?policy          # Bucket policy
    +GET|PUT        /<bucket>?quota           # Bucket quota
    +GET|PUT        /<bucket>?versioning      # Versioning
    +GET|PUT|DELETE /<bucket>?lifecycle        # Lifecycle rules
    +GET|PUT|DELETE /<bucket>?cors             # CORS config
    +GET|PUT|DELETE /<bucket>?encryption       # Default encryption
    +GET|PUT        /<bucket>?acl              # Bucket ACL
    +GET|PUT|DELETE /<bucket>?tagging          # Bucket tags
    +GET|PUT|DELETE /<bucket>?replication      # Replication rules
    +GET|PUT        /<bucket>?logging          # Access logging
    +GET|PUT        /<bucket>?notification     # Event notifications
    +GET|PUT        /<bucket>?object-lock      # Object lock config
    +GET|PUT|DELETE /<bucket>?website          # Static website
    +GET            /<bucket>?uploads          # List multipart uploads
    +GET            /<bucket>?versions         # List object versions
    +GET            /<bucket>?location         # Bucket region
    +
    +# Object Operations
    +PUT    /<bucket>/<key>                  # Upload object
    +GET    /<bucket>/<key>                  # Download (Range supported)
    +DELETE /<bucket>/<key>                  # Delete object
    +HEAD   /<bucket>/<key>                  # Object metadata
    +POST   /<bucket>/<key>?select           # SQL query (SelectObjectContent)
    +
    +# Object Configuration
    +GET|PUT|DELETE /<bucket>/<key>?tagging    # Object tags
    +GET|PUT        /<bucket>/<key>?acl        # Object ACL
    +GET|PUT        /<bucket>/<key>?retention  # Object retention
    +GET|PUT        /<bucket>/<key>?legal-hold # Legal hold
    +
    +# Multipart Upload
    +POST   /<bucket>/<key>?uploads          # Initiate
    +PUT    /<bucket>/<key>?uploadId=X&partNumber=N  # Upload part
    +POST   /<bucket>/<key>?uploadId=X       # Complete
    +DELETE /<bucket>/<key>?uploadId=X       # Abort
    +GET    /<bucket>/<key>?uploadId=X       # List parts
    +
    +# Copy (via x-amz-copy-source header)
    +PUT    /<bucket>/<key>                  # CopyObject
    +PUT    /<bucket>/<key>?uploadId&partNumber # UploadPartCopy
    +
    +# Admin API
    +GET|PUT /admin/site                     # Local site config
    +GET     /admin/sites                    # List peers
    +POST    /admin/sites                    # Register peer
    +GET|PUT|DELETE /admin/sites/<id>        # Manage peer
    +GET     /admin/sites/<id>/health        # Peer health
    +GET     /admin/topology                 # Cluster topology
    +GET|POST|PUT|DELETE /admin/website-domains  # Domain mappings
    +
    +# KMS API
    +GET|POST /kms/keys                      # List / Create keys
    +GET|DELETE /kms/keys/<id>               # Get / Delete key
    +POST   /kms/keys/<id>/enable            # Enable key
    +POST   /kms/keys/<id>/disable           # Disable key
    +POST   /kms/keys/<id>/rotate            # Rotate key
    +POST   /kms/encrypt                     # Encrypt data
    +POST   /kms/decrypt                     # Decrypt data
    +POST   /kms/generate-data-key           # Generate data key
    +POST   /kms/generate-random             # Generate random bytes
    +
    +