65 Commits

Author SHA1 Message Date
b4e2e15936 MyFSIO v0.5.0 Release
Reviewed-on: #37
2026-04-27 06:40:43 +00:00
05a30d2227 Embed UI templates and static assets into binary; simplify deployment to single file; Update script files 2026-04-27 13:35:50 +08:00
02fa9d612c Docs update 2026-04-26 23:51:54 +08:00
6c5ccee8cb Fix peer-site edit 422; align IAM admin definition across runtime/UI/JS; auto-migrate legacy full-access policies (gated on iam:* to avoid promoting bucketadmin); reject empty endpoint on peer-site update; update docs 2026-04-26 23:02:38 +08:00
0a60ea4348 Cluster UI: prevent peer actions dropdown overflow; surface peer 403 body 2026-04-26 21:29:56 +08:00
069049b146 Include peer_inbound_access_key in /ui/sites peers JSON
The sites.html edit modal reads peer_inbound_access_key from the row's
data attribute, but the peers JSON built by sites_dashboard omitted the
field, so every edit cleared an existing key. Add the field to the JSON
so the modal renders the stored value and preserves it on save.
2026-04-26 20:29:09 +08:00
6ba948bcc0 Add Cluster feature 2026-04-26 19:24:18 +08:00
b5facd8d37 Fix DeleteObject(VersionId='null') to permanently delete null version instead of creating a delete-marker 2026-04-26 17:11:32 +08:00
1c9ebdeab7 Allow recovery of poisoned objects via PUT/DELETE/DeleteObjects while preserving object-lock; implement race-free GetObject/HeadObject ?partNumber=N with correct zero-length-part response 2026-04-25 21:28:27 +08:00
777d862a02 Fix integrity auto-heal data-loss bug, return 422 ObjectCorrupted, lock heal swap, verify multipart peer body 2026-04-25 19:29:54 +08:00
660c328a84 Merge pull request - dev-pyrust into next
Reviewed-on: #36
2026-04-25 09:19:12 +00:00
3a590e6639 Make auto_heal real: peer-fetch corrupted_object with verified swap, poison-fallback on no peer 2026-04-25 17:14:38 +08:00
dd1e6d0409 Stop search auto-pagination from looping on failure; accept CSRF in JSON body; make replication pause/resume idempotent 2026-04-25 14:06:39 +08:00
7e32ac2a46 Add object search endpoint, hide internal metadata keys, fix toast/template bugs
- Implement missing /ui/buckets/{bucket}/objects/search route used by the
  Objects tab filter; previously returned 404 and showed 'Search failed'.
- Filter __checksum_*__ and __size__ sentinels from the object metadata
  panel so users no longer see internal keys in the UI.
- Include a body field in bucket-delete flash message so the toast shows
  distinct title and body.
- Replace Tera boolean 'or' operator with if/else fallback in
  replication_wizard.html, sites.html, iam.html.
2026-04-25 00:58:49 +08:00
37541ffba1 Gate 206 response checksum headers on full-range coverage 2026-04-24 20:18:41 +08:00
5aba9ac9e9 Add snapshot/range storage primitives, gate GET preconditions on served snapshot, support partial-decrypt Range GET for SSE-encrypted objects 2026-04-24 18:45:22 +08:00
4f05192548 Fix DELETE cleanup cost with single-syscall rmdir walk, tighten FS segment validation, stream CopyObject 2026-04-24 15:04:16 +08:00
1ea6dfae07 Fix S3 conformance: XML config round-trip, Suspended versioning, ListVersions pagination, per-bucket CORS, canned ACL/SSE rejection, checksum attrs, request logging 2026-04-24 13:09:30 +08:00
f2df64479c Fix S3 versioning (live-object VersionId, DM PUT/DELETE), harden DeleteObjects/ListObjects conformance, and run hot paths on blocking threads 2026-04-23 22:40:38 +08:00
bd405cc2fe Fix S3 versioning/delete markers, path-safety leaks, and error-code conformance; parallelize DeleteObjects; restore per-op rate limits 2026-04-23 20:23:11 +08:00
7ef3820f6e Fix SigV4/SHA256/TCP_NODELAY critical paths; tighten multipart, copy, versioning, and S3 error conformance 2026-04-23 17:52:30 +08:00
e1fb225034 csrf fixes 2026-04-22 23:01:32 +08:00
2767e7e79d Optimize bucket listing for 10K-100K objects
- Shallow listing: read per-directory _index.json once for eTags instead
  of N serial .meta.json reads. Validate prefix for path traversal and
  verify normalized target stays within bucket root.
- Recursive listing: cache full per-directory index during the walk so
  each _index.json is parsed at most once per call.
- Per-bucket listing cache with 5s TTL and per-bucket rebuild mutex.
  Invalidated on put/delete/copy/metadata/tags/multipart-complete.
  Pagination uses partition_point for O(log n) start lookup.
- UI stream endpoint now actually streams via mpsc + Body::from_stream
  instead of buffering into a Vec<String>. Cancels producer on client
  disconnect.
- UI JSON endpoint honors delimiter=/ and returns common_prefixes.
- run_blocking wrapper dispatches sync filesystem work via
  block_in_place on multi-threaded runtimes, falls back to inline on
  current-thread runtimes (unit tests).
2026-04-22 19:55:44 +08:00
217af6d1c6 Full migration and transition to Rust; Remove python artifacts 2026-04-22 17:19:19 +08:00
51d54b42ac Rust fixes 2026-04-22 15:41:18 +08:00
9ec5797919 Applied max-keys to combined current + archived ListObjectVersions output and reports truncation 2026-04-22 00:12:22 +08:00
8935188c8f Update static website 404 page 2026-04-21 21:26:50 +08:00
c77c592832 Update static website to include proper error handling; add missing features 2026-04-21 20:54:00 +08:00
501d563df2 Add missing features - notifications, object lock, acl 2026-04-21 00:27:50 +08:00
ddcdb4026c Fix domains mapping missing 2026-04-20 22:02:05 +08:00
3e7c0af019 Fix dockerfile issue 2026-04-20 21:39:06 +08:00
ae11c654f9 Merge pull request 'MyFSIO v0.4.2 Release' (#35) from next into main
Reviewed-on: #35
2026-04-01 08:33:28 +00:00
f0c95ac0a9 MyFSIO v0.4.1 Release
Reviewed-on: #34
2026-03-25 04:29:28 +00:00
8ff4797041 MyFSIO v0.4.0 Release
Reviewed-on: #33
2026-03-22 05:06:47 +00:00
50fb5aa387 MyFSIO v0.3.9 Release
Reviewed-on: #32
2026-03-14 09:44:14 +00:00
cc161bf362 MyFSIO v0.3.8 Release
Reviewed-on: #31
2026-03-10 08:31:27 +00:00
2a0e77a754 MyFSIO v0.3.7 Release
Reviewed-on: #30
2026-03-09 06:25:50 +00:00
eb0e435a5a MyFSIO v0.3.6 Release
Reviewed-on: #29
2026-03-08 04:46:31 +00:00
7633007a08 MyFSIO v0.3.5 Release
Reviewed-on: #28
2026-03-07 05:53:02 +00:00
de0d869c9f Merge pull request 'MyFSIO v0.3.4 Release' (#27) from next into main
Reviewed-on: #27
2026-03-02 08:31:32 +00:00
fdd068feee MyFSIO v0.3.3 Release
Reviewed-on: #26
2026-02-27 04:49:32 +00:00
66b7677d2c MyFSIO v0.3.2 Release
Reviewed-on: #25
2026-02-26 10:10:19 +00:00
4d90ead816 Merge pull request 'Fix incorrect Upgrading & Updates section in Docs' (#24) from next into main
Reviewed-on: #24
2026-02-26 09:50:17 +00:00
b37a51ed1d MyFSIO v0.3.1 Release
Reviewed-on: #23
2026-02-26 09:42:37 +00:00
0462a7b62e MyFSIO v0.3.0 Release
Reviewed-on: #22
2026-02-22 10:22:35 +00:00
52660570c1 Merge pull request 'MyFSIO v0.2.9 Release' (#21) from next into main
Reviewed-on: #21
2026-02-15 14:24:14 +00:00
35f61313e0 MyFSIO v0.2.8 Release
Reviewed-on: #20
2026-02-10 14:16:22 +00:00
c470cfb576 MyFSIO v0.2.7 Release
Reviewed-on: #19
2026-02-09 12:22:37 +00:00
jun
d96955deee MyFSIO v0.2.6 Release
Reviewed-on: #18
2026-02-05 16:18:03 +00:00
85181f0be6 Merge pull request 'MyFSIO v0.2.5 Release' (#17) from next into main
Reviewed-on: #17
2026-02-02 05:32:02 +00:00
d5ca7a8be1 Merge pull request 'MyFSIO v0.2.4 Release' (#16) from next into main
Reviewed-on: #16
2026-02-01 10:27:11 +00:00
476dc79e42 MyFSIO v0.2.3 Release
Reviewed-on: #15
2026-01-25 06:05:53 +00:00
bb6590fc5e Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main
Reviewed-on: #14
2026-01-19 07:12:15 +00:00
899db3421b Merge pull request 'MyFSIO v0.2.1 Release' (#13) from next into main
Reviewed-on: #13
2026-01-12 08:03:29 +00:00
caf01d6ada Merge pull request 'MyFSIO v0.2.0 Release' (#12) from next into main
Reviewed-on: #12
2026-01-05 15:48:03 +00:00
bb366cb4cd Merge pull request 'MyFSIO v0.1.9 Release' (#10) from next into main
Reviewed-on: #10
2025-12-29 06:49:48 +00:00
a2745ff2ee Merge pull request 'MyFSIO v0.1.8 Release' (#9) from next into main
Reviewed-on: #9
2025-12-23 06:01:32 +00:00
28cb656d94 Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main
Reviewed-on: #8
2025-12-22 03:10:35 +00:00
3c44152fc6 Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main
Reviewed-on: #7
2025-12-21 06:30:21 +00:00
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main
Reviewed-on: #6
2025-12-13 15:41:03 +00:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main
Reviewed-on: #5
2025-12-13 08:22:43 +00:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main
Reviewed-on: #4
2025-12-03 04:14:57 +00:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main
Reviewed-on: #3
2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main
Reviewed-on: #2
2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main
Reviewed-on: #1
2025-11-22 12:31:27 +00:00
220 changed files with 27984 additions and 64817 deletions

View File

@@ -3,7 +3,7 @@
logs
data
tmp
myfsio-engine/target
myfsio-engine/tests
target
crates/*/tests
Dockerfile
.dockerignore

6
.gitignore vendored
View File

@@ -26,12 +26,8 @@ dist/
*.egg-info/
.eggs/
# Rust / maturin build artifacts
python/myfsio_core/target/
python/myfsio_core/Cargo.lock
# Rust engine build artifacts
rust/myfsio-engine/target/
target/
# Local runtime artifacts
logs/

View File

@@ -1460,6 +1460,27 @@ dependencies = [
"crypto-common 0.2.1",
]
[[package]]
name = "dirs"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e"
dependencies = [
"dirs-sys",
]
[[package]]
name = "dirs-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab"
dependencies = [
"libc",
"option-ext",
"redox_users",
"windows-sys 0.60.2",
]
[[package]]
name = "displaydoc"
version = "0.2.5"
@@ -2542,6 +2563,15 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]]
name = "matchers"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
dependencies = [
"regex-automata",
]
[[package]]
name = "matchit"
version = "0.8.4"
@@ -2705,8 +2735,11 @@ dependencies = [
"dotenvy",
"duckdb",
"futures",
"hex",
"http-body 1.0.1",
"http-body-util",
"hyper 1.9.0",
"md-5 0.10.6",
"mime_guess",
"multer",
"myfsio-auth",
@@ -2721,13 +2754,17 @@ dependencies = [
"regex",
"reqwest",
"roxmltree",
"rust-embed",
"serde",
"serde_json",
"serde_urlencoded",
"sha2 0.10.9",
"subtle",
"sysinfo",
"tempfile",
"tera",
"tokio",
"tokio-stream",
"tokio-util",
"tower",
"tower-http",
@@ -2754,6 +2791,7 @@ dependencies = [
"tempfile",
"thiserror",
"tokio",
"tokio-util",
"tracing",
"unicode-normalization",
"uuid",
@@ -2765,6 +2803,7 @@ version = "0.5.0"
dependencies = [
"chrono",
"myfsio-common",
"percent-encoding",
"quick-xml",
"serde",
]
@@ -2855,6 +2894,12 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
[[package]]
name = "option-ext"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "outref"
version = "0.5.2"
@@ -3308,6 +3353,17 @@ dependencies = [
"bitflags",
]
[[package]]
name = "redox_users"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac"
dependencies = [
"getrandom 0.2.17",
"libredox",
"thiserror",
]
[[package]]
name = "regex"
version = "1.12.3"
@@ -3454,6 +3510,42 @@ version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c20b6793b5c2fa6553b250154b78d6d0db37e72700ae35fad9387a46f487c97"
[[package]]
name = "rust-embed"
version = "8.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04113cb9355a377d83f06ef1f0a45b8ab8cd7d8b1288160717d66df5c7988d27"
dependencies = [
"rust-embed-impl",
"rust-embed-utils",
"walkdir",
]
[[package]]
name = "rust-embed-impl"
version = "8.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0902e4c7c8e997159ab384e6d0fc91c221375f6894346ae107f47dd0f3ccaa"
dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
"shellexpand",
"syn 2.0.117",
"walkdir",
]
[[package]]
name = "rust-embed-utils"
version = "8.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5bcdef0be6fe7f6fa333b1073c949729274b05f123a0ad7efcb8efd878e5c3b1"
dependencies = [
"globset",
"sha2 0.10.9",
"walkdir",
]
[[package]]
name = "rust_decimal"
version = "1.41.0"
@@ -3784,6 +3876,15 @@ dependencies = [
"lazy_static",
]
[[package]]
name = "shellexpand"
version = "3.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32824fab5e16e6c4d86dc1ba84489390419a39f97699852b66480bb87d297ed8"
dependencies = [
"dirs",
]
[[package]]
name = "shlex"
version = "1.3.0"
@@ -4181,6 +4282,17 @@ dependencies = [
"tokio",
]
[[package]]
name = "tokio-stream"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
]
[[package]]
name = "tokio-util"
version = "0.7.18"
@@ -4331,10 +4443,14 @@ version = "0.3.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex-automata",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]

View File

@@ -10,14 +10,14 @@ members = [
]
[workspace.package]
version = "0.4.3"
version = "0.5.0"
edition = "2021"
[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
axum = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip"] }
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip", "timeout", "set-header"] }
hyper = { version = "1" }
bytes = "1"
serde = { version = "1", features = ["derive"] }
@@ -38,11 +38,12 @@ percent-encoding = "2"
regex = "1"
unicode-normalization = "0.1"
tracing = "0.1"
tracing-subscriber = "0.3"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
thiserror = "2"
chrono = { version = "0.4", features = ["serde"] }
base64 = "0.22"
tokio-util = { version = "0.7", features = ["io"] }
tokio-util = { version = "0.7", features = ["io", "io-util"] }
tokio-stream = "0.1"
futures = "0.3"
dashmap = "6"
crc32fast = "1"

View File

@@ -3,13 +3,13 @@ FROM rust:1-slim-bookworm AS builder
WORKDIR /build
RUN apt-get update \
&& apt-get install -y --no-install-recommends pkg-config libssl-dev \
&& apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev \
&& rm -rf /var/lib/apt/lists/*
COPY myfsio-engine ./myfsio-engine
COPY Cargo.toml Cargo.lock ./
COPY crates ./crates
RUN cd myfsio-engine \
&& cargo build --release --bin myfsio-server \
RUN cargo build --release --bin myfsio-server \
&& strip target/release/myfsio-server
@@ -24,8 +24,9 @@ RUN apt-get update \
&& useradd -m -u 1000 myfsio \
&& chown -R myfsio:myfsio /app
COPY --from=builder /build/myfsio-engine/target/release/myfsio-server /usr/local/bin/myfsio-server
COPY --from=builder /build/myfsio-engine/templates /app/templates
COPY --from=builder /build/target/release/myfsio-server /usr/local/bin/myfsio-server
COPY --from=builder /build/crates/myfsio-server/templates /app/templates
COPY --from=builder /build/crates/myfsio-server/static /app/static
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
RUN chmod +x /app/docker-entrypoint.sh \
@@ -34,9 +35,13 @@ RUN chmod +x /app/docker-entrypoint.sh \
USER myfsio
EXPOSE 5000
EXPOSE 5100
ENV HOST=0.0.0.0 \
PORT=5000 \
UI_PORT=5100 \
STORAGE_ROOT=/app/data \
TEMPLATES_DIR=/app/templates \
STATIC_DIR=/app/static \
RUST_LOG=info
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \

View File

@@ -1,8 +1,6 @@
# MyFSIO
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The active server lives under `rust/myfsio-engine` and serves both the S3 API and the built-in web UI from a single process.
The repository still contains a `python/` tree, but you do not need Python to run the current server.
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The repository root is the Cargo workspace; the server serves both the S3 API and the built-in web UI from a single process.
## Features
@@ -29,7 +27,6 @@ If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only
From the repository root:
```bash
cd rust/myfsio-engine
cargo run -p myfsio-server --
```
@@ -60,14 +57,13 @@ UI_ENABLED=false cargo run -p myfsio-server --
## Building a Binary
```bash
cd rust/myfsio-engine
cargo build --release -p myfsio-server
```
Binary locations:
- Linux/macOS: `rust/myfsio-engine/target/release/myfsio-server`
- Windows: `rust/myfsio-engine/target/release/myfsio-server.exe`
- Linux/macOS: `target/release/myfsio-server`
- Windows: `target/release/myfsio-server.exe`
Run the built binary directly:
@@ -166,10 +162,10 @@ data/
## Docker
Build the Rust image from the `rust/` directory:
Build the Rust image from the repository root:
```bash
docker build -t myfsio ./rust
docker build -t myfsio .
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
```
@@ -180,11 +176,9 @@ If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3
The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:
```bash
cd rust/myfsio-engine
cargo build --release -p myfsio-server
cd ../..
sudo ./scripts/install.sh --binary ./rust/myfsio-engine/target/release/myfsio-server
sudo ./scripts/install.sh --binary ./target/release/myfsio-server
```
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
@@ -194,7 +188,6 @@ The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/m
Run the Rust test suite from the workspace:
```bash
cd rust/myfsio-engine
cargo test
```
@@ -209,4 +202,4 @@ cargo test
}
```
The `version` field comes from the Rust crate version in `rust/myfsio-engine/crates/myfsio-server/Cargo.toml`.
The `version` field comes from the Rust crate version in `crates/myfsio-server/Cargo.toml`.

View File

@@ -77,17 +77,66 @@ impl RawIamUser {
let user_id = self.user_id.unwrap_or_else(|| {
format!("u-{}", display_name.to_ascii_lowercase().replace(' ', "-"))
});
let policies = self
.policies
.into_iter()
.map(normalize_legacy_full_access)
.collect();
IamUser {
user_id,
display_name,
enabled: self.enabled,
expires_at: self.expires_at,
access_keys,
policies: self.policies,
policies,
}
}
}
/// The explicit action list that old-format "full access" policies carried.
/// `normalize_legacy_full_access` only collapses a policy to the wildcard
/// form when every one of these actions (plus `iam:*`) is present, so a
/// narrower grant is never accidentally promoted.
const LEGACY_FULL_ACCESS_ACTIONS: &[&str] = &[
    "list",
    "read",
    "write",
    "delete",
    "share",
    "policy",
    "create_bucket",
    "delete_bucket",
    "replication",
    "lifecycle",
    "cors",
    "versioning",
    "tagging",
    "encryption",
    "quota",
    "object_lock",
    "notification",
    "logging",
    "website",
];
/// Rewrite a legacy "full access" policy into the modern wildcard form.
///
/// A policy qualifies only when it is global in scope (`bucket == "*"` and
/// `prefix == "*"`), does not already contain the `"*"` action, and lists
/// both `iam:*` and every entry of `LEGACY_FULL_ACCESS_ACTIONS`. Such a
/// policy is replaced by one whose actions are exactly `["*"]`; anything
/// else is returned unchanged so narrower grants are never promoted.
fn normalize_legacy_full_access(policy: IamPolicy) -> IamPolicy {
    let has_action = |wanted: &str| policy.actions.iter().any(|a| a == wanted);

    // Only a global-scope policy without an existing wildcard is a candidate.
    let global_scope = policy.bucket == "*" && policy.prefix == "*" && !has_action("*");

    // The legacy grant is recognized by `iam:*` plus the full legacy action set.
    let legacy_full = global_scope
        && has_action("iam:*")
        && LEGACY_FULL_ACCESS_ACTIONS
            .iter()
            .all(|required| has_action(required));

    if legacy_full {
        IamPolicy {
            bucket: policy.bucket,
            prefix: policy.prefix,
            actions: vec!["*".to_string()],
        }
    } else {
        policy
    }
}
// Serde default helper: users are considered enabled unless stated otherwise.
// NOTE(review): presumably referenced by a `#[serde(default = "default_enabled")]`
// attribute on `RawIamUser::enabled` — confirm at the field definition.
fn default_enabled() -> bool {
    true
}

View File

@@ -31,13 +31,13 @@ fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
mac.finalize().into_bytes().to_vec()
}
fn sha256_hex(data: &[u8]) -> String {
pub fn sha256_hex(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
hex::encode(hasher.finalize())
}
fn aws_uri_encode(input: &str) -> String {
pub fn aws_uri_encode(input: &str) -> String {
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}

View File

@@ -8,6 +8,7 @@ pub const STATS_FILE: &str = "stats.json";
// On-disk metadata filenames used by the filesystem storage engine.
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
pub const INDEX_FILE: &str = "_index.json";
pub const MANIFEST_FILE: &str = "manifest.json";
// Sentinel file marking a directory that was created as an explicit
// zero-byte "directory object" (key ending in '/').
pub const DIR_MARKER_FILE: &str = ".__myfsio_dirobj__";
// Bucket-internal folders that must be hidden from object listings.
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

View File

@@ -3,27 +3,38 @@ use std::fmt;
/// Error codes returned in S3 XML error responses.
///
/// Each variant maps to an HTTP status, a wire name, and a default message
/// in the `impl S3ErrorCode` blocks below. NOTE(review): `ObjectCorrupted`,
/// `NotModified`, and `QuotaExceeded` appear to be server-specific
/// extensions beyond the standard AWS error-code set — confirm against the
/// conformance docs before relying on them externally.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S3ErrorCode {
    AccessDenied,
    BadDigest,
    BucketAlreadyExists,
    BucketAlreadyOwnedByYou,
    BucketNotEmpty,
    EntityTooLarge,
    EntityTooSmall,
    InternalError,
    InvalidAccessKeyId,
    InvalidArgument,
    InvalidBucketName,
    InvalidKey,
    InvalidPart,
    InvalidPartOrder,
    InvalidPolicyDocument,
    InvalidRange,
    InvalidRequest,
    InvalidTag,
    MalformedXML,
    MethodNotAllowed,
    NoSuchBucket,
    NoSuchBucketPolicy,
    NoSuchKey,
    NoSuchLifecycleConfiguration,
    NoSuchUpload,
    NoSuchVersion,
    NoSuchTagSet,
    ObjectCorrupted,
    PreconditionFailed,
    NotModified,
    QuotaExceeded,
    RequestTimeTooSkewed,
    ServerSideEncryptionConfigurationNotFoundError,
    SignatureDoesNotMatch,
    SlowDown,
}
@@ -32,56 +43,80 @@ impl S3ErrorCode {
pub fn http_status(&self) -> u16 {
match self {
Self::AccessDenied => 403,
Self::BadDigest => 400,
Self::BucketAlreadyExists => 409,
Self::BucketAlreadyOwnedByYou => 409,
Self::BucketNotEmpty => 409,
Self::EntityTooLarge => 413,
Self::EntityTooSmall => 400,
Self::InternalError => 500,
Self::InvalidAccessKeyId => 403,
Self::InvalidArgument => 400,
Self::InvalidBucketName => 400,
Self::InvalidKey => 400,
Self::InvalidPart => 400,
Self::InvalidPartOrder => 400,
Self::InvalidPolicyDocument => 400,
Self::InvalidRange => 416,
Self::InvalidRequest => 400,
Self::InvalidTag => 400,
Self::MalformedXML => 400,
Self::MethodNotAllowed => 405,
Self::NoSuchBucket => 404,
Self::NoSuchBucketPolicy => 404,
Self::NoSuchKey => 404,
Self::NoSuchLifecycleConfiguration => 404,
Self::NoSuchUpload => 404,
Self::NoSuchVersion => 404,
Self::NoSuchTagSet => 404,
Self::ObjectCorrupted => 422,
Self::PreconditionFailed => 412,
Self::NotModified => 304,
Self::QuotaExceeded => 403,
Self::RequestTimeTooSkewed => 403,
Self::ServerSideEncryptionConfigurationNotFoundError => 404,
Self::SignatureDoesNotMatch => 403,
Self::SlowDown => 429,
Self::SlowDown => 503,
}
}
pub fn as_str(&self) -> &'static str {
match self {
Self::AccessDenied => "AccessDenied",
Self::BadDigest => "BadDigest",
Self::BucketAlreadyExists => "BucketAlreadyExists",
Self::BucketAlreadyOwnedByYou => "BucketAlreadyOwnedByYou",
Self::BucketNotEmpty => "BucketNotEmpty",
Self::EntityTooLarge => "EntityTooLarge",
Self::EntityTooSmall => "EntityTooSmall",
Self::InternalError => "InternalError",
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
Self::InvalidArgument => "InvalidArgument",
Self::InvalidBucketName => "InvalidBucketName",
Self::InvalidKey => "InvalidKey",
Self::InvalidPart => "InvalidPart",
Self::InvalidPartOrder => "InvalidPartOrder",
Self::InvalidPolicyDocument => "InvalidPolicyDocument",
Self::InvalidRange => "InvalidRange",
Self::InvalidRequest => "InvalidRequest",
Self::InvalidTag => "InvalidTag",
Self::MalformedXML => "MalformedXML",
Self::MethodNotAllowed => "MethodNotAllowed",
Self::NoSuchBucket => "NoSuchBucket",
Self::NoSuchBucketPolicy => "NoSuchBucketPolicy",
Self::NoSuchKey => "NoSuchKey",
Self::NoSuchLifecycleConfiguration => "NoSuchLifecycleConfiguration",
Self::NoSuchUpload => "NoSuchUpload",
Self::NoSuchVersion => "NoSuchVersion",
Self::NoSuchTagSet => "NoSuchTagSet",
Self::ObjectCorrupted => "ObjectCorrupted",
Self::PreconditionFailed => "PreconditionFailed",
Self::NotModified => "NotModified",
Self::QuotaExceeded => "QuotaExceeded",
Self::RequestTimeTooSkewed => "RequestTimeTooSkewed",
Self::ServerSideEncryptionConfigurationNotFoundError => {
"ServerSideEncryptionConfigurationNotFoundError"
}
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
Self::SlowDown => "SlowDown",
}
@@ -90,27 +125,38 @@ impl S3ErrorCode {
pub fn default_message(&self) -> &'static str {
match self {
Self::AccessDenied => "Access Denied",
Self::BadDigest => "The Content-MD5 or checksum value you specified did not match what we received",
Self::BucketAlreadyExists => "The requested bucket name is not available",
Self::BucketAlreadyOwnedByYou => "Your previous request to create the named bucket succeeded and you already own it",
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
Self::EntityTooSmall => "Your proposed upload is smaller than the minimum allowed object size",
Self::InternalError => "We encountered an internal error. Please try again.",
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
Self::InvalidArgument => "Invalid argument",
Self::InvalidBucketName => "The specified bucket is not valid",
Self::InvalidKey => "The specified key is not valid",
Self::InvalidPart => "One or more of the specified parts could not be found",
Self::InvalidPartOrder => "The list of parts was not in ascending order",
Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
Self::InvalidRange => "The requested range is not satisfiable",
Self::InvalidRequest => "Invalid request",
Self::InvalidTag => "The Tagging header is invalid",
Self::MalformedXML => "The XML you provided was not well-formed",
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
Self::NoSuchBucket => "The specified bucket does not exist",
Self::NoSuchBucketPolicy => "The bucket policy does not exist",
Self::NoSuchKey => "The specified key does not exist",
Self::NoSuchLifecycleConfiguration => "The lifecycle configuration does not exist",
Self::NoSuchUpload => "The specified multipart upload does not exist",
Self::NoSuchVersion => "The specified version does not exist",
Self::NoSuchTagSet => "The TagSet does not exist",
Self::ObjectCorrupted => "The stored object is corrupted and cannot be served",
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
Self::NotModified => "Not Modified",
Self::QuotaExceeded => "The bucket quota has been exceeded",
Self::RequestTimeTooSkewed => "The difference between the request time and the server's time is too large",
Self::ServerSideEncryptionConfigurationNotFoundError => "The server side encryption configuration was not found",
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
Self::SlowDown => "Please reduce your request rate",
}

View File

@@ -12,6 +12,12 @@ pub struct ObjectMeta {
pub content_type: Option<String>,
pub storage_class: Option<String>,
pub metadata: HashMap<String, String>,
#[serde(default)]
pub version_id: Option<String>,
#[serde(default)]
pub is_delete_marker: bool,
#[serde(default, skip_serializing)]
pub internal_metadata: HashMap<String, String>,
}
impl ObjectMeta {
@@ -24,10 +30,20 @@ impl ObjectMeta {
content_type: None,
storage_class: Some("STANDARD".to_string()),
metadata: HashMap::new(),
version_id: None,
is_delete_marker: false,
internal_metadata: HashMap::new(),
}
}
}
/// Outcome of deleting an object key.
#[derive(Debug, Clone, Default)]
pub struct DeleteOutcome {
    // Version id affected by the delete, when versioning produced one.
    pub version_id: Option<String>,
    // True when the delete created a delete marker rather than removing data.
    // NOTE(review): inferred from naming — confirm against the storage layer.
    pub is_delete_marker: bool,
    // True when an object existed at the key before the delete.
    pub existed: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
pub name: String,
@@ -112,6 +128,8 @@ pub struct VersionInfo {
pub last_modified: DateTime<Utc>,
pub etag: Option<String>,
pub is_latest: bool,
#[serde(default)]
pub is_delete_marker: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -120,11 +138,34 @@ pub struct Tag {
pub value: String,
}
/// Bucket versioning state.
///
/// `Disabled` means versioning was never turned on; `Suspended` means it
/// was on at some point and has since been paused.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
pub enum VersioningStatus {
    #[default]
    Disabled,
    Enabled,
    Suspended,
}

impl VersioningStatus {
    /// True only while versioning is currently `Enabled`.
    pub fn is_enabled(self) -> bool {
        self == VersioningStatus::Enabled
    }

    /// True once versioning has ever been configured on the bucket —
    /// i.e. the status is `Enabled` or `Suspended`.
    pub fn is_active(self) -> bool {
        match self {
            VersioningStatus::Enabled | VersioningStatus::Suspended => true,
            VersioningStatus::Disabled => false,
        }
    }
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
#[serde(default)]
pub versioning_enabled: bool,
#[serde(default)]
pub versioning_suspended: bool,
#[serde(default)]
pub tags: Vec<Tag>,
#[serde(default)]
pub cors: Option<serde_json::Value>,
@@ -150,6 +191,35 @@ pub struct BucketConfig {
pub replication: Option<serde_json::Value>,
}
impl BucketConfig {
    /// Collapse the two persisted boolean flags into a single status.
    /// `versioning_enabled` takes precedence if both flags are somehow set.
    pub fn versioning_status(&self) -> VersioningStatus {
        match (self.versioning_enabled, self.versioning_suspended) {
            (true, _) => VersioningStatus::Enabled,
            (false, true) => VersioningStatus::Suspended,
            (false, false) => VersioningStatus::Disabled,
        }
    }

    /// Write a status back into the boolean flag pair, keeping the two
    /// flags mutually exclusive.
    pub fn set_versioning_status(&mut self, status: VersioningStatus) {
        self.versioning_enabled = status == VersioningStatus::Enabled;
        self.versioning_suspended = status == VersioningStatus::Suspended;
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
pub max_bytes: Option<u64>,

View File

@@ -0,0 +1,545 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use hkdf::Hkdf;
use sha2::Sha256;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::Path;
use thiserror::Error;
// Plaintext bytes per encrypted chunk when the caller does not override it.
const DEFAULT_CHUNK_SIZE: usize = 65536;
// Width of the big-endian u32 framing fields (chunk count and per-chunk length).
const HEADER_SIZE: usize = 4;
/// Errors produced by the chunked-stream encryption/decryption helpers.
#[derive(Debug, Error)]
pub enum CryptoError {
    /// Underlying filesystem read/write failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Key was not the 32 bytes required for AES-256.
    #[error("Invalid key size: expected 32 bytes, got {0}")]
    InvalidKeySize(usize),
    /// Base nonce was not the 12 bytes required for AES-GCM.
    #[error("Invalid nonce size: expected 12 bytes, got {0}")]
    InvalidNonceSize(usize),
    /// The cipher rejected a chunk during encryption.
    #[error("Encryption failed: {0}")]
    EncryptionFailed(String),
    /// GCM tag verification failed — the chunk was tampered with or corrupted.
    #[error("Decryption failed at chunk {0}")]
    DecryptionFailed(u32),
    /// HKDF nonce derivation failed (output length rejected).
    #[error("HKDF expand failed: {0}")]
    HkdfFailed(String),
}
/// Fill `buf` as completely as possible from `reader`.
///
/// Retries on `ErrorKind::Interrupted` and stops early only at EOF, so the
/// return value equals `buf.len()` for every chunk except the final short
/// one (or 0 at EOF). Any other I/O error is propagated unchanged.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut written = 0usize;
    while written < buf.len() {
        match reader.read(&mut buf[written..]) {
            Ok(0) => return Ok(written), // EOF — return the partial fill
            Ok(n) => written += n,
            Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
            Err(e) => return Err(e),
        }
    }
    Ok(written)
}
/// Derive the 96-bit AES-GCM nonce for one chunk.
///
/// HKDF-SHA256 with the object's base nonce as salt, the fixed label
/// `"chunk_nonce"` as input keying material, and the big-endian chunk
/// index as the info parameter — so each chunk index yields a unique,
/// deterministic nonce for the same base nonce.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
    let mut nonce = [0u8; 12];
    Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce")
        .expand(&chunk_index.to_be_bytes(), &mut nonce)
        .map_err(|err| CryptoError::HkdfFailed(err.to_string()))?;
    Ok(nonce)
}
pub fn encrypt_stream_chunked(
input_path: &Path,
output_path: &Path,
key: &[u8],
base_nonce: &[u8],
chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
if key.len() != 32 {
return Err(CryptoError::InvalidKeySize(key.len()));
}
if base_nonce.len() != 12 {
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
}
let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
let key_arr: [u8; 32] = key.try_into().unwrap();
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
let cipher = Aes256Gcm::new(&key_arr.into());
let mut infile = File::open(input_path)?;
let mut outfile = File::create(output_path)?;
outfile.write_all(&[0u8; 4])?;
let mut buf = vec![0u8; chunk_size];
let mut chunk_index: u32 = 0;
loop {
let n = read_exact_chunk(&mut infile, &mut buf)?;
if n == 0 {
break;
}
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
let nonce = Nonce::from_slice(&nonce_bytes);
let encrypted = cipher
.encrypt(nonce, &buf[..n])
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
let size = encrypted.len() as u32;
outfile.write_all(&size.to_be_bytes())?;
outfile.write_all(&encrypted)?;
chunk_index += 1;
}
outfile.seek(SeekFrom::Start(0))?;
outfile.write_all(&chunk_index.to_be_bytes())?;
Ok(chunk_index)
}
/// Decrypts a file written by `encrypt_stream_chunked` into `output_path`.
///
/// Reads the 4-byte chunk-count header, then for each chunk reads its 4-byte
/// ciphertext length and ciphertext, re-derives the per-chunk nonce from
/// `base_nonce` and the chunk index, and authenticates + decrypts it.
///
/// Returns the number of chunks processed. Fails with
/// `CryptoError::DecryptionFailed` on the first chunk whose tag does not verify.
pub fn decrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }
    // Sizes checked above; conversions are infallible here.
    let key_bytes: [u8; 32] = key.try_into().unwrap();
    let base: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_bytes.into());
    let mut reader = File::open(input_path)?;
    let mut writer = File::create(output_path)?;
    let mut count_buf = [0u8; HEADER_SIZE];
    reader.read_exact(&mut count_buf)?;
    let total_chunks = u32::from_be_bytes(count_buf);
    let mut len_buf = [0u8; HEADER_SIZE];
    for idx in 0..total_chunks {
        reader.read_exact(&mut len_buf)?;
        let ct_len = u32::from_be_bytes(len_buf) as usize;
        let mut ciphertext = vec![0u8; ct_len];
        reader.read_exact(&mut ciphertext)?;
        let per_chunk_nonce = derive_chunk_nonce(&base, idx)?;
        let plaintext = cipher
            .decrypt(Nonce::from_slice(&per_chunk_nonce), ciphertext.as_ref())
            .map_err(|_| CryptoError::DecryptionFailed(idx))?;
        writer.write_all(&plaintext)?;
    }
    Ok(total_chunks)
}
/// AES-GCM authentication tag length appended to every chunk's ciphertext.
const GCM_TAG_LEN: usize = 16;
/// Decrypts only the chunks covering the plaintext byte range
/// `[plain_start, plain_end_inclusive]` of a file written by
/// `encrypt_stream_chunked`, writing exactly those plaintext bytes to
/// `output_path`.
///
/// `chunk_plain_size` and `plaintext_size` must match the values used at
/// encryption time: every stored record except possibly the last then has a
/// fixed on-disk size, so the first needed chunk's file offset can be computed
/// directly instead of scanning record-by-record.
///
/// Returns the number of plaintext bytes written. Errors on an invalid range,
/// a header/size mismatch (corrupt file or wrong `chunk_plain_size`), or an
/// authentication failure in any needed chunk.
pub fn decrypt_stream_chunked_range(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_plain_size: usize,
    plaintext_size: u64,
    plain_start: u64,
    plain_end_inclusive: u64,
) -> Result<u64, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }
    if chunk_plain_size == 0 {
        return Err(CryptoError::EncryptionFailed(
            "chunk_plain_size must be > 0".into(),
        ));
    }
    // Zero-byte object: emit an empty output file; no range can be satisfied.
    if plaintext_size == 0 {
        let _ = File::create(output_path)?;
        return Ok(0);
    }
    if plain_start > plain_end_inclusive || plain_end_inclusive >= plaintext_size {
        return Err(CryptoError::EncryptionFailed(format!(
            "range [{}, {}] invalid for plaintext size {}",
            plain_start, plain_end_inclusive, plaintext_size
        )));
    }
    // Validated above, so infallible.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());
    let n = chunk_plain_size as u64;
    // First/last chunk indices touched by the range, and the expected totals.
    let first_chunk = (plain_start / n) as u32;
    let last_chunk = (plain_end_inclusive / n) as u32;
    let total_chunks = plaintext_size.div_ceil(n) as u32;
    // Plaintext bytes in the final (possibly short) chunk; equals n when the
    // plaintext is an exact multiple of the chunk size.
    let final_chunk_plain = plaintext_size - (total_chunks as u64 - 1) * n;
    let mut infile = File::open(input_path)?;
    let mut header = [0u8; HEADER_SIZE];
    infile.read_exact(&mut header)?;
    let stored_chunk_count = u32::from_be_bytes(header);
    // Cross-check the stored chunk count against what the caller-supplied
    // sizes imply, catching corruption or a chunk_plain_size mismatch early.
    if stored_chunk_count != total_chunks {
        return Err(CryptoError::EncryptionFailed(format!(
            "chunk count mismatch: header says {}, plaintext_size implies {}",
            stored_chunk_count, total_chunks
        )));
    }
    let mut outfile = File::create(output_path)?;
    // Fixed on-disk record size for every full chunk:
    // plaintext + GCM tag + 4-byte length prefix.
    let stride = n + GCM_TAG_LEN as u64 + HEADER_SIZE as u64;
    // Skip the file header plus all full-size records before the first needed
    // chunk (only the final chunk can be short, and it is never skipped).
    let first_offset = HEADER_SIZE as u64 + first_chunk as u64 * stride;
    infile.seek(SeekFrom::Start(first_offset))?;
    let mut size_buf = [0u8; HEADER_SIZE];
    let mut bytes_written: u64 = 0;
    for chunk_index in first_chunk..=last_chunk {
        infile.read_exact(&mut size_buf)?;
        let ct_len = u32::from_be_bytes(size_buf) as usize;
        let expected_plain = if chunk_index + 1 == total_chunks {
            final_chunk_plain as usize
        } else {
            chunk_plain_size
        };
        let expected_ct = expected_plain + GCM_TAG_LEN;
        // Reject unexpected record sizes before attempting decryption.
        if ct_len != expected_ct {
            return Err(CryptoError::EncryptionFailed(format!(
                "chunk {} stored length {} != expected {} (corrupt file or chunk_size mismatch)",
                chunk_index, ct_len, expected_ct
            )));
        }
        let mut encrypted = vec![0u8; ct_len];
        infile.read_exact(&mut encrypted)?;
        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);
        let decrypted = cipher
            .decrypt(nonce, encrypted.as_ref())
            .map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;
        // Clip this chunk's plaintext to the requested range and emit only
        // the overlapping slice (first and last chunks may be partial).
        let chunk_plain_start = chunk_index as u64 * n;
        let chunk_plain_end_exclusive = chunk_plain_start + decrypted.len() as u64;
        let slice_start = plain_start.saturating_sub(chunk_plain_start) as usize;
        let slice_end = (plain_end_inclusive + 1).min(chunk_plain_end_exclusive);
        let slice_end_local = (slice_end - chunk_plain_start) as usize;
        if slice_end_local > slice_start {
            outfile.write_all(&decrypted[slice_start..slice_end_local])?;
            bytes_written += (slice_end_local - slice_start) as u64;
        }
    }
    Ok(bytes_written)
}
/// Async wrapper for [`encrypt_stream_chunked`]: the blocking file and crypto
/// work runs on Tokio's blocking thread pool. Inputs are copied to owned
/// values so the closure can be `'static`.
pub async fn encrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: Option<usize>,
) -> Result<u32, CryptoError> {
    let (input, output) = (input_path.to_owned(), output_path.to_owned());
    let (key_buf, nonce_buf) = (key.to_vec(), base_nonce.to_vec());
    let join = tokio::task::spawn_blocking(move || {
        encrypt_stream_chunked(&input, &output, &key_buf, &nonce_buf, chunk_size)
    });
    // A JoinError (panicked/cancelled task) is surfaced as an I/O error.
    join.await
        .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
/// Async wrapper for [`decrypt_stream_chunked`]: the blocking file and crypto
/// work runs on Tokio's blocking thread pool. Inputs are copied to owned
/// values so the closure can be `'static`.
pub async fn decrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    let (input, output) = (input_path.to_owned(), output_path.to_owned());
    let (key_buf, nonce_buf) = (key.to_vec(), base_nonce.to_vec());
    let join = tokio::task::spawn_blocking(move || {
        decrypt_stream_chunked(&input, &output, &key_buf, &nonce_buf)
    });
    // A JoinError (panicked/cancelled task) is surfaced as an I/O error.
    join.await
        .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as IoWrite;

    // Full encrypt -> decrypt round trip with a small chunk size so the
    // payload spans multiple chunks; chunk counts and plaintext must match.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");
        let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();
        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
        assert!(chunks > 0);
        let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
        assert_eq!(chunks, chunks2);
        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    // A 16-byte key must be rejected with InvalidKeySize before any output
    // file is produced.
    #[test]
    fn test_invalid_key_size() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"test")
            .unwrap();
        let result = encrypt_stream_chunked(
            &input,
            &dir.path().join("out"),
            &[0u8; 16],
            &[0u8; 12],
            None,
        );
        assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
    }

    // Test helper: write `data` to `path`, panicking on any I/O failure.
    fn write_file(path: &Path, data: &[u8]) {
        std::fs::File::create(path).unwrap().write_all(data).unwrap();
    }

    // Test helper: write `data` into `dir` and encrypt it with the given
    // key/nonce and chunk size, returning the path of the encrypted file.
    fn make_encrypted_file(
        dir: &Path,
        data: &[u8],
        key: &[u8; 32],
        nonce: &[u8; 12],
        chunk: usize,
    ) -> std::path::PathBuf {
        let input = dir.join("input.bin");
        let encrypted = dir.join("encrypted.bin");
        write_file(&input, data);
        encrypt_stream_chunked(&input, &encrypted, key, nonce, Some(chunk)).unwrap();
        encrypted
    }

    // Range decryption where start and end fall inside the same chunk.
    #[test]
    fn test_range_within_single_chunk() {
        let dir = tempfile::tempdir().unwrap();
        let data: Vec<u8> = (0u8..=255).cycle().take(4096).collect();
        let key = [0x33u8; 32];
        let nonce = [0x07u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 1024);
        let out = dir.path().join("range.bin");
        let n = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            1024,
            data.len() as u64,
            200,
            399,
        )
        .unwrap();
        assert_eq!(n, 200);
        let got = std::fs::read(&out).unwrap();
        assert_eq!(got, &data[200..400]);
    }

    // Range decryption that crosses several chunk boundaries, with partial
    // first and last chunks.
    #[test]
    fn test_range_spanning_multiple_chunks() {
        let dir = tempfile::tempdir().unwrap();
        let data: Vec<u8> = (0..5000u32).map(|i| (i % 251) as u8).collect();
        let key = [0x44u8; 32];
        let nonce = [0x02u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
        let out = dir.path().join("range.bin");
        let n = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            512,
            data.len() as u64,
            100,
            2999,
        )
        .unwrap();
        assert_eq!(n, 2900);
        let got = std::fs::read(&out).unwrap();
        assert_eq!(got, &data[100..3000]);
    }

    // 1300 bytes with 512-byte chunks leaves a short 276-byte final chunk;
    // the range must extend into it correctly.
    #[test]
    fn test_range_covers_final_partial_chunk() {
        let dir = tempfile::tempdir().unwrap();
        let data: Vec<u8> = (0..1300u32).map(|i| (i % 71) as u8).collect();
        let key = [0x55u8; 32];
        let nonce = [0x0au8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
        let out = dir.path().join("range.bin");
        let n = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            512,
            data.len() as u64,
            900,
            1299,
        )
        .unwrap();
        assert_eq!(n, 400);
        let got = std::fs::read(&out).unwrap();
        assert_eq!(got, &data[900..1300]);
    }

    // A range covering the whole object must reproduce the full plaintext.
    #[test]
    fn test_range_full_object() {
        let dir = tempfile::tempdir().unwrap();
        let data: Vec<u8> = (0..2048u32).map(|i| (i % 13) as u8).collect();
        let key = [0x11u8; 32];
        let nonce = [0x33u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
        let out = dir.path().join("range.bin");
        let n = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            512,
            data.len() as u64,
            0,
            data.len() as u64 - 1,
        )
        .unwrap();
        assert_eq!(n, data.len() as u64);
        let got = std::fs::read(&out).unwrap();
        assert_eq!(got, data);
    }

    // GCM authentication must fail (DecryptionFailed) under a wrong key,
    // also for the range-decryption path.
    #[test]
    fn test_range_wrong_key_fails() {
        let dir = tempfile::tempdir().unwrap();
        let data = b"range-auth-check".repeat(100);
        let key = [0x66u8; 32];
        let nonce = [0x09u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 256);
        let out = dir.path().join("range.bin");
        let wrong = [0x67u8; 32];
        let r = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &wrong,
            &nonce,
            256,
            data.len() as u64,
            0,
            data.len() as u64 - 1,
        );
        assert!(matches!(r, Err(CryptoError::DecryptionFailed(_))));
    }

    // An end offset past the plaintext size must be rejected up front.
    #[test]
    fn test_range_out_of_bounds_rejected() {
        let dir = tempfile::tempdir().unwrap();
        let data = vec![0u8; 100];
        let key = [0x22u8; 32];
        let nonce = [0x44u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 64);
        let out = dir.path().join("range.bin");
        let r = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            64,
            data.len() as u64,
            50,
            200,
        );
        assert!(r.is_err());
    }

    // Passing a chunk_plain_size different from the one used at encryption
    // time must be detected via the chunk-count / record-length checks.
    #[test]
    fn test_range_mismatched_chunk_size_detected() {
        let dir = tempfile::tempdir().unwrap();
        let data: Vec<u8> = (0..2048u32).map(|i| i as u8).collect();
        let key = [0x77u8; 32];
        let nonce = [0x88u8; 12];
        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
        let out = dir.path().join("range.bin");
        let r = decrypt_stream_chunked_range(
            &encrypted,
            &out,
            &key,
            &nonce,
            1024,
            data.len() as u64,
            0,
            1023,
        );
        assert!(r.is_err());
    }

    // Full-file decryption must likewise fail authentication under a wrong key.
    #[test]
    fn test_wrong_key_fails_decrypt() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");
        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"secret data")
            .unwrap();
        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();
        let wrong_key = [0x43u8; 32];
        let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
        assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
    }
}

View File

@@ -4,7 +4,9 @@ use rand::RngCore;
use std::collections::HashMap;
use std::path::Path;
use crate::aes_gcm::{decrypt_stream_chunked, encrypt_stream_chunked, CryptoError};
use crate::aes_gcm::{
decrypt_stream_chunked, decrypt_stream_chunked_range, encrypt_stream_chunked, CryptoError,
};
use crate::kms::KmsService;
#[derive(Debug, Clone, PartialEq)]
@@ -37,6 +39,8 @@ pub struct EncryptionMetadata {
pub nonce: String,
pub encrypted_data_key: Option<String>,
pub kms_key_id: Option<String>,
pub chunk_size: Option<usize>,
pub plaintext_size: Option<u64>,
}
impl EncryptionMetadata {
@@ -53,6 +57,15 @@ impl EncryptionMetadata {
if let Some(ref kid) = self.kms_key_id {
map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
}
if let Some(cs) = self.chunk_size {
map.insert("x-amz-encryption-chunk-size".to_string(), cs.to_string());
}
if let Some(ps) = self.plaintext_size {
map.insert(
"x-amz-encryption-plaintext-size".to_string(),
ps.to_string(),
);
}
map
}
@@ -64,6 +77,12 @@ impl EncryptionMetadata {
nonce: nonce.clone(),
encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
chunk_size: meta
.get("x-amz-encryption-chunk-size")
.and_then(|s| s.parse().ok()),
plaintext_size: meta
.get("x-amz-encryption-plaintext-size")
.and_then(|s| s.parse().ok()),
})
}
@@ -76,17 +95,43 @@ impl EncryptionMetadata {
meta.remove("x-amz-encryption-nonce");
meta.remove("x-amz-encrypted-data-key");
meta.remove("x-amz-encryption-key-id");
meta.remove("x-amz-encryption-chunk-size");
meta.remove("x-amz-encryption-plaintext-size");
}
}
pub struct EncryptionService {
master_key: [u8; 32],
kms: Option<std::sync::Arc<KmsService>>,
config: EncryptionConfig,
}
#[derive(Debug, Clone, Copy)]
pub struct EncryptionConfig {
pub chunk_size: usize,
}
impl Default for EncryptionConfig {
fn default() -> Self {
Self { chunk_size: 65_536 }
}
}
impl EncryptionService {
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
Self { master_key, kms }
Self::with_config(master_key, kms, EncryptionConfig::default())
}
pub fn with_config(
master_key: [u8; 32],
kms: Option<std::sync::Arc<KmsService>>,
config: EncryptionConfig,
) -> Self {
Self {
master_key,
kms,
config,
}
}
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
@@ -188,11 +233,19 @@ impl EncryptionService {
data_key
};
let plaintext_size = tokio::fs::metadata(input_path)
.await
.map_err(CryptoError::Io)?
.len();
let ip = input_path.to_owned();
let op = output_path.to_owned();
let ak = actual_key;
let n = nonce;
tokio::task::spawn_blocking(move || encrypt_stream_chunked(&ip, &op, &ak, &n, None))
let chunk_size = self.config.chunk_size;
tokio::task::spawn_blocking(move || {
encrypt_stream_chunked(&ip, &op, &ak, &n, Some(chunk_size))
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
@@ -201,22 +254,23 @@ impl EncryptionService {
nonce: B64.encode(nonce),
encrypted_data_key,
kms_key_id,
chunk_size: Some(chunk_size),
plaintext_size: Some(plaintext_size),
})
}
pub async fn decrypt_object(
async fn resolve_data_key(
&self,
input_path: &Path,
output_path: &Path,
enc_meta: &EncryptionMetadata,
customer_key: Option<&[u8]>,
) -> Result<(), CryptoError> {
) -> Result<([u8; 32], [u8; 12]), CryptoError> {
let nonce_bytes = B64
.decode(&enc_meta.nonce)
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e)))?;
if nonce_bytes.len() != 12 {
return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
}
let nonce: [u8; 12] = nonce_bytes.try_into().unwrap();
let data_key: [u8; 32] = if let Some(ck) = customer_key {
if ck.len() != 32 {
@@ -254,15 +308,62 @@ impl EncryptionService {
self.unwrap_data_key(wrapped)?
};
Ok((data_key, nonce))
}
pub async fn decrypt_object(
&self,
input_path: &Path,
output_path: &Path,
enc_meta: &EncryptionMetadata,
customer_key: Option<&[u8]>,
) -> Result<(), CryptoError> {
let (data_key, nonce) = self.resolve_data_key(enc_meta, customer_key).await?;
let ip = input_path.to_owned();
let op = output_path.to_owned();
let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nb))
tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nonce))
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
Ok(())
}
pub async fn decrypt_object_range(
&self,
input_path: &Path,
output_path: &Path,
enc_meta: &EncryptionMetadata,
customer_key: Option<&[u8]>,
plain_start: u64,
plain_end_inclusive: u64,
) -> Result<u64, CryptoError> {
let chunk_size = enc_meta.chunk_size.ok_or_else(|| {
CryptoError::EncryptionFailed("chunk_size missing from encryption metadata".into())
})?;
let plaintext_size = enc_meta.plaintext_size.ok_or_else(|| {
CryptoError::EncryptionFailed("plaintext_size missing from encryption metadata".into())
})?;
let (data_key, nonce) = self.resolve_data_key(enc_meta, customer_key).await?;
let ip = input_path.to_owned();
let op = output_path.to_owned();
tokio::task::spawn_blocking(move || {
decrypt_stream_chunked_range(
&ip,
&op,
&data_key,
&nonce,
chunk_size,
plaintext_size,
plain_start,
plain_end_inclusive,
)
})
.await
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
}
}
#[cfg(test)]
@@ -356,12 +457,26 @@ mod tests {
nonce: "dGVzdG5vbmNlMTI=".to_string(),
encrypted_data_key: Some("c29tZWtleQ==".to_string()),
kms_key_id: None,
chunk_size: Some(65_536),
plaintext_size: Some(1_234_567),
};
let map = meta.to_metadata_map();
let restored = EncryptionMetadata::from_metadata(&map).unwrap();
assert_eq!(restored.algorithm, "AES256");
assert_eq!(restored.nonce, meta.nonce);
assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
assert_eq!(restored.chunk_size, Some(65_536));
assert_eq!(restored.plaintext_size, Some(1_234_567));
}
#[test]
fn test_encryption_metadata_legacy_missing_sizes() {
let mut map = HashMap::new();
map.insert("x-amz-server-side-encryption".to_string(), "AES256".into());
map.insert("x-amz-encryption-nonce".to_string(), "aGVsbG8=".into());
let restored = EncryptionMetadata::from_metadata(&map).unwrap();
assert_eq!(restored.chunk_size, None);
assert_eq!(restored.plaintext_size, None);
}
#[test]

View File

@@ -10,6 +10,7 @@ myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
base64 = { workspace = true }
md-5 = { workspace = true }
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
@@ -18,17 +19,22 @@ hyper = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_urlencoded = "0.7"
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
tokio-stream = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
http-body = "1"
http-body-util = "0.1"
percent-encoding = { workspace = true }
quick-xml = { workspace = true }
mime_guess = "2"
crc32fast = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
duckdb = { workspace = true }
roxmltree = "0.20"
parking_lot = { workspace = true }
@@ -42,6 +48,7 @@ aws-smithy-types = { workspace = true }
async-trait = { workspace = true }
rand = "0.8"
tera = { workspace = true }
rust-embed = { version = "8", features = ["debug-embed", "include-exclude", "interpolate-folder-path"] }
cookie = { workspace = true }
subtle = { workspace = true }
clap = { workspace = true }

View File

@@ -0,0 +1,643 @@
use std::net::SocketAddr;
use std::path::PathBuf;
/// One rate-limit rule: at most `max_requests` requests per window of
/// `window_seconds` seconds.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RateLimitSetting {
    pub max_requests: u32,
    pub window_seconds: u64,
}
impl RateLimitSetting {
    /// Const constructor so limits can appear in defaults, statics, and tables.
    pub const fn new(max_requests: u32, window_seconds: u64) -> Self {
        RateLimitSetting { max_requests, window_seconds }
    }
}
/// Runtime configuration for the server, normally populated from environment
/// variables by `ServerConfig::from_env` (every field has a documented default).
#[derive(Debug, Clone)]
pub struct ServerConfig {
    // --- Network binding, storage location, identity ---
    pub bind_addr: SocketAddr,
    pub ui_bind_addr: SocketAddr,
    pub storage_root: PathBuf,
    pub region: String,
    pub iam_config_path: PathBuf,
    // --- SigV4 / presigned-URL policy ---
    pub sigv4_timestamp_tolerance_secs: u64,
    pub presigned_url_min_expiry: u64,
    pub presigned_url_max_expiry: u64,
    // Session-signing secret; None means no env/file secret was found.
    pub secret_key: Option<String>,
    // --- Encryption and KMS ---
    pub encryption_enabled: bool,
    pub encryption_chunk_size_bytes: usize,
    pub kms_enabled: bool,
    pub kms_generate_data_key_min_bytes: usize,
    pub kms_generate_data_key_max_bytes: usize,
    // --- Garbage collection of temp/multipart/lock artifacts ---
    pub gc_enabled: bool,
    pub gc_interval_hours: f64,
    pub gc_temp_file_max_age_hours: f64,
    pub gc_multipart_max_age_days: u64,
    pub gc_lock_file_max_age_hours: f64,
    pub gc_dry_run: bool,
    // --- Background integrity scanning / healing ---
    pub integrity_enabled: bool,
    pub integrity_interval_hours: f64,
    pub integrity_batch_size: usize,
    pub integrity_auto_heal: bool,
    pub integrity_dry_run: bool,
    pub integrity_heal_concurrency: usize,
    pub integrity_quarantine_retention_days: u64,
    // --- Operation metrics and metrics history ---
    pub metrics_enabled: bool,
    pub metrics_history_enabled: bool,
    pub metrics_interval_minutes: u64,
    pub metrics_retention_hours: u64,
    pub metrics_history_interval_minutes: u64,
    pub metrics_history_retention_hours: u64,
    // --- Lifecycle rules and website hosting ---
    pub lifecycle_enabled: bool,
    pub lifecycle_max_history_per_bucket: usize,
    pub website_hosting_enabled: bool,
    // --- Object limits and caches ---
    pub object_key_max_length_bytes: usize,
    pub object_tag_limit: usize,
    pub object_cache_max_size: usize,
    pub bucket_config_cache_ttl_seconds: f64,
    // --- Bucket replication ---
    pub replication_connect_timeout_secs: u64,
    pub replication_read_timeout_secs: u64,
    pub replication_max_retries: u32,
    pub replication_streaming_threshold_bytes: u64,
    pub replication_max_failures_per_bucket: usize,
    // --- Multi-site (cluster) sync ---
    pub site_sync_enabled: bool,
    pub site_sync_interval_secs: u64,
    pub site_sync_batch_size: usize,
    pub site_sync_connect_timeout_secs: u64,
    pub site_sync_read_timeout_secs: u64,
    pub site_sync_max_retries: u32,
    pub site_sync_clock_skew_tolerance: f64,
    pub site_id: Option<String>,
    pub site_endpoint: Option<String>,
    pub site_region: String,
    pub site_priority: i32,
    // --- HTTP front-end: proxies, redirects, CORS, sessions ---
    pub api_base_url: String,
    pub num_trusted_proxies: usize,
    pub allowed_redirect_hosts: Vec<String>,
    pub allow_internal_endpoints: bool,
    pub cors_origins: Vec<String>,
    pub cors_methods: Vec<String>,
    pub cors_allow_headers: Vec<String>,
    pub cors_expose_headers: Vec<String>,
    pub session_lifetime_days: u64,
    pub log_level: String,
    // --- Request handling limits ---
    pub multipart_min_part_size: u64,
    pub bulk_delete_max_keys: usize,
    pub stream_chunk_size: usize,
    pub request_body_timeout_secs: u64,
    // --- Rate limits per endpoint class ---
    pub ratelimit_default: RateLimitSetting,
    pub ratelimit_list_buckets: RateLimitSetting,
    pub ratelimit_bucket_ops: RateLimitSetting,
    pub ratelimit_object_ops: RateLimitSetting,
    pub ratelimit_head_ops: RateLimitSetting,
    pub ratelimit_admin: RateLimitSetting,
    pub ratelimit_storage_uri: String,
    // --- Embedded/standalone UI ---
    pub ui_enabled: bool,
    pub templates_dir: PathBuf,
    pub static_dir: PathBuf,
}
impl ServerConfig {
    /// Builds a configuration from environment variables. Any variable that is
    /// unset or unparsable falls back to its documented default, so a bad
    /// value degrades gracefully rather than aborting startup.
    ///
    /// # Panics
    /// Panics with a descriptive message if `HOST` is set but is not a literal
    /// IP address (hostnames are not resolved here); silently binding a
    /// default address when the operator asked for a specific host would be
    /// worse than failing fast.
    pub fn from_env() -> Self {
        let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
        let port: u16 = std::env::var("PORT")
            .unwrap_or_else(|_| "5000".to_string())
            .parse()
            .unwrap_or(5000);
        // Previously a bare `unwrap()`: a hostname in HOST crashed with a
        // context-free panic. Keep the fail-fast behavior but say why.
        let host_ip: std::net::IpAddr = host
            .parse()
            .unwrap_or_else(|_| panic!("HOST must be a literal IP address, got {host:?}"));
        let bind_addr = SocketAddr::new(host_ip, port);
        let ui_port: u16 = std::env::var("UI_PORT")
            .unwrap_or_else(|_| "5100".to_string())
            .parse()
            .unwrap_or(5100);
        let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
        let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
        let storage_path = PathBuf::from(&storage_root);
        let iam_config_path = std::env::var("IAM_CONFIG")
            .map(PathBuf::from)
            .unwrap_or_else(|_| {
                storage_path
                    .join(".myfsio.sys")
                    .join("config")
                    .join("iam.json")
            });
        let sigv4_timestamp_tolerance_secs: u64 =
            std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
                .unwrap_or_else(|_| "900".to_string())
                .parse()
                .unwrap_or(900);
        let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
            .unwrap_or_else(|_| "1".to_string())
            .parse()
            .unwrap_or(1);
        let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
            .unwrap_or_else(|_| "604800".to_string())
            .parse()
            .unwrap_or(604800);
        // SECRET_KEY env wins unless empty or the known dev placeholder;
        // otherwise fall back to the on-disk secret file (if present).
        let secret_key = {
            let env_key = std::env::var("SECRET_KEY").ok();
            match env_key {
                Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
                _ => {
                    let secret_file = storage_path
                        .join(".myfsio.sys")
                        .join("config")
                        .join(".secret");
                    std::fs::read_to_string(&secret_file)
                        .ok()
                        .map(|s| s.trim().to_string())
                }
            }
        };
        let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
        let encryption_chunk_size_bytes = parse_usize_env("ENCRYPTION_CHUNK_SIZE_BYTES", 65_536);
        let kms_enabled = parse_bool_env("KMS_ENABLED", false);
        let kms_generate_data_key_min_bytes = parse_usize_env("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1);
        let kms_generate_data_key_max_bytes =
            parse_usize_env("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024);
        let gc_enabled = parse_bool_env("GC_ENABLED", false);
        let gc_interval_hours = parse_f64_env("GC_INTERVAL_HOURS", 6.0);
        let gc_temp_file_max_age_hours = parse_f64_env("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0);
        let gc_multipart_max_age_days = parse_u64_env("GC_MULTIPART_MAX_AGE_DAYS", 7);
        let gc_lock_file_max_age_hours = parse_f64_env("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0);
        let gc_dry_run = parse_bool_env("GC_DRY_RUN", false);
        let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
        let integrity_interval_hours = parse_f64_env("INTEGRITY_INTERVAL_HOURS", 24.0);
        let integrity_batch_size = parse_usize_env("INTEGRITY_BATCH_SIZE", 10_000);
        let integrity_auto_heal = parse_bool_env("INTEGRITY_AUTO_HEAL", false);
        let integrity_dry_run = parse_bool_env("INTEGRITY_DRY_RUN", false);
        let integrity_heal_concurrency = parse_usize_env("INTEGRITY_HEAL_CONCURRENCY", 4);
        let integrity_quarantine_retention_days =
            parse_u64_env("INTEGRITY_QUARANTINE_RETENTION_DAYS", 7);
        let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
        let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
        let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
        let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
        let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
        let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
        let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
        let lifecycle_max_history_per_bucket =
            parse_usize_env("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50);
        let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
        let object_key_max_length_bytes = parse_usize_env("OBJECT_KEY_MAX_LENGTH_BYTES", 1024);
        let object_tag_limit = parse_usize_env("OBJECT_TAG_LIMIT", 50);
        let object_cache_max_size = parse_usize_env("OBJECT_CACHE_MAX_SIZE", 100);
        let bucket_config_cache_ttl_seconds =
            parse_f64_env("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0);
        let replication_connect_timeout_secs =
            parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
        let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
        let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
        let replication_streaming_threshold_bytes =
            parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
        let replication_max_failures_per_bucket =
            parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
        let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
        let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
        let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
        let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
        let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
        let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
        let site_sync_clock_skew_tolerance: f64 =
            std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
                .ok()
                .and_then(|s| s.parse().ok())
                .unwrap_or(1.0);
        let site_id = parse_optional_string_env("SITE_ID");
        let site_endpoint = parse_optional_string_env("SITE_ENDPOINT");
        let site_region = std::env::var("SITE_REGION").unwrap_or_else(|_| region.clone());
        let site_priority = parse_i32_env("SITE_PRIORITY", 100);
        // Normalize: no trailing slash so path joins don't double up.
        let api_base_url = std::env::var("API_BASE_URL")
            .unwrap_or_else(|_| format!("http://{}", bind_addr))
            .trim_end_matches('/')
            .to_string();
        let num_trusted_proxies = parse_usize_env("NUM_TRUSTED_PROXIES", 0);
        let allowed_redirect_hosts = parse_list_env("ALLOWED_REDIRECT_HOSTS", "");
        let allow_internal_endpoints = parse_bool_env("ALLOW_INTERNAL_ENDPOINTS", false);
        let cors_origins = parse_list_env("CORS_ORIGINS", "*");
        let cors_methods = parse_list_env("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD");
        let cors_allow_headers = parse_list_env("CORS_ALLOW_HEADERS", "*");
        let cors_expose_headers = parse_list_env("CORS_EXPOSE_HEADERS", "*");
        let session_lifetime_days = parse_u64_env("SESSION_LIFETIME_DAYS", 1);
        let log_level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string());
        let multipart_min_part_size = parse_u64_env("MULTIPART_MIN_PART_SIZE", 5_242_880);
        let bulk_delete_max_keys = parse_usize_env("BULK_DELETE_MAX_KEYS", 1000);
        let stream_chunk_size = parse_usize_env("STREAM_CHUNK_SIZE", 1_048_576);
        let request_body_timeout_secs = parse_u64_env("REQUEST_BODY_TIMEOUT_SECONDS", 60);
        // Per-class rate limits inherit from RATE_LIMIT_DEFAULT unless
        // overridden; admin endpoints get a deliberately stricter default.
        let ratelimit_default =
            parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(5000, 60));
        let ratelimit_list_buckets =
            parse_rate_limit_env("RATE_LIMIT_LIST_BUCKETS", ratelimit_default);
        let ratelimit_bucket_ops = parse_rate_limit_env("RATE_LIMIT_BUCKET_OPS", ratelimit_default);
        let ratelimit_object_ops = parse_rate_limit_env("RATE_LIMIT_OBJECT_OPS", ratelimit_default);
        let ratelimit_head_ops = parse_rate_limit_env("RATE_LIMIT_HEAD_OPS", ratelimit_default);
        let ratelimit_admin =
            parse_rate_limit_env("RATE_LIMIT_ADMIN", RateLimitSetting::new(60, 60));
        let ratelimit_storage_uri =
            std::env::var("RATE_LIMIT_STORAGE_URI").unwrap_or_else(|_| "memory://".to_string());
        let ui_enabled = parse_bool_env("UI_ENABLED", true);
        let templates_dir = std::env::var("TEMPLATES_DIR")
            .map(PathBuf::from)
            .unwrap_or_else(|_| default_templates_dir());
        let static_dir = std::env::var("STATIC_DIR")
            .map(PathBuf::from)
            .unwrap_or_else(|_| default_static_dir());
        Self {
            bind_addr,
            ui_bind_addr: SocketAddr::new(host_ip, ui_port),
            storage_root: storage_path,
            region,
            iam_config_path,
            sigv4_timestamp_tolerance_secs,
            presigned_url_min_expiry,
            presigned_url_max_expiry,
            secret_key,
            encryption_enabled,
            encryption_chunk_size_bytes,
            kms_enabled,
            kms_generate_data_key_min_bytes,
            kms_generate_data_key_max_bytes,
            gc_enabled,
            gc_interval_hours,
            gc_temp_file_max_age_hours,
            gc_multipart_max_age_days,
            gc_lock_file_max_age_hours,
            gc_dry_run,
            integrity_enabled,
            integrity_interval_hours,
            integrity_batch_size,
            integrity_auto_heal,
            integrity_dry_run,
            integrity_heal_concurrency,
            integrity_quarantine_retention_days,
            metrics_enabled,
            metrics_history_enabled,
            metrics_interval_minutes,
            metrics_retention_hours,
            metrics_history_interval_minutes,
            metrics_history_retention_hours,
            lifecycle_enabled,
            lifecycle_max_history_per_bucket,
            website_hosting_enabled,
            object_key_max_length_bytes,
            object_tag_limit,
            object_cache_max_size,
            bucket_config_cache_ttl_seconds,
            replication_connect_timeout_secs,
            replication_read_timeout_secs,
            replication_max_retries,
            replication_streaming_threshold_bytes,
            replication_max_failures_per_bucket,
            site_sync_enabled,
            site_sync_interval_secs,
            site_sync_batch_size,
            site_sync_connect_timeout_secs,
            site_sync_read_timeout_secs,
            site_sync_max_retries,
            site_sync_clock_skew_tolerance,
            site_id,
            site_endpoint,
            site_region,
            site_priority,
            api_base_url,
            num_trusted_proxies,
            allowed_redirect_hosts,
            allow_internal_endpoints,
            cors_origins,
            cors_methods,
            cors_allow_headers,
            cors_expose_headers,
            session_lifetime_days,
            log_level,
            multipart_min_part_size,
            bulk_delete_max_keys,
            stream_chunk_size,
            request_body_timeout_secs,
            ratelimit_default,
            ratelimit_list_buckets,
            ratelimit_bucket_ops,
            ratelimit_object_ops,
            ratelimit_head_ops,
            ratelimit_admin,
            ratelimit_storage_uri,
            ui_enabled,
            templates_dir,
            static_dir,
        }
    }
}
impl Default for ServerConfig {
    /// Built-in defaults. `from_env` falls back to these values whenever the
    /// corresponding environment variable is unset or fails to parse.
    fn default() -> Self {
        Self {
            // Network endpoints: S3 API on 5000, management UI on 5100.
            bind_addr: "127.0.0.1:5000".parse().unwrap(),
            ui_bind_addr: "127.0.0.1:5100".parse().unwrap(),
            // Storage layout and IAM config location on disk.
            storage_root: PathBuf::from("./data"),
            region: "us-east-1".to_string(),
            iam_config_path: PathBuf::from("./data/.myfsio.sys/config/iam.json"),
            // SigV4 clock skew tolerance and presigned-URL expiry bounds
            // (seconds); 604_800 = 7 days, the S3 presign maximum.
            sigv4_timestamp_tolerance_secs: 900,
            presigned_url_min_expiry: 1,
            presigned_url_max_expiry: 604_800,
            secret_key: None,
            // At-rest encryption and KMS are opt-in.
            encryption_enabled: false,
            encryption_chunk_size_bytes: 65_536,
            kms_enabled: false,
            kms_generate_data_key_min_bytes: 1,
            kms_generate_data_key_max_bytes: 1024,
            // Background garbage collection (disabled by default).
            gc_enabled: false,
            gc_interval_hours: 6.0,
            gc_temp_file_max_age_hours: 24.0,
            gc_multipart_max_age_days: 7,
            gc_lock_file_max_age_hours: 1.0,
            gc_dry_run: false,
            // Periodic integrity scanning / auto-heal (disabled by default).
            integrity_enabled: false,
            integrity_interval_hours: 24.0,
            integrity_batch_size: 10_000,
            integrity_auto_heal: false,
            integrity_dry_run: false,
            integrity_heal_concurrency: 4,
            integrity_quarantine_retention_days: 7,
            // Metrics sampling and history retention.
            metrics_enabled: false,
            metrics_history_enabled: false,
            metrics_interval_minutes: 5,
            metrics_retention_hours: 24,
            metrics_history_interval_minutes: 5,
            metrics_history_retention_hours: 24,
            // Lifecycle rules and static-website hosting.
            lifecycle_enabled: false,
            lifecycle_max_history_per_bucket: 50,
            website_hosting_enabled: false,
            // Object naming/tagging limits and in-memory caches.
            object_key_max_length_bytes: 1024,
            object_tag_limit: 50,
            object_cache_max_size: 100,
            bucket_config_cache_ttl_seconds: 30.0,
            // Cross-site replication client tuning; 10_485_760 = 10 MiB
            // streaming threshold.
            replication_connect_timeout_secs: 5,
            replication_read_timeout_secs: 30,
            replication_max_retries: 2,
            replication_streaming_threshold_bytes: 10_485_760,
            replication_max_failures_per_bucket: 50,
            // Multi-site sync worker (disabled by default).
            site_sync_enabled: false,
            site_sync_interval_secs: 60,
            site_sync_batch_size: 100,
            site_sync_connect_timeout_secs: 10,
            site_sync_read_timeout_secs: 120,
            site_sync_max_retries: 2,
            site_sync_clock_skew_tolerance: 1.0,
            // This node's cluster identity (unset until configured).
            site_id: None,
            site_endpoint: None,
            site_region: "us-east-1".to_string(),
            site_priority: 100,
            // External HTTP surface: base URL, proxy trust, redirects, CORS.
            api_base_url: "http://127.0.0.1:5000".to_string(),
            num_trusted_proxies: 0,
            allowed_redirect_hosts: Vec::new(),
            allow_internal_endpoints: false,
            cors_origins: vec!["*".to_string()],
            cors_methods: vec![
                "GET".to_string(),
                "PUT".to_string(),
                "POST".to_string(),
                "DELETE".to_string(),
                "OPTIONS".to_string(),
                "HEAD".to_string(),
            ],
            cors_allow_headers: vec!["*".to_string()],
            cors_expose_headers: vec!["*".to_string()],
            session_lifetime_days: 1,
            log_level: "INFO".to_string(),
            // Request handling limits: 5_242_880 = 5 MiB S3 minimum part
            // size, 1_048_576 = 1 MiB stream chunk.
            multipart_min_part_size: 5_242_880,
            bulk_delete_max_keys: 1000,
            stream_chunk_size: 1_048_576,
            request_body_timeout_secs: 60,
            // Per-category rate limits (requests, window-seconds); admin
            // endpoints are limited much more tightly than data paths.
            ratelimit_default: RateLimitSetting::new(5000, 60),
            ratelimit_list_buckets: RateLimitSetting::new(5000, 60),
            ratelimit_bucket_ops: RateLimitSetting::new(5000, 60),
            ratelimit_object_ops: RateLimitSetting::new(5000, 60),
            ratelimit_head_ops: RateLimitSetting::new(5000, 60),
            ratelimit_admin: RateLimitSetting::new(60, 60),
            ratelimit_storage_uri: "memory://".to_string(),
            // UI assets served from on-disk locations by default.
            ui_enabled: true,
            templates_dir: default_templates_dir(),
            static_dir: default_static_dir(),
        }
    }
}
/// Default UI templates directory: `<crate manifest dir>/templates`,
/// resolved at compile time via `CARGO_MANIFEST_DIR`.
fn default_templates_dir() -> PathBuf {
    PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("templates")
}
/// Default static-assets directory.
///
/// Prefers `<crate>/static` when it exists, then a workspace-level
/// `static` three levels above the crate; falls back to the crate-local
/// path even when neither directory is present.
fn default_static_dir() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let crate_local = manifest_dir.join("static");
    let workspace = manifest_dir.join("..").join("..").join("..").join("static");
    if crate_local.exists() {
        crate_local
    } else if workspace.exists() {
        workspace
    } else {
        crate_local
    }
}
/// Read `key` from the environment as a `u64`; returns `default` when the
/// variable is unset or does not parse.
fn parse_u64_env(key: &str, default: u64) -> u64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
/// Read `key` from the environment as a `usize`; returns `default` when
/// the variable is unset or does not parse.
fn parse_usize_env(key: &str, default: usize) -> usize {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
/// Read `key` from the environment as an `i32`; returns `default` when
/// the variable is unset or does not parse.
fn parse_i32_env(key: &str, default: i32) -> i32 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
/// Read `key` from the environment as an `f64`; returns `default` when
/// the variable is unset or does not parse.
fn parse_f64_env(key: &str, default: f64) -> f64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
/// Read `key` as a boolean flag.
///
/// Unset -> `default`. When set, the value is trimmed and lowercased and
/// only "1", "true", "yes", or "on" count as true — any other value,
/// including garbage, is treated as false (not as `default`).
fn parse_bool_env(key: &str, default: bool) -> bool {
    match std::env::var(key) {
        Ok(raw) => {
            let flag = raw.trim().to_ascii_lowercase();
            flag == "1" || flag == "true" || flag == "yes" || flag == "on"
        }
        Err(_) => default,
    }
}
/// Read `key` as an optional string: `None` when unset or when the value
/// is empty after trimming, otherwise the trimmed value.
fn parse_optional_string_env(key: &str) -> Option<String> {
    let raw = std::env::var(key).ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
/// Read `key` as a comma-separated list, using `default` (same format)
/// when unset. Items are trimmed; empty items are dropped.
fn parse_list_env(key: &str, default: &str) -> Vec<String> {
    let raw = std::env::var(key).unwrap_or_else(|_| default.to_string());
    let mut values = Vec::new();
    for piece in raw.split(',') {
        let trimmed = piece.trim();
        if !trimmed.is_empty() {
            values.push(trimmed.to_string());
        }
    }
    values
}
/// Parse a rate-limit specification in either of two forms:
///
/// * `"<count>/<window>"` — window is a number of seconds or a unit
///   name/abbreviation (`s`, `sec`, `minute`, `h`, `days`, ...), e.g.
///   `"50000/60"` or `"100/minute"`.
/// * `"<count> per <unit>"` — full unit names only, e.g. `"200 per minute"`.
///
/// Returns `None` for a zero count, a zero-second window, or any
/// unrecognized syntax.
pub fn parse_rate_limit(value: &str) -> Option<RateLimitSetting> {
    let trimmed = value.trim();
    // Slash syntax: "<count>/<seconds-or-unit>".
    if let Some((count_part, window_part)) = trimmed.split_once('/') {
        let max_requests: u32 = count_part.trim().parse().ok()?;
        if max_requests == 0 {
            return None;
        }
        let window = window_part.trim().to_ascii_lowercase();
        let window_seconds = match window.parse::<u64>() {
            Ok(0) => return None,
            Ok(n) => n,
            // Not a number: accept unit names and common abbreviations.
            Err(_) => match window.as_str() {
                "s" | "sec" | "second" | "seconds" => 1,
                "m" | "min" | "minute" | "minutes" => 60,
                "h" | "hr" | "hour" | "hours" => 3600,
                "d" | "day" | "days" => 86_400,
                _ => return None,
            },
        };
        return Some(RateLimitSetting::new(max_requests, window_seconds));
    }
    // Text syntax: exactly three words, "<count> per <unit>".
    let mut words = trimmed.split_whitespace();
    let (count, per, unit) = (words.next()?, words.next()?, words.next()?);
    if words.next().is_some() || !per.eq_ignore_ascii_case("per") {
        return None;
    }
    let max_requests: u32 = count.parse().ok()?;
    if max_requests == 0 {
        return None;
    }
    // Unlike the slash syntax, only full unit names are accepted here.
    let window_seconds = match unit.to_ascii_lowercase().as_str() {
        "second" | "seconds" => 1,
        "minute" | "minutes" => 60,
        "hour" | "hours" => 3600,
        "day" | "days" => 86_400,
        _ => return None,
    };
    Some(RateLimitSetting::new(max_requests, window_seconds))
}
/// Read `key` as a rate-limit spec (see [`parse_rate_limit`]); returns
/// `default` when unset or unparsable.
fn parse_rate_limit_env(key: &str, default: RateLimitSetting) -> RateLimitSetting {
    match std::env::var(key) {
        Ok(raw) => parse_rate_limit(&raw).unwrap_or(default),
        Err(_) => default,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Mutex, OnceLock};

    /// Process-wide lock serializing tests that mutate environment
    /// variables (env vars are global to the process, so concurrent test
    /// threads would otherwise race).
    ///
    /// NOTE(review): a failed assertion panics before the trailing
    /// `remove_var` calls run, leaving the mutated vars set and poisoning
    /// this mutex for the other tests — acceptable for tests, but worth
    /// knowing when debugging cascading failures.
    fn env_lock() -> &'static Mutex<()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(()))
    }

    /// Both accepted syntaxes ("N per unit" and "N/window") parse, and
    /// zero counts or malformed input yield `None`.
    #[test]
    fn parses_rate_limit_text() {
        assert_eq!(
            parse_rate_limit("200 per minute"),
            Some(RateLimitSetting::new(200, 60))
        );
        assert_eq!(
            parse_rate_limit("3 per hours"),
            Some(RateLimitSetting::new(3, 3600))
        );
        assert_eq!(
            parse_rate_limit("50000/60"),
            Some(RateLimitSetting::new(50000, 60))
        );
        assert_eq!(
            parse_rate_limit("100/minute"),
            Some(RateLimitSetting::new(100, 60))
        );
        assert_eq!(parse_rate_limit("0/60"), None);
        assert_eq!(parse_rate_limit("0 per minute"), None);
        assert_eq!(parse_rate_limit("bad"), None);
    }

    /// Missing and unparsable env vars fall back to the compiled-in
    /// defaults (1024 key bytes, 50 tags, 5000 req/60 s).
    #[test]
    fn env_defaults_and_invalid_values_fall_back() {
        let _guard = env_lock().lock().unwrap();
        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::set_var("OBJECT_TAG_LIMIT", "not-a-number");
        std::env::set_var("RATE_LIMIT_DEFAULT", "invalid");
        let config = ServerConfig::from_env();
        assert_eq!(config.object_key_max_length_bytes, 1024);
        assert_eq!(config.object_tag_limit, 50);
        assert_eq!(config.ratelimit_default, RateLimitSetting::new(5000, 60));
        // Clean up so later env-reading tests start from a known state.
        std::env::remove_var("OBJECT_TAG_LIMIT");
        std::env::remove_var("RATE_LIMIT_DEFAULT");
    }

    /// Valid env overrides take effect, including the api_base_url
    /// derived from HOST/PORT when API_BASE_URL itself is unset.
    #[test]
    fn env_overrides_new_values() {
        let _guard = env_lock().lock().unwrap();
        std::env::set_var("OBJECT_KEY_MAX_LENGTH_BYTES", "2048");
        std::env::set_var("GC_DRY_RUN", "true");
        std::env::set_var("RATE_LIMIT_ADMIN", "7 per second");
        std::env::set_var("HOST", "127.0.0.1");
        std::env::set_var("PORT", "5501");
        std::env::remove_var("API_BASE_URL");
        let config = ServerConfig::from_env();
        assert_eq!(config.object_key_max_length_bytes, 2048);
        assert!(config.gc_dry_run);
        assert_eq!(config.ratelimit_admin, RateLimitSetting::new(7, 1));
        assert_eq!(config.api_base_url, "http://127.0.0.1:5501");
        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::remove_var("GC_DRY_RUN");
        std::env::remove_var("RATE_LIMIT_ADMIN");
        std::env::remove_var("HOST");
        std::env::remove_var("PORT");
    }
}

View File

@@ -0,0 +1,25 @@
use rust_embed::{EmbeddedFile, RustEmbed};
/// HTML templates compiled into the binary from
/// `$CARGO_MANIFEST_DIR/templates`; only `*.html` files are embedded.
#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/templates"]
#[include = "*.html"]
pub struct EmbeddedTemplates;
/// Static UI assets (all files) compiled into the binary from
/// `$CARGO_MANIFEST_DIR/static`.
#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
pub struct EmbeddedStatic;
/// List the relative paths of every embedded template.
pub fn template_names() -> Vec<String> {
    let mut names = Vec::new();
    for name in EmbeddedTemplates::iter() {
        names.push(name.into_owned());
    }
    names
}
/// Return an embedded template's contents as UTF-8 text.
///
/// `None` when no template with that name is embedded or its bytes are
/// not valid UTF-8.
pub fn template_contents(name: &str) -> Option<String> {
    EmbeddedTemplates::get(name)
        .and_then(|file| String::from_utf8(file.data.into_owned()).ok())
}
/// Look up an embedded static asset by its path relative to the static
/// root; `None` when no such asset was embedded.
pub fn static_file(path: &str) -> Option<EmbeddedFile> {
    EmbeddedStatic::get(path)
}

View File

@@ -46,6 +46,17 @@ fn require_admin(principal: &Principal) -> Option<Response> {
None
}
/// Gate a handler on a specific IAM action: `None` when `principal` is
/// authorized for `action` (no bucket/resource scoping), otherwise a
/// 403 AccessDenied JSON response to return to the caller.
fn require_iam_action(state: &AppState, principal: &Principal, action: &str) -> Option<Response> {
    if state.iam.authorize(principal, None, action, None) {
        None
    } else {
        Some(json_error(
            "AccessDenied",
            &format!("Requires {} permission", action),
            StatusCode::FORBIDDEN,
        ))
    }
}
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
let bytes = http_body_util::BodyExt::collect(body)
.await
@@ -334,6 +345,12 @@ pub async fn register_peer_site(
.get("connection_id")
.and_then(|v| v.as_str())
.map(|s| s.to_string()),
peer_inbound_access_key: payload
.get("peer_inbound_access_key")
.and_then(|v| v.as_str())
.map(str::trim)
.filter(|s| !s.is_empty())
.map(|s| s.to_string()),
created_at: Some(chrono::Utc::now().to_rfc3339()),
is_healthy: false,
last_health_check: None,
@@ -456,6 +473,16 @@ pub async fn update_peer_site(
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.or(existing.connection_id),
peer_inbound_access_key: if payload.get("peer_inbound_access_key").is_some() {
payload
.get("peer_inbound_access_key")
.and_then(|v| v.as_str())
.map(str::trim)
.filter(|s| !s.is_empty())
.map(|s| s.to_string())
} else {
existing.peer_inbound_access_key
},
created_at: existing.created_at,
is_healthy: existing.is_healthy,
last_health_check: existing.last_health_check,
@@ -926,7 +953,7 @@ pub async fn iam_list_users(
State(state): State<AppState>,
Extension(principal): Extension<Principal>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:list_users") {
return err;
}
let users = state.iam.list_users().await;
@@ -938,7 +965,7 @@ pub async fn iam_get_user(
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:get_user") {
return err;
}
match state.iam.get_user(&identifier).await {
@@ -956,7 +983,7 @@ pub async fn iam_get_user_policies(
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:get_policy") {
return err;
}
match state.iam.get_user_policies(&identifier) {
@@ -974,7 +1001,7 @@ pub async fn iam_create_access_key(
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:create_key") {
return err;
}
match state.iam.create_access_key(&identifier) {
@@ -988,7 +1015,7 @@ pub async fn iam_delete_access_key(
Extension(principal): Extension<Principal>,
Path((_identifier, access_key)): Path<(String, String)>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:delete_key") {
return err;
}
match state.iam.delete_access_key(&access_key) {
@@ -1002,7 +1029,7 @@ pub async fn iam_disable_user(
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:disable_user") {
return err;
}
match state.iam.set_user_enabled(&identifier, false).await {
@@ -1016,7 +1043,7 @@ pub async fn iam_enable_user(
Extension(principal): Extension<Principal>,
Path(identifier): Path<String>,
) -> Response {
if let Some(err) = require_admin(&principal) {
if let Some(err) = require_iam_action(&state, &principal, "iam:disable_user") {
return err;
}
match state.iam.set_user_enabled(&identifier, true).await {
@@ -1401,3 +1428,166 @@ pub async fn integrity_history(
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
}
}
/// Allow the request when the caller is an admin, or when its access key
/// matches the inbound access key of a registered peer site.
///
/// Returns `None` on success, or a 403 JSON error response to send back.
fn require_admin_or_registered_peer(state: &AppState, principal: &Principal) -> Option<Response> {
    if principal.is_admin {
        return None;
    }
    // Without a site registry there is no peer list to consult, so only
    // admins can pass.
    let Some(registry) = state.site_registry.as_ref() else {
        return Some(json_error(
            "AccessDenied",
            "Admin access required",
            StatusCode::FORBIDDEN,
        ));
    };
    let is_registered_peer = registry.list_peers().into_iter().any(|peer| {
        peer.peer_inbound_access_key.as_deref() == Some(principal.access_key.as_str())
    });
    if is_registered_peer {
        None
    } else {
        Some(json_error(
            "AccessDenied",
            "Admin or registered peer required",
            StatusCode::FORBIDDEN,
        ))
    }
}
/// Public wrapper around [`build_cluster_overview`] so other modules can
/// render the overview document without the handler's auth checks.
pub async fn build_cluster_overview_public(state: &AppState) -> serde_json::Value {
    build_cluster_overview(state).await
}
/// Assemble the cluster-overview JSON document for this node: local site
/// identity, bucket/object/byte totals, disk capacity, the latest system
/// metrics sample (when available), and aggregate site-sync stats.
async fn build_cluster_overview(state: &AppState) -> serde_json::Value {
    let site = state
        .site_registry
        .as_ref()
        .and_then(|registry| registry.get_local_site());
    // Per-bucket totals are accumulated best-effort: buckets whose stats
    // fail to load are skipped rather than failing the whole overview.
    let buckets = state.storage.list_buckets().await.unwrap_or_default();
    let mut object_total: u64 = 0;
    let mut byte_total: u64 = 0;
    for bucket in &buckets {
        if let Ok(stats) = state.storage.bucket_stats(&bucket.name).await {
            object_total += stats.total_objects();
            byte_total += stats.total_bytes();
        }
    }
    let (disk_total, disk_free) =
        crate::services::system_metrics::sample_disk(&state.config.storage_root);
    // Latest system sample, or an empty object when metrics are disabled
    // or no sample has been recorded yet.
    let mut system = serde_json::json!({});
    if let Some(svc) = state.system_metrics.as_ref() {
        if let Some(sample) = svc.get_history(Some(1)).await.last() {
            system = serde_json::json!({
                "cpu_percent": sample.cpu_percent,
                "memory_percent": sample.memory_percent,
                "disk_percent": sample.disk_percent,
                "storage_bytes": sample.storage_bytes,
            });
        }
    }
    let snapshot = state
        .site_sync
        .as_ref()
        .map(|worker| worker.snapshot_stats())
        .unwrap_or_default();
    let sync_errors: u64 = snapshot.values().map(|s| s.errors).sum();
    // Most recent sync timestamp across all buckets (epoch seconds).
    let last_sync_at = snapshot
        .values()
        .filter_map(|s| s.last_sync_at)
        .fold(None::<f64>, |best, ts| match best {
            Some(prev) if prev > ts => Some(prev),
            _ => Some(ts),
        });
    let now = chrono::Utc::now().timestamp_millis() as f64 / 1000.0;
    serde_json::json!({
        "site_id": site.as_ref().map(|s| s.site_id.clone()),
        "display_name": site.as_ref().map(|s| s.display_name.clone()),
        "endpoint": site.as_ref().map(|s| s.endpoint.clone()),
        "region": site.as_ref().map(|s| s.region.clone()),
        "priority": site.as_ref().map(|s| s.priority),
        "capacity": {
            "total_bytes": disk_total,
            "available_bytes": disk_free,
        },
        "buckets": buckets.len() as u64,
        "objects": object_total,
        "size_bytes": byte_total,
        "system": system,
        "sync": {
            "errors": sync_errors,
            "last_sync_at": last_sync_at,
        },
        "generated_at": now,
    })
}
/// Handler for the cluster-overview endpoint.
///
/// Accessible to admins and registered peer sites. Serves a cached
/// document when it is less than 10 seconds old; otherwise rebuilds the
/// overview and refreshes the cache.
pub async fn get_cluster_overview(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
) -> Response {
    if let Some(err) = require_admin_or_registered_peer(&state, &principal) {
        return err;
    }
    // Take the cached value under the lock, then drop the guard before
    // any await point.
    let cached = {
        let guard = state.cluster_overview_cache.lock();
        match *guard {
            Some((at, ref value)) if at.elapsed() < std::time::Duration::from_secs(10) => {
                Some(value.clone())
            }
            _ => None,
        }
    };
    if let Some(value) = cached {
        return json_response(StatusCode::OK, value);
    }
    let fresh = build_cluster_overview(&state).await;
    *state.cluster_overview_cache.lock() = Some((std::time::Instant::now(), fresh.clone()));
    json_response(StatusCode::OK, fresh)
}
/// Admin-only handler returning per-bucket site-sync statistics plus
/// whether the sync worker is enabled at all.
pub async fn get_sync_stats(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
) -> Response {
    if let Some(err) = require_admin(&principal) {
        return err;
    }
    let enabled = state.site_sync.is_some();
    // Empty snapshot when the worker is not running.
    let snapshot = state
        .site_sync
        .as_ref()
        .map(|worker| worker.snapshot_stats())
        .unwrap_or_default();
    let mut stats = Vec::with_capacity(snapshot.len());
    for (bucket, s) in snapshot {
        stats.push(serde_json::json!({
            "bucket": bucket,
            "last_sync_at": s.last_sync_at,
            "objects_pulled": s.objects_pulled,
            "objects_skipped": s.objects_skipped,
            "conflicts_resolved": s.conflicts_resolved,
            "deletions_applied": s.deletions_applied,
            "errors": s.errors,
        }));
    }
    json_response(
        StatusCode::OK,
        serde_json::json!({
            "enabled": enabled,
            "stats": stats,
        }),
    )
}

View File

@@ -1,16 +1,32 @@
use axum::body::Body;
use axum::http::StatusCode;
use axum::http::{HeaderMap, StatusCode};
use axum::response::{IntoResponse, Response};
use chrono::{DateTime, Utc};
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_storage::traits::StorageEngine;
use crate::services::acl::{
acl_from_object_metadata, acl_to_xml, create_canned_acl, store_object_acl,
};
use crate::services::notifications::parse_notification_configurations;
use crate::services::object_lock::{
ensure_retention_mutable, get_legal_hold, get_object_retention as retention_from_metadata,
set_legal_hold, set_object_retention as store_retention, ObjectLockRetention, RetentionMode,
};
use crate::state::AppState;
/// Build a response with the given status and an
/// `content-type: application/xml` body.
fn xml_response(status: StatusCode, xml: String) -> Response {
    (status, [("content-type", "application/xml")], xml).into_response()
}
/// Render a stored bucket-config value as XML text: JSON strings are
/// returned verbatim (they already hold raw XML), anything else is
/// serialized to its JSON representation.
fn stored_xml(value: &serde_json::Value) -> String {
    if let serde_json::Value::String(s) = value {
        s.clone()
    } else {
        value.to_string()
    }
}
fn storage_err(err: myfsio_storage::error::StorageError) -> Response {
let s3err = S3Error::from(err);
let status =
@@ -32,18 +48,42 @@ fn json_response(status: StatusCode, value: serde_json::Value) -> Response {
.into_response()
}
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
match state.storage.is_versioning_enabled(bucket).await {
Ok(enabled) => {
let status_str = if enabled { "Enabled" } else { "Suspended" };
fn custom_xml_error(status: StatusCode, code: &str, message: &str) -> Response {
let xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>{}</Status>\
</VersioningConfiguration>",
status_str
<Error><Code>{}</Code><Message>{}</Message><Resource></Resource><RequestId></RequestId></Error>",
xml_escape(code),
xml_escape(message),
);
xml_response(StatusCode::OK, xml)
xml_response(status, xml)
}
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
match state.storage.get_versioning_status(bucket).await {
Ok(status) => {
let body = match status {
myfsio_common::types::VersioningStatus::Enabled => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>Enabled</Status>\
</VersioningConfiguration>"
.to_string()
}
myfsio_common::types::VersioningStatus::Suspended => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>Suspended</Status>\
</VersioningConfiguration>"
.to_string()
}
myfsio_common::types::VersioningStatus::Disabled => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
</VersioningConfiguration>"
.to_string()
}
};
xml_response(StatusCode::OK, body)
}
Err(e) => storage_err(e),
}
@@ -61,9 +101,22 @@ pub async fn put_versioning(state: &AppState, bucket: &str, body: Body) -> Respo
};
let xml_str = String::from_utf8_lossy(&body_bytes);
let enabled = xml_str.contains("<Status>Enabled</Status>");
let status = if xml_str.contains("<Status>Enabled</Status>") {
myfsio_common::types::VersioningStatus::Enabled
} else if xml_str.contains("<Status>Suspended</Status>") {
myfsio_common::types::VersioningStatus::Suspended
} else {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(
S3ErrorCode::MalformedXML,
"VersioningConfiguration Status must be Enabled or Suspended",
)
.to_xml(),
);
};
match state.storage.set_versioning(bucket, enabled).await {
match state.storage.set_versioning_status(bucket, status).await {
Ok(()) => StatusCode::OK.into_response(),
Err(e) => storage_err(e),
}
@@ -132,7 +185,7 @@ pub async fn get_cors(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(cors) = &config.cors {
xml_response(StatusCode::OK, cors.to_string())
xml_response(StatusCode::OK, stored_xml(cors))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -195,14 +248,11 @@ pub async fn get_encryption(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(enc) = &config.encryption {
xml_response(StatusCode::OK, enc.to_string())
xml_response(StatusCode::OK, stored_xml(enc))
} else {
xml_response(
StatusCode::NOT_FOUND,
S3Error::new(
S3ErrorCode::InvalidRequest,
"The server side encryption configuration was not found",
)
S3Error::from_code(S3ErrorCode::ServerSideEncryptionConfigurationNotFoundError)
.to_xml(),
)
}
@@ -247,15 +297,11 @@ pub async fn get_lifecycle(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(lc) = &config.lifecycle {
xml_response(StatusCode::OK, lc.to_string())
xml_response(StatusCode::OK, stored_xml(lc))
} else {
xml_response(
StatusCode::NOT_FOUND,
S3Error::new(
S3ErrorCode::NoSuchKey,
"The lifecycle configuration does not exist",
)
.to_xml(),
S3Error::from_code(S3ErrorCode::NoSuchLifecycleConfiguration).to_xml(),
)
}
}
@@ -402,7 +448,7 @@ pub async fn get_policy(state: &AppState, bucket: &str) -> Response {
} else {
xml_response(
StatusCode::NOT_FOUND,
S3Error::new(S3ErrorCode::NoSuchKey, "No bucket policy attached").to_xml(),
S3Error::from_code(S3ErrorCode::NoSuchBucketPolicy).to_xml(),
)
}
}
@@ -478,10 +524,7 @@ pub async fn get_replication(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(replication) = &config.replication {
match replication {
serde_json::Value::String(s) => xml_response(StatusCode::OK, s.clone()),
other => xml_response(StatusCode::OK, other.to_string()),
}
xml_response(StatusCode::OK, stored_xml(replication))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -574,7 +617,7 @@ pub async fn get_acl(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(acl) = &config.acl {
xml_response(StatusCode::OK, acl.to_string())
xml_response(StatusCode::OK, stored_xml(acl))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -614,7 +657,7 @@ pub async fn get_website(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(ws) = &config.website {
xml_response(StatusCode::OK, ws.to_string())
xml_response(StatusCode::OK, stored_xml(ws))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -666,7 +709,7 @@ pub async fn get_object_lock(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(ol) = &config.object_lock {
xml_response(StatusCode::OK, ol.to_string())
xml_response(StatusCode::OK, stored_xml(ol))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -683,7 +726,7 @@ pub async fn get_notification(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(n) = &config.notification {
xml_response(StatusCode::OK, n.to_string())
xml_response(StatusCode::OK, stored_xml(n))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -847,13 +890,34 @@ pub async fn delete_object_lock(state: &AppState, bucket: &str) -> Response {
pub async fn put_notification(state: &AppState, bucket: &str, body: Body) -> Response {
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(collected) => collected.to_bytes(),
Err(_) => return StatusCode::BAD_REQUEST.into_response(),
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"MalformedXML",
"Unable to parse XML document",
)
}
};
let raw = String::from_utf8_lossy(&body_bytes).to_string();
let notification = if raw.trim().is_empty() {
None
} else {
match parse_notification_configurations(&raw) {
Ok(_) => Some(serde_json::Value::String(raw)),
Err(message) => {
let code = if message.contains("Destination URL is required") {
"InvalidArgument"
} else {
"MalformedXML"
};
return custom_xml_error(StatusCode::BAD_REQUEST, code, &message);
}
}
};
let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
match state.storage.get_bucket_config(bucket).await {
Ok(mut config) => {
config.notification = Some(value);
config.notification = notification;
match state.storage.set_bucket_config(bucket, &config).await {
Ok(()) => StatusCode::OK.into_response(),
Err(e) => storage_err(e),
@@ -995,52 +1059,267 @@ pub async fn delete_logging(state: &AppState, bucket: &str) -> Response {
fn s3_error_response(code: S3ErrorCode, message: &str, status: StatusCode) -> Response {
let err = S3Error::new(code, message.to_string());
(status, [("content-type", "application/xml")], err.to_xml()).into_response()
let code_str = code.as_str();
(
status,
[
("content-type", "application/xml"),
("x-amz-error-code", code_str),
],
err.to_xml(),
)
.into_response()
}
pub async fn list_object_versions(state: &AppState, bucket: &str) -> Response {
match state.storage.list_buckets().await {
Ok(buckets) => {
if !buckets.iter().any(|b| b.name == bucket) {
pub async fn list_object_versions(
state: &AppState,
bucket: &str,
prefix: Option<&str>,
delimiter: Option<&str>,
key_marker: Option<&str>,
version_id_marker: Option<&str>,
max_keys: usize,
) -> Response {
match state.storage.bucket_exists(bucket).await {
Ok(true) => {}
Ok(false) => {
return storage_err(myfsio_storage::error::StorageError::BucketNotFound(
bucket.to_string(),
));
}
}
Err(e) => return storage_err(e),
}
let params = myfsio_common::types::ListParams {
max_keys: 1000,
max_keys: usize::MAX,
prefix: prefix.map(ToOwned::to_owned),
..Default::default()
};
let objects = match state.storage.list_objects(bucket, &params).await {
Ok(result) => result.objects,
let object_result = match state.storage.list_objects(bucket, &params).await {
Ok(result) => result,
Err(e) => return storage_err(e),
};
let live_objects = object_result.objects;
let archived_versions = match state
.storage
.list_bucket_object_versions(bucket, prefix)
.await
{
Ok(versions) => versions,
Err(e) => return storage_err(e),
};
#[derive(Clone)]
struct Entry {
key: String,
version_id: String,
last_modified: chrono::DateTime<chrono::Utc>,
etag: Option<String>,
size: u64,
storage_class: String,
is_delete_marker: bool,
}
let mut entries: Vec<Entry> = Vec::with_capacity(live_objects.len() + archived_versions.len());
for obj in &live_objects {
entries.push(Entry {
key: obj.key.clone(),
version_id: obj.version_id.clone().unwrap_or_else(|| "null".to_string()),
last_modified: obj.last_modified,
etag: obj.etag.clone(),
size: obj.size,
storage_class: obj
.storage_class
.clone()
.unwrap_or_else(|| "STANDARD".to_string()),
is_delete_marker: false,
});
}
for version in &archived_versions {
entries.push(Entry {
key: version.key.clone(),
version_id: version.version_id.clone(),
last_modified: version.last_modified,
etag: version.etag.clone(),
size: version.size,
storage_class: "STANDARD".to_string(),
is_delete_marker: version.is_delete_marker,
});
}
entries.sort_by(|a, b| {
a.key
.cmp(&b.key)
.then_with(|| b.last_modified.cmp(&a.last_modified))
.then_with(|| a.version_id.cmp(&b.version_id))
});
let mut latest_marked: std::collections::HashSet<String> = std::collections::HashSet::new();
let mut is_latest_flags: Vec<bool> = Vec::with_capacity(entries.len());
for entry in &entries {
if latest_marked.insert(entry.key.clone()) {
is_latest_flags.push(true);
} else {
is_latest_flags.push(false);
}
}
let km = key_marker.unwrap_or("");
let vim = version_id_marker.unwrap_or("");
let start_index = if km.is_empty() {
0
} else if vim.is_empty() {
entries
.iter()
.position(|e| e.key.as_str() > km)
.unwrap_or(entries.len())
} else if let Some(pos) = entries
.iter()
.position(|e| e.key == km && e.version_id == vim)
{
pos + 1
} else {
entries
.iter()
.position(|e| e.key.as_str() > km)
.unwrap_or(entries.len())
};
let delim = delimiter.unwrap_or("");
let prefix_str = prefix.unwrap_or("");
let mut common_prefixes: Vec<String> = Vec::new();
let mut seen_prefixes: std::collections::HashSet<String> = std::collections::HashSet::new();
let mut rendered = String::new();
let mut count = 0usize;
let mut is_truncated = false;
let mut next_key_marker: Option<String> = None;
let mut next_version_id_marker: Option<String> = None;
let mut last_emitted: Option<(String, String)> = None;
let mut idx = start_index;
while idx < entries.len() {
let entry = &entries[idx];
let is_latest = is_latest_flags[idx];
if !delim.is_empty() {
let rest = entry.key.strip_prefix(prefix_str).unwrap_or(&entry.key);
if let Some(delim_pos) = rest.find(delim) {
let grouped = entry.key[..prefix_str.len() + delim_pos + delim.len()].to_string();
if seen_prefixes.contains(&grouped) {
idx += 1;
continue;
}
if count >= max_keys {
is_truncated = true;
if let Some((k, v)) = last_emitted.clone() {
next_key_marker = Some(k);
next_version_id_marker = Some(v);
}
break;
}
common_prefixes.push(grouped.clone());
seen_prefixes.insert(grouped.clone());
count += 1;
let mut group_last = (entry.key.clone(), entry.version_id.clone());
idx += 1;
while idx < entries.len() && entries[idx].key.starts_with(&grouped) {
group_last = (entries[idx].key.clone(), entries[idx].version_id.clone());
idx += 1;
}
last_emitted = Some(group_last);
continue;
}
}
if count >= max_keys {
is_truncated = true;
if let Some((k, v)) = last_emitted.clone() {
next_key_marker = Some(k);
next_version_id_marker = Some(v);
}
break;
}
let tag = if entry.is_delete_marker {
"DeleteMarker"
} else {
"Version"
};
rendered.push_str(&format!("<{}>", tag));
rendered.push_str(&format!("<Key>{}</Key>", xml_escape(&entry.key)));
rendered.push_str(&format!(
"<VersionId>{}</VersionId>",
xml_escape(&entry.version_id)
));
rendered.push_str(&format!("<IsLatest>{}</IsLatest>", is_latest));
rendered.push_str(&format!(
"<LastModified>{}</LastModified>",
myfsio_xml::response::format_s3_datetime(&entry.last_modified)
));
if !entry.is_delete_marker {
if let Some(ref etag) = entry.etag {
rendered.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
}
rendered.push_str(&format!("<Size>{}</Size>", entry.size));
rendered.push_str(&format!(
"<StorageClass>{}</StorageClass>",
xml_escape(&entry.storage_class)
));
}
rendered.push_str(&format!("</{}>", tag));
last_emitted = Some((entry.key.clone(), entry.version_id.clone()));
count += 1;
idx += 1;
}
let mut xml = String::from(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
);
xml.push_str(&format!("<Name>{}</Name>", bucket));
for obj in &objects {
xml.push_str("<Version>");
xml.push_str(&format!("<Key>{}</Key>", obj.key));
xml.push_str("<VersionId>null</VersionId>");
xml.push_str("<IsLatest>true</IsLatest>");
xml.push_str(&format!(
"<LastModified>{}</LastModified>",
myfsio_xml::response::format_s3_datetime(&obj.last_modified)
));
if let Some(ref etag) = obj.etag {
xml.push_str(&format!("<ETag>\"{}\"</ETag>", etag));
xml.push_str(&format!("<Name>{}</Name>", xml_escape(bucket)));
xml.push_str(&format!("<Prefix>{}</Prefix>", xml_escape(prefix_str)));
if !km.is_empty() {
xml.push_str(&format!("<KeyMarker>{}</KeyMarker>", xml_escape(km)));
} else {
xml.push_str("<KeyMarker></KeyMarker>");
}
xml.push_str(&format!("<Size>{}</Size>", obj.size));
xml.push_str("<StorageClass>STANDARD</StorageClass>");
xml.push_str("</Version>");
if !vim.is_empty() {
xml.push_str(&format!(
"<VersionIdMarker>{}</VersionIdMarker>",
xml_escape(vim)
));
} else {
xml.push_str("<VersionIdMarker></VersionIdMarker>");
}
xml.push_str(&format!("<MaxKeys>{}</MaxKeys>", max_keys));
if !delim.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(delim)));
}
xml.push_str(&format!("<IsTruncated>{}</IsTruncated>", is_truncated));
if let Some(ref nk) = next_key_marker {
xml.push_str(&format!(
"<NextKeyMarker>{}</NextKeyMarker>",
xml_escape(nk)
));
}
if let Some(ref nv) = next_version_id_marker {
xml.push_str(&format!(
"<NextVersionIdMarker>{}</NextVersionIdMarker>",
xml_escape(nv)
));
}
xml.push_str(&rendered);
for cp in &common_prefixes {
xml.push_str(&format!(
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
}
xml.push_str("</ListVersionsResult>");
@@ -1080,6 +1359,36 @@ pub async fn put_object_tagging(state: &AppState, bucket: &str, key: &str, body:
let xml_str = String::from_utf8_lossy(&body_bytes);
let tags = parse_tagging_xml(&xml_str);
if tags.len() > state.config.object_tag_limit {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(
S3ErrorCode::InvalidTag,
format!("Maximum {} tags allowed", state.config.object_tag_limit),
)
.to_xml(),
);
}
for tag in &tags {
if tag.key.is_empty() || tag.key.len() > 128 {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(S3ErrorCode::InvalidTag, "Tag key length must be 1-128").to_xml(),
);
}
if tag.value.len() > 256 {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(S3ErrorCode::InvalidTag, "Tag value length must be 0-256").to_xml(),
);
}
if tag.key.contains('=') {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(S3ErrorCode::InvalidTag, "Tag keys must not contain '='").to_xml(),
);
}
}
match state.storage.set_object_tags(bucket, key, &tags).await {
Ok(()) => StatusCode::OK.into_response(),
@@ -1094,40 +1403,68 @@ pub async fn delete_object_tagging(state: &AppState, bucket: &str, key: &str) ->
}
}
pub async fn get_object_acl(state: &AppState, bucket: &str, key: &str) -> Response {
pub async fn put_object_acl(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
_body: Body,
) -> Response {
match state.storage.head_object(bucket, key).await {
Ok(_) => {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Owner><ID>myfsio</ID><DisplayName>myfsio</DisplayName></Owner>\
<AccessControlList>\
<Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
<ID>myfsio</ID><DisplayName>myfsio</DisplayName></Grantee>\
<Permission>FULL_CONTROL</Permission></Grant>\
</AccessControlList></AccessControlPolicy>";
xml_response(StatusCode::OK, xml.to_string())
}
Err(e) => storage_err(e),
let canned_acl = headers
.get("x-amz-acl")
.and_then(|value| value.to_str().ok())
.unwrap_or("private");
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
Ok(metadata) => metadata,
Err(err) => return storage_err(err),
};
let owner = acl_from_object_metadata(&metadata)
.map(|acl| acl.owner)
.unwrap_or_else(|| "myfsio".to_string());
let acl = create_canned_acl(canned_acl, &owner);
store_object_acl(&mut metadata, &acl);
match state
.storage
.put_object_metadata(bucket, key, &metadata)
.await
{
Ok(()) => StatusCode::OK.into_response(),
Err(err) => storage_err(err),
}
}
/// Legacy PutObjectAcl: ACLs are not persisted on this path; the request is
/// accepted as long as the target object exists.
pub async fn put_object_acl(state: &AppState, bucket: &str, key: &str, _body: Body) -> Response {
    // Existence check only — any storage failure maps straight to an error response.
    if let Err(e) = state.storage.head_object(bucket, key).await {
        return storage_err(e);
    }
    StatusCode::OK.into_response()
}
pub async fn get_object_retention(state: &AppState, bucket: &str, key: &str) -> Response {
match state.storage.head_object(bucket, key).await {
Ok(_) => xml_response(
Ok(_) => {
let metadata = match state.storage.get_object_metadata(bucket, key).await {
Ok(metadata) => metadata,
Err(err) => return storage_err(err),
};
if let Some(retention) = retention_from_metadata(&metadata) {
let xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<Retention xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Mode>{}</Mode><RetainUntilDate>{}</RetainUntilDate></Retention>",
match retention.mode {
RetentionMode::GOVERNANCE => "GOVERNANCE",
RetentionMode::COMPLIANCE => "COMPLIANCE",
},
retention.retain_until_date.format("%Y-%m-%dT%H:%M:%S.000Z"),
);
xml_response(StatusCode::OK, xml)
} else {
custom_xml_error(
StatusCode::NOT_FOUND,
S3Error::new(
S3ErrorCode::InvalidRequest,
"No retention policy configured",
"NoSuchObjectLockConfiguration",
"No retention policy",
)
.to_xml(),
),
}
}
Err(e) => storage_err(e),
}
}
@@ -1136,21 +1473,116 @@ pub async fn put_object_retention(
state: &AppState,
bucket: &str,
key: &str,
_body: Body,
headers: &HeaderMap,
body: Body,
) -> Response {
match state.storage.head_object(bucket, key).await {
Ok(_) => StatusCode::OK.into_response(),
Err(e) => storage_err(e),
Ok(_) => {}
Err(e) => return storage_err(e),
}
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(collected) => collected.to_bytes(),
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"MalformedXML",
"Unable to parse XML document",
)
}
};
let body_str = String::from_utf8_lossy(&body_bytes);
let doc = match roxmltree::Document::parse(&body_str) {
Ok(doc) => doc,
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"MalformedXML",
"Unable to parse XML document",
)
}
};
let mode = find_xml_text(&doc, "Mode").unwrap_or_default();
let retain_until = find_xml_text(&doc, "RetainUntilDate").unwrap_or_default();
if mode.is_empty() || retain_until.is_empty() {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"InvalidArgument",
"Mode and RetainUntilDate are required",
);
}
let mode = match mode.as_str() {
"GOVERNANCE" => RetentionMode::GOVERNANCE,
"COMPLIANCE" => RetentionMode::COMPLIANCE,
other => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"InvalidArgument",
&format!("Invalid retention mode: {}", other),
)
}
};
let retain_until_date = match DateTime::parse_from_rfc3339(&retain_until) {
Ok(value) => value.with_timezone(&Utc),
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"InvalidArgument",
&format!("Invalid date format: {}", retain_until),
)
}
};
let bypass_governance = headers
.get("x-amz-bypass-governance-retention")
.and_then(|value| value.to_str().ok())
.map(|value| value.eq_ignore_ascii_case("true"))
.unwrap_or(false);
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
Ok(metadata) => metadata,
Err(err) => return storage_err(err),
};
if let Err(message) = ensure_retention_mutable(&metadata, bypass_governance) {
return custom_xml_error(StatusCode::FORBIDDEN, "AccessDenied", &message);
}
if let Err(message) = store_retention(
&mut metadata,
&ObjectLockRetention {
mode,
retain_until_date,
},
) {
return custom_xml_error(StatusCode::BAD_REQUEST, "InvalidArgument", &message);
}
match state
.storage
.put_object_metadata(bucket, key, &metadata)
.await
{
Ok(()) => StatusCode::OK.into_response(),
Err(err) => storage_err(err),
}
}
pub async fn get_object_legal_hold(state: &AppState, bucket: &str, key: &str) -> Response {
match state.storage.head_object(bucket, key).await {
Ok(_) => {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
let metadata = match state.storage.get_object_metadata(bucket, key).await {
Ok(metadata) => metadata,
Err(err) => return storage_err(err),
};
let status = if get_legal_hold(&metadata) {
"ON"
} else {
"OFF"
};
let xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>OFF</Status></LegalHold>";
xml_response(StatusCode::OK, xml.to_string())
<Status>{}</Status></LegalHold>",
status
);
xml_response(StatusCode::OK, xml)
}
Err(e) => storage_err(e),
}
@@ -1160,14 +1592,84 @@ pub async fn put_object_legal_hold(
state: &AppState,
bucket: &str,
key: &str,
_body: Body,
body: Body,
) -> Response {
match state.storage.head_object(bucket, key).await {
Ok(_) => StatusCode::OK.into_response(),
Ok(_) => {}
Err(e) => return storage_err(e),
}
let body_bytes = match http_body_util::BodyExt::collect(body).await {
Ok(collected) => collected.to_bytes(),
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"MalformedXML",
"Unable to parse XML document",
)
}
};
let body_str = String::from_utf8_lossy(&body_bytes);
let doc = match roxmltree::Document::parse(&body_str) {
Ok(doc) => doc,
Err(_) => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"MalformedXML",
"Unable to parse XML document",
)
}
};
let status = find_xml_text(&doc, "Status").unwrap_or_default();
let enabled = match status.as_str() {
"ON" => true,
"OFF" => false,
_ => {
return custom_xml_error(
StatusCode::BAD_REQUEST,
"InvalidArgument",
"Status must be ON or OFF",
)
}
};
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
Ok(metadata) => metadata,
Err(err) => return storage_err(err),
};
set_legal_hold(&mut metadata, enabled);
match state
.storage
.put_object_metadata(bucket, key, &metadata)
.await
{
Ok(()) => StatusCode::OK.into_response(),
Err(err) => storage_err(err),
}
}
/// GetObjectAcl: return the stored ACL for an object, or a default private
/// ACL owned by "myfsio" when no ACL has been recorded in the metadata.
pub async fn get_object_acl(state: &AppState, bucket: &str, key: &str) -> Response {
    // Confirm the object exists before reading metadata.
    if let Err(e) = state.storage.head_object(bucket, key).await {
        return storage_err(e);
    }
    let metadata = match state.storage.get_object_metadata(bucket, key).await {
        Ok(m) => m,
        Err(err) => return storage_err(err),
    };
    // No stored ACL means the object has the default private canned ACL.
    let acl = match acl_from_object_metadata(&metadata) {
        Some(acl) => acl,
        None => create_canned_acl("private", "myfsio"),
    };
    xml_response(StatusCode::OK, acl_to_xml(&acl))
}
/// Return the trimmed text content of the first element named `name`
/// (namespace-agnostic) in `doc`, treating missing or empty text as `None`.
fn find_xml_text(doc: &roxmltree::Document<'_>, name: &str) -> Option<String> {
    for node in doc.descendants() {
        if node.is_element() && node.tag_name().name() == name {
            // Only the first matching element is considered, exactly like
            // a `find` over descendants; empty text counts as absent.
            let text = node.text()?.trim();
            if text.is_empty() {
                return None;
            }
            return Some(text.to_string());
        }
    }
    None
}
#[cfg(test)]
mod tests {
use super::{legacy_logging_config, parse_logging_config_xml};

View File

@@ -294,8 +294,17 @@ async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext:
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if !(1..=1024).contains(&num_bytes) {
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
if num_bytes < state.config.kms_generate_data_key_min_bytes
|| num_bytes > state.config.kms_generate_data_key_max_bytes
{
return json_err(
StatusCode::BAD_REQUEST,
&format!(
"NumberOfBytes must be {}-{}",
state.config.kms_generate_data_key_min_bytes,
state.config.kms_generate_data_key_max_bytes
),
);
}
match kms.generate_data_key(key_id, num_bytes).await {
@@ -389,8 +398,17 @@ pub async fn generate_random(State(state): State<AppState>, body: Body) -> Respo
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if !(1..=1024).contains(&num_bytes) {
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
if num_bytes < state.config.kms_generate_data_key_min_bytes
|| num_bytes > state.config.kms_generate_data_key_max_bytes
{
return json_err(
StatusCode::BAD_REQUEST,
&format!(
"NumberOfBytes must be {}-{}",
state.config.kms_generate_data_key_min_bytes,
state.config.kms_generate_data_key_max_bytes
),
);
}
let mut bytes = vec![0u8; num_bytes];

File diff suppressed because it is too large Load Diff

View File

@@ -511,11 +511,20 @@ fn s3_error_response(err: S3Error) -> Response {
} else {
err.resource.clone()
};
let code_str = err.code.as_str();
let body = err
.with_resource(resource)
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
.to_xml();
(status, [("content-type", "application/xml")], body).into_response()
(
status,
[
("content-type", "application/xml"),
("x-amz-error-code", code_str),
],
body,
)
.into_response()
}
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {

View File

@@ -0,0 +1,56 @@
use axum::{
body::Body,
extract::{Path, State},
http::{header, HeaderValue, StatusCode},
response::{IntoResponse, Response},
};
use crate::embedded;
use crate::state::AppState;
/// Serve a UI static asset by request path.
///
/// When the `STATIC_DIR` env var is set AND the configured directory exists,
/// assets are read from disk (development override); otherwise they come from
/// the binary-embedded bundle. Empty or traversal-looking paths yield 404.
pub async fn serve(State(state): State<AppState>, Path(path): Path<String>) -> Response {
    // Normalize and reject obvious traversal attempts before touching the FS.
    let normalized = path.trim_start_matches('/').to_string();
    if normalized.is_empty() || normalized.contains("..") {
        return StatusCode::NOT_FOUND.into_response();
    }
    // Disk mode is opt-in via STATIC_DIR and requires the directory to exist.
    let use_disk = std::env::var("STATIC_DIR").is_ok() && state.config.static_dir.is_dir();
    if use_disk {
        let candidate = state.config.static_dir.join(&normalized);
        // Canonicalize and verify the resolved path stays inside static_dir,
        // so symlinks cannot escape it even after the ".." string check above.
        if let Ok(canonical) = candidate.canonicalize() {
            if canonical.starts_with(
                state
                    .config
                    .static_dir
                    .canonicalize()
                    .unwrap_or_else(|_| state.config.static_dir.clone()),
            ) {
                if let Ok(bytes) = tokio::fs::read(&canonical).await {
                    let mime = mime_guess::from_path(&canonical).first_or_octet_stream();
                    return build_response(&normalized, bytes, mime.as_ref());
                }
            }
        }
        // In disk mode a miss is final — no fallback to the embedded bundle.
        return StatusCode::NOT_FOUND.into_response();
    }
    // Embedded mode: look the asset up in the compiled-in bundle.
    match embedded::static_file(&normalized) {
        Some(file) => {
            let mime = mime_guess::from_path(&normalized).first_or_octet_stream();
            build_response(&normalized, file.data.into_owned(), mime.as_ref())
        }
        None => StatusCode::NOT_FOUND.into_response(),
    }
}
/// Assemble a static-asset response: the raw bytes as the body, the guessed
/// content-type (when it forms a valid header value), and a `no-cache`
/// policy so changed assets are picked up immediately.
fn build_response(_path: &str, bytes: Vec<u8>, mime: &str) -> Response {
    let mut response = Response::new(Body::from(bytes));
    let headers = response.headers_mut();
    // An unparsable mime string simply means no content-type header is set.
    if let Ok(content_type) = HeaderValue::from_str(mime) {
        headers.insert(header::CONTENT_TYPE, content_type);
    }
    headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache"));
    response
}

View File

@@ -66,7 +66,7 @@ pub async fn login_submit(
let next = form
.next
.as_deref()
.filter(|n| n.starts_with("/ui/") || *n == "/ui")
.filter(|n| is_allowed_redirect(n, &state.config.allowed_redirect_hosts))
.unwrap_or("/ui/buckets")
.to_string();
Redirect::to(&next).into_response()
@@ -80,6 +80,32 @@ pub async fn login_submit(
}
}
/// Returns true when `target` is a safe post-login redirect destination.
///
/// Internal `/ui` paths are always allowed. Absolute `http(s)` URLs are
/// allowed only when their host matches one of `allowed_hosts`
/// (case-insensitively). Everything else — other schemes, protocol-relative
/// `//host` targets, unparsable strings — is rejected.
fn is_allowed_redirect(target: &str, allowed_hosts: &[String]) -> bool {
    // Internal UI paths never leave the app.
    if target == "/ui" || target.starts_with("/ui/") {
        return true;
    }
    // Only absolute http(s) URLs may leave, and only to allow-listed hosts.
    let Some(rest) = target
        .strip_prefix("https://")
        .or_else(|| target.strip_prefix("http://"))
    else {
        return false;
    };
    // The authority ends at the first path, query, fragment, or backslash
    // delimiter. Cutting only on '/' (as before) let crafted targets such as
    // `https://evil.com?@allowed`, `https://evil.com#@allowed`, and
    // `https://evil.com\@allowed` smuggle an allow-listed host into the
    // userinfo-looking tail while the browser actually navigates to evil.com
    // (browsers treat '\' like '/').
    let authority_end = rest
        .find(|c: char| matches!(c, '/' | '?' | '#' | '\\'))
        .unwrap_or(rest.len());
    let authority = &rest[..authority_end];
    // Drop any userinfo ("user:pass@") prefix, then the ":port" suffix.
    let host_port = authority.rsplit('@').next().unwrap_or_default();
    let host = host_port.split(':').next().unwrap_or_default();
    allowed_hosts
        .iter()
        .any(|allowed| allowed.eq_ignore_ascii_case(host))
}
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
session.write(|s| {
s.user_id = None;
@@ -91,16 +117,6 @@ pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
Redirect::to("/login").into_response()
}
/// Render the dedicated CSRF-failure page with a 403 status.
pub async fn csrf_error_page(
    State(state): State<AppState>,
    Extension(session): Extension<SessionHandle>,
) -> Response {
    // Use the standard base template context (no page-specific extras).
    let context = base_context(&session, None);
    let mut response = render(&state, "csrf_error.html", &context);
    *response.status_mut() = StatusCode::FORBIDDEN;
    response
}
/// Redirect the site root to the bucket overview page.
pub async fn root_redirect() -> Response {
    let redirect = Redirect::to("/ui/buckets");
    redirect.into_response()
}

View File

@@ -49,6 +49,8 @@ const AWS_QUERY_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
.remove(b'.')
.remove(b'~');
const UI_OBJECT_BROWSER_MAX_KEYS: usize = 5000;
fn url_templates_for(bucket: &str) -> Value {
json!({
"download": format!("/ui/buckets/{}/objects/KEY_PLACEHOLDER/download", bucket),
@@ -117,13 +119,17 @@ fn storage_status(err: &StorageError) -> StatusCode {
match err {
StorageError::BucketNotFound(_)
| StorageError::ObjectNotFound { .. }
| StorageError::VersionNotFound { .. }
| StorageError::UploadNotFound(_) => StatusCode::NOT_FOUND,
StorageError::DeleteMarker { .. } => StatusCode::NOT_FOUND,
StorageError::MethodNotAllowed(_) => StatusCode::METHOD_NOT_ALLOWED,
StorageError::InvalidBucketName(_)
| StorageError::InvalidObjectKey(_)
| StorageError::InvalidRange
| StorageError::QuotaExceeded(_) => StatusCode::BAD_REQUEST,
StorageError::BucketAlreadyExists(_) => StatusCode::CONFLICT,
StorageError::BucketNotEmpty(_) => StatusCode::CONFLICT,
StorageError::ObjectCorrupted { .. } => StatusCode::UNPROCESSABLE_ENTITY,
StorageError::Io(_) | StorageError::Json(_) | StorageError::Internal(_) => {
StatusCode::INTERNAL_SERVER_ERROR
}
@@ -184,10 +190,7 @@ fn safe_attachment_filename(key: &str) -> String {
}
fn parse_api_base(state: &AppState) -> String {
std::env::var("API_BASE_URL")
.unwrap_or_else(|_| format!("http://{}", state.config.bind_addr))
.trim_end_matches('/')
.to_string()
state.config.api_base_url.trim_end_matches('/').to_string()
}
fn aws_query_encode(value: &str) -> String {
@@ -904,6 +907,35 @@ pub struct ListObjectsQuery {
pub prefix: Option<String>,
#[serde(default)]
pub start_after: Option<String>,
#[serde(default)]
pub delimiter: Option<String>,
}
/// Build the JSON representation of one object row for the UI object browser.
///
/// Includes RFC 3339 and display-formatted timestamps, defaulted
/// etag/storage-class/content-type fields, and every per-object UI endpoint
/// URL (download, preview, delete, presign, metadata, versions, restore
/// template, tags, copy, move).
fn object_json(bucket_name: &str, o: &myfsio_common::types::ObjectMeta) -> Value {
    json!({
        "key": o.key,
        "size": o.size,
        "last_modified": o.last_modified.to_rfc3339(),
        "last_modified_iso": o.last_modified.to_rfc3339(),
        // Human-readable variant consumed directly by the table template.
        "last_modified_display": o.last_modified.format("%Y-%m-%d %H:%M:%S").to_string(),
        "etag": o.etag.clone().unwrap_or_default(),
        "storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
        "content_type": o.content_type.clone().unwrap_or_default(),
        "download_url": build_ui_object_url(bucket_name, &o.key, "download"),
        "preview_url": build_ui_object_url(bucket_name, &o.key, "preview"),
        "delete_endpoint": build_ui_object_url(bucket_name, &o.key, "delete"),
        "presign_endpoint": build_ui_object_url(bucket_name, &o.key, "presign"),
        "metadata_url": build_ui_object_url(bucket_name, &o.key, "metadata"),
        "versions_endpoint": build_ui_object_url(bucket_name, &o.key, "versions"),
        // The client substitutes VERSION_ID_PLACEHOLDER with a concrete version id.
        "restore_template": format!(
            "/ui/buckets/{}/objects/{}/restore/VERSION_ID_PLACEHOLDER",
            bucket_name,
            encode_object_key(&o.key)
        ),
        "tags_url": build_ui_object_url(bucket_name, &o.key, "tags"),
        "copy_url": build_ui_object_url(bucket_name, &o.key, "copy"),
        "move_url": build_ui_object_url(bucket_name, &o.key, "move"),
    })
}
pub async fn list_bucket_objects(
@@ -917,6 +949,49 @@ pub async fn list_bucket_objects(
}
let max_keys = q.max_keys.unwrap_or(1000).min(5000);
let versioning_enabled = state
.storage
.is_versioning_enabled(&bucket_name)
.await
.unwrap_or(false);
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
let use_shallow = q.delimiter.as_deref() == Some("/");
if use_shallow {
let params = myfsio_common::types::ShallowListParams {
prefix: q.prefix.clone().unwrap_or_default(),
delimiter: "/".to_string(),
max_keys,
continuation_token: q.continuation_token.clone(),
};
return match state
.storage
.list_objects_shallow(&bucket_name, &params)
.await
{
Ok(res) => {
let objects: Vec<Value> = res
.objects
.iter()
.map(|o| object_json(&bucket_name, o))
.collect();
Json(json!({
"versioning_enabled": versioning_enabled,
"total_count": total_count,
"is_truncated": res.is_truncated,
"next_continuation_token": res.next_continuation_token,
"url_templates": url_templates_for(&bucket_name),
"objects": objects,
"common_prefixes": res.common_prefixes,
}))
.into_response()
}
Err(e) => storage_json_error(e),
};
}
let params = ListParams {
max_keys,
continuation_token: q.continuation_token.clone(),
@@ -924,46 +999,12 @@ pub async fn list_bucket_objects(
start_after: q.start_after.clone(),
};
let versioning_enabled = state
.storage
.is_versioning_enabled(&bucket_name)
.await
.unwrap_or(false);
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
match state.storage.list_objects(&bucket_name, &params).await {
Ok(res) => {
let objects: Vec<Value> = res
.objects
.iter()
.map(|o| {
json!({
"key": o.key,
"size": o.size,
"last_modified": o.last_modified.to_rfc3339(),
"last_modified_iso": o.last_modified.to_rfc3339(),
"last_modified_display": o.last_modified.format("%Y-%m-%d %H:%M:%S").to_string(),
"etag": o.etag.clone().unwrap_or_default(),
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
"content_type": o.content_type.clone().unwrap_or_default(),
"download_url": build_ui_object_url(&bucket_name, &o.key, "download"),
"preview_url": build_ui_object_url(&bucket_name, &o.key, "preview"),
"delete_endpoint": build_ui_object_url(&bucket_name, &o.key, "delete"),
"presign_endpoint": build_ui_object_url(&bucket_name, &o.key, "presign"),
"metadata_url": build_ui_object_url(&bucket_name, &o.key, "metadata"),
"versions_endpoint": build_ui_object_url(&bucket_name, &o.key, "versions"),
"restore_template": format!(
"/ui/buckets/{}/objects/{}/restore/VERSION_ID_PLACEHOLDER",
bucket_name,
encode_object_key(&o.key)
),
"tags_url": build_ui_object_url(&bucket_name, &o.key, "tags"),
"copy_url": build_ui_object_url(&bucket_name, &o.key, "copy"),
"move_url": build_ui_object_url(&bucket_name, &o.key, "move"),
})
})
.map(|o| object_json(&bucket_name, o))
.collect();
Json(json!({
@@ -1006,39 +1047,62 @@ pub async fn stream_bucket_objects(
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
let mut lines: Vec<String> = Vec::new();
lines.push(
json!({
let use_delimiter = q.delimiter.as_deref() == Some("/");
let prefix = q.prefix.clone().unwrap_or_default();
let (tx, rx) = tokio::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>(64);
let meta_line = json!({
"type": "meta",
"url_templates": url_templates_for(&bucket_name),
"versioning_enabled": versioning_enabled,
})
.to_string(),
);
lines.push(json!({ "type": "count", "total_count": total_count }).to_string());
.to_string()
+ "\n";
let count_line = json!({ "type": "count", "total_count": total_count }).to_string() + "\n";
let use_delimiter = q.delimiter.as_deref() == Some("/");
let prefix = q.prefix.clone().unwrap_or_default();
let storage = state.storage.clone();
let bucket = bucket_name.clone();
tokio::spawn(async move {
if tx
.send(Ok(bytes::Bytes::from(meta_line.into_bytes())))
.await
.is_err()
{
return;
}
if tx
.send(Ok(bytes::Bytes::from(count_line.into_bytes())))
.await
.is_err()
{
return;
}
if use_delimiter {
let mut token: Option<String> = None;
loop {
let params = myfsio_common::types::ShallowListParams {
prefix: prefix.clone(),
delimiter: "/".to_string(),
max_keys: 5000,
continuation_token: None,
max_keys: UI_OBJECT_BROWSER_MAX_KEYS,
continuation_token: token.clone(),
};
match state
.storage
.list_objects_shallow(&bucket_name, &params)
.await
{
match storage.list_objects_shallow(&bucket, &params).await {
Ok(res) => {
for p in &res.common_prefixes {
lines.push(json!({ "type": "folder", "prefix": p }).to_string());
let line = json!({ "type": "folder", "prefix": p }).to_string() + "\n";
if tx
.send(Ok(bytes::Bytes::from(line.into_bytes())))
.await
.is_err()
{
return;
}
}
for o in &res.objects {
lines.push(
json!({
let line = json!({
"type": "object",
"key": o.key,
"size": o.size,
@@ -1048,11 +1112,28 @@ pub async fn stream_bucket_objects(
"etag": o.etag.clone().unwrap_or_default(),
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
})
.to_string(),
);
.to_string()
+ "\n";
if tx
.send(Ok(bytes::Bytes::from(line.into_bytes())))
.await
.is_err()
{
return;
}
}
if !res.is_truncated || res.next_continuation_token.is_none() {
break;
}
token = res.next_continuation_token;
}
Err(e) => {
let line =
json!({ "type": "error", "error": e.to_string() }).to_string() + "\n";
let _ = tx.send(Ok(bytes::Bytes::from(line.into_bytes()))).await;
return;
}
}
Err(e) => lines.push(json!({ "type": "error", "error": e.to_string() }).to_string()),
}
} else {
let mut token: Option<String> = None;
@@ -1067,11 +1148,10 @@ pub async fn stream_bucket_objects(
},
start_after: None,
};
match state.storage.list_objects(&bucket_name, &params).await {
match storage.list_objects(&bucket, &params).await {
Ok(res) => {
for o in &res.objects {
lines.push(
json!({
let line = json!({
"type": "object",
"key": o.key,
"size": o.size,
@@ -1081,8 +1161,15 @@ pub async fn stream_bucket_objects(
"etag": o.etag.clone().unwrap_or_default(),
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
})
.to_string(),
);
.to_string()
+ "\n";
if tx
.send(Ok(bytes::Bytes::from(line.into_bytes())))
.await
.is_err()
{
return;
}
}
if !res.is_truncated || res.next_continuation_token.is_none() {
break;
@@ -1090,24 +1177,138 @@ pub async fn stream_bucket_objects(
token = res.next_continuation_token;
}
Err(e) => {
lines.push(json!({ "type": "error", "error": e.to_string() }).to_string());
break;
let line =
json!({ "type": "error", "error": e.to_string() }).to_string() + "\n";
let _ = tx.send(Ok(bytes::Bytes::from(line.into_bytes()))).await;
return;
}
}
}
}
lines.push(json!({ "type": "done" }).to_string());
let done_line = json!({ "type": "done" }).to_string() + "\n";
let _ = tx
.send(Ok(bytes::Bytes::from(done_line.into_bytes())))
.await;
});
let stream = tokio_stream::wrappers::ReceiverStream::new(rx);
let body = Body::from_stream(stream);
let body = lines.join("\n") + "\n";
let mut headers = HeaderMap::new();
headers.insert(
header::CONTENT_TYPE,
"application/x-ndjson; charset=utf-8".parse().unwrap(),
);
headers.insert(header::CACHE_CONTROL, "no-cache".parse().unwrap());
headers.insert("x-accel-buffering", "no".parse().unwrap());
(StatusCode::OK, headers, body).into_response()
}
/// Query parameters accepted by the UI object-search endpoint.
#[derive(Deserialize, Default)]
pub struct SearchObjectsQuery {
    /// Case-insensitive substring to match against object keys.
    #[serde(default)]
    pub q: Option<String>,
    /// Optional key prefix that narrows the listing before matching.
    #[serde(default)]
    pub prefix: Option<String>,
    /// Maximum matches to return (handler clamps this to 1..=1000; default 500).
    #[serde(default)]
    pub limit: Option<usize>,
    /// Resume marker: list keys strictly after this key when paging.
    #[serde(default)]
    pub start_after: Option<String>,
}
/// Case-insensitive substring search over a bucket's object keys for the UI.
///
/// Pages through `list_objects` in batches of 1000, collecting matches up to
/// `limit`. When the limit is hit with more listings remaining, `truncated`
/// is true and `next_token` carries the last *matched* key so the client can
/// resume via `start_after`. An empty search term returns an empty result set
/// immediately (not an error).
pub async fn search_bucket_objects(
    State(state): State<AppState>,
    Extension(_session): Extension<SessionHandle>,
    Path(bucket_name): Path<String>,
    Query(q): Query<SearchObjectsQuery>,
) -> Response {
    if !matches!(state.storage.bucket_exists(&bucket_name).await, Ok(true)) {
        return json_error(StatusCode::NOT_FOUND, "Bucket not found");
    }
    // Matching is lowercase-on-lowercase (ASCII and Unicode lowered via
    // to_lowercase on both sides).
    let term = q.q.unwrap_or_default().to_lowercase();
    let limit = q.limit.unwrap_or(500).clamp(1, 1000);
    let prefix = q.prefix.clone().unwrap_or_default();
    let start_after = q.start_after.clone().filter(|s| !s.is_empty());
    if term.is_empty() {
        return Json(json!({ "results": [], "truncated": false, "next_token": Value::Null }))
            .into_response();
    }
    let mut results: Vec<Value> = Vec::new();
    let mut truncated = false;
    // Last key actually added to `results` — becomes the resume token.
    let mut last_match_key: Option<String> = None;
    let mut token: Option<String> = None;
    // `start_after` applies to the first listing page only; `.take()` below
    // clears it so subsequent pages rely on the continuation token.
    let mut start_after_arg = start_after;
    loop {
        let params = ListParams {
            max_keys: 1000,
            continuation_token: token.clone(),
            prefix: if prefix.is_empty() {
                None
            } else {
                Some(prefix.clone())
            },
            start_after: start_after_arg.take(),
        };
        match state.storage.list_objects(&bucket_name, &params).await {
            Ok(res) => {
                for o in &res.objects {
                    if o.key.to_lowercase().contains(&term) {
                        // The match that overflows the limit is NOT included;
                        // it will be re-found when the client resumes from
                        // the last included key.
                        if results.len() >= limit {
                            truncated = true;
                            break;
                        }
                        last_match_key = Some(o.key.clone());
                        results.push(object_json(&bucket_name, o));
                    }
                }
                // Stop when full, or when the backend has no further pages.
                // A full result set with more listings remaining also counts
                // as truncated even if no in-page match overflowed.
                if truncated || !res.is_truncated || res.next_continuation_token.is_none() {
                    if res.is_truncated && results.len() >= limit {
                        truncated = true;
                    }
                    break;
                }
                token = res.next_continuation_token;
            }
            Err(e) => return storage_json_error(e),
        }
    }
    // Only expose a resume token when there may be more matches.
    let next_token = if truncated { last_match_key } else { None };
    Json(json!({
        "results": results,
        "truncated": truncated,
        "next_token": next_token,
    }))
    .into_response()
}
/// Return bucket statistics as JSON: live object/byte counts, non-current
/// version counts, and combined totals.
pub async fn bucket_stats_json(
    State(state): State<AppState>,
    Extension(_session): Extension<SessionHandle>,
    Path(bucket_name): Path<String>,
) -> Response {
    // Unknown buckets (or failed existence checks) are a 404.
    if !matches!(state.storage.bucket_exists(&bucket_name).await, Ok(true)) {
        return json_error(StatusCode::NOT_FOUND, "Bucket not found");
    }
    let stats = match state.storage.bucket_stats(&bucket_name).await {
        Ok(stats) => stats,
        Err(e) => return storage_json_error(e),
    };
    Json(json!({
        "objects": stats.objects,
        "bytes": stats.bytes,
        "version_count": stats.version_count,
        "version_bytes": stats.version_bytes,
        // Totals fold non-current versions into the live counts.
        "total_objects": stats.objects + stats.version_count,
        "total_bytes": stats.bytes + stats.version_bytes,
    }))
    .into_response()
}
pub async fn list_bucket_folders(
State(state): State<AppState>,
Extension(_session): Extension<SessionHandle>,
@@ -1122,7 +1323,7 @@ pub async fn list_bucket_folders(
let params = myfsio_common::types::ShallowListParams {
prefix: prefix.clone(),
delimiter: "/".to_string(),
max_keys: 5000,
max_keys: UI_OBJECT_BROWSER_MAX_KEYS,
continuation_token: None,
};
match state
@@ -1153,13 +1354,6 @@ pub async fn list_copy_targets(
Json(json!({ "buckets": buckets })).into_response()
}
/// Generic 501 JSON response for UI endpoints that exist but are not wired up yet.
pub async fn json_not_implemented() -> Response {
    let message = "This feature is not implemented yet";
    json_error(StatusCode::NOT_IMPLEMENTED, message)
}
#[derive(Deserialize)]
pub struct ConnectionTestPayload {
pub endpoint_url: String,
@@ -1976,6 +2170,7 @@ pub async fn complete_multipart_upload(
State(state): State<AppState>,
Extension(_session): Extension<SessionHandle>,
Path((bucket_name, upload_id)): Path<(String, String)>,
headers: HeaderMap,
body: Body,
) -> Response {
let payload: CompleteMultipartPayload = match parse_json_body(body).await {
@@ -1996,17 +2191,36 @@ pub async fn complete_multipart_upload(
})
.collect::<Vec<_>>();
let upload_key = match state.storage.list_multipart_uploads(&bucket_name).await {
Ok(uploads) => uploads
.into_iter()
.find(|u| u.upload_id == upload_id)
.map(|u| u.key),
Err(err) => return storage_json_error(err),
};
if let Some(ref key) = upload_key {
if let Err(response) =
super::ensure_archived_null_lock_allows_overwrite(&state, &bucket_name, key, Some(&headers))
.await
{
return response;
}
}
match state
.storage
.complete_multipart(&bucket_name, &upload_id, &parts)
.await
{
Ok(meta) => json_ok(json!({
Ok(meta) => {
super::trigger_replication(&state, &bucket_name, &meta.key, "write");
json_ok(json!({
"key": meta.key,
"size": meta.size,
"etag": meta.etag.unwrap_or_default(),
"last_modified": meta.last_modified.to_rfc3339(),
})),
}))
}
Err(err) => storage_json_error(err),
}
}
@@ -2230,7 +2444,11 @@ async fn object_metadata_json(state: &AppState, bucket: &str, key: &str) -> Resp
.await
.unwrap_or_default();
let mut out = metadata.clone();
let mut out: std::collections::HashMap<String, String> = metadata
.iter()
.filter(|(k, _)| !(k.starts_with("__") && k.ends_with("__")))
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
if let Some(content_type) = head.content_type {
out.insert("Content-Type".to_string(), content_type);
}
@@ -2411,8 +2629,11 @@ async fn update_object_tags(state: &AppState, bucket: &str, key: &str, body: Bod
Err(response) => return response,
};
if payload.tags.len() > 50 {
return json_error(StatusCode::BAD_REQUEST, "Maximum 50 tags allowed");
if payload.tags.len() > state.config.object_tag_limit {
return json_error(
StatusCode::BAD_REQUEST,
format!("Maximum {} tags allowed", state.config.object_tag_limit),
);
}
let tags = payload
@@ -2450,7 +2671,13 @@ struct CopyMovePayload {
dest_key: String,
}
async fn copy_object_json(state: &AppState, bucket: &str, key: &str, body: Body) -> Response {
async fn copy_object_json(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
body: Body,
) -> Response {
let payload: CopyMovePayload = match parse_json_body(body).await {
Ok(payload) => payload,
Err(response) => return response,
@@ -2464,23 +2691,43 @@ async fn copy_object_json(state: &AppState, bucket: &str, key: &str, body: Body)
);
}
if let Err(response) = super::ensure_archived_null_lock_allows_overwrite(
state,
dest_bucket,
dest_key,
Some(headers),
)
.await
{
return response;
}
match state
.storage
.copy_object(bucket, key, dest_bucket, dest_key)
.await
{
Ok(_) => Json(json!({
Ok(_) => {
super::trigger_replication(state, dest_bucket, dest_key, "write");
Json(json!({
"status": "ok",
"message": format!("Copied to {}/{}", dest_bucket, dest_key),
"dest_bucket": dest_bucket,
"dest_key": dest_key,
}))
.into_response(),
.into_response()
}
Err(err) => storage_json_error(err),
}
}
async fn move_object_json(state: &AppState, bucket: &str, key: &str, body: Body) -> Response {
async fn move_object_json(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
body: Body,
) -> Response {
let payload: CopyMovePayload = match parse_json_body(body).await {
Ok(payload) => payload,
Err(response) => return response,
@@ -2500,15 +2747,30 @@ async fn move_object_json(state: &AppState, bucket: &str, key: &str, body: Body)
);
}
if let Err(response) = super::ensure_archived_null_lock_allows_overwrite(
state,
dest_bucket,
dest_key,
Some(headers),
)
.await
{
return response;
}
match state.storage.copy_object(bucket, key, dest_bucket, dest_key).await {
Ok(_) => match state.storage.delete_object(bucket, key).await {
Ok(()) => Json(json!({
Ok(_) => {
super::trigger_replication(state, dest_bucket, dest_key, "write");
super::trigger_replication(state, bucket, key, "delete");
Json(json!({
"status": "ok",
"message": format!("Moved to {}/{}", dest_bucket, dest_key),
"dest_bucket": dest_bucket,
"dest_key": dest_key,
}))
.into_response(),
.into_response()
}
Err(_) => Json(json!({
"status": "partial",
"message": format!("Copied to {}/{} but failed to delete source", dest_bucket, dest_key),
@@ -2561,6 +2823,7 @@ async fn delete_object_json(
if let Err(err) = state.storage.delete_object(bucket, key).await {
return storage_json_error(err);
}
super::trigger_replication(state, bucket, key, "delete");
if let Err(err) = purge_object_versions_for_key(state, bucket, key).await {
return json_error(StatusCode::BAD_REQUEST, err);
}
@@ -2572,11 +2835,14 @@ async fn delete_object_json(
}
match state.storage.delete_object(bucket, key).await {
Ok(()) => Json(json!({
Ok(_) => {
super::trigger_replication(state, bucket, key, "delete");
Json(json!({
"status": "ok",
"message": format!("Deleted '{}'", key),
}))
.into_response(),
.into_response()
}
Err(err) => storage_json_error(err),
}
}
@@ -2637,6 +2903,7 @@ async fn restore_object_version_json(
{
return storage_json_error(err);
}
super::trigger_replication(state, bucket, key, "write");
let mut message = format!("Restored '{}'", key);
if live_exists && versioning_enabled {
@@ -2686,6 +2953,14 @@ fn parse_object_post_action(rest: &str) -> Option<(String, ObjectPostAction)> {
ObjectPostAction::Restore(version_id.to_string()),
));
}
if let Some(key_with_version) = rest.strip_suffix("/restore") {
if let Some((key, version_id)) = key_with_version.rsplit_once("/versions/") {
return Some((
key.to_string(),
ObjectPostAction::Restore(version_id.to_string()),
));
}
}
for (suffix, action) in [
("/delete", ObjectPostAction::Delete),
("/presign", ObjectPostAction::Presign),
@@ -2745,8 +3020,12 @@ pub async fn object_post_dispatch(
object_presign_json(&state, &session, &bucket_name, &key, body).await
}
ObjectPostAction::Tags => update_object_tags(&state, &bucket_name, &key, body).await,
ObjectPostAction::Copy => copy_object_json(&state, &bucket_name, &key, body).await,
ObjectPostAction::Move => move_object_json(&state, &bucket_name, &key, body).await,
ObjectPostAction::Copy => {
copy_object_json(&state, &bucket_name, &key, &headers, body).await
}
ObjectPostAction::Move => {
move_object_json(&state, &bucket_name, &key, &headers, body).await
}
ObjectPostAction::Restore(version_id) => {
restore_object_version_json(&state, &bucket_name, &key, &version_id).await
}
@@ -2824,13 +3103,23 @@ pub async fn bulk_delete_objects(
"No objects found under the selected folders",
);
}
if keys.len() > state.config.bulk_delete_max_keys {
return json_error(
StatusCode::BAD_REQUEST,
format!(
"Bulk delete supports at most {} keys",
state.config.bulk_delete_max_keys
),
);
}
let mut deleted = Vec::new();
let mut errors = Vec::new();
for key in keys {
match state.storage.delete_object(&bucket_name, &key).await {
Ok(()) => {
Ok(_) => {
super::trigger_replication(&state, &bucket_name, &key, "delete");
if payload.purge_versions {
if let Err(err) =
purge_object_versions_for_key(&state, &bucket_name, &key).await
@@ -3045,6 +3334,7 @@ pub async fn archived_post_dispatch(
match purge_object_versions_for_key(&state, &bucket_name, key).await {
Ok(()) => {
let _ = state.storage.delete_object(&bucket_name, key).await;
super::trigger_replication(&state, &bucket_name, key, "delete");
Json(json!({
"status": "ok",
"message": format!("Removed archived versions for '{}'", key),
@@ -3163,20 +3453,36 @@ fn apply_history_limit(mut value: Value, limit: Option<usize>) -> Value {
value
}
/// Placeholder endpoint for bucket features that are not implemented yet.
///
/// Always responds with a `not_implemented` status and an empty `items`
/// list so UI callers can render gracefully.
pub async fn bucket_stub_json(Extension(_session): Extension<SessionHandle>) -> Response {
    let payload = json!({
        "status": "not_implemented",
        "items": [],
    });
    Json(payload).into_response()
}
pub async fn lifecycle_history_stub(
pub async fn lifecycle_history(
State(state): State<AppState>,
Extension(_session): Extension<SessionHandle>,
Path(_bucket_name): Path<String>,
Path(bucket_name): Path<String>,
Query(params): Query<HashMap<String, String>>,
) -> Response {
Json(json!({
"enabled": state.config.lifecycle_enabled,
let limit = params
.get("limit")
.and_then(|value| value.parse::<usize>().ok())
.unwrap_or(50);
let offset = params
.get("offset")
.and_then(|value| value.parse::<usize>().ok())
.unwrap_or(0);
if !state.config.lifecycle_enabled {
return Json(json!({
"executions": [],
"total": 0,
"limit": limit,
"offset": offset,
"enabled": false,
}))
.into_response();
}
Json(crate::services::lifecycle::read_history(
&state.config.storage_root,
&bucket_name,
limit,
offset,
))
.into_response()
}
@@ -3258,14 +3564,32 @@ pub async fn retry_replication_failure(
Path(bucket_name): Path<String>,
Query(q): Query<ReplicationObjectKeyQuery>,
) -> Response {
let object_key = q.object_key.trim();
retry_replication_failure_key(&state, &bucket_name, q.object_key.trim()).await
}
/// Path-based variant of the replication-failure retry endpoint.
///
/// The wildcard `rest` segment must end in `/retry`; everything before
/// that suffix is treated as the object key. Delegates to
/// `retry_replication_failure_key` for the actual work.
pub async fn retry_replication_failure_path(
    State(state): State<AppState>,
    Extension(_session): Extension<SessionHandle>,
    Path((bucket_name, rest)): Path<(String, String)>,
) -> Response {
    match rest.strip_suffix("/retry") {
        Some(object_key) => {
            retry_replication_failure_key(&state, &bucket_name, object_key.trim()).await
        }
        None => json_error(StatusCode::NOT_FOUND, "Unknown replication failure action"),
    }
}
async fn retry_replication_failure_key(
state: &AppState,
bucket_name: &str,
object_key: &str,
) -> Response {
if object_key.is_empty() {
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
}
if state
.replication
.retry_failed(&bucket_name, object_key)
.retry_failed(bucket_name, object_key)
.await
{
json_ok(json!({
@@ -3296,12 +3620,27 @@ pub async fn dismiss_replication_failure(
Path(bucket_name): Path<String>,
Query(q): Query<ReplicationObjectKeyQuery>,
) -> Response {
let object_key = q.object_key.trim();
dismiss_replication_failure_key(&state, &bucket_name, q.object_key.trim())
}
/// Path-based variant of the replication-failure dismissal endpoint.
///
/// Trims the object key taken from the URL path and delegates to
/// `dismiss_replication_failure_key`.
pub async fn dismiss_replication_failure_path(
    State(state): State<AppState>,
    Extension(_session): Extension<SessionHandle>,
    Path((bucket_name, object_key)): Path<(String, String)>,
) -> Response {
    let key = object_key.trim();
    dismiss_replication_failure_key(&state, &bucket_name, key)
}
fn dismiss_replication_failure_key(
state: &AppState,
bucket_name: &str,
object_key: &str,
) -> Response {
if object_key.is_empty() {
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
}
if state.replication.dismiss_failure(&bucket_name, object_key) {
if state.replication.dismiss_failure(bucket_name, object_key) {
json_ok(json!({
"status": "dismissed",
"object_key": object_key,

View File

@@ -1,8 +1,10 @@
use std::collections::HashMap;
use axum::body::Body;
use axum::extract::{Extension, Form, Path, Query, State};
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::{IntoResponse, Redirect, Response};
use http_body_util::BodyExt;
use serde_json::{json, Value};
use tera::Context;
@@ -121,6 +123,7 @@ pub fn register_ui_endpoints(engine: &TemplateEngine) {
("ui.sites_dashboard", "/ui/sites"),
("ui.update_local_site", "/ui/sites/local"),
("ui.add_peer_site", "/ui/sites/peers"),
("ui.cluster_dashboard", "/ui/cluster"),
("ui.metrics_dashboard", "/ui/metrics"),
("ui.system_dashboard", "/ui/system"),
("ui.system_gc_status", "/ui/system/gc/status"),
@@ -203,6 +206,57 @@ fn wants_json(headers: &HeaderMap) -> bool {
.unwrap_or(false)
}
/// Parses a form body that may be either URL-encoded or multipart.
///
/// Returns the collected name/value text fields, or a human-readable
/// error string suitable for flashing back to the UI. File fields in
/// multipart bodies are skipped — only text fields are collected; later
/// duplicate names overwrite earlier ones.
async fn parse_form_any(
    headers: &HeaderMap,
    body: Body,
) -> Result<HashMap<String, String>, String> {
    // The Content-Type header picks the parser; default to "" so a
    // missing header falls through to the URL-encoded branch.
    let content_type = headers
        .get(header::CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("")
        .to_string();
    let is_multipart = content_type
        .to_ascii_lowercase()
        .starts_with("multipart/form-data");
    // Buffer the entire body up front; both parsers need the full bytes.
    let bytes = body
        .collect()
        .await
        .map_err(|e| format!("Failed to read request body: {}", e))?
        .to_bytes();
    if is_multipart {
        let boundary = multer::parse_boundary(&content_type)
            .map_err(|_| "Missing multipart boundary".to_string())?;
        // multer consumes a byte stream; wrap the already-buffered bytes
        // in a one-shot stream.
        let stream = futures::stream::once(async move { Ok::<_, std::io::Error>(bytes) });
        let mut multipart = multer::Multipart::new(stream, boundary);
        let mut out = HashMap::new();
        while let Some(field) = multipart
            .next_field()
            .await
            .map_err(|e| format!("Malformed multipart body: {}", e))?
        {
            // Unnamed fields cannot be keyed — skip them.
            let name = match field.name() {
                Some(name) => name.to_string(),
                None => continue,
            };
            // Skip file uploads; this helper only collects text fields.
            if field.file_name().is_some() {
                continue;
            }
            let value = field
                .text()
                .await
                .map_err(|e| format!("Invalid multipart field '{}': {}", name, e))?;
            out.insert(name, value);
        }
        Ok(out)
    } else {
        // Standard application/x-www-form-urlencoded body.
        let parsed: Vec<(String, String)> = serde_urlencoded::from_bytes(&bytes)
            .map_err(|e| format!("Invalid form body: {}", e))?;
        Ok(parsed.into_iter().collect())
    }
}
fn bucket_tab_redirect(bucket_name: &str, tab: &str) -> Response {
Redirect::to(&format!("/ui/buckets/{}?tab={}", bucket_name, tab)).into_response()
}
@@ -231,10 +285,7 @@ fn default_public_policy(bucket_name: &str) -> String {
}
fn parse_api_base(state: &AppState) -> (String, String) {
let api_base = std::env::var("API_BASE_URL")
.unwrap_or_else(|_| format!("http://{}", state.config.bind_addr))
.trim_end_matches('/')
.to_string();
let api_base = state.config.api_base_url.trim_end_matches('/').to_string();
let api_host = api_base
.split("://")
.nth(1)
@@ -257,11 +308,23 @@ fn config_encryption_to_ui(value: Option<&Value>) -> Value {
}
fn config_website_to_ui(value: Option<&Value>) -> Value {
match value {
let parsed = match value {
Some(Value::Object(map)) => Value::Object(map.clone()),
Some(Value::String(s)) => serde_json::from_str(s).unwrap_or(Value::Null),
_ => Value::Null,
}
};
let Some(map) = parsed.as_object() else {
return Value::Null;
};
json!({
"index_document": map
.get("index_document")
.and_then(Value::as_str)
.unwrap_or("index.html"),
"error_document": map.get("error_document").and_then(Value::as_str),
})
}
fn bucket_access_descriptor(
@@ -334,7 +397,13 @@ pub async fn bucket_detail(
Query(request_args): Query<HashMap<String, String>>,
) -> Response {
if !matches!(state.storage.bucket_exists(&bucket_name).await, Ok(true)) {
return (StatusCode::NOT_FOUND, "Bucket not found").into_response();
session.write(|s| {
s.push_flash(
"danger",
format!("Bucket '{}' does not exist.", bucket_name),
)
});
return Redirect::to("/ui/buckets").into_response();
}
let mut ctx = page_context(&state, &session, "ui.bucket_detail");
@@ -359,11 +428,19 @@ pub async fn bucket_detail(
let target_conn = replication_rule
.as_ref()
.and_then(|rule| state.connections.get(&rule.target_connection_id));
let versioning_enabled = state
let versioning_status_enum = state
.storage
.is_versioning_enabled(&bucket_name)
.get_versioning_status(&bucket_name)
.await
.unwrap_or(false);
.unwrap_or(myfsio_common::types::VersioningStatus::Disabled);
let versioning_enabled = matches!(
versioning_status_enum,
myfsio_common::types::VersioningStatus::Enabled
);
let versioning_suspended = matches!(
versioning_status_enum,
myfsio_common::types::VersioningStatus::Suspended
);
let encryption_config = config_encryption_to_ui(bucket_config.encryption.as_ref());
let website_config = config_website_to_ui(bucket_config.website.as_ref());
let quota = bucket_config.quota.clone();
@@ -423,12 +500,13 @@ pub async fn bucket_detail(
);
ctx.insert("has_quota", &quota.is_some());
ctx.insert("versioning_enabled", &versioning_enabled);
ctx.insert("versioning_suspended", &versioning_suspended);
ctx.insert(
"versioning_status",
&(if versioning_enabled {
"Enabled"
} else {
"Disabled"
&(match versioning_status_enum {
myfsio_common::types::VersioningStatus::Enabled => "Enabled",
myfsio_common::types::VersioningStatus::Suspended => "Suspended",
myfsio_common::types::VersioningStatus::Disabled => "Disabled",
}),
);
ctx.insert("encryption_config", &encryption_config);
@@ -643,13 +721,21 @@ pub async fn iam_dashboard(
.as_array()
.map(|items| {
items.iter().any(|policy| {
let bucket_wildcard = policy
.get("bucket")
.and_then(|v| v.as_str())
.map(|b| b == "*")
.unwrap_or(false);
if !bucket_wildcard {
return false;
}
policy
.get("actions")
.and_then(|value| value.as_array())
.map(|actions| {
actions
.iter()
.any(|action| matches!(action.as_str(), Some("*") | Some("iam:*")))
.any(|action| action.as_str() == Some("*"))
})
.unwrap_or(false)
})
@@ -1114,6 +1200,7 @@ pub async fn sites_dashboard(
"region": p.region,
"priority": p.priority,
"connection_id": p.connection_id,
"peer_inbound_access_key": p.peer_inbound_access_key,
"is_healthy": p.is_healthy,
"last_health_check": p.last_health_check,
})
@@ -1122,20 +1209,60 @@ pub async fn sites_dashboard(
})
.unwrap_or_default();
let rules = state.replication.rules_snapshot();
let sync_snapshot = state
.site_sync
.as_ref()
.map(|w| w.snapshot_stats())
.unwrap_or_default();
let peers_with_stats: Vec<Value> = peers
.iter()
.cloned()
.map(|peer| {
let has_connection = peer
let connection_id = peer
.get("connection_id")
.and_then(|value| value.as_str())
.map(|value| !value.is_empty())
.unwrap_or(false);
.filter(|value| !value.is_empty())
.map(|value| value.to_string());
let has_connection = connection_id.is_some();
let mut buckets_syncing: u64 = 0;
let mut has_bidirectional = false;
let mut last_sync_at: Option<f64> = None;
let mut total_pulled: u64 = 0;
let mut total_errors: u64 = 0;
if let Some(ref conn_id) = connection_id {
for (bucket, rule) in &rules {
if &rule.target_connection_id != conn_id || !rule.enabled {
continue;
}
if rule.mode == crate::services::replication::MODE_BIDIRECTIONAL {
has_bidirectional = true;
buckets_syncing += 1;
if let Some(stats) = sync_snapshot.get(bucket) {
total_pulled += stats.objects_pulled;
total_errors += stats.errors;
if let Some(ts) = stats.last_sync_at {
last_sync_at = match last_sync_at {
Some(prev) if prev > ts => Some(prev),
_ => Some(ts),
};
}
}
}
}
}
json!({
"peer": peer,
"has_connection": has_connection,
"buckets_syncing": 0,
"has_bidirectional": false,
"buckets_syncing": buckets_syncing,
"has_bidirectional": has_bidirectional,
"last_sync_at": last_sync_at,
"objects_pulled": total_pulled,
"errors": total_errors,
})
})
.collect();
@@ -1161,20 +1288,195 @@ pub async fn sites_dashboard(
ctx.insert("connections", &conns);
ctx.insert(
"config_site_id",
&std::env::var("SITE_ID").unwrap_or_default(),
&state.config.site_id.clone().unwrap_or_default(),
);
ctx.insert(
"config_site_endpoint",
&std::env::var("SITE_ENDPOINT").unwrap_or_default(),
);
ctx.insert(
"config_site_region",
&std::env::var("SITE_REGION").unwrap_or_else(|_| state.config.region.clone()),
&state.config.site_endpoint.clone().unwrap_or_default(),
);
ctx.insert("config_site_region", &state.config.site_region);
ctx.insert("topology", &json!({"sites": [], "connections": []}));
render(&state, "sites.html", &ctx)
}
/// JSON endpoint backing the cluster page's live refresh (`/ui/cluster/data`).
///
/// Passing `?force=1` (or `?force=true`) invalidates both cluster caches
/// before the site list is rebuilt, so the response reflects fresh peer
/// data instead of the cached aggregate.
pub async fn cluster_data_json(
    State(state): State<AppState>,
    Extension(_session): Extension<SessionHandle>,
    Query(params): Query<HashMap<String, String>>,
) -> Response {
    let force_refresh = params
        .get("force")
        .is_some_and(|v| v == "1" || v.eq_ignore_ascii_case("true"));
    if force_refresh {
        state.cluster_aggregate_cache.lock().take();
        state.cluster_overview_cache.lock().take();
    }
    let sites = build_cluster_sites(&state).await;
    let totals = cluster_totals(&sites);
    let body = json!({ "sites": sites, "totals": totals });
    (
        StatusCode::OK,
        [(header::CONTENT_TYPE, "application/json")],
        body.to_string(),
    )
        .into_response()
}
fn cluster_totals(sites: &[Value]) -> Value {
let total_buckets: u64 = sites
.iter()
.filter_map(|s| s.get("buckets").and_then(|v| v.as_u64()))
.sum();
let total_objects: u64 = sites
.iter()
.filter_map(|s| s.get("objects").and_then(|v| v.as_u64()))
.sum();
let total_size_bytes: u64 = sites
.iter()
.filter_map(|s| s.get("size_bytes").and_then(|v| v.as_u64()))
.sum();
let online = sites
.iter()
.filter(|s| s.get("online").and_then(|v| v.as_bool()).unwrap_or(false))
.count();
json!({
"buckets": total_buckets,
"objects": total_objects,
"size_bytes": total_size_bytes,
"online_count": online,
"total_count": sites.len(),
})
}
pub async fn cluster_dashboard(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
let mut ctx = page_context(&state, &session, "ui.cluster_dashboard");
let sites = build_cluster_sites(&state).await;
let total_buckets: u64 = sites
.iter()
.filter_map(|s| s.get("buckets").and_then(|v| v.as_u64()))
.sum();
let total_objects: u64 = sites
.iter()
.filter_map(|s| s.get("objects").and_then(|v| v.as_u64()))
.sum();
let total_size_bytes: u64 = sites
.iter()
.filter_map(|s| s.get("size_bytes").and_then(|v| v.as_u64()))
.sum();
let online_count = sites
.iter()
.filter(|s| s.get("online").and_then(|v| v.as_bool()).unwrap_or(false))
.count();
ctx.insert("cluster_sites", &sites);
ctx.insert("cluster_total_buckets", &total_buckets);
ctx.insert("cluster_total_objects", &total_objects);
ctx.insert("cluster_total_size_bytes", &total_size_bytes);
ctx.insert("cluster_online_count", &online_count);
ctx.insert("cluster_total_count", &sites.len());
render(&state, "cluster.html", &ctx)
}
/// Builds the per-site card list for the cluster views: the local site
/// first, then every registered peer. The result is cached for ten
/// seconds in `cluster_aggregate_cache`.
async fn build_cluster_sites(state: &AppState) -> Vec<Value> {
    {
        // Serve from cache while fresh. The guard lives in its own scope
        // so the lock is released before the (slow) rebuild below.
        let guard = state.cluster_aggregate_cache.lock();
        if let Some((at, ref value)) = *guard {
            if at.elapsed() < std::time::Duration::from_secs(10) {
                if let Some(arr) = value.as_array() {
                    return arr.clone();
                }
            }
        }
    }
    let mut sites: Vec<Value> = Vec::new();
    // Local site overview is built in-process — always online, never stale.
    let local = crate::handlers::admin::build_cluster_overview_public(state).await;
    let mut local_card = decorate_site(local, true, false, None);
    // Backfill identity from config when the overview lacks a site_id.
    if local_card.get("site_id").and_then(|v| v.as_str()).is_none() {
        local_card["site_id"] = json!(state
            .config
            .site_id
            .clone()
            .unwrap_or_else(|| "local".to_string()));
    }
    local_card["is_local"] = json!(true);
    sites.push(local_card);
    let peers = state
        .site_registry
        .as_ref()
        .map(|r| r.list_peers())
        .unwrap_or_default();
    // Short timeouts keep a single unreachable peer from stalling the page.
    let connect_to = std::time::Duration::from_secs(2);
    let read_to = std::time::Duration::from_secs(3);
    let client = crate::services::peer_admin::PeerAdminClient::new(connect_to, read_to);
    let mut peer_futures = Vec::new();
    for peer in peers {
        let conn = peer
            .connection_id
            .as_deref()
            .and_then(|id| state.connections.get(id));
        let endpoint = peer.endpoint.clone();
        let conn_clone = conn.clone();
        // Borrow the shared client so every future reuses one instance.
        let client_ref = &client;
        peer_futures.push(async move {
            let value = match conn_clone {
                Some(c) => client_ref.fetch_cluster_overview(&endpoint, &c).await,
                None => Err("no connection configured".to_string()),
            };
            (peer, value)
        });
    }
    // Query all peers concurrently; results come back in peer order.
    let results = futures::future::join_all(peer_futures).await;
    for (peer, result) in results {
        // A failed fetch yields an empty overview marked offline with the
        // error message attached to the card.
        let (overview, online, error) = match result {
            Ok(value) => (value, true, None),
            Err(err) => (json!({}), false, Some(err)),
        };
        let mut card = decorate_site(overview, online, !online, error);
        // Fall back to locally-registered metadata when the peer's own
        // overview omits identity fields.
        if card.get("site_id").and_then(|v| v.as_str()).is_none() {
            card["site_id"] = json!(peer.site_id.clone());
        }
        if card.get("display_name").and_then(|v| v.as_str()).is_none() {
            card["display_name"] = json!(peer.display_name.clone());
        }
        if card.get("endpoint").and_then(|v| v.as_str()).is_none() {
            card["endpoint"] = json!(peer.endpoint.clone());
        }
        card["is_local"] = json!(false);
        card["registered_priority"] = json!(peer.priority);
        card["registered_region"] = json!(peer.region);
        sites.push(card);
    }
    // Refresh the cache with the rebuilt list.
    *state.cluster_aggregate_cache.lock() =
        Some((std::time::Instant::now(), Value::Array(sites.clone())));
    sites
}
fn decorate_site(mut value: Value, online: bool, stale: bool, error: Option<String>) -> Value {
if !value.is_object() {
value = json!({});
}
value["online"] = json!(online);
value["stale"] = json!(stale);
value["error"] = match error {
Some(e) => json!(e),
None => Value::Null,
};
value
}
#[derive(serde::Deserialize)]
pub struct LocalSiteForm {
pub site_id: String,
@@ -1203,6 +1505,8 @@ pub struct PeerSiteForm {
#[serde(default)]
pub connection_id: String,
#[serde(default)]
pub peer_inbound_access_key: String,
#[serde(default)]
pub csrf_token: String,
}
@@ -1364,6 +1668,14 @@ pub async fn add_peer_site(
}
let has_connection = connection_id.is_some();
let peer_inbound_access_key = {
let value = form.peer_inbound_access_key.trim();
if value.is_empty() {
None
} else {
Some(value.to_string())
}
};
let peer = crate::services::site_registry::PeerSite {
site_id: site_id.clone(),
endpoint,
@@ -1378,6 +1690,7 @@ pub async fn add_peer_site(
}
},
connection_id: connection_id.clone(),
peer_inbound_access_key,
created_at: None,
is_healthy: false,
last_health_check: None,
@@ -1439,6 +1752,20 @@ pub async fn update_peer_site(
return Redirect::to("/ui/sites").into_response();
};
let endpoint = form.endpoint.trim().to_string();
if endpoint.is_empty() {
let message = "Endpoint is required.".to_string();
if wants_json {
return (
StatusCode::BAD_REQUEST,
axum::Json(json!({ "error": message })),
)
.into_response();
}
session.write(|s| s.push_flash("danger", message));
return Redirect::to("/ui/sites").into_response();
}
let connection_id = {
let value = form.connection_id.trim();
if value.is_empty() {
@@ -1462,9 +1789,17 @@ pub async fn update_peer_site(
}
}
let peer_inbound_access_key = {
let value = form.peer_inbound_access_key.trim();
if value.is_empty() {
None
} else {
Some(value.to_string())
}
};
let peer = crate::services::site_registry::PeerSite {
site_id: site_id.clone(),
endpoint: form.endpoint.trim().to_string(),
endpoint,
region: form.region.trim().to_string(),
priority: form.priority,
display_name: {
@@ -1476,6 +1811,7 @@ pub async fn update_peer_site(
}
},
connection_id,
peer_inbound_access_key,
created_at: existing.created_at,
is_healthy: existing.is_healthy,
last_health_check: existing.last_health_check,
@@ -2107,9 +2443,26 @@ pub async fn create_bucket(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
headers: HeaderMap,
axum::extract::Form(form): axum::extract::Form<CreateBucketForm>,
body: Body,
) -> Response {
let wants_json = wants_json(&headers);
let form = match parse_form_any(&headers, body).await {
Ok(fields) => CreateBucketForm {
bucket_name: fields.get("bucket_name").cloned().unwrap_or_default(),
csrf_token: fields.get("csrf_token").cloned().unwrap_or_default(),
},
Err(message) => {
if wants_json {
return (
StatusCode::BAD_REQUEST,
axum::Json(json!({ "error": message })),
)
.into_response();
}
session.write(|s| s.push_flash("danger", message));
return Redirect::to("/ui/buckets").into_response();
}
};
let bucket_name = form.bucket_name.trim().to_string();
if bucket_name.is_empty() {
@@ -2279,10 +2632,10 @@ pub async fn update_bucket_replication(
"pause" => {
let Some(mut rule) = state.replication.get_rule(&bucket_name) else {
return respond(
false,
StatusCode::NOT_FOUND,
true,
StatusCode::OK,
"No replication configuration to pause.".to_string(),
json!({ "error": "No replication configuration to pause" }),
json!({ "action": "pause", "enabled": false, "no_op": true }),
);
};
rule.enabled = false;
@@ -2297,10 +2650,10 @@ pub async fn update_bucket_replication(
"resume" => {
let Some(mut rule) = state.replication.get_rule(&bucket_name) else {
return respond(
false,
StatusCode::NOT_FOUND,
true,
StatusCode::OK,
"No replication configuration to resume.".to_string(),
json!({ "error": "No replication configuration to resume" }),
json!({ "action": "resume", "enabled": false, "no_op": true }),
);
};
rule.enabled = true;
@@ -3003,15 +3356,3 @@ pub async fn update_bucket_website(
.into_response(),
}
}
/// Generic fallback for UI POST actions that have not been ported yet.
///
/// Flashes an informational message and redirects back to the buckets
/// overview.
pub async fn stub_post(Extension(session): Extension<SessionHandle>) -> Response {
    let notice = "This action is not yet implemented in the Rust UI.";
    session.write(|s| s.push_flash("info", notice));
    Redirect::to("/ui/buckets").into_response()
}
/// Catch-all query-string wrapper used by stub JSON endpoints; accepts
/// any parameters without validation.
#[derive(serde::Deserialize)]
pub struct QueryArgs(#[serde(default)] pub HashMap<String, String>);
/// Generic JSON stub returning an empty, not-implemented payload.
pub async fn json_stub(Query(_q): Query<QueryArgs>) -> Response {
    let payload = json!({
        "status": "not_implemented",
        "items": [],
    });
    axum::Json(payload).into_response()
}

View File

@@ -1,4 +1,5 @@
pub mod config;
pub mod embedded;
pub mod handlers;
pub mod middleware;
pub mod services;
@@ -9,7 +10,7 @@ pub mod templates;
use axum::Router;
pub const SERVER_HEADER: &str = "MyFSIO";
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
pub fn create_ui_router(state: state::AppState) -> Router {
use axum::routing::{delete, get, post, put};
@@ -21,7 +22,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
.route("/", get(ui::root_redirect))
.route("/ui", get(ui::root_redirect))
.route("/ui/", get(ui::root_redirect))
.route("/ui/buckets", get(ui_pages::buckets_overview))
.route(
"/ui/buckets",
get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
)
.route("/ui/buckets/create", post(ui_pages::create_bucket))
.route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
.route(
@@ -64,6 +68,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
post(ui_api::complete_multipart_upload),
@@ -72,6 +80,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/objects",
get(ui_api::list_bucket_objects),
@@ -80,6 +92,14 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/buckets/{bucket_name}/objects/stream",
get(ui_api::stream_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/search",
get(ui_api::search_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/stats",
get(ui_api::bucket_stats_json),
)
.route(
"/ui/buckets/{bucket_name}/folders",
get(ui_api::list_bucket_folders),
@@ -88,6 +108,18 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/buckets/{bucket_name}/copy-targets",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/list-for-copy",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-delete",
post(ui_api::bulk_delete_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-download",
post(ui_api::bulk_download_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/{*rest}",
get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
@@ -106,7 +138,7 @@ pub fn create_ui_router(state: state::AppState) -> Router {
)
.route(
"/ui/buckets/{bucket_name}/lifecycle/history",
get(ui_api::lifecycle_history_stub),
get(ui_api::lifecycle_history),
)
.route(
"/ui/buckets/{bucket_name}/replication/status",
@@ -132,6 +164,11 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/buckets/{bucket_name}/replication/failures/clear",
delete(ui_api::clear_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/{*rest}",
post(ui_api::retry_replication_failure_path)
.delete(ui_api::dismiss_replication_failure_path),
)
.route(
"/ui/buckets/{bucket_name}/bulk-delete",
post(ui_api::bulk_delete_objects),
@@ -155,6 +192,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/iam/users/{user_id}/delete",
post(ui_pages::delete_iam_user),
)
.route(
"/ui/iam/users/{user_id}/update",
post(ui_pages::update_iam_user),
)
.route(
"/ui/iam/users/{user_id}/policies",
post(ui_pages::update_iam_policies),
@@ -167,12 +208,20 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/iam/users/{user_id}/rotate-secret",
post(ui_pages::rotate_iam_secret),
)
.route(
"/ui/iam/users/{user_id}/rotate",
post(ui_pages::rotate_iam_secret),
)
.route("/ui/connections/create", post(ui_pages::create_connection))
.route("/ui/connections/test", post(ui_api::test_connection))
.route(
"/ui/connections/{connection_id}",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/update",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/delete",
post(ui_pages::delete_connection),
@@ -182,6 +231,8 @@ pub fn create_ui_router(state: state::AppState) -> Router {
get(ui_api::connection_health),
)
.route("/ui/sites", get(ui_pages::sites_dashboard))
.route("/ui/cluster", get(ui_pages::cluster_dashboard))
.route("/ui/cluster/data", get(ui_pages::cluster_data_json))
.route("/ui/sites/local", post(ui_pages::update_local_site))
.route("/ui/sites/peers", post(ui_pages::add_peer_site))
.route(
@@ -201,7 +252,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/sites/peers/{site_id}/bidirectional-status",
get(ui_api::peer_bidirectional_status),
)
.route("/ui/connections", get(ui_pages::connections_dashboard))
.route(
"/ui/connections",
get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
)
.route("/ui/metrics", get(ui_pages::metrics_dashboard))
.route(
"/ui/metrics/settings",
@@ -239,6 +293,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
"/ui/website-domains/{domain}",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/update",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/delete",
post(ui_pages::delete_website_domain),
@@ -257,20 +315,27 @@ pub fn create_ui_router(state: state::AppState) -> Router {
let public = Router::new()
.route("/login", get(ui::login_page).post(ui::login_submit))
.route("/logout", post(ui::logout).get(ui::logout))
.route("/csrf-error", get(ui::csrf_error_page));
.route("/logout", post(ui::logout).get(ui::logout));
let session_state = middleware::SessionLayerState {
store: state.sessions.clone(),
secure: false,
};
let static_service = tower_http::services::ServeDir::new(&state.config.static_dir);
let static_router = Router::new()
.route(
"/static/{*path}",
axum::routing::get(handlers::static_assets::serve),
)
.with_state(state.clone());
protected
.merge(public)
.fallback(ui::not_found_page)
.layer(axum::middleware::from_fn(middleware::csrf_layer))
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::csrf_layer,
))
.layer(axum::middleware::from_fn_with_state(
session_state,
middleware::session_layer,
@@ -280,13 +345,26 @@ pub fn create_ui_router(state: state::AppState) -> Router {
middleware::ui_metrics_layer,
))
.with_state(state)
.nest_service("/static", static_service)
.merge(static_router)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(tower_http::compression::CompressionLayer::new())
}
pub fn create_router(state: state::AppState) -> Router {
let mut router = Router::new()
let default_rate_limit = middleware::RateLimitLayerState::with_per_op(
state.config.ratelimit_default,
state.config.ratelimit_list_buckets,
state.config.ratelimit_bucket_ops,
state.config.ratelimit_object_ops,
state.config.ratelimit_head_ops,
state.config.num_trusted_proxies,
);
let admin_rate_limit = middleware::RateLimitLayerState::new(
state.config.ratelimit_admin,
state.config.num_trusted_proxies,
);
let mut api_router = Router::new()
.route("/myfsio/health", axum::routing::get(handlers::health_check))
.route("/", axum::routing::get(handlers::list_buckets))
.route(
@@ -315,7 +393,7 @@ pub fn create_router(state: state::AppState) -> Router {
);
if state.config.kms_enabled {
router = router
api_router = api_router
.route(
"/kms/keys",
axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
@@ -368,7 +446,17 @@ pub fn create_router(state: state::AppState) -> Router {
);
}
router = router
api_router = api_router
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn_with_state(
default_rate_limit,
middleware::rate_limit_layer,
));
let admin_router = Router::new()
.route(
"/admin/site",
axum::routing::get(handlers::admin::get_local_site)
@@ -394,6 +482,14 @@ pub fn create_router(state: state::AppState) -> Router {
"/admin/sites/{site_id}/bidirectional-status",
axum::routing::get(handlers::admin::check_bidirectional_status),
)
.route(
"/admin/sync/stats",
axum::routing::get(handlers::admin::get_sync_stats),
)
.route(
"/admin/cluster/overview",
axum::routing::get(handlers::admin::get_cluster_overview),
)
.route(
"/admin/topology",
axum::routing::get(handlers::admin::get_topology),
@@ -445,10 +541,18 @@ pub fn create_router(state: state::AppState) -> Router {
"/admin/iam/users/{identifier}/access-keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/access-keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/disable",
axum::routing::post(handlers::admin::iam_disable_user),
@@ -491,14 +595,92 @@ pub fn create_router(state: state::AppState) -> Router {
.route(
"/admin/integrity/history",
axum::routing::get(handlers::admin::integrity_history),
);
router
)
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn_with_state(
admin_rate_limit,
middleware::rate_limit_layer,
));
let request_body_timeout =
std::time::Duration::from_secs(state.config.request_body_timeout_secs);
api_router
.merge(admin_router)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(cors_layer(&state.config))
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::bucket_cors_layer,
))
.layer(axum::middleware::from_fn(middleware::request_log_layer))
.layer(tower_http::compression::CompressionLayer::new())
.layer(tower_http::timeout::RequestBodyTimeoutLayer::new(
request_body_timeout,
))
.with_state(state)
}
/// Builds the global (server-level) CORS layer from `ServerConfig`.
///
/// Origins, methods, allow-headers, and expose-headers come from config
/// lists; a literal `"*"` entry switches that part to `Any`. Values that
/// fail to parse are silently dropped, and an empty surviving list leaves
/// the layer's default for that part untouched.
fn cors_layer(config: &config::ServerConfig) -> tower_http::cors::CorsLayer {
    use axum::http::{HeaderName, HeaderValue, Method};
    use tower_http::cors::{Any, CorsLayer};

    let mut layer = CorsLayer::new();

    // Origins: a wildcard entry wins outright.
    if config.cors_origins.iter().any(|o| o == "*") {
        layer = layer.allow_origin(Any);
    } else {
        let origins: Vec<HeaderValue> = config
            .cors_origins
            .iter()
            .filter_map(|o| HeaderValue::from_str(o).ok())
            .collect();
        if !origins.is_empty() {
            layer = layer.allow_origin(origins);
        }
    }

    // Methods have no wildcard form; keep whatever parses.
    let methods: Vec<Method> = config
        .cors_methods
        .iter()
        .filter_map(|m| m.parse().ok())
        .collect();
    if !methods.is_empty() {
        layer = layer.allow_methods(methods);
    }

    // Request headers the browser may send.
    if config.cors_allow_headers.iter().any(|h| h == "*") {
        layer = layer.allow_headers(Any);
    } else {
        let names: Vec<HeaderName> = config
            .cors_allow_headers
            .iter()
            .filter_map(|h| h.parse().ok())
            .collect();
        if !names.is_empty() {
            layer = layer.allow_headers(names);
        }
    }

    // Response headers exposed to browser JavaScript.
    if config.cors_expose_headers.iter().any(|h| h == "*") {
        layer = layer.expose_headers(Any);
    } else {
        let names: Vec<HeaderName> = config
            .cors_expose_headers
            .iter()
            .filter_map(|h| h.parse().ok())
            .collect();
        if !names.is_empty() {
            layer = layer.expose_headers(names);
        }
    }

    layer
}

View File

@@ -28,10 +28,19 @@ enum Command {
#[tokio::main]
async fn main() {
load_env_files();
tracing_subscriber::fmt::init();
init_tracing();
let cli = Cli::parse();
let config = ServerConfig::from_env();
if !config
.ratelimit_storage_uri
.eq_ignore_ascii_case("memory://")
{
tracing::warn!(
"RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory rate limits",
config.ratelimit_storage_uri
);
}
if cli.reset_cred {
reset_admin_credentials(&config);
@@ -113,7 +122,11 @@ async fn main() {
let lifecycle =
std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
state.storage.clone(),
myfsio_server::services::lifecycle::LifecycleConfig::default(),
config.storage_root.clone(),
myfsio_server::services::lifecycle::LifecycleConfig {
interval_seconds: 3600,
max_history_per_bucket: config.lifecycle_max_history_per_bucket,
},
));
bg_handles.push(lifecycle.start_background());
tracing::info!("Lifecycle manager background service started");
@@ -176,8 +189,16 @@ async fn main() {
let shutdown = shutdown_signal_shared();
let api_shutdown = shutdown.clone();
let api_listener = axum::serve::ListenerExt::tap_io(api_listener, |stream| {
if let Err(err) = stream.set_nodelay(true) {
tracing::trace!("failed to set TCP_NODELAY on api socket: {}", err);
}
});
let api_task = tokio::spawn(async move {
axum::serve(api_listener, api_app)
axum::serve(
api_listener,
api_app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
)
.with_graceful_shutdown(async move {
api_shutdown.notified().await;
})
@@ -186,6 +207,11 @@ async fn main() {
let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
let ui_shutdown = shutdown.clone();
let listener = axum::serve::ListenerExt::tap_io(listener, |stream| {
if let Err(err) = stream.set_nodelay(true) {
tracing::trace!("failed to set TCP_NODELAY on ui socket: {}", err);
}
});
Some(tokio::spawn(async move {
axum::serve(listener, app)
.with_graceful_shutdown(async move {
@@ -227,15 +253,43 @@ fn print_config_summary(config: &ServerConfig) {
println!("IAM config: {}", config.iam_config_path.display());
println!("Region: {}", config.region);
println!("Encryption enabled: {}", config.encryption_enabled);
println!(
"Encryption chunk size: {} bytes",
config.encryption_chunk_size_bytes
);
println!("KMS enabled: {}", config.kms_enabled);
println!(
"KMS data key bounds: {}-{} bytes",
config.kms_generate_data_key_min_bytes, config.kms_generate_data_key_max_bytes
);
println!("GC enabled: {}", config.gc_enabled);
println!(
"GC interval: {} hours, dry run: {}",
config.gc_interval_hours, config.gc_dry_run
);
println!("Integrity enabled: {}", config.integrity_enabled);
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
println!(
"Lifecycle history limit: {}",
config.lifecycle_max_history_per_bucket
);
println!(
"Website hosting enabled: {}",
config.website_hosting_enabled
);
println!("Site sync enabled: {}", config.site_sync_enabled);
println!("API base URL: {}", config.api_base_url);
println!(
"Object key max: {} bytes, tag limit: {}",
config.object_key_max_length_bytes, config.object_tag_limit
);
println!(
"Rate limits: default {} per {}s, admin {} per {}s",
config.ratelimit_default.max_requests,
config.ratelimit_default.window_seconds,
config.ratelimit_admin.max_requests,
config.ratelimit_admin.window_seconds
);
println!(
"Metrics history enabled: {}",
config.metrics_history_enabled
@@ -255,6 +309,32 @@ fn validate_config(config: &ServerConfig) -> Vec<String> {
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
}
if config.encryption_chunk_size_bytes == 0 {
issues.push("CRITICAL: ENCRYPTION_CHUNK_SIZE_BYTES must be greater than zero.".to_string());
}
if config.kms_generate_data_key_min_bytes == 0 {
issues.push(
"CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES must be greater than zero.".to_string(),
);
}
if config.kms_generate_data_key_min_bytes > config.kms_generate_data_key_max_bytes {
issues.push("CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES cannot exceed KMS_GENERATE_DATA_KEY_MAX_BYTES.".to_string());
}
if config.gc_interval_hours <= 0.0 {
issues.push("CRITICAL: GC_INTERVAL_HOURS must be greater than zero.".to_string());
}
if config.bucket_config_cache_ttl_seconds < 0.0 {
issues.push("CRITICAL: BUCKET_CONFIG_CACHE_TTL_SECONDS cannot be negative.".to_string());
}
if !config
.ratelimit_storage_uri
.eq_ignore_ascii_case("memory://")
{
issues.push(format!(
"WARNING: RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory limits.",
config.ratelimit_storage_uri
));
}
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
issues.push(format!(
"CRITICAL: Cannot create storage root {}: {}",
@@ -285,6 +365,17 @@ fn validate_config(config: &ServerConfig) -> Vec<String> {
issues
}
/// Initializes the global tracing subscriber.
///
/// Filter precedence: `RUST_LOG` (full env-filter syntax), then
/// `LOG_LEVEL` (defaulting to "INFO" when unset), then a hard-coded
/// "INFO" fallback if neither parses.
fn init_tracing() {
    use tracing_subscriber::EnvFilter;

    let from_log_level = || {
        let level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string());
        EnvFilter::try_new(level)
    };
    let filter = match EnvFilter::try_from_env("RUST_LOG") {
        Ok(filter) => filter,
        Err(_) => from_log_level().unwrap_or_else(|_| EnvFilter::new("INFO")),
    };
    tracing_subscriber::fmt().with_env_filter(filter).init();
}
/// Creates the shared notifier used to broadcast graceful shutdown to the
/// API and UI serve tasks.
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
    let notify = tokio::sync::Notify::new();
    std::sync::Arc::new(notify)
}
@@ -418,8 +509,49 @@ fn reset_admin_credentials(config: &ServerConfig) {
std::process::exit(1);
}
println!("Backed up existing IAM config to {}", backup.display());
prune_iam_backups(&config.iam_config_path, 5);
}
ensure_iam_bootstrap(config);
println!("Admin credentials reset.");
}
/// Removes old IAM config backups, keeping only the `keep` most recent.
///
/// Backups live next to `iam_path` and are named `{stem}.bak-{timestamp}`
/// where the timestamp is an integer; larger timestamps are considered
/// newer. Files whose suffix does not parse as `i64` are ignored.
/// Deletion failures are reported to stderr but do not abort the sweep.
fn prune_iam_backups(iam_path: &std::path::Path, keep: usize) {
    let Some(parent) = iam_path.parent() else {
        return;
    };
    let Some(stem) = iam_path.file_stem().and_then(|s| s.to_str()) else {
        return;
    };
    let prefix = format!("{}.bak-", stem);
    let Ok(entries) = std::fs::read_dir(parent) else {
        return;
    };

    // Collect (timestamp, path) for every well-formed backup file.
    let mut backups: Vec<(i64, std::path::PathBuf)> = Vec::new();
    for entry in entries.flatten() {
        let path = entry.path();
        let ts = path
            .file_name()
            .and_then(|n| n.to_str())
            .and_then(|n| n.strip_prefix(&prefix))
            .and_then(|rest| rest.parse::<i64>().ok());
        if let Some(ts) = ts {
            backups.push((ts, path));
        }
    }

    // Newest first; everything past the first `keep` entries is deleted.
    backups.sort_by_key(|(ts, _)| std::cmp::Reverse(*ts));
    for (_, path) in backups.into_iter().skip(keep) {
        match std::fs::remove_file(&path) {
            Ok(()) => println!("Pruned old IAM backup {}", path.display()),
            Err(err) => eprintln!(
                "Failed to remove old IAM backup {}: {}",
                path.display(),
                err
            ),
        }
    }
}

View File

@@ -1,5 +1,5 @@
use axum::extract::{Request, State};
use axum::http::{header, HeaderMap, Method, StatusCode};
use axum::http::{header, HeaderMap, Method, StatusCode, Uri};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
@@ -12,27 +12,76 @@ use serde_json::Value;
use std::time::Instant;
use tokio::io::AsyncReadExt;
use crate::middleware::sha_body::{is_hex_sha256, Sha256VerifyBody};
use crate::services::acl::acl_from_bucket_config;
use crate::state::AppState;
/// Wraps the request body in `Sha256VerifyBody` so the streamed payload is
/// hashed and compared against the client-declared `x-amz-content-sha256`
/// header as it is read.
///
/// The wrap is skipped when the header is absent, when its value is not a
/// literal hex SHA-256 digest (per `is_hex_sha256`, which excludes values
/// such as `UNSIGNED-PAYLOAD`), or when the body is `aws-chunked` encoded
/// (the declared hash would not match the raw chunk framing).
fn wrap_body_for_sha256_verification(req: &mut Request) {
    let declared = match req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
    {
        Some(v) => v.to_string(),
        None => return,
    };
    if !is_hex_sha256(&declared) {
        return;
    }
    // aws-chunked bodies interleave signature framing with the payload, so
    // hashing the raw stream would never match the declared digest.
    let is_chunked = req
        .headers()
        .get("content-encoding")
        .and_then(|v| v.to_str().ok())
        .map(|v| v.to_ascii_lowercase().contains("aws-chunked"))
        .unwrap_or(false);
    if is_chunked {
        return;
    }
    // Take the body out, wrap it with the verifier, and put it back.
    let body = std::mem::replace(req.body_mut(), axum::body::Body::empty());
    let wrapped = Sha256VerifyBody::new(body, declared);
    *req.body_mut() = axum::body::Body::new(wrapped);
}
/// Request extension recording the pre-rewrite URI path, so SigV4
/// canonicalization (`sigv4_canonical_path`) can use the path the client
/// actually signed after the virtual-host rewrite has mutated `req.uri()`.
#[derive(Clone, Debug)]
struct OriginalCanonicalPath(String);
fn website_error_response(
status: StatusCode,
body: Option<Vec<u8>>,
content_type: &str,
include_body: bool,
) -> Response {
let (body, content_type) = match body {
Some(body) => (body, content_type),
None => (
default_website_error_body(status).into_bytes(),
"text/html; charset=utf-8",
),
};
let mut headers = HeaderMap::new();
headers.insert(header::CONTENT_TYPE, content_type.parse().unwrap());
headers.insert(header::ACCEPT_RANGES, "bytes".parse().unwrap());
if let Some(ref body) = body {
headers.insert(
header::CONTENT_LENGTH,
body.len().to_string().parse().unwrap(),
);
if include_body {
(status, headers, body.clone()).into_response()
} else {
(status, headers).into_response()
}
}
/// Renders the fallback error body for website-hosting responses when the
/// bucket has no custom error document: 404 gets the classic static-host
/// HTML page, every other status renders as "<code> <reason>".
fn default_website_error_body(status: StatusCode) -> String {
    if status == StatusCode::NOT_FOUND {
        return "<h1>404 page not found</h1>".to_string();
    }
    format!(
        "{} {}",
        status.as_u16(),
        status.canonical_reason().unwrap_or("Error")
    )
}
fn parse_range_header(range_header: &str, total_size: u64) -> Option<(u64, u64)> {
let range_spec = range_header.strip_prefix("bytes=")?;
if let Some(suffix) = range_spec.strip_prefix('-') {
@@ -191,6 +240,7 @@ async fn maybe_serve_website(
return None;
}
let request_path = uri_path.trim_start_matches('/').to_string();
let include_error_body = method != axum::http::Method::HEAD;
let store = state.website_domains.as_ref()?;
let bucket = store.get_bucket(&host)?;
if !matches!(state.storage.bucket_exists(&bucket).await, Ok(true)) {
@@ -198,6 +248,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
));
}
@@ -207,6 +258,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
));
};
let Some((index_document, error_document)) = parse_website_config(website_config) else {
@@ -214,6 +266,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
));
};
@@ -251,6 +304,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
))
});
} else {
@@ -258,6 +312,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
));
}
} else if !exists {
@@ -276,6 +331,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
))
});
}
@@ -283,6 +339,7 @@ async fn maybe_serve_website(
StatusCode::NOT_FOUND,
None,
"text/plain; charset=utf-8",
include_error_body,
));
}
@@ -297,6 +354,67 @@ async fn maybe_serve_website(
.await
}
/// Extracts a bucket-name candidate from the first label of a
/// virtual-host style `Host` value.
///
/// Returns `None` for hosts without a dot, empty labels, reserved service
/// prefixes (`www`, `s3`, `api`, `admin`, `kms`), and labels rejected by
/// bucket-name validation.
fn virtual_host_candidate(host: &str) -> Option<String> {
    let (label, _rest) = host.split_once('.')?;
    if label.is_empty() {
        return None;
    }
    let reserved = ["www", "s3", "api", "admin", "kms"];
    if reserved.contains(&label) {
        return None;
    }
    // validate_bucket_name yields Some on invalid names (see the matching
    // usage in bucket_from_path) — presumably Some(error); confirm upstream.
    if myfsio_storage::validation::validate_bucket_name(label).is_some() {
        return None;
    }
    Some(label.to_string())
}
/// Resolves the bucket addressed by a virtual-host style request, if any.
///
/// UI/admin/KMS/internal paths never resolve. A host label that already
/// appears as the leading path segment is treated as path-style and left
/// alone. An existing bucket resolves for any method; a nonexistent one
/// resolves only for `PUT /` (virtual-host CreateBucket).
async fn virtual_host_bucket(
    state: &AppState,
    host: &str,
    path: &str,
    method: &Method,
) -> Option<String> {
    let reserved_prefixes = ["/ui", "/admin", "/kms", "/myfsio"];
    if reserved_prefixes.iter().any(|p| path.starts_with(p)) {
        return None;
    }
    let bucket = virtual_host_candidate(host)?;
    // Already path-style for this bucket: no rewrite needed.
    let as_segment = format!("/{}", bucket);
    if path == as_segment || path.starts_with(&format!("{}/", as_segment)) {
        return None;
    }
    match state.storage.bucket_exists(&bucket).await {
        Ok(true) => Some(bucket),
        Ok(false) if *method == Method::PUT && path == "/" => Some(bucket),
        _ => None,
    }
}
/// Rewrites a virtual-host URI into path-style by prefixing the bucket:
/// `/` becomes `/{bucket}/`, any other path gets `/{bucket}` prepended.
/// The query string is carried over verbatim. Returns `None` when the
/// rewritten path-and-query fails to parse back into a `Uri`.
fn rewrite_uri_for_virtual_host(uri: &Uri, bucket: &str) -> Option<Uri> {
    let new_path = match uri.path() {
        "/" => format!("/{}/", bucket),
        other => format!("/{}{}", bucket, other),
    };
    let full = match uri.query() {
        Some(query) => format!("{}?{}", new_path, query),
        None => new_path,
    };
    let mut parts = uri.clone().into_parts();
    parts.path_and_query = Some(full.parse().ok()?);
    Uri::from_parts(parts).ok()
}
/// Path to use for SigV4 canonicalization: the pre-rewrite path stored by
/// the virtual-host rewrite when present, otherwise the current URI path.
fn sigv4_canonical_path(req: &Request) -> &str {
    match req.extensions().get::<OriginalCanonicalPath>() {
        Some(original) => original.0.as_str(),
        None => req.uri().path(),
    }
}
pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: Next) -> Response {
let start = Instant::now();
let uri = req.uri().clone();
@@ -328,12 +446,12 @@ pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: N
.and_then(|value| value.to_str().ok())
.map(|value| value.to_string());
let response = if path == "/myfsio/health" || path == "/health" {
let response = if path == "/myfsio/health" {
next.run(req).await
} else if let Some(response) = maybe_serve_website(
&state,
method.clone(),
host.unwrap_or_default(),
host.clone().unwrap_or_default(),
path.clone(),
range_header,
)
@@ -341,38 +459,54 @@ pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: N
{
response
} else {
let auth_path = if let Some(bucket) =
virtual_host_bucket(&state, host.as_deref().unwrap_or_default(), &path, &method).await
{
if let Some(rewritten) = rewrite_uri_for_virtual_host(req.uri(), &bucket) {
req.extensions_mut()
.insert(OriginalCanonicalPath(path.clone()));
*req.uri_mut() = rewritten;
req.uri().path().to_string()
} else {
path.clone()
}
} else {
path.clone()
};
match try_auth(&state, &req) {
AuthResult::NoAuth => match authorize_request(
&state,
None,
&method,
&path,
&auth_path,
&query,
copy_source.as_deref(),
)
.await
{
Ok(()) => next.run(req).await,
Err(err) => error_response(err, &path),
Err(err) => error_response(err, &auth_path),
},
AuthResult::Ok(principal) => {
if let Err(err) = authorize_request(
&state,
Some(&principal),
&method,
&path,
&auth_path,
&query,
copy_source.as_deref(),
)
.await
{
error_response(err, &path)
error_response(err, &auth_path)
} else {
req.extensions_mut().insert(principal);
wrap_body_for_sha256_verification(&mut req);
next.run(req).await
}
}
AuthResult::Denied(err) => error_response(err, &path),
AuthResult::Denied(err) => error_response(err, &auth_path),
}
};
@@ -488,7 +622,7 @@ async fn authorize_request(
query: &str,
copy_source: Option<&str>,
) -> Result<(), S3Error> {
if path == "/myfsio/health" || path == "/health" {
if path == "/myfsio/health" {
return Ok(());
}
if path == "/" {
@@ -589,6 +723,17 @@ async fn authorize_action(
if iam_allowed || matches!(policy_decision, PolicyDecision::Allow) {
return Ok(());
}
if evaluate_bucket_acl(
state,
bucket,
principal.map(|principal| principal.access_key.as_str()),
action,
principal.is_some(),
)
.await
{
return Ok(());
}
if principal.is_some() {
Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
@@ -600,6 +745,27 @@ async fn authorize_action(
}
}
async fn evaluate_bucket_acl(
state: &AppState,
bucket: &str,
principal_id: Option<&str>,
action: &str,
is_authenticated: bool,
) -> bool {
let config = match state.storage.get_bucket_config(bucket).await {
Ok(config) => config,
Err(_) => return false,
};
let Some(value) = config.acl.as_ref() else {
return false;
};
let Some(acl) = acl_from_bucket_config(value) else {
return false;
};
acl.allowed_actions(principal_id, is_authenticated)
.contains(action)
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum PolicyDecision {
Allow,
@@ -964,7 +1130,9 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
let parts: Vec<&str> = auth_str
.strip_prefix("AWS4-HMAC-SHA256 ")
.unwrap()
.split(", ")
.split(',')
.map(str::trim)
.filter(|s| !s.is_empty())
.collect();
if parts.len() != 3 {
@@ -974,9 +1142,24 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
));
}
let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");
let mut credential: &str = "";
let mut signed_headers_str: &str = "";
let mut provided_signature: &str = "";
for part in &parts {
if let Some(v) = part.strip_prefix("Credential=") {
credential = v;
} else if let Some(v) = part.strip_prefix("SignedHeaders=") {
signed_headers_str = v;
} else if let Some(v) = part.strip_prefix("Signature=") {
provided_signature = v;
}
}
if credential.is_empty() || signed_headers_str.is_empty() || provided_signature.is_empty() {
return AuthResult::Denied(S3Error::new(
S3ErrorCode::InvalidArgument,
"Malformed Authorization header",
));
}
let cred_parts: Vec<&str> = credential.split('/').collect();
if cred_parts.len() != 5 {
@@ -1019,7 +1202,7 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
};
let method = req.method().as_str();
let canonical_uri = req.uri().path();
let canonical_uri = sigv4_canonical_path(req);
let query_params = parse_query_params(req.uri().query().unwrap_or(""));
@@ -1161,7 +1344,7 @@ fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
}
if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
return AuthResult::Denied(S3Error::new(
S3ErrorCode::AccessDenied,
S3ErrorCode::RequestTimeTooSkewed,
"Request is too far in the future",
));
}
@@ -1175,7 +1358,7 @@ fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
};
let method = req.method().as_str();
let canonical_uri = req.uri().path();
let canonical_uri = sigv4_canonical_path(req);
let query_params_no_sig: Vec<(String, String)> = params
.iter()
@@ -1231,8 +1414,11 @@ fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Er
if diff > tolerance_secs {
return Some(S3Error::new(
S3ErrorCode::AccessDenied,
"Request timestamp too old or too far in the future",
S3ErrorCode::RequestTimeTooSkewed,
format!(
"The difference between the request time and the server's time is too large ({}s, tolerance {}s)",
diff, tolerance_secs
),
));
}
None
@@ -1263,9 +1449,18 @@ fn error_response(err: S3Error, resource: &str) -> Response {
let status =
StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let request_id = uuid::Uuid::new_v4().simple().to_string();
let code_str = err.code.as_str();
let body = err
.with_resource(resource.to_string())
.with_request_id(request_id)
.to_xml();
(status, [("content-type", "application/xml")], body).into_response()
(
status,
[
("content-type", "application/xml"),
("x-amz-error-code", code_str),
],
body,
)
.into_response()
}

View File

@@ -0,0 +1,276 @@
use axum::extract::{Request, State};
use axum::http::{HeaderMap, HeaderValue, Method, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
/// One `<CORSRule>` element parsed from a bucket's CORS configuration XML.
#[derive(Debug, Default, Clone)]
struct CorsRule {
    // <AllowedOrigin> values; may be "*" or a single-wildcard pattern.
    allowed_origins: Vec<String>,
    // <AllowedMethod> values, upper-cased at parse time.
    allowed_methods: Vec<String>,
    // <AllowedHeader> values; "*" covers any requested header.
    allowed_headers: Vec<String>,
    // <ExposeHeader> values echoed in Access-Control-Expose-Headers.
    expose_headers: Vec<String>,
    // <MaxAgeSeconds>, when present and parseable as u64.
    max_age_seconds: Option<u64>,
}
/// Parses bucket CORS configuration XML into rules.
///
/// Malformed XML yields an empty list. Each `<CORSRule>` element becomes
/// one `CorsRule`; unknown child elements are ignored, `AllowedMethod`
/// values are normalized to upper case, and unparseable `MaxAgeSeconds`
/// values are dropped.
fn parse_cors_config(xml: &str) -> Vec<CorsRule> {
    let Ok(doc) = roxmltree::Document::parse(xml) else {
        return Vec::new();
    };
    doc.descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "CORSRule")
        .map(|rule_node| {
            let mut rule = CorsRule::default();
            for child in rule_node.children().filter(|n| n.is_element()) {
                let text = child.text().unwrap_or("").trim().to_string();
                match child.tag_name().name() {
                    "AllowedOrigin" => rule.allowed_origins.push(text),
                    "AllowedMethod" => rule.allowed_methods.push(text.to_ascii_uppercase()),
                    "AllowedHeader" => rule.allowed_headers.push(text),
                    "ExposeHeader" => rule.expose_headers.push(text),
                    "MaxAgeSeconds" => {
                        if let Ok(secs) = text.parse::<u64>() {
                            rule.max_age_seconds = Some(secs);
                        }
                    }
                    _ => {}
                }
            }
            rule
        })
        .collect()
}
/// Tests an AllowedOrigin pattern against a request Origin.
///
/// Supports exact match, a bare `*`, and a single leading or trailing
/// wildcard (`*.example.com`, `https://app.*`); the leading-wildcard form
/// is checked first when both edges are `*`.
fn match_origin(pattern: &str, origin: &str) -> bool {
    if pattern == "*" || pattern == origin {
        return true;
    }
    if let Some(suffix) = pattern.strip_prefix('*') {
        origin.ends_with(suffix)
    } else if let Some(prefix) = pattern.strip_suffix('*') {
        origin.starts_with(prefix)
    } else {
        false
    }
}
/// Case-insensitive match of an AllowedHeader pattern; `*` matches any.
fn match_header(pattern: &str, header: &str) -> bool {
    pattern == "*" || pattern.eq_ignore_ascii_case(header)
}
/// Finds the first rule satisfying a preflight request: the origin and the
/// requested method must both match, and every header in
/// `request_headers` must be covered by the rule's allowed headers.
fn find_matching_rule<'a>(
    rules: &'a [CorsRule],
    origin: &str,
    method: &str,
    request_headers: &[&str],
) -> Option<&'a CorsRule> {
    rules.iter().find(|rule| {
        if !rule.allowed_origins.iter().any(|p| match_origin(p, origin)) {
            return false;
        }
        if !rule
            .allowed_methods
            .iter()
            .any(|m| m.eq_ignore_ascii_case(method))
        {
            return false;
        }
        request_headers
            .iter()
            .all(|h| rule.allowed_headers.iter().any(|p| match_header(p, h)))
    })
}
/// Finds the first rule matching an actual (non-preflight) request by
/// origin and HTTP method; request headers are not consulted.
fn find_matching_rule_for_actual<'a>(
    rules: &'a [CorsRule],
    origin: &str,
    method: &str,
) -> Option<&'a CorsRule> {
    rules.iter().find(|rule| {
        let origin_ok = rule.allowed_origins.iter().any(|p| match_origin(p, origin));
        let method_ok = rule
            .allowed_methods
            .iter()
            .any(|m| m.eq_ignore_ascii_case(method));
        origin_ok && method_ok
    })
}
/// Derives a bucket name from a path-style request path.
///
/// Returns `None` for the root path, reserved API prefixes
/// (`admin/`, `myfsio/`, `kms/`), and leading segments that fail
/// bucket-name validation.
fn bucket_from_path(path: &str) -> Option<&str> {
    let trimmed = path.trim_start_matches('/');
    if trimmed.is_empty() {
        return None;
    }
    let reserved = ["admin/", "myfsio/", "kms/"];
    if reserved.iter().any(|p| trimmed.starts_with(p)) {
        return None;
    }
    let first = trimmed.split('/').next().unwrap_or("");
    // Reject anything that is not a syntactically valid bucket name.
    if myfsio_storage::validation::validate_bucket_name(first).is_some() {
        return None;
    }
    Some(first)
}
/// Resolves a bucket from the `Host` header's first label (virtual-host
/// addressing). Only labels that pass bucket-name validation and name an
/// existing bucket resolve; lookup errors resolve to `None`.
async fn bucket_from_host(state: &AppState, headers: &HeaderMap) -> Option<String> {
    let raw = headers.get("host").and_then(|value| value.to_str().ok())?;
    // Drop any :port suffix and normalize case before splitting labels.
    let host = raw.split(':').next()?.trim().to_ascii_lowercase();
    let (label, _) = host.split_once('.')?;
    if myfsio_storage::validation::validate_bucket_name(label).is_some() {
        return None;
    }
    match state.storage.bucket_exists(label).await {
        Ok(true) => Some(label.to_string()),
        _ => None,
    }
}
/// Resolves the bucket targeted by a request: virtual-host (Host header)
/// takes precedence, falling back to the first path segment.
async fn resolve_bucket(state: &AppState, headers: &HeaderMap, path: &str) -> Option<String> {
    match bucket_from_host(state, headers).await {
        Some(name) => Some(name),
        None => bucket_from_path(path).map(str::to_string),
    }
}
/// Applies per-rule CORS headers (allow-origin, Vary, expose-headers) to a
/// response, replacing any values already present.
fn apply_rule_headers(headers: &mut axum::http::HeaderMap, rule: &CorsRule, origin: &str) {
    headers.remove("access-control-allow-origin");
    // NOTE(review): this replaces the entire Vary header with just
    // "Origin", dropping any Vary value set by inner layers (e.g. for
    // content negotiation) — confirm this is intended.
    headers.remove("vary");
    if let Ok(val) = HeaderValue::from_str(origin) {
        headers.insert("access-control-allow-origin", val);
    }
    headers.insert("vary", HeaderValue::from_static("Origin"));
    if !rule.expose_headers.is_empty() {
        let value = rule.expose_headers.join(", ");
        if let Ok(val) = HeaderValue::from_str(&value) {
            headers.remove("access-control-expose-headers");
            headers.insert("access-control-expose-headers", val);
        }
    }
}
/// Removes every CORS response header; used when a response's origin or
/// method matches no configured rule.
fn strip_cors_response_headers(headers: &mut HeaderMap) {
    const CORS_RESPONSE_HEADERS: [&str; 6] = [
        "access-control-allow-origin",
        "access-control-allow-credentials",
        "access-control-expose-headers",
        "access-control-allow-methods",
        "access-control-allow-headers",
        "access-control-max-age",
    ];
    for name in CORS_RESPONSE_HEADERS {
        headers.remove(name);
    }
}
/// Axum middleware implementing per-bucket CORS from each bucket's stored
/// CORS configuration XML.
///
/// Bucket resolution tries the Host header, then the first path segment;
/// if no bucket resolves, the request passes through untouched. Rules are
/// only loaded when the request carries an `Origin` header.
///
/// Preflight requests (`OPTIONS` with `Access-Control-Request-Method`)
/// that match a rule get a synthetic `204` with allow-methods /
/// allow-headers / max-age; a non-matching preflight gets `403`. For
/// actual requests, a matching rule decorates the inner response's
/// headers, and a non-match strips any CORS headers from it.
pub async fn bucket_cors_layer(
    State(state): State<AppState>,
    req: Request,
    next: Next,
) -> Response {
    let path = req.uri().path().to_string();
    let bucket = match resolve_bucket(&state, req.headers(), &path).await {
        Some(name) => name,
        None => return next.run(req).await,
    };
    let origin = req
        .headers()
        .get("origin")
        .and_then(|v| v.to_str().ok())
        .map(|s| s.to_string());
    // Only hit storage for the CORS config when an Origin is present; the
    // stored value may be a JSON string or any other JSON value holding XML.
    let bucket_rules = if origin.is_some() {
        match state.storage.get_bucket_config(&bucket).await {
            Ok(cfg) => cfg
                .cors
                .as_ref()
                .map(|v| match v {
                    serde_json::Value::String(s) => s.clone(),
                    other => other.to_string(),
                })
                .map(|xml| parse_cors_config(&xml))
                .filter(|rules| !rules.is_empty()),
            Err(_) => None,
        }
    } else {
        None
    };
    let is_preflight = req.method() == Method::OPTIONS
        && req.headers().contains_key("access-control-request-method");
    if is_preflight {
        if let (Some(origin), Some(rules)) = (origin.as_deref(), bucket_rules.as_ref()) {
            let req_method = req
                .headers()
                .get("access-control-request-method")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            let req_headers_raw = req
                .headers()
                .get("access-control-request-headers")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            let req_headers: Vec<&str> = req_headers_raw
                .split(',')
                .map(str::trim)
                .filter(|s| !s.is_empty())
                .collect();
            if let Some(rule) = find_matching_rule(rules, origin, req_method, &req_headers) {
                // Preflight never reaches the inner handler: answer 204 here.
                let mut resp = StatusCode::NO_CONTENT.into_response();
                apply_rule_headers(resp.headers_mut(), rule, origin);
                let methods_value = rule.allowed_methods.join(", ");
                if let Ok(val) = HeaderValue::from_str(&methods_value) {
                    resp.headers_mut()
                        .insert("access-control-allow-methods", val);
                }
                // A "*" allowed-header rule echoes back whatever headers
                // the browser asked for.
                let headers_value = if rule.allowed_headers.iter().any(|h| h == "*") {
                    req_headers_raw.to_string()
                } else {
                    rule.allowed_headers.join(", ")
                };
                if !headers_value.is_empty() {
                    if let Ok(val) = HeaderValue::from_str(&headers_value) {
                        resp.headers_mut()
                            .insert("access-control-allow-headers", val);
                    }
                }
                if let Some(max_age) = rule.max_age_seconds {
                    if let Ok(val) = HeaderValue::from_str(&max_age.to_string()) {
                        resp.headers_mut().insert("access-control-max-age", val);
                    }
                }
                return resp;
            }
            return (StatusCode::FORBIDDEN, "CORSResponse: CORS is not enabled").into_response();
        }
    }
    let method = req.method().clone();
    let mut resp = next.run(req).await;
    if let (Some(origin), Some(rules)) = (origin.as_deref(), bucket_rules.as_ref()) {
        if let Some(rule) = find_matching_rule_for_actual(rules, origin, method.as_str()) {
            apply_rule_headers(resp.headers_mut(), rule, origin);
        } else {
            strip_cors_response_headers(resp.headers_mut());
        }
    }
    resp
}

View File

@@ -1,7 +1,12 @@
mod auth;
mod bucket_cors;
pub mod ratelimit;
pub mod session;
pub(crate) mod sha_body;
pub use auth::auth_layer;
pub use bucket_cors::bucket_cors_layer;
pub use ratelimit::{rate_limit_layer, RateLimitLayerState};
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
use axum::extract::{Request, State};
@@ -18,6 +23,42 @@ pub async fn server_header(req: Request, next: Next) -> Response {
resp
}
/// Access-log middleware: records remote IP, method, URI, HTTP version,
/// status, response bytes, and elapsed milliseconds for every request
/// under the `myfsio::access` tracing target.
pub async fn request_log_layer(req: Request, next: Next) -> Response {
    let start = Instant::now();
    let method = req.method().clone();
    let uri = req.uri().clone();
    let version = req.version();
    // Remote IP comes from ConnectInfo, which is only populated when the
    // server was started with into_make_service_with_connect_info; logs
    // "-" otherwise.
    let remote = req
        .extensions()
        .get::<axum::extract::ConnectInfo<std::net::SocketAddr>>()
        .map(|ci| ci.0.ip().to_string())
        .unwrap_or_else(|| "-".to_string());
    let response = next.run(req).await;
    let status = response.status().as_u16();
    let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;
    // Best-effort byte count from Content-Length; streamed responses
    // without the header are logged as 0.
    let bytes_out = response
        .headers()
        .get(axum::http::header::CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())
        .and_then(|v| v.parse::<u64>().ok());
    tracing::info!(
        target: "myfsio::access",
        remote = %remote,
        method = %method,
        uri = %uri,
        version = ?version,
        status,
        bytes_out = bytes_out.unwrap_or(0),
        elapsed_ms = format!("{:.3}", elapsed_ms),
        "request"
    );
    response
}
pub async fn ui_metrics_layer(State(state): State<AppState>, req: Request, next: Next) -> Response {
let metrics = match state.metrics.clone() {
Some(m) => m,

View File

@@ -0,0 +1,311 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use axum::extract::{ConnectInfo, Request, State};
use axum::http::{header, Method, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use parking_lot::Mutex;
use crate::config::RateLimitSetting;
/// Shared state for the rate-limit middleware: one fixed-window limiter
/// per operation class, plus the trusted-proxy depth.
#[derive(Clone)]
pub struct RateLimitLayerState {
    // Fallback limiter used when no per-op limiter applies.
    default_limiter: Arc<FixedWindowLimiter>,
    // Optional per-op limiters; `None` means "use the default limiter".
    list_buckets_limiter: Option<Arc<FixedWindowLimiter>>,
    bucket_ops_limiter: Option<Arc<FixedWindowLimiter>>,
    object_ops_limiter: Option<Arc<FixedWindowLimiter>>,
    head_ops_limiter: Option<Arc<FixedWindowLimiter>>,
    // Number of trusted reverse proxies; presumably used to pick the
    // client IP from forwarding headers — confirm in the enforcement code.
    num_trusted_proxies: usize,
}
impl RateLimitLayerState {
pub fn new(setting: RateLimitSetting, num_trusted_proxies: usize) -> Self {
Self {
default_limiter: Arc::new(FixedWindowLimiter::new(setting)),
list_buckets_limiter: None,
bucket_ops_limiter: None,
object_ops_limiter: None,
head_ops_limiter: None,
num_trusted_proxies,
}
}
pub fn with_per_op(
default: RateLimitSetting,
list_buckets: RateLimitSetting,
bucket_ops: RateLimitSetting,
object_ops: RateLimitSetting,
head_ops: RateLimitSetting,
num_trusted_proxies: usize,
) -> Self {
Self {
default_limiter: Arc::new(FixedWindowLimiter::new(default)),
list_buckets_limiter: (list_buckets != default)
.then(|| Arc::new(FixedWindowLimiter::new(list_buckets))),
bucket_ops_limiter: (bucket_ops != default)
.then(|| Arc::new(FixedWindowLimiter::new(bucket_ops))),
object_ops_limiter: (object_ops != default)
.then(|| Arc::new(FixedWindowLimiter::new(object_ops))),
head_ops_limiter: (head_ops != default)
.then(|| Arc::new(FixedWindowLimiter::new(head_ops))),
num_trusted_proxies,
}
}
fn select_limiter(&self, req: &Request) -> &Arc<FixedWindowLimiter> {
let path = req.uri().path();
let method = req.method();
if path == "/" && *method == Method::GET {
if let Some(ref limiter) = self.list_buckets_limiter {
return limiter;
}
}
let segments: Vec<&str> = path
.trim_start_matches('/')
.split('/')
.filter(|s| !s.is_empty())
.collect();
if *method == Method::HEAD {
if let Some(ref limiter) = self.head_ops_limiter {
return limiter;
}
}
if segments.len() == 1 {
if let Some(ref limiter) = self.bucket_ops_limiter {
return limiter;
}
} else if segments.len() >= 2 {
if let Some(ref limiter) = self.object_ops_limiter {
return limiter;
}
}
&self.default_limiter
}
}
#[derive(Debug)]
struct FixedWindowLimiter {
setting: RateLimitSetting,
state: Mutex<LimiterState>,
}
#[derive(Debug)]
struct LimiterState {
entries: HashMap<String, LimitEntry>,
last_sweep: Instant,
}
#[derive(Debug, Clone, Copy)]
struct LimitEntry {
window_started: Instant,
count: u32,
}
const SWEEP_MIN_INTERVAL: Duration = Duration::from_secs(60);
const SWEEP_ENTRY_THRESHOLD: usize = 1024;
impl FixedWindowLimiter {
fn new(setting: RateLimitSetting) -> Self {
Self {
setting,
state: Mutex::new(LimiterState {
entries: HashMap::new(),
last_sweep: Instant::now(),
}),
}
}
fn check(&self, key: &str) -> Result<(), u64> {
let now = Instant::now();
let window = Duration::from_secs(self.setting.window_seconds.max(1));
let mut state = self.state.lock();
if state.entries.len() >= SWEEP_ENTRY_THRESHOLD
&& now.duration_since(state.last_sweep) >= SWEEP_MIN_INTERVAL
{
state
.entries
.retain(|_, entry| now.duration_since(entry.window_started) < window);
state.last_sweep = now;
}
let entry = state.entries.entry(key.to_string()).or_insert(LimitEntry {
window_started: now,
count: 0,
});
if now.duration_since(entry.window_started) >= window {
entry.window_started = now;
entry.count = 0;
}
if entry.count >= self.setting.max_requests {
let elapsed = now.duration_since(entry.window_started);
let retry_after = window.saturating_sub(elapsed).as_secs().max(1);
return Err(retry_after);
}
entry.count += 1;
Ok(())
}
}
/// Middleware entry point: admits the request, or answers immediately with an
/// S3-style rate-limit error carrying a `Retry-After` hint.
pub async fn rate_limit_layer(
    State(state): State<RateLimitLayerState>,
    req: Request,
    next: Next,
) -> Response {
    let key = rate_limit_key(&req, state.num_trusted_proxies);
    if let Err(retry_after) = state.select_limiter(&req).check(&key) {
        let resource = req.uri().path().to_string();
        return too_many_requests(retry_after, &resource);
    }
    next.run(req).await
}
/// Builds the throttling response: an S3-style XML error body with a
/// `Retry-After` header and a freshly generated `x-amz-request-id`.
///
/// NOTE(review): the status is 503 (Service Unavailable) rather than HTTP 429,
/// presumably matching S3's `SlowDown` throttling convention — confirm.
fn too_many_requests(retry_after: u64, resource: &str) -> Response {
    let request_id = uuid::Uuid::new_v4().simple().to_string();
    let xml = myfsio_xml::response::rate_limit_exceeded_xml(resource, &request_id);
    let headers = [
        (header::CONTENT_TYPE, "application/xml".to_string()),
        (header::RETRY_AFTER, retry_after.to_string()),
    ];
    let mut response = (StatusCode::SERVICE_UNAVAILABLE, headers, xml).into_response();
    // The id is plain hex, so building a HeaderValue from it cannot
    // realistically fail; skip the header rather than panic if it ever does.
    if let Ok(value) = request_id.parse() {
        response.headers_mut().insert("x-amz-request-id", value);
    }
    response
}
/// Derives the limiter key for a request; currently always IP-based.
fn rate_limit_key(req: &Request, num_trusted_proxies: usize) -> String {
    format!("ip:{}", client_ip(req, num_trusted_proxies))
}
/// Best-effort client IP extraction.
///
/// When `num_trusted_proxies > 0`, forwarding headers are consulted: the
/// `X-Forwarded-For` entries are read right-to-left, skipping one entry per
/// trusted proxy, so the selected entry is the address reported by the
/// outermost untrusted hop. If the chain is not longer than the trusted hop
/// count the header is ignored (it could be entirely client-forged), and
/// `X-Real-IP` is tried next. With no trusted proxies, forwarding headers are
/// ignored entirely and only the TCP peer address is used.
fn client_ip(req: &Request, num_trusted_proxies: usize) -> String {
    if num_trusted_proxies > 0 {
        if let Some(value) = req
            .headers()
            .get("x-forwarded-for")
            .and_then(|v| v.to_str().ok())
        {
            let parts = value
                .split(',')
                .map(|part| part.trim())
                .filter(|part| !part.is_empty())
                .collect::<Vec<_>>();
            // parts[len - trusted - 1]: first hop NOT appended by a trusted proxy.
            if parts.len() > num_trusted_proxies {
                let index = parts.len() - num_trusted_proxies - 1;
                return parts[index].to_string();
            }
        }
        if let Some(value) = req.headers().get("x-real-ip").and_then(|v| v.to_str().ok()) {
            if !value.trim().is_empty() {
                return value.trim().to_string();
            }
        }
    }
    // Fall back to the socket peer address recorded by axum's ConnectInfo.
    req.extensions()
        .get::<ConnectInfo<SocketAddr>>()
        .map(|ConnectInfo(addr)| addr.ip().to_string())
        .unwrap_or_else(|| "unknown".to_string())
}
#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;
    // XFF entries are consumed right-to-left, one per trusted proxy.
    #[test]
    fn honors_trusted_proxy_count_for_forwarded_for() {
        let req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1, 10.0.0.1, 10.0.0.2")
            .body(Body::empty())
            .unwrap();
        assert_eq!(rate_limit_key(&req, 2), "ip:198.51.100.1");
        assert_eq!(rate_limit_key(&req, 1), "ip:10.0.0.1");
    }
    // A too-short XFF chain is treated as untrustworthy and ignored.
    #[test]
    fn falls_back_to_connect_info_when_forwarded_for_has_too_few_hops() {
        let mut req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1")
            .body(Body::empty())
            .unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
        assert_eq!(rate_limit_key(&req, 2), "ip:203.0.113.9");
    }
    // With zero trusted proxies, forwarding headers must never be believed.
    #[test]
    fn ignores_forwarded_headers_when_no_proxies_are_trusted() {
        let mut req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1")
            .header("x-real-ip", "198.51.100.2")
            .body(Body::empty())
            .unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
        assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.9");
    }
    #[test]
    fn uses_connect_info_for_direct_clients() {
        let mut req = Request::builder().body(Body::empty()).unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 10], 443))));
        assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.10");
    }
    // Third request in a 2-per-window limiter must be rejected.
    #[test]
    fn fixed_window_rejects_after_quota() {
        let limiter = FixedWindowLimiter::new(RateLimitSetting::new(2, 60));
        assert!(limiter.check("k").is_ok());
        assert!(limiter.check("k").is_ok());
        assert!(limiter.check("k").is_err());
    }
    // Seeds the map past the sweep threshold with expired entries and checks
    // that a single `check` call triggers the sweep, leaving only the fresh key.
    #[test]
    fn sweep_removes_expired_entries() {
        let limiter = FixedWindowLimiter::new(RateLimitSetting::new(10, 1));
        let far_past = Instant::now() - (SWEEP_MIN_INTERVAL + Duration::from_secs(5));
        {
            let mut state = limiter.state.lock();
            for i in 0..(SWEEP_ENTRY_THRESHOLD + 1024) {
                state.entries.insert(
                    format!("stale-{}", i),
                    LimitEntry {
                        window_started: far_past,
                        count: 5,
                    },
                );
            }
            state.last_sweep = far_past;
        }
        let seeded = limiter.state.lock().entries.len();
        assert_eq!(seeded, SWEEP_ENTRY_THRESHOLD + 1024);
        assert!(limiter.check("fresh").is_ok());
        let remaining = limiter.state.lock().entries.len();
        assert_eq!(
            remaining, 1,
            "expected sweep to leave only the fresh entry, got {}",
            remaining
        );
    }
}

View File

@@ -90,7 +90,11 @@ pub async fn session_layer(
resp
}
pub async fn csrf_layer(req: Request, next: Next) -> Response {
pub async fn csrf_layer(
State(state): State<crate::state::AppState>,
req: Request,
next: Next,
) -> Response {
const CSRF_HEADER_ALIAS: &str = "x-csrftoken";
let method = req.method().clone();
@@ -151,6 +155,8 @@ pub async fn csrf_layer(req: Request, next: Next) -> Response {
extract_form_token(&bytes)
} else if content_type.starts_with("multipart/form-data") {
extract_multipart_token(&content_type, &bytes)
} else if content_type.starts_with("application/json") {
extract_json_token(&bytes)
} else {
None
};
@@ -169,7 +175,32 @@ pub async fn csrf_layer(req: Request, next: Next) -> Response {
header_present = header_token.is_some(),
"CSRF token mismatch"
);
(StatusCode::FORBIDDEN, "Invalid CSRF token").into_response()
let accept = parts
.headers
.get(header::ACCEPT)
.and_then(|v| v.to_str().ok())
.unwrap_or("");
let is_form_submit = content_type.starts_with("application/x-www-form-urlencoded")
|| content_type.starts_with("multipart/form-data");
let wants_json =
accept.contains("application/json") || content_type.starts_with("application/json");
if is_form_submit && !wants_json {
let ctx = crate::handlers::ui::base_context(&handle, None);
let mut resp = crate::handlers::ui::render(&state, "csrf_error.html", &ctx);
*resp.status_mut() = StatusCode::FORBIDDEN;
return resp;
}
let mut resp = (
StatusCode::FORBIDDEN,
[(header::CONTENT_TYPE, "application/json")],
r#"{"error":"Invalid CSRF token. Send it via the X-CSRF-Token header or a csrf_token field in the form/JSON body."}"#,
)
.into_response();
*resp.status_mut() = StatusCode::FORBIDDEN;
resp
}
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
@@ -209,6 +240,14 @@ fn build_session_cookie(id: &str, secure: bool) -> Cookie<'static> {
cookie
}
/// Pulls the CSRF token out of a JSON request body: the top-level field named
/// after `CSRF_FIELD_NAME` must be a string. Returns `None` for bodies that
/// are not valid JSON or lack such a field.
fn extract_json_token(body: &[u8]) -> Option<String> {
    let parsed: serde_json::Value = serde_json::from_slice(body).ok()?;
    match parsed.get(CSRF_FIELD_NAME) {
        Some(serde_json::Value::String(token)) => Some(token.clone()),
        _ => None,
    }
}
fn extract_form_token(body: &[u8]) -> Option<String> {
let text = std::str::from_utf8(body).ok()?;
let prefix = format!("{}=", CSRF_FIELD_NAME);

View File

@@ -0,0 +1,107 @@
use axum::body::Body;
use bytes::Bytes;
use http_body::{Body as HttpBody, Frame};
use sha2::{Digest, Sha256};
use std::error::Error;
use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Error produced when the streamed body's SHA-256 digest does not match the
/// client-declared `x-amz-content-sha256` value.
#[derive(Debug)]
struct Sha256MismatchError {
    // Digest the client declared (lowercase hex).
    expected: String,
    // Digest actually computed over the received bytes (lowercase hex).
    computed: String,
}
impl Sha256MismatchError {
    /// S3-compatible human-readable message for the error response body.
    fn message(&self) -> String {
        format!(
            "The x-amz-content-sha256 you specified did not match what we received (expected {}, computed {})",
            self.expected, self.computed
        )
    }
}
impl fmt::Display for Sha256MismatchError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "XAmzContentSHA256Mismatch: expected {}, computed {}",
            self.expected, self.computed
        )
    }
}
impl Error for Sha256MismatchError {}
/// Body adapter that hashes every data frame as it streams through and fails
/// the stream at end-of-body if the digest differs from the expected value.
pub struct Sha256VerifyBody {
    inner: Body,
    // Expected digest, normalized to lowercase hex at construction.
    expected: String,
    // Present while hashing; taken exactly once at end-of-stream so the
    // finalize-and-compare runs a single time even if polled again.
    hasher: Option<Sha256>,
}
impl Sha256VerifyBody {
    /// Wraps `inner`, verifying it against `expected_hex` (case-insensitive).
    pub fn new(inner: Body, expected_hex: String) -> Self {
        Self {
            inner,
            expected: expected_hex.to_ascii_lowercase(),
            hasher: Some(Sha256::new()),
        }
    }
}
impl HttpBody for Sha256VerifyBody {
    type Data = Bytes;
    type Error = Box<dyn std::error::Error + Send + Sync>;
    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        let this = self.as_mut().get_mut();
        match Pin::new(&mut this.inner).poll_frame(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(Box::new(e)))),
            Poll::Ready(Some(Ok(frame))) => {
                // Only data frames contribute to the digest; trailer frames
                // pass through untouched.
                if let Some(data) = frame.data_ref() {
                    if let Some(h) = this.hasher.as_mut() {
                        h.update(data);
                    }
                }
                Poll::Ready(Some(Ok(frame)))
            }
            Poll::Ready(None) => {
                // End of stream: finalize once and surface a mismatch as a
                // stream error so the handler can translate it to an S3 error.
                if let Some(hasher) = this.hasher.take() {
                    let computed = hex::encode(hasher.finalize());
                    if computed != this.expected {
                        return Poll::Ready(Some(Err(Box::new(Sha256MismatchError {
                            expected: this.expected.clone(),
                            computed,
                        }))));
                    }
                }
                Poll::Ready(None)
            }
        }
    }
    fn is_end_stream(&self) -> bool {
        self.inner.is_end_stream()
    }
    fn size_hint(&self) -> http_body::SizeHint {
        self.inner.size_hint()
    }
}
/// Returns true when `s` has the shape of a SHA-256 digest in hex form:
/// exactly 64 ASCII hex digits (either case).
pub fn is_hex_sha256(s: &str) -> bool {
    if s.len() != 64 {
        return false;
    }
    s.chars().all(|c| c.is_ascii_hexdigit())
}
/// Walks an error's `source()` chain looking for a [`Sha256MismatchError`]
/// and returns its S3-style message when one is found.
pub fn sha256_mismatch_message(err: &(dyn Error + 'static)) -> Option<String> {
    let mut current = Some(err);
    while let Some(e) = current {
        if let Some(mismatch) = e.downcast_ref::<Sha256MismatchError>() {
            return Some(mismatch.message());
        }
        current = e.source();
    }
    None
}

View File

@@ -0,0 +1,276 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
/// Metadata key under which an object's serialized ACL is stored.
pub const ACL_METADATA_KEY: &str = "__acl__";
/// Grantee token matching every requester, anonymous included.
pub const GRANTEE_ALL_USERS: &str = "*";
/// Grantee token matching any authenticated requester.
pub const GRANTEE_AUTHENTICATED_USERS: &str = "authenticated";
// S3 canonical permission names.
const ACL_PERMISSION_FULL_CONTROL: &str = "FULL_CONTROL";
const ACL_PERMISSION_WRITE: &str = "WRITE";
const ACL_PERMISSION_WRITE_ACP: &str = "WRITE_ACP";
const ACL_PERMISSION_READ: &str = "READ";
const ACL_PERMISSION_READ_ACP: &str = "READ_ACP";
/// One (grantee, permission) pair of an ACL.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AclGrant {
    // Either a principal id, GRANTEE_ALL_USERS, or GRANTEE_AUTHENTICATED_USERS.
    pub grantee: String,
    // One of the ACL_PERMISSION_* names; unknown values grant no actions.
    pub permission: String,
}
/// Access-control list: an owner (who implicitly holds FULL_CONTROL) plus
/// zero or more explicit grants.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Acl {
    pub owner: String,
    #[serde(default)]
    pub grants: Vec<AclGrant>,
}
impl Acl {
    /// Computes the abstract actions ("read", "write", "delete", "list",
    /// "share") this ACL allows for a requester.
    ///
    /// The owner always receives the FULL_CONTROL action set. A grant applies
    /// when its grantee is the all-users marker, the authenticated-users
    /// marker (for authenticated requesters), or the requester's principal id.
    pub fn allowed_actions(
        &self,
        principal_id: Option<&str>,
        is_authenticated: bool,
    ) -> HashSet<&'static str> {
        let mut allowed = HashSet::new();
        if principal_id.map_or(false, |id| id == self.owner) {
            allowed.extend(permission_to_actions(ACL_PERMISSION_FULL_CONTROL));
        }
        for grant in &self.grants {
            let applies = if grant.grantee == GRANTEE_ALL_USERS {
                true
            } else if grant.grantee == GRANTEE_AUTHENTICATED_USERS && is_authenticated {
                true
            } else {
                principal_id == Some(grant.grantee.as_str())
            };
            if applies {
                allowed.extend(permission_to_actions(&grant.permission));
            }
        }
        allowed
    }
}
/// Translates an S3 canned-ACL name into a concrete [`Acl`].
///
/// The owner always receives FULL_CONTROL. `public-read` adds anonymous READ,
/// `public-read-write` adds anonymous READ and WRITE, and
/// `authenticated-read` adds READ for authenticated users. `private`,
/// `bucket-owner-read`, `bucket-owner-full-control` and any unrecognized
/// value all map to an owner-only ACL (the bucket-owner variants collapse to
/// `private` here — presumably because cross-account uploads don't exist in
/// this service; confirm if that changes).
pub fn create_canned_acl(canned_acl: &str, owner: &str) -> Acl {
    let owner_grant = AclGrant {
        grantee: owner.to_string(),
        permission: ACL_PERMISSION_FULL_CONTROL.to_string(),
    };
    match canned_acl {
        "public-read" => Acl {
            owner: owner.to_string(),
            grants: vec![
                owner_grant,
                AclGrant {
                    grantee: GRANTEE_ALL_USERS.to_string(),
                    permission: ACL_PERMISSION_READ.to_string(),
                },
            ],
        },
        "public-read-write" => Acl {
            owner: owner.to_string(),
            grants: vec![
                owner_grant,
                AclGrant {
                    grantee: GRANTEE_ALL_USERS.to_string(),
                    permission: ACL_PERMISSION_READ.to_string(),
                },
                AclGrant {
                    grantee: GRANTEE_ALL_USERS.to_string(),
                    permission: ACL_PERMISSION_WRITE.to_string(),
                },
            ],
        },
        "authenticated-read" => Acl {
            owner: owner.to_string(),
            grants: vec![
                owner_grant,
                AclGrant {
                    grantee: GRANTEE_AUTHENTICATED_USERS.to_string(),
                    permission: ACL_PERMISSION_READ.to_string(),
                },
            ],
        },
        // "private", "bucket-owner-read", "bucket-owner-full-control", and
        // unknown values: owner-only. (Previously written as
        // `"bucket-owner-read" | … | _`, where the named alternatives were
        // dead patterns — clippy::wildcard_in_or_patterns.)
        _ => Acl {
            owner: owner.to_string(),
            grants: vec![owner_grant],
        },
    }
}
/// Renders an [`Acl`] as an S3 `AccessControlPolicy` XML document.
///
/// Group grantees are emitted as the AWS global group URIs; all other
/// grantees are emitted as `CanonicalUser` with the grantee id doubling as
/// the display name. All user-supplied text is XML-escaped.
pub fn acl_to_xml(acl: &Acl) -> String {
    let mut xml = format!(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
        <Owner><ID>{}</ID><DisplayName>{}</DisplayName></Owner>\
        <AccessControlList>",
        xml_escape(&acl.owner),
        xml_escape(&acl.owner),
    );
    for grant in &acl.grants {
        xml.push_str("<Grant>");
        match grant.grantee.as_str() {
            GRANTEE_ALL_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                    <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>\
                    </Grantee>",
                );
            }
            GRANTEE_AUTHENTICATED_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                    <URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>\
                    </Grantee>",
                );
            }
            other => {
                xml.push_str(&format!(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
                    <ID>{}</ID><DisplayName>{}</DisplayName>\
                    </Grantee>",
                    xml_escape(other),
                    xml_escape(other),
                ));
            }
        }
        xml.push_str(&format!(
            "<Permission>{}</Permission></Grant>",
            xml_escape(&grant.permission)
        ));
    }
    xml.push_str("</AccessControlList></AccessControlPolicy>");
    xml
}
/// Parses a bucket-config ACL value, accepting either a string (tried as S3
/// XML first, then as serialized JSON) or an inline JSON object. Returns
/// `None` when nothing parses.
pub fn acl_from_bucket_config(value: &Value) -> Option<Acl> {
    match value {
        Value::String(raw) => {
            let from_xml = acl_from_xml(raw);
            from_xml.or_else(|| serde_json::from_str(raw).ok())
        }
        Value::Object(_) => serde_json::from_value(value.clone()).ok(),
        _ => None,
    }
}
/// Reads the ACL stored under [`ACL_METADATA_KEY`] in an object's metadata
/// map, if present and valid JSON.
pub fn acl_from_object_metadata(metadata: &HashMap<String, String>) -> Option<Acl> {
    let raw = metadata.get(ACL_METADATA_KEY)?;
    serde_json::from_str::<Acl>(raw).ok()
}
/// Serializes `acl` into the object metadata map under [`ACL_METADATA_KEY`];
/// a (practically impossible) serialization failure leaves the map untouched.
pub fn store_object_acl(metadata: &mut HashMap<String, String>, acl: &Acl) {
    let Ok(serialized) = serde_json::to_string(acl) else {
        return;
    };
    metadata.insert(ACL_METADATA_KEY.to_string(), serialized);
}
/// Parses an S3 `AccessControlPolicy` XML document into an [`Acl`].
///
/// Missing owner IDs fall back to "myfsio"; grants lacking a permission or a
/// resolvable grantee are silently skipped. Returns `None` only when the
/// input is not well-formed XML.
fn acl_from_xml(xml: &str) -> Option<Acl> {
    let doc = roxmltree::Document::parse(xml).ok()?;
    let owner = doc
        .descendants()
        .find(|node| node.is_element() && node.tag_name().name() == "Owner")
        .and_then(|node| {
            node.children()
                .find(|child| child.is_element() && child.tag_name().name() == "ID")
                .and_then(|child| child.text())
        })
        .unwrap_or("myfsio")
        .trim()
        .to_string();
    let mut grants = Vec::new();
    for grant in doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Grant")
    {
        let permission = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Permission")
            .and_then(|child| child.text())
            .unwrap_or_default()
            .trim()
            .to_string();
        if permission.is_empty() {
            continue;
        }
        let grantee_node = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Grantee");
        // Map the AWS global group URIs to the internal grantee markers;
        // anything else is treated as a canonical user identified by <ID>.
        let grantee = grantee_node
            .and_then(|node| {
                let uri = node
                    .children()
                    .find(|child| child.is_element() && child.tag_name().name() == "URI")
                    .and_then(|child| child.text())
                    .map(|text| text.trim().to_string());
                match uri.as_deref() {
                    Some("http://acs.amazonaws.com/groups/global/AllUsers") => {
                        Some(GRANTEE_ALL_USERS.to_string())
                    }
                    Some("http://acs.amazonaws.com/groups/global/AuthenticatedUsers") => {
                        Some(GRANTEE_AUTHENTICATED_USERS.to_string())
                    }
                    _ => node
                        .children()
                        .find(|child| child.is_element() && child.tag_name().name() == "ID")
                        .and_then(|child| child.text())
                        .map(|text| text.trim().to_string()),
                }
            })
            .unwrap_or_default();
        if grantee.is_empty() {
            continue;
        }
        grants.push(AclGrant {
            grantee,
            permission,
        });
    }
    Some(Acl { owner, grants })
}
fn permission_to_actions(permission: &str) -> &'static [&'static str] {
match permission {
ACL_PERMISSION_FULL_CONTROL => &["read", "write", "delete", "list", "share"],
ACL_PERMISSION_WRITE => &["write", "delete"],
ACL_PERMISSION_WRITE_ACP => &["share"],
ACL_PERMISSION_READ => &["read", "list"],
ACL_PERMISSION_READ_ACP => &["share"],
_ => &[],
}
}
/// Escapes the five XML special characters so `s` can be embedded in element
/// text or attribute values.
fn xml_escape(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for ch in s.chars() {
        match ch {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            '"' => out.push_str("&quot;"),
            '\'' => out.push_str("&apos;"),
            other => out.push(other),
        }
    }
    out
}
#[cfg(test)]
mod tests {
    use super::*;
    // Anonymous requesters get READ-derived actions from public-read, nothing more.
    #[test]
    fn canned_acl_grants_public_read() {
        let acl = create_canned_acl("public-read", "owner");
        let actions = acl.allowed_actions(None, false);
        assert!(actions.contains("read"));
        assert!(actions.contains("list"));
        assert!(!actions.contains("write"));
    }
    // acl_to_xml -> acl_from_bucket_config must survive a round trip.
    #[test]
    fn xml_round_trip_preserves_grants() {
        let acl = create_canned_acl("authenticated-read", "owner");
        let parsed = acl_from_bucket_config(&Value::String(acl_to_xml(&acl))).unwrap();
        assert_eq!(parsed.owner, "owner");
        assert_eq!(parsed.grants.len(), 2);
        assert!(parsed
            .grants
            .iter()
            .any(|grant| grant.grantee == GRANTEE_AUTHENTICATED_USERS));
    }
}

View File

@@ -9,6 +9,7 @@ pub struct GcConfig {
pub temp_file_max_age_hours: f64,
pub multipart_max_age_days: u64,
pub lock_file_max_age_hours: f64,
pub quarantine_max_age_days: u64,
pub dry_run: bool,
}
@@ -19,11 +20,41 @@ impl Default for GcConfig {
temp_file_max_age_hours: 24.0,
multipart_max_age_days: 7,
lock_file_max_age_hours: 1.0,
quarantine_max_age_days: 7,
dry_run: false,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // With temp_file_max_age_hours = 0 every temp file is immediately stale
    // (the sleep guarantees a non-zero age); dry_run must count the file in
    // the report while leaving it on disk.
    #[tokio::test]
    async fn dry_run_reports_but_does_not_delete_temp_files() {
        let tmp = tempfile::tempdir().unwrap();
        let tmp_dir = tmp.path().join(".myfsio.sys").join("tmp");
        std::fs::create_dir_all(&tmp_dir).unwrap();
        let file_path = tmp_dir.join("stale.tmp");
        std::fs::write(&file_path, b"temporary").unwrap();
        tokio::time::sleep(std::time::Duration::from_millis(5)).await;
        let service = GcService::new(
            tmp.path().to_path_buf(),
            GcConfig {
                temp_file_max_age_hours: 0.0,
                dry_run: true,
                ..GcConfig::default()
            },
        );
        let result = service.run_now(false).await.unwrap();
        assert_eq!(result["temp_files_deleted"], 1);
        assert!(file_path.exists());
    }
}
pub struct GcService {
storage_root: PathBuf,
config: GcConfig,
@@ -77,6 +108,7 @@ impl GcService {
"temp_file_max_age_hours": self.config.temp_file_max_age_hours,
"multipart_max_age_days": self.config.multipart_max_age_days,
"lock_file_max_age_hours": self.config.lock_file_max_age_hours,
"quarantine_max_age_days": self.config.quarantine_max_age_days,
"dry_run": self.config.dry_run,
})
}
@@ -135,6 +167,8 @@ impl GcService {
let mut multipart_uploads_deleted = 0u64;
let mut lock_files_deleted = 0u64;
let mut empty_dirs_removed = 0u64;
let mut quarantine_entries_deleted = 0u64;
let mut quarantine_bytes_freed = 0u64;
let mut errors: Vec<String> = Vec::new();
let now = std::time::SystemTime::now();
@@ -144,6 +178,8 @@ impl GcService {
std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
let lock_max_age =
std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);
let quarantine_max_age =
std::time::Duration::from_secs(self.config.quarantine_max_age_days * 86400);
let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
if tmp_dir.exists() {
@@ -227,6 +263,55 @@ impl GcService {
}
}
let quarantine_dir = self.storage_root.join(".myfsio.sys").join("quarantine");
if quarantine_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&quarantine_dir) {
for bucket_entry in bucket_dirs.flatten() {
if !bucket_entry.path().is_dir() {
continue;
}
if let Ok(ts_dirs) = std::fs::read_dir(bucket_entry.path()) {
for ts_entry in ts_dirs.flatten() {
let ts_path = ts_entry.path();
if !ts_path.is_dir() {
continue;
}
let modified = ts_entry.metadata().ok().and_then(|m| m.modified().ok());
let Some(modified) = modified else {
continue;
};
let Ok(age) = now.duration_since(modified) else {
continue;
};
if age <= quarantine_max_age {
continue;
}
let bytes = dir_total_bytes(&ts_path);
if !dry_run {
if let Err(e) = std::fs::remove_dir_all(&ts_path) {
errors.push(format!(
"Failed to remove quarantine {}: {}",
ts_path.display(),
e
));
continue;
}
}
quarantine_entries_deleted += 1;
quarantine_bytes_freed += bytes;
}
}
if !dry_run {
if let Ok(mut remaining) = std::fs::read_dir(bucket_entry.path()) {
if remaining.next().is_none() {
let _ = std::fs::remove_dir(bucket_entry.path());
}
}
}
}
}
}
if !dry_run {
for dir in [&tmp_dir, &multipart_dir] {
if dir.exists() {
@@ -252,6 +337,8 @@ impl GcService {
"multipart_uploads_deleted": multipart_uploads_deleted,
"lock_files_deleted": lock_files_deleted,
"empty_dirs_removed": empty_dirs_removed,
"quarantine_entries_deleted": quarantine_entries_deleted,
"quarantine_bytes_freed": quarantine_bytes_freed,
"errors": errors,
})
}
@@ -284,3 +371,22 @@ impl GcService {
})
}
}
/// Sums the sizes of all regular files under `path`, recursively (iterative
/// traversal, so arbitrarily deep trees cannot overflow the call stack).
///
/// Unreadable directories and entries are skipped, symlinks are neither
/// followed nor counted, and the running total saturates instead of wrapping.
fn dir_total_bytes(path: &std::path::Path) -> u64 {
    let mut total = 0u64;
    let mut pending = vec![path.to_path_buf()];
    while let Some(dir) = pending.pop() {
        let entries = match std::fs::read_dir(&dir) {
            Ok(entries) => entries,
            Err(_) => continue,
        };
        for entry in entries.flatten() {
            match entry.file_type() {
                Ok(ft) if ft.is_dir() => pending.push(entry.path()),
                Ok(ft) if ft.is_file() => {
                    let len = entry.metadata().map(|m| m.len()).unwrap_or(0);
                    total = total.saturating_add(len);
                }
                _ => {}
            }
        }
    }
    total
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,637 @@
use chrono::{DateTime, Duration, Utc};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
/// Tuning for the background lifecycle evaluator.
pub struct LifecycleConfig {
    // Seconds between background evaluation cycles.
    pub interval_seconds: u64,
    // Cap on retained execution records per bucket history file.
    pub max_history_per_bucket: usize,
}
impl Default for LifecycleConfig {
    /// Hourly cycles, keeping the 50 most recent executions per bucket.
    fn default() -> Self {
        Self {
            interval_seconds: 3600,
            max_history_per_bucket: 50,
        }
    }
}
/// One persisted entry in a bucket's lifecycle execution history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleExecutionRecord {
    // Unix time (seconds, fractional) when the record was written.
    pub timestamp: f64,
    pub bucket_name: String,
    pub objects_deleted: u64,
    pub versions_deleted: u64,
    pub uploads_aborted: u64,
    #[serde(default)]
    pub errors: Vec<String>,
    pub execution_time_seconds: f64,
}
/// In-memory tally while evaluating a single bucket's rules.
#[derive(Debug, Clone, Default)]
struct BucketLifecycleResult {
    bucket_name: String,
    objects_deleted: u64,
    versions_deleted: u64,
    uploads_aborted: u64,
    errors: Vec<String>,
    execution_time_seconds: f64,
}
/// Normalized form of one lifecycle rule, regardless of whether it was stored
/// as XML or JSON.
#[derive(Debug, Clone, Default)]
struct ParsedLifecycleRule {
    // Rule is applied only when this equals "Enabled".
    status: String,
    // Key prefix filter; empty matches every key.
    prefix: String,
    // Current-version expiration by age (days) …
    expiration_days: Option<u64>,
    // … or by absolute date; days takes precedence when both are set.
    expiration_date: Option<DateTime<Utc>>,
    // Age (days) after which archived noncurrent versions are deleted.
    noncurrent_days: Option<u64>,
    // Age (days) after which incomplete multipart uploads are aborted.
    abort_incomplete_multipart_days: Option<u64>,
}
/// Background service that evaluates per-bucket lifecycle rules.
pub struct LifecycleService {
    storage: Arc<FsStorageBackend>,
    // On-disk root; used for direct access to version manifests and history files.
    storage_root: PathBuf,
    config: LifecycleConfig,
    // Guards against overlapping evaluation cycles.
    running: Arc<RwLock<bool>>,
}
impl LifecycleService {
    /// Creates a service over the given storage backend and on-disk root.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: impl Into<PathBuf>,
        config: LifecycleConfig,
    ) -> Self {
        Self {
            storage,
            storage_root: storage_root.into(),
            config,
            running: Arc::new(RwLock::new(false)),
        }
    }
    /// Runs one evaluation cycle, refusing to overlap a cycle already in
    /// flight. Returns the summary JSON produced by `evaluate_rules`.
    pub async fn run_cycle(&self) -> Result<Value, String> {
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Lifecycle already running".to_string());
            }
            *running = true;
        }
        // NOTE(review): the flag is cleared only on this path; a panic inside
        // evaluate_rules would leave `running` stuck at true.
        let result = self.evaluate_rules().await;
        *self.running.write().await = false;
        Ok(result)
    }
    /// Walks every bucket, applies its enabled lifecycle rules, and returns a
    /// summary. Buckets with no activity and no errors are omitted from both
    /// the results and the persisted history.
    async fn evaluate_rules(&self) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(buckets) => buckets,
            Err(err) => return json!({ "error": err.to_string() }),
        };
        let mut bucket_results = Vec::new();
        let mut total_objects_deleted = 0u64;
        let mut total_versions_deleted = 0u64;
        let mut total_uploads_aborted = 0u64;
        let mut errors = Vec::new();
        for bucket in &buckets {
            let started_at = std::time::Instant::now();
            let mut result = BucketLifecycleResult {
                bucket_name: bucket.name.clone(),
                ..Default::default()
            };
            let config = match self.storage.get_bucket_config(&bucket.name).await {
                Ok(config) => config,
                Err(err) => {
                    // Config read failures are recorded in history too, so
                    // persistent problems stay visible per bucket.
                    result.errors.push(err.to_string());
                    result.execution_time_seconds = started_at.elapsed().as_secs_f64();
                    self.append_history(&result);
                    errors.extend(result.errors.clone());
                    bucket_results.push(result);
                    continue;
                }
            };
            let Some(lifecycle) = config.lifecycle.as_ref() else {
                continue;
            };
            let rules = parse_lifecycle_rules(lifecycle);
            if rules.is_empty() {
                continue;
            }
            for rule in &rules {
                if rule.status != "Enabled" {
                    continue;
                }
                if let Some(err) = self
                    .apply_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_noncurrent_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_abort_incomplete_multipart_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
            }
            result.execution_time_seconds = started_at.elapsed().as_secs_f64();
            // Only record buckets where something happened (or failed).
            if result.objects_deleted > 0
                || result.versions_deleted > 0
                || result.uploads_aborted > 0
                || !result.errors.is_empty()
            {
                total_objects_deleted += result.objects_deleted;
                total_versions_deleted += result.versions_deleted;
                total_uploads_aborted += result.uploads_aborted;
                errors.extend(result.errors.clone());
                self.append_history(&result);
                bucket_results.push(result);
            }
        }
        json!({
            "objects_deleted": total_objects_deleted,
            "versions_deleted": total_versions_deleted,
            "multipart_aborted": total_uploads_aborted,
            "buckets_evaluated": buckets.len(),
            "results": bucket_results.iter().map(result_to_json).collect::<Vec<_>>(),
            "errors": errors,
        })
    }
    /// Deletes current objects older than the rule's cutoff (age in days takes
    /// precedence over an absolute date). Only the first page of up to 10,000
    /// keys per prefix is examined in a single cycle. Returns a listing error,
    /// if any; per-object delete failures are appended to `result.errors`.
    async fn apply_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let cutoff = if let Some(days) = rule.expiration_days {
            Some(Utc::now() - Duration::days(days as i64))
        } else {
            rule.expiration_date
        };
        let Some(cutoff) = cutoff else {
            return None;
        };
        let params = myfsio_common::types::ListParams {
            max_keys: 10_000,
            prefix: if rule.prefix.is_empty() {
                None
            } else {
                Some(rule.prefix.clone())
            },
            ..Default::default()
        };
        match self.storage.list_objects(bucket, &params).await {
            Ok(objects) => {
                for object in &objects.objects {
                    if object.last_modified < cutoff {
                        if let Err(err) = self.storage.delete_object(bucket, &object.key).await {
                            result
                                .errors
                                .push(format!("{}:{}: {}", bucket, object.key, err));
                        } else {
                            result.objects_deleted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!("Failed to list objects for {}: {}", bucket, err)),
        }
    }
    /// Deletes archived noncurrent versions older than `noncurrent_days` by
    /// walking the bucket's on-disk version manifests (`*.json`) and removing
    /// each stale manifest together with its `<version_id>.bin` payload.
    /// A directory-read failure aborts the walk and is returned as the error.
    async fn apply_noncurrent_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.noncurrent_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        let versions_root = version_root_for_bucket(&self.storage_root, bucket);
        if !versions_root.exists() {
            return None;
        }
        let mut stack = VecDeque::from([versions_root]);
        while let Some(current) = stack.pop_front() {
            let entries = match std::fs::read_dir(&current) {
                Ok(entries) => entries,
                Err(err) => return Some(err.to_string()),
            };
            for entry in entries.flatten() {
                let file_type = match entry.file_type() {
                    Ok(file_type) => file_type,
                    Err(_) => continue,
                };
                if file_type.is_dir() {
                    stack.push_back(entry.path());
                    continue;
                }
                if entry.path().extension().and_then(|ext| ext.to_str()) != Some("json") {
                    continue;
                }
                let contents = match std::fs::read_to_string(entry.path()) {
                    Ok(contents) => contents,
                    Err(_) => continue,
                };
                let Ok(manifest) = serde_json::from_str::<Value>(&contents) else {
                    continue;
                };
                let key = manifest
                    .get("key")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default()
                    .to_string();
                if !rule.prefix.is_empty() && !key.starts_with(&rule.prefix) {
                    continue;
                }
                let archived_at = manifest
                    .get("archived_at")
                    .and_then(|value| value.as_str())
                    .and_then(|value| DateTime::parse_from_rfc3339(value).ok())
                    .map(|value| value.with_timezone(&Utc));
                // Manifests without a parseable archive time are never expired.
                if archived_at.is_none() || archived_at.unwrap() >= cutoff {
                    continue;
                }
                let version_id = manifest
                    .get("version_id")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default();
                // Best-effort removal of payload and manifest; counted even if
                // the files were already gone.
                let data_path = entry.path().with_file_name(format!("{}.bin", version_id));
                let _ = std::fs::remove_file(&data_path);
                let _ = std::fs::remove_file(entry.path());
                result.versions_deleted += 1;
            }
        }
        None
    }
    /// Aborts multipart uploads initiated before the rule's day cutoff.
    /// Returns a listing error, if any; per-upload abort failures are
    /// appended to `result.errors`.
    async fn apply_abort_incomplete_multipart_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.abort_incomplete_multipart_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        match self.storage.list_multipart_uploads(bucket).await {
            Ok(uploads) => {
                for upload in &uploads {
                    if upload.initiated < cutoff {
                        if let Err(err) = self
                            .storage
                            .abort_multipart(bucket, &upload.upload_id)
                            .await
                        {
                            result
                                .errors
                                .push(format!("abort {}: {}", upload.upload_id, err));
                        } else {
                            result.uploads_aborted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!(
                "Failed to list multipart uploads for {}: {}",
                bucket, err
            )),
        }
    }
    /// Prepends an execution record to the bucket's history file (newest
    /// first), truncated to `max_history_per_bucket`. Writes are best-effort:
    /// I/O failures are silently ignored.
    fn append_history(&self, result: &BucketLifecycleResult) {
        let path = lifecycle_history_path(&self.storage_root, &result.bucket_name);
        let mut history = load_history(&path);
        history.insert(
            0,
            LifecycleExecutionRecord {
                timestamp: Utc::now().timestamp_millis() as f64 / 1000.0,
                bucket_name: result.bucket_name.clone(),
                objects_deleted: result.objects_deleted,
                versions_deleted: result.versions_deleted,
                uploads_aborted: result.uploads_aborted,
                errors: result.errors.clone(),
                execution_time_seconds: result.execution_time_seconds,
            },
        );
        history.truncate(self.config.max_history_per_bucket);
        let payload = json!({
            "executions": history,
        });
        if let Some(parent) = path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &path,
            serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string()),
        );
    }
    /// Spawns the periodic evaluation loop. The initial (immediate) interval
    /// tick is consumed before the loop, so the first cycle runs only after
    /// one full `interval_seconds` period.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_seconds);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Lifecycle evaluation starting");
                match self.run_cycle().await {
                    Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
                    Err(err) => tracing::warn!("Lifecycle cycle failed: {}", err),
                }
            }
        })
    }
}
/// Returns one JSON page of a bucket's lifecycle execution history.
///
/// `total` always reflects the full stored history length; an `offset` past
/// the end simply yields an empty page.
pub fn read_history(storage_root: &Path, bucket_name: &str, limit: usize, offset: usize) -> Value {
    let history = load_history(&lifecycle_history_path(storage_root, bucket_name));
    let total = history.len();
    let executions: Vec<_> = history.into_iter().skip(offset).take(limit).collect();
    json!({
        "executions": executions,
        "total": total,
        "limit": limit,
        "offset": offset,
        "enabled": true,
    })
}
/// Read the persisted execution history at `path`, tolerating a missing
/// file or malformed contents by returning an empty list.
fn load_history(path: &Path) -> Vec<LifecycleExecutionRecord> {
    if !path.exists() {
        return Vec::new();
    }
    let Ok(contents) = std::fs::read_to_string(path) else {
        return Vec::new();
    };
    // The file stores `{ "executions": [...] }`; anything that does not
    // deserialize cleanly is treated as empty history.
    serde_json::from_str::<Value>(&contents)
        .ok()
        .and_then(|doc| {
            let executions = doc.get("executions")?.clone();
            serde_json::from_value::<Vec<LifecycleExecutionRecord>>(executions).ok()
        })
        .unwrap_or_default()
}
/// Location of the persisted lifecycle execution history for a bucket:
/// `<root>/.myfsio.sys/buckets/<bucket>/lifecycle_history.json`.
fn lifecycle_history_path(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    for part in [".myfsio.sys", "buckets", bucket_name, "lifecycle_history.json"] {
        path.push(part);
    }
    path
}
/// Directory holding archived object versions for a bucket:
/// `<root>/.myfsio.sys/buckets/<bucket>/versions`.
fn version_root_for_bucket(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    for part in [".myfsio.sys", "buckets", bucket_name, "versions"] {
        path.push(part);
    }
    path
}
/// Interpret a stored lifecycle configuration.
///
/// Accepts a raw string (JSON or XML), a bare array of rule objects, or
/// an object carrying a `Rules` array; any other shape yields no rules.
fn parse_lifecycle_rules(value: &Value) -> Vec<ParsedLifecycleRule> {
    match value {
        Value::String(raw) => parse_lifecycle_rules_from_string(raw),
        Value::Array(items) => items.iter().filter_map(parse_lifecycle_rule).collect(),
        Value::Object(map) => match map.get("Rules").and_then(Value::as_array) {
            Some(rules) => rules.iter().filter_map(parse_lifecycle_rule).collect(),
            None => Vec::new(),
        },
        _ => Vec::new(),
    }
}
/// Parse lifecycle rules from a raw string that may contain either JSON
/// or S3-style XML. JSON is tried first; on XML parse failure the result
/// is empty rather than an error (lifecycle configs are best-effort).
fn parse_lifecycle_rules_from_string(raw: &str) -> Vec<ParsedLifecycleRule> {
    if let Ok(json) = serde_json::from_str::<Value>(raw) {
        return parse_lifecycle_rules(&json);
    }
    let Ok(doc) = roxmltree::Document::parse(raw) else {
        return Vec::new();
    };
    // First descendant element of `node` with the given tag name.
    fn find_elem<'a, 'i>(
        node: &roxmltree::Node<'a, 'i>,
        name: &str,
    ) -> Option<roxmltree::Node<'a, 'i>> {
        node.descendants()
            .find(|n| n.is_element() && n.tag_name().name() == name)
    }
    doc.descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Rule")
        .map(|rule| {
            // Look up <Expiration> once and reuse it for Days and Date
            // (the original scanned the rule's descendants twice).
            let expiration = find_elem(&rule, "Expiration");
            ParsedLifecycleRule {
                status: child_text(&rule, "Status").unwrap_or_else(|| "Enabled".to_string()),
                // Prefer a direct <Prefix>; fall back to <Filter><Prefix>.
                prefix: child_text(&rule, "Prefix")
                    .or_else(|| {
                        rule.descendants()
                            .find(|node| {
                                node.is_element()
                                    && node.tag_name().name() == "Filter"
                                    && node.children().any(|child| {
                                        child.is_element() && child.tag_name().name() == "Prefix"
                                    })
                            })
                            .and_then(|filter| child_text(&filter, "Prefix"))
                    })
                    .unwrap_or_default(),
                expiration_days: expiration
                    .as_ref()
                    .and_then(|node| child_text(node, "Days"))
                    .and_then(|value| value.parse::<u64>().ok()),
                expiration_date: expiration
                    .as_ref()
                    .and_then(|node| child_text(node, "Date"))
                    .as_deref()
                    .and_then(parse_datetime),
                noncurrent_days: find_elem(&rule, "NoncurrentVersionExpiration")
                    .as_ref()
                    .and_then(|node| child_text(node, "NoncurrentDays"))
                    .and_then(|value| value.parse::<u64>().ok()),
                abort_incomplete_multipart_days: find_elem(&rule, "AbortIncompleteMultipartUpload")
                    .as_ref()
                    .and_then(|node| child_text(node, "DaysAfterInitiation"))
                    .and_then(|value| value.parse::<u64>().ok()),
            }
        })
        .collect()
}
fn parse_lifecycle_rule(value: &Value) -> Option<ParsedLifecycleRule> {
let map = value.as_object()?;
Some(ParsedLifecycleRule {
status: map
.get("Status")
.and_then(|value| value.as_str())
.unwrap_or("Enabled")
.to_string(),
prefix: map
.get("Prefix")
.and_then(|value| value.as_str())
.or_else(|| {
map.get("Filter")
.and_then(|value| value.get("Prefix"))
.and_then(|value| value.as_str())
})
.unwrap_or_default()
.to_string(),
expiration_days: map
.get("Expiration")
.and_then(|value| value.get("Days"))
.and_then(|value| value.as_u64()),
expiration_date: map
.get("Expiration")
.and_then(|value| value.get("Date"))
.and_then(|value| value.as_str())
.and_then(parse_datetime),
noncurrent_days: map
.get("NoncurrentVersionExpiration")
.and_then(|value| value.get("NoncurrentDays"))
.and_then(|value| value.as_u64()),
abort_incomplete_multipart_days: map
.get("AbortIncompleteMultipartUpload")
.and_then(|value| value.get("DaysAfterInitiation"))
.and_then(|value| value.as_u64()),
})
}
/// Parse an RFC 3339 timestamp and normalize it to UTC; `None` on
/// invalid input.
fn parse_datetime(value: &str) -> Option<DateTime<Utc>> {
    match DateTime::parse_from_rfc3339(value) {
        Ok(parsed) => Some(parsed.with_timezone(&Utc)),
        Err(_) => None,
    }
}
/// Trimmed text of the first direct child element named `name`.
/// `None` when no such child exists, it has no text node, or the
/// trimmed text is empty.
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
    let first = node
        .children()
        .find(|child| child.is_element() && child.tag_name().name() == name)?;
    let trimmed = first.text()?.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
fn result_to_json(result: &BucketLifecycleResult) -> Value {
json!({
"bucket_name": result.bucket_name,
"objects_deleted": result.objects_deleted,
"versions_deleted": result.versions_deleted,
"uploads_aborted": result.uploads_aborted,
"errors": result.errors,
"execution_time_seconds": result.execution_time_seconds,
})
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;
    // XML parsing should surface status, Filter/Prefix, and all three
    // day-based settings from a single <Rule>.
    #[test]
    fn parses_rules_from_xml() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<Expiration><Days>10</Days></Expiration>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
<AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
</Rule>
</LifecycleConfiguration>"#;
        let rules = parse_lifecycle_rules(&Value::String(xml.to_string()));
        assert_eq!(rules.len(), 1);
        assert_eq!(rules[0].prefix, "logs/");
        assert_eq!(rules[0].expiration_days, Some(10));
        assert_eq!(rules[0].noncurrent_days, Some(30));
        assert_eq!(rules[0].abort_incomplete_multipart_days, Some(7));
    }
    // End-to-end: a noncurrent version older than the rule's 30-day cutoff
    // is deleted by run_cycle, and the run is recorded in the history file.
    #[tokio::test]
    async fn run_cycle_writes_history_and_deletes_noncurrent_versions() {
        let tmp = tempfile::tempdir().unwrap();
        let storage = Arc::new(FsStorageBackend::new(tmp.path().to_path_buf()));
        storage.create_bucket("docs").await.unwrap();
        storage.set_versioning("docs", true).await.unwrap();
        // First PUT creates the object; second PUT archives it as a
        // noncurrent version.
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"old".to_vec())),
                None,
            )
            .await
            .unwrap();
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"new".to_vec())),
                None,
            )
            .await
            .unwrap();
        let versions_root = version_root_for_bucket(tmp.path(), "docs")
            .join("logs")
            .join("file.txt");
        // Locate the archived version's JSON manifest on disk.
        let manifest = std::fs::read_dir(&versions_root)
            .unwrap()
            .flatten()
            .find(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json"))
            .unwrap()
            .path();
        // Rewrite the manifest so the version appears 45 days old —
        // well past the 30-day NoncurrentDays threshold below.
        let old_manifest = json!({
            "version_id": "ver-1",
            "key": "logs/file.txt",
            "size": 3,
            "archived_at": (Utc::now() - Duration::days(45)).to_rfc3339(),
            "etag": "etag",
        });
        std::fs::write(&manifest, serde_json::to_string(&old_manifest).unwrap()).unwrap();
        std::fs::write(manifest.with_file_name("ver-1.bin"), b"old").unwrap();
        let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
</Rule>
</LifecycleConfiguration>"#;
        let mut config = storage.get_bucket_config("docs").await.unwrap();
        config.lifecycle = Some(Value::String(lifecycle_xml.to_string()));
        storage.set_bucket_config("docs", &config).await.unwrap();
        let service =
            LifecycleService::new(storage.clone(), tmp.path(), LifecycleConfig::default());
        let result = service.run_cycle().await.unwrap();
        assert_eq!(result["versions_deleted"], 1);
        // The cycle must also have been appended to the on-disk history.
        let history = read_history(tmp.path(), "docs", 50, 0);
        assert_eq!(history["total"], 1);
        assert_eq!(history["executions"][0]["versions_deleted"], 1);
    }
}

View File

@@ -1,8 +1,13 @@
pub mod access_logging;
pub mod acl;
pub mod gc;
pub mod integrity;
pub mod lifecycle;
pub mod metrics;
pub mod notifications;
pub mod object_lock;
pub mod peer_admin;
pub mod peer_fetch;
pub mod replication;
pub mod s3_client;
pub mod site_registry;

View File

@@ -0,0 +1,296 @@
use crate::state::AppState;
use chrono::{DateTime, Utc};
use myfsio_storage::traits::StorageEngine;
use serde::Serialize;
use serde_json::json;
/// Delivery target for bucket notifications: a webhook endpoint URL.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WebhookDestination {
    pub url: String,
}
/// One parsed webhook notification rule for a bucket.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NotificationConfiguration {
    // Rule identifier: the <Id> element, or a generated UUID when absent.
    pub id: String,
    // Event name patterns, e.g. "s3:ObjectCreated:*"; a trailing '*'
    // acts as a prefix wildcard in matches_event.
    pub events: Vec<String>,
    pub destination: WebhookDestination,
    // Object-key prefix filter; empty string matches every key.
    pub prefix_filter: String,
    // Object-key suffix filter; empty string matches every key.
    pub suffix_filter: String,
}
/// A single S3-style event record serialized with AWS's camelCase field
/// names. One container-level `rename_all` replaces the nine per-field
/// `#[serde(rename)]` attributes; the wire names are unchanged
/// (`eventVersion`, `eventSource`, ..., and `s3` stays `s3`).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NotificationEvent {
    event_version: &'static str,
    event_source: &'static str,
    aws_region: &'static str,
    event_time: String,
    event_name: String,
    user_identity: serde_json::Value,
    request_parameters: serde_json::Value,
    response_elements: serde_json::Value,
    s3: serde_json::Value,
}
impl NotificationConfiguration {
    /// Decide whether this configuration applies to an event.
    ///
    /// The event name must match one of the configured patterns (a
    /// trailing `*` makes the pattern a prefix match), and the object key
    /// must satisfy the prefix/suffix filters, each of which is skipped
    /// when empty.
    pub fn matches_event(&self, event_name: &str, object_key: &str) -> bool {
        let name_matches = self
            .events
            .iter()
            .any(|pattern| match pattern.strip_suffix('*') {
                Some(prefix) => event_name.starts_with(prefix),
                None => pattern == event_name,
            });
        name_matches
            && (self.prefix_filter.is_empty() || object_key.starts_with(&self.prefix_filter))
            && (self.suffix_filter.is_empty() || object_key.ends_with(&self.suffix_filter))
    }
}
/// Parse an S3 `NotificationConfiguration` XML document into webhook
/// configurations.
///
/// Each `<WebhookConfiguration>` yields one entry. A missing `<Id>` is
/// replaced by a fresh UUID; a missing or blank `<Destination><Url>` is
/// an error. `<Filter><S3Key><FilterRule>` entries named `prefix` /
/// `suffix` populate the key filters.
pub fn parse_notification_configurations(
    xml: &str,
) -> Result<Vec<NotificationConfiguration>, String> {
    let doc = roxmltree::Document::parse(xml).map_err(|err| err.to_string())?;
    let mut configs = Vec::new();
    let webhooks = doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "WebhookConfiguration");
    for webhook in webhooks {
        let id = child_text(&webhook, "Id").unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
        let mut events = Vec::new();
        for event in webhook
            .children()
            .filter(|node| node.is_element() && node.tag_name().name() == "Event")
        {
            if let Some(text) = event.text() {
                let trimmed = text.trim();
                if !trimmed.is_empty() {
                    events.push(trimmed.to_string());
                }
            }
        }
        let url = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Destination")
            .and_then(|node| child_text(&node, "Url"))
            .unwrap_or_default();
        if url.trim().is_empty() {
            return Err("Destination URL is required".to_string());
        }
        let mut prefix_filter = String::new();
        let mut suffix_filter = String::new();
        // Filter rules live under <Filter><S3Key>; rules with unknown
        // names are ignored, later rules overwrite earlier ones.
        let s3_key = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Filter")
            .and_then(|filter| {
                filter
                    .children()
                    .find(|node| node.is_element() && node.tag_name().name() == "S3Key")
            });
        if let Some(key) = s3_key {
            for rule in key
                .children()
                .filter(|node| node.is_element() && node.tag_name().name() == "FilterRule")
            {
                let value = child_text(&rule, "Value").unwrap_or_default();
                match child_text(&rule, "Name").as_deref() {
                    Some("prefix") => prefix_filter = value,
                    Some("suffix") => suffix_filter = value,
                    _ => {}
                }
            }
        }
        configs.push(NotificationConfiguration {
            id,
            events,
            destination: WebhookDestination { url },
            prefix_filter,
            suffix_filter,
        });
    }
    Ok(configs)
}
/// Fire an `s3:ObjectCreated:<operation>` notification for `bucket`/`key`.
/// Delivery happens asynchronously; this returns immediately.
pub fn emit_object_created(
    state: &AppState,
    bucket: &str,
    key: &str,
    size: u64,
    etag: Option<&str>,
    request_id: &str,
    source_ip: &str,
    user_identity: &str,
    operation: &str,
) {
    let event_name = format!("s3:ObjectCreated:{}", operation);
    let etag_value = etag.map(str::to_string).unwrap_or_default();
    emit_notifications(
        state.clone(),
        bucket.to_owned(),
        key.to_owned(),
        event_name,
        size,
        etag_value,
        request_id.to_owned(),
        source_ip.to_owned(),
        user_identity.to_owned(),
    );
}
/// Fire an `s3:ObjectRemoved:<operation>` notification for `bucket`/`key`.
/// Removed objects carry no size or ETag. Delivery is asynchronous.
pub fn emit_object_removed(
    state: &AppState,
    bucket: &str,
    key: &str,
    request_id: &str,
    source_ip: &str,
    user_identity: &str,
    operation: &str,
) {
    let event_name = format!("s3:ObjectRemoved:{}", operation);
    emit_notifications(
        state.clone(),
        bucket.to_owned(),
        key.to_owned(),
        event_name,
        0,
        String::new(),
        request_id.to_owned(),
        source_ip.to_owned(),
        user_identity.to_owned(),
    );
}
/// Fire-and-forget delivery of a bucket notification event.
///
/// Spawns a background task that loads the bucket's notification config
/// (only a string-typed config is honored), parses it, and POSTs an
/// S3-style `Records` payload to every matching webhook. All failures
/// are logged and swallowed; callers never block on delivery.
fn emit_notifications(
    state: AppState,
    bucket: String,
    key: String,
    event_name: String,
    size: u64,
    etag: String,
    request_id: String,
    source_ip: String,
    user_identity: String,
) {
    tokio::spawn(async move {
        let config = match state.storage.get_bucket_config(&bucket).await {
            Ok(config) => config,
            Err(_) => return,
        };
        let raw = match config.notification {
            Some(serde_json::Value::String(raw)) => raw,
            _ => return,
        };
        let configs = match parse_notification_configurations(&raw) {
            Ok(configs) => configs,
            Err(err) => {
                tracing::warn!("Invalid notification config for bucket {}: {}", bucket, err);
                return;
            }
        };
        // Shared template; the configurationId is patched per matching
        // config below.
        let record = NotificationEvent {
            event_version: "2.1",
            event_source: "myfsio:s3",
            aws_region: "local",
            event_time: format_event_time(Utc::now()),
            event_name: event_name.clone(),
            user_identity: json!({ "principalId": if user_identity.is_empty() { "ANONYMOUS" } else { &user_identity } }),
            request_parameters: json!({ "sourceIPAddress": if source_ip.is_empty() { "127.0.0.1" } else { &source_ip } }),
            response_elements: json!({
                "x-amz-request-id": request_id,
                "x-amz-id-2": request_id,
            }),
            s3: json!({
                "s3SchemaVersion": "1.0",
                "configurationId": "notification",
                "bucket": {
                    "name": bucket,
                    "ownerIdentity": { "principalId": "local" },
                    "arn": format!("arn:aws:s3:::{}", bucket),
                },
                "object": {
                    "key": key,
                    "size": size,
                    "eTag": etag,
                    "versionId": "null",
                    "sequencer": format!("{:016X}", Utc::now().timestamp_millis()),
                }
            }),
        };
        let client = reqwest::Client::new();
        for config in configs {
            if !config.matches_event(&event_name, &key) {
                continue;
            }
            // S3 echoes the matched configuration's Id in the event record
            // (previously this was hardcoded to "notification").
            let mut event = record.clone();
            if let Some(s3) = event.s3.as_object_mut() {
                s3.insert("configurationId".to_string(), json!(config.id));
            }
            let payload = json!({ "Records": [event] });
            let result = client
                .post(&config.destination.url)
                .header("content-type", "application/json")
                .json(&payload)
                .send()
                .await;
            if let Err(err) = result {
                tracing::warn!(
                    "Failed to deliver notification for {} to {}: {}",
                    event_name,
                    config.destination.url,
                    err
                );
            }
        }
    });
}
/// Format a timestamp the way S3 event records do: RFC 3339 UTC with
/// millisecond precision, e.g. `2024-01-02T03:04:05.123Z`. The previous
/// format string hardcoded `.000`, discarding the real milliseconds.
fn format_event_time(value: DateTime<Utc>) -> String {
    value.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
}
/// Trimmed text of the first direct child element named `name`;
/// `None` when absent, textless, or empty after trimming.
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
    let child = node
        .children()
        .find(|candidate| candidate.is_element() && candidate.tag_name().name() == name)?;
    let trimmed = child.text()?.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_string())
}
#[cfg(test)]
mod tests {
    use super::*;
    // End-to-end parse of a WebhookConfiguration with Id, wildcard event,
    // destination URL, and prefix/suffix filter rules.
    #[test]
    fn parse_webhook_configuration() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<WebhookConfiguration>
<Id>upload</Id>
<Event>s3:ObjectCreated:*</Event>
<Destination><Url>https://example.com/hook</Url></Destination>
<Filter>
<S3Key>
<FilterRule><Name>prefix</Name><Value>logs/</Value></FilterRule>
<FilterRule><Name>suffix</Name><Value>.txt</Value></FilterRule>
</S3Key>
</Filter>
</WebhookConfiguration>
</NotificationConfiguration>"#;
        let configs = parse_notification_configurations(xml).unwrap();
        assert_eq!(configs.len(), 1);
        // The "s3:ObjectCreated:*" wildcard matches Put but not Removed.
        assert!(configs[0].matches_event("s3:ObjectCreated:Put", "logs/test.txt"));
        assert!(!configs[0].matches_event("s3:ObjectRemoved:Delete", "logs/test.txt"));
    }
}

View File

@@ -0,0 +1,128 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Object-metadata key under which the legal-hold flag ("ON"/"OFF") is stored.
pub const LEGAL_HOLD_METADATA_KEY: &str = "__legal_hold__";
/// Object-metadata key under which the JSON-encoded retention settings are stored.
pub const RETENTION_METADATA_KEY: &str = "__object_retention__";
/// S3 object-lock retention mode. Variant names deliberately match the
/// S3 wire values (`GOVERNANCE`/`COMPLIANCE`) so serde round-trips them
/// without rename attributes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RetentionMode {
    // May be bypassed by callers holding bypass-governance permission.
    GOVERNANCE,
    // Immutable until the retain-until date; no bypass.
    COMPLIANCE,
}
/// Retention settings attached to an object: a mode plus the instant
/// until which the object is protected.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ObjectLockRetention {
    pub mode: RetentionMode,
    pub retain_until_date: DateTime<Utc>,
}
impl ObjectLockRetention {
    /// True once the retain-until date lies in the past.
    pub fn is_expired(&self) -> bool {
        self.retain_until_date < Utc::now()
    }
}
/// Decode the retention settings stored in `metadata`, if present and
/// valid JSON; malformed entries are treated as absent.
pub fn get_object_retention(metadata: &HashMap<String, String>) -> Option<ObjectLockRetention> {
    let raw = metadata.get(RETENTION_METADATA_KEY)?;
    serde_json::from_str::<ObjectLockRetention>(raw).ok()
}
/// Serialize `retention` into `metadata` under the retention key.
/// Fails only if JSON encoding fails, in which case `metadata` is
/// untouched.
pub fn set_object_retention(
    metadata: &mut HashMap<String, String>,
    retention: &ObjectLockRetention,
) -> Result<(), String> {
    match serde_json::to_string(retention) {
        Ok(encoded) => {
            metadata.insert(RETENTION_METADATA_KEY.to_string(), encoded);
            Ok(())
        }
        Err(err) => Err(err.to_string()),
    }
}
/// Whether the object carries an active legal hold. Accepts "ON" or
/// "true" (case-insensitively); anything else, or a missing entry,
/// means no hold.
pub fn get_legal_hold(metadata: &HashMap<String, String>) -> bool {
    match metadata.get(LEGAL_HOLD_METADATA_KEY) {
        Some(value) => value.eq_ignore_ascii_case("ON") || value.eq_ignore_ascii_case("true"),
        None => false,
    }
}
/// Record the legal-hold flag in `metadata` as "ON" or "OFF".
pub fn set_legal_hold(metadata: &mut HashMap<String, String>, enabled: bool) {
    let flag = if enabled { "ON" } else { "OFF" };
    metadata.insert(LEGAL_HOLD_METADATA_KEY.to_string(), flag.to_string());
}
/// Check whether the retention settings stored in `metadata` may be
/// modified.
///
/// Absent or expired retention is always mutable. COMPLIANCE mode is
/// never mutable before expiry; GOVERNANCE mode requires the caller to
/// hold bypass-governance permission. (Also replaces a `format!` call
/// that had no format arguments with a plain string.)
pub fn ensure_retention_mutable(
    metadata: &HashMap<String, String>,
    bypass_governance: bool,
) -> Result<(), String> {
    let Some(existing) = get_object_retention(metadata) else {
        return Ok(());
    };
    if existing.is_expired() {
        return Ok(());
    }
    match existing.mode {
        RetentionMode::COMPLIANCE => Err(
            "Cannot modify retention on object with COMPLIANCE mode until retention expires"
                .to_string(),
        ),
        RetentionMode::GOVERNANCE if !bypass_governance => Err(
            "Cannot modify GOVERNANCE retention without bypass-governance permission".to_string(),
        ),
        RetentionMode::GOVERNANCE => Ok(()),
    }
}
/// Decide whether an object may be deleted given its lock metadata.
///
/// A legal hold always blocks deletion. Unexpired COMPLIANCE retention
/// blocks it unconditionally; unexpired GOVERNANCE retention blocks it
/// unless the caller holds bypass-governance permission.
pub fn can_delete_object(
    metadata: &HashMap<String, String>,
    bypass_governance: bool,
) -> Result<(), String> {
    if get_legal_hold(metadata) {
        return Err("Object is under legal hold".to_string());
    }
    let Some(retention) = get_object_retention(metadata) else {
        return Ok(());
    };
    if retention.is_expired() {
        return Ok(());
    }
    match retention.mode {
        RetentionMode::COMPLIANCE => Err(format!(
            "Object is locked in COMPLIANCE mode until {}",
            retention.retain_until_date.to_rfc3339()
        )),
        RetentionMode::GOVERNANCE if !bypass_governance => Err(format!(
            "Object is locked in GOVERNANCE mode until {}",
            retention.retain_until_date.to_rfc3339()
        )),
        RetentionMode::GOVERNANCE => Ok(()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;
    // A legal hold must block deletion regardless of retention settings.
    #[test]
    fn legal_hold_blocks_delete() {
        let mut metadata = HashMap::new();
        set_legal_hold(&mut metadata, true);
        let err = can_delete_object(&metadata, false).unwrap_err();
        assert!(err.contains("legal hold"));
    }
    // Unexpired GOVERNANCE retention blocks deletion without the
    // bypass-governance permission and allows it with the permission.
    #[test]
    fn governance_requires_bypass() {
        let mut metadata = HashMap::new();
        set_object_retention(
            &mut metadata,
            &ObjectLockRetention {
                mode: RetentionMode::GOVERNANCE,
                retain_until_date: Utc::now() + Duration::hours(1),
            },
        )
        .unwrap();
        assert!(can_delete_object(&metadata, false).is_err());
        assert!(can_delete_object(&metadata, true).is_ok());
    }
}

View File

@@ -0,0 +1,183 @@
use std::time::Duration;
use chrono::Utc;
use serde_json::Value;
fn extract_error_detail(body: &str) -> String {
let trimmed = body.trim();
if trimmed.is_empty() {
return String::new();
}
if let Ok(value) = serde_json::from_str::<Value>(trimmed) {
let err = value.get("error").unwrap_or(&value);
let code = err
.get("code")
.or_else(|| err.get("Code"))
.and_then(|v| v.as_str())
.map(str::trim)
.filter(|s| !s.is_empty());
let message = err
.get("message")
.or_else(|| err.get("Message"))
.and_then(|v| v.as_str())
.map(str::trim)
.filter(|s| !s.is_empty());
let detail = match (code, message) {
(Some(c), Some(m)) => format!("{}: {}", c, m),
(Some(c), None) => c.to_string(),
(None, Some(m)) => m.to_string(),
(None, None) => String::new(),
};
if !detail.is_empty() {
return truncate_chars(&detail, 240);
}
}
let collapsed = trimmed
.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.collect::<Vec<_>>()
.join(" ");
truncate_chars(&collapsed, 240)
}
/// Return at most `max_chars` characters of `s` (characters, not bytes),
/// never splitting a multi-byte character. (Replaces a no-op
/// `format!("{}", ...)` wrapper with a direct `to_string`.)
fn truncate_chars(s: &str, max_chars: usize) -> String {
    match s.char_indices().nth(max_chars) {
        // `boundary` is the byte offset of the (max_chars+1)-th char, so
        // slicing up to it keeps exactly `max_chars` chars.
        Some((boundary, _)) => s[..boundary].to_string(),
        None => s.to_string(),
    }
}
use myfsio_auth::sigv4::{
aws_uri_encode, build_string_to_sign, compute_signature, derive_signing_key, sha256_hex,
};
use crate::stores::connections::RemoteConnection;
/// Thin HTTP client for SigV4-signed admin calls against a peer site.
pub struct PeerAdminClient {
    client: reqwest::Client,
}
impl PeerAdminClient {
    /// Build a client with the given connect/read timeouts, falling back
    /// to a default client if the builder fails.
    pub fn new(connect_timeout: Duration, read_timeout: Duration) -> Self {
        let client = reqwest::Client::builder()
            .connect_timeout(connect_timeout)
            .timeout(read_timeout)
            .build()
            .unwrap_or_else(|_| reqwest::Client::new());
        Self { client }
    }
    /// Fetch `/admin/cluster/overview` from a peer, signing the GET
    /// request with AWS SigV4 using the stored connection credentials
    /// (empty region defaults to "us-east-1").
    ///
    /// Returns the parsed JSON body. On non-2xx responses the error
    /// string includes the status code and, when available, a condensed
    /// detail extracted from the response body — formatted as
    /// "peer returned status <code>: <detail>" (previously the code and
    /// detail were concatenated with no separator).
    pub async fn fetch_cluster_overview(
        &self,
        endpoint: &str,
        connection: &RemoteConnection,
    ) -> Result<Value, String> {
        let url = format!(
            "{}/admin/cluster/overview",
            endpoint.trim_end_matches('/')
        );
        let parsed = reqwest::Url::parse(&url).map_err(|e| format!("invalid url: {}", e))?;
        let host = parsed
            .host_str()
            .ok_or_else(|| "missing host".to_string())?
            .to_string();
        // SigV4's canonical "host" header must include a non-default port.
        let host_with_port = match parsed.port() {
            Some(p) => format!("{}:{}", host, p),
            None => host.clone(),
        };
        let canonical_uri = parsed.path().to_string();
        let canonical_uri = if canonical_uri.is_empty() {
            "/".to_string()
        } else {
            canonical_uri
        };
        let now = Utc::now();
        let amz_date = now.format("%Y%m%dT%H%M%SZ").to_string();
        let date_stamp = now.format("%Y%m%d").to_string();
        let region = if connection.region.is_empty() {
            "us-east-1".to_string()
        } else {
            connection.region.clone()
        };
        let service = "s3";
        // GET with an empty body: the payload hash is SHA-256 of "".
        let payload_hash = sha256_hex(b"");
        let canonical_headers = format!(
            "host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
            host_with_port, payload_hash, amz_date
        );
        let signed_headers = "host;x-amz-content-sha256;x-amz-date";
        // Canonical query string: split, sort by key then value, and
        // AWS-percent-encode both sides.
        let canonical_query = parsed
            .query()
            .map(|q| {
                let mut pairs: Vec<(String, String)> = q
                    .split('&')
                    .filter(|p| !p.is_empty())
                    .map(|p| {
                        let mut it = p.splitn(2, '=');
                        let k = it.next().unwrap_or("").to_string();
                        let v = it.next().unwrap_or("").to_string();
                        (k, v)
                    })
                    .collect();
                pairs.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
                pairs
                    .iter()
                    .map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
                    .collect::<Vec<_>>()
                    .join("&")
            })
            .unwrap_or_default();
        let canonical_request = format!(
            "GET\n{}\n{}\n{}\n{}\n{}",
            canonical_uri, canonical_query, canonical_headers, signed_headers, payload_hash
        );
        let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
        let string_to_sign = build_string_to_sign(&amz_date, &credential_scope, &canonical_request);
        let signing_key =
            derive_signing_key(&connection.secret_key, &date_stamp, &region, service);
        let signature = compute_signature(&signing_key, &string_to_sign);
        let authorization = format!(
            "AWS4-HMAC-SHA256 Credential={}/{},SignedHeaders={},Signature={}",
            connection.access_key, credential_scope, signed_headers, signature
        );
        let resp = self
            .client
            .get(&url)
            .header("host", &host_with_port)
            .header("x-amz-content-sha256", &payload_hash)
            .header("x-amz-date", &amz_date)
            .header("authorization", &authorization)
            .send()
            .await
            .map_err(|e| format!("request failed: {}", e))?;
        let status = resp.status();
        if !status.is_success() {
            // Surface whatever detail the peer put in its error body.
            let body_text = resp.text().await.unwrap_or_default();
            let detail = extract_error_detail(&body_text);
            if detail.is_empty() {
                return Err(format!("peer returned status {}", status.as_u16()));
            }
            return Err(format!(
                "peer returned status {}: {}",
                status.as_u16(),
                detail
            ));
        }
        let body: Value = resp
            .json()
            .await
            .map_err(|e| format!("invalid json: {}", e))?;
        Ok(body)
    }
}

View File

@@ -0,0 +1,385 @@
use std::collections::HashMap;
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use aws_sdk_s3::Client;
use md5::{Digest, Md5};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt};
use myfsio_storage::fs_backend::{is_multipart_etag, FsStorageBackend};
use myfsio_storage::traits::StorageEngine;
use crate::services::replication::ReplicationManager;
use crate::services::s3_client::{build_client, ClientOptions};
use crate::stores::connections::ConnectionStore;
/// Pulls objects from a replication peer: used both to sync objects into
/// local storage and to heal locally corrupted objects by re-fetching a
/// verified copy.
pub struct PeerFetcher {
    storage: Arc<FsStorageBackend>,
    connections: Arc<ConnectionStore>,
    replication: Arc<ReplicationManager>,
    client_options: ClientOptions,
}
/// Result of attempting to heal a corrupted local object from a peer.
#[derive(Debug)]
pub enum HealOutcome {
    /// Object re-fetched from the peer and verified; `bytes` were written.
    Healed { peer_etag: String, bytes: u64 },
    /// Peer holds a different version (ETag mismatch); nothing fetched.
    PeerMismatch { stored: String, peer: String },
    /// Peer unreachable, or a transfer/filesystem step failed.
    PeerUnavailable { error: String },
    /// No enabled replication rule / target connection for this bucket.
    NotConfigured,
    /// Downloaded bytes did not hash to the expected ETag.
    VerifyFailed { expected: String, actual: String },
}
impl PeerFetcher {
pub fn new(
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
replication: Arc<ReplicationManager>,
client_options: ClientOptions,
) -> Self {
Self {
storage,
connections,
replication,
client_options,
}
}
fn build_client_for_bucket(&self, bucket: &str) -> Option<(Client, String)> {
let rule = self.replication.get_rule(bucket)?;
if !rule.enabled {
return None;
}
let conn = self.connections.get(&rule.target_connection_id)?;
let client = build_client(&conn, &self.client_options);
Some((client, rule.target_bucket))
}
pub async fn fetch_into_storage(
&self,
client: &Client,
remote_bucket: &str,
local_bucket: &str,
key: &str,
) -> bool {
let resp = match client
.get_object()
.bucket(remote_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
tracing::error!("Pull GetObject failed {}/{}: {:?}", local_bucket, key, err);
return false;
}
};
let head = match client
.head_object()
.bucket(remote_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
tracing::error!("Pull HeadObject failed {}/{}: {:?}", local_bucket, key, err);
return false;
}
};
let metadata: Option<HashMap<String, String>> = head
.metadata()
.map(|m| m.iter().map(|(k, v)| (k.clone(), v.clone())).collect());
let stream = resp.body.into_async_read();
let boxed: Pin<Box<dyn AsyncRead + Send>> = Box::pin(stream);
match self
.storage
.put_object(local_bucket, key, boxed, metadata)
.await
{
Ok(_) => {
tracing::debug!("Pulled object {}/{} from remote", local_bucket, key);
true
}
Err(err) => {
tracing::error!(
"Store pulled object failed {}/{}: {}",
local_bucket,
key,
err
);
false
}
}
}
pub async fn fetch_for_heal(
&self,
local_bucket: &str,
key: &str,
expected_etag: &str,
dest_path: &Path,
) -> HealOutcome {
let (client, target_bucket) = match self.build_client_for_bucket(local_bucket) {
Some(v) => v,
None => return HealOutcome::NotConfigured,
};
let head = match client
.head_object()
.bucket(&target_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
return HealOutcome::PeerUnavailable {
error: format!("HeadObject: {:?}", err),
};
}
};
let peer_etag = head.e_tag().unwrap_or("").trim_matches('"').to_string();
if peer_etag.is_empty() {
return HealOutcome::PeerUnavailable {
error: "remote returned empty ETag".into(),
};
}
if peer_etag != expected_etag {
return HealOutcome::PeerMismatch {
stored: expected_etag.to_string(),
peer: peer_etag,
};
}
if is_multipart_etag(expected_etag) {
return self
.fetch_multipart_for_heal(&client, &target_bucket, key, expected_etag, dest_path)
.await;
}
let resp = match client
.get_object()
.bucket(&target_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
return HealOutcome::PeerUnavailable {
error: format!("GetObject: {:?}", err),
};
}
};
if let Some(parent) = dest_path.parent() {
if let Err(e) = tokio::fs::create_dir_all(parent).await {
return HealOutcome::PeerUnavailable {
error: format!("mkdir parent: {}", e),
};
}
}
let mut file = match tokio::fs::File::create(dest_path).await {
Ok(f) => f,
Err(e) => {
return HealOutcome::PeerUnavailable {
error: format!("create temp: {}", e),
};
}
};
let mut reader = resp.body.into_async_read();
let mut hasher = Md5::new();
let mut buf = vec![0u8; 64 * 1024];
let mut total: u64 = 0;
loop {
let n = match reader.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::PeerUnavailable {
error: format!("read body: {}", e),
};
}
};
if n == 0 {
break;
}
hasher.update(&buf[..n]);
if let Err(e) = file.write_all(&buf[..n]).await {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::PeerUnavailable {
error: format!("write temp: {}", e),
};
}
total += n as u64;
}
if let Err(e) = file.flush().await {
return HealOutcome::PeerUnavailable {
error: format!("flush temp: {}", e),
};
}
drop(file);
let actual = format!("{:x}", hasher.finalize());
if actual != expected_etag {
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::VerifyFailed {
expected: expected_etag.to_string(),
actual,
};
}
HealOutcome::Healed {
peer_etag,
bytes: total,
}
}
async fn fetch_multipart_for_heal(
&self,
client: &Client,
target_bucket: &str,
key: &str,
expected_etag: &str,
dest_path: &Path,
) -> HealOutcome {
let part_count = match expected_etag
.split_once('-')
.and_then(|(_, n)| n.parse::<u32>().ok())
{
Some(n) if n >= 1 => n,
_ => {
return HealOutcome::VerifyFailed {
expected: expected_etag.to_string(),
actual: format!("unparseable multipart suffix in {}", expected_etag),
};
}
};
if let Some(parent) = dest_path.parent() {
if let Err(e) = tokio::fs::create_dir_all(parent).await {
return HealOutcome::PeerUnavailable {
error: format!("mkdir parent: {}", e),
};
}
}
let mut file = match tokio::fs::File::create(dest_path).await {
Ok(f) => f,
Err(e) => {
return HealOutcome::PeerUnavailable {
error: format!("create temp: {}", e),
};
}
};
let mut composite = Md5::new();
let mut total: u64 = 0;
let mut buf = vec![0u8; 64 * 1024];
for part_no in 1..=part_count {
let part_no_i32 = part_no as i32;
let resp = match client
.get_object()
.bucket(target_bucket)
.key(key)
.part_number(part_no_i32)
.send()
.await
{
Ok(r) => r,
Err(err) => {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::PeerUnavailable {
error: format!("GetObject part {}: {:?}", part_no, err),
};
}
};
let mut reader = resp.body.into_async_read();
let mut part_hasher = Md5::new();
let mut part_bytes: u64 = 0;
loop {
let n = match reader.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::PeerUnavailable {
error: format!("read part {}: {}", part_no, e),
};
}
};
if n == 0 {
break;
}
part_hasher.update(&buf[..n]);
if let Err(e) = file.write_all(&buf[..n]).await {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::PeerUnavailable {
error: format!("write part {}: {}", part_no, e),
};
}
part_bytes += n as u64;
}
if part_bytes == 0 {
drop(file);
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::VerifyFailed {
expected: expected_etag.to_string(),
actual: format!("part {} returned zero bytes", part_no),
};
}
composite.update(part_hasher.finalize().as_slice());
total += part_bytes;
}
if let Err(e) = file.flush().await {
return HealOutcome::PeerUnavailable {
error: format!("flush temp: {}", e),
};
}
drop(file);
let composite_etag = format!("{:x}-{}", composite.finalize(), part_count);
if composite_etag != expected_etag {
let _ = tokio::fs::remove_file(dest_path).await;
return HealOutcome::VerifyFailed {
expected: expected_etag.to_string(),
actual: composite_etag,
};
}
HealOutcome::Healed {
peer_etag: expected_etag.to_string(),
bytes: total,
}
}
}
#[cfg(test)]
mod tests {
    use myfsio_storage::fs_backend::is_multipart_etag;
    // A multipart ETag is a 32-hex MD5 followed by "-<decimal count>";
    // anything else (missing suffix, non-hex, non-numeric count) is not.
    #[test]
    fn detects_multipart_etags() {
        assert!(is_multipart_etag("d41d8cd98f00b204e9800998ecf8427e-3"));
        assert!(is_multipart_etag("00000000000000000000000000000000-1"));
        assert!(!is_multipart_etag("d41d8cd98f00b204e9800998ecf8427e"));
        assert!(!is_multipart_etag("d41d8cd98f00b204e9800998ecf8427e-"));
        assert!(!is_multipart_etag("not-hex-at-all-1"));
        assert!(!is_multipart_etag("d41d8cd98f00b204e9800998ecf8427e-abc"));
    }
}

View File

@@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize};
use tokio::sync::Semaphore;
use myfsio_common::types::ListParams;
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::fs_backend::{metadata_is_corrupted, FsStorageBackend};
use myfsio_storage::traits::StorageEngine;
use crate::services::s3_client::{build_client, check_endpoint_health, ClientOptions};
@@ -483,6 +483,17 @@ impl ReplicationManager {
return;
}
if let Ok(src_meta) = self.storage.get_object_metadata(bucket, object_key).await {
if metadata_is_corrupted(&src_meta) {
tracing::warn!(
"Replication skipped for {}/{}: source object is poisoned (corrupted)",
bucket,
object_key
);
return;
}
}
let src_path = match self.storage.get_object_path(bucket, object_key).await {
Ok(p) => p,
Err(_) => {

View File

@@ -38,6 +38,8 @@ pub struct PeerSite {
#[serde(default)]
pub connection_id: Option<String>,
#[serde(default)]
pub peer_inbound_access_key: Option<String>,
#[serde(default)]
pub created_at: Option<String>,
#[serde(default)]
pub is_healthy: bool,

View File

@@ -1,19 +1,18 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use aws_sdk_s3::Client;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncRead;
use tokio::sync::Notify;
use myfsio_common::types::{ListParams, ObjectMeta};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use crate::services::peer_fetch::PeerFetcher;
use crate::services::replication::{ReplicationManager, ReplicationRule, MODE_BIDIRECTIONAL};
use crate::services::s3_client::{build_client, ClientOptions};
use crate::stores::connections::ConnectionStore;
@@ -33,7 +32,7 @@ pub struct SyncState {
pub last_full_sync: Option<f64>,
}
#[derive(Debug, Clone, Default, Serialize)]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SiteSyncStats {
pub last_sync_at: Option<f64>,
pub objects_pulled: u64,
@@ -53,6 +52,7 @@ pub struct SiteSyncWorker {
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
replication: Arc<ReplicationManager>,
peer_fetcher: Arc<PeerFetcher>,
storage_root: PathBuf,
interval: Duration,
batch_size: usize,
@@ -75,24 +75,41 @@ impl SiteSyncWorker {
max_retries: u32,
clock_skew_tolerance: f64,
) -> Self {
Self {
storage,
connections,
replication,
storage_root,
interval: Duration::from_secs(interval_seconds),
batch_size,
clock_skew_tolerance,
client_options: ClientOptions {
let client_options = ClientOptions {
connect_timeout,
read_timeout,
max_attempts: max_retries,
};
let peer_fetcher = Arc::new(PeerFetcher::new(
storage.clone(),
connections.clone(),
replication.clone(),
ClientOptions {
connect_timeout,
read_timeout,
max_attempts: max_retries,
},
bucket_stats: Mutex::new(HashMap::new()),
));
let bucket_stats = Mutex::new(load_stats(&storage_root));
Self {
storage,
connections,
replication,
peer_fetcher,
storage_root,
interval: Duration::from_secs(interval_seconds),
batch_size,
clock_skew_tolerance,
client_options,
bucket_stats,
shutdown: Arc::new(Notify::new()),
}
}
/// Shared handle to this worker's `PeerFetcher` so other services can
/// reuse the same fetch pipeline (cheap `Arc` clone, no new connections).
pub fn peer_fetcher(&self) -> Arc<PeerFetcher> {
    self.peer_fetcher.clone()
}
/// Signal shutdown: wakes every task currently waiting on the shutdown
/// `Notify`. Note `notify_waiters` does not store a permit, so only
/// already-registered waiters observe this call.
pub fn shutdown(&self) {
    self.shutdown.notify_waiters();
}
@@ -101,6 +118,15 @@ impl SiteSyncWorker {
self.bucket_stats.lock().get(bucket).cloned()
}
/// Return a point-in-time clone of the full per-bucket sync stats map.
/// Cloning under the lock keeps the critical section short.
pub fn snapshot_stats(&self) -> HashMap<String, SiteSyncStats> {
    self.bucket_stats.lock().clone()
}
/// Persist the current stats to disk. The map is cloned first so the
/// mutex is not held across file I/O in the free `save_stats` helper.
fn save_stats(&self) {
    let snapshot = self.bucket_stats.lock().clone();
    save_stats(&self.storage_root, &snapshot);
}
pub async fn run(self: Arc<Self>) {
tracing::info!(
"Site sync worker started (interval={}s)",
@@ -120,6 +146,7 @@ impl SiteSyncWorker {
async fn run_cycle(&self) {
let rules = self.replication.rules_snapshot();
let mut mutated = false;
for (bucket, rule) in rules {
if rule.mode != MODE_BIDIRECTIONAL || !rule.enabled {
continue;
@@ -127,12 +154,16 @@ impl SiteSyncWorker {
match self.sync_bucket(&rule).await {
Ok(stats) => {
self.bucket_stats.lock().insert(bucket, stats);
mutated = true;
}
Err(e) => {
tracing::error!("Site sync failed for bucket {}: {}", bucket, e);
}
}
}
if mutated {
self.save_stats();
}
}
pub async fn trigger_sync(&self, bucket: &str) -> Option<SiteSyncStats> {
@@ -145,6 +176,7 @@ impl SiteSyncWorker {
self.bucket_stats
.lock()
.insert(bucket.to_string(), stats.clone());
self.save_stats();
Some(stats)
}
Err(e) => {
@@ -383,60 +415,9 @@ impl SiteSyncWorker {
local_bucket: &str,
key: &str,
) -> bool {
let resp = match client
.get_object()
.bucket(remote_bucket)
.key(key)
.send()
self.peer_fetcher
.fetch_into_storage(client, remote_bucket, local_bucket, key)
.await
{
Ok(r) => r,
Err(err) => {
tracing::error!("Pull GetObject failed {}/{}: {:?}", local_bucket, key, err);
return false;
}
};
let head = match client
.head_object()
.bucket(remote_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
tracing::error!("Pull HeadObject failed {}/{}: {:?}", local_bucket, key, err);
return false;
}
};
let metadata: Option<HashMap<String, String>> = head
.metadata()
.map(|m| m.iter().map(|(k, v)| (k.clone(), v.clone())).collect());
let stream = resp.body.into_async_read();
let boxed: Pin<Box<dyn AsyncRead + Send>> = Box::pin(stream);
match self
.storage
.put_object(local_bucket, key, boxed, metadata)
.await
{
Ok(_) => {
tracing::debug!("Pulled object {}/{} from remote", local_bucket, key);
true
}
Err(err) => {
tracing::error!(
"Store pulled object failed {}/{}: {}",
local_bucket,
key,
err
);
false
}
}
}
async fn apply_remote_deletion(&self, bucket: &str, key: &str) -> bool {
@@ -489,6 +470,34 @@ fn now_secs() -> f64 {
.unwrap_or(0.0)
}
/// Location of the persisted per-bucket site-sync statistics file,
/// kept under the hidden system config directory of the storage root.
fn stats_path(storage_root: &std::path::Path) -> PathBuf {
    let config_dir = storage_root.join(".myfsio.sys").join("config");
    config_dir.join("site_sync_stats.json")
}
/// Load persisted sync stats from disk.
///
/// Best-effort: a missing file, an unreadable file, or a parse error all
/// yield an empty map rather than an error.
fn load_stats(storage_root: &std::path::Path) -> HashMap<String, SiteSyncStats> {
    std::fs::read_to_string(stats_path(storage_root))
        .ok()
        .and_then(|text| serde_json::from_str(&text).ok())
        .unwrap_or_default()
}
/// Persist sync stats to disk as pretty-printed JSON.
///
/// Best-effort: directory creation, serialization, and write failures are
/// all deliberately ignored — stats are advisory, not critical state.
fn save_stats(storage_root: &std::path::Path, stats: &HashMap<String, SiteSyncStats>) {
    let path = stats_path(storage_root);
    if let Some(dir) = path.parent() {
        let _ = std::fs::create_dir_all(dir);
    }
    match serde_json::to_string_pretty(stats) {
        Ok(text) => {
            let _ = std::fs::write(&path, text);
        }
        Err(_) => {}
    }
}
fn is_not_found_error<E: std::fmt::Debug>(err: &aws_sdk_s3::error::SdkError<E>) -> bool {
let msg = format!("{:?}", err);
msg.contains("NoSuchBucket")

View File

@@ -144,7 +144,7 @@ fn normalize_path_for_mount(path: &Path) -> String {
stripped.to_lowercase()
}
fn sample_disk(path: &Path) -> (u64, u64) {
pub fn sample_disk(path: &Path) -> (u64, u64) {
let disks = Disks::new_with_refreshed_list();
let path_str = normalize_path_for_mount(path);
let mut best: Option<(usize, u64, u64)> = None;

View File

@@ -0,0 +1,197 @@
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
/// In-memory representation of the website domain → bucket mapping table.
///
/// `deny_unknown_fields` is load-bearing: the untagged `DomainDataFile`
/// deserializer tries this wrapped form first, and rejecting unknown keys
/// is what makes legacy flat files (arbitrary domain keys) fall through
/// to the `Flat` variant instead of matching here.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
struct DomainData {
    // domain (normalized lowercase) -> bucket name
    #[serde(default)]
    mappings: HashMap<String, String>,
}
/// On-disk formats accepted for the website-domain config file.
///
/// Two layouts exist: the wrapped `{"mappings": {...}}` form and the legacy
/// flat `{"domain": "bucket"}` form. `untagged` tries `Wrapped` first;
/// `DomainData`'s `deny_unknown_fields` makes flat files fail that attempt
/// and fall through to `Flat`.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum DomainDataFile {
    Wrapped(DomainData),
    Flat(HashMap<String, String>),
}
impl DomainDataFile {
    /// Convert either on-disk layout into the in-memory form.
    ///
    /// Keys are normalized in BOTH arms. Previously only the legacy flat
    /// form was normalized, so a hand-edited or legacy wrapped file with
    /// mixed-case keys (e.g. "Example.COM") could never be matched by
    /// `get_bucket`, which normalizes the query. Normalizing
    /// already-normalized keys is a no-op, so this is backward compatible.
    fn into_domain_data(self) -> DomainData {
        let raw = match self {
            Self::Wrapped(data) => data.mappings,
            Self::Flat(mappings) => mappings,
        };
        DomainData {
            mappings: raw
                .into_iter()
                .map(|(domain, bucket)| (normalize_domain(&domain), bucket))
                .collect(),
        }
    }
}
/// Thread-safe, file-backed store mapping website domains to bucket names.
pub struct WebsiteDomainStore {
    // Path of the JSON config file this store reads and writes.
    path: PathBuf,
    // Shared in-memory state; every mutation is written back to `path`.
    data: Arc<RwLock<DomainData>>,
}
impl WebsiteDomainStore {
    /// Open the store rooted at `storage_root`, loading any existing
    /// mapping file. Missing or unparseable files yield an empty store.
    pub fn new(storage_root: &std::path::Path) -> Self {
        let path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("website_domains.json");
        let initial = std::fs::read_to_string(&path)
            .ok()
            .and_then(|text| serde_json::from_str::<DomainDataFile>(&text).ok())
            .map(DomainDataFile::into_domain_data)
            .unwrap_or_default();
        Self {
            path,
            data: Arc::new(RwLock::new(initial)),
        }
    }

    /// Best-effort write-back in the shared flat `{domain: bucket}` format.
    fn save(&self) {
        if let Some(dir) = self.path.parent() {
            let _ = std::fs::create_dir_all(dir);
        }
        let guard = self.data.read();
        if let Ok(serialized) = serde_json::to_string_pretty(&guard.mappings) {
            let _ = std::fs::write(&self.path, serialized);
        }
    }

    /// All mappings as `{"domain": ..., "bucket": ...}` JSON objects
    /// (iteration order is the map's, i.e. unspecified).
    pub fn list_all(&self) -> Vec<serde_json::Value> {
        let guard = self.data.read();
        let mut rows = Vec::with_capacity(guard.mappings.len());
        for (domain, bucket) in guard.mappings.iter() {
            rows.push(serde_json::json!({
                "domain": domain,
                "bucket": bucket,
            }));
        }
        rows
    }

    /// Look up the bucket for `domain` (query is normalized first).
    pub fn get_bucket(&self, domain: &str) -> Option<String> {
        let key = normalize_domain(domain);
        self.data.read().mappings.get(&key).cloned()
    }

    /// Insert or replace the mapping for `domain`, then persist.
    pub fn set_mapping(&self, domain: &str, bucket: &str) {
        let key = normalize_domain(domain);
        {
            // Scope the write guard so it is released before saving.
            let mut guard = self.data.write();
            guard.mappings.insert(key, bucket.to_string());
        }
        self.save();
    }

    /// Remove the mapping for `domain`; persists only when something was
    /// actually removed. Returns whether a mapping existed.
    pub fn delete_mapping(&self, domain: &str) -> bool {
        let key = normalize_domain(domain);
        let removed = {
            let mut guard = self.data.write();
            guard.mappings.remove(&key).is_some()
        };
        if removed {
            self.save();
        }
        removed
    }
}
/// Canonical key form used for both storage and lookup: surrounding
/// whitespace stripped, ASCII letters lowercased (non-ASCII untouched).
pub fn normalize_domain(domain: &str) -> String {
    let trimmed = domain.trim();
    trimmed.to_ascii_lowercase()
}
/// Syntactic domain-name validation.
///
/// Accepts names of at most 253 bytes with at least two dot-separated
/// labels, where each label is 1-63 bytes of ASCII alphanumerics or
/// hyphens and neither starts nor ends with a hyphen.
pub fn is_valid_domain(domain: &str) -> bool {
    if domain.is_empty() || domain.len() > 253 {
        return false;
    }
    let mut label_count = 0usize;
    for label in domain.split('.') {
        label_count += 1;
        let label_ok = !label.is_empty()
            && label.len() <= 63
            && label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
            && !label.starts_with('-')
            && !label.ends_with('-');
        if !label_ok {
            return false;
        }
    }
    // A bare hostname without a dot (single label) is not accepted.
    label_count >= 2
}
#[cfg(test)]
mod tests {
    use super::WebsiteDomainStore;
    use serde_json::json;
    use tempfile::tempdir;

    /// Seed `website_domains.json` with `contents` under a temp root.
    fn write_config(root: &std::path::Path, contents: &str) {
        let dir = root.join(".myfsio.sys").join("config");
        std::fs::create_dir_all(&dir).expect("create config dir");
        std::fs::write(dir.join("website_domains.json"), contents).expect("write config");
    }

    /// Legacy flat `{domain: bucket}` files load, with keys normalized.
    #[test]
    fn loads_legacy_flat_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        write_config(tmp.path(), r#"{"Example.COM":"site-bucket"}"#);
        let store = WebsiteDomainStore::new(tmp.path());
        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    /// The wrapped `{"mappings": {...}}` layout loads too.
    #[test]
    fn loads_wrapped_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        write_config(tmp.path(), r#"{"mappings":{"example.com":"site-bucket"}}"#);
        let store = WebsiteDomainStore::new(tmp.path());
        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    /// Writes go out in the shared flat format with normalized keys.
    #[test]
    fn saves_in_shared_plain_mapping_format() {
        let tmp = tempdir().expect("tempdir");
        let store = WebsiteDomainStore::new(tmp.path());
        store.set_mapping("Example.COM", "site-bucket");
        let config_path = tmp
            .path()
            .join(".myfsio.sys")
            .join("config")
            .join("website_domains.json");
        let saved = std::fs::read_to_string(config_path).expect("read config");
        let parsed: serde_json::Value = serde_json::from_str(&saved).expect("parse config");
        assert_eq!(parsed, json!({"example.com": "site-bucket"}));
    }
}

View File

@@ -1,12 +1,17 @@
use std::sync::Arc;
use std::time::Duration;
use std::time::{Duration, Instant};
use parking_lot::Mutex;
use serde_json::Value;
use crate::config::ServerConfig;
use crate::services::access_logging::AccessLoggingService;
use crate::services::gc::GcService;
use crate::services::integrity::IntegrityService;
use crate::services::metrics::MetricsService;
use crate::services::peer_fetch::PeerFetcher;
use crate::services::replication::ReplicationManager;
use crate::services::s3_client::ClientOptions;
use crate::services::site_registry::SiteRegistry;
use crate::services::site_sync::SiteSyncWorker;
use crate::services::system_metrics::SystemMetricsService;
@@ -15,9 +20,9 @@ use crate::session::SessionStore;
use crate::stores::connections::ConnectionStore;
use crate::templates::TemplateEngine;
use myfsio_auth::iam::IamService;
use myfsio_crypto::encryption::EncryptionService;
use myfsio_crypto::encryption::{EncryptionConfig, EncryptionService};
use myfsio_crypto::kms::KmsService;
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::fs_backend::{FsStorageBackend, FsStorageBackendConfig};
#[derive(Clone)]
pub struct AppState {
@@ -38,11 +43,23 @@ pub struct AppState {
pub templates: Option<Arc<TemplateEngine>>,
pub sessions: Arc<SessionStore>,
pub access_logging: Arc<AccessLoggingService>,
pub cluster_overview_cache: Arc<Mutex<Option<(Instant, Value)>>>,
pub cluster_aggregate_cache: Arc<Mutex<Option<(Instant, Value)>>>,
}
impl AppState {
pub fn new(config: ServerConfig) -> Self {
let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
let storage = Arc::new(FsStorageBackend::new_with_config(
config.storage_root.clone(),
FsStorageBackendConfig {
object_key_max_length_bytes: config.object_key_max_length_bytes,
object_cache_max_size: config.object_cache_max_size,
bucket_config_cache_ttl: Duration::from_secs_f64(
config.bucket_config_cache_ttl_seconds,
),
stream_chunk_size: config.stream_chunk_size,
},
));
let iam = Arc::new(IamService::new_with_secret(
config.iam_config_path.clone(),
config.secret_key.clone(),
@@ -51,17 +68,14 @@ impl AppState {
let gc = if config.gc_enabled {
Some(Arc::new(GcService::new(
config.storage_root.clone(),
crate::services::gc::GcConfig::default(),
)))
} else {
None
};
let integrity = if config.integrity_enabled {
Some(Arc::new(IntegrityService::new(
storage.clone(),
&config.storage_root,
crate::services::integrity::IntegrityConfig::default(),
crate::services::gc::GcConfig {
interval_hours: config.gc_interval_hours,
temp_file_max_age_hours: config.gc_temp_file_max_age_hours,
multipart_max_age_days: config.gc_multipart_max_age_days,
lock_file_max_age_hours: config.gc_lock_file_max_age_hours,
quarantine_max_age_days: config.integrity_quarantine_retention_days,
dry_run: config.gc_dry_run,
},
)))
} else {
None
@@ -92,7 +106,22 @@ impl AppState {
None
};
let site_registry = Some(Arc::new(SiteRegistry::new(&config.storage_root)));
let site_registry = {
let registry = SiteRegistry::new(&config.storage_root);
if let (Some(site_id), Some(endpoint)) =
(config.site_id.as_deref(), config.site_endpoint.as_deref())
{
registry.set_local_site(crate::services::site_registry::SiteInfo {
site_id: site_id.to_string(),
endpoint: endpoint.to_string(),
region: config.site_region.clone(),
priority: config.site_priority,
display_name: site_id.to_string(),
created_at: Some(chrono::Utc::now().to_rfc3339()),
});
}
Some(Arc::new(registry))
};
let website_domains = if config.website_hosting_enabled {
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
@@ -130,8 +159,42 @@ impl AppState {
None
};
let integrity_peer_fetcher: Option<Arc<PeerFetcher>> = if let Some(ref ss) = site_sync {
Some(ss.peer_fetcher())
} else {
Some(Arc::new(PeerFetcher::new(
storage.clone(),
connections.clone(),
replication.clone(),
ClientOptions {
connect_timeout: Duration::from_secs(config.site_sync_connect_timeout_secs),
read_timeout: Duration::from_secs(config.site_sync_read_timeout_secs),
max_attempts: config.site_sync_max_retries,
},
)))
};
let integrity = if config.integrity_enabled {
Some(Arc::new(IntegrityService::new(
storage.clone(),
&config.storage_root,
crate::services::integrity::IntegrityConfig {
interval_hours: config.integrity_interval_hours,
batch_size: config.integrity_batch_size,
auto_heal: config.integrity_auto_heal,
dry_run: config.integrity_dry_run,
heal_concurrency: config.integrity_heal_concurrency,
quarantine_retention_days: config.integrity_quarantine_retention_days,
},
integrity_peer_fetcher,
)))
} else {
None
};
let templates = init_templates(&config.templates_dir);
let access_logging = Arc::new(AccessLoggingService::new(&config.storage_root));
let session_ttl = Duration::from_secs(config.session_lifetime_days.saturating_mul(86_400));
Self {
config,
storage,
@@ -148,8 +211,10 @@ impl AppState {
replication,
site_sync,
templates,
sessions: Arc::new(SessionStore::new(Duration::from_secs(60 * 60 * 12))),
sessions: Arc::new(SessionStore::new(session_ttl)),
access_logging,
cluster_overview_cache: Arc::new(Mutex::new(None)),
cluster_aggregate_cache: Arc::new(Mutex::new(None)),
}
}
@@ -172,7 +237,13 @@ impl AppState {
let encryption = if config.encryption_enabled {
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
Ok(master_key) => Some(Arc::new(EncryptionService::new(master_key, kms.clone()))),
Ok(master_key) => Some(Arc::new(EncryptionService::with_config(
master_key,
kms.clone(),
EncryptionConfig {
chunk_size: config.encryption_chunk_size_bytes,
},
))),
Err(e) => {
tracing::error!("Failed to initialize encryption: {}", e);
None
@@ -189,8 +260,16 @@ impl AppState {
}
fn init_templates(templates_dir: &std::path::Path) -> Option<Arc<TemplateEngine>> {
let use_disk = std::env::var("TEMPLATES_DIR").is_ok() && templates_dir.is_dir();
let result = if use_disk {
let glob = format!("{}/*.html", templates_dir.display()).replace('\\', "/");
match TemplateEngine::new(&glob) {
tracing::info!("Loading templates from disk: {}", templates_dir.display());
TemplateEngine::new(&glob)
} else {
tracing::info!("Loading templates from embedded assets");
TemplateEngine::from_embedded()
};
match result {
Ok(engine) => {
crate::handlers::ui_pages::register_ui_endpoints(&engine);
Some(Arc::new(engine))

View File

@@ -31,6 +31,33 @@ impl TemplateEngine {
})
}
/// Build a `TemplateEngine` from templates embedded in the binary.
///
/// Registration order mirrors the disk-based constructor: escape function
/// and filters go in before the templates, endpoint-lookup functions after.
///
/// # Errors
/// Returns the Tera error if any embedded template fails to parse.
pub fn from_embedded() -> Result<Self, TeraError> {
    let mut tera = Tera::default();
    tera.set_escape_fn(html_escape);
    register_filters(&mut tera);
    // Collect owned (name, contents) pairs first: `add_raw_templates`
    // takes string slices, so the owned data must outlive the borrow below.
    let names = crate::embedded::template_names();
    let mut entries: Vec<(String, String)> = Vec::with_capacity(names.len());
    for name in names {
        // Names with no matching embedded contents are silently skipped.
        if let Some(contents) = crate::embedded::template_contents(&name) {
            entries.push((name, contents));
        }
    }
    let refs: Vec<(&str, &str)> = entries
        .iter()
        .map(|(n, c)| (n.as_str(), c.as_str()))
        .collect();
    // All templates registered in one call — presumably so cross-template
    // references (extends/includes) resolve; confirm against Tera docs.
    tera.add_raw_templates(refs)?;
    let endpoints: Arc<RwLock<HashMap<String, String>>> = Arc::new(RwLock::new(HashMap::new()));
    register_functions(&mut tera, endpoints.clone());
    Ok(Self {
        tera: Arc::new(RwLock::new(tera)),
        endpoints,
    })
}
pub fn register_endpoint(&self, name: &str, path_template: &str) {
self.endpoints
.write()
@@ -343,6 +370,24 @@ mod tests {
);
}
/// Smoke test: the embedded asset set parses and contains the pages the
/// UI cannot function without.
#[test]
fn embedded_templates_parse() {
    let engine = TemplateEngine::from_embedded().expect("Embedded Tera parse failed");
    let names: Vec<String> = engine
        .tera
        .read()
        .get_template_names()
        .map(String::from)
        .collect();
    assert!(
        names.len() >= 10,
        "expected 10+ embedded templates, got {}",
        names.len()
    );
    for required in ["login.html", "404.html"] {
        assert!(names.iter().any(|n| n == required));
    }
}
#[test]
fn format_datetime_rfc3339() {
let v = format_datetime_filter(

View File

@@ -404,6 +404,7 @@ html.sidebar-will-collapse .sidebar-user {
min-height: 70px;
gap: 0.5rem;
overflow: visible;
flex-shrink: 0;
}
.sidebar-collapsed .sidebar-header {
@@ -516,10 +517,16 @@ html.sidebar-will-collapse .sidebar-user {
}
.sidebar-body {
flex: 1;
flex: 1 1 auto;
min-height: 0;
overflow-y: auto;
padding: 1rem 0;
}
.sidebar-footer {
flex-shrink: 0;
}
.sidebar-nav {
display: flex;
flex-direction: column;

View File

Before

Width:  |  Height:  |  Size: 200 KiB

After

Width:  |  Height:  |  Size: 200 KiB

View File

Before

Width:  |  Height:  |  Size: 872 KiB

After

Width:  |  Height:  |  Size: 872 KiB

View File

@@ -336,6 +336,72 @@
}
};
// Render the object-count quota indicator: a thin progress bar plus a
// caption, or a "No limit" caption when no quota is configured.
const renderObjectsLimit = (totalObjects, maxObjects) => {
    if (!maxObjects || maxObjects <= 0) {
        return '<div class="small text-muted mt-2">No limit</div>';
    }
    const pct = Math.min(100, Math.floor(totalObjects / maxObjects * 100));
    // Color thresholds: red at 90%+, amber at 75%+, green otherwise.
    let cls = 'bg-success';
    if (pct >= 90) cls = 'bg-danger';
    else if (pct >= 75) cls = 'bg-warning';
    return '<div class="progress mt-2" style="height: 4px;">' +
        '<div class="progress-bar ' + cls + '" style="width: ' + pct + '%"></div>' +
        '</div>' +
        '<div class="small text-muted mt-1">' + pct + '% of ' + maxObjects.toLocaleString() + ' limit</div>';
};
// Render the storage-bytes quota indicator, or "No limit" when unset.
// Mirrors renderObjectsLimit but formats the cap with formatBytes.
const renderBytesLimit = (totalBytes, maxBytes) => {
    if (!maxBytes || maxBytes <= 0) {
        return '<div class="small text-muted mt-2">No limit</div>';
    }
    const pct = Math.min(100, Math.floor(totalBytes / maxBytes * 100));
    let cls = 'bg-success';
    if (pct >= 90) cls = 'bg-danger';
    else if (pct >= 75) cls = 'bg-warning';
    return '<div class="progress mt-2" style="height: 4px;">' +
        '<div class="progress-bar ' + cls + '" style="width: ' + pct + '%"></div>' +
        '</div>' +
        '<div class="small text-muted mt-1">' + pct + '% of ' + formatBytes(maxBytes) + ' limit</div>';
};
// Re-render both usage-limit indicators from the cards' current data-*
// attributes (used after totals or quota values change).
const redrawUsageLimits = () => {
    const repaint = (cardSel, limitSel, countAttr, maxAttr, render) => {
        const card = document.querySelector(cardSel);
        const limit = document.querySelector(limitSel);
        if (!card || !limit) return;
        const current = parseInt(card.dataset[countAttr] || '0', 10);
        const maxRaw = card.dataset[maxAttr];
        limit.innerHTML = render(current, maxRaw ? parseInt(maxRaw, 10) : 0);
    };
    repaint('[data-usage-objects]', '[data-usage-objects-limit]', 'totalObjects', 'maxObjects', renderObjectsLimit);
    repaint('[data-usage-bytes]', '[data-usage-bytes-limit]', 'totalBytes', 'maxBytes', renderBytesLimit);
};
// Fetch fresh object/byte totals for the current bucket and repaint the
// usage cards. Best-effort: any failure leaves the stale values in place.
const refreshBucketUsage = async () => {
    try {
        const bucketName = objectsContainer?.dataset.bucket;
        if (!bucketName) return;
        const response = await fetch(`/ui/buckets/${encodeURIComponent(bucketName)}/stats`, {
            headers: { 'Accept': 'application/json' }
        });
        if (!response.ok) return;
        const stats = await response.json();
        const objectsCard = document.querySelector('[data-usage-objects]');
        if (objectsCard) objectsCard.dataset.totalObjects = String(stats.total_objects);
        const objectsValue = document.querySelector('[data-usage-objects-value]');
        if (objectsValue) objectsValue.textContent = stats.total_objects.toLocaleString();
        const bytesCard = document.querySelector('[data-usage-bytes]');
        if (bytesCard) bytesCard.dataset.totalBytes = String(stats.total_bytes);
        const bytesValue = document.querySelector('[data-usage-bytes-value]');
        if (bytesValue) bytesValue.textContent = formatBytes(stats.total_bytes);
        redrawUsageLimits();
    } catch (e) { /* deliberate: usage refresh is non-critical */ }
};
let topSpacer = null;
let bottomSpacer = null;
@@ -486,7 +552,13 @@
let scrollTimeout = null;
const handleVirtualScroll = () => {
if (scrollTimeout) cancelAnimationFrame(scrollTimeout);
scrollTimeout = requestAnimationFrame(renderVirtualRows);
scrollTimeout = requestAnimationFrame(() => {
renderVirtualRows();
const c = document.querySelector('.objects-table-container');
if (c && c.scrollHeight - c.scrollTop - c.clientHeight < 500) {
if (typeof loadMoreOnSentinel === 'function') loadMoreOnSentinel();
}
});
};
const refreshVirtualList = () => {
@@ -497,6 +569,11 @@
if (allObjects.length === 0 && streamFolders.length === 0 && !hasMoreObjects) {
showEmptyState();
} else {
const isFiltering = currentFilterTerm && currentFilterTerm.length > 0;
const title = isFiltering ? 'No matches' : 'Empty folder';
const body = isFiltering
? `No objects match "${escapeHtml(currentFilterTerm)}".`
: `This folder contains no objects${hasMoreObjects ? ' yet. Loading more...' : '.'}`;
objectsTableBody.innerHTML = `
<tr>
<td colspan="4" class="py-5">
@@ -506,8 +583,8 @@
<path d="M9.828 3h3.982a2 2 0 0 1 1.992 2.181l-.637 7A2 2 0 0 1 13.174 14H2.825a2 2 0 0 1-1.991-1.819l-.637-7a1.99 1.99 0 0 1 .342-1.31L.5 3a2 2 0 0 1 2-2h3.672a2 2 0 0 1 1.414.586l.828.828A2 2 0 0 0 9.828 3zm-8.322.12C1.72 3.042 1.95 3 2.19 3h5.396l-.707-.707A1 1 0 0 0 6.172 2H2.5a1 1 0 0 0-1 .981l.006.139z"/>
</svg>
</div>
<h6 class="mb-2">Empty folder</h6>
<p class="text-muted small mb-0">This folder contains no objects${hasMoreObjects ? ' yet. Loading more...' : '.'}</p>
<h6 class="mb-2">${title}</h6>
<p class="text-muted small mb-0">${body}</p>
</div>
</td>
</tr>
@@ -660,6 +737,10 @@
break;
case 'count':
totalObjectCount = msg.total_count || 0;
if (!currentPrefix) {
bucketTotalObjects = totalObjectCount;
updateObjectCountBadge();
}
if (objectsLoadingRow) {
const loadingText = objectsLoadingRow.querySelector('p');
if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
@@ -770,7 +851,7 @@
}
totalObjectCount = data.total_count || 0;
if (!append && !currentPrefix && !useDelimiterMode) bucketTotalObjects = totalObjectCount;
if (!append && !currentPrefix) bucketTotalObjects = totalObjectCount;
nextContinuationToken = data.next_continuation_token;
if (!append && objectsLoadingRow) {
@@ -907,12 +988,32 @@
scrollContainer.addEventListener('scroll', handleVirtualScroll, { passive: true });
}
// True when the infinite-scroll sentinel is within 500px of the visible
// area — of the scroll container when present, otherwise the viewport.
const isSentinelVisible = () => {
    if (!scrollSentinel) return false;
    const margin = 500;
    const rect = scrollSentinel.getBoundingClientRect();
    if (scrollContainer) {
        const cr = scrollContainer.getBoundingClientRect();
        return rect.top <= cr.bottom + margin && rect.bottom >= cr.top - margin;
    }
    return rect.top <= window.innerHeight + margin && rect.bottom >= -margin;
};
// Sentinel hit: load the next page of search results when a search is
// active, otherwise the next page of the plain object listing.
const loadMoreOnSentinel = () => {
    if (searchResults !== null) {
        const canFetchMore = searchNextToken && !searchLoading;
        if (canFetchMore) {
            performServerSearch(currentFilterTerm, true);
        }
        return;
    }
    if (hasMoreObjects && !isLoadingObjects) {
        loadObjects(true);
    }
};
if (scrollSentinel && scrollContainer) {
const containerObserver = new IntersectionObserver((entries) => {
entries.forEach(entry => {
if (entry.isIntersecting && hasMoreObjects && !isLoadingObjects) {
loadObjects(true);
}
if (entry.isIntersecting) loadMoreOnSentinel();
});
}, {
root: scrollContainer,
@@ -923,9 +1024,7 @@
const viewportObserver = new IntersectionObserver((entries) => {
entries.forEach(entry => {
if (entry.isIntersecting && hasMoreObjects && !isLoadingObjects) {
loadObjects(true);
}
if (entry.isIntersecting) loadMoreOnSentinel();
});
}, {
root: null,
@@ -1161,6 +1260,11 @@
});
if (folders.length === 0 && files.length === 0) {
const isFiltering = currentFilterTerm && currentFilterTerm.length > 0;
const title = isFiltering ? 'No matches' : 'Empty folder';
const body = isFiltering
? `No objects match "${escapeHtml(currentFilterTerm)}".`
: 'This folder contains no objects.';
const emptyRow = document.createElement('tr');
emptyRow.innerHTML = `
<td colspan="4" class="py-5">
@@ -1170,8 +1274,8 @@
<path d="M9.828 3h3.982a2 2 0 0 1 1.992 2.181l-.637 7A2 2 0 0 1 13.174 14H2.825a2 2 0 0 1-1.991-1.819l-.637-7a1.99 1.99 0 0 1 .342-1.31L.5 3a2 2 0 0 1 2-2h3.672a2 2 0 0 1 1.414.586l.828.828A2 2 0 0 0 9.828 3zm-8.322.12C1.72 3.042 1.95 3 2.19 3h5.396l-.707-.707A1 1 0 0 0 6.172 2H2.5a1 1 0 0 0-1 .981l.006.139z"/>
</svg>
</div>
<h6 class="mb-2">Empty folder</h6>
<p class="text-muted small mb-0">This folder contains no objects.</p>
<h6 class="mb-2">${title}</h6>
<p class="text-muted small mb-0">${body}</p>
</div>
</td>
`;
@@ -1351,6 +1455,11 @@
});
const bulkActionsWrapper = document.getElementById('bulk-actions-wrapper');
const bulkDownloadButton = document.querySelector('[data-bulk-download-trigger]');
// Enable the bulk-download button only while at least one row is selected.
const updateBulkDownloadState = () => {
    if (bulkDownloadButton) {
        bulkDownloadButton.disabled = selectedRows.size === 0;
    }
};
const updateBulkDeleteState = () => {
const selectedCount = selectedRows.size;
if (bulkDeleteButton) {
@@ -1377,6 +1486,7 @@
selectAllCheckbox.checked = visibleSelectedCount > 0 && visibleSelectedCount === total && total > 0;
selectAllCheckbox.indeterminate = visibleSelectedCount > 0 && visibleSelectedCount < total;
}
updateBulkDownloadState();
};
function toggleRowSelection(row, shouldSelect) {
@@ -1491,6 +1601,7 @@
previewPanel.classList.add('d-none');
activeRow = null;
loadObjects(false);
refreshBucketUsage();
} catch (error) {
bulkDeleteModal?.hide();
showMessage({ title: 'Delete failed', body: (error && error.message) || 'Unable to delete selected objects', variant: 'danger' });
@@ -1966,6 +2077,7 @@
previewPanel.classList.add('d-none');
activeRow = null;
loadObjects(false);
refreshBucketUsage();
} catch (err) {
if (deleteModal) deleteModal.hide();
showMessage({ title: 'Delete failed', body: err.message || 'Unable to delete object', variant: 'danger' });
@@ -2202,47 +2314,69 @@
const filterWarningText = document.getElementById('filter-warning-text');
const folderViewStatus = document.getElementById('folder-view-status');
const updateFilterWarning = () => {
if (!filterWarning) return;
const isFiltering = currentFilterTerm.length > 0;
if (isFiltering && hasMoreObjects) {
filterWarning.classList.remove('d-none');
} else {
filterWarning.classList.add('d-none');
}
};
let searchDebounceTimer = null;
let searchAbortController = null;
let searchResults = null;
let searchNextToken = null;
let searchLoading = false;
const SEARCH_PAGE_SIZE = 500;
const performServerSearch = async (term) => {
if (searchAbortController) searchAbortController.abort();
const updateFilterWarning = () => {
if (!filterWarning) return;
filterWarning.classList.add('d-none');
};
const performServerSearch = async (term, append = false) => {
if (!append && searchAbortController) searchAbortController.abort();
if (append && (searchLoading || !searchNextToken)) return;
if (!append) {
searchAbortController = new AbortController();
}
searchLoading = true;
if (append && loadMoreSpinner) loadMoreSpinner.classList.remove('d-none');
let succeeded = false;
try {
const params = new URLSearchParams({ q: term, limit: '500' });
const params = new URLSearchParams({ q: term, limit: String(SEARCH_PAGE_SIZE) });
if (currentPrefix) params.set('prefix', currentPrefix);
if (append && searchNextToken) params.set('start_after', searchNextToken);
const searchUrl = objectsStreamUrl.replace('/stream', '/search');
const response = await fetch(`${searchUrl}?${params}`, {
signal: searchAbortController.signal
signal: searchAbortController?.signal
});
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const data = await response.json();
searchResults = (data.results || []).map(obj => processStreamObject(obj));
const newResults = (data.results || []).map(obj => processStreamObject(obj));
if (append && Array.isArray(searchResults)) {
searchResults = searchResults.concat(newResults);
} else {
searchResults = newResults;
}
searchNextToken = data.truncated ? (data.next_token || null) : null;
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
refreshVirtualList();
if (loadMoreStatus) {
const countText = searchResults.length.toLocaleString();
const truncated = data.truncated ? '+' : '';
loadMoreStatus.textContent = `${countText}${truncated} result${searchResults.length !== 1 ? 's' : ''}`;
const more = searchNextToken ? '+' : '';
const noun = searchResults.length === 1 ? 'result' : 'results';
loadMoreStatus.textContent = searchNextToken
? `${countText}${more} ${noun} (scroll to load more)`
: `${countText} ${noun}`;
}
succeeded = true;
} catch (e) {
if (e.name === 'AbortError') return;
if (loadMoreStatus) {
loadMoreStatus.textContent = 'Search failed';
loadMoreStatus.textContent = 'Search failed (scroll to retry)';
}
} finally {
searchLoading = false;
if (loadMoreSpinner) loadMoreSpinner.classList.add('d-none');
}
if (succeeded && searchNextToken && !searchLoading && isSentinelVisible()) {
performServerSearch(currentFilterTerm, true);
}
};
@@ -2262,6 +2396,7 @@
if (!isFiltering && wasFiltering) {
if (searchAbortController) searchAbortController.abort();
searchResults = null;
searchNextToken = null;
memoizedVisibleItems = null;
memoizedInputs = { objectCount: -1, folderCount: -1, prefix: null, filterTerm: null };
if (loadMoreStatus) {
@@ -3071,6 +3206,7 @@
} else if (errorCount > 0) {
showMessage({ title: 'Upload failed', body: `${errorCount} file(s) failed to upload.`, variant: 'danger' });
}
if (successCount > 0) refreshBucketUsage();
};
const performBulkUpload = async (files) => {
@@ -3238,15 +3374,8 @@
}
}
const bulkDownloadButton = document.querySelector('[data-bulk-download-trigger]');
const bulkDownloadEndpoint = document.getElementById('objects-drop-zone')?.dataset.bulkDownloadEndpoint;
const updateBulkDownloadState = () => {
if (!bulkDownloadButton) return;
const selectedCount = document.querySelectorAll('[data-object-select]:checked').length;
bulkDownloadButton.disabled = selectedCount === 0;
};
selectAllCheckbox?.addEventListener('change', (event) => {
const shouldSelect = Boolean(event.target?.checked);
@@ -3281,7 +3410,6 @@
});
updateBulkDeleteState();
setTimeout(updateBulkDownloadState, 0);
});
bulkDownloadButton?.addEventListener('click', async () => {
@@ -4329,10 +4457,25 @@
});
if (lifecycleHistoryCard) {
loadLifecycleHistory();
const lifecycleTab = document.getElementById('lifecycle-tab');
const lifecyclePane = document.getElementById('lifecycle-pane');
// Begin periodic lifecycle-history refresh; when the shared polling manager
// is unavailable, fall back to a single immediate load.
const startLifecyclePolling = () => {
  const manager = window.pollingManager;
  if (manager) {
    manager.start('lifecycle', loadLifecycleHistory);
    return;
  }
  loadLifecycleHistory();
};
// Halt the periodic lifecycle-history refresh if a polling manager is present.
const stopLifecyclePolling = () => {
  window.pollingManager?.stop('lifecycle');
};
if (lifecyclePane && lifecyclePane.classList.contains('show') && lifecyclePane.classList.contains('active')) {
startLifecyclePolling();
}
lifecycleTab?.addEventListener('shown.bs.tab', startLifecyclePolling);
lifecycleTab?.addEventListener('hidden.bs.tab', stopLifecyclePolling);
}
if (corsCard) loadCorsRules();
@@ -4542,6 +4685,16 @@
var maxObjInput = document.getElementById('max_objects');
if (maxMbInput) maxMbInput.value = maxBytes ? Math.floor(maxBytes / 1048576) : '';
if (maxObjInput) maxObjInput.value = maxObjects || '';
var objectsCard = document.querySelector('[data-usage-objects]');
if (objectsCard) {
objectsCard.dataset.maxObjects = maxObjects && maxObjects > 0 ? String(maxObjects) : '';
}
var bytesCard = document.querySelector('[data-usage-bytes]');
if (bytesCard) {
bytesCard.dataset.maxBytes = maxBytes && maxBytes > 0 ? String(maxBytes) : '';
}
redrawUsageLimits();
}
function updatePolicyCard(hasPolicy, preset) {
@@ -4815,7 +4968,7 @@
e.preventDefault();
window.UICore.submitFormAjax(deleteBucketForm, {
onSuccess: function () {
sessionStorage.setItem('flashMessage', JSON.stringify({ title: 'Bucket deleted', variant: 'success' }));
sessionStorage.setItem('flashMessage', JSON.stringify({ title: 'Success', body: 'Bucket deleted', variant: 'success' }));
window.location.href = window.BucketDetailConfig?.endpoints?.bucketsOverview || '/ui/buckets';
}
});

View File

@@ -29,7 +29,7 @@ window.IAMManagement = (function() {
];
var policyTemplates = {
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
full: [{ bucket: '*', actions: ['*'] }],
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
@@ -39,7 +39,7 @@ window.IAMManagement = (function() {
// Determine whether a policy set grants full administrative rights.
// Admin is defined strictly as a wildcard-bucket statement ('*') carrying the
// wildcard action ('*'); bucket-scoped wildcards or legacy 'iam:*' grants do
// not qualify (this matches the runtime/UI alignment in the IAM admin change).
// NOTE: the previous body had two consecutive return statements (stale diff
// residue); the first, permissive check shadowed the intended strict one and
// made it unreachable. Only the intended strict check is kept.
function isAdminUser(policies) {
  if (!policies || !policies.length) return false;
  return policies.some(function(p) {
    return p.bucket === '*' && p.actions && p.actions.indexOf('*') >= 0;
  });
}

View File

@@ -87,19 +87,18 @@
</svg>
<span>Connections</span>
</a>
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
</svg>
<span>Metrics</span>
</a>
<a href="{{ url_for(endpoint="ui.sites_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.sites_dashboard" %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
</svg>
<span>Sites</span>
</a>
<a href="{{ url_for(endpoint="ui.cluster_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.cluster_dashboard" %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M7.752.066a.5.5 0 0 1 .496 0l3.75 2.143a.5.5 0 0 1 .252.434v3.995l3.498 2A.5.5 0 0 1 16 9.07v4.286a.5.5 0 0 1-.252.434l-3.75 2.143a.5.5 0 0 1-.496 0l-3.502-2-3.502 2.001a.5.5 0 0 1-.496 0l-3.75-2.143A.5.5 0 0 1 0 13.357V9.071a.5.5 0 0 1 .252-.434L3.75 6.638V2.643a.5.5 0 0 1 .252-.434L7.752.066ZM4.25 7.504 1.508 9.071l2.742 1.567 2.742-1.567L4.25 7.504ZM7.5 9.933l-2.75 1.571v3.134l2.75-1.571V9.933Zm1 3.134 2.75 1.571v-3.134L8.5 9.933v3.134Zm.508-3.996 2.742 1.567 2.742-1.567-2.742-1.567-2.742 1.567Zm2.242-2.433V3.504L8.5 5.076V8.21l2.75-1.572ZM7.5 8.21V5.076L4.75 3.504v3.134L7.5 8.21ZM5.258 2.643 8 4.21l2.742-1.567L8 1.076 5.258 2.643ZM15 9.933l-2.75 1.571v3.134L15 13.067V9.933ZM3.75 14.638v-3.134L1 9.933v3.134l2.75 1.571Z"/>
</svg>
<span>Cluster</span>
</a>
{% endif %}
{% if website_hosting_nav %}
<a href="{{ url_for(endpoint="ui.website_domains_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.website_domains_dashboard" %}active{% endif %}">
@@ -111,6 +110,13 @@
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
</svg>
<span>Metrics</span>
</a>
<a href="{{ url_for(endpoint="ui.system_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.system_dashboard" %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
@@ -195,19 +201,18 @@
</svg>
<span class="sidebar-link-text">Connections</span>
</a>
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}" data-tooltip="Metrics">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
</svg>
<span class="sidebar-link-text">Metrics</span>
</a>
<a href="{{ url_for(endpoint="ui.sites_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.sites_dashboard" %}active{% endif %}" data-tooltip="Sites">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
</svg>
<span class="sidebar-link-text">Sites</span>
</a>
<a href="{{ url_for(endpoint="ui.cluster_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.cluster_dashboard" %}active{% endif %}" data-tooltip="Cluster">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M7.752.066a.5.5 0 0 1 .496 0l3.75 2.143a.5.5 0 0 1 .252.434v3.995l3.498 2A.5.5 0 0 1 16 9.07v4.286a.5.5 0 0 1-.252.434l-3.75 2.143a.5.5 0 0 1-.496 0l-3.502-2-3.502 2.001a.5.5 0 0 1-.496 0l-3.75-2.143A.5.5 0 0 1 0 13.357V9.071a.5.5 0 0 1 .252-.434L3.75 6.638V2.643a.5.5 0 0 1 .252-.434L7.752.066ZM4.25 7.504 1.508 9.071l2.742 1.567 2.742-1.567L4.25 7.504ZM7.5 9.933l-2.75 1.571v3.134l2.75-1.571V9.933Zm1 3.134 2.75 1.571v-3.134L8.5 9.933v3.134Zm.508-3.996 2.742 1.567 2.742-1.567-2.742-1.567-2.742 1.567Zm2.242-2.433V3.504L8.5 5.076V8.21l2.75-1.572ZM7.5 8.21V5.076L4.75 3.504v3.134L7.5 8.21ZM5.258 2.643 8 4.21l2.742-1.567L8 1.076 5.258 2.643ZM15 9.933l-2.75 1.571v3.134L15 13.067V9.933ZM3.75 14.638v-3.134L1 9.933v3.134l2.75 1.571Z"/>
</svg>
<span class="sidebar-link-text">Cluster</span>
</a>
{% endif %}
{% if website_hosting_nav %}
<a href="{{ url_for(endpoint="ui.website_domains_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.website_domains_dashboard" %}active{% endif %}" data-tooltip="Domains">
@@ -219,6 +224,13 @@
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}" data-tooltip="Metrics">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
</svg>
<span class="sidebar-link-text">Metrics</span>
</a>
<a href="{{ url_for(endpoint="ui.system_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.system_dashboard" %}active{% endif %}" data-tooltip="System">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>

View File

@@ -19,11 +19,11 @@
<div>
<h1 class="h3 fw-bold mb-1">{{ bucket_name }}</h1>
<div class="d-flex align-items-center gap-2">
<span class="badge {% if versioning_enabled %}text-bg-success{% else %}text-bg-secondary{% endif %} rounded-pill">
<span class="badge {% if versioning_enabled %}text-bg-success{% elif versioning_suspended %}text-bg-warning{% else %}text-bg-secondary{% endif %} rounded-pill">
<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M8 16a2 2 0 0 0 2-2H6a2 2 0 0 0 2 2zm.995-14.901a1 1 0 1 0-1.99 0A5.002 5.002 0 0 0 3 6c0 1.098-.5 6-2 7h14c-1.5-1-2-5.902-2-7 0-2.42-1.72-4.44-4.005-4.901z"/>
</svg>
{% if versioning_enabled %}Versioning On{% else %}Versioning Off{% endif %}
{% if versioning_enabled %}Versioning On{% elif versioning_suspended %}Versioning Suspended{% else %}Versioning Off{% endif %}
</span>
<span class="text-muted small" id="object-count-badge">
<span class="spinner-border spinner-border-sm" role="status" style="width: 0.75rem; height: 0.75rem;"></span>
@@ -626,6 +626,16 @@
<p class="mb-0 small">All previous versions of objects are preserved. You can roll back accidental changes or deletions at any time.</p>
</div>
</div>
{% elif versioning_suspended %}
<div class="alert alert-warning d-flex align-items-start mb-4" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
<path d="M5.5 3.5A1.5 1.5 0 0 1 7 5v6a1.5 1.5 0 0 1-3 0V5a1.5 1.5 0 0 1 1.5-1.5zm5 0A1.5 1.5 0 0 1 12 5v6a1.5 1.5 0 0 1-3 0V5a1.5 1.5 0 0 1 1.5-1.5z"/>
</svg>
<div>
<strong>Versioning is suspended</strong>
<p class="mb-0 small">New uploads overwrite existing objects, but previously archived versions are still retained. Re-enable versioning to start preserving new versions again.</p>
</div>
</div>
{% else %}
<div class="alert alert-secondary d-flex align-items-start mb-4" role="alert">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
@@ -633,8 +643,8 @@
<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
</svg>
<div>
<strong>Versioning is suspended</strong>
<p class="mb-0 small">New object uploads overwrite existing objects. Enable versioning to preserve previous versions.</p>
<strong>Versioning is disabled</strong>
<p class="mb-0 small">This bucket has never had versioning enabled. Enable it to preserve previous versions of every object.</p>
</div>
</div>
{% endif %}
@@ -869,9 +879,10 @@
<h6 class="small fw-semibold mb-3">Current Usage</h6>
<div class="row g-3">
<div class="col-6">
<div class="border rounded p-3 text-center">
<div class="fs-4 fw-bold text-primary">{{ total_objects }}</div>
<div class="border rounded p-3 text-center" data-usage-objects data-total-objects="{{ total_objects }}" data-max-objects="{% if has_max_objects %}{{ max_objects }}{% endif %}">
<div class="fs-4 fw-bold text-primary" data-usage-objects-value>{{ total_objects }}</div>
<div class="small text-muted">Total Objects</div>
<div data-usage-objects-limit>
{% if has_max_objects %}
<div class="progress mt-2" style="height: 4px;">
{% if max_objects > 0 %}{% set obj_pct = total_objects / max_objects * 100 | int %}{% else %}{% set obj_pct = 0 %}{% endif %}
@@ -881,6 +892,7 @@
{% else %}
<div class="small text-muted mt-2">No limit</div>
{% endif %}
</div>
{% if version_count > 0 %}
<div class="small text-muted mt-1">
<span class="text-body-secondary">({{ current_objects }} current + {{ version_count }} versions)</span>
@@ -889,9 +901,10 @@
</div>
</div>
<div class="col-6">
<div class="border rounded p-3 text-center">
<div class="fs-4 fw-bold text-primary">{{ total_bytes | filesizeformat }}</div>
<div class="border rounded p-3 text-center" data-usage-bytes data-total-bytes="{{ total_bytes }}" data-max-bytes="{% if has_max_bytes %}{{ max_bytes }}{% endif %}">
<div class="fs-4 fw-bold text-primary" data-usage-bytes-value>{{ total_bytes | filesizeformat }}</div>
<div class="small text-muted">Total Storage</div>
<div data-usage-bytes-limit>
{% if has_max_bytes %}
<div class="progress mt-2" style="height: 4px;">
{% if max_bytes > 0 %}{% set bytes_pct = total_bytes / max_bytes * 100 | int %}{% else %}{% set bytes_pct = 0 %}{% endif %}
@@ -901,6 +914,7 @@
{% else %}
<div class="small text-muted mt-2">No limit</div>
{% endif %}
</div>
{% if version_bytes > 0 %}
<div class="small text-muted mt-1">
<span class="text-body-secondary">({{ current_bytes | filesizeformat }} current + {{ version_bytes | filesizeformat }} versions)</span>

View File

@@ -0,0 +1,461 @@
{% extends "base.html" %}
{% block title %}Cluster - S3 Compatible Storage{% endblock %}
{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<p class="text-uppercase text-muted small mb-1">Cluster Overview</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M7.752.066a.5.5 0 0 1 .496 0l3.75 2.143a.5.5 0 0 1 .252.434v3.995l3.498 2A.5.5 0 0 1 16 9.07v4.286a.5.5 0 0 1-.252.434l-3.75 2.143a.5.5 0 0 1-.496 0l-3.502-2-3.502 2.001a.5.5 0 0 1-.496 0l-3.75-2.143A.5.5 0 0 1 0 13.357V9.071a.5.5 0 0 1 .252-.434L3.75 6.638V2.643a.5.5 0 0 1 .252-.434L7.752.066ZM4.25 7.504 1.508 9.071l2.742 1.567 2.742-1.567L4.25 7.504ZM7.5 9.933l-2.75 1.571v3.134l2.75-1.571V9.933Zm1 3.134 2.75 1.571v-3.134L8.5 9.933v3.134Zm.508-3.996 2.742 1.567 2.742-1.567-2.742-1.567-2.742 1.567Zm2.242-2.433V3.504L8.5 5.076V8.21l2.75-1.572ZM7.5 8.21V5.076L4.75 3.504v3.134L7.5 8.21ZM5.258 2.643 8 4.21l2.742-1.567L8 1.076 5.258 2.643ZM15 9.933l-2.75 1.571v3.134L15 13.067V9.933ZM3.75 14.638v-3.134L1 9.933v3.134l2.75 1.571Z"/>
</svg>
Cluster
</h1>
<p class="text-muted mb-0 mt-1">Live view across this site and every registered peer.</p>
</div>
<div class="d-flex align-items-center gap-2">
<span class="badge bg-success bg-opacity-10 text-success fs-6 px-3 py-2" id="cluster-online-badge">
{{ cluster_online_count }} / {{ cluster_total_count }} online
</span>
<span class="text-muted small d-none d-md-inline" id="cluster-updated-at" title="Last refresh">just now</span>
<button type="button" class="btn btn-outline-secondary btn-sm d-flex align-items-center gap-1" id="cluster-refresh-btn" title="Refresh now (bypass 10s cache)">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16" id="cluster-refresh-icon">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
<span>Refresh</span>
</button>
</div>
</div>
<div class="row g-3 mb-4">
<div class="col-md-3 col-sm-6">
<div class="card shadow-sm border-0 h-100" style="border-radius: 1rem;">
<div class="card-body d-flex align-items-center gap-3">
<div class="d-flex align-items-center justify-content-center rounded-3 bg-primary bg-opacity-10 text-primary" style="width:44px;height:44px;flex-shrink:0;">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
<path d="M7.752.066a.5.5 0 0 1 .496 0l3.75 2.143a.5.5 0 0 1 .252.434v3.995l3.498 2A.5.5 0 0 1 16 9.07v4.286a.5.5 0 0 1-.252.434l-3.75 2.143a.5.5 0 0 1-.496 0l-3.502-2-3.502 2.001a.5.5 0 0 1-.496 0l-3.75-2.143A.5.5 0 0 1 0 13.357V9.071a.5.5 0 0 1 .252-.434L3.75 6.638V2.643a.5.5 0 0 1 .252-.434L7.752.066Z"/>
</svg>
</div>
<div class="flex-grow-1">
<div class="text-uppercase text-muted small">Sites</div>
<div class="h3 mb-0" id="cluster-total-sites">{{ cluster_total_count }}</div>
</div>
</div>
</div>
</div>
<div class="col-md-3 col-sm-6">
<div class="card shadow-sm border-0 h-100" style="border-radius: 1rem;">
<div class="card-body d-flex align-items-center gap-3">
<div class="d-flex align-items-center justify-content-center rounded-3 bg-info bg-opacity-10 text-info" style="width:44px;height:44px;flex-shrink:0;">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
</svg>
</div>
<div class="flex-grow-1">
<div class="text-uppercase text-muted small">Buckets</div>
<div class="h3 mb-0" id="cluster-total-buckets">{{ cluster_total_buckets }}</div>
</div>
</div>
</div>
</div>
<div class="col-md-3 col-sm-6">
<div class="card shadow-sm border-0 h-100" style="border-radius: 1rem;">
<div class="card-body d-flex align-items-center gap-3">
<div class="d-flex align-items-center justify-content-center rounded-3 bg-warning bg-opacity-10 text-warning" style="width:44px;height:44px;flex-shrink:0;">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
<path d="M14 14V4.5L9.5 0H4a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h8a2 2 0 0 0 2-2zM9.5 3A1.5 1.5 0 0 0 11 4.5h2V14a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1h5.5z"/>
</svg>
</div>
<div class="flex-grow-1">
<div class="text-uppercase text-muted small">Objects</div>
<div class="h3 mb-0" id="cluster-total-objects">{{ cluster_total_objects }}</div>
</div>
</div>
</div>
</div>
<div class="col-md-3 col-sm-6">
<div class="card shadow-sm border-0 h-100" style="border-radius: 1rem;">
<div class="card-body d-flex align-items-center gap-3">
<div class="d-flex align-items-center justify-content-center rounded-3 bg-success bg-opacity-10 text-success" style="width:44px;height:44px;flex-shrink:0;">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
<path d="M0 10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8H0v2zm1.5 1a.5.5 0 1 1 0-1 .5.5 0 0 1 0 1zm2 0a.5.5 0 1 1 0-1 .5.5 0 0 1 0 1zM0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v3H0V4z"/>
<path d="M1.5 6a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zm2 0a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1z"/>
</svg>
</div>
<div class="flex-grow-1">
<div class="text-uppercase text-muted small">Size</div>
<div class="h3 mb-0" id="cluster-total-size" data-bytes="{{ cluster_total_size_bytes }}">{{ cluster_total_size_bytes }}</div>
</div>
</div>
</div>
</div>
</div>
<div class="row g-4" id="cluster-sites-row">
{% for site in cluster_sites %}
<div class="col-xl-6">
<div class="card shadow-sm border-0 h-100 site-card" data-site-id="{{ site.site_id }}" style="border-radius: 1rem;">
<div class="card-body p-4">
<div class="d-flex align-items-start justify-content-between mb-3">
<div class="flex-grow-1 min-w-0">
<div class="d-flex align-items-center gap-2 mb-1 flex-wrap">
<span class="badge bg-success bg-opacity-10 text-success site-status-online {% if not site.online %}d-none{% endif %}">
<span class="d-inline-block rounded-circle bg-success me-1" style="width:6px;height:6px;"></span>online
</span>
<span class="badge bg-danger bg-opacity-10 text-danger site-status-offline {% if site.online %}d-none{% endif %}">
<span class="d-inline-block rounded-circle bg-danger me-1" style="width:6px;height:6px;"></span>offline
</span>
<span class="badge bg-warning bg-opacity-10 text-warning site-status-stale {% if not site.stale %}d-none{% endif %}" title="Could not reach peer">stale</span>
{% if site.is_local %}
<span class="badge bg-primary bg-opacity-10 text-primary">this site</span>
{% endif %}
</div>
<h5 class="fw-semibold mb-0 text-truncate">
{% if site.display_name and site.display_name != "" %}{{ site.display_name }}{% else %}{{ site.site_id }}{% endif %}
</h5>
<div class="text-muted small">
<span class="font-monospace">{{ site.site_id }}</span>
{% if site.region %} · {{ site.region }}{% elif site.registered_region %} · {{ site.registered_region }}{% endif %}
</div>
</div>
{% if site.endpoint %}
<code class="small text-muted text-end ms-2" style="word-break:break-all;">{{ site.endpoint }}</code>
{% endif %}
</div>
<div class="site-online-content {% if not site.online %}d-none{% endif %}">
<div class="row g-3 mb-3">
<div class="col-4">
<div class="text-uppercase text-muted small mb-1 d-flex align-items-center gap-1">
<svg xmlns="http://www.w3.org/2000/svg" width="11" height="11" fill="currentColor" viewBox="0 0 16 16"><path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/></svg>
Buckets
</div>
<div class="h4 mb-0 site-buckets">{{ site.buckets | default(value=0) }}</div>
</div>
<div class="col-4">
<div class="text-uppercase text-muted small mb-1 d-flex align-items-center gap-1">
<svg xmlns="http://www.w3.org/2000/svg" width="11" height="11" fill="currentColor" viewBox="0 0 16 16"><path d="M14 14V4.5L9.5 0H4a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h8a2 2 0 0 0 2-2z"/></svg>
Objects
</div>
<div class="h4 mb-0 site-objects">{{ site.objects | default(value=0) }}</div>
</div>
<div class="col-4">
<div class="text-uppercase text-muted small mb-1 d-flex align-items-center gap-1">
<svg xmlns="http://www.w3.org/2000/svg" width="11" height="11" fill="currentColor" viewBox="0 0 16 16"><path d="M0 10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8H0v2zM0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v3H0V4z"/></svg>
Size
</div>
<div class="h4 mb-0 site-size" data-bytes="{{ site.size_bytes | default(value=0) }}">{{ site.size_bytes | default(value=0) }}</div>
</div>
</div>
{% if site.capacity %}
<div class="mb-3">
<div class="d-flex justify-content-between align-items-center mb-1">
<span class="text-uppercase text-muted small">Disk Capacity</span>
<span class="small text-muted">
<span class="site-disk-used" data-bytes="0">0</span> / <span class="site-disk-total" data-bytes="{{ site.capacity.total_bytes | default(value=0) }}">{{ site.capacity.total_bytes | default(value=0) }}</span>
</span>
</div>
<div class="progress" style="height:6px;border-radius:3px;">
<div class="progress-bar bg-primary site-disk-bar" role="progressbar" style="width:0%;" data-total="{{ site.capacity.total_bytes | default(value=0) }}" data-available="{{ site.capacity.available_bytes | default(value=0) }}"></div>
</div>
</div>
{% endif %}
{% if site.system and site.system.cpu_percent is defined %}
<div class="mb-3">
<div class="text-uppercase text-muted small mb-2">System</div>
<div class="d-flex flex-column gap-2">
<div>
<div class="d-flex justify-content-between small mb-1">
<span class="text-muted">CPU</span>
<span class="site-cpu-label">{{ site.system.cpu_percent | default(value=0) }}%</span>
</div>
<div class="progress" style="height:4px;border-radius:2px;">
<div class="progress-bar site-cpu-bar" role="progressbar" style="width:{{ site.system.cpu_percent | default(value=0) }}%;"></div>
</div>
</div>
<div>
<div class="d-flex justify-content-between small mb-1">
<span class="text-muted">Memory</span>
<span class="site-mem-label">{{ site.system.memory_percent | default(value=0) }}%</span>
</div>
<div class="progress" style="height:4px;border-radius:2px;">
<div class="progress-bar site-mem-bar" role="progressbar" style="width:{{ site.system.memory_percent | default(value=0) }}%;"></div>
</div>
</div>
<div>
<div class="d-flex justify-content-between small mb-1">
<span class="text-muted">Disk</span>
<span class="site-diskpct-label">{{ site.system.disk_percent | default(value=0) }}%</span>
</div>
<div class="progress" style="height:4px;border-radius:2px;">
<div class="progress-bar site-diskpct-bar" role="progressbar" style="width:{{ site.system.disk_percent | default(value=0) }}%;"></div>
</div>
</div>
</div>
</div>
{% endif %}
{% if site.sync %}
<div class="d-flex align-items-center justify-content-between border-top pt-3">
<div class="d-flex align-items-center gap-2 small">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
<span class="text-muted">Sync</span>
<span class="site-sync-label">
{% if site.sync.last_sync_at %}
<span data-last-sync-at="{{ site.sync.last_sync_at }}">last sync <span class="last-sync-rel">just now</span></span>
{% else %}
<span class="text-muted">no sync yet</span>
{% endif %}
</span>
</div>
<span class="badge bg-danger bg-opacity-10 text-danger site-sync-errors {% if not site.sync.errors or site.sync.errors == 0 %}d-none{% endif %}">
<span class="site-sync-errors-count">{{ site.sync.errors | default(value=0) }}</span> err
</span>
</div>
{% endif %}
</div>
<div class="site-offline-content {% if site.online %}d-none{% endif %}">
<div class="alert alert-light border-0 mb-0 py-2 px-3 small">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 text-warning" viewBox="0 0 16 16">
<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
</svg>
<span class="site-offline-message">{% if site.error %}{{ site.error }}{% else %}Peer unreachable.{% endif %}</span>
</div>
</div>
</div>
</div>
</div>
{% endfor %}
{% if cluster_total_count <= 1 %}
<div class="col-xl-6">
<a href="{{ url_for(endpoint='ui.sites_dashboard') }}" class="card shadow-sm border-0 h-100 text-decoration-none text-reset" style="border-radius: 1rem; border: 2px dashed var(--bs-border-color) !important;">
<div class="card-body d-flex flex-column align-items-center justify-content-center text-center p-5">
<div class="d-flex align-items-center justify-content-center rounded-circle bg-primary bg-opacity-10 text-primary mb-3" style="width:64px;height:64px;">
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5v3h3a.5.5 0 0 1 0 1h-3v3a.5.5 0 0 1-1 0v-3h-3a.5.5 0 0 1 0-1h3v-3A.5.5 0 0 1 8 4z"/>
</svg>
</div>
<h5 class="fw-semibold mb-1">Add a peer site</h5>
<p class="text-muted small mb-0">Register another MyFSIO instance to see it appear here side-by-side.</p>
</div>
</a>
</div>
{% endif %}
</div>
<script>
(function () {
function fmtBytes(n) {
if (!n || n < 0) return "0 B";
var u = ["B", "KB", "MB", "GB", "TB", "PB"];
var i = 0;
var v = n;
while (v >= 1024 && i < u.length - 1) { v /= 1024; i++; }
return v.toFixed(i === 0 ? 0 : (v >= 100 ? 0 : 1)) + " " + u[i];
}
function fmtRel(ts) {
var diff = Math.max(0, Math.floor(Date.now() / 1000 - ts));
if (diff < 60) return diff + "s ago";
if (diff < 3600) return Math.floor(diff / 60) + "m ago";
if (diff < 86400) return Math.floor(diff / 3600) + "h ago";
return Math.floor(diff / 86400) + "d ago";
}
function pctColor(p) {
if (p >= 80) return "bg-danger";
if (p >= 60) return "bg-warning";
return "bg-success";
}
// Rewrite the text of every [data-bytes] element under root (or the whole
// document) with a human-readable rendering of its byte attribute.
function applyBytesFormat(root) {
  var scope = root || document;
  scope.querySelectorAll("[data-bytes]").forEach(function (el) {
    var bytes = parseInt(el.getAttribute("data-bytes"), 10);
    if (!isNaN(bytes)) el.textContent = fmtBytes(bytes);
  });
}
// Size and colour each .site-disk-bar from its data-total / data-available
// attributes, and refresh the matching .site-disk-used label on its card.
function applyDiskBars(root) {
  var scope = root || document;
  scope.querySelectorAll(".site-disk-bar").forEach(function (bar) {
    var total = parseFloat(bar.getAttribute("data-total")) || 0;
    var avail = parseFloat(bar.getAttribute("data-available")) || 0;
    var used = Math.max(0, total - avail);
    var pct = total > 0 ? (used / total) * 100 : 0;
    bar.style.width = pct.toFixed(1) + "%";
    // Drop any previous contextual class before applying the current one.
    bar.classList.remove("bg-success", "bg-warning", "bg-danger", "bg-primary");
    bar.classList.add(pctColor(pct));
    var card = bar.closest(".site-card");
    var usedEl = card ? card.querySelector(".site-disk-used") : null;
    if (usedEl) {
      usedEl.setAttribute("data-bytes", String(Math.floor(used)));
      usedEl.textContent = fmtBytes(used);
    }
  });
}
// Recolour the CPU / memory / disk-percent bars to match the percentage
// text shown in their sibling label on the same .site-card. Bar widths are
// set elsewhere (template / updateCard); this pass only fixes colour.
function applyPctBars(root) {
  var pairs = [
    [".site-cpu-bar", ".site-cpu-label"],
    [".site-mem-bar", ".site-mem-label"],
    [".site-diskpct-bar", ".site-diskpct-label"],
  ];
  pairs.forEach(function (sel) {
    (root || document).querySelectorAll(sel[0]).forEach(function (bar) {
      // Guard against bars rendered outside a .site-card: closest() returns
      // null there and the original unguarded querySelector call would throw,
      // aborting the whole pass. applyDiskBars already guards the same case.
      var card = bar.closest(".site-card");
      var label = card ? card.querySelector(sel[1]) : null;
      var pct = parseFloat(label ? label.textContent : "0") || 0;
      bar.classList.remove("bg-success", "bg-warning", "bg-danger");
      bar.classList.add(pctColor(pct));
    });
  });
}
// Re-render every "last sync" relative-time span from the absolute
// timestamp stored on its [data-last-sync-at] wrapper.
function refreshRel() {
  var wrappers = document.querySelectorAll("[data-last-sync-at]");
  wrappers.forEach(function (el) {
    var ts = parseFloat(el.getAttribute("data-last-sync-at"));
    if (isNaN(ts)) return;
    var span = el.querySelector(".last-sync-rel");
    if (span) span.textContent = fmtRel(ts);
  });
}
// Patch a single site card in place from a /ui/cluster/data site record.
// `card` is the .site-card root element; `site` is that site's JSON payload.
// Online cards get counters, capacity, system gauges and sync status
// refreshed; offline cards only get their error message updated.
function updateCard(card, site) {
  if (!card) return;
  var online = !!site.online;
  var stale = !!site.stale;
  function toggle(sel, show) {
    var el = card.querySelector(sel);
    if (el) el.classList.toggle("d-none", !show);
  }
  toggle(".site-status-online", online);
  toggle(".site-status-offline", !online);
  toggle(".site-status-stale", stale);
  toggle(".site-online-content", online);
  toggle(".site-offline-content", !online);
  if (online) {
    var setNum = function (sel, val) {
      var el = card.querySelector(sel);
      if (el) el.textContent = String(val == null ? 0 : val);
    };
    setNum(".site-buckets", site.buckets);
    setNum(".site-objects", site.objects);
    var sizeEl = card.querySelector(".site-size");
    if (sizeEl) {
      sizeEl.setAttribute("data-bytes", String(site.size_bytes || 0));
      sizeEl.textContent = fmtBytes(site.size_bytes || 0);
    }
    var capacity = site.capacity || {};
    var diskBar = card.querySelector(".site-disk-bar");
    if (diskBar) {
      // Width/colour are recomputed by applyDiskBars from these attributes.
      diskBar.setAttribute("data-total", String(capacity.total_bytes || 0));
      diskBar.setAttribute("data-available", String(capacity.available_bytes || 0));
    }
    var diskTotalEl = card.querySelector(".site-disk-total");
    if (diskTotalEl) {
      diskTotalEl.setAttribute("data-bytes", String(capacity.total_bytes || 0));
      diskTotalEl.textContent = fmtBytes(capacity.total_bytes || 0);
    }
    var sys = site.system || {};
    var setPct = function (labelSel, val) {
      var label = card.querySelector(labelSel);
      if (label) label.textContent = (val == null ? 0 : val) + "%";
    };
    setPct(".site-cpu-label", sys.cpu_percent);
    setPct(".site-mem-label", sys.memory_percent);
    setPct(".site-diskpct-label", sys.disk_percent);
    var setBarPct = function (sel, val) {
      var bar = card.querySelector(sel);
      if (bar) bar.style.width = (val == null ? 0 : val) + "%";
    };
    setBarPct(".site-cpu-bar", sys.cpu_percent);
    setBarPct(".site-mem-bar", sys.memory_percent);
    setBarPct(".site-diskpct-bar", sys.disk_percent);
    var sync = site.sync || {};
    var syncLabel = card.querySelector(".site-sync-label");
    if (syncLabel) {
      // Coerce the timestamp to a Number before interpolating into markup so
      // a non-numeric value from a peer cannot inject HTML via innerHTML.
      var syncTs = Number(sync.last_sync_at);
      if (sync.last_sync_at && !isNaN(syncTs)) {
        syncLabel.innerHTML = '<span data-last-sync-at="' + syncTs + '">last sync <span class="last-sync-rel">' + fmtRel(syncTs) + '</span></span>';
      } else {
        syncLabel.innerHTML = '<span class="text-muted">no sync yet</span>';
      }
    }
    var errBadge = card.querySelector(".site-sync-errors");
    // Numeric coercion so a string count like "0" still hides the badge
    // (the previous strict === 0 check missed that case).
    var errCount = Number(sync.errors) || 0;
    if (errBadge) {
      errBadge.classList.toggle("d-none", errCount === 0);
      var c = errBadge.querySelector(".site-sync-errors-count");
      if (c) c.textContent = errCount;
    }
  } else {
    var msg = card.querySelector(".site-offline-message");
    if (msg) msg.textContent = site.error || "Peer unreachable.";
  }
}
// Fetch /ui/cluster/data and repaint the cluster page: header totals, each
// per-site card, bar colours, and the "updated" timestamp. When `force` is
// true the request adds ?force=1 and the manual refresh button is disabled
// (its icon spins) for the duration. Failures are swallowed so the page
// keeps showing the last good state. Returns the fetch promise.
function poll(force) {
var url = "/ui/cluster/data" + (force ? "?force=1" : "");
var icon = document.getElementById("cluster-refresh-icon");
var btn = document.getElementById("cluster-refresh-btn");
if (force && icon) icon.classList.add("spin");
if (force && btn) btn.disabled = true;
return fetch(url, { credentials: "same-origin", cache: "no-store" })
// Non-2xx responses become null so the body below is skipped silently.
.then(function (r) { return r.ok ? r.json() : null; })
.then(function (data) {
if (!data) return;
var totals = data.totals || {};
var setTotal = function (id, v) {
var el = document.getElementById(id);
if (el) el.textContent = String(v == null ? 0 : v);
};
setTotal("cluster-total-sites", totals.total_count);
setTotal("cluster-total-buckets", totals.buckets);
setTotal("cluster-total-objects", totals.objects);
var sizeEl = document.getElementById("cluster-total-size");
if (sizeEl) {
sizeEl.setAttribute("data-bytes", String(totals.size_bytes || 0));
sizeEl.textContent = fmtBytes(totals.size_bytes || 0);
}
var onlineBadge = document.getElementById("cluster-online-badge");
if (onlineBadge) onlineBadge.textContent = (totals.online_count || 0) + " / " + (totals.total_count || 0) + " online";
// Match each payload site to its card by data-site-id; unknown sites
// (no card rendered) are ignored.
(data.sites || []).forEach(function (site) {
var card = document.querySelector('.site-card[data-site-id="' + site.site_id + '"]');
if (card) updateCard(card, site);
});
// Recompute bar widths/colours after the attributes were updated above.
applyDiskBars();
applyPctBars();
var stamp = document.getElementById("cluster-updated-at");
if (stamp) stamp.textContent = "updated " + new Date().toLocaleTimeString();
})
.catch(function () { /* silent — keep last good state */ })
.finally(function () {
// Always restore the refresh button, success or failure.
if (icon) icon.classList.remove("spin");
if (btn) btn.disabled = false;
});
}
// Initial render pass over the server-rendered markup.
applyBytesFormat();
applyDiskBars();
applyPctBars();
refreshRel();
// Keep relative "last sync" ages fresh every 5s; poll the server every 10s.
setInterval(refreshRel, 5000);
setInterval(function () { poll(false); }, 10000);
// Manual refresh button triggers a forced (cache-bypassing) poll.
var refreshBtn = document.getElementById("cluster-refresh-btn");
if (refreshBtn) {
refreshBtn.addEventListener("click", function () { poll(true); });
}
})();
</script>
<style>
@keyframes cluster-spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
.spin { animation: cluster-spin 0.8s linear infinite; transform-origin: 50% 50%; }
</style>
{% endblock %}

View File

@@ -73,16 +73,13 @@
</div>
<p class="text-muted">Build or run the Rust server and launch the API plus web UI from a single process.</p>
<div class="alert alert-light border small mb-3">
Runtime note: MyFSIO now runs from the Rust server in <code>rust/myfsio-engine</code>. For the verified runtime configuration list, use the repository <code>docs.md</code>.
Runtime note: the repository root is the Cargo workspace. For the verified runtime configuration list, use the repository <code>docs.md</code>.
</div>
<ol class="docs-steps">
<li>Install a current Rust toolchain.</li>
<li>Change into <code>rust/myfsio-engine</code>.</li>
<li>Start the server with <code>cargo run -p myfsio-server --</code>.</li>
<li>From the repository root, start the server with <code>cargo run -p myfsio-server --</code>.</li>
</ol>
<pre class="mb-3"><code class="language-bash">cd rust/myfsio-engine
# Run API + UI
<pre class="mb-3"><code class="language-bash"># Run API + UI
cargo run -p myfsio-server --
# Show resolved configuration
@@ -112,7 +109,7 @@ cargo build --release -p myfsio-server
<tbody>
<tr>
<td><code>API_BASE_URL</code></td>
<td><code>http://127.0.0.1:5000</code></td>
<td>Derived from <code>HOST</code>/<code>PORT</code></td>
<td>Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy.</td>
</tr>
<tr>
@@ -121,29 +118,69 @@ cargo build --release -p myfsio-server
<td>Directory for buckets and objects.</td>
</tr>
<tr>
<td><code>MAX_UPLOAD_SIZE</code></td>
<td><code>1 GB</code></td>
<td>Max request body size in bytes.</td>
<td><code>IAM_CONFIG</code></td>
<td><code>&lt;STORAGE_ROOT&gt;/.myfsio.sys/config/iam.json</code></td>
<td>IAM users / access keys file path.</td>
</tr>
<tr>
<td><code>SECRET_KEY</code></td>
<td>(Auto-generated)</td>
<td>(loaded from <code>.myfsio.sys/config/.secret</code> when present)</td>
<td>Session signing and IAM-at-rest encryption key. <strong>Set explicitly in production.</strong></td>
</tr>
<tr>
<td><code>HOST</code></td>
<td><code>127.0.0.1</code></td>
<td>Bind interface.</td>
<td>Bind interface for both API and UI listeners.</td>
</tr>
<tr>
<td><code>PORT</code></td>
<td><code>5000</code></td>
<td>Listen port (UI uses 5100).</td>
<td>S3 API listen port.</td>
</tr>
<tr>
<td><code>DISPLAY_TIMEZONE</code></td>
<td><code>UTC</code></td>
<td>Timezone for UI timestamps (e.g., <code>US/Eastern</code>, <code>Asia/Tokyo</code>).</td>
<td><code>UI_PORT</code></td>
<td><code>5100</code></td>
<td>Web UI listen port.</td>
</tr>
<tr>
<td><code>UI_ENABLED</code></td>
<td><code>true</code></td>
<td>Set to <code>false</code> to run API-only (no UI listener).</td>
</tr>
<tr>
<td><code>AWS_REGION</code></td>
<td><code>us-east-1</code></td>
<td>Region used in SigV4 scope.</td>
</tr>
<tr>
<td><code>LOG_LEVEL</code></td>
<td><code>INFO</code></td>
<td>Log verbosity (also honored via <code>RUST_LOG</code>).</td>
</tr>
<tr>
<td><code>SESSION_LIFETIME_DAYS</code></td>
<td><code>1</code></td>
<td>UI session lifetime in days.</td>
</tr>
<tr>
<td><code>REQUEST_BODY_TIMEOUT_SECONDS</code></td>
<td><code>60</code></td>
<td>Per-request body read timeout for the S3 API.</td>
</tr>
<tr>
<td><code>MULTIPART_MIN_PART_SIZE</code></td>
<td><code>5242880</code></td>
<td>Minimum part size enforced for multipart uploads (5&nbsp;MiB).</td>
</tr>
<tr>
<td><code>BULK_DELETE_MAX_KEYS</code></td>
<td><code>1000</code></td>
<td>Maximum keys accepted by the UI bulk-delete endpoint.</td>
</tr>
<tr>
<td><code>STREAM_CHUNK_SIZE</code></td>
<td><code>1048576</code></td>
<td>Default streaming chunk size for routes that opt into configured chunking (1&nbsp;MiB).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">CORS Settings</td>
@@ -169,48 +206,43 @@ cargo build --release -p myfsio-server
<td>Response headers visible to browsers (e.g., <code>ETag</code>).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Security Settings</td>
</tr>
<tr>
<td><code>AUTH_MAX_ATTEMPTS</code></td>
<td><code>5</code></td>
<td>Failed login attempts before lockout.</td>
</tr>
<tr>
<td><code>AUTH_LOCKOUT_MINUTES</code></td>
<td><code>15</code></td>
<td>Lockout duration after max failed attempts.</td>
<td colspan="3" class="fw-semibold">Rate Limiting</td>
</tr>
<tr>
<td><code>RATE_LIMIT_DEFAULT</code></td>
<td><code>200 per minute</code></td>
<td>Default API rate limit.</td>
<td><code>5000 per minute</code></td>
<td>Default rate limit for S3 and KMS API endpoints. Accepts <code>N per &lt;second/minute/hour/day&gt;</code> or <code>N/&lt;seconds&gt;</code>.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_LIST_BUCKETS</code></td>
<td><code>60 per minute</code></td>
<td>Rate limit for listing buckets.</td>
<td>inherits <code>RATE_LIMIT_DEFAULT</code></td>
<td>Rate limit for <code>GET /</code> (ListBuckets).</td>
</tr>
<tr>
<td><code>RATE_LIMIT_BUCKET_OPS</code></td>
<td><code>120 per minute</code></td>
<td>Rate limit for bucket operations.</td>
<td>inherits <code>RATE_LIMIT_DEFAULT</code></td>
<td>Rate limit for bucket-scoped operations (<code>/bucket</code>).</td>
</tr>
<tr>
<td><code>RATE_LIMIT_OBJECT_OPS</code></td>
<td><code>240 per minute</code></td>
<td>Rate limit for object operations.</td>
<td>inherits <code>RATE_LIMIT_DEFAULT</code></td>
<td>Rate limit for object-scoped operations (<code>/bucket/key</code>).</td>
</tr>
<tr>
<td><code>RATE_LIMIT_HEAD_OPS</code></td>
<td><code>100 per minute</code></td>
<td>Rate limit for HEAD requests.</td>
<td>inherits <code>RATE_LIMIT_DEFAULT</code></td>
<td>Rate limit applied when the request method is HEAD.</td>
</tr>
<tr>
<td><code>RATE_LIMIT_ADMIN</code></td>
<td><code>60 per minute</code></td>
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
</tr>
<tr>
<td><code>RATE_LIMIT_STORAGE_URI</code></td>
<td><code>memory://</code></td>
<td>Rate limit storage backend. Only in-memory storage is currently supported.</td>
</tr>
<tr>
<td><code>ADMIN_ACCESS_KEY</code></td>
<td>(none)</td>
@@ -222,30 +254,7 @@ cargo build --release -p myfsio-server
<td>Custom secret key for the admin user on first run or credential reset. Random if unset.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Server Settings</td>
</tr>
<tr>
<td><code>SERVER_THREADS</code></td>
<td><code>0</code> (auto)</td>
<td>Granian blocking threads (1-64). 0 = auto (CPU cores × 2).</td>
</tr>
<tr>
<td><code>SERVER_CONNECTION_LIMIT</code></td>
<td><code>0</code> (auto)</td>
<td>Max concurrent connections (10-1000). 0 = auto (RAM-based).</td>
</tr>
<tr>
<td><code>SERVER_BACKLOG</code></td>
<td><code>0</code> (auto)</td>
<td>TCP listen backlog (64-4096). 0 = auto (conn_limit × 2).</td>
</tr>
<tr>
<td><code>SERVER_CHANNEL_TIMEOUT</code></td>
<td><code>120</code></td>
<td>Idle connection timeout in seconds (10-300).</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Encryption Settings</td>
<td colspan="3" class="fw-semibold">Feature Toggles</td>
</tr>
<tr>
<td><code>ENCRYPTION_ENABLED</code></td>
@@ -255,20 +264,32 @@ cargo build --release -p myfsio-server
<tr>
<td><code>KMS_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable KMS key management for encryption.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Logging Settings</td>
<td>Enable built-in KMS key management.</td>
</tr>
<tr>
<td><code>LOG_LEVEL</code></td>
<td><code>INFO</code></td>
<td>Log verbosity: DEBUG, INFO, WARNING, ERROR.</td>
<td><code>GC_ENABLED</code></td>
<td><code>false</code></td>
<td>Start the garbage collector worker.</td>
</tr>
<tr>
<td><code>LOG_TO_FILE</code></td>
<td><code>true</code></td>
<td>Enable file logging.</td>
<td><code>INTEGRITY_ENABLED</code></td>
<td><code>false</code></td>
<td>Start the integrity scanner worker.</td>
</tr>
<tr>
<td><code>LIFECYCLE_ENABLED</code></td>
<td><code>false</code></td>
<td>Start the lifecycle worker.</td>
</tr>
<tr>
<td><code>WEBSITE_HOSTING_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable static website hosting and domain mappings.</td>
</tr>
<tr>
<td><code>SITE_SYNC_ENABLED</code></td>
<td><code>false</code></td>
<td>Start the bi-directional site sync worker.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Metrics History Settings</td>
@@ -377,8 +398,8 @@ cargo build --release -p myfsio-server
</tr>
<tr>
<td><code>NUM_TRUSTED_PROXIES</code></td>
<td><code>1</code></td>
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers.</td>
<td><code>0</code></td>
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers. Forwarded IP headers are ignored when this is <code>0</code>.</td>
</tr>
<tr>
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
@@ -418,16 +439,6 @@ cargo build --release -p myfsio-server
<td><code>50</code></td>
<td>Max lifecycle history records per bucket.</td>
</tr>
<tr>
<td><code>OBJECT_CACHE_TTL</code></td>
<td><code>60</code></td>
<td>Seconds to cache object metadata.</td>
</tr>
<tr>
<td><code>BULK_DOWNLOAD_MAX_BYTES</code></td>
<td><code>1 GB</code></td>
<td>Max total size for bulk ZIP downloads.</td>
</tr>
<tr>
<td><code>ENCRYPTION_CHUNK_SIZE_BYTES</code></td>
<td><code>65536</code></td>
@@ -447,7 +458,7 @@ cargo build --release -p myfsio-server
</table>
</div>
<div class="alert alert-warning mt-3 mb-0 small">
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code> (also enables IAM config encryption at rest), restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via reverse proxy, use <code>--prod</code> flag, and set credential expiry on non-admin users.
<strong>Production Checklist:</strong> Set <code>SECRET_KEY</code> (also enables IAM config encryption at rest), restrict <code>CORS_ORIGINS</code>, configure <code>API_BASE_URL</code>, enable HTTPS via a reverse proxy, run <code>myfsio-server --check-config</code> before starting, and set credential expiry on non-admin users.
</div>
</div>
</article>
@@ -518,7 +529,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<div class="docs-highlight mb-3">
<ol class="mb-0">
<li>Check the console output for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
<li>Create additional users with descriptive display names, AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>), and optional credential expiry dates.</li>
<li>Create additional users with descriptive display names, AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>), and optional credential expiry dates. Use <code>{"bucket": "*", "actions": ["*"]}</code> to grant full administrator access — this is the only policy shape that satisfies <code>require_admin</code> on routes such as <code>/admin/cluster/overview</code>. <code>iam:*</code> grants only IAM-management actions and is <strong>not</strong> a substitute for <code>"*"</code> on admin routes.</li>
<li>Set credential expiry on users to grant time-limited access. The UI shows expiry badges and provides preset durations (1h, 24h, 7d, 30d, 90d). Expired credentials are rejected at authentication.</li>
<li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li>
<li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly.</li>
@@ -545,7 +556,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<div>
<h3 class="h6 text-uppercase text-muted">Uploads</h3>
<ul>
<li>Drag and drop folders or files into the upload modal. Objects above 16&nbsp;MB switch to multipart automatically.</li>
<li>Drag and drop folders or files into the upload modal. Objects above 8&nbsp;MB switch to multipart automatically.</li>
<li>Progress rows highlight retries, throughput, and completion even if you close the modal.</li>
</ul>
</div>
@@ -554,7 +565,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<ul>
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders.</li>
<li>Infinite scroll loads more objects automatically. Choose batch size (50–250) from the footer dropdown.</li>
<li>Bulk select objects for multi-delete or multi-download (ZIP archive, up to 1 GiB). Filter by name using the search box.</li>
<li>Bulk select objects for multi-delete or multi-download (ZIP archive, up to 256&nbsp;MB total). Filter by name using the search box.</li>
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
</ul>
</div>
@@ -569,7 +580,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<h3 class="h6 text-uppercase text-muted">Policies &amp; versioning</h3>
<ul>
<li>Toggle versioning (requires write access). Archived-only keys are flagged so you can restore them quickly.</li>
<li>The policy editor saves drafts, ships with presets, and hot-reloads <code>data/.myfsio.sys/config/bucket_policies.json</code>.</li>
<li>The policy editor saves each bucket policy in that bucket's <code>.bucket.json</code>; legacy <code>data/.myfsio.sys/config/bucket_policies.json</code> entries are still read as a fallback.</li>
</ul>
</div>
</div>
@@ -871,46 +882,15 @@ s3.complete_multipart_upload(
</li>
</ol>
<div class="alert alert-light border mb-3 overflow-hidden">
<div class="d-flex flex-column flex-sm-row gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-terminal text-muted mt-1 flex-shrink-0 d-none d-sm-block" viewBox="0 0 16 16">
<div class="alert alert-light border mb-3">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-terminal text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M6 9a.5.5 0 0 1 .5-.5h3a.5.5 0 0 1 0 1h-3A.5.5 0 0 1 6 9zM3.854 4.146a.5.5 0 1 0-.708.708L4.793 6.5 3.146 8.146a.5.5 0 1 0 .708.708l2-2a.5.5 0 0 0 0-.708l-2-2z"/>
<path d="M2 1a2 2 0 0 0-2 2v10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V3a2 2 0 0 0-2-2H2zm12 1a1 1 0 0 1 1 1v10a1 1 0 0 1-1 1H2a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1h12z"/>
</svg>
<div class="flex-grow-1 min-width-0">
<div>
<strong>Headless Target Setup</strong>
<p class="small text-muted mb-2">If your target server has no UI, create a <code>setup_target.py</code> script to bootstrap credentials:</p>
<pre class="mb-0 overflow-auto" style="max-width: 100%;"><code class="language-python"># setup_target.py
from pathlib import Path
from app.iam import IamService
from app.storage import ObjectStorage
# Initialize services (paths match default config)
data_dir = Path("data")
iam = IamService(data_dir / ".myfsio.sys" / "config" / "iam.json")
storage = ObjectStorage(data_dir)
# 1. Create the bucket
bucket_name = "backup-bucket"
try:
storage.create_bucket(bucket_name)
print(f"Bucket '{bucket_name}' created.")
except Exception as e:
print(f"Bucket creation skipped: {e}")
# 2. Create the user
try:
creds = iam.create_user(
display_name="Replication User",
policies=[{"bucket": bucket_name, "actions": ["write", "read", "list"]}]
)
print("\n--- CREDENTIALS GENERATED ---")
print(f"Access Key: {creds['access_key']}")
print(f"Secret Key: {creds['secret_key']}")
print("-----------------------------")
except Exception as e:
print(f"User creation failed: {e}")</code></pre>
<p class="small text-muted mt-2 mb-0">Save and run: <code>python setup_target.py</code></p>
<p class="small text-muted mb-0">If your target server has no UI, start it with <code>ADMIN_ACCESS_KEY</code> and <code>ADMIN_SECRET_KEY</code> set so the first-run bootstrap installs deterministic credentials, then create the destination bucket via the AWS CLI: <code>aws --endpoint-url &lt;target&gt; s3api create-bucket --bucket backup-bucket</code>. Use the admin API at <code>/admin/iam/users</code> (or <code>--reset-cred</code>) to manage credentials remotely.</p>
</div>
</div>
</div>
@@ -1070,15 +1050,15 @@ SITE_SYNC_BATCH_SIZE=100 # Max objects per sync cycle</code></pre>
<tbody>
<tr>
<td><strong>Retry Logic</strong></td>
<td>boto3 automatically handles 429 (rate limit) errors using exponential backoff with <code>max_attempts=2</code></td>
<td>The replication worker retries failed transfers up to <code>REPLICATION_MAX_RETRIES</code> times (default <code>2</code>).</td>
</tr>
<tr>
<td><strong>Concurrency</strong></td>
<td>Uses a ThreadPoolExecutor with 4 parallel workers for replication tasks</td>
<td><strong>Failure Budget</strong></td>
<td>After <code>REPLICATION_MAX_FAILURES_PER_BUCKET</code> recorded failures (default <code>50</code>), further records for that bucket are dropped until a retry succeeds.</td>
</tr>
<tr>
<td><strong>Timeouts</strong></td>
<td>Connect: 5s, Read: 30s. Large files use streaming transfers</td>
<td>Connect: <code>REPLICATION_CONNECT_TIMEOUT_SECONDS</code> (default 5s), Read: <code>REPLICATION_READ_TIMEOUT_SECONDS</code> (default 30s). Objects larger than <code>REPLICATION_STREAMING_THRESHOLD_BYTES</code> (default 10&nbsp;MB) use streaming transfers.</td>
</tr>
</tbody>
</table>
@@ -1213,6 +1193,56 @@ cargo run -p myfsio-server --</code></pre>
</div>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Cross-site authentication (Peer Inbound Access Key)</h3>
<p class="small text-muted">When the Cluster page asks one site to render a peer card, the local server signs a SigV4 call to <code>/admin/cluster/overview</code> on the peer. The peer needs to know which inbound caller is authorized. There are two ways for that signed call to be accepted:</p>
<ol class="small text-muted mb-3">
<li>The signing access key resolves to a real admin user on the peer (policy <code>{"bucket":"*","actions":["*"]}</code>), <strong>or</strong></li>
<li>The signing access key is whitelisted on the peer's site-registry entry as <strong>Peer Inbound Access Key</strong>. With this set, the peer accepts the call without granting full admin.</li>
</ol>
<p class="small text-muted">Option 2 is preferred — it follows least-privilege. The mental model is:</p>
<div class="alert alert-light border mb-3">
<strong class="small">Peer Inbound Access Key on Site&nbsp;X for peer&nbsp;Y</strong>
<span class="small">= "the access key Y signs with when calling X."</span>
<br>
<span class="small text-muted">It is copied from <em>the other site's outbound Connection</em>, never from your own IAM.</span>
</div>
<h4 class="h6 mt-3 mb-2">Worked example (two sites: <code>us-east-1</code> and <code>us-west-1</code>)</h4>
<p class="small text-muted">Each site has an outbound Connection it uses to reach the other:</p>
<ul class="small text-muted mb-3">
<li><code>us-east-1</code>'s "to-west" Connection signs with access key <code>AKIA-EAST-OUT</code></li>
<li><code>us-west-1</code>'s "to-east" Connection signs with access key <code>AKIA-WEST-OUT</code></li>
</ul>
<p class="small text-muted">For the Cluster page on both sides to fetch peer data, register the peer entries with cross-paired inbound keys:</p>
<div class="row g-3 mb-3">
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On us-east-1 → peer "us-west-1"</strong></div>
<div class="card-body small">
<ul class="mb-0 ps-3">
<li>Connection: "to-west"</li>
<li><strong>Peer Inbound Access Key:</strong> <code>AKIA-WEST-OUT</code></li>
<li class="text-muted">(the key us-west-1 will sign with when calling us-east-1)</li>
</ul>
</div>
</div>
</div>
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On us-west-1 → peer "us-east-1"</strong></div>
<div class="card-body small">
<ul class="mb-0 ps-3">
<li>Connection: "to-east"</li>
<li><strong>Peer Inbound Access Key:</strong> <code>AKIA-EAST-OUT</code></li>
<li class="text-muted">(the key us-east-1 will sign with when calling us-west-1)</li>
</ul>
</div>
</div>
</div>
</div>
<p class="small text-muted">Each side's inbound key matches the <em>other</em> side's outbound key. Putting your own admin key (e.g. <code>localadmin</code>) into your own peer entry does nothing — the inbound caller is the peer, not you.</p>
<p class="small text-muted">Each whitelisted access key still has to exist as an enabled IAM user on the receiving site (so SigV4 verification can find a matching secret). Its policy can be empty for cluster-overview alone; for site-sync to work, it needs the S3 verbs the sync workload uses (<code>list</code>, <code>read</code>, plus <code>write</code>/<code>delete</code> for inbound replication / bidirectional sync). Leave the field blank only if the signing key is a full admin on the receiving site.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Admin API Endpoints</h3>
<p class="small text-muted">The <code>/admin</code> API provides programmatic access to site registry:</p>
<pre class="mb-3"><code class="language-bash"># Get local site configuration
@@ -1371,7 +1401,7 @@ curl "{{ api_base }}/&lt;bucket&gt;/&lt;key&gt;?versionId=&lt;version-id&gt;" \
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Managing Quotas (Admin Only)</h3>
<p class="small text-muted">Quota management is restricted to administrators (users with <code>iam:*</code> permissions).</p>
<p class="small text-muted">Quota management is restricted to administrator users — those whose policy is <code>{"bucket": "*", "actions": ["*"]}</code>.</p>
<ol class="docs-steps mb-3">
<li>Navigate to your bucket → <strong>Properties</strong> tab → <strong>Storage Quota</strong> card.</li>
<li>Enter limits: <strong>Max Size (MB)</strong> and/or <strong>Max Objects</strong>. Leave empty for unlimited.</li>
@@ -1488,17 +1518,23 @@ curl -X POST {{ api_base }}/kms/keys \
curl {{ api_base }}/kms/keys \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Rotate a key (creates new key material)
curl -X POST {{ api_base }}/kms/keys/{key-id}/rotate \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Disable/Enable a key
# Disable a key
curl -X POST {{ api_base }}/kms/keys/{key-id}/disable \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Schedule key deletion (30-day waiting period)
curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
# Re-enable a key
curl -X POST {{ api_base }}/kms/keys/{key-id}/enable \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Delete a key
curl -X DELETE "{{ api_base }}/kms/keys/{key-id}" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Generate a data key (sized between KMS_GENERATE_DATA_KEY_MIN_BYTES and _MAX_BYTES)
curl -X POST {{ api_base }}/kms/generate-data-key \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"KeyId": "&lt;key-id&gt;", "NumberOfBytes": 32}'</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">How It Works</h3>
<p class="small text-muted mb-0">
@@ -1762,14 +1798,11 @@ curl "{{ api_base }}/admin/gc/history?limit=10" \
</tr>
</thead>
<tbody>
<tr><td><code>INTEGRITY_ENABLED</code></td><td><code>false</code></td><td>Enable background integrity scanning</td></tr>
<tr><td><code>INTEGRITY_INTERVAL_HOURS</code></td><td><code>24</code></td><td>Hours between scan cycles</td></tr>
<tr><td><code>INTEGRITY_BATCH_SIZE</code></td><td><code>1000</code></td><td>Max objects to scan per cycle</td></tr>
<tr><td><code>INTEGRITY_AUTO_HEAL</code></td><td><code>false</code></td><td>Automatically repair detected issues</td></tr>
<tr><td><code>INTEGRITY_DRY_RUN</code></td><td><code>false</code></td><td>Log issues without healing</td></tr>
<tr><td><code>INTEGRITY_ENABLED</code></td><td><code>false</code></td><td>Enable the background integrity scanner</td></tr>
</tbody>
</table>
</div>
<p class="small text-muted mb-3">Other scanner settings (interval, batch size, auto-heal, dry run) currently use hardcoded defaults: 24-hour interval, batch size 10&nbsp;000, auto-heal off, dry run off. Use the admin API below to trigger a one-off scan with <code>auto_heal</code> or <code>dry_run</code> overrides.</p>
<h3 class="h6 text-uppercase text-muted mt-4">What Gets Checked</h3>
<div class="table-responsive mb-3">
@@ -1837,7 +1870,7 @@ curl "{{ api_base }}/admin/integrity/history?limit=10" \
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Dry Run:</strong> Use <code>INTEGRITY_DRY_RUN=true</code> or pass <code>{"dry_run": true}</code> to the API to preview detected issues without making any changes. Combine with <code>{"auto_heal": true}</code> to see what would be repaired.
<strong>Dry Run:</strong> Pass <code>{"dry_run": true}</code> to <code>/admin/integrity/run</code> to preview detected issues without making any changes. Combine with <code>{"auto_heal": true}</code> to see what would be repaired.
</div>
</div>
</div>
@@ -2058,7 +2091,7 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
<tr>
<td>UI shows stale policy/object data</td>
<td>Browser cached prior state</td>
<td>Refresh; the server hot-reloads <code>data/.myfsio.sys/config/bucket_policies.json</code> and storage metadata.</td>
<td>Refresh; the server hot-reloads bucket <code>.bucket.json</code> policy data and legacy <code>data/.myfsio.sys/config/bucket_policies.json</code> fallback entries.</td>
</tr>
<tr>
<td>Presign dialog returns 403</td>
@@ -2066,9 +2099,9 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
<td>Update IAM inline policies or remove conflicting deny statements.</td>
</tr>
<tr>
<td>Large uploads fail instantly</td>
<td><code>MAX_UPLOAD_SIZE</code> exceeded</td>
<td>Raise the env var or split the object.</td>
<td>Large uploads time out mid-stream</td>
<td><code>REQUEST_BODY_TIMEOUT_SECONDS</code> exceeded</td>
<td>Raise the timeout, use multipart uploads, or upload from a faster network.</td>
</tr>
<tr>
<td>Requests hit the wrong host</td>
@@ -2077,8 +2110,8 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
</tr>
<tr>
<td>Large folder uploads hitting rate limits (429)</td>
<td><code>RATE_LIMIT_DEFAULT</code> exceeded (200/min)</td>
<td>Increase rate limit in env config, use Redis backend (<code>RATE_LIMIT_STORAGE_URI=redis://host:port</code>) for distributed setups, or upload in smaller batches.</td>
<td><code>RATE_LIMIT_DEFAULT</code> exceeded (5000/min by default)</td>
<td>Increase <code>RATE_LIMIT_DEFAULT</code> (or the per-route override), or upload in smaller batches. Distributed rate-limit storage is not supported yet.</td>
</tr>
</tbody>
</table>
@@ -2097,7 +2130,7 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
curl {{ api_base }}/myfsio/health
# Response
{"status": "ok", "version": "0.1.7"}</code></pre>
{"status": "ok", "version": "0.5.0"}</code></pre>
<p class="small text-muted mb-3">Use this endpoint for:</p>
<ul class="small text-muted mb-0">
@@ -2266,7 +2299,7 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;?notification" \
<p class="text-muted">Query CSV, JSON, or Parquet files directly using SQL without downloading the entire object.</p>
<div class="alert alert-info border small mb-3">
<strong>Prerequisite:</strong> Requires DuckDB to be installed (<code>pip install duckdb</code>)
<strong>Note:</strong> DuckDB is bundled into the Rust server binary &mdash; no separate install is required.
</div>
<pre class="mb-3"><code class="language-bash"># Query a CSV file
@@ -2834,20 +2867,36 @@ GET|PUT /admin/site # Local site config
GET /admin/sites # List peers
POST /admin/sites # Register peer
GET|PUT|DELETE /admin/sites/&lt;id&gt; # Manage peer
GET /admin/sites/&lt;id&gt;/health # Peer health
GET|POST /admin/sites/&lt;id&gt;/health # Peer health
GET /admin/sites/&lt;id&gt;/bidirectional-status # Bidi sync state
GET /admin/topology # Cluster topology
GET|POST|PUT|DELETE /admin/website-domains # Domain mappings
GET|POST /admin/website-domains # List / Create domain mapping
GET|PUT|DELETE /admin/website-domains/&lt;domain&gt; # Manage domain mapping
GET /admin/iam/users # List IAM users
GET /admin/iam/users/&lt;id&gt; # Get user
GET /admin/iam/users/&lt;id&gt;/policies # Get user policies
POST /admin/iam/users/&lt;id&gt;/access-keys # Create access key
DELETE /admin/iam/users/&lt;id&gt;/access-keys/&lt;ak&gt; # Delete access key
POST /admin/iam/users/&lt;id&gt;/disable # Disable user
POST /admin/iam/users/&lt;id&gt;/enable # Enable user
GET|POST /admin/gc/status, /admin/gc/run, /admin/gc/history
GET|POST /admin/integrity/status, /admin/integrity/run, /admin/integrity/history
# KMS API
# KMS API (only mounted when KMS_ENABLED=true)
GET|POST /kms/keys # List / Create keys
GET|DELETE /kms/keys/&lt;id&gt; # Get / Delete key
POST /kms/keys/&lt;id&gt;/enable # Enable key
POST /kms/keys/&lt;id&gt;/disable # Disable key
POST /kms/keys/&lt;id&gt;/rotate # Rotate key
POST /kms/encrypt # Encrypt data
POST /kms/decrypt # Decrypt data
POST /kms/re-encrypt # Re-encrypt under a different key
POST /kms/generate-data-key # Generate data key
POST /kms/generate-random # Generate random bytes</code></pre>
POST /kms/generate-data-key-without-plaintext # Generate wrapped DEK only
POST /kms/generate-random # Generate random bytes
POST /kms/client/generate-key # Client-side key helper
POST /kms/client/encrypt # Client-side encrypt helper
POST /kms/client/decrypt # Client-side decrypt helper
POST /kms/materials/&lt;id&gt; # Fetch wrapped key materials</code></pre>
</div>
</article>
</div>

View File

@@ -31,7 +31,7 @@
{% if iam_locked %}
<div class="alert alert-warning" role="alert">
<div class="fw-semibold mb-1">Administrator permissions required</div>
<p class="mb-0">You need the <code>iam:list_users</code> action to edit users or policies. {{ locked_reason or "Sign in with an admin identity to continue." }}</p>
<p class="mb-0">You need the <code>iam:list_users</code> action to edit users or policies. {% if locked_reason %}{{ locked_reason }}{% else %}Sign in with an admin identity to continue.{% endif %}</p>
</div>
{% endif %}
@@ -133,7 +133,7 @@
{% endif %}
<div class="row g-3">
{% for user in users %}
<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}">
<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}" data-update-url="{{ url_for(endpoint="ui.update_iam_user", user_id=user.user_id) }}">
<div class="card h-100 iam-user-card{% if user.is_admin %} iam-admin-card{% else %}{% endif %}">
<div class="card-body">
<div class="d-flex align-items-start justify-content-between mb-3">

View File

@@ -18,7 +18,7 @@
</svg>
Set Up Replication
</h1>
<p class="text-muted mb-0 mt-1">Configure bucket replication to <strong>{{ peer.display_name or peer.site_id }}</strong></p>
<p class="text-muted mb-0 mt-1">Configure bucket replication to <strong>{% if peer.display_name %}{{ peer.display_name }}{% else %}{{ peer.site_id }}{% endif %}</strong></p>
</div>
</div>
@@ -100,7 +100,7 @@
<hr class="my-2">
<p class="mb-2 fw-semibold">After completing this wizard, you must also:</p>
<ol class="mb-2 ps-3">
<li>Go to <strong>{{ peer.display_name or peer.site_id }}</strong>'s admin UI</li>
<li>Go to <strong>{% if peer.display_name %}{{ peer.display_name }}{% else %}{{ peer.site_id }}{% endif %}</strong>'s admin UI</li>
<li>Register <strong>this site</strong> as a peer (with a connection)</li>
<li>Create matching bidirectional replication rules pointing back to this site</li>
<li>Ensure <code>SITE_SYNC_ENABLED=true</code> is set on both sites</li>
@@ -147,7 +147,7 @@
<td>
<input type="text" class="form-control form-control-sm"
name="target_{{ bucket.name }}"
value="{{ bucket.existing_target or bucket.name }}"
value="{% if bucket.existing_target %}{{ bucket.existing_target }}{% else %}{{ bucket.name }}{% endif %}"
placeholder="{{ bucket.name }}"
{% if bucket.has_rule %}disabled{% endif %}>
</td>

View File

@@ -142,6 +142,11 @@
</select>
<div class="form-text">Link to a remote connection for health checks</div>
</div>
<div class="mb-3">
<label for="peer_inbound_access_key" class="form-label fw-medium">Peer Inbound Access Key</label>
<input type="text" class="form-control" id="peer_inbound_access_key" name="peer_inbound_access_key" placeholder="AKIA... (optional)" autocomplete="off" spellcheck="false">
<div class="form-text">Access key the peer presents when calling this site (e.g. /admin/cluster/overview). Leave blank to require admin credentials.</div>
</div>
<div class="d-grid">
<button type="submit" class="btn btn-primary">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
@@ -225,7 +230,7 @@
</svg>
</div>
<div>
<span class="fw-medium">{{ peer.display_name or peer.site_id }}</span>
<span class="fw-medium">{% if peer.display_name %}{{ peer.display_name }}{% else %}{{ peer.site_id }}{% endif %}</span>
{% if peer.display_name and peer.display_name != peer.site_id %}
<br><small class="text-muted">{{ peer.site_id }}</small>
{% endif %}
@@ -257,7 +262,16 @@
</svg>
</span>
{% endif %}
{% if item.errors and item.errors > 0 %}
<span class="badge bg-danger bg-opacity-10 text-danger" title="Sync errors across bidirectional buckets">{{ item.errors }} err</span>
{% endif %}
</div>
{% if item.last_sync_at %}
<div class="text-muted small mt-1" data-last-sync-at="{{ item.last_sync_at }}">
last sync: <span class="last-sync-rel">just now</span>
{% if item.objects_pulled %} · {{ item.objects_pulled }} pulled{% endif %}
</div>
{% endif %}
<div class="sync-stats-detail d-none mt-2 small" id="stats-{{ peer.site_id }}">
<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span>
</div>
@@ -278,6 +292,7 @@
data-priority="{{ peer.priority }}"
data-display-name="{{ peer.display_name }}"
data-connection-id="{{ peer.connection_id | default(value="") }}"
data-peer-inbound-access-key="{{ peer.peer_inbound_access_key | default(value="") }}"
title="Edit peer">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
@@ -301,7 +316,7 @@
<li>
<button type="button" class="dropdown-item btn-check-bidir {% if not item.has_connection %}disabled{% endif %}"
data-site-id="{{ peer.site_id }}"
data-display-name="{{ peer.display_name or peer.site_id }}">
data-display-name="{% if peer.display_name %}{{ peer.display_name }}{% else %}{{ peer.site_id }}{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-info" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
</svg>
@@ -335,7 +350,7 @@
data-bs-toggle="modal"
data-bs-target="#deletePeerModal"
data-site-id="{{ peer.site_id }}"
data-display-name="{{ peer.display_name or peer.site_id }}">
data-display-name="{% if peer.display_name %}{{ peer.display_name }}{% else %}{{ peer.site_id }}{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
@@ -385,7 +400,7 @@
<div class="modal-body">
<div class="mb-3">
<label class="form-label fw-medium">Site ID</label>
<input type="text" class="form-control" id="edit_site_id" readonly>
<input type="text" class="form-control" id="edit_site_id" name="site_id" readonly>
</div>
<div class="mb-3">
<label for="edit_endpoint" class="form-label fw-medium">Endpoint URL</label>
@@ -414,6 +429,11 @@
{% endfor %}
</select>
</div>
<div class="mb-3">
<label for="edit_peer_inbound_access_key" class="form-label fw-medium">Peer Inbound Access Key</label>
<input type="text" class="form-control" id="edit_peer_inbound_access_key" name="peer_inbound_access_key" placeholder="AKIA... (optional)" autocomplete="off" spellcheck="false">
<div class="form-text">Access key the peer presents when calling this site (e.g. /admin/cluster/overview). Leave blank to require admin credentials.</div>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
@@ -516,6 +536,7 @@
document.getElementById('edit_priority').value = button.getAttribute('data-priority');
document.getElementById('edit_display_name').value = button.getAttribute('data-display-name');
document.getElementById('edit_connection_id').value = button.getAttribute('data-connection-id');
document.getElementById('edit_peer_inbound_access_key').value = button.getAttribute('data-peer-inbound-access-key') || '';
document.getElementById('editPeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/update';
});
}
@@ -859,16 +880,63 @@
});
});
}
document.querySelectorAll('.peer-actions-dropdown').forEach(function(dd) {
dd.addEventListener('shown.bs.dropdown', function() {
var toggle = dd.querySelector('[data-bs-toggle="dropdown"]');
document.querySelectorAll('.peer-actions-dropdown').forEach(function(dd, idx) {
var menu = dd.querySelector('.dropdown-menu');
if (!toggle || !menu) return;
if (!menu) return;
var pairId = 'peer-dd-' + idx;
dd.dataset.peerDdPair = pairId;
menu.dataset.peerDdPair = pairId;
function reposition() {
var toggle = dd.querySelector('[data-bs-toggle="dropdown"]');
if (!toggle) return;
var rect = toggle.getBoundingClientRect();
menu.style.top = rect.bottom + 'px';
menu.style.left = (rect.right - menu.offsetWidth) + 'px';
var menuWidth = menu.offsetWidth;
var menuHeight = menu.offsetHeight;
var pad = 8;
var left = rect.right - menuWidth;
if (left + menuWidth > window.innerWidth - pad) left = window.innerWidth - pad - menuWidth;
if (left < pad) left = pad;
var top = rect.bottom;
if (top + menuHeight > window.innerHeight - pad) {
top = Math.max(pad, rect.top - menuHeight);
}
menu.style.position = 'fixed';
menu.style.top = top + 'px';
menu.style.left = left + 'px';
menu.style.right = 'auto';
menu.style.bottom = 'auto';
menu.style.transform = 'none';
}
dd.addEventListener('show.bs.dropdown', function() {
if (menu.parentNode !== document.body) document.body.appendChild(menu);
});
dd.addEventListener('shown.bs.dropdown', reposition);
dd.addEventListener('hidden.bs.dropdown', function() {
menu.style.cssText = '';
if (menu.parentNode !== dd) dd.appendChild(menu);
});
window.addEventListener('resize', function() { if (menu.classList.contains('show')) reposition(); });
window.addEventListener('scroll', function() { if (menu.classList.contains('show')) reposition(); }, true);
});
})();
(function () {
  // Format a UNIX-epoch timestamp as a coarse relative age: "Ns/Nm/Nh/Nd ago".
  function relativeAge(epochSeconds) {
    var elapsed = Math.max(0, Math.floor(Date.now() / 1000 - epochSeconds));
    if (elapsed < 60) return elapsed + "s ago";
    if (elapsed < 3600) return Math.floor(elapsed / 60) + "m ago";
    if (elapsed < 86400) return Math.floor(elapsed / 3600) + "h ago";
    return Math.floor(elapsed / 86400) + "d ago";
  }
  // Rewrite every .last-sync-rel span from its container's data-last-sync-at.
  function updateAll() {
    document.querySelectorAll("[data-last-sync-at]").forEach(function (node) {
      var stamp = parseFloat(node.getAttribute("data-last-sync-at"));
      var target = node.querySelector(".last-sync-rel");
      if (target && !isNaN(stamp)) target.textContent = relativeAge(stamp);
    });
  }
  updateAll();
  // Keep the ages fresh; 30s granularity is enough for "m ago" labels.
  setInterval(updateAll, 30000);
})();
</script>

File diff suppressed because it is too large Load Diff

View File

@@ -219,6 +219,59 @@ fn render_sites() {
render_or_panic("sites.html", &ctx);
}
#[test]
fn render_cluster_empty() {
    // cluster.html must render cleanly before any peer has been registered:
    // empty site list, zeroed aggregate stats.
    let mut ctx = base_ctx();
    ctx.insert("cluster_sites", &Vec::<Value>::new());
    for key in [
        "cluster_total_buckets",
        "cluster_total_objects",
        "cluster_total_size_bytes",
    ] {
        ctx.insert(key, &0u64);
    }
    ctx.insert("cluster_online_count", &0usize);
    ctx.insert("cluster_total_count", &0usize);
    render_or_panic("cluster.html", &ctx);
}
#[test]
fn render_cluster_with_sites() {
    // cluster.html with one healthy local site (full stats/capacity/system
    // payload) and one unreachable stale peer (error string, registered_*
    // fallbacks) — exercises both rendering paths of the site cards.
    let mut ctx = base_ctx();
    // Aggregate header numbers mirror the single online site below.
    ctx.insert("cluster_total_buckets", &3u64);
    ctx.insert("cluster_total_objects", &42u64);
    ctx.insert("cluster_total_size_bytes", &1048576u64);
    ctx.insert("cluster_online_count", &1usize);
    ctx.insert("cluster_total_count", &2usize);
    let site_fixtures = json!([
        {
            "site_id": "local-1",
            "display_name": "Local",
            "endpoint": "http://127.0.0.1:8000",
            "region": "us-east-1",
            "online": true,
            "stale": false,
            "is_local": true,
            "buckets": 3,
            "objects": 42,
            "size_bytes": 1048576,
            "capacity": {"total_bytes": 100000000, "available_bytes": 50000000},
            "system": {"cpu_percent": 12.5, "memory_percent": 33.0, "disk_percent": 50.0, "storage_bytes": 1048576},
            "sync": {"errors": 0, "last_sync_at": 1700000000.0},
            "error": null
        },
        {
            "site_id": "peer-1",
            "display_name": "Peer",
            "endpoint": "http://peer.example.com",
            "online": false,
            "stale": true,
            "is_local": false,
            "registered_region": "us-west-2",
            "registered_priority": 100,
            "error": "request failed: timeout"
        }
    ]);
    ctx.insert("cluster_sites", &site_fixtures);
    render_or_panic("cluster.html", &ctx);
}
#[test]
fn render_website_domains() {
let mut ctx = base_ctx();
@@ -288,6 +341,7 @@ fn render_bucket_detail() {
ctx.insert("bytes_pct", &0);
ctx.insert("has_quota", &false);
ctx.insert("versioning_enabled", &false);
ctx.insert("versioning_suspended", &false);
ctx.insert("versioning_status", &"Disabled");
ctx.insert("encryption_config", &json!({"Rules": []}));
ctx.insert("enc_rules", &Vec::<Value>::new());
@@ -341,3 +395,85 @@ fn render_bucket_detail() {
ctx.insert("objects_stream_url", &"");
render_or_panic("bucket_detail.html", &ctx);
}
#[test]
fn render_bucket_detail_without_error_document() {
    // bucket_detail.html must render when the website config carries an
    // index_document but no error_document key at all.
    let mut ctx = base_ctx();
    ctx.insert("bucket_name", &"site-bucket");
    ctx.insert(
        "bucket",
        &json!({
            "name": "site-bucket",
            "creation_date": "2025-01-01T00:00:00Z",
        }),
    );
    // u64 counters: an empty bucket with no object versions.
    for key in [
        "total_objects",
        "total_bytes",
        "current_objects",
        "current_bytes",
        "version_count",
        "version_bytes",
    ] {
        ctx.insert(key, &0u64);
    }
    // No quota configured: null limits, zeroed usage gauges.
    ctx.insert("max_objects", &Value::Null);
    ctx.insert("max_bytes", &Value::Null);
    ctx.insert("obj_pct", &0);
    ctx.insert("bytes_pct", &0);
    // Flags that are off for this fixture.
    for key in [
        "has_max_objects",
        "has_max_bytes",
        "has_quota",
        "versioning_enabled",
        "versioning_suspended",
        "has_more",
        "lifecycle_enabled",
        "site_sync_enabled",
    ] {
        ctx.insert(key, &false);
    }
    // Permission/feature flags that are on so every management tab renders.
    for key in [
        "can_manage_cors",
        "can_manage_lifecycle",
        "can_manage_quota",
        "can_manage_versioning",
        "can_manage_website",
        "can_edit_policy",
        "is_replication_admin",
        "website_hosting_enabled",
    ] {
        ctx.insert(key, &true);
    }
    // List-valued keys all start empty.
    for key in [
        "objects",
        "prefixes",
        "enc_rules",
        "replication_rules",
        "connections",
        "multipart_uploads",
        "website_domains",
        "kms_keys",
    ] {
        ctx.insert(key, &Vec::<Value>::new());
    }
    // String-valued keys all start as the empty string.
    for key in [
        "enc_algorithm",
        "enc_kms_key",
        "bucket_policy",
        "bucket_policy_text",
        "current_prefix",
        "parent_prefix",
        "next_token",
        "target_conn_name",
        "preset_choice",
        "default_policy",
        "buckets_for_copy_url",
        "acl_url",
        "cors_url",
        "folders_url",
        "lifecycle_url",
        "objects_api_url",
        "objects_stream_url",
    ] {
        ctx.insert(key, &"");
    }
    ctx.insert("versioning_status", &"Disabled");
    ctx.insert("encryption_config", &json!({"Rules": []}));
    ctx.insert("replication_rule", &Value::Null);
    // The case under test: index_document present, error_document absent.
    ctx.insert("website_config", &json!({"index_document": "index.html"}));
    ctx.insert("active_tab", &"objects");
    ctx.insert("target_conn", &Value::Null);
    ctx.insert(
        "bucket_stats",
        &json!({
            "bytes": 0, "objects": 0, "total_bytes": 0, "total_objects": 0,
            "version_bytes": 0, "version_count": 0
        }),
    );
    ctx.insert(
        "bucket_quota",
        &json!({ "max_bytes": null, "max_objects": null }),
    );
    render_or_panic("bucket_detail.html", &ctx);
}

View File

@@ -9,6 +9,7 @@ myfsio-crypto = { path = "../myfsio-crypto" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tokio-util = { workspace = true }
dashmap = { workspace = true }
parking_lot = { workspace = true }
uuid = { workspace = true }

View File

@@ -11,10 +11,30 @@ pub enum StorageError {
BucketNotEmpty(String),
#[error("Object not found: {bucket}/{key}")]
ObjectNotFound { bucket: String, key: String },
#[error("Object version not found: {bucket}/{key}?versionId={version_id}")]
VersionNotFound {
bucket: String,
key: String,
version_id: String,
},
#[error("Object is a delete marker: {bucket}/{key}")]
DeleteMarker {
bucket: String,
key: String,
version_id: String,
},
#[error("Object corrupted: {bucket}/{key} ({detail})")]
ObjectCorrupted {
bucket: String,
key: String,
detail: String,
},
#[error("Invalid bucket name: {0}")]
InvalidBucketName(String),
#[error("Invalid object key: {0}")]
InvalidObjectKey(String),
#[error("Method not allowed: {0}")]
MethodNotAllowed(String),
#[error("Upload not found: {0}")]
UploadNotFound(String),
#[error("Quota exceeded: {0}")]
@@ -36,7 +56,7 @@ impl From<StorageError> for S3Error {
S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
}
StorageError::BucketAlreadyExists(name) => {
S3Error::from_code(S3ErrorCode::BucketAlreadyExists)
S3Error::from_code(S3ErrorCode::BucketAlreadyOwnedByYou)
.with_resource(format!("/{}", name))
}
StorageError::BucketNotEmpty(name) => {
@@ -46,10 +66,32 @@ impl From<StorageError> for S3Error {
S3Error::from_code(S3ErrorCode::NoSuchKey)
.with_resource(format!("/{}/{}", bucket, key))
}
StorageError::VersionNotFound {
bucket,
key,
version_id,
} => S3Error::from_code(S3ErrorCode::NoSuchVersion)
.with_resource(format!("/{}/{}?versionId={}", bucket, key, version_id)),
StorageError::DeleteMarker {
bucket,
key,
version_id,
} => S3Error::from_code(S3ErrorCode::MethodNotAllowed)
.with_resource(format!("/{}/{}?versionId={}", bucket, key, version_id)),
StorageError::ObjectCorrupted {
bucket,
key,
detail,
} => S3Error::new(
S3ErrorCode::ObjectCorrupted,
format!("Object corrupted: {}", detail),
)
.with_resource(format!("/{}/{}", bucket, key)),
StorageError::InvalidBucketName(msg) => {
S3Error::new(S3ErrorCode::InvalidBucketName, msg)
}
StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
StorageError::MethodNotAllowed(msg) => S3Error::new(S3ErrorCode::MethodNotAllowed, msg),
StorageError::UploadNotFound(id) => S3Error::new(
S3ErrorCode::NoSuchUpload,
format!("Upload {} not found", id),

File diff suppressed because it is too large Load Diff

View File

@@ -30,11 +30,97 @@ pub trait StorageEngine: Send + Sync {
key: &str,
) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
async fn get_object_range(
&self,
bucket: &str,
key: &str,
start: u64,
len: Option<u64>,
) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
async fn get_object_snapshot(
&self,
bucket: &str,
key: &str,
) -> StorageResult<(ObjectMeta, tokio::fs::File)>;
async fn get_object_version_snapshot(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<(ObjectMeta, tokio::fs::File)>;
async fn get_object_path(&self, bucket: &str, key: &str) -> StorageResult<PathBuf>;
async fn snapshot_object_to_link(
&self,
bucket: &str,
key: &str,
link_path: &std::path::Path,
) -> StorageResult<ObjectMeta>;
async fn snapshot_object_version_to_link(
&self,
bucket: &str,
key: &str,
version_id: &str,
link_path: &std::path::Path,
) -> StorageResult<ObjectMeta>;
async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<()>;
async fn get_object_version(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
async fn get_object_version_range(
&self,
bucket: &str,
key: &str,
version_id: &str,
start: u64,
len: Option<u64>,
) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
async fn get_object_version_path(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<PathBuf>;
async fn head_object_version(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<ObjectMeta>;
async fn get_object_version_metadata(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<HashMap<String, String>>;
async fn get_archived_null_version_metadata(
&self,
bucket: &str,
key: &str,
) -> StorageResult<Option<HashMap<String, String>>>;
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<DeleteOutcome>;
async fn delete_object_version(
&self,
bucket: &str,
key: &str,
version_id: &str,
) -> StorageResult<DeleteOutcome>;
async fn copy_object(
&self,
@@ -91,6 +177,7 @@ pub trait StorageEngine: Send + Sync {
part_number: u32,
src_bucket: &str,
src_key: &str,
src_version_id: Option<&str>,
range: Option<(u64, u64)>,
) -> StorageResult<(String, chrono::DateTime<chrono::Utc>)>;
@@ -113,6 +200,12 @@ pub trait StorageEngine: Send + Sync {
async fn is_versioning_enabled(&self, bucket: &str) -> StorageResult<bool>;
async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()>;
async fn get_versioning_status(&self, bucket: &str) -> StorageResult<VersioningStatus>;
async fn set_versioning_status(
&self,
bucket: &str,
status: VersioningStatus,
) -> StorageResult<()>;
async fn list_object_versions(
&self,
@@ -120,6 +213,12 @@ pub trait StorageEngine: Send + Sync {
key: &str,
) -> StorageResult<Vec<VersionInfo>>;
async fn list_bucket_object_versions(
&self,
bucket: &str,
prefix: Option<&str>,
) -> StorageResult<Vec<VersionInfo>>;
async fn get_object_tags(&self, bucket: &str, key: &str) -> StorageResult<Vec<Tag>>;
async fn set_object_tags(&self, bucket: &str, key: &str, tags: &[Tag]) -> StorageResult<()>;

View File

@@ -60,6 +60,13 @@ pub fn validate_object_key(
return Some("Object key contains invalid segments".to_string());
}
if part.len() > 255 {
return Some(
"Object key contains a path segment longer than 255 bytes (filesystem backend limit)"
.to_string(),
);
}
if part.chars().any(|c| (c as u32) < 32) {
return Some("Object key contains control characters".to_string());
}
@@ -98,6 +105,15 @@ pub fn validate_object_key(
}
}
for part in &non_empty_parts {
if *part == ".__myfsio_dirobj__"
|| *part == ".__myfsio_empty__"
|| part.starts_with("_index.json")
{
return Some("Object key segment uses a reserved internal name".to_string());
}
}
None
}
@@ -132,6 +148,13 @@ pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
return Some("Bucket name must not be formatted as an IP address".to_string());
}
if bucket_name.starts_with("xn--") {
return Some("Bucket name must not start with the reserved prefix 'xn--'".to_string());
}
if bucket_name.ends_with("-s3alias") || bucket_name.ends_with("--ol-s3") {
return Some("Bucket name must not end with a reserved suffix".to_string());
}
None
}
@@ -174,10 +197,18 @@ mod tests {
#[test]
fn test_object_key_max_length() {
let long_key = "a".repeat(1025);
assert!(validate_object_key(&long_key, 1024, false, None).is_some());
let ok_key = "a".repeat(1024);
let too_long_total = "a/".repeat(513) + "a";
assert!(validate_object_key(&too_long_total, 1024, false, None).is_some());
let too_long_segment = "a".repeat(256);
assert!(validate_object_key(&too_long_segment, 1024, false, None).is_some());
let ok_key = vec!["a".repeat(255); 4].join("/");
assert_eq!(ok_key.len(), 255 * 4 + 3);
assert!(validate_object_key(&ok_key, 1024, false, None).is_none());
let ok_max_segment = "a".repeat(255);
assert!(validate_object_key(&ok_max_segment, 1024, false, None).is_none());
}
#[test]

Some files were not shown because too many files have changed in this diff Show More