Compare commits
13 Commits
476b9bd2e4
...
dev-pyrust
| Author | SHA1 | Date | |
|---|---|---|---|
| f2df64479c | |||
| bd405cc2fe | |||
| 7ef3820f6e | |||
| e1fb225034 | |||
| 2767e7e79d | |||
| 217af6d1c6 | |||
| 51d54b42ac | |||
| 9ec5797919 | |||
| 8935188c8f | |||
| c77c592832 | |||
| 501d563df2 | |||
| ddcdb4026c | |||
| 3e7c0af019 |
@@ -3,7 +3,7 @@
|
|||||||
logs
|
logs
|
||||||
data
|
data
|
||||||
tmp
|
tmp
|
||||||
myfsio-engine/target
|
target
|
||||||
myfsio-engine/tests
|
crates/*/tests
|
||||||
Dockerfile
|
Dockerfile
|
||||||
.dockerignore
|
.dockerignore
|
||||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -26,12 +26,8 @@ dist/
|
|||||||
*.egg-info/
|
*.egg-info/
|
||||||
.eggs/
|
.eggs/
|
||||||
|
|
||||||
# Rust / maturin build artifacts
|
|
||||||
python/myfsio_core/target/
|
|
||||||
python/myfsio_core/Cargo.lock
|
|
||||||
|
|
||||||
# Rust engine build artifacts
|
# Rust engine build artifacts
|
||||||
rust/myfsio-engine/target/
|
target/
|
||||||
|
|
||||||
# Local runtime artifacts
|
# Local runtime artifacts
|
||||||
logs/
|
logs/
|
||||||
|
|||||||
43
rust/myfsio-engine/Cargo.lock → Cargo.lock
generated
43
rust/myfsio-engine/Cargo.lock → Cargo.lock
generated
@@ -2542,6 +2542,15 @@ version = "0.1.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
|
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "matchers"
|
||||||
|
version = "0.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
|
||||||
|
dependencies = [
|
||||||
|
"regex-automata",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "matchit"
|
name = "matchit"
|
||||||
version = "0.8.4"
|
version = "0.8.4"
|
||||||
@@ -2630,7 +2639,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-auth"
|
name = "myfsio-auth"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aes",
|
"aes",
|
||||||
"base64",
|
"base64",
|
||||||
@@ -2655,7 +2664,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-common"
|
name = "myfsio-common"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"chrono",
|
"chrono",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -2666,7 +2675,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-crypto"
|
name = "myfsio-crypto"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aes-gcm",
|
"aes-gcm",
|
||||||
"base64",
|
"base64",
|
||||||
@@ -2687,7 +2696,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-server"
|
name = "myfsio-server"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aes-gcm",
|
"aes-gcm",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -2705,8 +2714,11 @@ dependencies = [
|
|||||||
"dotenvy",
|
"dotenvy",
|
||||||
"duckdb",
|
"duckdb",
|
||||||
"futures",
|
"futures",
|
||||||
|
"hex",
|
||||||
|
"http-body 1.0.1",
|
||||||
"http-body-util",
|
"http-body-util",
|
||||||
"hyper 1.9.0",
|
"hyper 1.9.0",
|
||||||
|
"md-5 0.10.6",
|
||||||
"mime_guess",
|
"mime_guess",
|
||||||
"multer",
|
"multer",
|
||||||
"myfsio-auth",
|
"myfsio-auth",
|
||||||
@@ -2723,11 +2735,14 @@ dependencies = [
|
|||||||
"roxmltree",
|
"roxmltree",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"serde_urlencoded",
|
||||||
|
"sha2 0.10.9",
|
||||||
"subtle",
|
"subtle",
|
||||||
"sysinfo",
|
"sysinfo",
|
||||||
"tempfile",
|
"tempfile",
|
||||||
"tera",
|
"tera",
|
||||||
"tokio",
|
"tokio",
|
||||||
|
"tokio-stream",
|
||||||
"tokio-util",
|
"tokio-util",
|
||||||
"tower",
|
"tower",
|
||||||
"tower-http",
|
"tower-http",
|
||||||
@@ -2738,7 +2753,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-storage"
|
name = "myfsio-storage"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"chrono",
|
"chrono",
|
||||||
"dashmap",
|
"dashmap",
|
||||||
@@ -2761,10 +2776,11 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "myfsio-xml"
|
name = "myfsio-xml"
|
||||||
version = "0.5.0"
|
version = "0.4.4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"chrono",
|
"chrono",
|
||||||
"myfsio-common",
|
"myfsio-common",
|
||||||
|
"percent-encoding",
|
||||||
"quick-xml",
|
"quick-xml",
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
@@ -4181,6 +4197,17 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tokio-stream"
|
||||||
|
version = "0.1.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
|
||||||
|
dependencies = [
|
||||||
|
"futures-core",
|
||||||
|
"pin-project-lite",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio-util"
|
name = "tokio-util"
|
||||||
version = "0.7.18"
|
version = "0.7.18"
|
||||||
@@ -4331,10 +4358,14 @@ version = "0.3.23"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319"
|
checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"matchers",
|
||||||
"nu-ansi-term",
|
"nu-ansi-term",
|
||||||
|
"once_cell",
|
||||||
|
"regex-automata",
|
||||||
"sharded-slab",
|
"sharded-slab",
|
||||||
"smallvec",
|
"smallvec",
|
||||||
"thread_local",
|
"thread_local",
|
||||||
|
"tracing",
|
||||||
"tracing-core",
|
"tracing-core",
|
||||||
"tracing-log",
|
"tracing-log",
|
||||||
]
|
]
|
||||||
@@ -10,14 +10,14 @@ members = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "0.4.3"
|
version = "0.4.4"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1", features = ["full"] }
|
||||||
axum = { version = "0.8" }
|
axum = { version = "0.8" }
|
||||||
tower = { version = "0.5" }
|
tower = { version = "0.5" }
|
||||||
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip"] }
|
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip", "timeout"] }
|
||||||
hyper = { version = "1" }
|
hyper = { version = "1" }
|
||||||
bytes = "1"
|
bytes = "1"
|
||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
@@ -38,11 +38,12 @@ percent-encoding = "2"
|
|||||||
regex = "1"
|
regex = "1"
|
||||||
unicode-normalization = "0.1"
|
unicode-normalization = "0.1"
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
tracing-subscriber = "0.3"
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
thiserror = "2"
|
thiserror = "2"
|
||||||
chrono = { version = "0.4", features = ["serde"] }
|
chrono = { version = "0.4", features = ["serde"] }
|
||||||
base64 = "0.22"
|
base64 = "0.22"
|
||||||
tokio-util = { version = "0.7", features = ["io"] }
|
tokio-util = { version = "0.7", features = ["io"] }
|
||||||
|
tokio-stream = "0.1"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
dashmap = "6"
|
dashmap = "6"
|
||||||
crc32fast = "1"
|
crc32fast = "1"
|
||||||
@@ -3,13 +3,13 @@ FROM rust:1-slim-bookworm AS builder
|
|||||||
WORKDIR /build
|
WORKDIR /build
|
||||||
|
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install -y --no-install-recommends pkg-config libssl-dev \
|
&& apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
COPY myfsio-engine ./myfsio-engine
|
COPY Cargo.toml Cargo.lock ./
|
||||||
|
COPY crates ./crates
|
||||||
|
|
||||||
RUN cd myfsio-engine \
|
RUN cargo build --release --bin myfsio-server \
|
||||||
&& cargo build --release --bin myfsio-server \
|
|
||||||
&& strip target/release/myfsio-server
|
&& strip target/release/myfsio-server
|
||||||
|
|
||||||
|
|
||||||
@@ -24,8 +24,9 @@ RUN apt-get update \
|
|||||||
&& useradd -m -u 1000 myfsio \
|
&& useradd -m -u 1000 myfsio \
|
||||||
&& chown -R myfsio:myfsio /app
|
&& chown -R myfsio:myfsio /app
|
||||||
|
|
||||||
COPY --from=builder /build/myfsio-engine/target/release/myfsio-server /usr/local/bin/myfsio-server
|
COPY --from=builder /build/target/release/myfsio-server /usr/local/bin/myfsio-server
|
||||||
COPY --from=builder /build/myfsio-engine/templates /app/templates
|
COPY --from=builder /build/crates/myfsio-server/templates /app/templates
|
||||||
|
COPY --from=builder /build/crates/myfsio-server/static /app/static
|
||||||
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
|
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||||
|
|
||||||
RUN chmod +x /app/docker-entrypoint.sh \
|
RUN chmod +x /app/docker-entrypoint.sh \
|
||||||
@@ -34,9 +35,13 @@ RUN chmod +x /app/docker-entrypoint.sh \
|
|||||||
USER myfsio
|
USER myfsio
|
||||||
|
|
||||||
EXPOSE 5000
|
EXPOSE 5000
|
||||||
|
EXPOSE 5100
|
||||||
ENV HOST=0.0.0.0 \
|
ENV HOST=0.0.0.0 \
|
||||||
PORT=5000 \
|
PORT=5000 \
|
||||||
|
UI_PORT=5100 \
|
||||||
STORAGE_ROOT=/app/data \
|
STORAGE_ROOT=/app/data \
|
||||||
|
TEMPLATES_DIR=/app/templates \
|
||||||
|
STATIC_DIR=/app/static \
|
||||||
RUST_LOG=info
|
RUST_LOG=info
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||||
21
README.md
21
README.md
@@ -1,8 +1,6 @@
|
|||||||
# MyFSIO
|
# MyFSIO
|
||||||
|
|
||||||
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The active server lives under `rust/myfsio-engine` and serves both the S3 API and the built-in web UI from a single process.
|
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The repository root is the Cargo workspace; the server serves both the S3 API and the built-in web UI from a single process.
|
||||||
|
|
||||||
The repository still contains a `python/` tree, but you do not need Python to run the current server.
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
@@ -29,7 +27,6 @@ If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only
|
|||||||
From the repository root:
|
From the repository root:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd rust/myfsio-engine
|
|
||||||
cargo run -p myfsio-server --
|
cargo run -p myfsio-server --
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -60,14 +57,13 @@ UI_ENABLED=false cargo run -p myfsio-server --
|
|||||||
## Building a Binary
|
## Building a Binary
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd rust/myfsio-engine
|
|
||||||
cargo build --release -p myfsio-server
|
cargo build --release -p myfsio-server
|
||||||
```
|
```
|
||||||
|
|
||||||
Binary locations:
|
Binary locations:
|
||||||
|
|
||||||
- Linux/macOS: `rust/myfsio-engine/target/release/myfsio-server`
|
- Linux/macOS: `target/release/myfsio-server`
|
||||||
- Windows: `rust/myfsio-engine/target/release/myfsio-server.exe`
|
- Windows: `target/release/myfsio-server.exe`
|
||||||
|
|
||||||
Run the built binary directly:
|
Run the built binary directly:
|
||||||
|
|
||||||
@@ -166,10 +162,10 @@ data/
|
|||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
Build the Rust image from the `rust/` directory:
|
Build the Rust image from the repository root:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker build -t myfsio ./rust
|
docker build -t myfsio .
|
||||||
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
|
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -180,11 +176,9 @@ If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3
|
|||||||
The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:
|
The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd rust/myfsio-engine
|
|
||||||
cargo build --release -p myfsio-server
|
cargo build --release -p myfsio-server
|
||||||
|
|
||||||
cd ../..
|
sudo ./scripts/install.sh --binary ./target/release/myfsio-server
|
||||||
sudo ./scripts/install.sh --binary ./rust/myfsio-engine/target/release/myfsio-server
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
|
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
|
||||||
@@ -194,7 +188,6 @@ The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/m
|
|||||||
Run the Rust test suite from the workspace:
|
Run the Rust test suite from the workspace:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd rust/myfsio-engine
|
|
||||||
cargo test
|
cargo test
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -209,4 +202,4 @@ cargo test
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `version` field comes from the Rust crate version in `rust/myfsio-engine/crates/myfsio-server/Cargo.toml`.
|
The `version` field comes from the Rust crate version in `crates/myfsio-server/Cargo.toml`.
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ pub const STATS_FILE: &str = "stats.json";
|
|||||||
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
|
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
|
||||||
pub const INDEX_FILE: &str = "_index.json";
|
pub const INDEX_FILE: &str = "_index.json";
|
||||||
pub const MANIFEST_FILE: &str = "manifest.json";
|
pub const MANIFEST_FILE: &str = "manifest.json";
|
||||||
|
pub const DIR_MARKER_FILE: &str = ".__myfsio_dirobj__";
|
||||||
|
|
||||||
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||||
|
|
||||||
@@ -3,27 +3,37 @@ use std::fmt;
|
|||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
pub enum S3ErrorCode {
|
pub enum S3ErrorCode {
|
||||||
AccessDenied,
|
AccessDenied,
|
||||||
|
BadDigest,
|
||||||
BucketAlreadyExists,
|
BucketAlreadyExists,
|
||||||
|
BucketAlreadyOwnedByYou,
|
||||||
BucketNotEmpty,
|
BucketNotEmpty,
|
||||||
EntityTooLarge,
|
EntityTooLarge,
|
||||||
|
EntityTooSmall,
|
||||||
InternalError,
|
InternalError,
|
||||||
InvalidAccessKeyId,
|
InvalidAccessKeyId,
|
||||||
InvalidArgument,
|
InvalidArgument,
|
||||||
InvalidBucketName,
|
InvalidBucketName,
|
||||||
InvalidKey,
|
InvalidKey,
|
||||||
|
InvalidPart,
|
||||||
|
InvalidPartOrder,
|
||||||
InvalidPolicyDocument,
|
InvalidPolicyDocument,
|
||||||
InvalidRange,
|
InvalidRange,
|
||||||
InvalidRequest,
|
InvalidRequest,
|
||||||
|
InvalidTag,
|
||||||
MalformedXML,
|
MalformedXML,
|
||||||
MethodNotAllowed,
|
MethodNotAllowed,
|
||||||
NoSuchBucket,
|
NoSuchBucket,
|
||||||
|
NoSuchBucketPolicy,
|
||||||
NoSuchKey,
|
NoSuchKey,
|
||||||
|
NoSuchLifecycleConfiguration,
|
||||||
NoSuchUpload,
|
NoSuchUpload,
|
||||||
NoSuchVersion,
|
NoSuchVersion,
|
||||||
NoSuchTagSet,
|
NoSuchTagSet,
|
||||||
PreconditionFailed,
|
PreconditionFailed,
|
||||||
NotModified,
|
NotModified,
|
||||||
QuotaExceeded,
|
QuotaExceeded,
|
||||||
|
RequestTimeTooSkewed,
|
||||||
|
ServerSideEncryptionConfigurationNotFoundError,
|
||||||
SignatureDoesNotMatch,
|
SignatureDoesNotMatch,
|
||||||
SlowDown,
|
SlowDown,
|
||||||
}
|
}
|
||||||
@@ -32,56 +42,78 @@ impl S3ErrorCode {
|
|||||||
pub fn http_status(&self) -> u16 {
|
pub fn http_status(&self) -> u16 {
|
||||||
match self {
|
match self {
|
||||||
Self::AccessDenied => 403,
|
Self::AccessDenied => 403,
|
||||||
|
Self::BadDigest => 400,
|
||||||
Self::BucketAlreadyExists => 409,
|
Self::BucketAlreadyExists => 409,
|
||||||
|
Self::BucketAlreadyOwnedByYou => 409,
|
||||||
Self::BucketNotEmpty => 409,
|
Self::BucketNotEmpty => 409,
|
||||||
Self::EntityTooLarge => 413,
|
Self::EntityTooLarge => 413,
|
||||||
|
Self::EntityTooSmall => 400,
|
||||||
Self::InternalError => 500,
|
Self::InternalError => 500,
|
||||||
Self::InvalidAccessKeyId => 403,
|
Self::InvalidAccessKeyId => 403,
|
||||||
Self::InvalidArgument => 400,
|
Self::InvalidArgument => 400,
|
||||||
Self::InvalidBucketName => 400,
|
Self::InvalidBucketName => 400,
|
||||||
Self::InvalidKey => 400,
|
Self::InvalidKey => 400,
|
||||||
|
Self::InvalidPart => 400,
|
||||||
|
Self::InvalidPartOrder => 400,
|
||||||
Self::InvalidPolicyDocument => 400,
|
Self::InvalidPolicyDocument => 400,
|
||||||
Self::InvalidRange => 416,
|
Self::InvalidRange => 416,
|
||||||
Self::InvalidRequest => 400,
|
Self::InvalidRequest => 400,
|
||||||
|
Self::InvalidTag => 400,
|
||||||
Self::MalformedXML => 400,
|
Self::MalformedXML => 400,
|
||||||
Self::MethodNotAllowed => 405,
|
Self::MethodNotAllowed => 405,
|
||||||
Self::NoSuchBucket => 404,
|
Self::NoSuchBucket => 404,
|
||||||
|
Self::NoSuchBucketPolicy => 404,
|
||||||
Self::NoSuchKey => 404,
|
Self::NoSuchKey => 404,
|
||||||
|
Self::NoSuchLifecycleConfiguration => 404,
|
||||||
Self::NoSuchUpload => 404,
|
Self::NoSuchUpload => 404,
|
||||||
Self::NoSuchVersion => 404,
|
Self::NoSuchVersion => 404,
|
||||||
Self::NoSuchTagSet => 404,
|
Self::NoSuchTagSet => 404,
|
||||||
Self::PreconditionFailed => 412,
|
Self::PreconditionFailed => 412,
|
||||||
Self::NotModified => 304,
|
Self::NotModified => 304,
|
||||||
Self::QuotaExceeded => 403,
|
Self::QuotaExceeded => 403,
|
||||||
|
Self::RequestTimeTooSkewed => 403,
|
||||||
|
Self::ServerSideEncryptionConfigurationNotFoundError => 404,
|
||||||
Self::SignatureDoesNotMatch => 403,
|
Self::SignatureDoesNotMatch => 403,
|
||||||
Self::SlowDown => 429,
|
Self::SlowDown => 503,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn as_str(&self) -> &'static str {
|
pub fn as_str(&self) -> &'static str {
|
||||||
match self {
|
match self {
|
||||||
Self::AccessDenied => "AccessDenied",
|
Self::AccessDenied => "AccessDenied",
|
||||||
|
Self::BadDigest => "BadDigest",
|
||||||
Self::BucketAlreadyExists => "BucketAlreadyExists",
|
Self::BucketAlreadyExists => "BucketAlreadyExists",
|
||||||
|
Self::BucketAlreadyOwnedByYou => "BucketAlreadyOwnedByYou",
|
||||||
Self::BucketNotEmpty => "BucketNotEmpty",
|
Self::BucketNotEmpty => "BucketNotEmpty",
|
||||||
Self::EntityTooLarge => "EntityTooLarge",
|
Self::EntityTooLarge => "EntityTooLarge",
|
||||||
|
Self::EntityTooSmall => "EntityTooSmall",
|
||||||
Self::InternalError => "InternalError",
|
Self::InternalError => "InternalError",
|
||||||
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
|
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
|
||||||
Self::InvalidArgument => "InvalidArgument",
|
Self::InvalidArgument => "InvalidArgument",
|
||||||
Self::InvalidBucketName => "InvalidBucketName",
|
Self::InvalidBucketName => "InvalidBucketName",
|
||||||
Self::InvalidKey => "InvalidKey",
|
Self::InvalidKey => "InvalidKey",
|
||||||
|
Self::InvalidPart => "InvalidPart",
|
||||||
|
Self::InvalidPartOrder => "InvalidPartOrder",
|
||||||
Self::InvalidPolicyDocument => "InvalidPolicyDocument",
|
Self::InvalidPolicyDocument => "InvalidPolicyDocument",
|
||||||
Self::InvalidRange => "InvalidRange",
|
Self::InvalidRange => "InvalidRange",
|
||||||
Self::InvalidRequest => "InvalidRequest",
|
Self::InvalidRequest => "InvalidRequest",
|
||||||
|
Self::InvalidTag => "InvalidTag",
|
||||||
Self::MalformedXML => "MalformedXML",
|
Self::MalformedXML => "MalformedXML",
|
||||||
Self::MethodNotAllowed => "MethodNotAllowed",
|
Self::MethodNotAllowed => "MethodNotAllowed",
|
||||||
Self::NoSuchBucket => "NoSuchBucket",
|
Self::NoSuchBucket => "NoSuchBucket",
|
||||||
|
Self::NoSuchBucketPolicy => "NoSuchBucketPolicy",
|
||||||
Self::NoSuchKey => "NoSuchKey",
|
Self::NoSuchKey => "NoSuchKey",
|
||||||
|
Self::NoSuchLifecycleConfiguration => "NoSuchLifecycleConfiguration",
|
||||||
Self::NoSuchUpload => "NoSuchUpload",
|
Self::NoSuchUpload => "NoSuchUpload",
|
||||||
Self::NoSuchVersion => "NoSuchVersion",
|
Self::NoSuchVersion => "NoSuchVersion",
|
||||||
Self::NoSuchTagSet => "NoSuchTagSet",
|
Self::NoSuchTagSet => "NoSuchTagSet",
|
||||||
Self::PreconditionFailed => "PreconditionFailed",
|
Self::PreconditionFailed => "PreconditionFailed",
|
||||||
Self::NotModified => "NotModified",
|
Self::NotModified => "NotModified",
|
||||||
Self::QuotaExceeded => "QuotaExceeded",
|
Self::QuotaExceeded => "QuotaExceeded",
|
||||||
|
Self::RequestTimeTooSkewed => "RequestTimeTooSkewed",
|
||||||
|
Self::ServerSideEncryptionConfigurationNotFoundError => {
|
||||||
|
"ServerSideEncryptionConfigurationNotFoundError"
|
||||||
|
}
|
||||||
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
|
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
|
||||||
Self::SlowDown => "SlowDown",
|
Self::SlowDown => "SlowDown",
|
||||||
}
|
}
|
||||||
@@ -90,27 +122,37 @@ impl S3ErrorCode {
|
|||||||
pub fn default_message(&self) -> &'static str {
|
pub fn default_message(&self) -> &'static str {
|
||||||
match self {
|
match self {
|
||||||
Self::AccessDenied => "Access Denied",
|
Self::AccessDenied => "Access Denied",
|
||||||
|
Self::BadDigest => "The Content-MD5 or checksum value you specified did not match what we received",
|
||||||
Self::BucketAlreadyExists => "The requested bucket name is not available",
|
Self::BucketAlreadyExists => "The requested bucket name is not available",
|
||||||
|
Self::BucketAlreadyOwnedByYou => "Your previous request to create the named bucket succeeded and you already own it",
|
||||||
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
|
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
|
||||||
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
|
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
|
||||||
|
Self::EntityTooSmall => "Your proposed upload is smaller than the minimum allowed object size",
|
||||||
Self::InternalError => "We encountered an internal error. Please try again.",
|
Self::InternalError => "We encountered an internal error. Please try again.",
|
||||||
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
|
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
|
||||||
Self::InvalidArgument => "Invalid argument",
|
Self::InvalidArgument => "Invalid argument",
|
||||||
Self::InvalidBucketName => "The specified bucket is not valid",
|
Self::InvalidBucketName => "The specified bucket is not valid",
|
||||||
Self::InvalidKey => "The specified key is not valid",
|
Self::InvalidKey => "The specified key is not valid",
|
||||||
|
Self::InvalidPart => "One or more of the specified parts could not be found",
|
||||||
|
Self::InvalidPartOrder => "The list of parts was not in ascending order",
|
||||||
Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
|
Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
|
||||||
Self::InvalidRange => "The requested range is not satisfiable",
|
Self::InvalidRange => "The requested range is not satisfiable",
|
||||||
Self::InvalidRequest => "Invalid request",
|
Self::InvalidRequest => "Invalid request",
|
||||||
|
Self::InvalidTag => "The Tagging header is invalid",
|
||||||
Self::MalformedXML => "The XML you provided was not well-formed",
|
Self::MalformedXML => "The XML you provided was not well-formed",
|
||||||
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
|
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
|
||||||
Self::NoSuchBucket => "The specified bucket does not exist",
|
Self::NoSuchBucket => "The specified bucket does not exist",
|
||||||
|
Self::NoSuchBucketPolicy => "The bucket policy does not exist",
|
||||||
Self::NoSuchKey => "The specified key does not exist",
|
Self::NoSuchKey => "The specified key does not exist",
|
||||||
|
Self::NoSuchLifecycleConfiguration => "The lifecycle configuration does not exist",
|
||||||
Self::NoSuchUpload => "The specified multipart upload does not exist",
|
Self::NoSuchUpload => "The specified multipart upload does not exist",
|
||||||
Self::NoSuchVersion => "The specified version does not exist",
|
Self::NoSuchVersion => "The specified version does not exist",
|
||||||
Self::NoSuchTagSet => "The TagSet does not exist",
|
Self::NoSuchTagSet => "The TagSet does not exist",
|
||||||
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
|
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
|
||||||
Self::NotModified => "Not Modified",
|
Self::NotModified => "Not Modified",
|
||||||
Self::QuotaExceeded => "The bucket quota has been exceeded",
|
Self::QuotaExceeded => "The bucket quota has been exceeded",
|
||||||
|
Self::RequestTimeTooSkewed => "The difference between the request time and the server's time is too large",
|
||||||
|
Self::ServerSideEncryptionConfigurationNotFoundError => "The server side encryption configuration was not found",
|
||||||
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
|
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
|
||||||
Self::SlowDown => "Please reduce your request rate",
|
Self::SlowDown => "Please reduce your request rate",
|
||||||
}
|
}
|
||||||
@@ -12,6 +12,10 @@ pub struct ObjectMeta {
|
|||||||
pub content_type: Option<String>,
|
pub content_type: Option<String>,
|
||||||
pub storage_class: Option<String>,
|
pub storage_class: Option<String>,
|
||||||
pub metadata: HashMap<String, String>,
|
pub metadata: HashMap<String, String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub version_id: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub is_delete_marker: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ObjectMeta {
|
impl ObjectMeta {
|
||||||
@@ -24,10 +28,19 @@ impl ObjectMeta {
|
|||||||
content_type: None,
|
content_type: None,
|
||||||
storage_class: Some("STANDARD".to_string()),
|
storage_class: Some("STANDARD".to_string()),
|
||||||
metadata: HashMap::new(),
|
metadata: HashMap::new(),
|
||||||
|
version_id: None,
|
||||||
|
is_delete_marker: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Default)]
|
||||||
|
pub struct DeleteOutcome {
|
||||||
|
pub version_id: Option<String>,
|
||||||
|
pub is_delete_marker: bool,
|
||||||
|
pub existed: bool,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct BucketMeta {
|
pub struct BucketMeta {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
@@ -112,6 +125,8 @@ pub struct VersionInfo {
|
|||||||
pub last_modified: DateTime<Utc>,
|
pub last_modified: DateTime<Utc>,
|
||||||
pub etag: Option<String>,
|
pub etag: Option<String>,
|
||||||
pub is_latest: bool,
|
pub is_latest: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub is_delete_marker: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@@ -82,11 +82,35 @@ impl EncryptionMetadata {
|
|||||||
pub struct EncryptionService {
|
pub struct EncryptionService {
|
||||||
master_key: [u8; 32],
|
master_key: [u8; 32],
|
||||||
kms: Option<std::sync::Arc<KmsService>>,
|
kms: Option<std::sync::Arc<KmsService>>,
|
||||||
|
config: EncryptionConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
pub struct EncryptionConfig {
|
||||||
|
pub chunk_size: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for EncryptionConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self { chunk_size: 65_536 }
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EncryptionService {
|
impl EncryptionService {
|
||||||
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
|
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
|
||||||
Self { master_key, kms }
|
Self::with_config(master_key, kms, EncryptionConfig::default())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_config(
|
||||||
|
master_key: [u8; 32],
|
||||||
|
kms: Option<std::sync::Arc<KmsService>>,
|
||||||
|
config: EncryptionConfig,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
master_key,
|
||||||
|
kms,
|
||||||
|
config,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
|
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
|
||||||
@@ -192,7 +216,10 @@ impl EncryptionService {
|
|||||||
let op = output_path.to_owned();
|
let op = output_path.to_owned();
|
||||||
let ak = actual_key;
|
let ak = actual_key;
|
||||||
let n = nonce;
|
let n = nonce;
|
||||||
tokio::task::spawn_blocking(move || encrypt_stream_chunked(&ip, &op, &ak, &n, None))
|
let chunk_size = self.config.chunk_size;
|
||||||
|
tokio::task::spawn_blocking(move || {
|
||||||
|
encrypt_stream_chunked(&ip, &op, &ak, &n, Some(chunk_size))
|
||||||
|
})
|
||||||
.await
|
.await
|
||||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
|
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
|
||||||
|
|
||||||
@@ -10,6 +10,7 @@ myfsio-crypto = { path = "../myfsio-crypto" }
|
|||||||
myfsio-storage = { path = "../myfsio-storage" }
|
myfsio-storage = { path = "../myfsio-storage" }
|
||||||
myfsio-xml = { path = "../myfsio-xml" }
|
myfsio-xml = { path = "../myfsio-xml" }
|
||||||
base64 = { workspace = true }
|
base64 = { workspace = true }
|
||||||
|
md-5 = { workspace = true }
|
||||||
axum = { workspace = true }
|
axum = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
tower = { workspace = true }
|
tower = { workspace = true }
|
||||||
@@ -18,17 +19,22 @@ hyper = { workspace = true }
|
|||||||
bytes = { workspace = true }
|
bytes = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
serde_urlencoded = "0.7"
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
tracing-subscriber = { workspace = true }
|
tracing-subscriber = { workspace = true }
|
||||||
tokio-util = { workspace = true }
|
tokio-util = { workspace = true }
|
||||||
|
tokio-stream = { workspace = true }
|
||||||
chrono = { workspace = true }
|
chrono = { workspace = true }
|
||||||
uuid = { workspace = true }
|
uuid = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
|
http-body = "1"
|
||||||
http-body-util = "0.1"
|
http-body-util = "0.1"
|
||||||
percent-encoding = { workspace = true }
|
percent-encoding = { workspace = true }
|
||||||
quick-xml = { workspace = true }
|
quick-xml = { workspace = true }
|
||||||
mime_guess = "2"
|
mime_guess = "2"
|
||||||
crc32fast = { workspace = true }
|
crc32fast = { workspace = true }
|
||||||
|
sha2 = { workspace = true }
|
||||||
|
hex = { workspace = true }
|
||||||
duckdb = { workspace = true }
|
duckdb = { workspace = true }
|
||||||
roxmltree = "0.20"
|
roxmltree = "0.20"
|
||||||
parking_lot = { workspace = true }
|
parking_lot = { workspace = true }
|
||||||
621
crates/myfsio-server/src/config.rs
Normal file
621
crates/myfsio-server/src/config.rs
Normal file
@@ -0,0 +1,621 @@
|
|||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
/// A request-rate cap: at most `max_requests` calls within a rolling
/// window of `window_seconds` seconds.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RateLimitSetting {
    /// Maximum number of requests allowed inside one window.
    pub max_requests: u32,
    /// Length of the window, in seconds.
    pub window_seconds: u64,
}

impl RateLimitSetting {
    /// Build a setting from its two raw components.
    pub const fn new(max_requests: u32, window_seconds: u64) -> Self {
        Self { max_requests, window_seconds }
    }
}
|
||||||
|
|
||||||
|
/// Runtime configuration for the server.
///
/// Normally constructed via [`ServerConfig::from_env`]; each field maps to an
/// environment variable (see `from_env` for variable names and defaults), and
/// [`Default`] mirrors those fallback values.
#[derive(Debug, Clone)]
pub struct ServerConfig {
    // --- Network binding ---
    /// Address the main API listens on (HOST/PORT).
    pub bind_addr: SocketAddr,
    /// Address the UI listens on (HOST/UI_PORT).
    pub ui_bind_addr: SocketAddr,

    // --- Storage & identity ---
    /// Root directory for stored data (STORAGE_ROOT).
    pub storage_root: PathBuf,
    /// Default region string (AWS_REGION).
    pub region: String,
    /// Path to the IAM JSON config; defaults to a path under the storage root.
    pub iam_config_path: PathBuf,

    // --- Signing / presigned URLs ---
    pub sigv4_timestamp_tolerance_secs: u64,
    pub presigned_url_min_expiry: u64,
    pub presigned_url_max_expiry: u64,
    /// Session/signing secret; `None` when neither SECRET_KEY nor the
    /// `.secret` file under the storage root is available.
    pub secret_key: Option<String>,

    // --- Encryption & KMS ---
    pub encryption_enabled: bool,
    pub encryption_chunk_size_bytes: usize,
    pub kms_enabled: bool,
    pub kms_generate_data_key_min_bytes: usize,
    pub kms_generate_data_key_max_bytes: usize,

    // --- Garbage collection of temp/multipart/lock artifacts ---
    pub gc_enabled: bool,
    pub gc_interval_hours: f64,
    pub gc_temp_file_max_age_hours: f64,
    pub gc_multipart_max_age_days: u64,
    pub gc_lock_file_max_age_hours: f64,
    /// When true, GC only reports what it would delete.
    pub gc_dry_run: bool,

    pub integrity_enabled: bool,

    // --- Operation metrics & metrics history ---
    pub metrics_enabled: bool,
    pub metrics_history_enabled: bool,
    pub metrics_interval_minutes: u64,
    pub metrics_retention_hours: u64,
    pub metrics_history_interval_minutes: u64,
    pub metrics_history_retention_hours: u64,

    // --- Lifecycle rules ---
    pub lifecycle_enabled: bool,
    pub lifecycle_max_history_per_bucket: usize,

    // --- Object/bucket limits ---
    pub website_hosting_enabled: bool,
    pub object_key_max_length_bytes: usize,
    pub object_tag_limit: usize,
    pub object_cache_max_size: usize,
    pub bucket_config_cache_ttl_seconds: f64,

    // --- Cross-site replication ---
    pub replication_connect_timeout_secs: u64,
    pub replication_read_timeout_secs: u64,
    pub replication_max_retries: u32,
    /// Objects at or above this size are replicated via streaming.
    pub replication_streaming_threshold_bytes: u64,
    pub replication_max_failures_per_bucket: usize,

    // --- Site-to-site sync ---
    pub site_sync_enabled: bool,
    pub site_sync_interval_secs: u64,
    pub site_sync_batch_size: usize,
    pub site_sync_connect_timeout_secs: u64,
    pub site_sync_read_timeout_secs: u64,
    pub site_sync_max_retries: u32,
    pub site_sync_clock_skew_tolerance: f64,
    /// Identity of this site in a multi-site deployment (SITE_ID); optional.
    pub site_id: Option<String>,
    pub site_endpoint: Option<String>,
    pub site_region: String,
    pub site_priority: i32,

    // --- HTTP surface ---
    /// Public base URL; no trailing slash (normalized in `from_env`).
    pub api_base_url: String,
    pub num_trusted_proxies: usize,
    pub allowed_redirect_hosts: Vec<String>,
    pub allow_internal_endpoints: bool,
    pub cors_origins: Vec<String>,
    pub cors_methods: Vec<String>,
    pub cors_allow_headers: Vec<String>,
    pub cors_expose_headers: Vec<String>,
    pub session_lifetime_days: u64,
    pub log_level: String,

    // --- Transfer tuning ---
    pub multipart_min_part_size: u64,
    pub bulk_delete_max_keys: usize,
    pub stream_chunk_size: usize,
    pub request_body_timeout_secs: u64,

    // --- Rate limiting, per endpoint class ---
    pub ratelimit_default: RateLimitSetting,
    pub ratelimit_list_buckets: RateLimitSetting,
    pub ratelimit_bucket_ops: RateLimitSetting,
    pub ratelimit_object_ops: RateLimitSetting,
    pub ratelimit_head_ops: RateLimitSetting,
    pub ratelimit_admin: RateLimitSetting,
    /// Backend URI for the rate limiter (e.g. "memory://").
    pub ratelimit_storage_uri: String,

    // --- UI assets ---
    pub ui_enabled: bool,
    pub templates_dir: PathBuf,
    pub static_dir: PathBuf,
}
|
||||||
|
|
||||||
|
impl ServerConfig {
|
||||||
|
pub fn from_env() -> Self {
|
||||||
|
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
|
||||||
|
let port: u16 = std::env::var("PORT")
|
||||||
|
.unwrap_or_else(|_| "5000".to_string())
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(5000);
|
||||||
|
let host_ip: std::net::IpAddr = host.parse().unwrap();
|
||||||
|
let bind_addr = SocketAddr::new(host_ip, port);
|
||||||
|
let ui_port: u16 = std::env::var("UI_PORT")
|
||||||
|
.unwrap_or_else(|_| "5100".to_string())
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(5100);
|
||||||
|
let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
|
||||||
|
let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
|
||||||
|
|
||||||
|
let storage_path = PathBuf::from(&storage_root);
|
||||||
|
let iam_config_path = std::env::var("IAM_CONFIG")
|
||||||
|
.map(PathBuf::from)
|
||||||
|
.unwrap_or_else(|_| {
|
||||||
|
storage_path
|
||||||
|
.join(".myfsio.sys")
|
||||||
|
.join("config")
|
||||||
|
.join("iam.json")
|
||||||
|
});
|
||||||
|
|
||||||
|
let sigv4_timestamp_tolerance_secs: u64 =
|
||||||
|
std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
|
||||||
|
.unwrap_or_else(|_| "900".to_string())
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(900);
|
||||||
|
|
||||||
|
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
|
||||||
|
.unwrap_or_else(|_| "1".to_string())
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(1);
|
||||||
|
|
||||||
|
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
|
||||||
|
.unwrap_or_else(|_| "604800".to_string())
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(604800);
|
||||||
|
|
||||||
|
let secret_key = {
|
||||||
|
let env_key = std::env::var("SECRET_KEY").ok();
|
||||||
|
match env_key {
|
||||||
|
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
|
||||||
|
_ => {
|
||||||
|
let secret_file = storage_path
|
||||||
|
.join(".myfsio.sys")
|
||||||
|
.join("config")
|
||||||
|
.join(".secret");
|
||||||
|
std::fs::read_to_string(&secret_file)
|
||||||
|
.ok()
|
||||||
|
.map(|s| s.trim().to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
|
||||||
|
let encryption_chunk_size_bytes = parse_usize_env("ENCRYPTION_CHUNK_SIZE_BYTES", 65_536);
|
||||||
|
|
||||||
|
let kms_enabled = parse_bool_env("KMS_ENABLED", false);
|
||||||
|
let kms_generate_data_key_min_bytes = parse_usize_env("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1);
|
||||||
|
let kms_generate_data_key_max_bytes =
|
||||||
|
parse_usize_env("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024);
|
||||||
|
|
||||||
|
let gc_enabled = parse_bool_env("GC_ENABLED", false);
|
||||||
|
let gc_interval_hours = parse_f64_env("GC_INTERVAL_HOURS", 6.0);
|
||||||
|
let gc_temp_file_max_age_hours = parse_f64_env("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0);
|
||||||
|
let gc_multipart_max_age_days = parse_u64_env("GC_MULTIPART_MAX_AGE_DAYS", 7);
|
||||||
|
let gc_lock_file_max_age_hours = parse_f64_env("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0);
|
||||||
|
let gc_dry_run = parse_bool_env("GC_DRY_RUN", false);
|
||||||
|
|
||||||
|
let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
|
||||||
|
|
||||||
|
let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
|
||||||
|
|
||||||
|
let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
|
||||||
|
|
||||||
|
let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
|
||||||
|
let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
|
||||||
|
let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
|
||||||
|
let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
|
||||||
|
|
||||||
|
let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
|
||||||
|
let lifecycle_max_history_per_bucket =
|
||||||
|
parse_usize_env("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50);
|
||||||
|
|
||||||
|
let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
|
||||||
|
let object_key_max_length_bytes = parse_usize_env("OBJECT_KEY_MAX_LENGTH_BYTES", 1024);
|
||||||
|
let object_tag_limit = parse_usize_env("OBJECT_TAG_LIMIT", 50);
|
||||||
|
let object_cache_max_size = parse_usize_env("OBJECT_CACHE_MAX_SIZE", 100);
|
||||||
|
let bucket_config_cache_ttl_seconds =
|
||||||
|
parse_f64_env("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0);
|
||||||
|
|
||||||
|
let replication_connect_timeout_secs =
|
||||||
|
parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
|
||||||
|
let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
|
||||||
|
let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
|
||||||
|
let replication_streaming_threshold_bytes =
|
||||||
|
parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
|
||||||
|
let replication_max_failures_per_bucket =
|
||||||
|
parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
|
||||||
|
|
||||||
|
let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
|
||||||
|
let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
|
||||||
|
let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
|
||||||
|
let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
|
||||||
|
let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
|
||||||
|
let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
|
||||||
|
let site_sync_clock_skew_tolerance: f64 =
|
||||||
|
std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| s.parse().ok())
|
||||||
|
.unwrap_or(1.0);
|
||||||
|
|
||||||
|
let site_id = parse_optional_string_env("SITE_ID");
|
||||||
|
let site_endpoint = parse_optional_string_env("SITE_ENDPOINT");
|
||||||
|
let site_region = std::env::var("SITE_REGION").unwrap_or_else(|_| region.clone());
|
||||||
|
let site_priority = parse_i32_env("SITE_PRIORITY", 100);
|
||||||
|
let api_base_url = std::env::var("API_BASE_URL")
|
||||||
|
.unwrap_or_else(|_| format!("http://{}", bind_addr))
|
||||||
|
.trim_end_matches('/')
|
||||||
|
.to_string();
|
||||||
|
let num_trusted_proxies = parse_usize_env("NUM_TRUSTED_PROXIES", 0);
|
||||||
|
let allowed_redirect_hosts = parse_list_env("ALLOWED_REDIRECT_HOSTS", "");
|
||||||
|
let allow_internal_endpoints = parse_bool_env("ALLOW_INTERNAL_ENDPOINTS", false);
|
||||||
|
let cors_origins = parse_list_env("CORS_ORIGINS", "*");
|
||||||
|
let cors_methods = parse_list_env("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD");
|
||||||
|
let cors_allow_headers = parse_list_env("CORS_ALLOW_HEADERS", "*");
|
||||||
|
let cors_expose_headers = parse_list_env("CORS_EXPOSE_HEADERS", "*");
|
||||||
|
let session_lifetime_days = parse_u64_env("SESSION_LIFETIME_DAYS", 1);
|
||||||
|
let log_level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string());
|
||||||
|
let multipart_min_part_size = parse_u64_env("MULTIPART_MIN_PART_SIZE", 5_242_880);
|
||||||
|
let bulk_delete_max_keys = parse_usize_env("BULK_DELETE_MAX_KEYS", 1000);
|
||||||
|
let stream_chunk_size = parse_usize_env("STREAM_CHUNK_SIZE", 1_048_576);
|
||||||
|
let request_body_timeout_secs = parse_u64_env("REQUEST_BODY_TIMEOUT_SECONDS", 60);
|
||||||
|
let ratelimit_default =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(500, 60));
|
||||||
|
let ratelimit_list_buckets =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_LIST_BUCKETS", ratelimit_default);
|
||||||
|
let ratelimit_bucket_ops =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_BUCKET_OPS", ratelimit_default);
|
||||||
|
let ratelimit_object_ops =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_OBJECT_OPS", ratelimit_default);
|
||||||
|
let ratelimit_head_ops =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_HEAD_OPS", ratelimit_default);
|
||||||
|
let ratelimit_admin =
|
||||||
|
parse_rate_limit_env("RATE_LIMIT_ADMIN", RateLimitSetting::new(60, 60));
|
||||||
|
let ratelimit_storage_uri =
|
||||||
|
std::env::var("RATE_LIMIT_STORAGE_URI").unwrap_or_else(|_| "memory://".to_string());
|
||||||
|
|
||||||
|
let ui_enabled = parse_bool_env("UI_ENABLED", true);
|
||||||
|
let templates_dir = std::env::var("TEMPLATES_DIR")
|
||||||
|
.map(PathBuf::from)
|
||||||
|
.unwrap_or_else(|_| default_templates_dir());
|
||||||
|
let static_dir = std::env::var("STATIC_DIR")
|
||||||
|
.map(PathBuf::from)
|
||||||
|
.unwrap_or_else(|_| default_static_dir());
|
||||||
|
|
||||||
|
Self {
|
||||||
|
bind_addr,
|
||||||
|
ui_bind_addr: SocketAddr::new(host_ip, ui_port),
|
||||||
|
storage_root: storage_path,
|
||||||
|
region,
|
||||||
|
iam_config_path,
|
||||||
|
sigv4_timestamp_tolerance_secs,
|
||||||
|
presigned_url_min_expiry,
|
||||||
|
presigned_url_max_expiry,
|
||||||
|
secret_key,
|
||||||
|
encryption_enabled,
|
||||||
|
encryption_chunk_size_bytes,
|
||||||
|
kms_enabled,
|
||||||
|
kms_generate_data_key_min_bytes,
|
||||||
|
kms_generate_data_key_max_bytes,
|
||||||
|
gc_enabled,
|
||||||
|
gc_interval_hours,
|
||||||
|
gc_temp_file_max_age_hours,
|
||||||
|
gc_multipart_max_age_days,
|
||||||
|
gc_lock_file_max_age_hours,
|
||||||
|
gc_dry_run,
|
||||||
|
integrity_enabled,
|
||||||
|
metrics_enabled,
|
||||||
|
metrics_history_enabled,
|
||||||
|
metrics_interval_minutes,
|
||||||
|
metrics_retention_hours,
|
||||||
|
metrics_history_interval_minutes,
|
||||||
|
metrics_history_retention_hours,
|
||||||
|
lifecycle_enabled,
|
||||||
|
lifecycle_max_history_per_bucket,
|
||||||
|
website_hosting_enabled,
|
||||||
|
object_key_max_length_bytes,
|
||||||
|
object_tag_limit,
|
||||||
|
object_cache_max_size,
|
||||||
|
bucket_config_cache_ttl_seconds,
|
||||||
|
replication_connect_timeout_secs,
|
||||||
|
replication_read_timeout_secs,
|
||||||
|
replication_max_retries,
|
||||||
|
replication_streaming_threshold_bytes,
|
||||||
|
replication_max_failures_per_bucket,
|
||||||
|
site_sync_enabled,
|
||||||
|
site_sync_interval_secs,
|
||||||
|
site_sync_batch_size,
|
||||||
|
site_sync_connect_timeout_secs,
|
||||||
|
site_sync_read_timeout_secs,
|
||||||
|
site_sync_max_retries,
|
||||||
|
site_sync_clock_skew_tolerance,
|
||||||
|
site_id,
|
||||||
|
site_endpoint,
|
||||||
|
site_region,
|
||||||
|
site_priority,
|
||||||
|
api_base_url,
|
||||||
|
num_trusted_proxies,
|
||||||
|
allowed_redirect_hosts,
|
||||||
|
allow_internal_endpoints,
|
||||||
|
cors_origins,
|
||||||
|
cors_methods,
|
||||||
|
cors_allow_headers,
|
||||||
|
cors_expose_headers,
|
||||||
|
session_lifetime_days,
|
||||||
|
log_level,
|
||||||
|
multipart_min_part_size,
|
||||||
|
bulk_delete_max_keys,
|
||||||
|
stream_chunk_size,
|
||||||
|
request_body_timeout_secs,
|
||||||
|
ratelimit_default,
|
||||||
|
ratelimit_list_buckets,
|
||||||
|
ratelimit_bucket_ops,
|
||||||
|
ratelimit_object_ops,
|
||||||
|
ratelimit_head_ops,
|
||||||
|
ratelimit_admin,
|
||||||
|
ratelimit_storage_uri,
|
||||||
|
ui_enabled,
|
||||||
|
templates_dir,
|
||||||
|
static_dir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ServerConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
bind_addr: "127.0.0.1:5000".parse().unwrap(),
|
||||||
|
ui_bind_addr: "127.0.0.1:5100".parse().unwrap(),
|
||||||
|
storage_root: PathBuf::from("./data"),
|
||||||
|
region: "us-east-1".to_string(),
|
||||||
|
iam_config_path: PathBuf::from("./data/.myfsio.sys/config/iam.json"),
|
||||||
|
sigv4_timestamp_tolerance_secs: 900,
|
||||||
|
presigned_url_min_expiry: 1,
|
||||||
|
presigned_url_max_expiry: 604_800,
|
||||||
|
secret_key: None,
|
||||||
|
encryption_enabled: false,
|
||||||
|
encryption_chunk_size_bytes: 65_536,
|
||||||
|
kms_enabled: false,
|
||||||
|
kms_generate_data_key_min_bytes: 1,
|
||||||
|
kms_generate_data_key_max_bytes: 1024,
|
||||||
|
gc_enabled: false,
|
||||||
|
gc_interval_hours: 6.0,
|
||||||
|
gc_temp_file_max_age_hours: 24.0,
|
||||||
|
gc_multipart_max_age_days: 7,
|
||||||
|
gc_lock_file_max_age_hours: 1.0,
|
||||||
|
gc_dry_run: false,
|
||||||
|
integrity_enabled: false,
|
||||||
|
metrics_enabled: false,
|
||||||
|
metrics_history_enabled: false,
|
||||||
|
metrics_interval_minutes: 5,
|
||||||
|
metrics_retention_hours: 24,
|
||||||
|
metrics_history_interval_minutes: 5,
|
||||||
|
metrics_history_retention_hours: 24,
|
||||||
|
lifecycle_enabled: false,
|
||||||
|
lifecycle_max_history_per_bucket: 50,
|
||||||
|
website_hosting_enabled: false,
|
||||||
|
object_key_max_length_bytes: 1024,
|
||||||
|
object_tag_limit: 50,
|
||||||
|
object_cache_max_size: 100,
|
||||||
|
bucket_config_cache_ttl_seconds: 30.0,
|
||||||
|
replication_connect_timeout_secs: 5,
|
||||||
|
replication_read_timeout_secs: 30,
|
||||||
|
replication_max_retries: 2,
|
||||||
|
replication_streaming_threshold_bytes: 10_485_760,
|
||||||
|
replication_max_failures_per_bucket: 50,
|
||||||
|
site_sync_enabled: false,
|
||||||
|
site_sync_interval_secs: 60,
|
||||||
|
site_sync_batch_size: 100,
|
||||||
|
site_sync_connect_timeout_secs: 10,
|
||||||
|
site_sync_read_timeout_secs: 120,
|
||||||
|
site_sync_max_retries: 2,
|
||||||
|
site_sync_clock_skew_tolerance: 1.0,
|
||||||
|
site_id: None,
|
||||||
|
site_endpoint: None,
|
||||||
|
site_region: "us-east-1".to_string(),
|
||||||
|
site_priority: 100,
|
||||||
|
api_base_url: "http://127.0.0.1:5000".to_string(),
|
||||||
|
num_trusted_proxies: 0,
|
||||||
|
allowed_redirect_hosts: Vec::new(),
|
||||||
|
allow_internal_endpoints: false,
|
||||||
|
cors_origins: vec!["*".to_string()],
|
||||||
|
cors_methods: vec![
|
||||||
|
"GET".to_string(),
|
||||||
|
"PUT".to_string(),
|
||||||
|
"POST".to_string(),
|
||||||
|
"DELETE".to_string(),
|
||||||
|
"OPTIONS".to_string(),
|
||||||
|
"HEAD".to_string(),
|
||||||
|
],
|
||||||
|
cors_allow_headers: vec!["*".to_string()],
|
||||||
|
cors_expose_headers: vec!["*".to_string()],
|
||||||
|
session_lifetime_days: 1,
|
||||||
|
log_level: "INFO".to_string(),
|
||||||
|
multipart_min_part_size: 5_242_880,
|
||||||
|
bulk_delete_max_keys: 1000,
|
||||||
|
stream_chunk_size: 1_048_576,
|
||||||
|
request_body_timeout_secs: 60,
|
||||||
|
ratelimit_default: RateLimitSetting::new(500, 60),
|
||||||
|
ratelimit_list_buckets: RateLimitSetting::new(500, 60),
|
||||||
|
ratelimit_bucket_ops: RateLimitSetting::new(500, 60),
|
||||||
|
ratelimit_object_ops: RateLimitSetting::new(500, 60),
|
||||||
|
ratelimit_head_ops: RateLimitSetting::new(500, 60),
|
||||||
|
ratelimit_admin: RateLimitSetting::new(60, 60),
|
||||||
|
ratelimit_storage_uri: "memory://".to_string(),
|
||||||
|
ui_enabled: true,
|
||||||
|
templates_dir: default_templates_dir(),
|
||||||
|
static_dir: default_static_dir(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_templates_dir() -> PathBuf {
|
||||||
|
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||||
|
manifest_dir.join("templates")
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_static_dir() -> PathBuf {
|
||||||
|
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||||
|
for candidate in [
|
||||||
|
manifest_dir.join("static"),
|
||||||
|
manifest_dir.join("..").join("..").join("..").join("static"),
|
||||||
|
] {
|
||||||
|
if candidate.exists() {
|
||||||
|
return candidate;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
manifest_dir.join("static")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read `key` from the environment as a `u64`, falling back to `default`
/// when the variable is unset or does not parse.
fn parse_u64_env(key: &str, default: u64) -> u64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||||
|
|
||||||
|
/// Read `key` from the environment as a `usize`, falling back to `default`
/// when the variable is unset or does not parse.
fn parse_usize_env(key: &str, default: usize) -> usize {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||||
|
|
||||||
|
/// Read `key` from the environment as an `i32`, falling back to `default`
/// when the variable is unset or does not parse.
fn parse_i32_env(key: &str, default: i32) -> i32 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||||
|
|
||||||
|
/// Read `key` from the environment as an `f64`, falling back to `default`
/// when the variable is unset or does not parse.
fn parse_f64_env(key: &str, default: f64) -> f64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||||
|
|
||||||
|
/// Read `key` as a boolean flag: "1"/"true"/"yes"/"on" (case-insensitive,
/// trimmed) mean `true`, any other set value means `false`, and an unset
/// variable yields `default`.
fn parse_bool_env(key: &str, default: bool) -> bool {
    match std::env::var(key) {
        Ok(raw) => {
            let normalized = raw.trim().to_ascii_lowercase();
            ["1", "true", "yes", "on"].contains(&normalized.as_str())
        }
        Err(_) => default,
    }
}
|
||||||
|
|
||||||
|
/// Read `key` from the environment, trimming whitespace; unset or
/// whitespace-only values become `None`.
fn parse_optional_string_env(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
        Err(_) => None,
    }
}
|
||||||
|
|
||||||
|
/// Read a comma-separated list from `key` (or from `default` when the
/// variable is unset), trimming each entry and dropping empty ones.
fn parse_list_env(key: &str, default: &str) -> Vec<String> {
    let raw = std::env::var(key).unwrap_or_else(|_| default.to_string());
    let mut items = Vec::new();
    for piece in raw.split(',') {
        let piece = piece.trim();
        if !piece.is_empty() {
            items.push(piece.to_string());
        }
    }
    items
}
|
||||||
|
|
||||||
|
pub fn parse_rate_limit(value: &str) -> Option<RateLimitSetting> {
|
||||||
|
let trimmed = value.trim();
|
||||||
|
if let Some((requests, window)) = trimmed.split_once('/') {
|
||||||
|
let max_requests = requests.trim().parse::<u32>().ok()?;
|
||||||
|
if max_requests == 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let window_str = window.trim().to_ascii_lowercase();
|
||||||
|
let window_seconds = if let Ok(n) = window_str.parse::<u64>() {
|
||||||
|
if n == 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
n
|
||||||
|
} else {
|
||||||
|
match window_str.as_str() {
|
||||||
|
"s" | "sec" | "second" | "seconds" => 1,
|
||||||
|
"m" | "min" | "minute" | "minutes" => 60,
|
||||||
|
"h" | "hr" | "hour" | "hours" => 3600,
|
||||||
|
"d" | "day" | "days" => 86_400,
|
||||||
|
_ => return None,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
return Some(RateLimitSetting::new(max_requests, window_seconds));
|
||||||
|
}
|
||||||
|
|
||||||
|
let parts = trimmed.split_whitespace().collect::<Vec<_>>();
|
||||||
|
if parts.len() != 3 || !parts[1].eq_ignore_ascii_case("per") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let max_requests = parts[0].parse::<u32>().ok()?;
|
||||||
|
if max_requests == 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let window_seconds = match parts[2].to_ascii_lowercase().as_str() {
|
||||||
|
"second" | "seconds" => 1,
|
||||||
|
"minute" | "minutes" => 60,
|
||||||
|
"hour" | "hours" => 3600,
|
||||||
|
"day" | "days" => 86_400,
|
||||||
|
_ => return None,
|
||||||
|
};
|
||||||
|
Some(RateLimitSetting::new(max_requests, window_seconds))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_rate_limit_env(key: &str, default: RateLimitSetting) -> RateLimitSetting {
|
||||||
|
std::env::var(key)
|
||||||
|
.ok()
|
||||||
|
.and_then(|value| parse_rate_limit(&value))
|
||||||
|
.unwrap_or(default)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Mutex, OnceLock};

    // Serialises tests that mutate process-global environment variables;
    // without this, the parallel test harness would race on
    // set_var/remove_var across tests.
    fn env_lock() -> &'static Mutex<()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(()))
    }

    #[test]
    fn parses_rate_limit_text() {
        // "N per unit" word form.
        assert_eq!(
            parse_rate_limit("200 per minute"),
            Some(RateLimitSetting::new(200, 60))
        );
        assert_eq!(
            parse_rate_limit("3 per hours"),
            Some(RateLimitSetting::new(3, 3600))
        );
        // "N/window" slash form: numeric seconds or a unit word.
        assert_eq!(
            parse_rate_limit("50000/60"),
            Some(RateLimitSetting::new(50000, 60))
        );
        assert_eq!(
            parse_rate_limit("100/minute"),
            Some(RateLimitSetting::new(100, 60))
        );
        // Zero counts and unparseable specs are rejected.
        assert_eq!(parse_rate_limit("0/60"), None);
        assert_eq!(parse_rate_limit("0 per minute"), None);
        assert_eq!(parse_rate_limit("bad"), None);
    }

    #[test]
    fn env_defaults_and_invalid_values_fall_back() {
        let _guard = env_lock().lock().unwrap();
        // Unset and unparseable variables must all fall back to the
        // built-in defaults rather than failing.
        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::set_var("OBJECT_TAG_LIMIT", "not-a-number");
        std::env::set_var("RATE_LIMIT_DEFAULT", "invalid");

        let config = ServerConfig::from_env();

        assert_eq!(config.object_key_max_length_bytes, 1024);
        assert_eq!(config.object_tag_limit, 50);
        assert_eq!(config.ratelimit_default, RateLimitSetting::new(500, 60));

        // Clean up so later tests observe a pristine environment.
        std::env::remove_var("OBJECT_TAG_LIMIT");
        std::env::remove_var("RATE_LIMIT_DEFAULT");
    }

    #[test]
    fn env_overrides_new_values() {
        let _guard = env_lock().lock().unwrap();
        std::env::set_var("OBJECT_KEY_MAX_LENGTH_BYTES", "2048");
        std::env::set_var("GC_DRY_RUN", "true");
        std::env::set_var("RATE_LIMIT_ADMIN", "7 per second");
        std::env::set_var("HOST", "127.0.0.1");
        std::env::set_var("PORT", "5501");
        // API_BASE_URL left unset so it is derived from HOST/PORT below.
        std::env::remove_var("API_BASE_URL");

        let config = ServerConfig::from_env();

        assert_eq!(config.object_key_max_length_bytes, 2048);
        assert!(config.gc_dry_run);
        assert_eq!(config.ratelimit_admin, RateLimitSetting::new(7, 1));
        assert_eq!(config.api_base_url, "http://127.0.0.1:5501");

        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::remove_var("GC_DRY_RUN");
        std::env::remove_var("RATE_LIMIT_ADMIN");
        std::env::remove_var("HOST");
        std::env::remove_var("PORT");
    }
}
|
||||||
@@ -46,6 +46,17 @@ fn require_admin(principal: &Principal) -> Option<Response> {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn require_iam_action(state: &AppState, principal: &Principal, action: &str) -> Option<Response> {
|
||||||
|
if !state.iam.authorize(principal, None, action, None) {
|
||||||
|
return Some(json_error(
|
||||||
|
"AccessDenied",
|
||||||
|
&format!("Requires {} permission", action),
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
|
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
|
||||||
let bytes = http_body_util::BodyExt::collect(body)
|
let bytes = http_body_util::BodyExt::collect(body)
|
||||||
.await
|
.await
|
||||||
@@ -926,7 +937,7 @@ pub async fn iam_list_users(
|
|||||||
State(state): State<AppState>,
|
State(state): State<AppState>,
|
||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:list_users") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
let users = state.iam.list_users().await;
|
let users = state.iam.list_users().await;
|
||||||
@@ -938,7 +949,7 @@ pub async fn iam_get_user(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path(identifier): Path<String>,
|
Path(identifier): Path<String>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:get_user") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.get_user(&identifier).await {
|
match state.iam.get_user(&identifier).await {
|
||||||
@@ -956,7 +967,7 @@ pub async fn iam_get_user_policies(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path(identifier): Path<String>,
|
Path(identifier): Path<String>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:get_policy") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.get_user_policies(&identifier) {
|
match state.iam.get_user_policies(&identifier) {
|
||||||
@@ -974,7 +985,7 @@ pub async fn iam_create_access_key(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path(identifier): Path<String>,
|
Path(identifier): Path<String>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:create_key") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.create_access_key(&identifier) {
|
match state.iam.create_access_key(&identifier) {
|
||||||
@@ -988,7 +999,7 @@ pub async fn iam_delete_access_key(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path((_identifier, access_key)): Path<(String, String)>,
|
Path((_identifier, access_key)): Path<(String, String)>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:delete_key") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.delete_access_key(&access_key) {
|
match state.iam.delete_access_key(&access_key) {
|
||||||
@@ -1002,7 +1013,7 @@ pub async fn iam_disable_user(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path(identifier): Path<String>,
|
Path(identifier): Path<String>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:disable_user") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.set_user_enabled(&identifier, false).await {
|
match state.iam.set_user_enabled(&identifier, false).await {
|
||||||
@@ -1016,7 +1027,7 @@ pub async fn iam_enable_user(
|
|||||||
Extension(principal): Extension<Principal>,
|
Extension(principal): Extension<Principal>,
|
||||||
Path(identifier): Path<String>,
|
Path(identifier): Path<String>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
if let Some(err) = require_admin(&principal) {
|
if let Some(err) = require_iam_action(&state, &principal, "iam:disable_user") {
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
match state.iam.set_user_enabled(&identifier, true).await {
|
match state.iam.set_user_enabled(&identifier, true).await {
|
||||||
@@ -1,10 +1,19 @@
|
|||||||
use axum::body::Body;
|
use axum::body::Body;
|
||||||
use axum::http::StatusCode;
|
use axum::http::{HeaderMap, StatusCode};
|
||||||
use axum::response::{IntoResponse, Response};
|
use axum::response::{IntoResponse, Response};
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
|
||||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||||
use myfsio_storage::traits::StorageEngine;
|
use myfsio_storage::traits::StorageEngine;
|
||||||
|
|
||||||
|
use crate::services::acl::{
|
||||||
|
acl_from_object_metadata, acl_to_xml, create_canned_acl, store_object_acl,
|
||||||
|
};
|
||||||
|
use crate::services::notifications::parse_notification_configurations;
|
||||||
|
use crate::services::object_lock::{
|
||||||
|
ensure_retention_mutable, get_legal_hold, get_object_retention as retention_from_metadata,
|
||||||
|
set_legal_hold, set_object_retention as store_retention, ObjectLockRetention, RetentionMode,
|
||||||
|
};
|
||||||
use crate::state::AppState;
|
use crate::state::AppState;
|
||||||
|
|
||||||
fn xml_response(status: StatusCode, xml: String) -> Response {
|
fn xml_response(status: StatusCode, xml: String) -> Response {
|
||||||
@@ -32,6 +41,16 @@ fn json_response(status: StatusCode, value: serde_json::Value) -> Response {
|
|||||||
.into_response()
|
.into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn custom_xml_error(status: StatusCode, code: &str, message: &str) -> Response {
|
||||||
|
let xml = format!(
|
||||||
|
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||||
|
<Error><Code>{}</Code><Message>{}</Message><Resource></Resource><RequestId></RequestId></Error>",
|
||||||
|
xml_escape(code),
|
||||||
|
xml_escape(message),
|
||||||
|
);
|
||||||
|
xml_response(status, xml)
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
|
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
|
||||||
match state.storage.is_versioning_enabled(bucket).await {
|
match state.storage.is_versioning_enabled(bucket).await {
|
||||||
Ok(enabled) => {
|
Ok(enabled) => {
|
||||||
@@ -199,10 +218,7 @@ pub async fn get_encryption(state: &AppState, bucket: &str) -> Response {
|
|||||||
} else {
|
} else {
|
||||||
xml_response(
|
xml_response(
|
||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
S3Error::new(
|
S3Error::from_code(S3ErrorCode::ServerSideEncryptionConfigurationNotFoundError)
|
||||||
S3ErrorCode::InvalidRequest,
|
|
||||||
"The server side encryption configuration was not found",
|
|
||||||
)
|
|
||||||
.to_xml(),
|
.to_xml(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -251,11 +267,7 @@ pub async fn get_lifecycle(state: &AppState, bucket: &str) -> Response {
|
|||||||
} else {
|
} else {
|
||||||
xml_response(
|
xml_response(
|
||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
S3Error::new(
|
S3Error::from_code(S3ErrorCode::NoSuchLifecycleConfiguration).to_xml(),
|
||||||
S3ErrorCode::NoSuchKey,
|
|
||||||
"The lifecycle configuration does not exist",
|
|
||||||
)
|
|
||||||
.to_xml(),
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -402,7 +414,7 @@ pub async fn get_policy(state: &AppState, bucket: &str) -> Response {
|
|||||||
} else {
|
} else {
|
||||||
xml_response(
|
xml_response(
|
||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
S3Error::new(S3ErrorCode::NoSuchKey, "No bucket policy attached").to_xml(),
|
S3Error::from_code(S3ErrorCode::NoSuchBucketPolicy).to_xml(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -847,13 +859,34 @@ pub async fn delete_object_lock(state: &AppState, bucket: &str) -> Response {
|
|||||||
pub async fn put_notification(state: &AppState, bucket: &str, body: Body) -> Response {
|
pub async fn put_notification(state: &AppState, bucket: &str, body: Body) -> Response {
|
||||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||||
Ok(collected) => collected.to_bytes(),
|
Ok(collected) => collected.to_bytes(),
|
||||||
Err(_) => return StatusCode::BAD_REQUEST.into_response(),
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"MalformedXML",
|
||||||
|
"Unable to parse XML document",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let raw = String::from_utf8_lossy(&body_bytes).to_string();
|
||||||
|
let notification = if raw.trim().is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
match parse_notification_configurations(&raw) {
|
||||||
|
Ok(_) => Some(serde_json::Value::String(raw)),
|
||||||
|
Err(message) => {
|
||||||
|
let code = if message.contains("Destination URL is required") {
|
||||||
|
"InvalidArgument"
|
||||||
|
} else {
|
||||||
|
"MalformedXML"
|
||||||
|
};
|
||||||
|
return custom_xml_error(StatusCode::BAD_REQUEST, code, &message);
|
||||||
|
}
|
||||||
|
}
|
||||||
};
|
};
|
||||||
let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
|
|
||||||
|
|
||||||
match state.storage.get_bucket_config(bucket).await {
|
match state.storage.get_bucket_config(bucket).await {
|
||||||
Ok(mut config) => {
|
Ok(mut config) => {
|
||||||
config.notification = Some(value);
|
config.notification = notification;
|
||||||
match state.storage.set_bucket_config(bucket, &config).await {
|
match state.storage.set_bucket_config(bucket, &config).await {
|
||||||
Ok(()) => StatusCode::OK.into_response(),
|
Ok(()) => StatusCode::OK.into_response(),
|
||||||
Err(e) => storage_err(e),
|
Err(e) => storage_err(e),
|
||||||
@@ -998,7 +1031,12 @@ fn s3_error_response(code: S3ErrorCode, message: &str, status: StatusCode) -> Re
|
|||||||
(status, [("content-type", "application/xml")], err.to_xml()).into_response()
|
(status, [("content-type", "application/xml")], err.to_xml()).into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list_object_versions(state: &AppState, bucket: &str) -> Response {
|
pub async fn list_object_versions(
|
||||||
|
state: &AppState,
|
||||||
|
bucket: &str,
|
||||||
|
prefix: Option<&str>,
|
||||||
|
max_keys: usize,
|
||||||
|
) -> Response {
|
||||||
match state.storage.list_buckets().await {
|
match state.storage.list_buckets().await {
|
||||||
Ok(buckets) => {
|
Ok(buckets) => {
|
||||||
if !buckets.iter().any(|b| b.name == bucket) {
|
if !buckets.iter().any(|b| b.name == bucket) {
|
||||||
@@ -1010,13 +1048,24 @@ pub async fn list_object_versions(state: &AppState, bucket: &str) -> Response {
|
|||||||
Err(e) => return storage_err(e),
|
Err(e) => return storage_err(e),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let fetch_limit = max_keys.saturating_add(1).max(1);
|
||||||
let params = myfsio_common::types::ListParams {
|
let params = myfsio_common::types::ListParams {
|
||||||
max_keys: 1000,
|
max_keys: fetch_limit,
|
||||||
|
prefix: prefix.map(ToOwned::to_owned),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let objects = match state.storage.list_objects(bucket, ¶ms).await {
|
let object_result = match state.storage.list_objects(bucket, ¶ms).await {
|
||||||
Ok(result) => result.objects,
|
Ok(result) => result,
|
||||||
|
Err(e) => return storage_err(e),
|
||||||
|
};
|
||||||
|
let objects = object_result.objects;
|
||||||
|
let archived_versions = match state
|
||||||
|
.storage
|
||||||
|
.list_bucket_object_versions(bucket, prefix)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(versions) => versions,
|
||||||
Err(e) => return storage_err(e),
|
Err(e) => return storage_err(e),
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -1024,25 +1073,105 @@ pub async fn list_object_versions(state: &AppState, bucket: &str) -> Response {
|
|||||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||||
<ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
|
<ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
|
||||||
);
|
);
|
||||||
xml.push_str(&format!("<Name>{}</Name>", bucket));
|
xml.push_str(&format!("<Name>{}</Name>", xml_escape(bucket)));
|
||||||
|
xml.push_str(&format!(
|
||||||
|
"<Prefix>{}</Prefix>",
|
||||||
|
xml_escape(prefix.unwrap_or(""))
|
||||||
|
));
|
||||||
|
xml.push_str(&format!("<MaxKeys>{}</MaxKeys>", max_keys));
|
||||||
|
|
||||||
for obj in &objects {
|
let current_count = objects.len().min(max_keys);
|
||||||
|
let remaining = max_keys.saturating_sub(current_count);
|
||||||
|
let archived_count = archived_versions.len().min(remaining);
|
||||||
|
let is_truncated = object_result.is_truncated
|
||||||
|
|| objects.len() > current_count
|
||||||
|
|| archived_versions.len() > archived_count;
|
||||||
|
xml.push_str(&format!("<IsTruncated>{}</IsTruncated>", is_truncated));
|
||||||
|
|
||||||
|
let current_keys: std::collections::HashSet<String> = objects
|
||||||
|
.iter()
|
||||||
|
.take(current_count)
|
||||||
|
.map(|o| o.key.clone())
|
||||||
|
.collect();
|
||||||
|
let mut latest_archived_per_key: std::collections::HashMap<String, String> =
|
||||||
|
std::collections::HashMap::new();
|
||||||
|
for v in archived_versions.iter().take(archived_count) {
|
||||||
|
if current_keys.contains(&v.key) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let existing = latest_archived_per_key.get(&v.key).cloned();
|
||||||
|
match existing {
|
||||||
|
None => {
|
||||||
|
latest_archived_per_key.insert(v.key.clone(), v.version_id.clone());
|
||||||
|
}
|
||||||
|
Some(existing_id) => {
|
||||||
|
let existing_ts = archived_versions
|
||||||
|
.iter()
|
||||||
|
.find(|x| x.key == v.key && x.version_id == existing_id)
|
||||||
|
.map(|x| x.last_modified)
|
||||||
|
.unwrap_or(v.last_modified);
|
||||||
|
if v.last_modified > existing_ts {
|
||||||
|
latest_archived_per_key.insert(v.key.clone(), v.version_id.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for obj in objects.iter().take(current_count) {
|
||||||
|
let version_id = obj.version_id.clone().unwrap_or_else(|| "null".to_string());
|
||||||
xml.push_str("<Version>");
|
xml.push_str("<Version>");
|
||||||
xml.push_str(&format!("<Key>{}</Key>", obj.key));
|
xml.push_str(&format!("<Key>{}</Key>", xml_escape(&obj.key)));
|
||||||
xml.push_str("<VersionId>null</VersionId>");
|
xml.push_str(&format!(
|
||||||
|
"<VersionId>{}</VersionId>",
|
||||||
|
xml_escape(&version_id)
|
||||||
|
));
|
||||||
xml.push_str("<IsLatest>true</IsLatest>");
|
xml.push_str("<IsLatest>true</IsLatest>");
|
||||||
xml.push_str(&format!(
|
xml.push_str(&format!(
|
||||||
"<LastModified>{}</LastModified>",
|
"<LastModified>{}</LastModified>",
|
||||||
myfsio_xml::response::format_s3_datetime(&obj.last_modified)
|
myfsio_xml::response::format_s3_datetime(&obj.last_modified)
|
||||||
));
|
));
|
||||||
if let Some(ref etag) = obj.etag {
|
if let Some(ref etag) = obj.etag {
|
||||||
xml.push_str(&format!("<ETag>\"{}\"</ETag>", etag));
|
xml.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
|
||||||
}
|
}
|
||||||
xml.push_str(&format!("<Size>{}</Size>", obj.size));
|
xml.push_str(&format!("<Size>{}</Size>", obj.size));
|
||||||
xml.push_str("<StorageClass>STANDARD</StorageClass>");
|
xml.push_str(&format!(
|
||||||
|
"<StorageClass>{}</StorageClass>",
|
||||||
|
xml_escape(obj.storage_class.as_deref().unwrap_or("STANDARD"))
|
||||||
|
));
|
||||||
xml.push_str("</Version>");
|
xml.push_str("</Version>");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for version in archived_versions.iter().take(archived_count) {
|
||||||
|
let is_latest = latest_archived_per_key
|
||||||
|
.get(&version.key)
|
||||||
|
.map(|id| id == &version.version_id)
|
||||||
|
.unwrap_or(false);
|
||||||
|
let tag = if version.is_delete_marker {
|
||||||
|
"DeleteMarker"
|
||||||
|
} else {
|
||||||
|
"Version"
|
||||||
|
};
|
||||||
|
xml.push_str(&format!("<{}>", tag));
|
||||||
|
xml.push_str(&format!("<Key>{}</Key>", xml_escape(&version.key)));
|
||||||
|
xml.push_str(&format!(
|
||||||
|
"<VersionId>{}</VersionId>",
|
||||||
|
xml_escape(&version.version_id)
|
||||||
|
));
|
||||||
|
xml.push_str(&format!("<IsLatest>{}</IsLatest>", is_latest));
|
||||||
|
xml.push_str(&format!(
|
||||||
|
"<LastModified>{}</LastModified>",
|
||||||
|
myfsio_xml::response::format_s3_datetime(&version.last_modified)
|
||||||
|
));
|
||||||
|
if !version.is_delete_marker {
|
||||||
|
if let Some(ref etag) = version.etag {
|
||||||
|
xml.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
|
||||||
|
}
|
||||||
|
xml.push_str(&format!("<Size>{}</Size>", version.size));
|
||||||
|
xml.push_str("<StorageClass>STANDARD</StorageClass>");
|
||||||
|
}
|
||||||
|
xml.push_str(&format!("</{}>", tag));
|
||||||
|
}
|
||||||
|
|
||||||
xml.push_str("</ListVersionsResult>");
|
xml.push_str("</ListVersionsResult>");
|
||||||
xml_response(StatusCode::OK, xml)
|
xml_response(StatusCode::OK, xml)
|
||||||
}
|
}
|
||||||
@@ -1080,6 +1209,36 @@ pub async fn put_object_tagging(state: &AppState, bucket: &str, key: &str, body:
|
|||||||
|
|
||||||
let xml_str = String::from_utf8_lossy(&body_bytes);
|
let xml_str = String::from_utf8_lossy(&body_bytes);
|
||||||
let tags = parse_tagging_xml(&xml_str);
|
let tags = parse_tagging_xml(&xml_str);
|
||||||
|
if tags.len() > state.config.object_tag_limit {
|
||||||
|
return xml_response(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
S3Error::new(
|
||||||
|
S3ErrorCode::InvalidTag,
|
||||||
|
format!("Maximum {} tags allowed", state.config.object_tag_limit),
|
||||||
|
)
|
||||||
|
.to_xml(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
for tag in &tags {
|
||||||
|
if tag.key.is_empty() || tag.key.len() > 128 {
|
||||||
|
return xml_response(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
S3Error::new(S3ErrorCode::InvalidTag, "Tag key length must be 1-128").to_xml(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if tag.value.len() > 256 {
|
||||||
|
return xml_response(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
S3Error::new(S3ErrorCode::InvalidTag, "Tag value length must be 0-256").to_xml(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if tag.key.contains('=') {
|
||||||
|
return xml_response(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
S3Error::new(S3ErrorCode::InvalidTag, "Tag keys must not contain '='").to_xml(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
match state.storage.set_object_tags(bucket, key, &tags).await {
|
match state.storage.set_object_tags(bucket, key, &tags).await {
|
||||||
Ok(()) => StatusCode::OK.into_response(),
|
Ok(()) => StatusCode::OK.into_response(),
|
||||||
@@ -1094,40 +1253,68 @@ pub async fn delete_object_tagging(state: &AppState, bucket: &str, key: &str) ->
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_object_acl(state: &AppState, bucket: &str, key: &str) -> Response {
|
pub async fn put_object_acl(
|
||||||
|
state: &AppState,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
headers: &HeaderMap,
|
||||||
|
_body: Body,
|
||||||
|
) -> Response {
|
||||||
match state.storage.head_object(bucket, key).await {
|
match state.storage.head_object(bucket, key).await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
let canned_acl = headers
|
||||||
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
|
.get("x-amz-acl")
|
||||||
<Owner><ID>myfsio</ID><DisplayName>myfsio</DisplayName></Owner>\
|
.and_then(|value| value.to_str().ok())
|
||||||
<AccessControlList>\
|
.unwrap_or("private");
|
||||||
<Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
|
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
<ID>myfsio</ID><DisplayName>myfsio</DisplayName></Grantee>\
|
Ok(metadata) => metadata,
|
||||||
<Permission>FULL_CONTROL</Permission></Grant>\
|
Err(err) => return storage_err(err),
|
||||||
</AccessControlList></AccessControlPolicy>";
|
};
|
||||||
xml_response(StatusCode::OK, xml.to_string())
|
let owner = acl_from_object_metadata(&metadata)
|
||||||
}
|
.map(|acl| acl.owner)
|
||||||
Err(e) => storage_err(e),
|
.unwrap_or_else(|| "myfsio".to_string());
|
||||||
|
let acl = create_canned_acl(canned_acl, &owner);
|
||||||
|
store_object_acl(&mut metadata, &acl);
|
||||||
|
match state
|
||||||
|
.storage
|
||||||
|
.put_object_metadata(bucket, key, &metadata)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => StatusCode::OK.into_response(),
|
||||||
|
Err(err) => storage_err(err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn put_object_acl(state: &AppState, bucket: &str, key: &str, _body: Body) -> Response {
|
|
||||||
match state.storage.head_object(bucket, key).await {
|
|
||||||
Ok(_) => StatusCode::OK.into_response(),
|
|
||||||
Err(e) => storage_err(e),
|
Err(e) => storage_err(e),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_object_retention(state: &AppState, bucket: &str, key: &str) -> Response {
|
pub async fn get_object_retention(state: &AppState, bucket: &str, key: &str) -> Response {
|
||||||
match state.storage.head_object(bucket, key).await {
|
match state.storage.head_object(bucket, key).await {
|
||||||
Ok(_) => xml_response(
|
Ok(_) => {
|
||||||
|
let metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) => return storage_err(err),
|
||||||
|
};
|
||||||
|
if let Some(retention) = retention_from_metadata(&metadata) {
|
||||||
|
let xml = format!(
|
||||||
|
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||||
|
<Retention xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
|
||||||
|
<Mode>{}</Mode><RetainUntilDate>{}</RetainUntilDate></Retention>",
|
||||||
|
match retention.mode {
|
||||||
|
RetentionMode::GOVERNANCE => "GOVERNANCE",
|
||||||
|
RetentionMode::COMPLIANCE => "COMPLIANCE",
|
||||||
|
},
|
||||||
|
retention.retain_until_date.format("%Y-%m-%dT%H:%M:%S.000Z"),
|
||||||
|
);
|
||||||
|
xml_response(StatusCode::OK, xml)
|
||||||
|
} else {
|
||||||
|
custom_xml_error(
|
||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
S3Error::new(
|
"NoSuchObjectLockConfiguration",
|
||||||
S3ErrorCode::InvalidRequest,
|
"No retention policy",
|
||||||
"No retention policy configured",
|
|
||||||
)
|
)
|
||||||
.to_xml(),
|
}
|
||||||
),
|
}
|
||||||
Err(e) => storage_err(e),
|
Err(e) => storage_err(e),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1136,21 +1323,116 @@ pub async fn put_object_retention(
|
|||||||
state: &AppState,
|
state: &AppState,
|
||||||
bucket: &str,
|
bucket: &str,
|
||||||
key: &str,
|
key: &str,
|
||||||
_body: Body,
|
headers: &HeaderMap,
|
||||||
|
body: Body,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
match state.storage.head_object(bucket, key).await {
|
match state.storage.head_object(bucket, key).await {
|
||||||
Ok(_) => StatusCode::OK.into_response(),
|
Ok(_) => {}
|
||||||
Err(e) => storage_err(e),
|
Err(e) => return storage_err(e),
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||||
|
Ok(collected) => collected.to_bytes(),
|
||||||
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"MalformedXML",
|
||||||
|
"Unable to parse XML document",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let body_str = String::from_utf8_lossy(&body_bytes);
|
||||||
|
let doc = match roxmltree::Document::parse(&body_str) {
|
||||||
|
Ok(doc) => doc,
|
||||||
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"MalformedXML",
|
||||||
|
"Unable to parse XML document",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mode = find_xml_text(&doc, "Mode").unwrap_or_default();
|
||||||
|
let retain_until = find_xml_text(&doc, "RetainUntilDate").unwrap_or_default();
|
||||||
|
if mode.is_empty() || retain_until.is_empty() {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"InvalidArgument",
|
||||||
|
"Mode and RetainUntilDate are required",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
let mode = match mode.as_str() {
|
||||||
|
"GOVERNANCE" => RetentionMode::GOVERNANCE,
|
||||||
|
"COMPLIANCE" => RetentionMode::COMPLIANCE,
|
||||||
|
other => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"InvalidArgument",
|
||||||
|
&format!("Invalid retention mode: {}", other),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let retain_until_date = match DateTime::parse_from_rfc3339(&retain_until) {
|
||||||
|
Ok(value) => value.with_timezone(&Utc),
|
||||||
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"InvalidArgument",
|
||||||
|
&format!("Invalid date format: {}", retain_until),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let bypass_governance = headers
|
||||||
|
.get("x-amz-bypass-governance-retention")
|
||||||
|
.and_then(|value| value.to_str().ok())
|
||||||
|
.map(|value| value.eq_ignore_ascii_case("true"))
|
||||||
|
.unwrap_or(false);
|
||||||
|
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) => return storage_err(err),
|
||||||
|
};
|
||||||
|
if let Err(message) = ensure_retention_mutable(&metadata, bypass_governance) {
|
||||||
|
return custom_xml_error(StatusCode::FORBIDDEN, "AccessDenied", &message);
|
||||||
|
}
|
||||||
|
if let Err(message) = store_retention(
|
||||||
|
&mut metadata,
|
||||||
|
&ObjectLockRetention {
|
||||||
|
mode,
|
||||||
|
retain_until_date,
|
||||||
|
},
|
||||||
|
) {
|
||||||
|
return custom_xml_error(StatusCode::BAD_REQUEST, "InvalidArgument", &message);
|
||||||
|
}
|
||||||
|
match state
|
||||||
|
.storage
|
||||||
|
.put_object_metadata(bucket, key, &metadata)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => StatusCode::OK.into_response(),
|
||||||
|
Err(err) => storage_err(err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get_object_legal_hold(state: &AppState, bucket: &str, key: &str) -> Response {
|
pub async fn get_object_legal_hold(state: &AppState, bucket: &str, key: &str) -> Response {
|
||||||
match state.storage.head_object(bucket, key).await {
|
match state.storage.head_object(bucket, key).await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
let metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) => return storage_err(err),
|
||||||
|
};
|
||||||
|
let status = if get_legal_hold(&metadata) {
|
||||||
|
"ON"
|
||||||
|
} else {
|
||||||
|
"OFF"
|
||||||
|
};
|
||||||
|
let xml = format!(
|
||||||
|
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||||
<LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
|
<LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
|
||||||
<Status>OFF</Status></LegalHold>";
|
<Status>{}</Status></LegalHold>",
|
||||||
xml_response(StatusCode::OK, xml.to_string())
|
status
|
||||||
|
);
|
||||||
|
xml_response(StatusCode::OK, xml)
|
||||||
}
|
}
|
||||||
Err(e) => storage_err(e),
|
Err(e) => storage_err(e),
|
||||||
}
|
}
|
||||||
@@ -1160,14 +1442,84 @@ pub async fn put_object_legal_hold(
|
|||||||
state: &AppState,
|
state: &AppState,
|
||||||
bucket: &str,
|
bucket: &str,
|
||||||
key: &str,
|
key: &str,
|
||||||
_body: Body,
|
body: Body,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
match state.storage.head_object(bucket, key).await {
|
match state.storage.head_object(bucket, key).await {
|
||||||
Ok(_) => StatusCode::OK.into_response(),
|
Ok(_) => {}
|
||||||
|
Err(e) => return storage_err(e),
|
||||||
|
}
|
||||||
|
|
||||||
|
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
||||||
|
Ok(collected) => collected.to_bytes(),
|
||||||
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"MalformedXML",
|
||||||
|
"Unable to parse XML document",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let body_str = String::from_utf8_lossy(&body_bytes);
|
||||||
|
let doc = match roxmltree::Document::parse(&body_str) {
|
||||||
|
Ok(doc) => doc,
|
||||||
|
Err(_) => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"MalformedXML",
|
||||||
|
"Unable to parse XML document",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let status = find_xml_text(&doc, "Status").unwrap_or_default();
|
||||||
|
let enabled = match status.as_str() {
|
||||||
|
"ON" => true,
|
||||||
|
"OFF" => false,
|
||||||
|
_ => {
|
||||||
|
return custom_xml_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
"InvalidArgument",
|
||||||
|
"Status must be ON or OFF",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) => return storage_err(err),
|
||||||
|
};
|
||||||
|
set_legal_hold(&mut metadata, enabled);
|
||||||
|
match state
|
||||||
|
.storage
|
||||||
|
.put_object_metadata(bucket, key, &metadata)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => StatusCode::OK.into_response(),
|
||||||
|
Err(err) => storage_err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_object_acl(state: &AppState, bucket: &str, key: &str) -> Response {
|
||||||
|
match state.storage.head_object(bucket, key).await {
|
||||||
|
Ok(_) => {
|
||||||
|
let metadata = match state.storage.get_object_metadata(bucket, key).await {
|
||||||
|
Ok(metadata) => metadata,
|
||||||
|
Err(err) => return storage_err(err),
|
||||||
|
};
|
||||||
|
let acl = acl_from_object_metadata(&metadata)
|
||||||
|
.unwrap_or_else(|| create_canned_acl("private", "myfsio"));
|
||||||
|
xml_response(StatusCode::OK, acl_to_xml(&acl))
|
||||||
|
}
|
||||||
Err(e) => storage_err(e),
|
Err(e) => storage_err(e),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn find_xml_text(doc: &roxmltree::Document<'_>, name: &str) -> Option<String> {
|
||||||
|
doc.descendants()
|
||||||
|
.find(|node| node.is_element() && node.tag_name().name() == name)
|
||||||
|
.and_then(|node| node.text())
|
||||||
|
.map(|text| text.trim().to_string())
|
||||||
|
.filter(|text| !text.is_empty())
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::{legacy_logging_config, parse_logging_config_xml};
|
use super::{legacy_logging_config, parse_logging_config_xml};
|
||||||
@@ -294,8 +294,17 @@ async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext:
|
|||||||
.and_then(|v| v.as_u64())
|
.and_then(|v| v.as_u64())
|
||||||
.unwrap_or(32) as usize;
|
.unwrap_or(32) as usize;
|
||||||
|
|
||||||
if !(1..=1024).contains(&num_bytes) {
|
if num_bytes < state.config.kms_generate_data_key_min_bytes
|
||||||
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
|
|| num_bytes > state.config.kms_generate_data_key_max_bytes
|
||||||
|
{
|
||||||
|
return json_err(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
&format!(
|
||||||
|
"NumberOfBytes must be {}-{}",
|
||||||
|
state.config.kms_generate_data_key_min_bytes,
|
||||||
|
state.config.kms_generate_data_key_max_bytes
|
||||||
|
),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
match kms.generate_data_key(key_id, num_bytes).await {
|
match kms.generate_data_key(key_id, num_bytes).await {
|
||||||
@@ -389,8 +398,17 @@ pub async fn generate_random(State(state): State<AppState>, body: Body) -> Respo
|
|||||||
.and_then(|v| v.as_u64())
|
.and_then(|v| v.as_u64())
|
||||||
.unwrap_or(32) as usize;
|
.unwrap_or(32) as usize;
|
||||||
|
|
||||||
if !(1..=1024).contains(&num_bytes) {
|
if num_bytes < state.config.kms_generate_data_key_min_bytes
|
||||||
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
|
|| num_bytes > state.config.kms_generate_data_key_max_bytes
|
||||||
|
{
|
||||||
|
return json_err(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
&format!(
|
||||||
|
"NumberOfBytes must be {}-{}",
|
||||||
|
state.config.kms_generate_data_key_min_bytes,
|
||||||
|
state.config.kms_generate_data_key_max_bytes
|
||||||
|
),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut bytes = vec![0u8; num_bytes];
|
let mut bytes = vec![0u8; num_bytes];
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -66,7 +66,7 @@ pub async fn login_submit(
|
|||||||
let next = form
|
let next = form
|
||||||
.next
|
.next
|
||||||
.as_deref()
|
.as_deref()
|
||||||
.filter(|n| n.starts_with("/ui/") || *n == "/ui")
|
.filter(|n| is_allowed_redirect(n, &state.config.allowed_redirect_hosts))
|
||||||
.unwrap_or("/ui/buckets")
|
.unwrap_or("/ui/buckets")
|
||||||
.to_string();
|
.to_string();
|
||||||
Redirect::to(&next).into_response()
|
Redirect::to(&next).into_response()
|
||||||
@@ -80,6 +80,32 @@ pub async fn login_submit(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn is_allowed_redirect(target: &str, allowed_hosts: &[String]) -> bool {
|
||||||
|
if target == "/ui" || target.starts_with("/ui/") {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
let Some(rest) = target
|
||||||
|
.strip_prefix("https://")
|
||||||
|
.or_else(|| target.strip_prefix("http://"))
|
||||||
|
else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
let host = rest
|
||||||
|
.split('/')
|
||||||
|
.next()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.split('@')
|
||||||
|
.last()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.split(':')
|
||||||
|
.next()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.to_ascii_lowercase();
|
||||||
|
allowed_hosts
|
||||||
|
.iter()
|
||||||
|
.any(|allowed| allowed.eq_ignore_ascii_case(&host))
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
|
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
|
||||||
session.write(|s| {
|
session.write(|s| {
|
||||||
s.user_id = None;
|
s.user_id = None;
|
||||||
@@ -91,16 +117,6 @@ pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
|
|||||||
Redirect::to("/login").into_response()
|
Redirect::to("/login").into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn csrf_error_page(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(session): Extension<SessionHandle>,
|
|
||||||
) -> Response {
|
|
||||||
let ctx = base_context(&session, None);
|
|
||||||
let mut resp = render(&state, "csrf_error.html", &ctx);
|
|
||||||
*resp.status_mut() = StatusCode::FORBIDDEN;
|
|
||||||
resp
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn root_redirect() -> Response {
|
pub async fn root_redirect() -> Response {
|
||||||
Redirect::to("/ui/buckets").into_response()
|
Redirect::to("/ui/buckets").into_response()
|
||||||
}
|
}
|
||||||
@@ -49,6 +49,8 @@ const AWS_QUERY_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
|
|||||||
.remove(b'.')
|
.remove(b'.')
|
||||||
.remove(b'~');
|
.remove(b'~');
|
||||||
|
|
||||||
|
const UI_OBJECT_BROWSER_MAX_KEYS: usize = 5000;
|
||||||
|
|
||||||
fn url_templates_for(bucket: &str) -> Value {
|
fn url_templates_for(bucket: &str) -> Value {
|
||||||
json!({
|
json!({
|
||||||
"download": format!("/ui/buckets/{}/objects/KEY_PLACEHOLDER/download", bucket),
|
"download": format!("/ui/buckets/{}/objects/KEY_PLACEHOLDER/download", bucket),
|
||||||
@@ -117,7 +119,10 @@ fn storage_status(err: &StorageError) -> StatusCode {
|
|||||||
match err {
|
match err {
|
||||||
StorageError::BucketNotFound(_)
|
StorageError::BucketNotFound(_)
|
||||||
| StorageError::ObjectNotFound { .. }
|
| StorageError::ObjectNotFound { .. }
|
||||||
|
| StorageError::VersionNotFound { .. }
|
||||||
| StorageError::UploadNotFound(_) => StatusCode::NOT_FOUND,
|
| StorageError::UploadNotFound(_) => StatusCode::NOT_FOUND,
|
||||||
|
StorageError::DeleteMarker { .. } => StatusCode::NOT_FOUND,
|
||||||
|
StorageError::MethodNotAllowed(_) => StatusCode::METHOD_NOT_ALLOWED,
|
||||||
StorageError::InvalidBucketName(_)
|
StorageError::InvalidBucketName(_)
|
||||||
| StorageError::InvalidObjectKey(_)
|
| StorageError::InvalidObjectKey(_)
|
||||||
| StorageError::InvalidRange
|
| StorageError::InvalidRange
|
||||||
@@ -184,10 +189,7 @@ fn safe_attachment_filename(key: &str) -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn parse_api_base(state: &AppState) -> String {
|
fn parse_api_base(state: &AppState) -> String {
|
||||||
std::env::var("API_BASE_URL")
|
state.config.api_base_url.trim_end_matches('/').to_string()
|
||||||
.unwrap_or_else(|_| format!("http://{}", state.config.bind_addr))
|
|
||||||
.trim_end_matches('/')
|
|
||||||
.to_string()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aws_query_encode(value: &str) -> String {
|
fn aws_query_encode(value: &str) -> String {
|
||||||
@@ -904,6 +906,35 @@ pub struct ListObjectsQuery {
|
|||||||
pub prefix: Option<String>,
|
pub prefix: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub start_after: Option<String>,
|
pub start_after: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub delimiter: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn object_json(bucket_name: &str, o: &myfsio_common::types::ObjectMeta) -> Value {
|
||||||
|
json!({
|
||||||
|
"key": o.key,
|
||||||
|
"size": o.size,
|
||||||
|
"last_modified": o.last_modified.to_rfc3339(),
|
||||||
|
"last_modified_iso": o.last_modified.to_rfc3339(),
|
||||||
|
"last_modified_display": o.last_modified.format("%Y-%m-%d %H:%M:%S").to_string(),
|
||||||
|
"etag": o.etag.clone().unwrap_or_default(),
|
||||||
|
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
||||||
|
"content_type": o.content_type.clone().unwrap_or_default(),
|
||||||
|
"download_url": build_ui_object_url(bucket_name, &o.key, "download"),
|
||||||
|
"preview_url": build_ui_object_url(bucket_name, &o.key, "preview"),
|
||||||
|
"delete_endpoint": build_ui_object_url(bucket_name, &o.key, "delete"),
|
||||||
|
"presign_endpoint": build_ui_object_url(bucket_name, &o.key, "presign"),
|
||||||
|
"metadata_url": build_ui_object_url(bucket_name, &o.key, "metadata"),
|
||||||
|
"versions_endpoint": build_ui_object_url(bucket_name, &o.key, "versions"),
|
||||||
|
"restore_template": format!(
|
||||||
|
"/ui/buckets/{}/objects/{}/restore/VERSION_ID_PLACEHOLDER",
|
||||||
|
bucket_name,
|
||||||
|
encode_object_key(&o.key)
|
||||||
|
),
|
||||||
|
"tags_url": build_ui_object_url(bucket_name, &o.key, "tags"),
|
||||||
|
"copy_url": build_ui_object_url(bucket_name, &o.key, "copy"),
|
||||||
|
"move_url": build_ui_object_url(bucket_name, &o.key, "move"),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list_bucket_objects(
|
pub async fn list_bucket_objects(
|
||||||
@@ -917,6 +948,49 @@ pub async fn list_bucket_objects(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let max_keys = q.max_keys.unwrap_or(1000).min(5000);
|
let max_keys = q.max_keys.unwrap_or(1000).min(5000);
|
||||||
|
let versioning_enabled = state
|
||||||
|
.storage
|
||||||
|
.is_versioning_enabled(&bucket_name)
|
||||||
|
.await
|
||||||
|
.unwrap_or(false);
|
||||||
|
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
|
||||||
|
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
|
||||||
|
|
||||||
|
let use_shallow = q.delimiter.as_deref() == Some("/");
|
||||||
|
|
||||||
|
if use_shallow {
|
||||||
|
let params = myfsio_common::types::ShallowListParams {
|
||||||
|
prefix: q.prefix.clone().unwrap_or_default(),
|
||||||
|
delimiter: "/".to_string(),
|
||||||
|
max_keys,
|
||||||
|
continuation_token: q.continuation_token.clone(),
|
||||||
|
};
|
||||||
|
return match state
|
||||||
|
.storage
|
||||||
|
.list_objects_shallow(&bucket_name, ¶ms)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(res) => {
|
||||||
|
let objects: Vec<Value> = res
|
||||||
|
.objects
|
||||||
|
.iter()
|
||||||
|
.map(|o| object_json(&bucket_name, o))
|
||||||
|
.collect();
|
||||||
|
Json(json!({
|
||||||
|
"versioning_enabled": versioning_enabled,
|
||||||
|
"total_count": total_count,
|
||||||
|
"is_truncated": res.is_truncated,
|
||||||
|
"next_continuation_token": res.next_continuation_token,
|
||||||
|
"url_templates": url_templates_for(&bucket_name),
|
||||||
|
"objects": objects,
|
||||||
|
"common_prefixes": res.common_prefixes,
|
||||||
|
}))
|
||||||
|
.into_response()
|
||||||
|
}
|
||||||
|
Err(e) => storage_json_error(e),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
let params = ListParams {
|
let params = ListParams {
|
||||||
max_keys,
|
max_keys,
|
||||||
continuation_token: q.continuation_token.clone(),
|
continuation_token: q.continuation_token.clone(),
|
||||||
@@ -924,46 +998,12 @@ pub async fn list_bucket_objects(
|
|||||||
start_after: q.start_after.clone(),
|
start_after: q.start_after.clone(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let versioning_enabled = state
|
|
||||||
.storage
|
|
||||||
.is_versioning_enabled(&bucket_name)
|
|
||||||
.await
|
|
||||||
.unwrap_or(false);
|
|
||||||
|
|
||||||
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
|
|
||||||
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
|
|
||||||
|
|
||||||
match state.storage.list_objects(&bucket_name, ¶ms).await {
|
match state.storage.list_objects(&bucket_name, ¶ms).await {
|
||||||
Ok(res) => {
|
Ok(res) => {
|
||||||
let objects: Vec<Value> = res
|
let objects: Vec<Value> = res
|
||||||
.objects
|
.objects
|
||||||
.iter()
|
.iter()
|
||||||
.map(|o| {
|
.map(|o| object_json(&bucket_name, o))
|
||||||
json!({
|
|
||||||
"key": o.key,
|
|
||||||
"size": o.size,
|
|
||||||
"last_modified": o.last_modified.to_rfc3339(),
|
|
||||||
"last_modified_iso": o.last_modified.to_rfc3339(),
|
|
||||||
"last_modified_display": o.last_modified.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
"etag": o.etag.clone().unwrap_or_default(),
|
|
||||||
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
|
||||||
"content_type": o.content_type.clone().unwrap_or_default(),
|
|
||||||
"download_url": build_ui_object_url(&bucket_name, &o.key, "download"),
|
|
||||||
"preview_url": build_ui_object_url(&bucket_name, &o.key, "preview"),
|
|
||||||
"delete_endpoint": build_ui_object_url(&bucket_name, &o.key, "delete"),
|
|
||||||
"presign_endpoint": build_ui_object_url(&bucket_name, &o.key, "presign"),
|
|
||||||
"metadata_url": build_ui_object_url(&bucket_name, &o.key, "metadata"),
|
|
||||||
"versions_endpoint": build_ui_object_url(&bucket_name, &o.key, "versions"),
|
|
||||||
"restore_template": format!(
|
|
||||||
"/ui/buckets/{}/objects/{}/restore/VERSION_ID_PLACEHOLDER",
|
|
||||||
bucket_name,
|
|
||||||
encode_object_key(&o.key)
|
|
||||||
),
|
|
||||||
"tags_url": build_ui_object_url(&bucket_name, &o.key, "tags"),
|
|
||||||
"copy_url": build_ui_object_url(&bucket_name, &o.key, "copy"),
|
|
||||||
"move_url": build_ui_object_url(&bucket_name, &o.key, "move"),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Json(json!({
|
Json(json!({
|
||||||
@@ -1006,39 +1046,62 @@ pub async fn stream_bucket_objects(
|
|||||||
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
|
let stats = state.storage.bucket_stats(&bucket_name).await.ok();
|
||||||
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
|
let total_count = stats.as_ref().map(|s| s.objects).unwrap_or(0);
|
||||||
|
|
||||||
let mut lines: Vec<String> = Vec::new();
|
let use_delimiter = q.delimiter.as_deref() == Some("/");
|
||||||
lines.push(
|
let prefix = q.prefix.clone().unwrap_or_default();
|
||||||
json!({
|
|
||||||
|
let (tx, rx) = tokio::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>(64);
|
||||||
|
|
||||||
|
let meta_line = json!({
|
||||||
"type": "meta",
|
"type": "meta",
|
||||||
"url_templates": url_templates_for(&bucket_name),
|
"url_templates": url_templates_for(&bucket_name),
|
||||||
"versioning_enabled": versioning_enabled,
|
"versioning_enabled": versioning_enabled,
|
||||||
})
|
})
|
||||||
.to_string(),
|
.to_string()
|
||||||
);
|
+ "\n";
|
||||||
lines.push(json!({ "type": "count", "total_count": total_count }).to_string());
|
let count_line = json!({ "type": "count", "total_count": total_count }).to_string() + "\n";
|
||||||
|
|
||||||
let use_delimiter = q.delimiter.as_deref() == Some("/");
|
let storage = state.storage.clone();
|
||||||
let prefix = q.prefix.clone().unwrap_or_default();
|
let bucket = bucket_name.clone();
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if tx
|
||||||
|
.send(Ok(bytes::Bytes::from(meta_line.into_bytes())))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if tx
|
||||||
|
.send(Ok(bytes::Bytes::from(count_line.into_bytes())))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if use_delimiter {
|
if use_delimiter {
|
||||||
|
let mut token: Option<String> = None;
|
||||||
|
loop {
|
||||||
let params = myfsio_common::types::ShallowListParams {
|
let params = myfsio_common::types::ShallowListParams {
|
||||||
prefix: prefix.clone(),
|
prefix: prefix.clone(),
|
||||||
delimiter: "/".to_string(),
|
delimiter: "/".to_string(),
|
||||||
max_keys: 5000,
|
max_keys: UI_OBJECT_BROWSER_MAX_KEYS,
|
||||||
continuation_token: None,
|
continuation_token: token.clone(),
|
||||||
};
|
};
|
||||||
match state
|
match storage.list_objects_shallow(&bucket, ¶ms).await {
|
||||||
.storage
|
|
||||||
.list_objects_shallow(&bucket_name, ¶ms)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(res) => {
|
Ok(res) => {
|
||||||
for p in &res.common_prefixes {
|
for p in &res.common_prefixes {
|
||||||
lines.push(json!({ "type": "folder", "prefix": p }).to_string());
|
let line = json!({ "type": "folder", "prefix": p }).to_string() + "\n";
|
||||||
|
if tx
|
||||||
|
.send(Ok(bytes::Bytes::from(line.into_bytes())))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
for o in &res.objects {
|
for o in &res.objects {
|
||||||
lines.push(
|
let line = json!({
|
||||||
json!({
|
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"key": o.key,
|
"key": o.key,
|
||||||
"size": o.size,
|
"size": o.size,
|
||||||
@@ -1048,11 +1111,28 @@ pub async fn stream_bucket_objects(
|
|||||||
"etag": o.etag.clone().unwrap_or_default(),
|
"etag": o.etag.clone().unwrap_or_default(),
|
||||||
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
||||||
})
|
})
|
||||||
.to_string(),
|
.to_string()
|
||||||
);
|
+ "\n";
|
||||||
|
if tx
|
||||||
|
.send(Ok(bytes::Bytes::from(line.into_bytes())))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !res.is_truncated || res.next_continuation_token.is_none() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
token = res.next_continuation_token;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let line =
|
||||||
|
json!({ "type": "error", "error": e.to_string() }).to_string() + "\n";
|
||||||
|
let _ = tx.send(Ok(bytes::Bytes::from(line.into_bytes()))).await;
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(e) => lines.push(json!({ "type": "error", "error": e.to_string() }).to_string()),
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let mut token: Option<String> = None;
|
let mut token: Option<String> = None;
|
||||||
@@ -1067,11 +1147,10 @@ pub async fn stream_bucket_objects(
|
|||||||
},
|
},
|
||||||
start_after: None,
|
start_after: None,
|
||||||
};
|
};
|
||||||
match state.storage.list_objects(&bucket_name, ¶ms).await {
|
match storage.list_objects(&bucket, ¶ms).await {
|
||||||
Ok(res) => {
|
Ok(res) => {
|
||||||
for o in &res.objects {
|
for o in &res.objects {
|
||||||
lines.push(
|
let line = json!({
|
||||||
json!({
|
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"key": o.key,
|
"key": o.key,
|
||||||
"size": o.size,
|
"size": o.size,
|
||||||
@@ -1081,8 +1160,15 @@ pub async fn stream_bucket_objects(
|
|||||||
"etag": o.etag.clone().unwrap_or_default(),
|
"etag": o.etag.clone().unwrap_or_default(),
|
||||||
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
"storage_class": o.storage_class.clone().unwrap_or_else(|| "STANDARD".to_string()),
|
||||||
})
|
})
|
||||||
.to_string(),
|
.to_string()
|
||||||
);
|
+ "\n";
|
||||||
|
if tx
|
||||||
|
.send(Ok(bytes::Bytes::from(line.into_bytes())))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if !res.is_truncated || res.next_continuation_token.is_none() {
|
if !res.is_truncated || res.next_continuation_token.is_none() {
|
||||||
break;
|
break;
|
||||||
@@ -1090,21 +1176,32 @@ pub async fn stream_bucket_objects(
|
|||||||
token = res.next_continuation_token;
|
token = res.next_continuation_token;
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
lines.push(json!({ "type": "error", "error": e.to_string() }).to_string());
|
let line =
|
||||||
break;
|
json!({ "type": "error", "error": e.to_string() }).to_string() + "\n";
|
||||||
|
let _ = tx.send(Ok(bytes::Bytes::from(line.into_bytes()))).await;
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
lines.push(json!({ "type": "done" }).to_string());
|
let done_line = json!({ "type": "done" }).to_string() + "\n";
|
||||||
|
let _ = tx
|
||||||
|
.send(Ok(bytes::Bytes::from(done_line.into_bytes())))
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let stream = tokio_stream::wrappers::ReceiverStream::new(rx);
|
||||||
|
let body = Body::from_stream(stream);
|
||||||
|
|
||||||
let body = lines.join("\n") + "\n";
|
|
||||||
let mut headers = HeaderMap::new();
|
let mut headers = HeaderMap::new();
|
||||||
headers.insert(
|
headers.insert(
|
||||||
header::CONTENT_TYPE,
|
header::CONTENT_TYPE,
|
||||||
"application/x-ndjson; charset=utf-8".parse().unwrap(),
|
"application/x-ndjson; charset=utf-8".parse().unwrap(),
|
||||||
);
|
);
|
||||||
|
headers.insert(header::CACHE_CONTROL, "no-cache".parse().unwrap());
|
||||||
|
headers.insert("x-accel-buffering", "no".parse().unwrap());
|
||||||
|
|
||||||
(StatusCode::OK, headers, body).into_response()
|
(StatusCode::OK, headers, body).into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1122,7 +1219,7 @@ pub async fn list_bucket_folders(
|
|||||||
let params = myfsio_common::types::ShallowListParams {
|
let params = myfsio_common::types::ShallowListParams {
|
||||||
prefix: prefix.clone(),
|
prefix: prefix.clone(),
|
||||||
delimiter: "/".to_string(),
|
delimiter: "/".to_string(),
|
||||||
max_keys: 5000,
|
max_keys: UI_OBJECT_BROWSER_MAX_KEYS,
|
||||||
continuation_token: None,
|
continuation_token: None,
|
||||||
};
|
};
|
||||||
match state
|
match state
|
||||||
@@ -1153,13 +1250,6 @@ pub async fn list_copy_targets(
|
|||||||
Json(json!({ "buckets": buckets })).into_response()
|
Json(json!({ "buckets": buckets })).into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn json_not_implemented() -> Response {
|
|
||||||
json_error(
|
|
||||||
StatusCode::NOT_IMPLEMENTED,
|
|
||||||
"This feature is not implemented yet",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
pub struct ConnectionTestPayload {
|
pub struct ConnectionTestPayload {
|
||||||
pub endpoint_url: String,
|
pub endpoint_url: String,
|
||||||
@@ -2001,12 +2091,15 @@ pub async fn complete_multipart_upload(
|
|||||||
.complete_multipart(&bucket_name, &upload_id, &parts)
|
.complete_multipart(&bucket_name, &upload_id, &parts)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
Ok(meta) => json_ok(json!({
|
Ok(meta) => {
|
||||||
|
super::trigger_replication(&state, &bucket_name, &meta.key, "write");
|
||||||
|
json_ok(json!({
|
||||||
"key": meta.key,
|
"key": meta.key,
|
||||||
"size": meta.size,
|
"size": meta.size,
|
||||||
"etag": meta.etag.unwrap_or_default(),
|
"etag": meta.etag.unwrap_or_default(),
|
||||||
"last_modified": meta.last_modified.to_rfc3339(),
|
"last_modified": meta.last_modified.to_rfc3339(),
|
||||||
})),
|
}))
|
||||||
|
}
|
||||||
Err(err) => storage_json_error(err),
|
Err(err) => storage_json_error(err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2411,8 +2504,11 @@ async fn update_object_tags(state: &AppState, bucket: &str, key: &str, body: Bod
|
|||||||
Err(response) => return response,
|
Err(response) => return response,
|
||||||
};
|
};
|
||||||
|
|
||||||
if payload.tags.len() > 50 {
|
if payload.tags.len() > state.config.object_tag_limit {
|
||||||
return json_error(StatusCode::BAD_REQUEST, "Maximum 50 tags allowed");
|
return json_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
format!("Maximum {} tags allowed", state.config.object_tag_limit),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let tags = payload
|
let tags = payload
|
||||||
@@ -2469,13 +2565,16 @@ async fn copy_object_json(state: &AppState, bucket: &str, key: &str, body: Body)
|
|||||||
.copy_object(bucket, key, dest_bucket, dest_key)
|
.copy_object(bucket, key, dest_bucket, dest_key)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
Ok(_) => Json(json!({
|
Ok(_) => {
|
||||||
|
super::trigger_replication(state, dest_bucket, dest_key, "write");
|
||||||
|
Json(json!({
|
||||||
"status": "ok",
|
"status": "ok",
|
||||||
"message": format!("Copied to {}/{}", dest_bucket, dest_key),
|
"message": format!("Copied to {}/{}", dest_bucket, dest_key),
|
||||||
"dest_bucket": dest_bucket,
|
"dest_bucket": dest_bucket,
|
||||||
"dest_key": dest_key,
|
"dest_key": dest_key,
|
||||||
}))
|
}))
|
||||||
.into_response(),
|
.into_response()
|
||||||
|
}
|
||||||
Err(err) => storage_json_error(err),
|
Err(err) => storage_json_error(err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2502,13 +2601,17 @@ async fn move_object_json(state: &AppState, bucket: &str, key: &str, body: Body)
|
|||||||
|
|
||||||
match state.storage.copy_object(bucket, key, dest_bucket, dest_key).await {
|
match state.storage.copy_object(bucket, key, dest_bucket, dest_key).await {
|
||||||
Ok(_) => match state.storage.delete_object(bucket, key).await {
|
Ok(_) => match state.storage.delete_object(bucket, key).await {
|
||||||
Ok(()) => Json(json!({
|
Ok(_) => {
|
||||||
|
super::trigger_replication(state, dest_bucket, dest_key, "write");
|
||||||
|
super::trigger_replication(state, bucket, key, "delete");
|
||||||
|
Json(json!({
|
||||||
"status": "ok",
|
"status": "ok",
|
||||||
"message": format!("Moved to {}/{}", dest_bucket, dest_key),
|
"message": format!("Moved to {}/{}", dest_bucket, dest_key),
|
||||||
"dest_bucket": dest_bucket,
|
"dest_bucket": dest_bucket,
|
||||||
"dest_key": dest_key,
|
"dest_key": dest_key,
|
||||||
}))
|
}))
|
||||||
.into_response(),
|
.into_response()
|
||||||
|
}
|
||||||
Err(_) => Json(json!({
|
Err(_) => Json(json!({
|
||||||
"status": "partial",
|
"status": "partial",
|
||||||
"message": format!("Copied to {}/{} but failed to delete source", dest_bucket, dest_key),
|
"message": format!("Copied to {}/{} but failed to delete source", dest_bucket, dest_key),
|
||||||
@@ -2561,6 +2664,7 @@ async fn delete_object_json(
|
|||||||
if let Err(err) = state.storage.delete_object(bucket, key).await {
|
if let Err(err) = state.storage.delete_object(bucket, key).await {
|
||||||
return storage_json_error(err);
|
return storage_json_error(err);
|
||||||
}
|
}
|
||||||
|
super::trigger_replication(state, bucket, key, "delete");
|
||||||
if let Err(err) = purge_object_versions_for_key(state, bucket, key).await {
|
if let Err(err) = purge_object_versions_for_key(state, bucket, key).await {
|
||||||
return json_error(StatusCode::BAD_REQUEST, err);
|
return json_error(StatusCode::BAD_REQUEST, err);
|
||||||
}
|
}
|
||||||
@@ -2572,11 +2676,14 @@ async fn delete_object_json(
|
|||||||
}
|
}
|
||||||
|
|
||||||
match state.storage.delete_object(bucket, key).await {
|
match state.storage.delete_object(bucket, key).await {
|
||||||
Ok(()) => Json(json!({
|
Ok(_) => {
|
||||||
|
super::trigger_replication(state, bucket, key, "delete");
|
||||||
|
Json(json!({
|
||||||
"status": "ok",
|
"status": "ok",
|
||||||
"message": format!("Deleted '{}'", key),
|
"message": format!("Deleted '{}'", key),
|
||||||
}))
|
}))
|
||||||
.into_response(),
|
.into_response()
|
||||||
|
}
|
||||||
Err(err) => storage_json_error(err),
|
Err(err) => storage_json_error(err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2637,6 +2744,7 @@ async fn restore_object_version_json(
|
|||||||
{
|
{
|
||||||
return storage_json_error(err);
|
return storage_json_error(err);
|
||||||
}
|
}
|
||||||
|
super::trigger_replication(state, bucket, key, "write");
|
||||||
|
|
||||||
let mut message = format!("Restored '{}'", key);
|
let mut message = format!("Restored '{}'", key);
|
||||||
if live_exists && versioning_enabled {
|
if live_exists && versioning_enabled {
|
||||||
@@ -2686,6 +2794,14 @@ fn parse_object_post_action(rest: &str) -> Option<(String, ObjectPostAction)> {
|
|||||||
ObjectPostAction::Restore(version_id.to_string()),
|
ObjectPostAction::Restore(version_id.to_string()),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
if let Some(key_with_version) = rest.strip_suffix("/restore") {
|
||||||
|
if let Some((key, version_id)) = key_with_version.rsplit_once("/versions/") {
|
||||||
|
return Some((
|
||||||
|
key.to_string(),
|
||||||
|
ObjectPostAction::Restore(version_id.to_string()),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
for (suffix, action) in [
|
for (suffix, action) in [
|
||||||
("/delete", ObjectPostAction::Delete),
|
("/delete", ObjectPostAction::Delete),
|
||||||
("/presign", ObjectPostAction::Presign),
|
("/presign", ObjectPostAction::Presign),
|
||||||
@@ -2824,13 +2940,23 @@ pub async fn bulk_delete_objects(
|
|||||||
"No objects found under the selected folders",
|
"No objects found under the selected folders",
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
if keys.len() > state.config.bulk_delete_max_keys {
|
||||||
|
return json_error(
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
format!(
|
||||||
|
"Bulk delete supports at most {} keys",
|
||||||
|
state.config.bulk_delete_max_keys
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
let mut deleted = Vec::new();
|
let mut deleted = Vec::new();
|
||||||
let mut errors = Vec::new();
|
let mut errors = Vec::new();
|
||||||
|
|
||||||
for key in keys {
|
for key in keys {
|
||||||
match state.storage.delete_object(&bucket_name, &key).await {
|
match state.storage.delete_object(&bucket_name, &key).await {
|
||||||
Ok(()) => {
|
Ok(_) => {
|
||||||
|
super::trigger_replication(&state, &bucket_name, &key, "delete");
|
||||||
if payload.purge_versions {
|
if payload.purge_versions {
|
||||||
if let Err(err) =
|
if let Err(err) =
|
||||||
purge_object_versions_for_key(&state, &bucket_name, &key).await
|
purge_object_versions_for_key(&state, &bucket_name, &key).await
|
||||||
@@ -3045,6 +3171,7 @@ pub async fn archived_post_dispatch(
|
|||||||
match purge_object_versions_for_key(&state, &bucket_name, key).await {
|
match purge_object_versions_for_key(&state, &bucket_name, key).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
let _ = state.storage.delete_object(&bucket_name, key).await;
|
let _ = state.storage.delete_object(&bucket_name, key).await;
|
||||||
|
super::trigger_replication(&state, &bucket_name, key, "delete");
|
||||||
Json(json!({
|
Json(json!({
|
||||||
"status": "ok",
|
"status": "ok",
|
||||||
"message": format!("Removed archived versions for '{}'", key),
|
"message": format!("Removed archived versions for '{}'", key),
|
||||||
@@ -3163,20 +3290,36 @@ fn apply_history_limit(mut value: Value, limit: Option<usize>) -> Value {
|
|||||||
value
|
value
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn bucket_stub_json(Extension(_session): Extension<SessionHandle>) -> Response {
|
pub async fn lifecycle_history(
|
||||||
Json(json!({"status": "not_implemented", "items": []})).into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn lifecycle_history_stub(
|
|
||||||
State(state): State<AppState>,
|
State(state): State<AppState>,
|
||||||
Extension(_session): Extension<SessionHandle>,
|
Extension(_session): Extension<SessionHandle>,
|
||||||
Path(_bucket_name): Path<String>,
|
Path(bucket_name): Path<String>,
|
||||||
|
Query(params): Query<HashMap<String, String>>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
Json(json!({
|
let limit = params
|
||||||
"enabled": state.config.lifecycle_enabled,
|
.get("limit")
|
||||||
|
.and_then(|value| value.parse::<usize>().ok())
|
||||||
|
.unwrap_or(50);
|
||||||
|
let offset = params
|
||||||
|
.get("offset")
|
||||||
|
.and_then(|value| value.parse::<usize>().ok())
|
||||||
|
.unwrap_or(0);
|
||||||
|
if !state.config.lifecycle_enabled {
|
||||||
|
return Json(json!({
|
||||||
"executions": [],
|
"executions": [],
|
||||||
"total": 0,
|
"total": 0,
|
||||||
|
"limit": limit,
|
||||||
|
"offset": offset,
|
||||||
|
"enabled": false,
|
||||||
}))
|
}))
|
||||||
|
.into_response();
|
||||||
|
}
|
||||||
|
Json(crate::services::lifecycle::read_history(
|
||||||
|
&state.config.storage_root,
|
||||||
|
&bucket_name,
|
||||||
|
limit,
|
||||||
|
offset,
|
||||||
|
))
|
||||||
.into_response()
|
.into_response()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3258,14 +3401,32 @@ pub async fn retry_replication_failure(
|
|||||||
Path(bucket_name): Path<String>,
|
Path(bucket_name): Path<String>,
|
||||||
Query(q): Query<ReplicationObjectKeyQuery>,
|
Query(q): Query<ReplicationObjectKeyQuery>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
let object_key = q.object_key.trim();
|
retry_replication_failure_key(&state, &bucket_name, q.object_key.trim()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn retry_replication_failure_path(
|
||||||
|
State(state): State<AppState>,
|
||||||
|
Extension(_session): Extension<SessionHandle>,
|
||||||
|
Path((bucket_name, rest)): Path<(String, String)>,
|
||||||
|
) -> Response {
|
||||||
|
let Some(object_key) = rest.strip_suffix("/retry") else {
|
||||||
|
return json_error(StatusCode::NOT_FOUND, "Unknown replication failure action");
|
||||||
|
};
|
||||||
|
retry_replication_failure_key(&state, &bucket_name, object_key.trim()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn retry_replication_failure_key(
|
||||||
|
state: &AppState,
|
||||||
|
bucket_name: &str,
|
||||||
|
object_key: &str,
|
||||||
|
) -> Response {
|
||||||
if object_key.is_empty() {
|
if object_key.is_empty() {
|
||||||
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
|
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
|
||||||
}
|
}
|
||||||
|
|
||||||
if state
|
if state
|
||||||
.replication
|
.replication
|
||||||
.retry_failed(&bucket_name, object_key)
|
.retry_failed(bucket_name, object_key)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
json_ok(json!({
|
json_ok(json!({
|
||||||
@@ -3296,12 +3457,27 @@ pub async fn dismiss_replication_failure(
|
|||||||
Path(bucket_name): Path<String>,
|
Path(bucket_name): Path<String>,
|
||||||
Query(q): Query<ReplicationObjectKeyQuery>,
|
Query(q): Query<ReplicationObjectKeyQuery>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
let object_key = q.object_key.trim();
|
dismiss_replication_failure_key(&state, &bucket_name, q.object_key.trim())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn dismiss_replication_failure_path(
|
||||||
|
State(state): State<AppState>,
|
||||||
|
Extension(_session): Extension<SessionHandle>,
|
||||||
|
Path((bucket_name, object_key)): Path<(String, String)>,
|
||||||
|
) -> Response {
|
||||||
|
dismiss_replication_failure_key(&state, &bucket_name, object_key.trim())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dismiss_replication_failure_key(
|
||||||
|
state: &AppState,
|
||||||
|
bucket_name: &str,
|
||||||
|
object_key: &str,
|
||||||
|
) -> Response {
|
||||||
if object_key.is_empty() {
|
if object_key.is_empty() {
|
||||||
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
|
return json_error(StatusCode::BAD_REQUEST, "object_key is required");
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.replication.dismiss_failure(&bucket_name, object_key) {
|
if state.replication.dismiss_failure(bucket_name, object_key) {
|
||||||
json_ok(json!({
|
json_ok(json!({
|
||||||
"status": "dismissed",
|
"status": "dismissed",
|
||||||
"object_key": object_key,
|
"object_key": object_key,
|
||||||
@@ -1,8 +1,10 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use axum::body::Body;
|
||||||
use axum::extract::{Extension, Form, Path, Query, State};
|
use axum::extract::{Extension, Form, Path, Query, State};
|
||||||
use axum::http::{header, HeaderMap, StatusCode};
|
use axum::http::{header, HeaderMap, StatusCode};
|
||||||
use axum::response::{IntoResponse, Redirect, Response};
|
use axum::response::{IntoResponse, Redirect, Response};
|
||||||
|
use http_body_util::BodyExt;
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
use tera::Context;
|
use tera::Context;
|
||||||
|
|
||||||
@@ -203,6 +205,57 @@ fn wants_json(headers: &HeaderMap) -> bool {
|
|||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn parse_form_any(
|
||||||
|
headers: &HeaderMap,
|
||||||
|
body: Body,
|
||||||
|
) -> Result<HashMap<String, String>, String> {
|
||||||
|
let content_type = headers
|
||||||
|
.get(header::CONTENT_TYPE)
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.unwrap_or("")
|
||||||
|
.to_string();
|
||||||
|
let is_multipart = content_type
|
||||||
|
.to_ascii_lowercase()
|
||||||
|
.starts_with("multipart/form-data");
|
||||||
|
|
||||||
|
let bytes = body
|
||||||
|
.collect()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to read request body: {}", e))?
|
||||||
|
.to_bytes();
|
||||||
|
|
||||||
|
if is_multipart {
|
||||||
|
let boundary = multer::parse_boundary(&content_type)
|
||||||
|
.map_err(|_| "Missing multipart boundary".to_string())?;
|
||||||
|
let stream = futures::stream::once(async move { Ok::<_, std::io::Error>(bytes) });
|
||||||
|
let mut multipart = multer::Multipart::new(stream, boundary);
|
||||||
|
let mut out = HashMap::new();
|
||||||
|
while let Some(field) = multipart
|
||||||
|
.next_field()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Malformed multipart body: {}", e))?
|
||||||
|
{
|
||||||
|
let name = match field.name() {
|
||||||
|
Some(name) => name.to_string(),
|
||||||
|
None => continue,
|
||||||
|
};
|
||||||
|
if field.file_name().is_some() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let value = field
|
||||||
|
.text()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Invalid multipart field '{}': {}", name, e))?;
|
||||||
|
out.insert(name, value);
|
||||||
|
}
|
||||||
|
Ok(out)
|
||||||
|
} else {
|
||||||
|
let parsed: Vec<(String, String)> = serde_urlencoded::from_bytes(&bytes)
|
||||||
|
.map_err(|e| format!("Invalid form body: {}", e))?;
|
||||||
|
Ok(parsed.into_iter().collect())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn bucket_tab_redirect(bucket_name: &str, tab: &str) -> Response {
|
fn bucket_tab_redirect(bucket_name: &str, tab: &str) -> Response {
|
||||||
Redirect::to(&format!("/ui/buckets/{}?tab={}", bucket_name, tab)).into_response()
|
Redirect::to(&format!("/ui/buckets/{}?tab={}", bucket_name, tab)).into_response()
|
||||||
}
|
}
|
||||||
@@ -231,10 +284,7 @@ fn default_public_policy(bucket_name: &str) -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn parse_api_base(state: &AppState) -> (String, String) {
|
fn parse_api_base(state: &AppState) -> (String, String) {
|
||||||
let api_base = std::env::var("API_BASE_URL")
|
let api_base = state.config.api_base_url.trim_end_matches('/').to_string();
|
||||||
.unwrap_or_else(|_| format!("http://{}", state.config.bind_addr))
|
|
||||||
.trim_end_matches('/')
|
|
||||||
.to_string();
|
|
||||||
let api_host = api_base
|
let api_host = api_base
|
||||||
.split("://")
|
.split("://")
|
||||||
.nth(1)
|
.nth(1)
|
||||||
@@ -257,11 +307,23 @@ fn config_encryption_to_ui(value: Option<&Value>) -> Value {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn config_website_to_ui(value: Option<&Value>) -> Value {
|
fn config_website_to_ui(value: Option<&Value>) -> Value {
|
||||||
match value {
|
let parsed = match value {
|
||||||
Some(Value::Object(map)) => Value::Object(map.clone()),
|
Some(Value::Object(map)) => Value::Object(map.clone()),
|
||||||
Some(Value::String(s)) => serde_json::from_str(s).unwrap_or(Value::Null),
|
Some(Value::String(s)) => serde_json::from_str(s).unwrap_or(Value::Null),
|
||||||
_ => Value::Null,
|
_ => Value::Null,
|
||||||
}
|
};
|
||||||
|
|
||||||
|
let Some(map) = parsed.as_object() else {
|
||||||
|
return Value::Null;
|
||||||
|
};
|
||||||
|
|
||||||
|
json!({
|
||||||
|
"index_document": map
|
||||||
|
.get("index_document")
|
||||||
|
.and_then(Value::as_str)
|
||||||
|
.unwrap_or("index.html"),
|
||||||
|
"error_document": map.get("error_document").and_then(Value::as_str),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bucket_access_descriptor(
|
fn bucket_access_descriptor(
|
||||||
@@ -1161,16 +1223,13 @@ pub async fn sites_dashboard(
|
|||||||
ctx.insert("connections", &conns);
|
ctx.insert("connections", &conns);
|
||||||
ctx.insert(
|
ctx.insert(
|
||||||
"config_site_id",
|
"config_site_id",
|
||||||
&std::env::var("SITE_ID").unwrap_or_default(),
|
&state.config.site_id.clone().unwrap_or_default(),
|
||||||
);
|
);
|
||||||
ctx.insert(
|
ctx.insert(
|
||||||
"config_site_endpoint",
|
"config_site_endpoint",
|
||||||
&std::env::var("SITE_ENDPOINT").unwrap_or_default(),
|
&state.config.site_endpoint.clone().unwrap_or_default(),
|
||||||
);
|
|
||||||
ctx.insert(
|
|
||||||
"config_site_region",
|
|
||||||
&std::env::var("SITE_REGION").unwrap_or_else(|_| state.config.region.clone()),
|
|
||||||
);
|
);
|
||||||
|
ctx.insert("config_site_region", &state.config.site_region);
|
||||||
ctx.insert("topology", &json!({"sites": [], "connections": []}));
|
ctx.insert("topology", &json!({"sites": [], "connections": []}));
|
||||||
render(&state, "sites.html", &ctx)
|
render(&state, "sites.html", &ctx)
|
||||||
}
|
}
|
||||||
@@ -2107,9 +2166,26 @@ pub async fn create_bucket(
|
|||||||
State(state): State<AppState>,
|
State(state): State<AppState>,
|
||||||
Extension(session): Extension<SessionHandle>,
|
Extension(session): Extension<SessionHandle>,
|
||||||
headers: HeaderMap,
|
headers: HeaderMap,
|
||||||
axum::extract::Form(form): axum::extract::Form<CreateBucketForm>,
|
body: Body,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
let wants_json = wants_json(&headers);
|
let wants_json = wants_json(&headers);
|
||||||
|
let form = match parse_form_any(&headers, body).await {
|
||||||
|
Ok(fields) => CreateBucketForm {
|
||||||
|
bucket_name: fields.get("bucket_name").cloned().unwrap_or_default(),
|
||||||
|
csrf_token: fields.get("csrf_token").cloned().unwrap_or_default(),
|
||||||
|
},
|
||||||
|
Err(message) => {
|
||||||
|
if wants_json {
|
||||||
|
return (
|
||||||
|
StatusCode::BAD_REQUEST,
|
||||||
|
axum::Json(json!({ "error": message })),
|
||||||
|
)
|
||||||
|
.into_response();
|
||||||
|
}
|
||||||
|
session.write(|s| s.push_flash("danger", message));
|
||||||
|
return Redirect::to("/ui/buckets").into_response();
|
||||||
|
}
|
||||||
|
};
|
||||||
let bucket_name = form.bucket_name.trim().to_string();
|
let bucket_name = form.bucket_name.trim().to_string();
|
||||||
|
|
||||||
if bucket_name.is_empty() {
|
if bucket_name.is_empty() {
|
||||||
@@ -3003,15 +3079,3 @@ pub async fn update_bucket_website(
|
|||||||
.into_response(),
|
.into_response(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stub_post(Extension(session): Extension<SessionHandle>) -> Response {
|
|
||||||
session.write(|s| s.push_flash("info", "This action is not yet implemented in the Rust UI."));
|
|
||||||
Redirect::to("/ui/buckets").into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(serde::Deserialize)]
|
|
||||||
pub struct QueryArgs(#[serde(default)] pub HashMap<String, String>);
|
|
||||||
|
|
||||||
pub async fn json_stub(Query(_q): Query<QueryArgs>) -> Response {
|
|
||||||
axum::Json(json!({"status": "not_implemented", "items": []})).into_response()
|
|
||||||
}
|
|
||||||
@@ -9,7 +9,7 @@ pub mod templates;
|
|||||||
|
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
|
|
||||||
pub const SERVER_HEADER: &str = "MyFSIO";
|
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
|
||||||
|
|
||||||
pub fn create_ui_router(state: state::AppState) -> Router {
|
pub fn create_ui_router(state: state::AppState) -> Router {
|
||||||
use axum::routing::{delete, get, post, put};
|
use axum::routing::{delete, get, post, put};
|
||||||
@@ -21,7 +21,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
.route("/", get(ui::root_redirect))
|
.route("/", get(ui::root_redirect))
|
||||||
.route("/ui", get(ui::root_redirect))
|
.route("/ui", get(ui::root_redirect))
|
||||||
.route("/ui/", get(ui::root_redirect))
|
.route("/ui/", get(ui::root_redirect))
|
||||||
.route("/ui/buckets", get(ui_pages::buckets_overview))
|
.route(
|
||||||
|
"/ui/buckets",
|
||||||
|
get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
|
||||||
|
)
|
||||||
.route("/ui/buckets/create", post(ui_pages::create_bucket))
|
.route("/ui/buckets/create", post(ui_pages::create_bucket))
|
||||||
.route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
|
.route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
|
||||||
.route(
|
.route(
|
||||||
@@ -64,6 +67,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
|
"/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
|
||||||
put(ui_api::upload_multipart_part),
|
put(ui_api::upload_multipart_part),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
|
||||||
|
put(ui_api::upload_multipart_part),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
|
"/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
|
||||||
post(ui_api::complete_multipart_upload),
|
post(ui_api::complete_multipart_upload),
|
||||||
@@ -72,6 +79,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
|
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
|
||||||
delete(ui_api::abort_multipart_upload),
|
delete(ui_api::abort_multipart_upload),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/multipart/{upload_id}",
|
||||||
|
delete(ui_api::abort_multipart_upload),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/objects",
|
"/ui/buckets/{bucket_name}/objects",
|
||||||
get(ui_api::list_bucket_objects),
|
get(ui_api::list_bucket_objects),
|
||||||
@@ -88,6 +99,18 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/buckets/{bucket_name}/copy-targets",
|
"/ui/buckets/{bucket_name}/copy-targets",
|
||||||
get(ui_api::list_copy_targets),
|
get(ui_api::list_copy_targets),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/list-for-copy",
|
||||||
|
get(ui_api::list_copy_targets),
|
||||||
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/objects/bulk-delete",
|
||||||
|
post(ui_api::bulk_delete_objects),
|
||||||
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/objects/bulk-download",
|
||||||
|
post(ui_api::bulk_download_objects),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/objects/{*rest}",
|
"/ui/buckets/{bucket_name}/objects/{*rest}",
|
||||||
get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
|
get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
|
||||||
@@ -106,7 +129,7 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/lifecycle/history",
|
"/ui/buckets/{bucket_name}/lifecycle/history",
|
||||||
get(ui_api::lifecycle_history_stub),
|
get(ui_api::lifecycle_history),
|
||||||
)
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/replication/status",
|
"/ui/buckets/{bucket_name}/replication/status",
|
||||||
@@ -132,6 +155,11 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/buckets/{bucket_name}/replication/failures/clear",
|
"/ui/buckets/{bucket_name}/replication/failures/clear",
|
||||||
delete(ui_api::clear_replication_failures),
|
delete(ui_api::clear_replication_failures),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/buckets/{bucket_name}/replication/failures/{*rest}",
|
||||||
|
post(ui_api::retry_replication_failure_path)
|
||||||
|
.delete(ui_api::dismiss_replication_failure_path),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/buckets/{bucket_name}/bulk-delete",
|
"/ui/buckets/{bucket_name}/bulk-delete",
|
||||||
post(ui_api::bulk_delete_objects),
|
post(ui_api::bulk_delete_objects),
|
||||||
@@ -155,6 +183,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/iam/users/{user_id}/delete",
|
"/ui/iam/users/{user_id}/delete",
|
||||||
post(ui_pages::delete_iam_user),
|
post(ui_pages::delete_iam_user),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/iam/users/{user_id}/update",
|
||||||
|
post(ui_pages::update_iam_user),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/iam/users/{user_id}/policies",
|
"/ui/iam/users/{user_id}/policies",
|
||||||
post(ui_pages::update_iam_policies),
|
post(ui_pages::update_iam_policies),
|
||||||
@@ -167,12 +199,20 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/iam/users/{user_id}/rotate-secret",
|
"/ui/iam/users/{user_id}/rotate-secret",
|
||||||
post(ui_pages::rotate_iam_secret),
|
post(ui_pages::rotate_iam_secret),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/iam/users/{user_id}/rotate",
|
||||||
|
post(ui_pages::rotate_iam_secret),
|
||||||
|
)
|
||||||
.route("/ui/connections/create", post(ui_pages::create_connection))
|
.route("/ui/connections/create", post(ui_pages::create_connection))
|
||||||
.route("/ui/connections/test", post(ui_api::test_connection))
|
.route("/ui/connections/test", post(ui_api::test_connection))
|
||||||
.route(
|
.route(
|
||||||
"/ui/connections/{connection_id}",
|
"/ui/connections/{connection_id}",
|
||||||
post(ui_pages::update_connection),
|
post(ui_pages::update_connection),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/connections/{connection_id}/update",
|
||||||
|
post(ui_pages::update_connection),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/connections/{connection_id}/delete",
|
"/ui/connections/{connection_id}/delete",
|
||||||
post(ui_pages::delete_connection),
|
post(ui_pages::delete_connection),
|
||||||
@@ -201,7 +241,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/sites/peers/{site_id}/bidirectional-status",
|
"/ui/sites/peers/{site_id}/bidirectional-status",
|
||||||
get(ui_api::peer_bidirectional_status),
|
get(ui_api::peer_bidirectional_status),
|
||||||
)
|
)
|
||||||
.route("/ui/connections", get(ui_pages::connections_dashboard))
|
.route(
|
||||||
|
"/ui/connections",
|
||||||
|
get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
|
||||||
|
)
|
||||||
.route("/ui/metrics", get(ui_pages::metrics_dashboard))
|
.route("/ui/metrics", get(ui_pages::metrics_dashboard))
|
||||||
.route(
|
.route(
|
||||||
"/ui/metrics/settings",
|
"/ui/metrics/settings",
|
||||||
@@ -239,6 +282,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
"/ui/website-domains/{domain}",
|
"/ui/website-domains/{domain}",
|
||||||
post(ui_pages::update_website_domain),
|
post(ui_pages::update_website_domain),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/ui/website-domains/{domain}/update",
|
||||||
|
post(ui_pages::update_website_domain),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/ui/website-domains/{domain}/delete",
|
"/ui/website-domains/{domain}/delete",
|
||||||
post(ui_pages::delete_website_domain),
|
post(ui_pages::delete_website_domain),
|
||||||
@@ -257,8 +304,7 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
|
|
||||||
let public = Router::new()
|
let public = Router::new()
|
||||||
.route("/login", get(ui::login_page).post(ui::login_submit))
|
.route("/login", get(ui::login_page).post(ui::login_submit))
|
||||||
.route("/logout", post(ui::logout).get(ui::logout))
|
.route("/logout", post(ui::logout).get(ui::logout));
|
||||||
.route("/csrf-error", get(ui::csrf_error_page));
|
|
||||||
|
|
||||||
let session_state = middleware::SessionLayerState {
|
let session_state = middleware::SessionLayerState {
|
||||||
store: state.sessions.clone(),
|
store: state.sessions.clone(),
|
||||||
@@ -270,7 +316,10 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
protected
|
protected
|
||||||
.merge(public)
|
.merge(public)
|
||||||
.fallback(ui::not_found_page)
|
.fallback(ui::not_found_page)
|
||||||
.layer(axum::middleware::from_fn(middleware::csrf_layer))
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
|
state.clone(),
|
||||||
|
middleware::csrf_layer,
|
||||||
|
))
|
||||||
.layer(axum::middleware::from_fn_with_state(
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
session_state,
|
session_state,
|
||||||
middleware::session_layer,
|
middleware::session_layer,
|
||||||
@@ -286,7 +335,20 @@ pub fn create_ui_router(state: state::AppState) -> Router {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn create_router(state: state::AppState) -> Router {
|
pub fn create_router(state: state::AppState) -> Router {
|
||||||
let mut router = Router::new()
|
let default_rate_limit = middleware::RateLimitLayerState::with_per_op(
|
||||||
|
state.config.ratelimit_default,
|
||||||
|
state.config.ratelimit_list_buckets,
|
||||||
|
state.config.ratelimit_bucket_ops,
|
||||||
|
state.config.ratelimit_object_ops,
|
||||||
|
state.config.ratelimit_head_ops,
|
||||||
|
state.config.num_trusted_proxies,
|
||||||
|
);
|
||||||
|
let admin_rate_limit = middleware::RateLimitLayerState::new(
|
||||||
|
state.config.ratelimit_admin,
|
||||||
|
state.config.num_trusted_proxies,
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut api_router = Router::new()
|
||||||
.route("/myfsio/health", axum::routing::get(handlers::health_check))
|
.route("/myfsio/health", axum::routing::get(handlers::health_check))
|
||||||
.route("/", axum::routing::get(handlers::list_buckets))
|
.route("/", axum::routing::get(handlers::list_buckets))
|
||||||
.route(
|
.route(
|
||||||
@@ -315,7 +377,7 @@ pub fn create_router(state: state::AppState) -> Router {
|
|||||||
);
|
);
|
||||||
|
|
||||||
if state.config.kms_enabled {
|
if state.config.kms_enabled {
|
||||||
router = router
|
api_router = api_router
|
||||||
.route(
|
.route(
|
||||||
"/kms/keys",
|
"/kms/keys",
|
||||||
axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
|
axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
|
||||||
@@ -368,7 +430,17 @@ pub fn create_router(state: state::AppState) -> Router {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
router = router
|
api_router = api_router
|
||||||
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
|
state.clone(),
|
||||||
|
middleware::auth_layer,
|
||||||
|
))
|
||||||
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
|
default_rate_limit,
|
||||||
|
middleware::rate_limit_layer,
|
||||||
|
));
|
||||||
|
|
||||||
|
let admin_router = Router::new()
|
||||||
.route(
|
.route(
|
||||||
"/admin/site",
|
"/admin/site",
|
||||||
axum::routing::get(handlers::admin::get_local_site)
|
axum::routing::get(handlers::admin::get_local_site)
|
||||||
@@ -445,10 +517,18 @@ pub fn create_router(state: state::AppState) -> Router {
|
|||||||
"/admin/iam/users/{identifier}/access-keys",
|
"/admin/iam/users/{identifier}/access-keys",
|
||||||
axum::routing::post(handlers::admin::iam_create_access_key),
|
axum::routing::post(handlers::admin::iam_create_access_key),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/admin/iam/users/{identifier}/keys",
|
||||||
|
axum::routing::post(handlers::admin::iam_create_access_key),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/admin/iam/users/{identifier}/access-keys/{access_key}",
|
"/admin/iam/users/{identifier}/access-keys/{access_key}",
|
||||||
axum::routing::delete(handlers::admin::iam_delete_access_key),
|
axum::routing::delete(handlers::admin::iam_delete_access_key),
|
||||||
)
|
)
|
||||||
|
.route(
|
||||||
|
"/admin/iam/users/{identifier}/keys/{access_key}",
|
||||||
|
axum::routing::delete(handlers::admin::iam_delete_access_key),
|
||||||
|
)
|
||||||
.route(
|
.route(
|
||||||
"/admin/iam/users/{identifier}/disable",
|
"/admin/iam/users/{identifier}/disable",
|
||||||
axum::routing::post(handlers::admin::iam_disable_user),
|
axum::routing::post(handlers::admin::iam_disable_user),
|
||||||
@@ -491,14 +571,87 @@ pub fn create_router(state: state::AppState) -> Router {
|
|||||||
.route(
|
.route(
|
||||||
"/admin/integrity/history",
|
"/admin/integrity/history",
|
||||||
axum::routing::get(handlers::admin::integrity_history),
|
axum::routing::get(handlers::admin::integrity_history),
|
||||||
);
|
)
|
||||||
|
|
||||||
router
|
|
||||||
.layer(axum::middleware::from_fn_with_state(
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
state.clone(),
|
state.clone(),
|
||||||
middleware::auth_layer,
|
middleware::auth_layer,
|
||||||
))
|
))
|
||||||
|
.layer(axum::middleware::from_fn_with_state(
|
||||||
|
admin_rate_limit,
|
||||||
|
middleware::rate_limit_layer,
|
||||||
|
));
|
||||||
|
|
||||||
|
let request_body_timeout =
|
||||||
|
std::time::Duration::from_secs(state.config.request_body_timeout_secs);
|
||||||
|
|
||||||
|
api_router
|
||||||
|
.merge(admin_router)
|
||||||
.layer(axum::middleware::from_fn(middleware::server_header))
|
.layer(axum::middleware::from_fn(middleware::server_header))
|
||||||
|
.layer(cors_layer(&state.config))
|
||||||
.layer(tower_http::compression::CompressionLayer::new())
|
.layer(tower_http::compression::CompressionLayer::new())
|
||||||
|
.layer(tower_http::timeout::RequestBodyTimeoutLayer::new(
|
||||||
|
request_body_timeout,
|
||||||
|
))
|
||||||
.with_state(state)
|
.with_state(state)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn cors_layer(config: &config::ServerConfig) -> tower_http::cors::CorsLayer {
|
||||||
|
use axum::http::{HeaderName, HeaderValue, Method};
|
||||||
|
use tower_http::cors::{Any, CorsLayer};
|
||||||
|
|
||||||
|
let mut layer = CorsLayer::new();
|
||||||
|
|
||||||
|
if config.cors_origins.iter().any(|origin| origin == "*") {
|
||||||
|
layer = layer.allow_origin(Any);
|
||||||
|
} else {
|
||||||
|
let origins = config
|
||||||
|
.cors_origins
|
||||||
|
.iter()
|
||||||
|
.filter_map(|origin| HeaderValue::from_str(origin).ok())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !origins.is_empty() {
|
||||||
|
layer = layer.allow_origin(origins);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let methods = config
|
||||||
|
.cors_methods
|
||||||
|
.iter()
|
||||||
|
.filter_map(|method| method.parse::<Method>().ok())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !methods.is_empty() {
|
||||||
|
layer = layer.allow_methods(methods);
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.cors_allow_headers.iter().any(|header| header == "*") {
|
||||||
|
layer = layer.allow_headers(Any);
|
||||||
|
} else {
|
||||||
|
let headers = config
|
||||||
|
.cors_allow_headers
|
||||||
|
.iter()
|
||||||
|
.filter_map(|header| header.parse::<HeaderName>().ok())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !headers.is_empty() {
|
||||||
|
layer = layer.allow_headers(headers);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if config
|
||||||
|
.cors_expose_headers
|
||||||
|
.iter()
|
||||||
|
.any(|header| header == "*")
|
||||||
|
{
|
||||||
|
layer = layer.expose_headers(Any);
|
||||||
|
} else {
|
||||||
|
let headers = config
|
||||||
|
.cors_expose_headers
|
||||||
|
.iter()
|
||||||
|
.filter_map(|header| header.parse::<HeaderName>().ok())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !headers.is_empty() {
|
||||||
|
layer = layer.expose_headers(headers);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
layer
|
||||||
|
}
|
||||||
@@ -28,10 +28,19 @@ enum Command {
|
|||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() {
|
async fn main() {
|
||||||
load_env_files();
|
load_env_files();
|
||||||
tracing_subscriber::fmt::init();
|
init_tracing();
|
||||||
|
|
||||||
let cli = Cli::parse();
|
let cli = Cli::parse();
|
||||||
let config = ServerConfig::from_env();
|
let config = ServerConfig::from_env();
|
||||||
|
if !config
|
||||||
|
.ratelimit_storage_uri
|
||||||
|
.eq_ignore_ascii_case("memory://")
|
||||||
|
{
|
||||||
|
tracing::warn!(
|
||||||
|
"RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory rate limits",
|
||||||
|
config.ratelimit_storage_uri
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
if cli.reset_cred {
|
if cli.reset_cred {
|
||||||
reset_admin_credentials(&config);
|
reset_admin_credentials(&config);
|
||||||
@@ -113,7 +122,11 @@ async fn main() {
|
|||||||
let lifecycle =
|
let lifecycle =
|
||||||
std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
|
std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
|
||||||
state.storage.clone(),
|
state.storage.clone(),
|
||||||
myfsio_server::services::lifecycle::LifecycleConfig::default(),
|
config.storage_root.clone(),
|
||||||
|
myfsio_server::services::lifecycle::LifecycleConfig {
|
||||||
|
interval_seconds: 3600,
|
||||||
|
max_history_per_bucket: config.lifecycle_max_history_per_bucket,
|
||||||
|
},
|
||||||
));
|
));
|
||||||
bg_handles.push(lifecycle.start_background());
|
bg_handles.push(lifecycle.start_background());
|
||||||
tracing::info!("Lifecycle manager background service started");
|
tracing::info!("Lifecycle manager background service started");
|
||||||
@@ -176,8 +189,16 @@ async fn main() {
|
|||||||
|
|
||||||
let shutdown = shutdown_signal_shared();
|
let shutdown = shutdown_signal_shared();
|
||||||
let api_shutdown = shutdown.clone();
|
let api_shutdown = shutdown.clone();
|
||||||
|
let api_listener = axum::serve::ListenerExt::tap_io(api_listener, |stream| {
|
||||||
|
if let Err(err) = stream.set_nodelay(true) {
|
||||||
|
tracing::trace!("failed to set TCP_NODELAY on api socket: {}", err);
|
||||||
|
}
|
||||||
|
});
|
||||||
let api_task = tokio::spawn(async move {
|
let api_task = tokio::spawn(async move {
|
||||||
axum::serve(api_listener, api_app)
|
axum::serve(
|
||||||
|
api_listener,
|
||||||
|
api_app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
|
||||||
|
)
|
||||||
.with_graceful_shutdown(async move {
|
.with_graceful_shutdown(async move {
|
||||||
api_shutdown.notified().await;
|
api_shutdown.notified().await;
|
||||||
})
|
})
|
||||||
@@ -186,6 +207,11 @@ async fn main() {
|
|||||||
|
|
||||||
let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
|
let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
|
||||||
let ui_shutdown = shutdown.clone();
|
let ui_shutdown = shutdown.clone();
|
||||||
|
let listener = axum::serve::ListenerExt::tap_io(listener, |stream| {
|
||||||
|
if let Err(err) = stream.set_nodelay(true) {
|
||||||
|
tracing::trace!("failed to set TCP_NODELAY on ui socket: {}", err);
|
||||||
|
}
|
||||||
|
});
|
||||||
Some(tokio::spawn(async move {
|
Some(tokio::spawn(async move {
|
||||||
axum::serve(listener, app)
|
axum::serve(listener, app)
|
||||||
.with_graceful_shutdown(async move {
|
.with_graceful_shutdown(async move {
|
||||||
@@ -227,15 +253,43 @@ fn print_config_summary(config: &ServerConfig) {
|
|||||||
println!("IAM config: {}", config.iam_config_path.display());
|
println!("IAM config: {}", config.iam_config_path.display());
|
||||||
println!("Region: {}", config.region);
|
println!("Region: {}", config.region);
|
||||||
println!("Encryption enabled: {}", config.encryption_enabled);
|
println!("Encryption enabled: {}", config.encryption_enabled);
|
||||||
|
println!(
|
||||||
|
"Encryption chunk size: {} bytes",
|
||||||
|
config.encryption_chunk_size_bytes
|
||||||
|
);
|
||||||
println!("KMS enabled: {}", config.kms_enabled);
|
println!("KMS enabled: {}", config.kms_enabled);
|
||||||
|
println!(
|
||||||
|
"KMS data key bounds: {}-{} bytes",
|
||||||
|
config.kms_generate_data_key_min_bytes, config.kms_generate_data_key_max_bytes
|
||||||
|
);
|
||||||
println!("GC enabled: {}", config.gc_enabled);
|
println!("GC enabled: {}", config.gc_enabled);
|
||||||
|
println!(
|
||||||
|
"GC interval: {} hours, dry run: {}",
|
||||||
|
config.gc_interval_hours, config.gc_dry_run
|
||||||
|
);
|
||||||
println!("Integrity enabled: {}", config.integrity_enabled);
|
println!("Integrity enabled: {}", config.integrity_enabled);
|
||||||
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
|
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
|
||||||
|
println!(
|
||||||
|
"Lifecycle history limit: {}",
|
||||||
|
config.lifecycle_max_history_per_bucket
|
||||||
|
);
|
||||||
println!(
|
println!(
|
||||||
"Website hosting enabled: {}",
|
"Website hosting enabled: {}",
|
||||||
config.website_hosting_enabled
|
config.website_hosting_enabled
|
||||||
);
|
);
|
||||||
println!("Site sync enabled: {}", config.site_sync_enabled);
|
println!("Site sync enabled: {}", config.site_sync_enabled);
|
||||||
|
println!("API base URL: {}", config.api_base_url);
|
||||||
|
println!(
|
||||||
|
"Object key max: {} bytes, tag limit: {}",
|
||||||
|
config.object_key_max_length_bytes, config.object_tag_limit
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
"Rate limits: default {} per {}s, admin {} per {}s",
|
||||||
|
config.ratelimit_default.max_requests,
|
||||||
|
config.ratelimit_default.window_seconds,
|
||||||
|
config.ratelimit_admin.max_requests,
|
||||||
|
config.ratelimit_admin.window_seconds
|
||||||
|
);
|
||||||
println!(
|
println!(
|
||||||
"Metrics history enabled: {}",
|
"Metrics history enabled: {}",
|
||||||
config.metrics_history_enabled
|
config.metrics_history_enabled
|
||||||
@@ -255,6 +309,32 @@ fn validate_config(config: &ServerConfig) -> Vec<String> {
|
|||||||
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
|
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
|
||||||
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
|
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
|
||||||
}
|
}
|
||||||
|
if config.encryption_chunk_size_bytes == 0 {
|
||||||
|
issues.push("CRITICAL: ENCRYPTION_CHUNK_SIZE_BYTES must be greater than zero.".to_string());
|
||||||
|
}
|
||||||
|
if config.kms_generate_data_key_min_bytes == 0 {
|
||||||
|
issues.push(
|
||||||
|
"CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES must be greater than zero.".to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if config.kms_generate_data_key_min_bytes > config.kms_generate_data_key_max_bytes {
|
||||||
|
issues.push("CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES cannot exceed KMS_GENERATE_DATA_KEY_MAX_BYTES.".to_string());
|
||||||
|
}
|
||||||
|
if config.gc_interval_hours <= 0.0 {
|
||||||
|
issues.push("CRITICAL: GC_INTERVAL_HOURS must be greater than zero.".to_string());
|
||||||
|
}
|
||||||
|
if config.bucket_config_cache_ttl_seconds < 0.0 {
|
||||||
|
issues.push("CRITICAL: BUCKET_CONFIG_CACHE_TTL_SECONDS cannot be negative.".to_string());
|
||||||
|
}
|
||||||
|
if !config
|
||||||
|
.ratelimit_storage_uri
|
||||||
|
.eq_ignore_ascii_case("memory://")
|
||||||
|
{
|
||||||
|
issues.push(format!(
|
||||||
|
"WARNING: RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory limits.",
|
||||||
|
config.ratelimit_storage_uri
|
||||||
|
));
|
||||||
|
}
|
||||||
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
|
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
|
||||||
issues.push(format!(
|
issues.push(format!(
|
||||||
"CRITICAL: Cannot create storage root {}: {}",
|
"CRITICAL: Cannot create storage root {}: {}",
|
||||||
@@ -285,6 +365,17 @@ fn validate_config(config: &ServerConfig) -> Vec<String> {
|
|||||||
issues
|
issues
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn init_tracing() {
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
|
||||||
|
let filter = EnvFilter::try_from_env("RUST_LOG")
|
||||||
|
.or_else(|_| {
|
||||||
|
EnvFilter::try_new(std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string()))
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|_| EnvFilter::new("INFO"));
|
||||||
|
tracing_subscriber::fmt().with_env_filter(filter).init();
|
||||||
|
}
|
||||||
|
|
||||||
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
|
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
|
||||||
std::sync::Arc::new(tokio::sync::Notify::new())
|
std::sync::Arc::new(tokio::sync::Notify::new())
|
||||||
}
|
}
|
||||||
@@ -418,8 +509,49 @@ fn reset_admin_credentials(config: &ServerConfig) {
|
|||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
println!("Backed up existing IAM config to {}", backup.display());
|
println!("Backed up existing IAM config to {}", backup.display());
|
||||||
|
prune_iam_backups(&config.iam_config_path, 5);
|
||||||
}
|
}
|
||||||
|
|
||||||
ensure_iam_bootstrap(config);
|
ensure_iam_bootstrap(config);
|
||||||
println!("Admin credentials reset.");
|
println!("Admin credentials reset.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn prune_iam_backups(iam_path: &std::path::Path, keep: usize) {
|
||||||
|
let parent = match iam_path.parent() {
|
||||||
|
Some(p) => p,
|
||||||
|
None => return,
|
||||||
|
};
|
||||||
|
let stem = match iam_path.file_stem().and_then(|s| s.to_str()) {
|
||||||
|
Some(s) => s,
|
||||||
|
None => return,
|
||||||
|
};
|
||||||
|
let prefix = format!("{}.bak-", stem);
|
||||||
|
|
||||||
|
let entries = match std::fs::read_dir(parent) {
|
||||||
|
Ok(entries) => entries,
|
||||||
|
Err(_) => return,
|
||||||
|
};
|
||||||
|
let mut backups: Vec<(i64, std::path::PathBuf)> = entries
|
||||||
|
.filter_map(|e| e.ok())
|
||||||
|
.filter_map(|e| {
|
||||||
|
let path = e.path();
|
||||||
|
let name = path.file_name()?.to_str()?;
|
||||||
|
let rest = name.strip_prefix(&prefix)?;
|
||||||
|
let ts: i64 = rest.parse().ok()?;
|
||||||
|
Some((ts, path))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
backups.sort_by(|a, b| b.0.cmp(&a.0));
|
||||||
|
|
||||||
|
for (_, path) in backups.into_iter().skip(keep) {
|
||||||
|
if let Err(err) = std::fs::remove_file(&path) {
|
||||||
|
eprintln!(
|
||||||
|
"Failed to remove old IAM backup {}: {}",
|
||||||
|
path.display(),
|
||||||
|
err
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
println!("Pruned old IAM backup {}", path.display());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
use axum::extract::{Request, State};
|
use axum::extract::{Request, State};
|
||||||
use axum::http::{header, HeaderMap, Method, StatusCode};
|
use axum::http::{header, HeaderMap, Method, StatusCode, Uri};
|
||||||
use axum::middleware::Next;
|
use axum::middleware::Next;
|
||||||
use axum::response::{IntoResponse, Response};
|
use axum::response::{IntoResponse, Response};
|
||||||
|
|
||||||
@@ -12,27 +12,76 @@ use serde_json::Value;
|
|||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
|
|
||||||
|
use crate::middleware::sha_body::{is_hex_sha256, Sha256VerifyBody};
|
||||||
|
use crate::services::acl::acl_from_bucket_config;
|
||||||
use crate::state::AppState;
|
use crate::state::AppState;
|
||||||
|
|
||||||
|
fn wrap_body_for_sha256_verification(req: &mut Request) {
|
||||||
|
let declared = match req
|
||||||
|
.headers()
|
||||||
|
.get("x-amz-content-sha256")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
{
|
||||||
|
Some(v) => v.to_string(),
|
||||||
|
None => return,
|
||||||
|
};
|
||||||
|
if !is_hex_sha256(&declared) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let is_chunked = req
|
||||||
|
.headers()
|
||||||
|
.get("content-encoding")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.map(|v| v.to_ascii_lowercase().contains("aws-chunked"))
|
||||||
|
.unwrap_or(false);
|
||||||
|
if is_chunked {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let body = std::mem::replace(req.body_mut(), axum::body::Body::empty());
|
||||||
|
let wrapped = Sha256VerifyBody::new(body, declared);
|
||||||
|
*req.body_mut() = axum::body::Body::new(wrapped);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct OriginalCanonicalPath(String);
|
||||||
|
|
||||||
fn website_error_response(
|
fn website_error_response(
|
||||||
status: StatusCode,
|
status: StatusCode,
|
||||||
body: Option<Vec<u8>>,
|
body: Option<Vec<u8>>,
|
||||||
content_type: &str,
|
content_type: &str,
|
||||||
|
include_body: bool,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
|
let (body, content_type) = match body {
|
||||||
|
Some(body) => (body, content_type),
|
||||||
|
None => (
|
||||||
|
default_website_error_body(status).into_bytes(),
|
||||||
|
"text/html; charset=utf-8",
|
||||||
|
),
|
||||||
|
};
|
||||||
let mut headers = HeaderMap::new();
|
let mut headers = HeaderMap::new();
|
||||||
headers.insert(header::CONTENT_TYPE, content_type.parse().unwrap());
|
headers.insert(header::CONTENT_TYPE, content_type.parse().unwrap());
|
||||||
headers.insert(header::ACCEPT_RANGES, "bytes".parse().unwrap());
|
headers.insert(header::ACCEPT_RANGES, "bytes".parse().unwrap());
|
||||||
if let Some(ref body) = body {
|
|
||||||
headers.insert(
|
headers.insert(
|
||||||
header::CONTENT_LENGTH,
|
header::CONTENT_LENGTH,
|
||||||
body.len().to_string().parse().unwrap(),
|
body.len().to_string().parse().unwrap(),
|
||||||
);
|
);
|
||||||
|
if include_body {
|
||||||
(status, headers, body.clone()).into_response()
|
(status, headers, body.clone()).into_response()
|
||||||
} else {
|
} else {
|
||||||
(status, headers).into_response()
|
(status, headers).into_response()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn default_website_error_body(status: StatusCode) -> String {
|
||||||
|
let code = status.as_u16();
|
||||||
|
if status == StatusCode::NOT_FOUND {
|
||||||
|
"<h1>404 page not found</h1>".to_string()
|
||||||
|
} else {
|
||||||
|
let reason = status.canonical_reason().unwrap_or("Error");
|
||||||
|
format!("{code} {reason}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn parse_range_header(range_header: &str, total_size: u64) -> Option<(u64, u64)> {
|
fn parse_range_header(range_header: &str, total_size: u64) -> Option<(u64, u64)> {
|
||||||
let range_spec = range_header.strip_prefix("bytes=")?;
|
let range_spec = range_header.strip_prefix("bytes=")?;
|
||||||
if let Some(suffix) = range_spec.strip_prefix('-') {
|
if let Some(suffix) = range_spec.strip_prefix('-') {
|
||||||
@@ -191,6 +240,7 @@ async fn maybe_serve_website(
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
let request_path = uri_path.trim_start_matches('/').to_string();
|
let request_path = uri_path.trim_start_matches('/').to_string();
|
||||||
|
let include_error_body = method != axum::http::Method::HEAD;
|
||||||
let store = state.website_domains.as_ref()?;
|
let store = state.website_domains.as_ref()?;
|
||||||
let bucket = store.get_bucket(&host)?;
|
let bucket = store.get_bucket(&host)?;
|
||||||
if !matches!(state.storage.bucket_exists(&bucket).await, Ok(true)) {
|
if !matches!(state.storage.bucket_exists(&bucket).await, Ok(true)) {
|
||||||
@@ -198,6 +248,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,6 +258,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
let Some((index_document, error_document)) = parse_website_config(website_config) else {
|
let Some((index_document, error_document)) = parse_website_config(website_config) else {
|
||||||
@@ -214,6 +266,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
));
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -251,6 +304,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
))
|
))
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
@@ -258,6 +312,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
} else if !exists {
|
} else if !exists {
|
||||||
@@ -276,6 +331,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
))
|
))
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -283,6 +339,7 @@ async fn maybe_serve_website(
|
|||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
None,
|
None,
|
||||||
"text/plain; charset=utf-8",
|
"text/plain; charset=utf-8",
|
||||||
|
include_error_body,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -297,6 +354,67 @@ async fn maybe_serve_website(
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn virtual_host_candidate(host: &str) -> Option<String> {
|
||||||
|
let (candidate, _) = host.split_once('.')?;
|
||||||
|
if candidate.is_empty() || matches!(candidate, "www" | "s3" | "api" | "admin" | "kms") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
if myfsio_storage::validation::validate_bucket_name(candidate).is_some() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(candidate.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn virtual_host_bucket(
|
||||||
|
state: &AppState,
|
||||||
|
host: &str,
|
||||||
|
path: &str,
|
||||||
|
method: &Method,
|
||||||
|
) -> Option<String> {
|
||||||
|
if path.starts_with("/ui")
|
||||||
|
|| path.starts_with("/admin")
|
||||||
|
|| path.starts_with("/kms")
|
||||||
|
|| path.starts_with("/myfsio")
|
||||||
|
{
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let bucket = virtual_host_candidate(host)?;
|
||||||
|
if path == format!("/{}", bucket) || path.starts_with(&format!("/{}/", bucket)) {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
match state.storage.bucket_exists(&bucket).await {
|
||||||
|
Ok(true) => Some(bucket),
|
||||||
|
Ok(false) if *method == Method::PUT && path == "/" => Some(bucket),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rewrite_uri_for_virtual_host(uri: &Uri, bucket: &str) -> Option<Uri> {
|
||||||
|
let path = uri.path();
|
||||||
|
let rewritten_path = if path == "/" {
|
||||||
|
format!("/{}/", bucket)
|
||||||
|
} else {
|
||||||
|
format!("/{}{}", bucket, path)
|
||||||
|
};
|
||||||
|
let path_and_query = match uri.query() {
|
||||||
|
Some(query) => format!("{}?{}", rewritten_path, query),
|
||||||
|
None => rewritten_path,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut parts = uri.clone().into_parts();
|
||||||
|
parts.path_and_query = Some(path_and_query.parse().ok()?);
|
||||||
|
Uri::from_parts(parts).ok()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sigv4_canonical_path(req: &Request) -> &str {
|
||||||
|
req.extensions()
|
||||||
|
.get::<OriginalCanonicalPath>()
|
||||||
|
.map(|path| path.0.as_str())
|
||||||
|
.unwrap_or_else(|| req.uri().path())
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: Next) -> Response {
|
pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: Next) -> Response {
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let uri = req.uri().clone();
|
let uri = req.uri().clone();
|
||||||
@@ -328,12 +446,12 @@ pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: N
|
|||||||
.and_then(|value| value.to_str().ok())
|
.and_then(|value| value.to_str().ok())
|
||||||
.map(|value| value.to_string());
|
.map(|value| value.to_string());
|
||||||
|
|
||||||
let response = if path == "/myfsio/health" || path == "/health" {
|
let response = if path == "/myfsio/health" {
|
||||||
next.run(req).await
|
next.run(req).await
|
||||||
} else if let Some(response) = maybe_serve_website(
|
} else if let Some(response) = maybe_serve_website(
|
||||||
&state,
|
&state,
|
||||||
method.clone(),
|
method.clone(),
|
||||||
host.unwrap_or_default(),
|
host.clone().unwrap_or_default(),
|
||||||
path.clone(),
|
path.clone(),
|
||||||
range_header,
|
range_header,
|
||||||
)
|
)
|
||||||
@@ -341,38 +459,54 @@ pub async fn auth_layer(State(state): State<AppState>, mut req: Request, next: N
|
|||||||
{
|
{
|
||||||
response
|
response
|
||||||
} else {
|
} else {
|
||||||
|
let auth_path = if let Some(bucket) =
|
||||||
|
virtual_host_bucket(&state, host.as_deref().unwrap_or_default(), &path, &method).await
|
||||||
|
{
|
||||||
|
if let Some(rewritten) = rewrite_uri_for_virtual_host(req.uri(), &bucket) {
|
||||||
|
req.extensions_mut()
|
||||||
|
.insert(OriginalCanonicalPath(path.clone()));
|
||||||
|
*req.uri_mut() = rewritten;
|
||||||
|
req.uri().path().to_string()
|
||||||
|
} else {
|
||||||
|
path.clone()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
path.clone()
|
||||||
|
};
|
||||||
|
|
||||||
match try_auth(&state, &req) {
|
match try_auth(&state, &req) {
|
||||||
AuthResult::NoAuth => match authorize_request(
|
AuthResult::NoAuth => match authorize_request(
|
||||||
&state,
|
&state,
|
||||||
None,
|
None,
|
||||||
&method,
|
&method,
|
||||||
&path,
|
&auth_path,
|
||||||
&query,
|
&query,
|
||||||
copy_source.as_deref(),
|
copy_source.as_deref(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
Ok(()) => next.run(req).await,
|
Ok(()) => next.run(req).await,
|
||||||
Err(err) => error_response(err, &path),
|
Err(err) => error_response(err, &auth_path),
|
||||||
},
|
},
|
||||||
AuthResult::Ok(principal) => {
|
AuthResult::Ok(principal) => {
|
||||||
if let Err(err) = authorize_request(
|
if let Err(err) = authorize_request(
|
||||||
&state,
|
&state,
|
||||||
Some(&principal),
|
Some(&principal),
|
||||||
&method,
|
&method,
|
||||||
&path,
|
&auth_path,
|
||||||
&query,
|
&query,
|
||||||
copy_source.as_deref(),
|
copy_source.as_deref(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
error_response(err, &path)
|
error_response(err, &auth_path)
|
||||||
} else {
|
} else {
|
||||||
req.extensions_mut().insert(principal);
|
req.extensions_mut().insert(principal);
|
||||||
|
wrap_body_for_sha256_verification(&mut req);
|
||||||
next.run(req).await
|
next.run(req).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AuthResult::Denied(err) => error_response(err, &path),
|
AuthResult::Denied(err) => error_response(err, &auth_path),
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -488,7 +622,7 @@ async fn authorize_request(
|
|||||||
query: &str,
|
query: &str,
|
||||||
copy_source: Option<&str>,
|
copy_source: Option<&str>,
|
||||||
) -> Result<(), S3Error> {
|
) -> Result<(), S3Error> {
|
||||||
if path == "/myfsio/health" || path == "/health" {
|
if path == "/myfsio/health" {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
if path == "/" {
|
if path == "/" {
|
||||||
@@ -589,6 +723,17 @@ async fn authorize_action(
|
|||||||
if iam_allowed || matches!(policy_decision, PolicyDecision::Allow) {
|
if iam_allowed || matches!(policy_decision, PolicyDecision::Allow) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
if evaluate_bucket_acl(
|
||||||
|
state,
|
||||||
|
bucket,
|
||||||
|
principal.map(|principal| principal.access_key.as_str()),
|
||||||
|
action,
|
||||||
|
principal.is_some(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
if principal.is_some() {
|
if principal.is_some() {
|
||||||
Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
|
Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
|
||||||
@@ -600,6 +745,27 @@ async fn authorize_action(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn evaluate_bucket_acl(
|
||||||
|
state: &AppState,
|
||||||
|
bucket: &str,
|
||||||
|
principal_id: Option<&str>,
|
||||||
|
action: &str,
|
||||||
|
is_authenticated: bool,
|
||||||
|
) -> bool {
|
||||||
|
let config = match state.storage.get_bucket_config(bucket).await {
|
||||||
|
Ok(config) => config,
|
||||||
|
Err(_) => return false,
|
||||||
|
};
|
||||||
|
let Some(value) = config.acl.as_ref() else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
let Some(acl) = acl_from_bucket_config(value) else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
acl.allowed_actions(principal_id, is_authenticated)
|
||||||
|
.contains(action)
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||||
enum PolicyDecision {
|
enum PolicyDecision {
|
||||||
Allow,
|
Allow,
|
||||||
@@ -964,7 +1130,9 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
|
|||||||
let parts: Vec<&str> = auth_str
|
let parts: Vec<&str> = auth_str
|
||||||
.strip_prefix("AWS4-HMAC-SHA256 ")
|
.strip_prefix("AWS4-HMAC-SHA256 ")
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.split(", ")
|
.split(',')
|
||||||
|
.map(str::trim)
|
||||||
|
.filter(|s| !s.is_empty())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
if parts.len() != 3 {
|
if parts.len() != 3 {
|
||||||
@@ -974,9 +1142,24 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
|
let mut credential: &str = "";
|
||||||
let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
|
let mut signed_headers_str: &str = "";
|
||||||
let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");
|
let mut provided_signature: &str = "";
|
||||||
|
for part in &parts {
|
||||||
|
if let Some(v) = part.strip_prefix("Credential=") {
|
||||||
|
credential = v;
|
||||||
|
} else if let Some(v) = part.strip_prefix("SignedHeaders=") {
|
||||||
|
signed_headers_str = v;
|
||||||
|
} else if let Some(v) = part.strip_prefix("Signature=") {
|
||||||
|
provided_signature = v;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if credential.is_empty() || signed_headers_str.is_empty() || provided_signature.is_empty() {
|
||||||
|
return AuthResult::Denied(S3Error::new(
|
||||||
|
S3ErrorCode::InvalidArgument,
|
||||||
|
"Malformed Authorization header",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
let cred_parts: Vec<&str> = credential.split('/').collect();
|
let cred_parts: Vec<&str> = credential.split('/').collect();
|
||||||
if cred_parts.len() != 5 {
|
if cred_parts.len() != 5 {
|
||||||
@@ -1019,7 +1202,7 @@ fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthR
|
|||||||
};
|
};
|
||||||
|
|
||||||
let method = req.method().as_str();
|
let method = req.method().as_str();
|
||||||
let canonical_uri = req.uri().path();
|
let canonical_uri = sigv4_canonical_path(req);
|
||||||
|
|
||||||
let query_params = parse_query_params(req.uri().query().unwrap_or(""));
|
let query_params = parse_query_params(req.uri().query().unwrap_or(""));
|
||||||
|
|
||||||
@@ -1161,7 +1344,7 @@ fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
|
|||||||
}
|
}
|
||||||
if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
|
if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
|
||||||
return AuthResult::Denied(S3Error::new(
|
return AuthResult::Denied(S3Error::new(
|
||||||
S3ErrorCode::AccessDenied,
|
S3ErrorCode::RequestTimeTooSkewed,
|
||||||
"Request is too far in the future",
|
"Request is too far in the future",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
@@ -1175,7 +1358,7 @@ fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let method = req.method().as_str();
|
let method = req.method().as_str();
|
||||||
let canonical_uri = req.uri().path();
|
let canonical_uri = sigv4_canonical_path(req);
|
||||||
|
|
||||||
let query_params_no_sig: Vec<(String, String)> = params
|
let query_params_no_sig: Vec<(String, String)> = params
|
||||||
.iter()
|
.iter()
|
||||||
@@ -1231,8 +1414,11 @@ fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Er
|
|||||||
|
|
||||||
if diff > tolerance_secs {
|
if diff > tolerance_secs {
|
||||||
return Some(S3Error::new(
|
return Some(S3Error::new(
|
||||||
S3ErrorCode::AccessDenied,
|
S3ErrorCode::RequestTimeTooSkewed,
|
||||||
"Request timestamp too old or too far in the future",
|
format!(
|
||||||
|
"The difference between the request time and the server's time is too large ({}s, tolerance {}s)",
|
||||||
|
diff, tolerance_secs
|
||||||
|
),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
None
|
None
|
||||||
@@ -1,7 +1,10 @@
|
|||||||
mod auth;
|
mod auth;
|
||||||
|
pub mod ratelimit;
|
||||||
pub mod session;
|
pub mod session;
|
||||||
|
pub(crate) mod sha_body;
|
||||||
|
|
||||||
pub use auth::auth_layer;
|
pub use auth::auth_layer;
|
||||||
|
pub use ratelimit::{rate_limit_layer, RateLimitLayerState};
|
||||||
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
|
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
|
||||||
|
|
||||||
use axum::extract::{Request, State};
|
use axum::extract::{Request, State};
|
||||||
313
crates/myfsio-server/src/middleware/ratelimit.rs
Normal file
313
crates/myfsio-server/src/middleware/ratelimit.rs
Normal file
@@ -0,0 +1,313 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use axum::extract::{ConnectInfo, Request, State};
|
||||||
|
use axum::http::{header, Method, StatusCode};
|
||||||
|
use axum::middleware::Next;
|
||||||
|
use axum::response::{IntoResponse, Response};
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
|
||||||
|
use crate::config::RateLimitSetting;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct RateLimitLayerState {
|
||||||
|
default_limiter: Arc<FixedWindowLimiter>,
|
||||||
|
list_buckets_limiter: Option<Arc<FixedWindowLimiter>>,
|
||||||
|
bucket_ops_limiter: Option<Arc<FixedWindowLimiter>>,
|
||||||
|
object_ops_limiter: Option<Arc<FixedWindowLimiter>>,
|
||||||
|
head_ops_limiter: Option<Arc<FixedWindowLimiter>>,
|
||||||
|
num_trusted_proxies: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RateLimitLayerState {
|
||||||
|
pub fn new(setting: RateLimitSetting, num_trusted_proxies: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
default_limiter: Arc::new(FixedWindowLimiter::new(setting)),
|
||||||
|
list_buckets_limiter: None,
|
||||||
|
bucket_ops_limiter: None,
|
||||||
|
object_ops_limiter: None,
|
||||||
|
head_ops_limiter: None,
|
||||||
|
num_trusted_proxies,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_per_op(
|
||||||
|
default: RateLimitSetting,
|
||||||
|
list_buckets: RateLimitSetting,
|
||||||
|
bucket_ops: RateLimitSetting,
|
||||||
|
object_ops: RateLimitSetting,
|
||||||
|
head_ops: RateLimitSetting,
|
||||||
|
num_trusted_proxies: usize,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
default_limiter: Arc::new(FixedWindowLimiter::new(default)),
|
||||||
|
list_buckets_limiter: (list_buckets != default)
|
||||||
|
.then(|| Arc::new(FixedWindowLimiter::new(list_buckets))),
|
||||||
|
bucket_ops_limiter: (bucket_ops != default)
|
||||||
|
.then(|| Arc::new(FixedWindowLimiter::new(bucket_ops))),
|
||||||
|
object_ops_limiter: (object_ops != default)
|
||||||
|
.then(|| Arc::new(FixedWindowLimiter::new(object_ops))),
|
||||||
|
head_ops_limiter: (head_ops != default)
|
||||||
|
.then(|| Arc::new(FixedWindowLimiter::new(head_ops))),
|
||||||
|
num_trusted_proxies,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn select_limiter(&self, req: &Request) -> &Arc<FixedWindowLimiter> {
|
||||||
|
let path = req.uri().path();
|
||||||
|
let method = req.method();
|
||||||
|
if path == "/" && *method == Method::GET {
|
||||||
|
if let Some(ref limiter) = self.list_buckets_limiter {
|
||||||
|
return limiter;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let segments: Vec<&str> = path
|
||||||
|
.trim_start_matches('/')
|
||||||
|
.split('/')
|
||||||
|
.filter(|s| !s.is_empty())
|
||||||
|
.collect();
|
||||||
|
if *method == Method::HEAD {
|
||||||
|
if let Some(ref limiter) = self.head_ops_limiter {
|
||||||
|
return limiter;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if segments.len() == 1 {
|
||||||
|
if let Some(ref limiter) = self.bucket_ops_limiter {
|
||||||
|
return limiter;
|
||||||
|
}
|
||||||
|
} else if segments.len() >= 2 {
|
||||||
|
if let Some(ref limiter) = self.object_ops_limiter {
|
||||||
|
return limiter;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
&self.default_limiter
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct FixedWindowLimiter {
|
||||||
|
setting: RateLimitSetting,
|
||||||
|
state: Mutex<LimiterState>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct LimiterState {
|
||||||
|
entries: HashMap<String, LimitEntry>,
|
||||||
|
last_sweep: Instant,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
struct LimitEntry {
|
||||||
|
window_started: Instant,
|
||||||
|
count: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
const SWEEP_MIN_INTERVAL: Duration = Duration::from_secs(60);
|
||||||
|
const SWEEP_ENTRY_THRESHOLD: usize = 1024;
|
||||||
|
|
||||||
|
impl FixedWindowLimiter {
|
||||||
|
fn new(setting: RateLimitSetting) -> Self {
|
||||||
|
Self {
|
||||||
|
setting,
|
||||||
|
state: Mutex::new(LimiterState {
|
||||||
|
entries: HashMap::new(),
|
||||||
|
last_sweep: Instant::now(),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check(&self, key: &str) -> Result<(), u64> {
|
||||||
|
let now = Instant::now();
|
||||||
|
let window = Duration::from_secs(self.setting.window_seconds.max(1));
|
||||||
|
let mut state = self.state.lock();
|
||||||
|
|
||||||
|
if state.entries.len() >= SWEEP_ENTRY_THRESHOLD
|
||||||
|
&& now.duration_since(state.last_sweep) >= SWEEP_MIN_INTERVAL
|
||||||
|
{
|
||||||
|
state
|
||||||
|
.entries
|
||||||
|
.retain(|_, entry| now.duration_since(entry.window_started) < window);
|
||||||
|
state.last_sweep = now;
|
||||||
|
}
|
||||||
|
|
||||||
|
let entry = state.entries.entry(key.to_string()).or_insert(LimitEntry {
|
||||||
|
window_started: now,
|
||||||
|
count: 0,
|
||||||
|
});
|
||||||
|
|
||||||
|
if now.duration_since(entry.window_started) >= window {
|
||||||
|
entry.window_started = now;
|
||||||
|
entry.count = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.count >= self.setting.max_requests {
|
||||||
|
let elapsed = now.duration_since(entry.window_started);
|
||||||
|
let retry_after = window.saturating_sub(elapsed).as_secs().max(1);
|
||||||
|
return Err(retry_after);
|
||||||
|
}
|
||||||
|
|
||||||
|
entry.count += 1;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn rate_limit_layer(
|
||||||
|
State(state): State<RateLimitLayerState>,
|
||||||
|
req: Request,
|
||||||
|
next: Next,
|
||||||
|
) -> Response {
|
||||||
|
let key = rate_limit_key(&req, state.num_trusted_proxies);
|
||||||
|
let limiter = state.select_limiter(&req);
|
||||||
|
match limiter.check(&key) {
|
||||||
|
Ok(()) => next.run(req).await,
|
||||||
|
Err(retry_after) => {
|
||||||
|
let resource = req.uri().path().to_string();
|
||||||
|
too_many_requests(retry_after, &resource)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn too_many_requests(retry_after: u64, resource: &str) -> Response {
|
||||||
|
let request_id = uuid::Uuid::new_v4().simple().to_string();
|
||||||
|
let body = myfsio_xml::response::rate_limit_exceeded_xml(resource, &request_id);
|
||||||
|
let mut response = (
|
||||||
|
StatusCode::SERVICE_UNAVAILABLE,
|
||||||
|
[
|
||||||
|
(header::CONTENT_TYPE, "application/xml".to_string()),
|
||||||
|
(header::RETRY_AFTER, retry_after.to_string()),
|
||||||
|
],
|
||||||
|
body,
|
||||||
|
)
|
||||||
|
.into_response();
|
||||||
|
if let Ok(value) = request_id.parse() {
|
||||||
|
response
|
||||||
|
.headers_mut()
|
||||||
|
.insert("x-amz-request-id", value);
|
||||||
|
}
|
||||||
|
response
|
||||||
|
}
|
||||||
|
|
||||||
|
fn rate_limit_key(req: &Request, num_trusted_proxies: usize) -> String {
|
||||||
|
format!("ip:{}", client_ip(req, num_trusted_proxies))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn client_ip(req: &Request, num_trusted_proxies: usize) -> String {
|
||||||
|
if num_trusted_proxies > 0 {
|
||||||
|
if let Some(value) = req
|
||||||
|
.headers()
|
||||||
|
.get("x-forwarded-for")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
{
|
||||||
|
let parts = value
|
||||||
|
.split(',')
|
||||||
|
.map(|part| part.trim())
|
||||||
|
.filter(|part| !part.is_empty())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if parts.len() > num_trusted_proxies {
|
||||||
|
let index = parts.len() - num_trusted_proxies - 1;
|
||||||
|
return parts[index].to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(value) = req.headers().get("x-real-ip").and_then(|v| v.to_str().ok()) {
|
||||||
|
if !value.trim().is_empty() {
|
||||||
|
return value.trim().to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
req.extensions()
|
||||||
|
.get::<ConnectInfo<SocketAddr>>()
|
||||||
|
.map(|ConnectInfo(addr)| addr.ip().to_string())
|
||||||
|
.unwrap_or_else(|| "unknown".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use axum::body::Body;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn honors_trusted_proxy_count_for_forwarded_for() {
|
||||||
|
let req = Request::builder()
|
||||||
|
.header("x-forwarded-for", "198.51.100.1, 10.0.0.1, 10.0.0.2")
|
||||||
|
.body(Body::empty())
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(rate_limit_key(&req, 2), "ip:198.51.100.1");
|
||||||
|
assert_eq!(rate_limit_key(&req, 1), "ip:10.0.0.1");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn falls_back_to_connect_info_when_forwarded_for_has_too_few_hops() {
|
||||||
|
let mut req = Request::builder()
|
||||||
|
.header("x-forwarded-for", "198.51.100.1")
|
||||||
|
.body(Body::empty())
|
||||||
|
.unwrap();
|
||||||
|
req.extensions_mut()
|
||||||
|
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
|
||||||
|
|
||||||
|
assert_eq!(rate_limit_key(&req, 2), "ip:203.0.113.9");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn ignores_forwarded_headers_when_no_proxies_are_trusted() {
|
||||||
|
let mut req = Request::builder()
|
||||||
|
.header("x-forwarded-for", "198.51.100.1")
|
||||||
|
.header("x-real-ip", "198.51.100.2")
|
||||||
|
.body(Body::empty())
|
||||||
|
.unwrap();
|
||||||
|
req.extensions_mut()
|
||||||
|
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
|
||||||
|
|
||||||
|
assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.9");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn uses_connect_info_for_direct_clients() {
|
||||||
|
let mut req = Request::builder().body(Body::empty()).unwrap();
|
||||||
|
req.extensions_mut()
|
||||||
|
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 10], 443))));
|
||||||
|
|
||||||
|
assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.10");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn fixed_window_rejects_after_quota() {
|
||||||
|
let limiter = FixedWindowLimiter::new(RateLimitSetting::new(2, 60));
|
||||||
|
assert!(limiter.check("k").is_ok());
|
||||||
|
assert!(limiter.check("k").is_ok());
|
||||||
|
assert!(limiter.check("k").is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sweep_removes_expired_entries() {
|
||||||
|
let limiter = FixedWindowLimiter::new(RateLimitSetting::new(10, 1));
|
||||||
|
let far_past = Instant::now() - (SWEEP_MIN_INTERVAL + Duration::from_secs(5));
|
||||||
|
{
|
||||||
|
let mut state = limiter.state.lock();
|
||||||
|
for i in 0..(SWEEP_ENTRY_THRESHOLD + 1024) {
|
||||||
|
state.entries.insert(
|
||||||
|
format!("stale-{}", i),
|
||||||
|
LimitEntry {
|
||||||
|
window_started: far_past,
|
||||||
|
count: 5,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
state.last_sweep = far_past;
|
||||||
|
}
|
||||||
|
let seeded = limiter.state.lock().entries.len();
|
||||||
|
assert_eq!(seeded, SWEEP_ENTRY_THRESHOLD + 1024);
|
||||||
|
|
||||||
|
assert!(limiter.check("fresh").is_ok());
|
||||||
|
|
||||||
|
let remaining = limiter.state.lock().entries.len();
|
||||||
|
assert_eq!(
|
||||||
|
remaining, 1,
|
||||||
|
"expected sweep to leave only the fresh entry, got {}",
|
||||||
|
remaining
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -90,7 +90,11 @@ pub async fn session_layer(
|
|||||||
resp
|
resp
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn csrf_layer(req: Request, next: Next) -> Response {
|
pub async fn csrf_layer(
|
||||||
|
State(state): State<crate::state::AppState>,
|
||||||
|
req: Request,
|
||||||
|
next: Next,
|
||||||
|
) -> Response {
|
||||||
const CSRF_HEADER_ALIAS: &str = "x-csrftoken";
|
const CSRF_HEADER_ALIAS: &str = "x-csrftoken";
|
||||||
|
|
||||||
let method = req.method().clone();
|
let method = req.method().clone();
|
||||||
@@ -169,7 +173,32 @@ pub async fn csrf_layer(req: Request, next: Next) -> Response {
|
|||||||
header_present = header_token.is_some(),
|
header_present = header_token.is_some(),
|
||||||
"CSRF token mismatch"
|
"CSRF token mismatch"
|
||||||
);
|
);
|
||||||
(StatusCode::FORBIDDEN, "Invalid CSRF token").into_response()
|
|
||||||
|
let accept = parts
|
||||||
|
.headers
|
||||||
|
.get(header::ACCEPT)
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.unwrap_or("");
|
||||||
|
let is_form_submit = content_type.starts_with("application/x-www-form-urlencoded")
|
||||||
|
|| content_type.starts_with("multipart/form-data");
|
||||||
|
let wants_json =
|
||||||
|
accept.contains("application/json") || content_type.starts_with("application/json");
|
||||||
|
|
||||||
|
if is_form_submit && !wants_json {
|
||||||
|
let ctx = crate::handlers::ui::base_context(&handle, None);
|
||||||
|
let mut resp = crate::handlers::ui::render(&state, "csrf_error.html", &ctx);
|
||||||
|
*resp.status_mut() = StatusCode::FORBIDDEN;
|
||||||
|
return resp;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut resp = (
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
[(header::CONTENT_TYPE, "application/json")],
|
||||||
|
r#"{"error":"Invalid CSRF token"}"#,
|
||||||
|
)
|
||||||
|
.into_response();
|
||||||
|
*resp.status_mut() = StatusCode::FORBIDDEN;
|
||||||
|
resp
|
||||||
}
|
}
|
||||||
|
|
||||||
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
|
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
|
||||||
107
crates/myfsio-server/src/middleware/sha_body.rs
Normal file
107
crates/myfsio-server/src/middleware/sha_body.rs
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
use axum::body::Body;
|
||||||
|
use bytes::Bytes;
|
||||||
|
use http_body::{Body as HttpBody, Frame};
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
use std::error::Error;
|
||||||
|
use std::fmt;
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct Sha256MismatchError {
|
||||||
|
expected: String,
|
||||||
|
computed: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Sha256MismatchError {
|
||||||
|
fn message(&self) -> String {
|
||||||
|
format!(
|
||||||
|
"The x-amz-content-sha256 you specified did not match what we received (expected {}, computed {})",
|
||||||
|
self.expected, self.computed
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Sha256MismatchError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"XAmzContentSHA256Mismatch: expected {}, computed {}",
|
||||||
|
self.expected, self.computed
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for Sha256MismatchError {}
|
||||||
|
|
||||||
|
/// Body wrapper that hashes every data frame as it streams through and, at
/// end-of-stream, fails with [`Sha256MismatchError`] if the computed digest
/// differs from the expected one.
pub struct Sha256VerifyBody {
    // Wrapped axum body being streamed through.
    inner: Body,
    // Lowercased hex digest declared by the client.
    expected: String,
    // Running hasher; `take()`n exactly once at end-of-stream, so `None`
    // means verification already happened.
    hasher: Option<Sha256>,
}
|
||||||
|
|
||||||
|
impl Sha256VerifyBody {
|
||||||
|
pub fn new(inner: Body, expected_hex: String) -> Self {
|
||||||
|
Self {
|
||||||
|
inner,
|
||||||
|
expected: expected_hex.to_ascii_lowercase(),
|
||||||
|
hasher: Some(Sha256::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HttpBody for Sha256VerifyBody {
    type Data = Bytes;
    // Boxed so both the inner body's error and our mismatch error fit.
    type Error = Box<dyn std::error::Error + Send + Sync>;

    /// Forwards frames from the inner body, folding every data frame into
    /// the running hash. When the inner body signals end-of-stream, the
    /// digest is finalized once and compared against `expected`; a mismatch
    /// surfaces as a stream error instead of `None`.
    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        let this = self.as_mut().get_mut();
        match Pin::new(&mut this.inner).poll_frame(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(Box::new(e)))),
            Poll::Ready(Some(Ok(frame))) => {
                // Trailer frames have no data_ref and are passed through unhashed.
                if let Some(data) = frame.data_ref() {
                    if let Some(h) = this.hasher.as_mut() {
                        h.update(data);
                    }
                }
                Poll::Ready(Some(Ok(frame)))
            }
            Poll::Ready(None) => {
                // `take()` guarantees the check runs at most once even if the
                // body is polled again after completion.
                if let Some(hasher) = this.hasher.take() {
                    let computed = hex::encode(hasher.finalize());
                    if computed != this.expected {
                        return Poll::Ready(Some(Err(Box::new(Sha256MismatchError {
                            expected: this.expected.clone(),
                            computed,
                        }))));
                    }
                }
                Poll::Ready(None)
            }
        }
    }

    fn is_end_stream(&self) -> bool {
        self.inner.is_end_stream()
    }

    fn size_hint(&self) -> http_body::SizeHint {
        self.inner.size_hint()
    }
}
|
||||||
|
|
||||||
|
/// Returns `true` when `s` has the shape of a hex-encoded SHA-256 digest:
/// exactly 64 ASCII hex digits (either case).
pub fn is_hex_sha256(s: &str) -> bool {
    if s.len() != 64 {
        return false;
    }
    s.chars().all(|c| c.is_ascii_hexdigit())
}
|
||||||
|
|
||||||
|
pub fn sha256_mismatch_message(err: &(dyn Error + 'static)) -> Option<String> {
|
||||||
|
if let Some(mismatch) = err.downcast_ref::<Sha256MismatchError>() {
|
||||||
|
return Some(mismatch.message());
|
||||||
|
}
|
||||||
|
|
||||||
|
err.source().and_then(sha256_mismatch_message)
|
||||||
|
}
|
||||||
276
crates/myfsio-server/src/services/acl.rs
Normal file
276
crates/myfsio-server/src/services/acl.rs
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
|
/// Object-metadata key under which a serialized per-object ACL is stored.
pub const ACL_METADATA_KEY: &str = "__acl__";
/// Grantee wildcard matching every request, anonymous or not.
pub const GRANTEE_ALL_USERS: &str = "*";
/// Grantee matching any authenticated principal.
pub const GRANTEE_AUTHENTICATED_USERS: &str = "authenticated";

// S3 ACL permission names as they appear in AccessControlPolicy documents.
const ACL_PERMISSION_FULL_CONTROL: &str = "FULL_CONTROL";
const ACL_PERMISSION_WRITE: &str = "WRITE";
const ACL_PERMISSION_WRITE_ACP: &str = "WRITE_ACP";
const ACL_PERMISSION_READ: &str = "READ";
const ACL_PERMISSION_READ_ACP: &str = "READ_ACP";
|
||||||
|
|
||||||
|
/// A single ACL grant: one grantee (canonical user id, `*`, or
/// `authenticated`) mapped to one S3 permission name.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AclGrant {
    pub grantee: String,
    pub permission: String,
}
|
||||||
|
|
||||||
|
/// Access-control list for a bucket or object: an owner plus zero or more
/// grants. The owner implicitly holds FULL_CONTROL (see `allowed_actions`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Acl {
    pub owner: String,
    // Default keeps older serialized ACLs without a grants array deserializable.
    #[serde(default)]
    pub grants: Vec<AclGrant>,
}
|
||||||
|
|
||||||
|
impl Acl {
|
||||||
|
pub fn allowed_actions(
|
||||||
|
&self,
|
||||||
|
principal_id: Option<&str>,
|
||||||
|
is_authenticated: bool,
|
||||||
|
) -> HashSet<&'static str> {
|
||||||
|
let mut actions = HashSet::new();
|
||||||
|
if let Some(principal_id) = principal_id {
|
||||||
|
if principal_id == self.owner {
|
||||||
|
actions.extend(permission_to_actions(ACL_PERMISSION_FULL_CONTROL));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for grant in &self.grants {
|
||||||
|
if grant.grantee == GRANTEE_ALL_USERS {
|
||||||
|
actions.extend(permission_to_actions(&grant.permission));
|
||||||
|
} else if grant.grantee == GRANTEE_AUTHENTICATED_USERS && is_authenticated {
|
||||||
|
actions.extend(permission_to_actions(&grant.permission));
|
||||||
|
} else if let Some(principal_id) = principal_id {
|
||||||
|
if grant.grantee == principal_id {
|
||||||
|
actions.extend(permission_to_actions(&grant.permission));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
actions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn create_canned_acl(canned_acl: &str, owner: &str) -> Acl {
|
||||||
|
let owner_grant = AclGrant {
|
||||||
|
grantee: owner.to_string(),
|
||||||
|
permission: ACL_PERMISSION_FULL_CONTROL.to_string(),
|
||||||
|
};
|
||||||
|
match canned_acl {
|
||||||
|
"public-read" => Acl {
|
||||||
|
owner: owner.to_string(),
|
||||||
|
grants: vec![
|
||||||
|
owner_grant,
|
||||||
|
AclGrant {
|
||||||
|
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||||
|
permission: ACL_PERMISSION_READ.to_string(),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"public-read-write" => Acl {
|
||||||
|
owner: owner.to_string(),
|
||||||
|
grants: vec![
|
||||||
|
owner_grant,
|
||||||
|
AclGrant {
|
||||||
|
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||||
|
permission: ACL_PERMISSION_READ.to_string(),
|
||||||
|
},
|
||||||
|
AclGrant {
|
||||||
|
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||||
|
permission: ACL_PERMISSION_WRITE.to_string(),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"authenticated-read" => Acl {
|
||||||
|
owner: owner.to_string(),
|
||||||
|
grants: vec![
|
||||||
|
owner_grant,
|
||||||
|
AclGrant {
|
||||||
|
grantee: GRANTEE_AUTHENTICATED_USERS.to_string(),
|
||||||
|
permission: ACL_PERMISSION_READ.to_string(),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"bucket-owner-read" | "bucket-owner-full-control" | "private" | _ => Acl {
|
||||||
|
owner: owner.to_string(),
|
||||||
|
grants: vec![owner_grant],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serializes an [`Acl`] into an S3 `AccessControlPolicy` XML document.
///
/// Group grantees (`*`, `authenticated`) are emitted as the corresponding
/// AWS global group URIs; any other grantee is emitted as a CanonicalUser
/// with its id used for both ID and DisplayName. All interpolated values go
/// through `xml_escape`.
pub fn acl_to_xml(acl: &Acl) -> String {
    // Line-continuation backslashes keep the literal free of embedded
    // newlines/indentation in the produced XML.
    let mut xml = format!(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
         <AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
         <Owner><ID>{}</ID><DisplayName>{}</DisplayName></Owner>\
         <AccessControlList>",
        xml_escape(&acl.owner),
        xml_escape(&acl.owner),
    );
    for grant in &acl.grants {
        xml.push_str("<Grant>");
        match grant.grantee.as_str() {
            GRANTEE_ALL_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                     <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>\
                     </Grantee>",
                );
            }
            GRANTEE_AUTHENTICATED_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                     <URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>\
                     </Grantee>",
                );
            }
            other => {
                xml.push_str(&format!(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
                     <ID>{}</ID><DisplayName>{}</DisplayName>\
                     </Grantee>",
                    xml_escape(other),
                    xml_escape(other),
                ));
            }
        }
        xml.push_str(&format!(
            "<Permission>{}</Permission></Grant>",
            xml_escape(&grant.permission)
        ));
    }
    xml.push_str("</AccessControlList></AccessControlPolicy>");
    xml
}
|
||||||
|
|
||||||
|
pub fn acl_from_bucket_config(value: &Value) -> Option<Acl> {
|
||||||
|
match value {
|
||||||
|
Value::String(raw) => acl_from_xml(raw).or_else(|| serde_json::from_str(raw).ok()),
|
||||||
|
Value::Object(_) => serde_json::from_value(value.clone()).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn acl_from_object_metadata(metadata: &HashMap<String, String>) -> Option<Acl> {
|
||||||
|
metadata
|
||||||
|
.get(ACL_METADATA_KEY)
|
||||||
|
.and_then(|raw| serde_json::from_str::<Acl>(raw).ok())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn store_object_acl(metadata: &mut HashMap<String, String>, acl: &Acl) {
|
||||||
|
if let Ok(serialized) = serde_json::to_string(acl) {
|
||||||
|
metadata.insert(ACL_METADATA_KEY.to_string(), serialized);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Best-effort parse of an S3 AccessControlPolicy XML document.
///
/// Returns `None` only when the XML itself fails to parse; a missing Owner
/// falls back to "myfsio", and grants with an empty permission or grantee
/// are skipped rather than treated as errors.
fn acl_from_xml(xml: &str) -> Option<Acl> {
    let doc = roxmltree::Document::parse(xml).ok()?;
    let owner = doc
        .descendants()
        .find(|node| node.is_element() && node.tag_name().name() == "Owner")
        .and_then(|node| {
            node.children()
                .find(|child| child.is_element() && child.tag_name().name() == "ID")
                .and_then(|child| child.text())
        })
        // Default owner when the document omits <Owner><ID>.
        .unwrap_or("myfsio")
        .trim()
        .to_string();

    let mut grants = Vec::new();
    for grant in doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Grant")
    {
        let permission = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Permission")
            .and_then(|child| child.text())
            .unwrap_or_default()
            .trim()
            .to_string();
        if permission.is_empty() {
            continue;
        }
        let grantee_node = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Grantee");
        let grantee = grantee_node
            .and_then(|node| {
                // AWS group URIs map back to the internal group markers;
                // anything else is looked up as a CanonicalUser <ID>.
                let uri = node
                    .children()
                    .find(|child| child.is_element() && child.tag_name().name() == "URI")
                    .and_then(|child| child.text())
                    .map(|text| text.trim().to_string());
                match uri.as_deref() {
                    Some("http://acs.amazonaws.com/groups/global/AllUsers") => {
                        Some(GRANTEE_ALL_USERS.to_string())
                    }
                    Some("http://acs.amazonaws.com/groups/global/AuthenticatedUsers") => {
                        Some(GRANTEE_AUTHENTICATED_USERS.to_string())
                    }
                    _ => node
                        .children()
                        .find(|child| child.is_element() && child.tag_name().name() == "ID")
                        .and_then(|child| child.text())
                        .map(|text| text.trim().to_string()),
                }
            })
            .unwrap_or_default();
        if grantee.is_empty() {
            continue;
        }
        grants.push(AclGrant {
            grantee,
            permission,
        });
    }

    Some(Acl { owner, grants })
}
|
||||||
|
|
||||||
|
fn permission_to_actions(permission: &str) -> &'static [&'static str] {
|
||||||
|
match permission {
|
||||||
|
ACL_PERMISSION_FULL_CONTROL => &["read", "write", "delete", "list", "share"],
|
||||||
|
ACL_PERMISSION_WRITE => &["write", "delete"],
|
||||||
|
ACL_PERMISSION_WRITE_ACP => &["share"],
|
||||||
|
ACL_PERMISSION_READ => &["read", "list"],
|
||||||
|
ACL_PERMISSION_READ_ACP => &["share"],
|
||||||
|
_ => &[],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Escapes the five XML special characters so `s` can be safely embedded in
/// element content or attribute values.
///
/// Fix: the escape strings had been collapsed to the literal characters
/// themselves (an HTML-entity decoding artifact), which made every
/// `replace` a no-op — and `.replace('"', """)` was not even valid Rust.
/// `&` is escaped first so previously inserted entities are not
/// double-escaped.
fn xml_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&#39;")
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A "public-read" canned ACL must let an anonymous caller read and list
    // but never write.
    #[test]
    fn canned_acl_grants_public_read() {
        let acl = create_canned_acl("public-read", "owner");
        let actions = acl.allowed_actions(None, false);
        assert!(actions.contains("read"));
        assert!(actions.contains("list"));
        assert!(!actions.contains("write"));
    }

    // Serializing an ACL to XML and parsing it back through the bucket-config
    // path must preserve the owner and both grants.
    #[test]
    fn xml_round_trip_preserves_grants() {
        let acl = create_canned_acl("authenticated-read", "owner");
        let parsed = acl_from_bucket_config(&Value::String(acl_to_xml(&acl))).unwrap();
        assert_eq!(parsed.owner, "owner");
        assert_eq!(parsed.grants.len(), 2);
        assert!(parsed
            .grants
            .iter()
            .any(|grant| grant.grantee == GRANTEE_AUTHENTICATED_USERS));
    }
}
|
||||||
@@ -24,6 +24,35 @@ impl Default for GcConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // With dry_run enabled the GC must still *count* a stale temp file in its
    // report while leaving the file on disk.
    #[tokio::test]
    async fn dry_run_reports_but_does_not_delete_temp_files() {
        let tmp = tempfile::tempdir().unwrap();
        let tmp_dir = tmp.path().join(".myfsio.sys").join("tmp");
        std::fs::create_dir_all(&tmp_dir).unwrap();
        let file_path = tmp_dir.join("stale.tmp");
        std::fs::write(&file_path, b"temporary").unwrap();
        // Give the file a nonzero age; max age is 0 hours so it counts as stale.
        tokio::time::sleep(std::time::Duration::from_millis(5)).await;

        let service = GcService::new(
            tmp.path().to_path_buf(),
            GcConfig {
                temp_file_max_age_hours: 0.0,
                dry_run: true,
                ..GcConfig::default()
            },
        );

        let result = service.run_now(false).await.unwrap();

        // Reported as deleted in the summary, but physically untouched.
        assert_eq!(result["temp_files_deleted"], 1);
        assert!(file_path.exists());
    }
}
|
||||||
|
|
||||||
pub struct GcService {
|
pub struct GcService {
|
||||||
storage_root: PathBuf,
|
storage_root: PathBuf,
|
||||||
config: GcConfig,
|
config: GcConfig,
|
||||||
637
crates/myfsio-server/src/services/lifecycle.rs
Normal file
637
crates/myfsio-server/src/services/lifecycle.rs
Normal file
@@ -0,0 +1,637 @@
|
|||||||
|
use chrono::{DateTime, Duration, Utc};
|
||||||
|
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||||
|
use myfsio_storage::traits::StorageEngine;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use std::collections::VecDeque;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
/// Tunables for the background lifecycle evaluator.
pub struct LifecycleConfig {
    // Seconds between background evaluation cycles.
    pub interval_seconds: u64,
    // Cap on persisted execution records per bucket (oldest dropped first).
    pub max_history_per_bucket: usize,
}
|
||||||
|
|
||||||
|
impl Default for LifecycleConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
interval_seconds: 3600,
|
||||||
|
max_history_per_bucket: 50,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// One persisted record of a lifecycle pass over a single bucket, as stored
/// in that bucket's `lifecycle_history.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleExecutionRecord {
    // Unix time in fractional seconds when the record was written.
    pub timestamp: f64,
    pub bucket_name: String,
    pub objects_deleted: u64,
    pub versions_deleted: u64,
    pub uploads_aborted: u64,
    // Default keeps records written before the errors field existed readable.
    #[serde(default)]
    pub errors: Vec<String>,
    pub execution_time_seconds: f64,
}
|
||||||
|
|
||||||
|
/// In-memory tally of what one evaluation pass did to one bucket; converted
/// to a [`LifecycleExecutionRecord`] before being persisted.
#[derive(Debug, Clone, Default)]
struct BucketLifecycleResult {
    bucket_name: String,
    objects_deleted: u64,
    versions_deleted: u64,
    uploads_aborted: u64,
    errors: Vec<String>,
    execution_time_seconds: f64,
}
|
||||||
|
|
||||||
|
/// Normalized form of one lifecycle rule, whether it arrived as S3 XML or
/// JSON. Absent clauses stay `None`; an empty prefix matches every key.
#[derive(Debug, Clone, Default)]
struct ParsedLifecycleRule {
    // "Enabled" rules run; anything else is skipped.
    status: String,
    prefix: String,
    expiration_days: Option<u64>,
    expiration_date: Option<DateTime<Utc>>,
    noncurrent_days: Option<u64>,
    abort_incomplete_multipart_days: Option<u64>,
}
|
||||||
|
|
||||||
|
/// Background service that applies per-bucket lifecycle rules: object
/// expiration, noncurrent-version expiration, and stale multipart aborts.
pub struct LifecycleService {
    storage: Arc<FsStorageBackend>,
    // Filesystem root; used for direct access to version manifests and
    // per-bucket history files under `.myfsio.sys`.
    storage_root: PathBuf,
    config: LifecycleConfig,
    // Re-entrancy guard: true while an evaluation pass is in flight.
    running: Arc<RwLock<bool>>,
}
|
||||||
|
|
||||||
|
impl LifecycleService {
    /// Builds a lifecycle service over the given backend, storage root and
    /// configuration; starts idle (no pass running).
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: impl Into<PathBuf>,
        config: LifecycleConfig,
    ) -> Self {
        Self {
            storage,
            storage_root: storage_root.into(),
            config,
            running: Arc::new(RwLock::new(false)),
        }
    }

    /// Runs a single evaluation pass, or errors if one is already running.
    pub async fn run_cycle(&self) -> Result<Value, String> {
        {
            // Claim the run under the write lock, then drop the lock so the
            // (potentially long) evaluation does not block status readers.
            let mut running = self.running.write().await;
            if *running {
                return Err("Lifecycle already running".to_string());
            }
            *running = true;
        }

        let result = self.evaluate_rules().await;
        // NOTE(review): the flag would stay set if evaluate_rules panicked;
        // it currently reports failures inside its JSON result instead.
        *self.running.write().await = false;
        Ok(result)
    }

    /// Evaluates every bucket's lifecycle rules and returns a JSON summary.
    /// Per-bucket failures are recorded, never propagated.
    async fn evaluate_rules(&self) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(buckets) => buckets,
            Err(err) => return json!({ "error": err.to_string() }),
        };

        let mut bucket_results = Vec::new();
        let mut total_objects_deleted = 0u64;
        let mut total_versions_deleted = 0u64;
        let mut total_uploads_aborted = 0u64;
        let mut errors = Vec::new();

        for bucket in &buckets {
            let started_at = std::time::Instant::now();
            let mut result = BucketLifecycleResult {
                bucket_name: bucket.name.clone(),
                ..Default::default()
            };

            let config = match self.storage.get_bucket_config(&bucket.name).await {
                Ok(config) => config,
                Err(err) => {
                    // Config read failed: record the error for this bucket
                    // and move on to the next one.
                    result.errors.push(err.to_string());
                    result.execution_time_seconds = started_at.elapsed().as_secs_f64();
                    self.append_history(&result);
                    errors.extend(result.errors.clone());
                    bucket_results.push(result);
                    continue;
                }
            };
            // Buckets without lifecycle configuration are skipped silently.
            let Some(lifecycle) = config.lifecycle.as_ref() else {
                continue;
            };
            let rules = parse_lifecycle_rules(lifecycle);
            if rules.is_empty() {
                continue;
            }

            for rule in &rules {
                if rule.status != "Enabled" {
                    continue;
                }
                // Each applier returns Some(message) only for a rule-level
                // failure; per-object failures go straight into result.errors.
                if let Some(err) = self
                    .apply_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_noncurrent_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_abort_incomplete_multipart_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
            }

            result.execution_time_seconds = started_at.elapsed().as_secs_f64();
            // Only buckets where something happened (or failed) are totalled
            // and persisted; quiet buckets leave no history entry.
            if result.objects_deleted > 0
                || result.versions_deleted > 0
                || result.uploads_aborted > 0
                || !result.errors.is_empty()
            {
                total_objects_deleted += result.objects_deleted;
                total_versions_deleted += result.versions_deleted;
                total_uploads_aborted += result.uploads_aborted;
                errors.extend(result.errors.clone());
                self.append_history(&result);
                bucket_results.push(result);
            }
        }

        json!({
            "objects_deleted": total_objects_deleted,
            "versions_deleted": total_versions_deleted,
            "multipart_aborted": total_uploads_aborted,
            "buckets_evaluated": buckets.len(),
            "results": bucket_results.iter().map(result_to_json).collect::<Vec<_>>(),
            "errors": errors,
        })
    }

    /// Deletes current objects older than the rule's cutoff (either
    /// `Days`-relative or an absolute `Date`). Returns Some(message) only if
    /// listing the bucket failed.
    async fn apply_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        // Days takes precedence over an absolute Date when both are present.
        let cutoff = if let Some(days) = rule.expiration_days {
            Some(Utc::now() - Duration::days(days as i64))
        } else {
            rule.expiration_date
        };
        let Some(cutoff) = cutoff else {
            return None;
        };

        // NOTE(review): a single page capped at 10k keys — objects beyond
        // that are picked up on subsequent cycles.
        let params = myfsio_common::types::ListParams {
            max_keys: 10_000,
            prefix: if rule.prefix.is_empty() {
                None
            } else {
                Some(rule.prefix.clone())
            },
            ..Default::default()
        };
        match self.storage.list_objects(bucket, &params).await {
            Ok(objects) => {
                for object in &objects.objects {
                    if object.last_modified < cutoff {
                        if let Err(err) = self.storage.delete_object(bucket, &object.key).await {
                            result
                                .errors
                                .push(format!("{}:{}: {}", bucket, object.key, err));
                        } else {
                            result.objects_deleted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!("Failed to list objects for {}: {}", bucket, err)),
        }
    }

    /// Walks the bucket's on-disk version manifests and removes noncurrent
    /// versions archived before the cutoff. Returns Some(message) only if a
    /// directory could not be read.
    async fn apply_noncurrent_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.noncurrent_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        let versions_root = version_root_for_bucket(&self.storage_root, bucket);
        if !versions_root.exists() {
            return None;
        }

        // Iterative breadth-first walk of the versions tree.
        let mut stack = VecDeque::from([versions_root]);
        while let Some(current) = stack.pop_front() {
            let entries = match std::fs::read_dir(&current) {
                Ok(entries) => entries,
                Err(err) => return Some(err.to_string()),
            };
            for entry in entries.flatten() {
                let file_type = match entry.file_type() {
                    Ok(file_type) => file_type,
                    Err(_) => continue,
                };
                if file_type.is_dir() {
                    stack.push_back(entry.path());
                    continue;
                }
                // Only `.json` manifests describe versions; data blobs are
                // handled via their manifest below.
                if entry.path().extension().and_then(|ext| ext.to_str()) != Some("json") {
                    continue;
                }
                let contents = match std::fs::read_to_string(entry.path()) {
                    Ok(contents) => contents,
                    Err(_) => continue,
                };
                let Ok(manifest) = serde_json::from_str::<Value>(&contents) else {
                    continue;
                };
                let key = manifest
                    .get("key")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default()
                    .to_string();
                if !rule.prefix.is_empty() && !key.starts_with(&rule.prefix) {
                    continue;
                }
                let archived_at = manifest
                    .get("archived_at")
                    .and_then(|value| value.as_str())
                    .and_then(|value| DateTime::parse_from_rfc3339(value).ok())
                    .map(|value| value.with_timezone(&Utc));
                // Unparseable/missing timestamps are treated as not expired.
                if archived_at.is_none() || archived_at.unwrap() >= cutoff {
                    continue;
                }
                let version_id = manifest
                    .get("version_id")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default();
                // Remove the data blob first, then the manifest; both
                // best-effort (the version counts as deleted regardless).
                let data_path = entry.path().with_file_name(format!("{}.bin", version_id));
                let _ = std::fs::remove_file(&data_path);
                let _ = std::fs::remove_file(entry.path());
                result.versions_deleted += 1;
            }
        }
        None
    }

    /// Aborts multipart uploads initiated before the rule's cutoff. Returns
    /// Some(message) only if listing the uploads failed.
    async fn apply_abort_incomplete_multipart_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.abort_incomplete_multipart_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        match self.storage.list_multipart_uploads(bucket).await {
            Ok(uploads) => {
                for upload in &uploads {
                    if upload.initiated < cutoff {
                        if let Err(err) = self
                            .storage
                            .abort_multipart(bucket, &upload.upload_id)
                            .await
                        {
                            result
                                .errors
                                .push(format!("abort {}: {}", upload.upload_id, err));
                        } else {
                            result.uploads_aborted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!(
                "Failed to list multipart uploads for {}: {}",
                bucket, err
            )),
        }
    }

    /// Prepends an execution record to the bucket's history file, trimming
    /// to `max_history_per_bucket`. All I/O is best-effort.
    fn append_history(&self, result: &BucketLifecycleResult) {
        let path = lifecycle_history_path(&self.storage_root, &result.bucket_name);
        let mut history = load_history(&path);
        // Newest record first.
        history.insert(
            0,
            LifecycleExecutionRecord {
                timestamp: Utc::now().timestamp_millis() as f64 / 1000.0,
                bucket_name: result.bucket_name.clone(),
                objects_deleted: result.objects_deleted,
                versions_deleted: result.versions_deleted,
                uploads_aborted: result.uploads_aborted,
                errors: result.errors.clone(),
                execution_time_seconds: result.execution_time_seconds,
            },
        );
        history.truncate(self.config.max_history_per_bucket);
        let payload = json!({
            "executions": history,
        });
        if let Some(parent) = path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &path,
            serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string()),
        );
    }

    /// Spawns the periodic background loop. The first tick completes
    /// immediately, so the initial sleep comes before the first real cycle.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_seconds);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            // Consume the immediate first tick so we wait a full interval
            // before the first evaluation.
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Lifecycle evaluation starting");
                match self.run_cycle().await {
                    Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
                    Err(err) => tracing::warn!("Lifecycle cycle failed: {}", err),
                }
            }
        })
    }
}
|
||||||
|
|
||||||
|
pub fn read_history(storage_root: &Path, bucket_name: &str, limit: usize, offset: usize) -> Value {
|
||||||
|
let path = lifecycle_history_path(storage_root, bucket_name);
|
||||||
|
let mut history = load_history(&path);
|
||||||
|
let total = history.len();
|
||||||
|
let executions = history
|
||||||
|
.drain(offset.min(total)..)
|
||||||
|
.take(limit)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
json!({
|
||||||
|
"executions": executions,
|
||||||
|
"total": total,
|
||||||
|
"limit": limit,
|
||||||
|
"offset": offset,
|
||||||
|
"enabled": true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_history(path: &Path) -> Vec<LifecycleExecutionRecord> {
|
||||||
|
if !path.exists() {
|
||||||
|
return Vec::new();
|
||||||
|
}
|
||||||
|
std::fs::read_to_string(path)
|
||||||
|
.ok()
|
||||||
|
.and_then(|contents| serde_json::from_str::<Value>(&contents).ok())
|
||||||
|
.and_then(|value| value.get("executions").cloned())
|
||||||
|
.and_then(|value| serde_json::from_value::<Vec<LifecycleExecutionRecord>>(value).ok())
|
||||||
|
.unwrap_or_default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Path of a bucket's lifecycle execution-history file, kept under the
/// hidden `.myfsio.sys` system directory.
fn lifecycle_history_path(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    for component in [".myfsio.sys", "buckets", bucket_name, "lifecycle_history.json"] {
        path.push(component);
    }
    path
}
|
||||||
|
|
||||||
|
/// Root directory of a bucket's archived version manifests under the
/// `.myfsio.sys` system directory.
fn version_root_for_bucket(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    for component in [".myfsio.sys", "buckets", bucket_name, "versions"] {
        path.push(component);
    }
    path
}
|
||||||
|
|
||||||
|
fn parse_lifecycle_rules(value: &Value) -> Vec<ParsedLifecycleRule> {
|
||||||
|
match value {
|
||||||
|
Value::String(raw) => parse_lifecycle_rules_from_string(raw),
|
||||||
|
Value::Array(items) => items.iter().filter_map(parse_lifecycle_rule).collect(),
|
||||||
|
Value::Object(map) => map
|
||||||
|
.get("Rules")
|
||||||
|
.and_then(|rules| rules.as_array())
|
||||||
|
.map(|rules| rules.iter().filter_map(parse_lifecycle_rule).collect())
|
||||||
|
.unwrap_or_default(),
|
||||||
|
_ => Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parses a raw lifecycle configuration string: tried as JSON first, then as
/// S3 `LifecycleConfiguration` XML. An unparseable string yields no rules.
fn parse_lifecycle_rules_from_string(raw: &str) -> Vec<ParsedLifecycleRule> {
    if let Ok(json) = serde_json::from_str::<Value>(raw) {
        return parse_lifecycle_rules(&json);
    }
    let Ok(doc) = roxmltree::Document::parse(raw) else {
        return Vec::new();
    };
    doc.descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Rule")
        .map(|rule| ParsedLifecycleRule {
            // A rule without a Status is treated as enabled.
            status: child_text(&rule, "Status").unwrap_or_else(|| "Enabled".to_string()),
            // Legacy top-level <Prefix> wins; otherwise fall back to the
            // newer <Filter><Prefix> form.
            prefix: child_text(&rule, "Prefix")
                .or_else(|| {
                    rule.descendants()
                        .find(|node| {
                            node.is_element()
                                && node.tag_name().name() == "Filter"
                                && node.children().any(|child| {
                                    child.is_element() && child.tag_name().name() == "Prefix"
                                })
                        })
                        .and_then(|filter| child_text(&filter, "Prefix"))
                })
                .unwrap_or_default(),
            expiration_days: rule
                .descendants()
                .find(|node| node.is_element() && node.tag_name().name() == "Expiration")
                .and_then(|expiration| child_text(&expiration, "Days"))
                .and_then(|value| value.parse::<u64>().ok()),
            // NOTE(review): parsed as RFC 3339; a date-only value like
            // "2024-01-01" will not parse — confirm against writers.
            expiration_date: rule
                .descendants()
                .find(|node| node.is_element() && node.tag_name().name() == "Expiration")
                .and_then(|expiration| child_text(&expiration, "Date"))
                .as_deref()
                .and_then(parse_datetime),
            noncurrent_days: rule
                .descendants()
                .find(|node| {
                    node.is_element() && node.tag_name().name() == "NoncurrentVersionExpiration"
                })
                .and_then(|node| child_text(&node, "NoncurrentDays"))
                .and_then(|value| value.parse::<u64>().ok()),
            abort_incomplete_multipart_days: rule
                .descendants()
                .find(|node| {
                    node.is_element() && node.tag_name().name() == "AbortIncompleteMultipartUpload"
                })
                .and_then(|node| child_text(&node, "DaysAfterInitiation"))
                .and_then(|value| value.parse::<u64>().ok()),
        })
        .collect()
}
|
||||||
|
|
||||||
|
fn parse_lifecycle_rule(value: &Value) -> Option<ParsedLifecycleRule> {
|
||||||
|
let map = value.as_object()?;
|
||||||
|
Some(ParsedLifecycleRule {
|
||||||
|
status: map
|
||||||
|
.get("Status")
|
||||||
|
.and_then(|value| value.as_str())
|
||||||
|
.unwrap_or("Enabled")
|
||||||
|
.to_string(),
|
||||||
|
prefix: map
|
||||||
|
.get("Prefix")
|
||||||
|
.and_then(|value| value.as_str())
|
||||||
|
.or_else(|| {
|
||||||
|
map.get("Filter")
|
||||||
|
.and_then(|value| value.get("Prefix"))
|
||||||
|
.and_then(|value| value.as_str())
|
||||||
|
})
|
||||||
|
.unwrap_or_default()
|
||||||
|
.to_string(),
|
||||||
|
expiration_days: map
|
||||||
|
.get("Expiration")
|
||||||
|
.and_then(|value| value.get("Days"))
|
||||||
|
.and_then(|value| value.as_u64()),
|
||||||
|
expiration_date: map
|
||||||
|
.get("Expiration")
|
||||||
|
.and_then(|value| value.get("Date"))
|
||||||
|
.and_then(|value| value.as_str())
|
||||||
|
.and_then(parse_datetime),
|
||||||
|
noncurrent_days: map
|
||||||
|
.get("NoncurrentVersionExpiration")
|
||||||
|
.and_then(|value| value.get("NoncurrentDays"))
|
||||||
|
.and_then(|value| value.as_u64()),
|
||||||
|
abort_incomplete_multipart_days: map
|
||||||
|
.get("AbortIncompleteMultipartUpload")
|
||||||
|
.and_then(|value| value.get("DaysAfterInitiation"))
|
||||||
|
.and_then(|value| value.as_u64()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_datetime(value: &str) -> Option<DateTime<Utc>> {
|
||||||
|
DateTime::parse_from_rfc3339(value)
|
||||||
|
.ok()
|
||||||
|
.map(|value| value.with_timezone(&Utc))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||||
|
node.children()
|
||||||
|
.find(|child| child.is_element() && child.tag_name().name() == name)
|
||||||
|
.and_then(|child| child.text())
|
||||||
|
.map(|text| text.trim().to_string())
|
||||||
|
.filter(|text| !text.is_empty())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serializes a `BucketLifecycleResult` into the JSON shape used by the
/// lifecycle run result / history records (see the `run_cycle` test below,
/// which reads `versions_deleted` back out of this object).
fn result_to_json(result: &BucketLifecycleResult) -> Value {
    json!({
        "bucket_name": result.bucket_name,
        "objects_deleted": result.objects_deleted,
        "versions_deleted": result.versions_deleted,
        "uploads_aborted": result.uploads_aborted,
        "errors": result.errors,
        "execution_time_seconds": result.execution_time_seconds,
    })
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;

    // Verifies the XML parser extracts prefix, expiration days and the
    // noncurrent/multipart settings from a single <Rule>.
    #[test]
    fn parses_rules_from_xml() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <Status>Enabled</Status>
        <Filter><Prefix>logs/</Prefix></Filter>
        <Expiration><Days>10</Days></Expiration>
        <NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
        <AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
    </Rule>
</LifecycleConfiguration>"#;
        let rules = parse_lifecycle_rules(&Value::String(xml.to_string()));
        assert_eq!(rules.len(), 1);
        assert_eq!(rules[0].prefix, "logs/");
        assert_eq!(rules[0].expiration_days, Some(10));
        assert_eq!(rules[0].noncurrent_days, Some(30));
        assert_eq!(rules[0].abort_incomplete_multipart_days, Some(7));
    }

    // End-to-end check: a NoncurrentDays=30 rule deletes a version whose
    // manifest is backdated 45 days, and the run is recorded in history.
    #[tokio::test]
    async fn run_cycle_writes_history_and_deletes_noncurrent_versions() {
        let tmp = tempfile::tempdir().unwrap();
        let storage = Arc::new(FsStorageBackend::new(tmp.path().to_path_buf()));
        storage.create_bucket("docs").await.unwrap();
        storage.set_versioning("docs", true).await.unwrap();

        // Put the same key twice so the first body becomes a noncurrent version.
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"old".to_vec())),
                None,
            )
            .await
            .unwrap();
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"new".to_vec())),
                None,
            )
            .await
            .unwrap();

        // Locate the version manifest on disk and rewrite it with an
        // archived_at 45 days in the past so the 30-day rule matches it.
        let versions_root = version_root_for_bucket(tmp.path(), "docs")
            .join("logs")
            .join("file.txt");
        let manifest = std::fs::read_dir(&versions_root)
            .unwrap()
            .flatten()
            .find(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json"))
            .unwrap()
            .path();
        let old_manifest = json!({
            "version_id": "ver-1",
            "key": "logs/file.txt",
            "size": 3,
            "archived_at": (Utc::now() - Duration::days(45)).to_rfc3339(),
            "etag": "etag",
        });
        std::fs::write(&manifest, serde_json::to_string(&old_manifest).unwrap()).unwrap();
        // Payload blob matching the rewritten manifest's version_id.
        std::fs::write(manifest.with_file_name("ver-1.bin"), b"old").unwrap();

        // Attach a lifecycle rule (noncurrent versions only) to the bucket.
        let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <Status>Enabled</Status>
        <Filter><Prefix>logs/</Prefix></Filter>
        <NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
    </Rule>
</LifecycleConfiguration>"#;
        let mut config = storage.get_bucket_config("docs").await.unwrap();
        config.lifecycle = Some(Value::String(lifecycle_xml.to_string()));
        storage.set_bucket_config("docs", &config).await.unwrap();

        let service =
            LifecycleService::new(storage.clone(), tmp.path(), LifecycleConfig::default());
        let result = service.run_cycle().await.unwrap();
        assert_eq!(result["versions_deleted"], 1);

        // The execution must also be visible through the history reader.
        let history = read_history(tmp.path(), "docs", 50, 0);
        assert_eq!(history["total"], 1);
        assert_eq!(history["executions"][0]["versions_deleted"], 1);
    }
}
|
||||||
@@ -1,8 +1,11 @@
|
|||||||
pub mod access_logging;
|
pub mod access_logging;
|
||||||
|
pub mod acl;
|
||||||
pub mod gc;
|
pub mod gc;
|
||||||
pub mod integrity;
|
pub mod integrity;
|
||||||
pub mod lifecycle;
|
pub mod lifecycle;
|
||||||
pub mod metrics;
|
pub mod metrics;
|
||||||
|
pub mod notifications;
|
||||||
|
pub mod object_lock;
|
||||||
pub mod replication;
|
pub mod replication;
|
||||||
pub mod s3_client;
|
pub mod s3_client;
|
||||||
pub mod site_registry;
|
pub mod site_registry;
|
||||||
296
crates/myfsio-server/src/services/notifications.rs
Normal file
296
crates/myfsio-server/src/services/notifications.rs
Normal file
@@ -0,0 +1,296 @@
|
|||||||
|
use crate::state::AppState;
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use myfsio_storage::traits::StorageEngine;
|
||||||
|
use serde::Serialize;
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
/// Target of a bucket notification: a plain HTTP(S) webhook endpoint.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WebhookDestination {
    pub url: String,
}

/// One parsed `<WebhookConfiguration>` entry from a bucket's notification XML.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NotificationConfiguration {
    // Configuration <Id>; a fresh UUID is generated when the XML omits it.
    pub id: String,
    // Event-name patterns, e.g. "s3:ObjectCreated:*" (trailing '*' = prefix match).
    pub events: Vec<String>,
    pub destination: WebhookDestination,
    // Object-key filters; empty string means "no filter".
    pub prefix_filter: String,
    pub suffix_filter: String,
}
|
||||||
|
|
||||||
|
/// One entry of the webhook payload's `Records` array, shaped after the
/// AWS S3 event record schema — hence the camelCase serde renames, which
/// must not change or consumers expecting S3-style events will break.
#[derive(Debug, Clone, Serialize)]
pub struct NotificationEvent {
    #[serde(rename = "eventVersion")]
    event_version: &'static str,
    #[serde(rename = "eventSource")]
    event_source: &'static str,
    #[serde(rename = "awsRegion")]
    aws_region: &'static str,
    #[serde(rename = "eventTime")]
    event_time: String,
    #[serde(rename = "eventName")]
    event_name: String,
    #[serde(rename = "userIdentity")]
    user_identity: serde_json::Value,
    #[serde(rename = "requestParameters")]
    request_parameters: serde_json::Value,
    #[serde(rename = "responseElements")]
    response_elements: serde_json::Value,
    // Free-form S3 section (bucket + object details); already camelCase.
    s3: serde_json::Value,
}
|
||||||
|
|
||||||
|
impl NotificationConfiguration {
|
||||||
|
pub fn matches_event(&self, event_name: &str, object_key: &str) -> bool {
|
||||||
|
let event_match = self.events.iter().any(|pattern| {
|
||||||
|
if let Some(prefix) = pattern.strip_suffix('*') {
|
||||||
|
event_name.starts_with(prefix)
|
||||||
|
} else {
|
||||||
|
pattern == event_name
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if !event_match {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if !self.prefix_filter.is_empty() && !object_key.starts_with(&self.prefix_filter) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if !self.suffix_filter.is_empty() && !object_key.ends_with(&self.suffix_filter) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parses a bucket's `<NotificationConfiguration>` XML into webhook configs.
///
/// Only `<WebhookConfiguration>` entries are recognized. Each entry must
/// carry a non-empty `<Destination><Url>`; a missing URL aborts the whole
/// parse with an error. A missing `<Id>` is replaced by a fresh UUID.
pub fn parse_notification_configurations(
    xml: &str,
) -> Result<Vec<NotificationConfiguration>, String> {
    let doc = roxmltree::Document::parse(xml).map_err(|err| err.to_string())?;
    let mut configs = Vec::new();

    for webhook in doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "WebhookConfiguration")
    {
        // <Id> is optional; generate one so every config stays addressable.
        let id = child_text(&webhook, "Id").unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
        let events = webhook
            .children()
            .filter(|node| node.is_element() && node.tag_name().name() == "Event")
            .filter_map(|node| node.text())
            .map(|text| text.trim().to_string())
            .filter(|text| !text.is_empty())
            .collect::<Vec<_>>();

        let destination = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Destination");
        let url = destination
            .as_ref()
            .and_then(|node| child_text(node, "Url"))
            .unwrap_or_default();
        // NOTE: rejects the entire document, not just this entry.
        if url.trim().is_empty() {
            return Err("Destination URL is required".to_string());
        }

        // Optional S3-style key filters: <Filter><S3Key><FilterRule> pairs
        // with Name = "prefix" | "suffix"; other names are ignored.
        let mut prefix_filter = String::new();
        let mut suffix_filter = String::new();
        if let Some(filter) = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Filter")
        {
            if let Some(key) = filter
                .children()
                .find(|node| node.is_element() && node.tag_name().name() == "S3Key")
            {
                for rule in key
                    .children()
                    .filter(|node| node.is_element() && node.tag_name().name() == "FilterRule")
                {
                    let name = child_text(&rule, "Name").unwrap_or_default();
                    let value = child_text(&rule, "Value").unwrap_or_default();
                    if name == "prefix" {
                        prefix_filter = value;
                    } else if name == "suffix" {
                        suffix_filter = value;
                    }
                }
            }
        }

        configs.push(NotificationConfiguration {
            id,
            events,
            destination: WebhookDestination { url },
            prefix_filter,
            suffix_filter,
        });
    }

    Ok(configs)
}
|
||||||
|
|
||||||
|
pub fn emit_object_created(
|
||||||
|
state: &AppState,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
size: u64,
|
||||||
|
etag: Option<&str>,
|
||||||
|
request_id: &str,
|
||||||
|
source_ip: &str,
|
||||||
|
user_identity: &str,
|
||||||
|
operation: &str,
|
||||||
|
) {
|
||||||
|
emit_notifications(
|
||||||
|
state.clone(),
|
||||||
|
bucket.to_string(),
|
||||||
|
key.to_string(),
|
||||||
|
format!("s3:ObjectCreated:{}", operation),
|
||||||
|
size,
|
||||||
|
etag.unwrap_or_default().to_string(),
|
||||||
|
request_id.to_string(),
|
||||||
|
source_ip.to_string(),
|
||||||
|
user_identity.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn emit_object_removed(
|
||||||
|
state: &AppState,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
request_id: &str,
|
||||||
|
source_ip: &str,
|
||||||
|
user_identity: &str,
|
||||||
|
operation: &str,
|
||||||
|
) {
|
||||||
|
emit_notifications(
|
||||||
|
state.clone(),
|
||||||
|
bucket.to_string(),
|
||||||
|
key.to_string(),
|
||||||
|
format!("s3:ObjectRemoved:{}", operation),
|
||||||
|
0,
|
||||||
|
String::new(),
|
||||||
|
request_id.to_string(),
|
||||||
|
source_ip.to_string(),
|
||||||
|
user_identity.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawns a background task that builds an S3-style event record and POSTs
/// it to every matching webhook configured on the bucket. Delivery is
/// best-effort: lookup/parse failures bail out silently or with a warning,
/// and failed HTTP deliveries are logged but never retried.
fn emit_notifications(
    state: AppState,
    bucket: String,
    key: String,
    event_name: String,
    size: u64,
    etag: String,
    request_id: String,
    source_ip: String,
    user_identity: String,
) {
    tokio::spawn(async move {
        // No bucket config / no notification XML string -> nothing to do.
        let config = match state.storage.get_bucket_config(&bucket).await {
            Ok(config) => config,
            Err(_) => return,
        };
        let raw = match config.notification {
            Some(serde_json::Value::String(raw)) => raw,
            _ => return,
        };
        let configs = match parse_notification_configurations(&raw) {
            Ok(configs) => configs,
            Err(err) => {
                tracing::warn!("Invalid notification config for bucket {}: {}", bucket, err);
                return;
            }
        };

        // Single event record in the S3 event v2.1 shape, shared by all
        // destinations for this emission.
        let record = NotificationEvent {
            event_version: "2.1",
            event_source: "myfsio:s3",
            aws_region: "local",
            event_time: format_event_time(Utc::now()),
            event_name: event_name.clone(),
            user_identity: json!({ "principalId": if user_identity.is_empty() { "ANONYMOUS" } else { &user_identity } }),
            request_parameters: json!({ "sourceIPAddress": if source_ip.is_empty() { "127.0.0.1" } else { &source_ip } }),
            response_elements: json!({
                "x-amz-request-id": request_id,
                "x-amz-id-2": request_id,
            }),
            s3: json!({
                "s3SchemaVersion": "1.0",
                "configurationId": "notification",
                "bucket": {
                    "name": bucket,
                    "ownerIdentity": { "principalId": "local" },
                    "arn": format!("arn:aws:s3:::{}", bucket),
                },
                "object": {
                    "key": key,
                    "size": size,
                    "eTag": etag,
                    "versionId": "null",
                    // Ordering hint: hex-encoded millis since epoch.
                    "sequencer": format!("{:016X}", Utc::now().timestamp_millis()),
                }
            }),
        };
        let payload = json!({ "Records": [record] });
        let client = reqwest::Client::new();

        for config in configs {
            if !config.matches_event(&event_name, &key) {
                continue;
            }
            let result = client
                .post(&config.destination.url)
                .header("content-type", "application/json")
                .json(&payload)
                .send()
                .await;
            if let Err(err) = result {
                tracing::warn!(
                    "Failed to deliver notification for {} to {}: {}",
                    event_name,
                    config.destination.url,
                    err
                );
            }
        }
    });
}
|
||||||
|
|
||||||
|
/// Formats an event timestamp in the ISO-8601 shape S3 uses for
/// `eventTime`: `YYYY-MM-DDTHH:MM:SS.mmmZ`.
///
/// The previous format string hard-coded the fractional part to ".000",
/// discarding the timestamp's real milliseconds; chrono's `%.3f`
/// specifier emits them while keeping the exact same shape.
fn format_event_time(value: DateTime<Utc>) -> String {
    value.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
}
|
||||||
|
|
||||||
|
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||||
|
node.children()
|
||||||
|
.find(|child| child.is_element() && child.tag_name().name() == name)
|
||||||
|
.and_then(|child| child.text())
|
||||||
|
.map(|text| text.trim().to_string())
|
||||||
|
.filter(|text| !text.is_empty())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips a full WebhookConfiguration (id, event glob, destination,
    // prefix + suffix filters) and checks matching honors all of them.
    #[test]
    fn parse_webhook_configuration() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <WebhookConfiguration>
        <Id>upload</Id>
        <Event>s3:ObjectCreated:*</Event>
        <Destination><Url>https://example.com/hook</Url></Destination>
        <Filter>
            <S3Key>
                <FilterRule><Name>prefix</Name><Value>logs/</Value></FilterRule>
                <FilterRule><Name>suffix</Name><Value>.txt</Value></FilterRule>
            </S3Key>
        </Filter>
    </WebhookConfiguration>
</NotificationConfiguration>"#;
        let configs = parse_notification_configurations(xml).unwrap();
        assert_eq!(configs.len(), 1);
        // "s3:ObjectCreated:*" matches any Created event; Removed must not.
        assert!(configs[0].matches_event("s3:ObjectCreated:Put", "logs/test.txt"));
        assert!(!configs[0].matches_event("s3:ObjectRemoved:Delete", "logs/test.txt"));
    }
}
|
||||||
128
crates/myfsio-server/src/services/object_lock.rs
Normal file
128
crates/myfsio-server/src/services/object_lock.rs
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
// Reserved object-metadata keys used to persist lock state with the object.
pub const LEGAL_HOLD_METADATA_KEY: &str = "__legal_hold__";
pub const RETENTION_METADATA_KEY: &str = "__object_retention__";

/// S3-style object-lock retention modes. The SCREAMING_CASE variant names
/// are serialized verbatim by serde and stored in object metadata, so do
/// not rename them.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RetentionMode {
    // May be overridden by callers with the bypass-governance permission.
    GOVERNANCE,
    // Cannot be modified or bypassed until the retention period expires.
    COMPLIANCE,
}
|
||||||
|
|
||||||
|
/// Retention record stored JSON-encoded under `RETENTION_METADATA_KEY`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ObjectLockRetention {
    pub mode: RetentionMode,
    pub retain_until_date: DateTime<Utc>,
}

impl ObjectLockRetention {
    /// True once the retain-until date has passed; expired retention no
    /// longer blocks deletes or retention changes.
    pub fn is_expired(&self) -> bool {
        Utc::now() > self.retain_until_date
    }
}
|
||||||
|
|
||||||
|
pub fn get_object_retention(metadata: &HashMap<String, String>) -> Option<ObjectLockRetention> {
|
||||||
|
metadata
|
||||||
|
.get(RETENTION_METADATA_KEY)
|
||||||
|
.and_then(|raw| serde_json::from_str::<ObjectLockRetention>(raw).ok())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_object_retention(
|
||||||
|
metadata: &mut HashMap<String, String>,
|
||||||
|
retention: &ObjectLockRetention,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let encoded = serde_json::to_string(retention).map_err(|err| err.to_string())?;
|
||||||
|
metadata.insert(RETENTION_METADATA_KEY.to_string(), encoded);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_legal_hold(metadata: &HashMap<String, String>) -> bool {
|
||||||
|
metadata
|
||||||
|
.get(LEGAL_HOLD_METADATA_KEY)
|
||||||
|
.map(|value| value.eq_ignore_ascii_case("ON") || value.eq_ignore_ascii_case("true"))
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_legal_hold(metadata: &mut HashMap<String, String>, enabled: bool) {
|
||||||
|
metadata.insert(
|
||||||
|
LEGAL_HOLD_METADATA_KEY.to_string(),
|
||||||
|
if enabled { "ON" } else { "OFF" }.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ensure_retention_mutable(
|
||||||
|
metadata: &HashMap<String, String>,
|
||||||
|
bypass_governance: bool,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let Some(existing) = get_object_retention(metadata) else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
if existing.is_expired() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
match existing.mode {
|
||||||
|
RetentionMode::COMPLIANCE => Err(format!(
|
||||||
|
"Cannot modify retention on object with COMPLIANCE mode until retention expires"
|
||||||
|
)),
|
||||||
|
RetentionMode::GOVERNANCE if !bypass_governance => Err(
|
||||||
|
"Cannot modify GOVERNANCE retention without bypass-governance permission".to_string(),
|
||||||
|
),
|
||||||
|
RetentionMode::GOVERNANCE => Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn can_delete_object(
|
||||||
|
metadata: &HashMap<String, String>,
|
||||||
|
bypass_governance: bool,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
if get_legal_hold(metadata) {
|
||||||
|
return Err("Object is under legal hold".to_string());
|
||||||
|
}
|
||||||
|
if let Some(retention) = get_object_retention(metadata) {
|
||||||
|
if !retention.is_expired() {
|
||||||
|
return match retention.mode {
|
||||||
|
RetentionMode::COMPLIANCE => Err(format!(
|
||||||
|
"Object is locked in COMPLIANCE mode until {}",
|
||||||
|
retention.retain_until_date.to_rfc3339()
|
||||||
|
)),
|
||||||
|
RetentionMode::GOVERNANCE if !bypass_governance => Err(format!(
|
||||||
|
"Object is locked in GOVERNANCE mode until {}",
|
||||||
|
retention.retain_until_date.to_rfc3339()
|
||||||
|
)),
|
||||||
|
RetentionMode::GOVERNANCE => Ok(()),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;

    // A legal hold alone must block deletion, regardless of retention.
    #[test]
    fn legal_hold_blocks_delete() {
        let mut metadata = HashMap::new();
        set_legal_hold(&mut metadata, true);
        let err = can_delete_object(&metadata, false).unwrap_err();
        assert!(err.contains("legal hold"));
    }

    // Unexpired GOVERNANCE retention blocks deletes unless bypass is granted.
    #[test]
    fn governance_requires_bypass() {
        let mut metadata = HashMap::new();
        set_object_retention(
            &mut metadata,
            &ObjectLockRetention {
                mode: RetentionMode::GOVERNANCE,
                retain_until_date: Utc::now() + Duration::hours(1),
            },
        )
        .unwrap();
        assert!(can_delete_object(&metadata, false).is_err());
        assert!(can_delete_object(&metadata, true).is_ok());
    }
}
|
||||||
197
crates/myfsio-server/src/services/website_domains.rs
Normal file
197
crates/myfsio-server/src/services/website_domains.rs
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
use parking_lot::RwLock;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
/// In-memory (and "wrapped" on-disk) shape of the domain->bucket mapping.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
struct DomainData {
    // Normalized (lowercase, trimmed) domain -> bucket name.
    #[serde(default)]
    mappings: HashMap<String, String>,
}
|
||||||
|
|
||||||
|
/// Accepts both supported on-disk layouts: the wrapped
/// `{"mappings": {...}}` form and the legacy flat `{"domain": "bucket"}` form.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum DomainDataFile {
    Wrapped(DomainData),
    Flat(HashMap<String, String>),
}

impl DomainDataFile {
    /// Converts either layout into `DomainData`. Legacy flat keys are
    /// normalized on the way in; wrapped data is assumed already normalized.
    fn into_domain_data(self) -> DomainData {
        match self {
            Self::Wrapped(data) => data,
            Self::Flat(mappings) => DomainData {
                mappings: mappings
                    .into_iter()
                    .map(|(domain, bucket)| (normalize_domain(&domain), bucket))
                    .collect(),
            },
        }
    }
}
|
||||||
|
|
||||||
|
/// Persistent domain->bucket mapping for static-website hosting, backed by
/// `<storage_root>/.myfsio.sys/config/website_domains.json`.
pub struct WebsiteDomainStore {
    // Absolute path of the JSON mapping file.
    path: PathBuf,
    data: Arc<RwLock<DomainData>>,
}
|
||||||
|
|
||||||
|
impl WebsiteDomainStore {
    /// Loads the mapping file from `<storage_root>/.myfsio.sys/config/`,
    /// accepting both the wrapped and the legacy flat JSON layouts.
    /// A missing or unreadable file yields an empty store.
    pub fn new(storage_root: &std::path::Path) -> Self {
        let path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("website_domains.json");
        let data = if path.exists() {
            std::fs::read_to_string(&path)
                .ok()
                .and_then(|s| serde_json::from_str::<DomainDataFile>(&s).ok())
                .map(DomainDataFile::into_domain_data)
                .unwrap_or_default()
        } else {
            DomainData::default()
        };
        Self {
            path,
            data: Arc::new(RwLock::new(data)),
        }
    }

    /// Persists the current mappings. Always writes the flat
    /// `{"domain": "bucket"}` layout; write errors are deliberately
    /// ignored (best-effort persistence).
    fn save(&self) {
        let data = self.data.read();
        if let Some(parent) = self.path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        if let Ok(json) = serde_json::to_string_pretty(&data.mappings) {
            let _ = std::fs::write(&self.path, json);
        }
    }

    /// All mappings as `{"domain": ..., "bucket": ...}` JSON objects
    /// (HashMap iteration order, i.e. unordered).
    pub fn list_all(&self) -> Vec<serde_json::Value> {
        self.data
            .read()
            .mappings
            .iter()
            .map(|(domain, bucket)| {
                serde_json::json!({
                    "domain": domain,
                    "bucket": bucket,
                })
            })
            .collect()
    }

    /// Looks up the bucket for a domain; the input is normalized first.
    pub fn get_bucket(&self, domain: &str) -> Option<String> {
        let domain = normalize_domain(domain);
        self.data.read().mappings.get(&domain).cloned()
    }

    /// Inserts or replaces a mapping and persists immediately.
    pub fn set_mapping(&self, domain: &str, bucket: &str) {
        let domain = normalize_domain(domain);
        // Write guard is a temporary dropped at end of statement, so the
        // read lock taken inside save() cannot deadlock against it.
        self.data
            .write()
            .mappings
            .insert(domain, bucket.to_string());
        self.save();
    }

    /// Removes a mapping; persists only when something was actually removed.
    /// Returns whether the domain was present.
    pub fn delete_mapping(&self, domain: &str) -> bool {
        let domain = normalize_domain(domain);
        let removed = self.data.write().mappings.remove(&domain).is_some();
        if removed {
            self.save();
        }
        removed
    }
}
|
||||||
|
|
||||||
|
/// Canonical form used for all storage and lookups: surrounding whitespace
/// trimmed, ASCII letters lowercased.
pub fn normalize_domain(domain: &str) -> String {
    let trimmed = domain.trim();
    trimmed.to_ascii_lowercase()
}
|
||||||
|
|
||||||
|
/// RFC-1035-style hostname validation: total length <= 253 bytes, at least
/// two dot-separated labels, each label 1..=63 chars of [A-Za-z0-9-] that
/// does not start or end with '-'. Case is accepted as-is (normalize first
/// if needed).
pub fn is_valid_domain(domain: &str) -> bool {
    if domain.is_empty() || domain.len() > 253 {
        return false;
    }
    let labels: Vec<&str> = domain.split('.').collect();
    if labels.len() < 2 {
        return false;
    }
    labels.iter().all(|label| {
        let length_ok = !label.is_empty() && label.len() <= 63;
        let chars_ok = label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-');
        let edges_ok = !label.starts_with('-') && !label.ends_with('-');
        length_ok && chars_ok && edges_ok
    })
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::WebsiteDomainStore;
    use serde_json::json;
    use tempfile::tempdir;

    // Legacy flat {"domain":"bucket"} files must load, with keys normalized.
    #[test]
    fn loads_legacy_flat_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        let config_dir = tmp.path().join(".myfsio.sys").join("config");
        std::fs::create_dir_all(&config_dir).expect("create config dir");
        std::fs::write(
            config_dir.join("website_domains.json"),
            r#"{"Example.COM":"site-bucket"}"#,
        )
        .expect("write config");

        let store = WebsiteDomainStore::new(tmp.path());

        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    // The wrapped {"mappings":{...}} layout must load as well.
    #[test]
    fn loads_wrapped_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        let config_dir = tmp.path().join(".myfsio.sys").join("config");
        std::fs::create_dir_all(&config_dir).expect("create config dir");
        std::fs::write(
            config_dir.join("website_domains.json"),
            r#"{"mappings":{"example.com":"site-bucket"}}"#,
        )
        .expect("write config");

        let store = WebsiteDomainStore::new(tmp.path());

        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    // Saving always emits the flat layout with normalized keys so other
    // readers of the file see one canonical format.
    #[test]
    fn saves_in_shared_plain_mapping_format() {
        let tmp = tempdir().expect("tempdir");
        let store = WebsiteDomainStore::new(tmp.path());

        store.set_mapping("Example.COM", "site-bucket");

        let saved = std::fs::read_to_string(
            tmp.path()
                .join(".myfsio.sys")
                .join("config")
                .join("website_domains.json"),
        )
        .expect("read config");
        let json: serde_json::Value = serde_json::from_str(&saved).expect("parse config");

        assert_eq!(json, json!({"example.com": "site-bucket"}));
    }
}
|
||||||
@@ -15,9 +15,9 @@ use crate::session::SessionStore;
|
|||||||
use crate::stores::connections::ConnectionStore;
|
use crate::stores::connections::ConnectionStore;
|
||||||
use crate::templates::TemplateEngine;
|
use crate::templates::TemplateEngine;
|
||||||
use myfsio_auth::iam::IamService;
|
use myfsio_auth::iam::IamService;
|
||||||
use myfsio_crypto::encryption::EncryptionService;
|
use myfsio_crypto::encryption::{EncryptionConfig, EncryptionService};
|
||||||
use myfsio_crypto::kms::KmsService;
|
use myfsio_crypto::kms::KmsService;
|
||||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
use myfsio_storage::fs_backend::{FsStorageBackend, FsStorageBackendConfig};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct AppState {
|
pub struct AppState {
|
||||||
@@ -42,7 +42,16 @@ pub struct AppState {
|
|||||||
|
|
||||||
impl AppState {
|
impl AppState {
|
||||||
pub fn new(config: ServerConfig) -> Self {
|
pub fn new(config: ServerConfig) -> Self {
|
||||||
let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
|
let storage = Arc::new(FsStorageBackend::new_with_config(
|
||||||
|
config.storage_root.clone(),
|
||||||
|
FsStorageBackendConfig {
|
||||||
|
object_key_max_length_bytes: config.object_key_max_length_bytes,
|
||||||
|
object_cache_max_size: config.object_cache_max_size,
|
||||||
|
bucket_config_cache_ttl: Duration::from_secs_f64(
|
||||||
|
config.bucket_config_cache_ttl_seconds,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
));
|
||||||
let iam = Arc::new(IamService::new_with_secret(
|
let iam = Arc::new(IamService::new_with_secret(
|
||||||
config.iam_config_path.clone(),
|
config.iam_config_path.clone(),
|
||||||
config.secret_key.clone(),
|
config.secret_key.clone(),
|
||||||
@@ -51,7 +60,13 @@ impl AppState {
|
|||||||
let gc = if config.gc_enabled {
|
let gc = if config.gc_enabled {
|
||||||
Some(Arc::new(GcService::new(
|
Some(Arc::new(GcService::new(
|
||||||
config.storage_root.clone(),
|
config.storage_root.clone(),
|
||||||
crate::services::gc::GcConfig::default(),
|
crate::services::gc::GcConfig {
|
||||||
|
interval_hours: config.gc_interval_hours,
|
||||||
|
temp_file_max_age_hours: config.gc_temp_file_max_age_hours,
|
||||||
|
multipart_max_age_days: config.gc_multipart_max_age_days,
|
||||||
|
lock_file_max_age_hours: config.gc_lock_file_max_age_hours,
|
||||||
|
dry_run: config.gc_dry_run,
|
||||||
|
},
|
||||||
)))
|
)))
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
@@ -92,7 +107,22 @@ impl AppState {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let site_registry = Some(Arc::new(SiteRegistry::new(&config.storage_root)));
|
let site_registry = {
|
||||||
|
let registry = SiteRegistry::new(&config.storage_root);
|
||||||
|
if let (Some(site_id), Some(endpoint)) =
|
||||||
|
(config.site_id.as_deref(), config.site_endpoint.as_deref())
|
||||||
|
{
|
||||||
|
registry.set_local_site(crate::services::site_registry::SiteInfo {
|
||||||
|
site_id: site_id.to_string(),
|
||||||
|
endpoint: endpoint.to_string(),
|
||||||
|
region: config.site_region.clone(),
|
||||||
|
priority: config.site_priority,
|
||||||
|
display_name: site_id.to_string(),
|
||||||
|
created_at: Some(chrono::Utc::now().to_rfc3339()),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Some(Arc::new(registry))
|
||||||
|
};
|
||||||
|
|
||||||
let website_domains = if config.website_hosting_enabled {
|
let website_domains = if config.website_hosting_enabled {
|
||||||
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
|
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
|
||||||
@@ -132,6 +162,7 @@ impl AppState {
|
|||||||
|
|
||||||
let templates = init_templates(&config.templates_dir);
|
let templates = init_templates(&config.templates_dir);
|
||||||
let access_logging = Arc::new(AccessLoggingService::new(&config.storage_root));
|
let access_logging = Arc::new(AccessLoggingService::new(&config.storage_root));
|
||||||
|
let session_ttl = Duration::from_secs(config.session_lifetime_days.saturating_mul(86_400));
|
||||||
Self {
|
Self {
|
||||||
config,
|
config,
|
||||||
storage,
|
storage,
|
||||||
@@ -148,7 +179,7 @@ impl AppState {
|
|||||||
replication,
|
replication,
|
||||||
site_sync,
|
site_sync,
|
||||||
templates,
|
templates,
|
||||||
sessions: Arc::new(SessionStore::new(Duration::from_secs(60 * 60 * 12))),
|
sessions: Arc::new(SessionStore::new(session_ttl)),
|
||||||
access_logging,
|
access_logging,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -172,7 +203,13 @@ impl AppState {
|
|||||||
|
|
||||||
let encryption = if config.encryption_enabled {
|
let encryption = if config.encryption_enabled {
|
||||||
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
|
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
|
||||||
Ok(master_key) => Some(Arc::new(EncryptionService::new(master_key, kms.clone()))),
|
Ok(master_key) => Some(Arc::new(EncryptionService::with_config(
|
||||||
|
master_key,
|
||||||
|
kms.clone(),
|
||||||
|
EncryptionConfig {
|
||||||
|
chunk_size: config.encryption_chunk_size_bytes,
|
||||||
|
},
|
||||||
|
))),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
tracing::error!("Failed to initialize encryption: {}", e);
|
tracing::error!("Failed to initialize encryption: {}", e);
|
||||||
None
|
None
|
||||||
|
Before Width: | Height: | Size: 200 KiB After Width: | Height: | Size: 200 KiB |
|
Before Width: | Height: | Size: 872 KiB After Width: | Height: | Size: 872 KiB |
@@ -73,16 +73,13 @@
|
|||||||
</div>
|
</div>
|
||||||
<p class="text-muted">Build or run the Rust server and launch the API plus web UI from a single process.</p>
|
<p class="text-muted">Build or run the Rust server and launch the API plus web UI from a single process.</p>
|
||||||
<div class="alert alert-light border small mb-3">
|
<div class="alert alert-light border small mb-3">
|
||||||
Runtime note: MyFSIO now runs from the Rust server in <code>rust/myfsio-engine</code>. For the verified runtime configuration list, use the repository <code>docs.md</code>.
|
Runtime note: the repository root is the Cargo workspace. For the verified runtime configuration list, use the repository <code>docs.md</code>.
|
||||||
</div>
|
</div>
|
||||||
<ol class="docs-steps">
|
<ol class="docs-steps">
|
||||||
<li>Install a current Rust toolchain.</li>
|
<li>Install a current Rust toolchain.</li>
|
||||||
<li>Change into <code>rust/myfsio-engine</code>.</li>
|
<li>From the repository root, start the server with <code>cargo run -p myfsio-server --</code>.</li>
|
||||||
<li>Start the server with <code>cargo run -p myfsio-server --</code>.</li>
|
|
||||||
</ol>
|
</ol>
|
||||||
<pre class="mb-3"><code class="language-bash">cd rust/myfsio-engine
|
<pre class="mb-3"><code class="language-bash"># Run API + UI
|
||||||
|
|
||||||
# Run API + UI
|
|
||||||
cargo run -p myfsio-server --
|
cargo run -p myfsio-server --
|
||||||
|
|
||||||
# Show resolved configuration
|
# Show resolved configuration
|
||||||
@@ -112,7 +109,7 @@ cargo build --release -p myfsio-server
|
|||||||
<tbody>
|
<tbody>
|
||||||
<tr>
|
<tr>
|
||||||
<td><code>API_BASE_URL</code></td>
|
<td><code>API_BASE_URL</code></td>
|
||||||
<td><code>http://127.0.0.1:5000</code></td>
|
<td>Derived from <code>HOST</code>/<code>PORT</code></td>
|
||||||
<td>Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy.</td>
|
<td>Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy.</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
@@ -184,33 +181,18 @@ cargo build --release -p myfsio-server
|
|||||||
<tr>
|
<tr>
|
||||||
<td><code>RATE_LIMIT_DEFAULT</code></td>
|
<td><code>RATE_LIMIT_DEFAULT</code></td>
|
||||||
<td><code>200 per minute</code></td>
|
<td><code>200 per minute</code></td>
|
||||||
<td>Default API rate limit.</td>
|
<td>Default rate limit for S3 and KMS API endpoints.</td>
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><code>RATE_LIMIT_LIST_BUCKETS</code></td>
|
|
||||||
<td><code>60 per minute</code></td>
|
|
||||||
<td>Rate limit for listing buckets.</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><code>RATE_LIMIT_BUCKET_OPS</code></td>
|
|
||||||
<td><code>120 per minute</code></td>
|
|
||||||
<td>Rate limit for bucket operations.</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><code>RATE_LIMIT_OBJECT_OPS</code></td>
|
|
||||||
<td><code>240 per minute</code></td>
|
|
||||||
<td>Rate limit for object operations.</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><code>RATE_LIMIT_HEAD_OPS</code></td>
|
|
||||||
<td><code>100 per minute</code></td>
|
|
||||||
<td>Rate limit for HEAD requests.</td>
|
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<td><code>RATE_LIMIT_ADMIN</code></td>
|
<td><code>RATE_LIMIT_ADMIN</code></td>
|
||||||
<td><code>60 per minute</code></td>
|
<td><code>60 per minute</code></td>
|
||||||
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
|
<td>Rate limit for admin API endpoints (<code>/admin/*</code>).</td>
|
||||||
</tr>
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><code>RATE_LIMIT_STORAGE_URI</code></td>
|
||||||
|
<td><code>memory://</code></td>
|
||||||
|
<td>Rate limit storage backend. Only in-memory storage is currently supported.</td>
|
||||||
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<td><code>ADMIN_ACCESS_KEY</code></td>
|
<td><code>ADMIN_ACCESS_KEY</code></td>
|
||||||
<td>(none)</td>
|
<td>(none)</td>
|
||||||
@@ -377,8 +359,8 @@ cargo build --release -p myfsio-server
|
|||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<td><code>NUM_TRUSTED_PROXIES</code></td>
|
<td><code>NUM_TRUSTED_PROXIES</code></td>
|
||||||
<td><code>1</code></td>
|
<td><code>0</code></td>
|
||||||
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers.</td>
|
<td>Number of trusted reverse proxies for <code>X-Forwarded-*</code> headers. Forwarded IP headers are ignored when this is <code>0</code>.</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
|
<td><code>ALLOWED_REDIRECT_HOSTS</code></td>
|
||||||
@@ -569,7 +551,7 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
|
|||||||
<h3 class="h6 text-uppercase text-muted">Policies & versioning</h3>
|
<h3 class="h6 text-uppercase text-muted">Policies & versioning</h3>
|
||||||
<ul>
|
<ul>
|
||||||
<li>Toggle versioning (requires write access). Archived-only keys are flagged so you can restore them quickly.</li>
|
<li>Toggle versioning (requires write access). Archived-only keys are flagged so you can restore them quickly.</li>
|
||||||
<li>The policy editor saves drafts, ships with presets, and hot-reloads <code>data/.myfsio.sys/config/bucket_policies.json</code>.</li>
|
<li>The policy editor saves each bucket policy in that bucket's <code>.bucket.json</code>; legacy <code>data/.myfsio.sys/config/bucket_policies.json</code> entries are still read as a fallback.</li>
|
||||||
</ul>
|
</ul>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -2058,7 +2040,7 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
|
|||||||
<tr>
|
<tr>
|
||||||
<td>UI shows stale policy/object data</td>
|
<td>UI shows stale policy/object data</td>
|
||||||
<td>Browser cached prior state</td>
|
<td>Browser cached prior state</td>
|
||||||
<td>Refresh; the server hot-reloads <code>data/.myfsio.sys/config/bucket_policies.json</code> and storage metadata.</td>
|
<td>Refresh; the server hot-reloads bucket <code>.bucket.json</code> policy data and legacy <code>data/.myfsio.sys/config/bucket_policies.json</code> fallback entries.</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<td>Presign dialog returns 403</td>
|
<td>Presign dialog returns 403</td>
|
||||||
@@ -2078,7 +2060,7 @@ curl "{{ api_base | replace(from="/api", to="/ui") }}/metrics/operations/history
|
|||||||
<tr>
|
<tr>
|
||||||
<td>Large folder uploads hitting rate limits (429)</td>
|
<td>Large folder uploads hitting rate limits (429)</td>
|
||||||
<td><code>RATE_LIMIT_DEFAULT</code> exceeded (200/min)</td>
|
<td><code>RATE_LIMIT_DEFAULT</code> exceeded (200/min)</td>
|
||||||
<td>Increase rate limit in env config, use Redis backend (<code>RATE_LIMIT_STORAGE_URI=redis://host:port</code>) for distributed setups, or upload in smaller batches.</td>
|
<td>Increase <code>RATE_LIMIT_DEFAULT</code> in env config or upload in smaller batches. Distributed rate-limit storage is not supported yet.</td>
|
||||||
</tr>
|
</tr>
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
@@ -133,7 +133,7 @@
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
<div class="row g-3">
|
<div class="row g-3">
|
||||||
{% for user in users %}
|
{% for user in users %}
|
||||||
<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}">
|
<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}" data-update-url="{{ url_for(endpoint="ui.update_iam_user", user_id=user.user_id) }}">
|
||||||
<div class="card h-100 iam-user-card{% if user.is_admin %} iam-admin-card{% else %}{% endif %}">
|
<div class="card h-100 iam-user-card{% if user.is_admin %} iam-admin-card{% else %}{% endif %}">
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<div class="d-flex align-items-start justify-content-between mb-3">
|
<div class="d-flex align-items-start justify-content-between mb-3">
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -341,3 +341,84 @@ fn render_bucket_detail() {
|
|||||||
ctx.insert("objects_stream_url", &"");
|
ctx.insert("objects_stream_url", &"");
|
||||||
render_or_panic("bucket_detail.html", &ctx);
|
render_or_panic("bucket_detail.html", &ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn render_bucket_detail_without_error_document() {
|
||||||
|
let mut ctx = base_ctx();
|
||||||
|
ctx.insert("bucket_name", &"site-bucket");
|
||||||
|
ctx.insert(
|
||||||
|
"bucket",
|
||||||
|
&json!({
|
||||||
|
"name": "site-bucket",
|
||||||
|
"creation_date": "2025-01-01T00:00:00Z",
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
ctx.insert("objects", &Vec::<Value>::new());
|
||||||
|
ctx.insert("prefixes", &Vec::<Value>::new());
|
||||||
|
ctx.insert("total_objects", &0u64);
|
||||||
|
ctx.insert("total_bytes", &0u64);
|
||||||
|
ctx.insert("current_objects", &0u64);
|
||||||
|
ctx.insert("current_bytes", &0u64);
|
||||||
|
ctx.insert("version_count", &0u64);
|
||||||
|
ctx.insert("version_bytes", &0u64);
|
||||||
|
ctx.insert("max_objects", &Value::Null);
|
||||||
|
ctx.insert("max_bytes", &Value::Null);
|
||||||
|
ctx.insert("has_max_objects", &false);
|
||||||
|
ctx.insert("has_max_bytes", &false);
|
||||||
|
ctx.insert("obj_pct", &0);
|
||||||
|
ctx.insert("bytes_pct", &0);
|
||||||
|
ctx.insert("has_quota", &false);
|
||||||
|
ctx.insert("versioning_enabled", &false);
|
||||||
|
ctx.insert("versioning_status", &"Disabled");
|
||||||
|
ctx.insert("encryption_config", &json!({"Rules": []}));
|
||||||
|
ctx.insert("enc_rules", &Vec::<Value>::new());
|
||||||
|
ctx.insert("enc_algorithm", &"");
|
||||||
|
ctx.insert("enc_kms_key", &"");
|
||||||
|
ctx.insert("replication_rules", &Vec::<Value>::new());
|
||||||
|
ctx.insert("replication_rule", &Value::Null);
|
||||||
|
ctx.insert("website_config", &json!({"index_document": "index.html"}));
|
||||||
|
ctx.insert("bucket_policy", &"");
|
||||||
|
ctx.insert("bucket_policy_text", &"");
|
||||||
|
ctx.insert("connections", &Vec::<Value>::new());
|
||||||
|
ctx.insert("current_prefix", &"");
|
||||||
|
ctx.insert("parent_prefix", &"");
|
||||||
|
ctx.insert("has_more", &false);
|
||||||
|
ctx.insert("next_token", &"");
|
||||||
|
ctx.insert("active_tab", &"objects");
|
||||||
|
ctx.insert("multipart_uploads", &Vec::<Value>::new());
|
||||||
|
ctx.insert("target_conn", &Value::Null);
|
||||||
|
ctx.insert("target_conn_name", &"");
|
||||||
|
ctx.insert("preset_choice", &"");
|
||||||
|
ctx.insert("default_policy", &"");
|
||||||
|
ctx.insert("can_manage_cors", &true);
|
||||||
|
ctx.insert("can_manage_lifecycle", &true);
|
||||||
|
ctx.insert("can_manage_quota", &true);
|
||||||
|
ctx.insert("can_manage_versioning", &true);
|
||||||
|
ctx.insert("can_manage_website", &true);
|
||||||
|
ctx.insert("can_edit_policy", &true);
|
||||||
|
ctx.insert("is_replication_admin", &true);
|
||||||
|
ctx.insert("lifecycle_enabled", &false);
|
||||||
|
ctx.insert("site_sync_enabled", &false);
|
||||||
|
ctx.insert("website_hosting_enabled", &true);
|
||||||
|
ctx.insert("website_domains", &Vec::<Value>::new());
|
||||||
|
ctx.insert("kms_keys", &Vec::<Value>::new());
|
||||||
|
ctx.insert(
|
||||||
|
"bucket_stats",
|
||||||
|
&json!({
|
||||||
|
"bytes": 0, "objects": 0, "total_bytes": 0, "total_objects": 0,
|
||||||
|
"version_bytes": 0, "version_count": 0
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
ctx.insert(
|
||||||
|
"bucket_quota",
|
||||||
|
&json!({ "max_bytes": null, "max_objects": null }),
|
||||||
|
);
|
||||||
|
ctx.insert("buckets_for_copy_url", &"");
|
||||||
|
ctx.insert("acl_url", &"");
|
||||||
|
ctx.insert("cors_url", &"");
|
||||||
|
ctx.insert("folders_url", &"");
|
||||||
|
ctx.insert("lifecycle_url", &"");
|
||||||
|
ctx.insert("objects_api_url", &"");
|
||||||
|
ctx.insert("objects_stream_url", &"");
|
||||||
|
render_or_panic("bucket_detail.html", &ctx);
|
||||||
|
}
|
||||||
@@ -11,10 +11,24 @@ pub enum StorageError {
|
|||||||
BucketNotEmpty(String),
|
BucketNotEmpty(String),
|
||||||
#[error("Object not found: {bucket}/{key}")]
|
#[error("Object not found: {bucket}/{key}")]
|
||||||
ObjectNotFound { bucket: String, key: String },
|
ObjectNotFound { bucket: String, key: String },
|
||||||
|
#[error("Object version not found: {bucket}/{key}?versionId={version_id}")]
|
||||||
|
VersionNotFound {
|
||||||
|
bucket: String,
|
||||||
|
key: String,
|
||||||
|
version_id: String,
|
||||||
|
},
|
||||||
|
#[error("Object is a delete marker: {bucket}/{key}")]
|
||||||
|
DeleteMarker {
|
||||||
|
bucket: String,
|
||||||
|
key: String,
|
||||||
|
version_id: String,
|
||||||
|
},
|
||||||
#[error("Invalid bucket name: {0}")]
|
#[error("Invalid bucket name: {0}")]
|
||||||
InvalidBucketName(String),
|
InvalidBucketName(String),
|
||||||
#[error("Invalid object key: {0}")]
|
#[error("Invalid object key: {0}")]
|
||||||
InvalidObjectKey(String),
|
InvalidObjectKey(String),
|
||||||
|
#[error("Method not allowed: {0}")]
|
||||||
|
MethodNotAllowed(String),
|
||||||
#[error("Upload not found: {0}")]
|
#[error("Upload not found: {0}")]
|
||||||
UploadNotFound(String),
|
UploadNotFound(String),
|
||||||
#[error("Quota exceeded: {0}")]
|
#[error("Quota exceeded: {0}")]
|
||||||
@@ -36,7 +50,7 @@ impl From<StorageError> for S3Error {
|
|||||||
S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
|
S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
|
||||||
}
|
}
|
||||||
StorageError::BucketAlreadyExists(name) => {
|
StorageError::BucketAlreadyExists(name) => {
|
||||||
S3Error::from_code(S3ErrorCode::BucketAlreadyExists)
|
S3Error::from_code(S3ErrorCode::BucketAlreadyOwnedByYou)
|
||||||
.with_resource(format!("/{}", name))
|
.with_resource(format!("/{}", name))
|
||||||
}
|
}
|
||||||
StorageError::BucketNotEmpty(name) => {
|
StorageError::BucketNotEmpty(name) => {
|
||||||
@@ -46,10 +60,23 @@ impl From<StorageError> for S3Error {
|
|||||||
S3Error::from_code(S3ErrorCode::NoSuchKey)
|
S3Error::from_code(S3ErrorCode::NoSuchKey)
|
||||||
.with_resource(format!("/{}/{}", bucket, key))
|
.with_resource(format!("/{}/{}", bucket, key))
|
||||||
}
|
}
|
||||||
|
StorageError::VersionNotFound {
|
||||||
|
bucket,
|
||||||
|
key,
|
||||||
|
version_id,
|
||||||
|
} => S3Error::from_code(S3ErrorCode::NoSuchVersion)
|
||||||
|
.with_resource(format!("/{}/{}?versionId={}", bucket, key, version_id)),
|
||||||
|
StorageError::DeleteMarker {
|
||||||
|
bucket,
|
||||||
|
key,
|
||||||
|
version_id,
|
||||||
|
} => S3Error::from_code(S3ErrorCode::MethodNotAllowed)
|
||||||
|
.with_resource(format!("/{}/{}?versionId={}", bucket, key, version_id)),
|
||||||
StorageError::InvalidBucketName(msg) => {
|
StorageError::InvalidBucketName(msg) => {
|
||||||
S3Error::new(S3ErrorCode::InvalidBucketName, msg)
|
S3Error::new(S3ErrorCode::InvalidBucketName, msg)
|
||||||
}
|
}
|
||||||
StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
|
StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
|
||||||
|
StorageError::MethodNotAllowed(msg) => S3Error::new(S3ErrorCode::MethodNotAllowed, msg),
|
||||||
StorageError::UploadNotFound(id) => S3Error::new(
|
StorageError::UploadNotFound(id) => S3Error::new(
|
||||||
S3ErrorCode::NoSuchUpload,
|
S3ErrorCode::NoSuchUpload,
|
||||||
format!("Upload {} not found", id),
|
format!("Upload {} not found", id),
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -34,7 +34,42 @@ pub trait StorageEngine: Send + Sync {
|
|||||||
|
|
||||||
async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;
|
async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;
|
||||||
|
|
||||||
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<()>;
|
async fn get_object_version(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
version_id: &str,
|
||||||
|
) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
|
||||||
|
|
||||||
|
async fn get_object_version_path(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
version_id: &str,
|
||||||
|
) -> StorageResult<PathBuf>;
|
||||||
|
|
||||||
|
async fn head_object_version(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
version_id: &str,
|
||||||
|
) -> StorageResult<ObjectMeta>;
|
||||||
|
|
||||||
|
async fn get_object_version_metadata(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
version_id: &str,
|
||||||
|
) -> StorageResult<HashMap<String, String>>;
|
||||||
|
|
||||||
|
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<DeleteOutcome>;
|
||||||
|
|
||||||
|
async fn delete_object_version(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
key: &str,
|
||||||
|
version_id: &str,
|
||||||
|
) -> StorageResult<DeleteOutcome>;
|
||||||
|
|
||||||
async fn copy_object(
|
async fn copy_object(
|
||||||
&self,
|
&self,
|
||||||
@@ -120,6 +155,12 @@ pub trait StorageEngine: Send + Sync {
|
|||||||
key: &str,
|
key: &str,
|
||||||
) -> StorageResult<Vec<VersionInfo>>;
|
) -> StorageResult<Vec<VersionInfo>>;
|
||||||
|
|
||||||
|
async fn list_bucket_object_versions(
|
||||||
|
&self,
|
||||||
|
bucket: &str,
|
||||||
|
prefix: Option<&str>,
|
||||||
|
) -> StorageResult<Vec<VersionInfo>>;
|
||||||
|
|
||||||
async fn get_object_tags(&self, bucket: &str, key: &str) -> StorageResult<Vec<Tag>>;
|
async fn get_object_tags(&self, bucket: &str, key: &str) -> StorageResult<Vec<Tag>>;
|
||||||
|
|
||||||
async fn set_object_tags(&self, bucket: &str, key: &str, tags: &[Tag]) -> StorageResult<()>;
|
async fn set_object_tags(&self, bucket: &str, key: &str, tags: &[Tag]) -> StorageResult<()>;
|
||||||
@@ -47,6 +47,7 @@ pub fn validate_object_key(
|
|||||||
normalized.split('/').collect()
|
normalized.split('/').collect()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
for part in &parts {
|
for part in &parts {
|
||||||
if part.is_empty() {
|
if part.is_empty() {
|
||||||
continue;
|
continue;
|
||||||
@@ -60,6 +61,12 @@ pub fn validate_object_key(
|
|||||||
return Some("Object key contains invalid segments".to_string());
|
return Some("Object key contains invalid segments".to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if part.len() > 255 {
|
||||||
|
return Some(
|
||||||
|
"Object key contains a path segment that exceeds 255 bytes".to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
if part.chars().any(|c| (c as u32) < 32) {
|
if part.chars().any(|c| (c as u32) < 32) {
|
||||||
return Some("Object key contains control characters".to_string());
|
return Some("Object key contains control characters".to_string());
|
||||||
}
|
}
|
||||||
@@ -98,6 +105,15 @@ pub fn validate_object_key(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for part in &non_empty_parts {
|
||||||
|
if *part == ".__myfsio_dirobj__"
|
||||||
|
|| *part == ".__myfsio_empty__"
|
||||||
|
|| part.starts_with("_index.json")
|
||||||
|
{
|
||||||
|
return Some("Object key segment uses a reserved internal name".to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -132,6 +148,13 @@ pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
|
|||||||
return Some("Bucket name must not be formatted as an IP address".to_string());
|
return Some("Bucket name must not be formatted as an IP address".to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if bucket_name.starts_with("xn--") {
|
||||||
|
return Some("Bucket name must not start with the reserved prefix 'xn--'".to_string());
|
||||||
|
}
|
||||||
|
if bucket_name.ends_with("-s3alias") || bucket_name.ends_with("--ol-s3") {
|
||||||
|
return Some("Bucket name must not end with a reserved suffix".to_string());
|
||||||
|
}
|
||||||
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -8,3 +8,4 @@ myfsio-common = { path = "../myfsio-common" }
|
|||||||
quick-xml = { workspace = true }
|
quick-xml = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
chrono = { workspace = true }
|
chrono = { workspace = true }
|
||||||
|
percent-encoding = { workspace = true }
|
||||||
@@ -1,13 +1,13 @@
|
|||||||
use quick_xml::events::Event;
|
use quick_xml::events::Event;
|
||||||
use quick_xml::Reader;
|
use quick_xml::Reader;
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
#[derive(Debug, Default, Clone)]
|
||||||
pub struct DeleteObjectsRequest {
|
pub struct DeleteObjectsRequest {
|
||||||
pub objects: Vec<ObjectIdentifier>,
|
pub objects: Vec<ObjectIdentifier>,
|
||||||
pub quiet: bool,
|
pub quiet: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct ObjectIdentifier {
|
pub struct ObjectIdentifier {
|
||||||
pub key: String,
|
pub key: String,
|
||||||
pub version_id: Option<String>,
|
pub version_id: Option<String>,
|
||||||
@@ -86,6 +86,11 @@ pub fn parse_complete_multipart_upload(xml: &str) -> Result<CompleteMultipartUpl
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
||||||
|
let trimmed = xml.trim();
|
||||||
|
if trimmed.is_empty() {
|
||||||
|
return Err("Request body is empty".to_string());
|
||||||
|
}
|
||||||
|
|
||||||
let mut reader = Reader::from_str(xml);
|
let mut reader = Reader::from_str(xml);
|
||||||
let mut result = DeleteObjectsRequest::default();
|
let mut result = DeleteObjectsRequest::default();
|
||||||
let mut buf = Vec::new();
|
let mut buf = Vec::new();
|
||||||
@@ -93,18 +98,43 @@ pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
|||||||
let mut current_key: Option<String> = None;
|
let mut current_key: Option<String> = None;
|
||||||
let mut current_version_id: Option<String> = None;
|
let mut current_version_id: Option<String> = None;
|
||||||
let mut in_object = false;
|
let mut in_object = false;
|
||||||
|
let mut saw_delete_root = false;
|
||||||
|
let mut first_element_seen = false;
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
match reader.read_event_into(&mut buf) {
|
let event = reader.read_event_into(&mut buf);
|
||||||
|
match event {
|
||||||
Ok(Event::Start(ref e)) => {
|
Ok(Event::Start(ref e)) => {
|
||||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||||
current_tag = name.clone();
|
current_tag = name.clone();
|
||||||
if name == "Object" {
|
if !first_element_seen {
|
||||||
|
first_element_seen = true;
|
||||||
|
if name != "Delete" {
|
||||||
|
return Err(format!(
|
||||||
|
"Expected <Delete> root element, found <{}>",
|
||||||
|
name
|
||||||
|
));
|
||||||
|
}
|
||||||
|
saw_delete_root = true;
|
||||||
|
} else if name == "Object" {
|
||||||
in_object = true;
|
in_object = true;
|
||||||
current_key = None;
|
current_key = None;
|
||||||
current_version_id = None;
|
current_version_id = None;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(Event::Empty(ref e)) => {
|
||||||
|
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
||||||
|
if !first_element_seen {
|
||||||
|
first_element_seen = true;
|
||||||
|
if name != "Delete" {
|
||||||
|
return Err(format!(
|
||||||
|
"Expected <Delete> root element, found <{}>",
|
||||||
|
name
|
||||||
|
));
|
||||||
|
}
|
||||||
|
saw_delete_root = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
Ok(Event::Text(ref e)) => {
|
Ok(Event::Text(ref e)) => {
|
||||||
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
||||||
match current_tag.as_str() {
|
match current_tag.as_str() {
|
||||||
@@ -139,6 +169,13 @@ pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
|||||||
buf.clear();
|
buf.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !saw_delete_root {
|
||||||
|
return Err("Expected <Delete> root element".to_string());
|
||||||
|
}
|
||||||
|
if result.objects.is_empty() {
|
||||||
|
return Err("Delete request must contain at least one <Object>".to_string());
|
||||||
|
}
|
||||||
|
|
||||||
Ok(result)
|
Ok(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -8,6 +8,23 @@ pub fn format_s3_datetime(dt: &DateTime<Utc>) -> String {
|
|||||||
dt.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
|
dt.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn rate_limit_exceeded_xml(resource: &str, request_id: &str) -> String {
|
||||||
|
format!(
|
||||||
|
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||||
|
<Error><Code>SlowDown</Code><Message>Please reduce your request rate</Message><Resource>{}</Resource><RequestId>{}</RequestId></Error>",
|
||||||
|
xml_escape(resource),
|
||||||
|
xml_escape(request_id),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn xml_escape(s: &str) -> String {
|
||||||
|
s.replace('&', "&")
|
||||||
|
.replace('<', "<")
|
||||||
|
.replace('>', ">")
|
||||||
|
.replace('"', """)
|
||||||
|
.replace('\'', "'")
|
||||||
|
}
|
||||||
|
|
||||||
pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]) -> String {
|
pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]) -> String {
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||||
|
|
||||||
@@ -56,6 +73,21 @@ pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]
|
|||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn maybe_url_encode(value: &str, encoding_type: Option<&str>) -> String {
|
||||||
|
if matches!(encoding_type, Some(v) if v.eq_ignore_ascii_case("url")) {
|
||||||
|
percent_encoding::utf8_percent_encode(value, KEY_ENCODE_SET).to_string()
|
||||||
|
} else {
|
||||||
|
value.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const KEY_ENCODE_SET: &percent_encoding::AsciiSet = &percent_encoding::NON_ALPHANUMERIC
|
||||||
|
.remove(b'-')
|
||||||
|
.remove(b'_')
|
||||||
|
.remove(b'.')
|
||||||
|
.remove(b'~')
|
||||||
|
.remove(b'/');
|
||||||
|
|
||||||
pub fn list_objects_v2_xml(
|
pub fn list_objects_v2_xml(
|
||||||
bucket_name: &str,
|
bucket_name: &str,
|
||||||
prefix: &str,
|
prefix: &str,
|
||||||
@@ -67,6 +99,34 @@ pub fn list_objects_v2_xml(
|
|||||||
continuation_token: Option<&str>,
|
continuation_token: Option<&str>,
|
||||||
next_continuation_token: Option<&str>,
|
next_continuation_token: Option<&str>,
|
||||||
key_count: usize,
|
key_count: usize,
|
||||||
|
) -> String {
|
||||||
|
list_objects_v2_xml_with_encoding(
|
||||||
|
bucket_name,
|
||||||
|
prefix,
|
||||||
|
delimiter,
|
||||||
|
max_keys,
|
||||||
|
objects,
|
||||||
|
common_prefixes,
|
||||||
|
is_truncated,
|
||||||
|
continuation_token,
|
||||||
|
next_continuation_token,
|
||||||
|
key_count,
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn list_objects_v2_xml_with_encoding(
|
||||||
|
bucket_name: &str,
|
||||||
|
prefix: &str,
|
||||||
|
delimiter: &str,
|
||||||
|
max_keys: usize,
|
||||||
|
objects: &[ObjectMeta],
|
||||||
|
common_prefixes: &[String],
|
||||||
|
is_truncated: bool,
|
||||||
|
continuation_token: Option<&str>,
|
||||||
|
next_continuation_token: Option<&str>,
|
||||||
|
key_count: usize,
|
||||||
|
encoding_type: Option<&str>,
|
||||||
) -> String {
|
) -> String {
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||||
|
|
||||||
@@ -79,13 +139,22 @@ pub fn list_objects_v2_xml(
|
|||||||
writer.write_event(Event::Start(start)).unwrap();
|
writer.write_event(Event::Start(start)).unwrap();
|
||||||
|
|
||||||
write_text_element(&mut writer, "Name", bucket_name);
|
write_text_element(&mut writer, "Name", bucket_name);
|
||||||
write_text_element(&mut writer, "Prefix", prefix);
|
write_text_element(&mut writer, "Prefix", &maybe_url_encode(prefix, encoding_type));
|
||||||
if !delimiter.is_empty() {
|
if !delimiter.is_empty() {
|
||||||
write_text_element(&mut writer, "Delimiter", delimiter);
|
write_text_element(
|
||||||
|
&mut writer,
|
||||||
|
"Delimiter",
|
||||||
|
&maybe_url_encode(delimiter, encoding_type),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
||||||
write_text_element(&mut writer, "KeyCount", &key_count.to_string());
|
write_text_element(&mut writer, "KeyCount", &key_count.to_string());
|
||||||
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
||||||
|
if let Some(encoding) = encoding_type {
|
||||||
|
if !encoding.is_empty() {
|
||||||
|
write_text_element(&mut writer, "EncodingType", encoding);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(token) = continuation_token {
|
if let Some(token) = continuation_token {
|
||||||
write_text_element(&mut writer, "ContinuationToken", token);
|
write_text_element(&mut writer, "ContinuationToken", token);
|
||||||
@@ -98,7 +167,7 @@ pub fn list_objects_v2_xml(
|
|||||||
writer
|
writer
|
||||||
.write_event(Event::Start(BytesStart::new("Contents")))
|
.write_event(Event::Start(BytesStart::new("Contents")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
write_text_element(&mut writer, "Key", &obj.key);
|
write_text_element(&mut writer, "Key", &maybe_url_encode(&obj.key, encoding_type));
|
||||||
write_text_element(
|
write_text_element(
|
||||||
&mut writer,
|
&mut writer,
|
||||||
"LastModified",
|
"LastModified",
|
||||||
@@ -122,7 +191,7 @@ pub fn list_objects_v2_xml(
|
|||||||
writer
|
writer
|
||||||
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
|
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
write_text_element(&mut writer, "Prefix", prefix);
|
write_text_element(&mut writer, "Prefix", &maybe_url_encode(prefix, encoding_type));
|
||||||
writer
|
writer
|
||||||
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
|
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
@@ -145,6 +214,32 @@ pub fn list_objects_v1_xml(
|
|||||||
common_prefixes: &[String],
|
common_prefixes: &[String],
|
||||||
is_truncated: bool,
|
is_truncated: bool,
|
||||||
next_marker: Option<&str>,
|
next_marker: Option<&str>,
|
||||||
|
) -> String {
|
||||||
|
list_objects_v1_xml_with_encoding(
|
||||||
|
bucket_name,
|
||||||
|
prefix,
|
||||||
|
marker,
|
||||||
|
delimiter,
|
||||||
|
max_keys,
|
||||||
|
objects,
|
||||||
|
common_prefixes,
|
||||||
|
is_truncated,
|
||||||
|
next_marker,
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn list_objects_v1_xml_with_encoding(
|
||||||
|
bucket_name: &str,
|
||||||
|
prefix: &str,
|
||||||
|
marker: &str,
|
||||||
|
delimiter: &str,
|
||||||
|
max_keys: usize,
|
||||||
|
objects: &[ObjectMeta],
|
||||||
|
common_prefixes: &[String],
|
||||||
|
is_truncated: bool,
|
||||||
|
next_marker: Option<&str>,
|
||||||
|
encoding_type: Option<&str>,
|
||||||
) -> String {
|
) -> String {
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
||||||
|
|
||||||
@@ -157,27 +252,36 @@ pub fn list_objects_v1_xml(
|
|||||||
writer.write_event(Event::Start(start)).unwrap();
|
writer.write_event(Event::Start(start)).unwrap();
|
||||||
|
|
||||||
write_text_element(&mut writer, "Name", bucket_name);
|
write_text_element(&mut writer, "Name", bucket_name);
|
||||||
write_text_element(&mut writer, "Prefix", prefix);
|
write_text_element(&mut writer, "Prefix", &maybe_url_encode(prefix, encoding_type));
|
||||||
write_text_element(&mut writer, "Marker", marker);
|
write_text_element(&mut writer, "Marker", &maybe_url_encode(marker, encoding_type));
|
||||||
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
|
||||||
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());
|
||||||
|
|
||||||
if !delimiter.is_empty() {
|
if !delimiter.is_empty() {
|
||||||
write_text_element(&mut writer, "Delimiter", delimiter);
|
write_text_element(
|
||||||
|
&mut writer,
|
||||||
|
"Delimiter",
|
||||||
|
&maybe_url_encode(delimiter, encoding_type),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
if !delimiter.is_empty() && is_truncated {
|
if !delimiter.is_empty() && is_truncated {
|
||||||
if let Some(nm) = next_marker {
|
if let Some(nm) = next_marker {
|
||||||
if !nm.is_empty() {
|
if !nm.is_empty() {
|
||||||
write_text_element(&mut writer, "NextMarker", nm);
|
write_text_element(&mut writer, "NextMarker", &maybe_url_encode(nm, encoding_type));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if let Some(encoding) = encoding_type {
|
||||||
|
if !encoding.is_empty() {
|
||||||
|
write_text_element(&mut writer, "EncodingType", encoding);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for obj in objects {
|
for obj in objects {
|
||||||
writer
|
writer
|
||||||
.write_event(Event::Start(BytesStart::new("Contents")))
|
.write_event(Event::Start(BytesStart::new("Contents")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
write_text_element(&mut writer, "Key", &obj.key);
|
write_text_element(&mut writer, "Key", &maybe_url_encode(&obj.key, encoding_type));
|
||||||
write_text_element(
|
write_text_element(
|
||||||
&mut writer,
|
&mut writer,
|
||||||
"LastModified",
|
"LastModified",
|
||||||
@@ -196,7 +300,7 @@ pub fn list_objects_v1_xml(
|
|||||||
writer
|
writer
|
||||||
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
|
.write_event(Event::Start(BytesStart::new("CommonPrefixes")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
write_text_element(&mut writer, "Prefix", cp);
|
write_text_element(&mut writer, "Prefix", &maybe_url_encode(cp, encoding_type));
|
||||||
writer
|
writer
|
||||||
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
|
.write_event(Event::End(BytesEnd::new("CommonPrefixes")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
@@ -319,8 +423,15 @@ pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
|
|||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub struct DeletedEntry {
|
||||||
|
pub key: String,
|
||||||
|
pub version_id: Option<String>,
|
||||||
|
pub delete_marker: bool,
|
||||||
|
pub delete_marker_version_id: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
pub fn delete_result_xml(
|
pub fn delete_result_xml(
|
||||||
deleted: &[(String, Option<String>)],
|
deleted: &[DeletedEntry],
|
||||||
errors: &[(String, String, String)],
|
errors: &[(String, String, String)],
|
||||||
quiet: bool,
|
quiet: bool,
|
||||||
) -> String {
|
) -> String {
|
||||||
@@ -334,14 +445,20 @@ pub fn delete_result_xml(
|
|||||||
writer.write_event(Event::Start(start)).unwrap();
|
writer.write_event(Event::Start(start)).unwrap();
|
||||||
|
|
||||||
if !quiet {
|
if !quiet {
|
||||||
for (key, version_id) in deleted {
|
for entry in deleted {
|
||||||
writer
|
writer
|
||||||
.write_event(Event::Start(BytesStart::new("Deleted")))
|
.write_event(Event::Start(BytesStart::new("Deleted")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
write_text_element(&mut writer, "Key", key);
|
write_text_element(&mut writer, "Key", &entry.key);
|
||||||
if let Some(vid) = version_id {
|
if let Some(ref vid) = entry.version_id {
|
||||||
write_text_element(&mut writer, "VersionId", vid);
|
write_text_element(&mut writer, "VersionId", vid);
|
||||||
}
|
}
|
||||||
|
if entry.delete_marker {
|
||||||
|
write_text_element(&mut writer, "DeleteMarker", "true");
|
||||||
|
if let Some(ref dm_vid) = entry.delete_marker_version_id {
|
||||||
|
write_text_element(&mut writer, "DeleteMarkerVersionId", dm_vid);
|
||||||
|
}
|
||||||
|
}
|
||||||
writer
|
writer
|
||||||
.write_event(Event::End(BytesEnd::new("Deleted")))
|
.write_event(Event::End(BytesEnd::new("Deleted")))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user