Compare commits
34 Commits
ad7b2a02cb
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| ae11c654f9 | |||
| f0c95ac0a9 | |||
| 8ff4797041 | |||
| 50fb5aa387 | |||
| cc161bf362 | |||
| 2a0e77a754 | |||
| eb0e435a5a | |||
| 7633007a08 | |||
| de0d869c9f | |||
| fdd068feee | |||
| 66b7677d2c | |||
| 4d90ead816 | |||
| b37a51ed1d | |||
| 0462a7b62e | |||
| 52660570c1 | |||
| 35f61313e0 | |||
| c470cfb576 | |||
| d96955deee | |||
| 85181f0be6 | |||
| d5ca7a8be1 | |||
| 476dc79e42 | |||
| bb6590fc5e | |||
| 899db3421b | |||
| caf01d6ada | |||
| bb366cb4cd | |||
| a2745ff2ee | |||
| 28cb656d94 | |||
| 3c44152fc6 | |||
| 397515edce | |||
| 980fced7e4 | |||
| bae5009ec4 | |||
| 233780617f | |||
| fd8fb21517 | |||
| c6cbe822e1 |
@@ -11,5 +11,3 @@ htmlcov
|
|||||||
logs
|
logs
|
||||||
data
|
data
|
||||||
tmp
|
tmp
|
||||||
myfsio_core/target
|
|
||||||
myfsio-engine/target
|
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -30,9 +30,6 @@ dist/
|
|||||||
myfsio_core/target/
|
myfsio_core/target/
|
||||||
myfsio_core/Cargo.lock
|
myfsio_core/Cargo.lock
|
||||||
|
|
||||||
# Rust engine build artifacts
|
|
||||||
myfsio-engine/target/
|
|
||||||
|
|
||||||
# Local runtime artifacts
|
# Local runtime artifacts
|
||||||
logs/
|
logs/
|
||||||
*.log
|
*.log
|
||||||
|
|||||||
@@ -13,7 +13,6 @@ RUN apt-get update \
|
|||||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||||
|
|
||||||
COPY requirements.txt ./
|
COPY requirements.txt ./
|
||||||
|
|
||||||
RUN pip install --no-cache-dir -r requirements.txt
|
RUN pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
COPY . .
|
COPY . .
|
||||||
@@ -22,11 +21,8 @@ RUN pip install --no-cache-dir maturin \
|
|||||||
&& cd myfsio_core \
|
&& cd myfsio_core \
|
||||||
&& maturin build --release \
|
&& maturin build --release \
|
||||||
&& pip install target/wheels/*.whl \
|
&& pip install target/wheels/*.whl \
|
||||||
&& cd ../myfsio-engine \
|
|
||||||
&& cargo build --release \
|
|
||||||
&& cp target/release/myfsio-server /usr/local/bin/myfsio-server \
|
|
||||||
&& cd .. \
|
&& cd .. \
|
||||||
&& rm -rf myfsio_core/target myfsio-engine/target \
|
&& rm -rf myfsio_core/target \
|
||||||
&& pip uninstall -y maturin \
|
&& pip uninstall -y maturin \
|
||||||
&& rustup self uninstall -y
|
&& rustup self uninstall -y
|
||||||
|
|
||||||
@@ -41,8 +37,7 @@ USER myfsio
|
|||||||
EXPOSE 5000 5100
|
EXPOSE 5000 5100
|
||||||
ENV APP_HOST=0.0.0.0 \
|
ENV APP_HOST=0.0.0.0 \
|
||||||
FLASK_ENV=production \
|
FLASK_ENV=production \
|
||||||
FLASK_DEBUG=0 \
|
FLASK_DEBUG=0
|
||||||
ENGINE=rust
|
|
||||||
|
|
||||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||||
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
|
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
APP_VERSION = "0.4.3"
|
APP_VERSION = "0.4.2"
|
||||||
|
|
||||||
|
|
||||||
def get_version() -> str:
|
def get_version() -> str:
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
ENGINE="${ENGINE:-rust}"
|
# Run both services using the python runner in production mode
|
||||||
|
exec python run.py --prod
|
||||||
exec python run.py --prod --engine "$ENGINE"
|
|
||||||
|
|||||||
3443
myfsio-engine/Cargo.lock
generated
3443
myfsio-engine/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,45 +0,0 @@
|
|||||||
[workspace]
|
|
||||||
resolver = "2"
|
|
||||||
members = [
|
|
||||||
"crates/myfsio-common",
|
|
||||||
"crates/myfsio-auth",
|
|
||||||
"crates/myfsio-crypto",
|
|
||||||
"crates/myfsio-storage",
|
|
||||||
"crates/myfsio-xml",
|
|
||||||
"crates/myfsio-server",
|
|
||||||
]
|
|
||||||
|
|
||||||
[workspace.dependencies]
|
|
||||||
tokio = { version = "1", features = ["full"] }
|
|
||||||
axum = { version = "0.8" }
|
|
||||||
tower = { version = "0.5" }
|
|
||||||
tower-http = { version = "0.6", features = ["cors", "trace"] }
|
|
||||||
hyper = { version = "1" }
|
|
||||||
bytes = "1"
|
|
||||||
serde = { version = "1", features = ["derive"] }
|
|
||||||
serde_json = "1"
|
|
||||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
|
||||||
hmac = "0.12"
|
|
||||||
sha2 = "0.10"
|
|
||||||
md-5 = "0.10"
|
|
||||||
hex = "0.4"
|
|
||||||
aes = "0.8"
|
|
||||||
aes-gcm = "0.10"
|
|
||||||
cbc = { version = "0.1", features = ["alloc"] }
|
|
||||||
hkdf = "0.12"
|
|
||||||
uuid = { version = "1", features = ["v4"] }
|
|
||||||
parking_lot = "0.12"
|
|
||||||
lru = "0.14"
|
|
||||||
percent-encoding = "2"
|
|
||||||
regex = "1"
|
|
||||||
unicode-normalization = "0.1"
|
|
||||||
tracing = "0.1"
|
|
||||||
tracing-subscriber = "0.3"
|
|
||||||
thiserror = "2"
|
|
||||||
chrono = { version = "0.4", features = ["serde"] }
|
|
||||||
base64 = "0.22"
|
|
||||||
tokio-util = { version = "0.7", features = ["io"] }
|
|
||||||
futures = "0.3"
|
|
||||||
dashmap = "6"
|
|
||||||
crc32fast = "1"
|
|
||||||
duckdb = { version = "1", features = ["bundled"] }
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "myfsio-auth"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
myfsio-common = { path = "../myfsio-common" }
|
|
||||||
hmac = { workspace = true }
|
|
||||||
sha2 = { workspace = true }
|
|
||||||
hex = { workspace = true }
|
|
||||||
aes = { workspace = true }
|
|
||||||
cbc = { workspace = true }
|
|
||||||
base64 = { workspace = true }
|
|
||||||
pbkdf2 = "0.12"
|
|
||||||
lru = { workspace = true }
|
|
||||||
parking_lot = { workspace = true }
|
|
||||||
percent-encoding = { workspace = true }
|
|
||||||
serde = { workspace = true }
|
|
||||||
serde_json = { workspace = true }
|
|
||||||
thiserror = { workspace = true }
|
|
||||||
chrono = { workspace = true }
|
|
||||||
tracing = { workspace = true }
|
|
||||||
uuid = { workspace = true }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tempfile = "3"
|
|
||||||
@@ -1,80 +0,0 @@
|
|||||||
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, KeyIvInit};
|
|
||||||
use base64::{engine::general_purpose::URL_SAFE, Engine};
|
|
||||||
use hmac::{Hmac, Mac};
|
|
||||||
use sha2::Sha256;
|
|
||||||
|
|
||||||
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
|
|
||||||
type HmacSha256 = Hmac<Sha256>;
|
|
||||||
|
|
||||||
pub fn derive_fernet_key(secret: &str) -> String {
|
|
||||||
let mut derived = [0u8; 32];
|
|
||||||
pbkdf2::pbkdf2_hmac::<Sha256>(
|
|
||||||
secret.as_bytes(),
|
|
||||||
b"myfsio-iam-encryption",
|
|
||||||
100_000,
|
|
||||||
&mut derived,
|
|
||||||
);
|
|
||||||
URL_SAFE.encode(derived)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
|
|
||||||
let key_bytes = URL_SAFE
|
|
||||||
.decode(key_b64)
|
|
||||||
.map_err(|_| "invalid fernet key base64")?;
|
|
||||||
if key_bytes.len() != 32 {
|
|
||||||
return Err("fernet key must be 32 bytes");
|
|
||||||
}
|
|
||||||
|
|
||||||
let signing_key = &key_bytes[..16];
|
|
||||||
let encryption_key = &key_bytes[16..];
|
|
||||||
|
|
||||||
let token_bytes = URL_SAFE
|
|
||||||
.decode(token)
|
|
||||||
.map_err(|_| "invalid fernet token base64")?;
|
|
||||||
|
|
||||||
if token_bytes.len() < 57 {
|
|
||||||
return Err("fernet token too short");
|
|
||||||
}
|
|
||||||
|
|
||||||
if token_bytes[0] != 0x80 {
|
|
||||||
return Err("invalid fernet version");
|
|
||||||
}
|
|
||||||
|
|
||||||
let hmac_offset = token_bytes.len() - 32;
|
|
||||||
let payload = &token_bytes[..hmac_offset];
|
|
||||||
let expected_hmac = &token_bytes[hmac_offset..];
|
|
||||||
|
|
||||||
let mut mac =
|
|
||||||
HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
|
|
||||||
mac.update(payload);
|
|
||||||
mac.verify_slice(expected_hmac)
|
|
||||||
.map_err(|_| "HMAC verification failed")?;
|
|
||||||
|
|
||||||
let iv = &token_bytes[9..25];
|
|
||||||
let ciphertext = &token_bytes[25..hmac_offset];
|
|
||||||
|
|
||||||
let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
|
|
||||||
.decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
|
|
||||||
.map_err(|_| "AES-CBC decryption failed")?;
|
|
||||||
|
|
||||||
Ok(plaintext)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_derive_fernet_key_format() {
|
|
||||||
let key = derive_fernet_key("test-secret");
|
|
||||||
let decoded = URL_SAFE.decode(&key).unwrap();
|
|
||||||
assert_eq!(decoded.len(), 32);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_roundtrip_with_python_compat() {
|
|
||||||
let key = derive_fernet_key("dev-secret-key");
|
|
||||||
let decoded = URL_SAFE.decode(&key).unwrap();
|
|
||||||
assert_eq!(decoded.len(), 32);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,812 +0,0 @@
|
|||||||
use chrono::{DateTime, Utc};
|
|
||||||
use myfsio_common::types::Principal;
|
|
||||||
use parking_lot::RwLock;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::{Instant, SystemTime};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct IamConfig {
|
|
||||||
#[serde(default = "default_version")]
|
|
||||||
pub version: u32,
|
|
||||||
#[serde(default)]
|
|
||||||
pub users: Vec<IamUser>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_version() -> u32 {
|
|
||||||
2
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct IamUser {
|
|
||||||
pub user_id: String,
|
|
||||||
pub display_name: String,
|
|
||||||
#[serde(default = "default_enabled")]
|
|
||||||
pub enabled: bool,
|
|
||||||
#[serde(default)]
|
|
||||||
pub expires_at: Option<String>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub access_keys: Vec<AccessKey>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub policies: Vec<IamPolicy>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
struct RawIamConfig {
|
|
||||||
#[serde(default)]
|
|
||||||
pub users: Vec<RawIamUser>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
struct RawIamUser {
|
|
||||||
pub user_id: Option<String>,
|
|
||||||
pub display_name: Option<String>,
|
|
||||||
#[serde(default = "default_enabled")]
|
|
||||||
pub enabled: bool,
|
|
||||||
#[serde(default)]
|
|
||||||
pub expires_at: Option<String>,
|
|
||||||
pub access_key: Option<String>,
|
|
||||||
pub secret_key: Option<String>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub access_keys: Vec<AccessKey>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub policies: Vec<IamPolicy>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RawIamUser {
|
|
||||||
fn normalize(self) -> IamUser {
|
|
||||||
let mut access_keys = self.access_keys;
|
|
||||||
if access_keys.is_empty() {
|
|
||||||
if let (Some(ak), Some(sk)) = (self.access_key, self.secret_key) {
|
|
||||||
access_keys.push(AccessKey {
|
|
||||||
access_key: ak,
|
|
||||||
secret_key: sk,
|
|
||||||
status: "active".to_string(),
|
|
||||||
created_at: None,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let display_name = self.display_name.unwrap_or_else(|| {
|
|
||||||
access_keys.first().map(|k| k.access_key.clone()).unwrap_or_else(|| "unknown".to_string())
|
|
||||||
});
|
|
||||||
let user_id = self.user_id.unwrap_or_else(|| {
|
|
||||||
format!("u-{}", display_name.to_ascii_lowercase().replace(' ', "-"))
|
|
||||||
});
|
|
||||||
IamUser {
|
|
||||||
user_id,
|
|
||||||
display_name,
|
|
||||||
enabled: self.enabled,
|
|
||||||
expires_at: self.expires_at,
|
|
||||||
access_keys,
|
|
||||||
policies: self.policies,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_enabled() -> bool {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct AccessKey {
|
|
||||||
pub access_key: String,
|
|
||||||
pub secret_key: String,
|
|
||||||
#[serde(default = "default_status")]
|
|
||||||
pub status: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub created_at: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_status() -> String {
|
|
||||||
"active".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct IamPolicy {
|
|
||||||
pub bucket: String,
|
|
||||||
pub actions: Vec<String>,
|
|
||||||
#[serde(default = "default_prefix")]
|
|
||||||
pub prefix: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_prefix() -> String {
|
|
||||||
"*".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
struct IamState {
|
|
||||||
key_secrets: HashMap<String, String>,
|
|
||||||
key_index: HashMap<String, String>,
|
|
||||||
key_status: HashMap<String, String>,
|
|
||||||
user_records: HashMap<String, IamUser>,
|
|
||||||
file_mtime: Option<SystemTime>,
|
|
||||||
last_check: Instant,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct IamService {
|
|
||||||
config_path: PathBuf,
|
|
||||||
state: Arc<RwLock<IamState>>,
|
|
||||||
check_interval: std::time::Duration,
|
|
||||||
fernet_key: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IamService {
|
|
||||||
pub fn new(config_path: PathBuf) -> Self {
|
|
||||||
Self::new_with_secret(config_path, None)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_with_secret(config_path: PathBuf, secret_key: Option<String>) -> Self {
|
|
||||||
let fernet_key = secret_key.map(|s| crate::fernet::derive_fernet_key(&s));
|
|
||||||
let service = Self {
|
|
||||||
config_path,
|
|
||||||
state: Arc::new(RwLock::new(IamState {
|
|
||||||
key_secrets: HashMap::new(),
|
|
||||||
key_index: HashMap::new(),
|
|
||||||
key_status: HashMap::new(),
|
|
||||||
user_records: HashMap::new(),
|
|
||||||
file_mtime: None,
|
|
||||||
last_check: Instant::now(),
|
|
||||||
})),
|
|
||||||
check_interval: std::time::Duration::from_secs(2),
|
|
||||||
fernet_key,
|
|
||||||
};
|
|
||||||
service.reload();
|
|
||||||
service
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reload_if_needed(&self) {
|
|
||||||
{
|
|
||||||
let state = self.state.read();
|
|
||||||
if state.last_check.elapsed() < self.check_interval {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let current_mtime = std::fs::metadata(&self.config_path)
|
|
||||||
.and_then(|m| m.modified())
|
|
||||||
.ok();
|
|
||||||
|
|
||||||
let needs_reload = {
|
|
||||||
let state = self.state.read();
|
|
||||||
match (&state.file_mtime, ¤t_mtime) {
|
|
||||||
(None, Some(_)) => true,
|
|
||||||
(Some(old), Some(new)) => old != new,
|
|
||||||
(Some(_), None) => true,
|
|
||||||
(None, None) => state.key_secrets.is_empty(),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if needs_reload {
|
|
||||||
self.reload();
|
|
||||||
}
|
|
||||||
|
|
||||||
self.state.write().last_check = Instant::now();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reload(&self) {
|
|
||||||
let content = match std::fs::read_to_string(&self.config_path) {
|
|
||||||
Ok(c) => c,
|
|
||||||
Err(e) => {
|
|
||||||
tracing::warn!("Failed to read IAM config {}: {}", self.config_path.display(), e);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let raw = if content.starts_with("MYFSIO_IAM_ENC:") {
|
|
||||||
let encrypted_token = &content["MYFSIO_IAM_ENC:".len()..];
|
|
||||||
match &self.fernet_key {
|
|
||||||
Some(key) => match crate::fernet::decrypt(key, encrypted_token.trim()) {
|
|
||||||
Ok(plaintext) => match String::from_utf8(plaintext) {
|
|
||||||
Ok(s) => s,
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Decrypted IAM config is not valid UTF-8: {}", e);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Failed to decrypt IAM config: {}. SECRET_KEY may have changed.", e);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
None => {
|
|
||||||
tracing::error!("IAM config is encrypted but no SECRET_KEY configured");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
content
|
|
||||||
};
|
|
||||||
|
|
||||||
let raw_config: RawIamConfig = match serde_json::from_str(&raw) {
|
|
||||||
Ok(c) => c,
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Failed to parse IAM config: {}", e);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let users: Vec<IamUser> = raw_config.users.into_iter().map(|u| u.normalize()).collect();
|
|
||||||
|
|
||||||
let mut key_secrets = HashMap::new();
|
|
||||||
let mut key_index = HashMap::new();
|
|
||||||
let mut key_status = HashMap::new();
|
|
||||||
let mut user_records = HashMap::new();
|
|
||||||
|
|
||||||
for user in &users {
|
|
||||||
user_records.insert(user.user_id.clone(), user.clone());
|
|
||||||
for ak in &user.access_keys {
|
|
||||||
key_secrets.insert(ak.access_key.clone(), ak.secret_key.clone());
|
|
||||||
key_index.insert(ak.access_key.clone(), user.user_id.clone());
|
|
||||||
key_status.insert(ak.access_key.clone(), ak.status.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let file_mtime = std::fs::metadata(&self.config_path)
|
|
||||||
.and_then(|m| m.modified())
|
|
||||||
.ok();
|
|
||||||
|
|
||||||
let mut state = self.state.write();
|
|
||||||
state.key_secrets = key_secrets;
|
|
||||||
state.key_index = key_index;
|
|
||||||
state.key_status = key_status;
|
|
||||||
state.user_records = user_records;
|
|
||||||
state.file_mtime = file_mtime;
|
|
||||||
state.last_check = Instant::now();
|
|
||||||
|
|
||||||
tracing::info!("IAM config reloaded: {} users, {} keys",
|
|
||||||
users.len(),
|
|
||||||
state.key_secrets.len());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_secret_key(&self, access_key: &str) -> Option<String> {
|
|
||||||
self.reload_if_needed();
|
|
||||||
let state = self.state.read();
|
|
||||||
|
|
||||||
let status = state.key_status.get(access_key)?;
|
|
||||||
if status != "active" {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let user_id = state.key_index.get(access_key)?;
|
|
||||||
let user = state.user_records.get(user_id)?;
|
|
||||||
if !user.enabled {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref expires_at) = user.expires_at {
|
|
||||||
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
|
|
||||||
if Utc::now() > exp {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
state.key_secrets.get(access_key).cloned()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_principal(&self, access_key: &str) -> Option<Principal> {
|
|
||||||
self.reload_if_needed();
|
|
||||||
let state = self.state.read();
|
|
||||||
|
|
||||||
let status = state.key_status.get(access_key)?;
|
|
||||||
if status != "active" {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let user_id = state.key_index.get(access_key)?;
|
|
||||||
let user = state.user_records.get(user_id)?;
|
|
||||||
if !user.enabled {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref expires_at) = user.expires_at {
|
|
||||||
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
|
|
||||||
if Utc::now() > exp {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let is_admin = user.policies.iter().any(|p| {
|
|
||||||
p.bucket == "*" && p.actions.iter().any(|a| a == "*")
|
|
||||||
});
|
|
||||||
|
|
||||||
Some(Principal::new(
|
|
||||||
access_key.to_string(),
|
|
||||||
user.user_id.clone(),
|
|
||||||
user.display_name.clone(),
|
|
||||||
is_admin,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn authenticate(&self, access_key: &str, secret_key: &str) -> Option<Principal> {
|
|
||||||
let stored_secret = self.get_secret_key(access_key)?;
|
|
||||||
if !crate::sigv4::constant_time_compare(&stored_secret, secret_key) {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
self.get_principal(access_key)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn authorize(
|
|
||||||
&self,
|
|
||||||
principal: &Principal,
|
|
||||||
bucket_name: Option<&str>,
|
|
||||||
action: &str,
|
|
||||||
object_key: Option<&str>,
|
|
||||||
) -> bool {
|
|
||||||
self.reload_if_needed();
|
|
||||||
|
|
||||||
if principal.is_admin {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let normalized_bucket = bucket_name
|
|
||||||
.unwrap_or("*")
|
|
||||||
.trim()
|
|
||||||
.to_ascii_lowercase();
|
|
||||||
let normalized_action = action.trim().to_ascii_lowercase();
|
|
||||||
|
|
||||||
let state = self.state.read();
|
|
||||||
let user = match state.user_records.get(&principal.user_id) {
|
|
||||||
Some(u) => u,
|
|
||||||
None => return false,
|
|
||||||
};
|
|
||||||
|
|
||||||
if !user.enabled {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref expires_at) = user.expires_at {
|
|
||||||
if let Ok(exp) = expires_at.parse::<DateTime<Utc>>() {
|
|
||||||
if Utc::now() > exp {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for policy in &user.policies {
|
|
||||||
if !bucket_matches(&policy.bucket, &normalized_bucket) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if !action_matches(&policy.actions, &normalized_action) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if let Some(key) = object_key {
|
|
||||||
if !prefix_matches(&policy.prefix, key) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
false
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn list_users(&self) -> Vec<serde_json::Value> {
|
|
||||||
self.reload_if_needed();
|
|
||||||
let state = self.state.read();
|
|
||||||
state
|
|
||||||
.user_records
|
|
||||||
.values()
|
|
||||||
.map(|u| {
|
|
||||||
serde_json::json!({
|
|
||||||
"user_id": u.user_id,
|
|
||||||
"display_name": u.display_name,
|
|
||||||
"enabled": u.enabled,
|
|
||||||
"access_keys": u.access_keys.iter().map(|k| {
|
|
||||||
serde_json::json!({
|
|
||||||
"access_key": k.access_key,
|
|
||||||
"status": k.status,
|
|
||||||
"created_at": k.created_at,
|
|
||||||
})
|
|
||||||
}).collect::<Vec<_>>(),
|
|
||||||
"policy_count": u.policies.len(),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_user(&self, identifier: &str) -> Option<serde_json::Value> {
|
|
||||||
self.reload_if_needed();
|
|
||||||
let state = self.state.read();
|
|
||||||
|
|
||||||
let user = state
|
|
||||||
.user_records
|
|
||||||
.get(identifier)
|
|
||||||
.or_else(|| {
|
|
||||||
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Some(serde_json::json!({
|
|
||||||
"user_id": user.user_id,
|
|
||||||
"display_name": user.display_name,
|
|
||||||
"enabled": user.enabled,
|
|
||||||
"expires_at": user.expires_at,
|
|
||||||
"access_keys": user.access_keys.iter().map(|k| {
|
|
||||||
serde_json::json!({
|
|
||||||
"access_key": k.access_key,
|
|
||||||
"status": k.status,
|
|
||||||
"created_at": k.created_at,
|
|
||||||
})
|
|
||||||
}).collect::<Vec<_>>(),
|
|
||||||
"policies": user.policies,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn set_user_enabled(&self, identifier: &str, enabled: bool) -> Result<(), String> {
|
|
||||||
let content = std::fs::read_to_string(&self.config_path)
|
|
||||||
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
|
|
||||||
|
|
||||||
let raw: RawIamConfig = serde_json::from_str(&content)
|
|
||||||
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
|
|
||||||
let mut config = IamConfig {
|
|
||||||
version: 2,
|
|
||||||
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let user = config
|
|
||||||
.users
|
|
||||||
.iter_mut()
|
|
||||||
.find(|u| {
|
|
||||||
u.user_id == identifier
|
|
||||||
|| u.access_keys.iter().any(|k| k.access_key == identifier)
|
|
||||||
})
|
|
||||||
.ok_or_else(|| "User not found".to_string())?;
|
|
||||||
|
|
||||||
user.enabled = enabled;
|
|
||||||
|
|
||||||
let json = serde_json::to_string_pretty(&config)
|
|
||||||
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
|
|
||||||
std::fs::write(&self.config_path, json)
|
|
||||||
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
|
|
||||||
|
|
||||||
self.reload();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_user_policies(&self, identifier: &str) -> Option<Vec<serde_json::Value>> {
|
|
||||||
self.reload_if_needed();
|
|
||||||
let state = self.state.read();
|
|
||||||
let user = state
|
|
||||||
.user_records
|
|
||||||
.get(identifier)
|
|
||||||
.or_else(|| {
|
|
||||||
state.key_index.get(identifier).and_then(|uid| state.user_records.get(uid))
|
|
||||||
})?;
|
|
||||||
Some(
|
|
||||||
user.policies
|
|
||||||
.iter()
|
|
||||||
.map(|p| serde_json::to_value(p).unwrap_or_default())
|
|
||||||
.collect(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn create_access_key(&self, identifier: &str) -> Result<serde_json::Value, String> {
|
|
||||||
let content = std::fs::read_to_string(&self.config_path)
|
|
||||||
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
|
|
||||||
let raw: RawIamConfig = serde_json::from_str(&content)
|
|
||||||
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
|
|
||||||
let mut config = IamConfig {
|
|
||||||
version: 2,
|
|
||||||
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let user = config
|
|
||||||
.users
|
|
||||||
.iter_mut()
|
|
||||||
.find(|u| {
|
|
||||||
u.user_id == identifier
|
|
||||||
|| u.access_keys.iter().any(|k| k.access_key == identifier)
|
|
||||||
})
|
|
||||||
.ok_or_else(|| format!("User '{}' not found", identifier))?;
|
|
||||||
|
|
||||||
let new_ak = format!("AK{}", uuid::Uuid::new_v4().simple());
|
|
||||||
let new_sk = format!("SK{}", uuid::Uuid::new_v4().simple());
|
|
||||||
|
|
||||||
let key = AccessKey {
|
|
||||||
access_key: new_ak.clone(),
|
|
||||||
secret_key: new_sk.clone(),
|
|
||||||
status: "active".to_string(),
|
|
||||||
created_at: Some(chrono::Utc::now().to_rfc3339()),
|
|
||||||
};
|
|
||||||
user.access_keys.push(key);
|
|
||||||
|
|
||||||
let json = serde_json::to_string_pretty(&config)
|
|
||||||
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
|
|
||||||
std::fs::write(&self.config_path, json)
|
|
||||||
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
|
|
||||||
|
|
||||||
self.reload();
|
|
||||||
Ok(serde_json::json!({
|
|
||||||
"access_key": new_ak,
|
|
||||||
"secret_key": new_sk,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn delete_access_key(&self, access_key: &str) -> Result<(), String> {
|
|
||||||
let content = std::fs::read_to_string(&self.config_path)
|
|
||||||
.map_err(|e| format!("Failed to read IAM config: {}", e))?;
|
|
||||||
let raw: RawIamConfig = serde_json::from_str(&content)
|
|
||||||
.map_err(|e| format!("Failed to parse IAM config: {}", e))?;
|
|
||||||
let mut config = IamConfig {
|
|
||||||
version: 2,
|
|
||||||
users: raw.users.into_iter().map(|u| u.normalize()).collect(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut found = false;
|
|
||||||
for user in &mut config.users {
|
|
||||||
if user.access_keys.iter().any(|k| k.access_key == access_key) {
|
|
||||||
if user.access_keys.len() <= 1 {
|
|
||||||
return Err("Cannot delete the last access key".to_string());
|
|
||||||
}
|
|
||||||
user.access_keys.retain(|k| k.access_key != access_key);
|
|
||||||
found = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return Err(format!("Access key '{}' not found", access_key));
|
|
||||||
}
|
|
||||||
|
|
||||||
let json = serde_json::to_string_pretty(&config)
|
|
||||||
.map_err(|e| format!("Failed to serialize IAM config: {}", e))?;
|
|
||||||
std::fs::write(&self.config_path, json)
|
|
||||||
.map_err(|e| format!("Failed to write IAM config: {}", e))?;
|
|
||||||
|
|
||||||
self.reload();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn bucket_matches(policy_bucket: &str, bucket: &str) -> bool {
|
|
||||||
let pb = policy_bucket.trim().to_ascii_lowercase();
|
|
||||||
pb == "*" || pb == bucket
|
|
||||||
}
|
|
||||||
|
|
||||||
fn action_matches(policy_actions: &[String], action: &str) -> bool {
|
|
||||||
for policy_action in policy_actions {
|
|
||||||
let pa = policy_action.trim().to_ascii_lowercase();
|
|
||||||
if pa == "*" || pa == action {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if pa == "iam:*" && action.starts_with("iam:") {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
false
|
|
||||||
}
|
|
||||||
|
|
||||||
fn prefix_matches(policy_prefix: &str, object_key: &str) -> bool {
|
|
||||||
let p = policy_prefix.trim();
|
|
||||||
if p.is_empty() || p == "*" {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
let base = p.trim_end_matches('*');
|
|
||||||
object_key.starts_with(base)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
use std::io::Write;
|
|
||||||
|
|
||||||
fn test_iam_json() -> String {
|
|
||||||
serde_json::json!({
|
|
||||||
"version": 2,
|
|
||||||
"users": [{
|
|
||||||
"user_id": "u-test1234",
|
|
||||||
"display_name": "admin",
|
|
||||||
"enabled": true,
|
|
||||||
"access_keys": [{
|
|
||||||
"access_key": "AKIAIOSFODNN7EXAMPLE",
|
|
||||||
"secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
|
||||||
"status": "active",
|
|
||||||
"created_at": "2024-01-01T00:00:00Z"
|
|
||||||
}],
|
|
||||||
"policies": [{
|
|
||||||
"bucket": "*",
|
|
||||||
"actions": ["*"],
|
|
||||||
"prefix": "*"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
.to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_load_and_lookup() {
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(test_iam_json().as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
let secret = svc.get_secret_key("AKIAIOSFODNN7EXAMPLE");
|
|
||||||
assert_eq!(
|
|
||||||
secret.unwrap(),
|
|
||||||
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_get_principal() {
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(test_iam_json().as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
let principal = svc.get_principal("AKIAIOSFODNN7EXAMPLE").unwrap();
|
|
||||||
assert_eq!(principal.display_name, "admin");
|
|
||||||
assert_eq!(principal.user_id, "u-test1234");
|
|
||||||
assert!(principal.is_admin);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_authenticate_success() {
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(test_iam_json().as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
let principal = svc
|
|
||||||
.authenticate(
|
|
||||||
"AKIAIOSFODNN7EXAMPLE",
|
|
||||||
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(principal.display_name, "admin");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_authenticate_wrong_secret() {
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(test_iam_json().as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
assert!(svc.authenticate("AKIAIOSFODNN7EXAMPLE", "wrongsecret").is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_unknown_key_returns_none() {
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(test_iam_json().as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
assert!(svc.get_secret_key("NONEXISTENTKEY").is_none());
|
|
||||||
assert!(svc.get_principal("NONEXISTENTKEY").is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_disabled_user() {
|
|
||||||
let json = serde_json::json!({
|
|
||||||
"version": 2,
|
|
||||||
"users": [{
|
|
||||||
"user_id": "u-disabled",
|
|
||||||
"display_name": "disabled-user",
|
|
||||||
"enabled": false,
|
|
||||||
"access_keys": [{
|
|
||||||
"access_key": "DISABLED_KEY",
|
|
||||||
"secret_key": "secret123",
|
|
||||||
"status": "active"
|
|
||||||
}],
|
|
||||||
"policies": []
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(json.as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
assert!(svc.get_secret_key("DISABLED_KEY").is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_inactive_key() {
|
|
||||||
let json = serde_json::json!({
|
|
||||||
"version": 2,
|
|
||||||
"users": [{
|
|
||||||
"user_id": "u-test",
|
|
||||||
"display_name": "test",
|
|
||||||
"enabled": true,
|
|
||||||
"access_keys": [{
|
|
||||||
"access_key": "INACTIVE_KEY",
|
|
||||||
"secret_key": "secret123",
|
|
||||||
"status": "inactive"
|
|
||||||
}],
|
|
||||||
"policies": []
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(json.as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
assert!(svc.get_secret_key("INACTIVE_KEY").is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_v1_flat_format() {
|
|
||||||
let json = serde_json::json!({
|
|
||||||
"users": [{
|
|
||||||
"access_key": "test",
|
|
||||||
"secret_key": "secret",
|
|
||||||
"display_name": "Test User",
|
|
||||||
"policies": [{"bucket": "*", "actions": ["*"], "prefix": "*"}]
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(json.as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
let secret = svc.get_secret_key("test");
|
|
||||||
assert_eq!(secret.unwrap(), "secret");
|
|
||||||
|
|
||||||
let principal = svc.get_principal("test").unwrap();
|
|
||||||
assert_eq!(principal.display_name, "Test User");
|
|
||||||
assert!(principal.is_admin);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_authorize_allows_matching_policy() {
|
|
||||||
let json = serde_json::json!({
|
|
||||||
"version": 2,
|
|
||||||
"users": [{
|
|
||||||
"user_id": "u-reader",
|
|
||||||
"display_name": "reader",
|
|
||||||
"enabled": true,
|
|
||||||
"access_keys": [{
|
|
||||||
"access_key": "READER_KEY",
|
|
||||||
"secret_key": "reader-secret",
|
|
||||||
"status": "active"
|
|
||||||
}],
|
|
||||||
"policies": [{
|
|
||||||
"bucket": "docs",
|
|
||||||
"actions": ["read"],
|
|
||||||
"prefix": "reports/"
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
})
|
|
||||||
.to_string();
|
|
||||||
|
|
||||||
let mut tmp = tempfile::NamedTempFile::new().unwrap();
|
|
||||||
tmp.write_all(json.as_bytes()).unwrap();
|
|
||||||
tmp.flush().unwrap();
|
|
||||||
|
|
||||||
let svc = IamService::new(tmp.path().to_path_buf());
|
|
||||||
let principal = svc.get_principal("READER_KEY").unwrap();
|
|
||||||
|
|
||||||
assert!(svc.authorize(
|
|
||||||
&principal,
|
|
||||||
Some("docs"),
|
|
||||||
"read",
|
|
||||||
Some("reports/2026.csv"),
|
|
||||||
));
|
|
||||||
assert!(!svc.authorize(
|
|
||||||
&principal,
|
|
||||||
Some("docs"),
|
|
||||||
"write",
|
|
||||||
Some("reports/2026.csv"),
|
|
||||||
));
|
|
||||||
assert!(!svc.authorize(
|
|
||||||
&principal,
|
|
||||||
Some("docs"),
|
|
||||||
"read",
|
|
||||||
Some("private/2026.csv"),
|
|
||||||
));
|
|
||||||
assert!(!svc.authorize(
|
|
||||||
&principal,
|
|
||||||
Some("other"),
|
|
||||||
"read",
|
|
||||||
Some("reports/2026.csv"),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
pub mod sigv4;
|
|
||||||
pub mod principal;
|
|
||||||
pub mod iam;
|
|
||||||
mod fernet;
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
pub use myfsio_common::types::Principal;
|
|
||||||
@@ -1,258 +0,0 @@
|
|||||||
use hmac::{Hmac, Mac};
|
|
||||||
use lru::LruCache;
|
|
||||||
use parking_lot::Mutex;
|
|
||||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
|
||||||
use sha2::{Digest, Sha256};
|
|
||||||
use std::num::NonZeroUsize;
|
|
||||||
use std::sync::LazyLock;
|
|
||||||
use std::time::Instant;
|
|
||||||
|
|
||||||
type HmacSha256 = Hmac<Sha256>;
|
|
||||||
|
|
||||||
struct CacheEntry {
|
|
||||||
key: Vec<u8>,
|
|
||||||
created: Instant,
|
|
||||||
}
|
|
||||||
|
|
||||||
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
|
|
||||||
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
|
|
||||||
|
|
||||||
const CACHE_TTL_SECS: u64 = 60;
|
|
||||||
|
|
||||||
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
|
|
||||||
.remove(b'-')
|
|
||||||
.remove(b'_')
|
|
||||||
.remove(b'.')
|
|
||||||
.remove(b'~');
|
|
||||||
|
|
||||||
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
|
|
||||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
|
|
||||||
mac.update(msg);
|
|
||||||
mac.finalize().into_bytes().to_vec()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sha256_hex(data: &[u8]) -> String {
|
|
||||||
let mut hasher = Sha256::new();
|
|
||||||
hasher.update(data);
|
|
||||||
hex::encode(hasher.finalize())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn aws_uri_encode(input: &str) -> String {
|
|
||||||
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn derive_signing_key_cached(
|
|
||||||
secret_key: &str,
|
|
||||||
date_stamp: &str,
|
|
||||||
region: &str,
|
|
||||||
service: &str,
|
|
||||||
) -> Vec<u8> {
|
|
||||||
let cache_key = (
|
|
||||||
secret_key.to_owned(),
|
|
||||||
date_stamp.to_owned(),
|
|
||||||
region.to_owned(),
|
|
||||||
service.to_owned(),
|
|
||||||
);
|
|
||||||
|
|
||||||
{
|
|
||||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
|
||||||
if let Some(entry) = cache.get(&cache_key) {
|
|
||||||
if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
|
|
||||||
return entry.key.clone();
|
|
||||||
}
|
|
||||||
cache.pop(&cache_key);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
|
|
||||||
let k_region = hmac_sha256(&k_date, region.as_bytes());
|
|
||||||
let k_service = hmac_sha256(&k_region, service.as_bytes());
|
|
||||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
|
||||||
|
|
||||||
{
|
|
||||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
|
||||||
cache.put(
|
|
||||||
cache_key,
|
|
||||||
CacheEntry {
|
|
||||||
key: k_signing.clone(),
|
|
||||||
created: Instant::now(),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
k_signing
|
|
||||||
}
|
|
||||||
|
|
||||||
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
|
|
||||||
if a.len() != b.len() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
let mut result: u8 = 0;
|
|
||||||
for (x, y) in a.iter().zip(b.iter()) {
|
|
||||||
result |= x ^ y;
|
|
||||||
}
|
|
||||||
result == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn verify_sigv4_signature(
|
|
||||||
method: &str,
|
|
||||||
canonical_uri: &str,
|
|
||||||
query_params: &[(String, String)],
|
|
||||||
signed_headers_str: &str,
|
|
||||||
header_values: &[(String, String)],
|
|
||||||
payload_hash: &str,
|
|
||||||
amz_date: &str,
|
|
||||||
date_stamp: &str,
|
|
||||||
region: &str,
|
|
||||||
service: &str,
|
|
||||||
secret_key: &str,
|
|
||||||
provided_signature: &str,
|
|
||||||
) -> bool {
|
|
||||||
let mut sorted_params = query_params.to_vec();
|
|
||||||
sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
|
|
||||||
|
|
||||||
let canonical_query_string = sorted_params
|
|
||||||
.iter()
|
|
||||||
.map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join("&");
|
|
||||||
|
|
||||||
let mut canonical_headers = String::new();
|
|
||||||
for (name, value) in header_values {
|
|
||||||
let lower_name = name.to_lowercase();
|
|
||||||
let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
|
|
||||||
let final_value = if lower_name == "expect" && normalized.is_empty() {
|
|
||||||
"100-continue"
|
|
||||||
} else {
|
|
||||||
&normalized
|
|
||||||
};
|
|
||||||
canonical_headers.push_str(&lower_name);
|
|
||||||
canonical_headers.push(':');
|
|
||||||
canonical_headers.push_str(final_value);
|
|
||||||
canonical_headers.push('\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
let canonical_request = format!(
|
|
||||||
"{}\n{}\n{}\n{}\n{}\n{}",
|
|
||||||
method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str,
|
|
||||||
payload_hash
|
|
||||||
);
|
|
||||||
|
|
||||||
let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
|
|
||||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
|
||||||
let string_to_sign = format!(
|
|
||||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
|
||||||
amz_date, credential_scope, cr_hash
|
|
||||||
);
|
|
||||||
|
|
||||||
let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
|
|
||||||
let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
|
|
||||||
let calculated_hex = hex::encode(&calculated);
|
|
||||||
|
|
||||||
constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn derive_signing_key(
|
|
||||||
secret_key: &str,
|
|
||||||
date_stamp: &str,
|
|
||||||
region: &str,
|
|
||||||
service: &str,
|
|
||||||
) -> Vec<u8> {
|
|
||||||
derive_signing_key_cached(secret_key, date_stamp, region, service)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
|
|
||||||
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
|
|
||||||
hex::encode(sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build_string_to_sign(
|
|
||||||
amz_date: &str,
|
|
||||||
credential_scope: &str,
|
|
||||||
canonical_request: &str,
|
|
||||||
) -> String {
|
|
||||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
|
||||||
format!(
|
|
||||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
|
||||||
amz_date, credential_scope, cr_hash
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn constant_time_compare(a: &str, b: &str) -> bool {
|
|
||||||
constant_time_compare_inner(a.as_bytes(), b.as_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn clear_signing_key_cache() {
|
|
||||||
SIGNING_KEY_CACHE.lock().clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_derive_signing_key() {
|
|
||||||
let key = derive_signing_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", "20130524", "us-east-1", "s3");
|
|
||||||
assert_eq!(key.len(), 32);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_derive_signing_key_cached() {
|
|
||||||
let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
|
|
||||||
let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
|
|
||||||
assert_eq!(key1, key2);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_constant_time_compare() {
|
|
||||||
assert!(constant_time_compare("abc", "abc"));
|
|
||||||
assert!(!constant_time_compare("abc", "abd"));
|
|
||||||
assert!(!constant_time_compare("abc", "abcd"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_build_string_to_sign() {
|
|
||||||
let result = build_string_to_sign("20130524T000000Z", "20130524/us-east-1/s3/aws4_request", "GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD");
|
|
||||||
assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
|
|
||||||
assert!(result.contains("20130524T000000Z"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_aws_uri_encode() {
|
|
||||||
assert_eq!(aws_uri_encode("hello world"), "hello%20world");
|
|
||||||
assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
|
|
||||||
assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_verify_sigv4_roundtrip() {
|
|
||||||
let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
|
|
||||||
let date_stamp = "20130524";
|
|
||||||
let region = "us-east-1";
|
|
||||||
let service = "s3";
|
|
||||||
let amz_date = "20130524T000000Z";
|
|
||||||
|
|
||||||
let signing_key = derive_signing_key(secret, date_stamp, region, service);
|
|
||||||
|
|
||||||
let canonical_request = "GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
|
|
||||||
let string_to_sign = build_string_to_sign(amz_date, &format!("{}/{}/{}/aws4_request", date_stamp, region, service), canonical_request);
|
|
||||||
|
|
||||||
let signature = compute_signature(&signing_key, &string_to_sign);
|
|
||||||
|
|
||||||
let result = verify_sigv4_signature(
|
|
||||||
"GET",
|
|
||||||
"/",
|
|
||||||
&[],
|
|
||||||
"host",
|
|
||||||
&[("host".to_string(), "examplebucket.s3.amazonaws.com".to_string())],
|
|
||||||
"UNSIGNED-PAYLOAD",
|
|
||||||
amz_date,
|
|
||||||
date_stamp,
|
|
||||||
region,
|
|
||||||
service,
|
|
||||||
secret,
|
|
||||||
&signature,
|
|
||||||
);
|
|
||||||
assert!(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "myfsio-common"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
thiserror = { workspace = true }
|
|
||||||
serde = { workspace = true }
|
|
||||||
serde_json = { workspace = true }
|
|
||||||
chrono = { workspace = true }
|
|
||||||
uuid = { workspace = true }
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
|
|
||||||
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
|
|
||||||
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
|
|
||||||
pub const BUCKET_META_DIR: &str = "meta";
|
|
||||||
pub const BUCKET_VERSIONS_DIR: &str = "versions";
|
|
||||||
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
|
|
||||||
pub const STATS_FILE: &str = "stats.json";
|
|
||||||
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
|
|
||||||
pub const INDEX_FILE: &str = "_index.json";
|
|
||||||
pub const MANIFEST_FILE: &str = "manifest.json";
|
|
||||||
|
|
||||||
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
|
||||||
|
|
||||||
pub const DEFAULT_REGION: &str = "us-east-1";
|
|
||||||
pub const AWS_SERVICE: &str = "s3";
|
|
||||||
|
|
||||||
pub const DEFAULT_MAX_KEYS: usize = 1000;
|
|
||||||
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
|
|
||||||
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
|
|
||||||
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;
|
|
||||||
@@ -1,221 +0,0 @@
|
|||||||
use std::fmt;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
|
||||||
pub enum S3ErrorCode {
|
|
||||||
AccessDenied,
|
|
||||||
BucketAlreadyExists,
|
|
||||||
BucketNotEmpty,
|
|
||||||
EntityTooLarge,
|
|
||||||
InternalError,
|
|
||||||
InvalidAccessKeyId,
|
|
||||||
InvalidArgument,
|
|
||||||
InvalidBucketName,
|
|
||||||
InvalidKey,
|
|
||||||
InvalidRange,
|
|
||||||
InvalidRequest,
|
|
||||||
MalformedXML,
|
|
||||||
MethodNotAllowed,
|
|
||||||
NoSuchBucket,
|
|
||||||
NoSuchKey,
|
|
||||||
NoSuchUpload,
|
|
||||||
NoSuchVersion,
|
|
||||||
NoSuchTagSet,
|
|
||||||
PreconditionFailed,
|
|
||||||
NotModified,
|
|
||||||
QuotaExceeded,
|
|
||||||
SignatureDoesNotMatch,
|
|
||||||
SlowDown,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl S3ErrorCode {
|
|
||||||
pub fn http_status(&self) -> u16 {
|
|
||||||
match self {
|
|
||||||
Self::AccessDenied => 403,
|
|
||||||
Self::BucketAlreadyExists => 409,
|
|
||||||
Self::BucketNotEmpty => 409,
|
|
||||||
Self::EntityTooLarge => 413,
|
|
||||||
Self::InternalError => 500,
|
|
||||||
Self::InvalidAccessKeyId => 403,
|
|
||||||
Self::InvalidArgument => 400,
|
|
||||||
Self::InvalidBucketName => 400,
|
|
||||||
Self::InvalidKey => 400,
|
|
||||||
Self::InvalidRange => 416,
|
|
||||||
Self::InvalidRequest => 400,
|
|
||||||
Self::MalformedXML => 400,
|
|
||||||
Self::MethodNotAllowed => 405,
|
|
||||||
Self::NoSuchBucket => 404,
|
|
||||||
Self::NoSuchKey => 404,
|
|
||||||
Self::NoSuchUpload => 404,
|
|
||||||
Self::NoSuchVersion => 404,
|
|
||||||
Self::NoSuchTagSet => 404,
|
|
||||||
Self::PreconditionFailed => 412,
|
|
||||||
Self::NotModified => 304,
|
|
||||||
Self::QuotaExceeded => 403,
|
|
||||||
Self::SignatureDoesNotMatch => 403,
|
|
||||||
Self::SlowDown => 429,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_str(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
Self::AccessDenied => "AccessDenied",
|
|
||||||
Self::BucketAlreadyExists => "BucketAlreadyExists",
|
|
||||||
Self::BucketNotEmpty => "BucketNotEmpty",
|
|
||||||
Self::EntityTooLarge => "EntityTooLarge",
|
|
||||||
Self::InternalError => "InternalError",
|
|
||||||
Self::InvalidAccessKeyId => "InvalidAccessKeyId",
|
|
||||||
Self::InvalidArgument => "InvalidArgument",
|
|
||||||
Self::InvalidBucketName => "InvalidBucketName",
|
|
||||||
Self::InvalidKey => "InvalidKey",
|
|
||||||
Self::InvalidRange => "InvalidRange",
|
|
||||||
Self::InvalidRequest => "InvalidRequest",
|
|
||||||
Self::MalformedXML => "MalformedXML",
|
|
||||||
Self::MethodNotAllowed => "MethodNotAllowed",
|
|
||||||
Self::NoSuchBucket => "NoSuchBucket",
|
|
||||||
Self::NoSuchKey => "NoSuchKey",
|
|
||||||
Self::NoSuchUpload => "NoSuchUpload",
|
|
||||||
Self::NoSuchVersion => "NoSuchVersion",
|
|
||||||
Self::NoSuchTagSet => "NoSuchTagSet",
|
|
||||||
Self::PreconditionFailed => "PreconditionFailed",
|
|
||||||
Self::NotModified => "NotModified",
|
|
||||||
Self::QuotaExceeded => "QuotaExceeded",
|
|
||||||
Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
|
|
||||||
Self::SlowDown => "SlowDown",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn default_message(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
Self::AccessDenied => "Access Denied",
|
|
||||||
Self::BucketAlreadyExists => "The requested bucket name is not available",
|
|
||||||
Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
|
|
||||||
Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
|
|
||||||
Self::InternalError => "We encountered an internal error. Please try again.",
|
|
||||||
Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
|
|
||||||
Self::InvalidArgument => "Invalid argument",
|
|
||||||
Self::InvalidBucketName => "The specified bucket is not valid",
|
|
||||||
Self::InvalidKey => "The specified key is not valid",
|
|
||||||
Self::InvalidRange => "The requested range is not satisfiable",
|
|
||||||
Self::InvalidRequest => "Invalid request",
|
|
||||||
Self::MalformedXML => "The XML you provided was not well-formed",
|
|
||||||
Self::MethodNotAllowed => "The specified method is not allowed against this resource",
|
|
||||||
Self::NoSuchBucket => "The specified bucket does not exist",
|
|
||||||
Self::NoSuchKey => "The specified key does not exist",
|
|
||||||
Self::NoSuchUpload => "The specified multipart upload does not exist",
|
|
||||||
Self::NoSuchVersion => "The specified version does not exist",
|
|
||||||
Self::NoSuchTagSet => "The TagSet does not exist",
|
|
||||||
Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
|
|
||||||
Self::NotModified => "Not Modified",
|
|
||||||
Self::QuotaExceeded => "The bucket quota has been exceeded",
|
|
||||||
Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
|
|
||||||
Self::SlowDown => "Please reduce your request rate",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for S3ErrorCode {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
f.write_str(self.as_str())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct S3Error {
|
|
||||||
pub code: S3ErrorCode,
|
|
||||||
pub message: String,
|
|
||||||
pub resource: String,
|
|
||||||
pub request_id: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl S3Error {
|
|
||||||
pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
|
|
||||||
Self {
|
|
||||||
code,
|
|
||||||
message: message.into(),
|
|
||||||
resource: String::new(),
|
|
||||||
request_id: String::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from_code(code: S3ErrorCode) -> Self {
|
|
||||||
Self::new(code, code.default_message())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
|
|
||||||
self.resource = resource.into();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
|
|
||||||
self.request_id = request_id.into();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn http_status(&self) -> u16 {
|
|
||||||
self.code.http_status()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn to_xml(&self) -> String {
|
|
||||||
format!(
|
|
||||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
|
||||||
<Error>\
|
|
||||||
<Code>{}</Code>\
|
|
||||||
<Message>{}</Message>\
|
|
||||||
<Resource>{}</Resource>\
|
|
||||||
<RequestId>{}</RequestId>\
|
|
||||||
</Error>",
|
|
||||||
self.code.as_str(),
|
|
||||||
xml_escape(&self.message),
|
|
||||||
xml_escape(&self.resource),
|
|
||||||
xml_escape(&self.request_id),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for S3Error {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
write!(f, "{}: {}", self.code, self.message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::error::Error for S3Error {}
|
|
||||||
|
|
||||||
fn xml_escape(s: &str) -> String {
|
|
||||||
s.replace('&', "&")
|
|
||||||
.replace('<', "<")
|
|
||||||
.replace('>', ">")
|
|
||||||
.replace('"', """)
|
|
||||||
.replace('\'', "'")
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_error_codes() {
|
|
||||||
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
|
|
||||||
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
|
|
||||||
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_error_to_xml() {
|
|
||||||
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
|
|
||||||
.with_resource("/test-bucket/test-key")
|
|
||||||
.with_request_id("abc123");
|
|
||||||
let xml = err.to_xml();
|
|
||||||
assert!(xml.contains("<Code>NoSuchKey</Code>"));
|
|
||||||
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
|
|
||||||
assert!(xml.contains("<RequestId>abc123</RequestId>"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_xml_escape() {
|
|
||||||
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
|
|
||||||
.with_resource("/bucket/key&");
|
|
||||||
let xml = err.to_xml();
|
|
||||||
assert!(xml.contains("<test>"));
|
|
||||||
assert!(xml.contains("&"));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
pub mod constants;
|
|
||||||
pub mod error;
|
|
||||||
pub mod types;
|
|
||||||
@@ -1,176 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectMeta {
|
|
||||||
pub key: String,
|
|
||||||
pub size: u64,
|
|
||||||
pub last_modified: DateTime<Utc>,
|
|
||||||
pub etag: Option<String>,
|
|
||||||
pub content_type: Option<String>,
|
|
||||||
pub storage_class: Option<String>,
|
|
||||||
pub metadata: HashMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ObjectMeta {
|
|
||||||
pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
|
|
||||||
Self {
|
|
||||||
key,
|
|
||||||
size,
|
|
||||||
last_modified,
|
|
||||||
etag: None,
|
|
||||||
content_type: None,
|
|
||||||
storage_class: Some("STANDARD".to_string()),
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct BucketMeta {
|
|
||||||
pub name: String,
|
|
||||||
pub creation_date: DateTime<Utc>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
|
||||||
pub struct BucketStats {
|
|
||||||
pub objects: u64,
|
|
||||||
pub bytes: u64,
|
|
||||||
pub version_count: u64,
|
|
||||||
pub version_bytes: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BucketStats {
|
|
||||||
pub fn total_objects(&self) -> u64 {
|
|
||||||
self.objects + self.version_count
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn total_bytes(&self) -> u64 {
|
|
||||||
self.bytes + self.version_bytes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct ListObjectsResult {
|
|
||||||
pub objects: Vec<ObjectMeta>,
|
|
||||||
pub is_truncated: bool,
|
|
||||||
pub next_continuation_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct ShallowListResult {
|
|
||||||
pub objects: Vec<ObjectMeta>,
|
|
||||||
pub common_prefixes: Vec<String>,
|
|
||||||
pub is_truncated: bool,
|
|
||||||
pub next_continuation_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
|
||||||
pub struct ListParams {
|
|
||||||
pub max_keys: usize,
|
|
||||||
pub continuation_token: Option<String>,
|
|
||||||
pub prefix: Option<String>,
|
|
||||||
pub start_after: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
|
||||||
pub struct ShallowListParams {
|
|
||||||
pub prefix: String,
|
|
||||||
pub delimiter: String,
|
|
||||||
pub max_keys: usize,
|
|
||||||
pub continuation_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct PartMeta {
|
|
||||||
pub part_number: u32,
|
|
||||||
pub etag: String,
|
|
||||||
pub size: u64,
|
|
||||||
pub last_modified: Option<DateTime<Utc>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct PartInfo {
|
|
||||||
pub part_number: u32,
|
|
||||||
pub etag: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct MultipartUploadInfo {
|
|
||||||
pub upload_id: String,
|
|
||||||
pub key: String,
|
|
||||||
pub initiated: DateTime<Utc>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct VersionInfo {
|
|
||||||
pub version_id: String,
|
|
||||||
pub key: String,
|
|
||||||
pub size: u64,
|
|
||||||
pub last_modified: DateTime<Utc>,
|
|
||||||
pub etag: Option<String>,
|
|
||||||
pub is_latest: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct Tag {
|
|
||||||
pub key: String,
|
|
||||||
pub value: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
|
||||||
pub struct BucketConfig {
|
|
||||||
#[serde(default)]
|
|
||||||
pub versioning_enabled: bool,
|
|
||||||
#[serde(default)]
|
|
||||||
pub tags: Vec<Tag>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub cors: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub encryption: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub lifecycle: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub website: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub quota: Option<QuotaConfig>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub acl: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub notification: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub logging: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub object_lock: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub policy: Option<serde_json::Value>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub replication: Option<serde_json::Value>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct QuotaConfig {
|
|
||||||
pub max_bytes: Option<u64>,
|
|
||||||
pub max_objects: Option<u64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct Principal {
|
|
||||||
pub access_key: String,
|
|
||||||
pub user_id: String,
|
|
||||||
pub display_name: String,
|
|
||||||
pub is_admin: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Principal {
|
|
||||||
pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
|
|
||||||
Self {
|
|
||||||
access_key,
|
|
||||||
user_id,
|
|
||||||
display_name,
|
|
||||||
is_admin,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "myfsio-crypto"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
myfsio-common = { path = "../myfsio-common" }
|
|
||||||
md-5 = { workspace = true }
|
|
||||||
sha2 = { workspace = true }
|
|
||||||
hex = { workspace = true }
|
|
||||||
aes-gcm = { workspace = true }
|
|
||||||
hkdf = { workspace = true }
|
|
||||||
thiserror = { workspace = true }
|
|
||||||
tokio = { workspace = true }
|
|
||||||
serde = { workspace = true }
|
|
||||||
serde_json = { workspace = true }
|
|
||||||
uuid = { workspace = true }
|
|
||||||
chrono = { workspace = true }
|
|
||||||
base64 = { workspace = true }
|
|
||||||
rand = "0.8"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
|
|
||||||
tempfile = "3"
|
|
||||||
@@ -1,238 +0,0 @@
|
|||||||
use aes_gcm::aead::Aead;
|
|
||||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
|
||||||
use hkdf::Hkdf;
|
|
||||||
use sha2::Sha256;
|
|
||||||
use std::fs::File;
|
|
||||||
use std::io::{Read, Seek, SeekFrom, Write};
|
|
||||||
use std::path::Path;
|
|
||||||
use thiserror::Error;
|
|
||||||
|
|
||||||
const DEFAULT_CHUNK_SIZE: usize = 65536;
|
|
||||||
const HEADER_SIZE: usize = 4;
|
|
||||||
|
|
||||||
#[derive(Debug, Error)]
|
|
||||||
pub enum CryptoError {
|
|
||||||
#[error("IO error: {0}")]
|
|
||||||
Io(#[from] std::io::Error),
|
|
||||||
#[error("Invalid key size: expected 32 bytes, got {0}")]
|
|
||||||
InvalidKeySize(usize),
|
|
||||||
#[error("Invalid nonce size: expected 12 bytes, got {0}")]
|
|
||||||
InvalidNonceSize(usize),
|
|
||||||
#[error("Encryption failed: {0}")]
|
|
||||||
EncryptionFailed(String),
|
|
||||||
#[error("Decryption failed at chunk {0}")]
|
|
||||||
DecryptionFailed(u32),
|
|
||||||
#[error("HKDF expand failed: {0}")]
|
|
||||||
HkdfFailed(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fill `buf` from `reader` as far as possible, retrying on `Interrupted`.
///
/// Returns the number of bytes actually placed in `buf`; fewer than
/// `buf.len()` only when the stream ends, and 0 when it is already at EOF.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        match reader.read(&mut buf[total..]) {
            Ok(0) => break, // EOF: hand back the partial fill
            Ok(read) => total += read,
            Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(err) => return Err(err),
        }
    }
    Ok(total)
}
|
|
||||||
|
|
||||||
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
|
|
||||||
let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
|
|
||||||
let mut okm = [0u8; 12];
|
|
||||||
hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
|
|
||||||
.map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
|
|
||||||
Ok(okm)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encrypt_stream_chunked(
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
key: &[u8],
|
|
||||||
base_nonce: &[u8],
|
|
||||||
chunk_size: Option<usize>,
|
|
||||||
) -> Result<u32, CryptoError> {
|
|
||||||
if key.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(key.len()));
|
|
||||||
}
|
|
||||||
if base_nonce.len() != 12 {
|
|
||||||
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
|
|
||||||
let key_arr: [u8; 32] = key.try_into().unwrap();
|
|
||||||
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
|
|
||||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
|
||||||
|
|
||||||
let mut infile = File::open(input_path)?;
|
|
||||||
let mut outfile = File::create(output_path)?;
|
|
||||||
|
|
||||||
outfile.write_all(&[0u8; 4])?;
|
|
||||||
|
|
||||||
let mut buf = vec![0u8; chunk_size];
|
|
||||||
let mut chunk_index: u32 = 0;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let n = read_exact_chunk(&mut infile, &mut buf)?;
|
|
||||||
if n == 0 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
|
|
||||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
|
||||||
|
|
||||||
let encrypted = cipher
|
|
||||||
.encrypt(nonce, &buf[..n])
|
|
||||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
|
||||||
|
|
||||||
let size = encrypted.len() as u32;
|
|
||||||
outfile.write_all(&size.to_be_bytes())?;
|
|
||||||
outfile.write_all(&encrypted)?;
|
|
||||||
|
|
||||||
chunk_index += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
outfile.seek(SeekFrom::Start(0))?;
|
|
||||||
outfile.write_all(&chunk_index.to_be_bytes())?;
|
|
||||||
|
|
||||||
Ok(chunk_index)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decrypt_stream_chunked(
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
key: &[u8],
|
|
||||||
base_nonce: &[u8],
|
|
||||||
) -> Result<u32, CryptoError> {
|
|
||||||
if key.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(key.len()));
|
|
||||||
}
|
|
||||||
if base_nonce.len() != 12 {
|
|
||||||
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let key_arr: [u8; 32] = key.try_into().unwrap();
|
|
||||||
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
|
|
||||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
|
||||||
|
|
||||||
let mut infile = File::open(input_path)?;
|
|
||||||
let mut outfile = File::create(output_path)?;
|
|
||||||
|
|
||||||
let mut header = [0u8; HEADER_SIZE];
|
|
||||||
infile.read_exact(&mut header)?;
|
|
||||||
let chunk_count = u32::from_be_bytes(header);
|
|
||||||
|
|
||||||
let mut size_buf = [0u8; HEADER_SIZE];
|
|
||||||
for chunk_index in 0..chunk_count {
|
|
||||||
infile.read_exact(&mut size_buf)?;
|
|
||||||
let chunk_size = u32::from_be_bytes(size_buf) as usize;
|
|
||||||
|
|
||||||
let mut encrypted = vec![0u8; chunk_size];
|
|
||||||
infile.read_exact(&mut encrypted)?;
|
|
||||||
|
|
||||||
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
|
|
||||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
|
||||||
|
|
||||||
let decrypted = cipher
|
|
||||||
.decrypt(nonce, encrypted.as_ref())
|
|
||||||
.map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;
|
|
||||||
|
|
||||||
outfile.write_all(&decrypted)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(chunk_count)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn encrypt_stream_chunked_async(
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
key: &[u8],
|
|
||||||
base_nonce: &[u8],
|
|
||||||
chunk_size: Option<usize>,
|
|
||||||
) -> Result<u32, CryptoError> {
|
|
||||||
let input_path = input_path.to_owned();
|
|
||||||
let output_path = output_path.to_owned();
|
|
||||||
let key = key.to_vec();
|
|
||||||
let base_nonce = base_nonce.to_vec();
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn decrypt_stream_chunked_async(
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
key: &[u8],
|
|
||||||
base_nonce: &[u8],
|
|
||||||
) -> Result<u32, CryptoError> {
|
|
||||||
let input_path = input_path.to_owned();
|
|
||||||
let output_path = output_path.to_owned();
|
|
||||||
let key = key.to_vec();
|
|
||||||
let base_nonce = base_nonce.to_vec();
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as IoWrite;

    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let tmp = tempfile::tempdir().unwrap();
        let plain_path = tmp.path().join("input.bin");
        let cipher_path = tmp.path().join("encrypted.bin");
        let restored_path = tmp.path().join("decrypted.bin");

        let payload = b"Hello, this is a test of AES-256-GCM chunked encryption!";
        std::fs::File::create(&plain_path).unwrap().write_all(payload).unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];

        // A tiny chunk size forces the multi-chunk code path.
        let written = encrypt_stream_chunked(&plain_path, &cipher_path, &key, &nonce, Some(16)).unwrap();
        assert!(written > 0);

        let read_back = decrypt_stream_chunked(&cipher_path, &restored_path, &key, &nonce).unwrap();
        assert_eq!(written, read_back);

        assert_eq!(std::fs::read(&restored_path).unwrap(), payload);
    }

    #[test]
    fn test_invalid_key_size() {
        let tmp = tempfile::tempdir().unwrap();
        let plain_path = tmp.path().join("input.bin");
        std::fs::File::create(&plain_path).unwrap().write_all(b"test").unwrap();

        // A 16-byte key must be rejected before any I/O happens.
        let outcome =
            encrypt_stream_chunked(&plain_path, &tmp.path().join("out"), &[0u8; 16], &[0u8; 12], None);
        assert!(matches!(outcome, Err(CryptoError::InvalidKeySize(16))));
    }

    #[test]
    fn test_wrong_key_fails_decrypt() {
        let tmp = tempfile::tempdir().unwrap();
        let plain_path = tmp.path().join("input.bin");
        let cipher_path = tmp.path().join("encrypted.bin");
        let restored_path = tmp.path().join("decrypted.bin");

        std::fs::File::create(&plain_path).unwrap().write_all(b"secret data").unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        encrypt_stream_chunked(&plain_path, &cipher_path, &key, &nonce, None).unwrap();

        // GCM authentication must reject a different key.
        let wrong_key = [0x43u8; 32];
        let outcome = decrypt_stream_chunked(&cipher_path, &restored_path, &wrong_key, &nonce);
        assert!(matches!(outcome, Err(CryptoError::DecryptionFailed(_))));
    }
}
|
|
||||||
@@ -1,375 +0,0 @@
|
|||||||
use base64::engine::general_purpose::STANDARD as B64;
|
|
||||||
use base64::Engine;
|
|
||||||
use rand::RngCore;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use crate::aes_gcm::{
|
|
||||||
encrypt_stream_chunked, decrypt_stream_chunked, CryptoError,
|
|
||||||
};
|
|
||||||
use crate::kms::KmsService;
|
|
||||||
|
|
||||||
/// Server-side encryption mode selector.
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
    /// SSE-S3: data key generated and wrapped by this service.
    Aes256,
    /// SSE-KMS: data key wrapped via a configured KMS key.
    AwsKms,
    /// SSE-C: the caller supplies the 32-byte key on every request.
    CustomerProvided,
}

impl SseAlgorithm {
    /// Wire name reported in the `x-amz-server-side-encryption` header.
    /// SSE-C reports as "AES256", matching S3's convention.
    pub fn as_str(&self) -> &'static str {
        match self {
            SseAlgorithm::Aes256 | SseAlgorithm::CustomerProvided => "AES256",
            SseAlgorithm::AwsKms => "aws:kms",
        }
    }
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub struct EncryptionContext {
|
|
||||||
pub algorithm: SseAlgorithm,
|
|
||||||
pub kms_key_id: Option<String>,
|
|
||||||
pub customer_key: Option<Vec<u8>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Encryption bookkeeping persisted alongside an object's user metadata.
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
    /// Wire algorithm name ("AES256" or "aws:kms").
    pub algorithm: String,
    /// Base64-encoded 12-byte base nonce.
    pub nonce: String,
    /// Base64 of the wrapped per-object data key (absent for SSE-C).
    pub encrypted_data_key: Option<String>,
    /// KMS key id used to wrap the data key, when SSE-KMS was used.
    pub kms_key_id: Option<String>,
}

impl EncryptionMetadata {
    /// Render this record as its `x-amz-*` metadata entries.
    pub fn to_metadata_map(&self) -> HashMap<String, String> {
        let mut out = HashMap::new();
        out.insert("x-amz-server-side-encryption".to_string(), self.algorithm.clone());
        out.insert("x-amz-encryption-nonce".to_string(), self.nonce.clone());
        if let Some(data_key) = &self.encrypted_data_key {
            out.insert("x-amz-encrypted-data-key".to_string(), data_key.clone());
        }
        if let Some(kid) = &self.kms_key_id {
            out.insert("x-amz-encryption-key-id".to_string(), kid.clone());
        }
        out
    }

    /// Parse the record back out of a metadata map.
    ///
    /// Returns `None` when either required entry (algorithm or nonce) is
    /// missing; the two optional entries default to `None`.
    pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
        Some(Self {
            algorithm: meta.get("x-amz-server-side-encryption")?.clone(),
            nonce: meta.get("x-amz-encryption-nonce")?.clone(),
            encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
            kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
        })
    }

    /// True when the metadata marks the object as encrypted.
    pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
        meta.contains_key("x-amz-server-side-encryption")
    }

    /// Strip every encryption bookkeeping entry from `meta`.
    pub fn clean_metadata(meta: &mut HashMap<String, String>) {
        for entry in [
            "x-amz-server-side-encryption",
            "x-amz-encryption-nonce",
            "x-amz-encrypted-data-key",
            "x-amz-encryption-key-id",
        ] {
            meta.remove(entry);
        }
    }
}
|
|
||||||
|
|
||||||
pub struct EncryptionService {
|
|
||||||
master_key: [u8; 32],
|
|
||||||
kms: Option<std::sync::Arc<KmsService>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EncryptionService {
|
|
||||||
pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
|
|
||||||
Self { master_key, kms }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
|
|
||||||
let mut data_key = [0u8; 32];
|
|
||||||
let mut nonce = [0u8; 12];
|
|
||||||
rand::thread_rng().fill_bytes(&mut data_key);
|
|
||||||
rand::thread_rng().fill_bytes(&mut nonce);
|
|
||||||
(data_key, nonce)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
|
|
||||||
use aes_gcm::aead::Aead;
|
|
||||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
|
||||||
|
|
||||||
let cipher = Aes256Gcm::new((&self.master_key).into());
|
|
||||||
let mut nonce_bytes = [0u8; 12];
|
|
||||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
|
||||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
|
||||||
|
|
||||||
let encrypted = cipher
|
|
||||||
.encrypt(nonce, data_key.as_slice())
|
|
||||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
|
||||||
|
|
||||||
let mut combined = Vec::with_capacity(12 + encrypted.len());
|
|
||||||
combined.extend_from_slice(&nonce_bytes);
|
|
||||||
combined.extend_from_slice(&encrypted);
|
|
||||||
Ok(B64.encode(&combined))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
|
|
||||||
use aes_gcm::aead::Aead;
|
|
||||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
|
||||||
|
|
||||||
let combined = B64.decode(wrapped_b64).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
if combined.len() < 12 {
|
|
||||||
return Err(CryptoError::EncryptionFailed(
|
|
||||||
"Wrapped key too short".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (nonce_bytes, ciphertext) = combined.split_at(12);
|
|
||||||
let cipher = Aes256Gcm::new((&self.master_key).into());
|
|
||||||
let nonce = Nonce::from_slice(nonce_bytes);
|
|
||||||
|
|
||||||
let plaintext = cipher
|
|
||||||
.decrypt(nonce, ciphertext)
|
|
||||||
.map_err(|_| CryptoError::DecryptionFailed(0))?;
|
|
||||||
|
|
||||||
if plaintext.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(plaintext.len()));
|
|
||||||
}
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
key.copy_from_slice(&plaintext);
|
|
||||||
Ok(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn encrypt_object(
|
|
||||||
&self,
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
ctx: &EncryptionContext,
|
|
||||||
) -> Result<EncryptionMetadata, CryptoError> {
|
|
||||||
let (data_key, nonce) = self.generate_data_key();
|
|
||||||
|
|
||||||
let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
|
|
||||||
SseAlgorithm::Aes256 => {
|
|
||||||
let wrapped = self.wrap_data_key(&data_key)?;
|
|
||||||
(Some(wrapped), None)
|
|
||||||
}
|
|
||||||
SseAlgorithm::AwsKms => {
|
|
||||||
let kms = self
|
|
||||||
.kms
|
|
||||||
.as_ref()
|
|
||||||
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
|
|
||||||
let kid = ctx
|
|
||||||
.kms_key_id
|
|
||||||
.as_ref()
|
|
||||||
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
|
|
||||||
let ciphertext = kms.encrypt_data(kid, &data_key).await?;
|
|
||||||
(Some(B64.encode(&ciphertext)), Some(kid.clone()))
|
|
||||||
}
|
|
||||||
SseAlgorithm::CustomerProvided => {
|
|
||||||
(None, None)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
|
|
||||||
let ck = ctx.customer_key.as_ref().ok_or_else(|| {
|
|
||||||
CryptoError::EncryptionFailed("No customer key provided".into())
|
|
||||||
})?;
|
|
||||||
if ck.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(ck.len()));
|
|
||||||
}
|
|
||||||
let mut k = [0u8; 32];
|
|
||||||
k.copy_from_slice(ck);
|
|
||||||
k
|
|
||||||
} else {
|
|
||||||
data_key
|
|
||||||
};
|
|
||||||
|
|
||||||
let ip = input_path.to_owned();
|
|
||||||
let op = output_path.to_owned();
|
|
||||||
let ak = actual_key;
|
|
||||||
let n = nonce;
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
encrypt_stream_chunked(&ip, &op, &ak, &n, None)
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
|
|
||||||
|
|
||||||
Ok(EncryptionMetadata {
|
|
||||||
algorithm: ctx.algorithm.as_str().to_string(),
|
|
||||||
nonce: B64.encode(nonce),
|
|
||||||
encrypted_data_key,
|
|
||||||
kms_key_id,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn decrypt_object(
|
|
||||||
&self,
|
|
||||||
input_path: &Path,
|
|
||||||
output_path: &Path,
|
|
||||||
enc_meta: &EncryptionMetadata,
|
|
||||||
customer_key: Option<&[u8]>,
|
|
||||||
) -> Result<(), CryptoError> {
|
|
||||||
let nonce_bytes = B64.decode(&enc_meta.nonce).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
if nonce_bytes.len() != 12 {
|
|
||||||
return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
|
|
||||||
}
|
|
||||||
|
|
||||||
let data_key: [u8; 32] = if let Some(ck) = customer_key {
|
|
||||||
if ck.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(ck.len()));
|
|
||||||
}
|
|
||||||
let mut k = [0u8; 32];
|
|
||||||
k.copy_from_slice(ck);
|
|
||||||
k
|
|
||||||
} else if enc_meta.algorithm == "aws:kms" {
|
|
||||||
let kms = self
|
|
||||||
.kms
|
|
||||||
.as_ref()
|
|
||||||
.ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
|
|
||||||
let kid = enc_meta
|
|
||||||
.kms_key_id
|
|
||||||
.as_ref()
|
|
||||||
.ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
|
|
||||||
let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
|
|
||||||
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
|
|
||||||
})?;
|
|
||||||
let ct = B64.decode(encrypted_dk).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
let dk = kms.decrypt_data(kid, &ct).await?;
|
|
||||||
if dk.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(dk.len()));
|
|
||||||
}
|
|
||||||
let mut k = [0u8; 32];
|
|
||||||
k.copy_from_slice(&dk);
|
|
||||||
k
|
|
||||||
} else {
|
|
||||||
let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
|
|
||||||
CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
|
|
||||||
})?;
|
|
||||||
self.unwrap_data_key(wrapped)?
|
|
||||||
};
|
|
||||||
|
|
||||||
let ip = input_path.to_owned();
|
|
||||||
let op = output_path.to_owned();
|
|
||||||
let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
|
|
||||||
tokio::task::spawn_blocking(move || {
|
|
||||||
decrypt_stream_chunked(&ip, &op, &data_key, &nb)
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Deterministic master key shared by the tests below.
    fn test_master_key() -> [u8; 32] {
        [0x42u8; 32]
    }

    #[test]
    fn test_wrap_unwrap_data_key() {
        let svc = EncryptionService::new(test_master_key(), None);
        let original = [0xAAu8; 32];
        let wrapped = svc.wrap_data_key(&original).unwrap();
        assert_eq!(svc.unwrap_data_key(&wrapped).unwrap(), original);
    }

    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_s3() {
        let tmp = tempfile::tempdir().unwrap();
        let plain_path = tmp.path().join("plain.bin");
        let cipher_path = tmp.path().join("enc.bin");
        let restored_path = tmp.path().join("dec.bin");

        let payload = b"SSE-S3 encrypted content for testing!";
        std::fs::File::create(&plain_path).unwrap().write_all(payload).unwrap();

        let svc = EncryptionService::new(test_master_key(), None);
        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::Aes256,
            kms_key_id: None,
            customer_key: None,
        };

        let meta = svc.encrypt_object(&plain_path, &cipher_path, &ctx).await.unwrap();
        assert_eq!(meta.algorithm, "AES256");
        // SSE-S3 always records a wrapped data key.
        assert!(meta.encrypted_data_key.is_some());

        svc.decrypt_object(&cipher_path, &restored_path, &meta, None)
            .await
            .unwrap();

        assert_eq!(std::fs::read(&restored_path).unwrap(), payload);
    }

    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_c() {
        let tmp = tempfile::tempdir().unwrap();
        let plain_path = tmp.path().join("plain.bin");
        let cipher_path = tmp.path().join("enc.bin");
        let restored_path = tmp.path().join("dec.bin");

        let payload = b"SSE-C encrypted content!";
        std::fs::File::create(&plain_path).unwrap().write_all(payload).unwrap();

        let customer_key = [0xBBu8; 32];
        let svc = EncryptionService::new(test_master_key(), None);
        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::CustomerProvided,
            kms_key_id: None,
            customer_key: Some(customer_key.to_vec()),
        };

        let meta = svc.encrypt_object(&plain_path, &cipher_path, &ctx).await.unwrap();
        // SSE-C never persists key material.
        assert!(meta.encrypted_data_key.is_none());

        svc.decrypt_object(&cipher_path, &restored_path, &meta, Some(&customer_key))
            .await
            .unwrap();

        assert_eq!(std::fs::read(&restored_path).unwrap(), payload);
    }

    #[test]
    fn test_encryption_metadata_roundtrip() {
        let meta = EncryptionMetadata {
            algorithm: "AES256".to_string(),
            nonce: "dGVzdG5vbmNlMTI=".to_string(),
            encrypted_data_key: Some("c29tZWtleQ==".to_string()),
            kms_key_id: None,
        };
        let map = meta.to_metadata_map();
        let restored = EncryptionMetadata::from_metadata(&map).unwrap();
        assert_eq!(restored.algorithm, "AES256");
        assert_eq!(restored.nonce, meta.nonce);
        assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
    }

    #[test]
    fn test_is_encrypted() {
        let mut meta = HashMap::new();
        assert!(!EncryptionMetadata::is_encrypted(&meta));
        meta.insert("x-amz-server-side-encryption".to_string(), "AES256".to_string());
        assert!(EncryptionMetadata::is_encrypted(&meta));
    }
}
|
|
||||||
@@ -1,132 +0,0 @@
|
|||||||
use md5::{Digest, Md5};
|
|
||||||
use sha2::Sha256;
|
|
||||||
use std::io::Read;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
const CHUNK_SIZE: usize = 65536;
|
|
||||||
|
|
||||||
pub fn md5_file(path: &Path) -> std::io::Result<String> {
|
|
||||||
let mut file = std::fs::File::open(path)?;
|
|
||||||
let mut hasher = Md5::new();
|
|
||||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
|
||||||
loop {
|
|
||||||
let n = file.read(&mut buf)?;
|
|
||||||
if n == 0 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
hasher.update(&buf[..n]);
|
|
||||||
}
|
|
||||||
Ok(format!("{:x}", hasher.finalize()))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn md5_bytes(data: &[u8]) -> String {
|
|
||||||
let mut hasher = Md5::new();
|
|
||||||
hasher.update(data);
|
|
||||||
format!("{:x}", hasher.finalize())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
|
|
||||||
let mut file = std::fs::File::open(path)?;
|
|
||||||
let mut hasher = Sha256::new();
|
|
||||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
|
||||||
loop {
|
|
||||||
let n = file.read(&mut buf)?;
|
|
||||||
if n == 0 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
hasher.update(&buf[..n]);
|
|
||||||
}
|
|
||||||
Ok(format!("{:x}", hasher.finalize()))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
|
||||||
let mut hasher = Sha256::new();
|
|
||||||
hasher.update(data);
|
|
||||||
format!("{:x}", hasher.finalize())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
|
|
||||||
let mut file = std::fs::File::open(path)?;
|
|
||||||
let mut md5_hasher = Md5::new();
|
|
||||||
let mut sha_hasher = Sha256::new();
|
|
||||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
|
||||||
loop {
|
|
||||||
let n = file.read(&mut buf)?;
|
|
||||||
if n == 0 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
md5_hasher.update(&buf[..n]);
|
|
||||||
sha_hasher.update(&buf[..n]);
|
|
||||||
}
|
|
||||||
Ok((
|
|
||||||
format!("{:x}", md5_hasher.finalize()),
|
|
||||||
format!("{:x}", sha_hasher.finalize()),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
|
|
||||||
let path = path.to_owned();
|
|
||||||
tokio::task::spawn_blocking(move || md5_file(&path))
|
|
||||||
.await
|
|
||||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
|
|
||||||
let path = path.to_owned();
|
|
||||||
tokio::task::spawn_blocking(move || sha256_file(&path))
|
|
||||||
.await
|
|
||||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
|
|
||||||
let path = path.to_owned();
|
|
||||||
tokio::task::spawn_blocking(move || md5_sha256_file(&path))
|
|
||||||
.await
|
|
||||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    #[test]
    fn test_md5_bytes() {
        // Well-known MD5 vectors for the empty string and "hello".
        assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
        assert_eq!(md5_bytes(b"hello"), "5d41402abc4b2a76b9719d911017c592");
    }

    #[test]
    fn test_sha256_bytes() {
        assert_eq!(
            sha256_bytes(b"hello"),
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    #[test]
    fn test_md5_file() {
        let mut scratch = tempfile::NamedTempFile::new().unwrap();
        scratch.write_all(b"hello").unwrap();
        scratch.flush().unwrap();
        assert_eq!(md5_file(scratch.path()).unwrap(), "5d41402abc4b2a76b9719d911017c592");
    }

    #[test]
    fn test_md5_sha256_file() {
        let mut scratch = tempfile::NamedTempFile::new().unwrap();
        scratch.write_all(b"hello").unwrap();
        scratch.flush().unwrap();
        let (md5_hex, sha_hex) = md5_sha256_file(scratch.path()).unwrap();
        assert_eq!(md5_hex, "5d41402abc4b2a76b9719d911017c592");
        assert_eq!(
            sha_hex,
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    #[tokio::test]
    async fn test_md5_file_async() {
        let mut scratch = tempfile::NamedTempFile::new().unwrap();
        scratch.write_all(b"hello").unwrap();
        scratch.flush().unwrap();
        let digest = md5_file_async(scratch.path()).await.unwrap();
        assert_eq!(digest, "5d41402abc4b2a76b9719d911017c592");
    }
}
|
|
||||||
@@ -1,453 +0,0 @@
|
|||||||
use aes_gcm::aead::Aead;
|
|
||||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
|
||||||
use base64::engine::general_purpose::STANDARD as B64;
|
|
||||||
use base64::Engine;
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use rand::RngCore;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
use crate::aes_gcm::CryptoError;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct KmsKey {
|
|
||||||
#[serde(rename = "KeyId")]
|
|
||||||
pub key_id: String,
|
|
||||||
#[serde(rename = "Arn")]
|
|
||||||
pub arn: String,
|
|
||||||
#[serde(rename = "Description")]
|
|
||||||
pub description: String,
|
|
||||||
#[serde(rename = "CreationDate")]
|
|
||||||
pub creation_date: DateTime<Utc>,
|
|
||||||
#[serde(rename = "Enabled")]
|
|
||||||
pub enabled: bool,
|
|
||||||
#[serde(rename = "KeyState")]
|
|
||||||
pub key_state: String,
|
|
||||||
#[serde(rename = "KeyUsage")]
|
|
||||||
pub key_usage: String,
|
|
||||||
#[serde(rename = "KeySpec")]
|
|
||||||
pub key_spec: String,
|
|
||||||
#[serde(rename = "EncryptedKeyMaterial")]
|
|
||||||
pub encrypted_key_material: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
struct KmsStore {
|
|
||||||
keys: Vec<KmsKey>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct KmsService {
|
|
||||||
keys_path: PathBuf,
|
|
||||||
master_key: Arc<RwLock<[u8; 32]>>,
|
|
||||||
keys: Arc<RwLock<Vec<KmsKey>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KmsService {
|
|
||||||
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
|
|
||||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
|
||||||
|
|
||||||
let keys_path = keys_dir.join("kms_keys.json");
|
|
||||||
|
|
||||||
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
|
|
||||||
|
|
||||||
let keys = if keys_path.exists() {
|
|
||||||
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
|
|
||||||
let store: KmsStore = serde_json::from_str(&data)
|
|
||||||
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
|
|
||||||
store.keys
|
|
||||||
} else {
|
|
||||||
Vec::new()
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
keys_path,
|
|
||||||
master_key: Arc::new(RwLock::new(master_key)),
|
|
||||||
keys: Arc::new(RwLock::new(keys)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
|
|
||||||
if path.exists() {
|
|
||||||
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
|
|
||||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
if decoded.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
|
||||||
}
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
key.copy_from_slice(&decoded);
|
|
||||||
Ok(key)
|
|
||||||
} else {
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
rand::thread_rng().fill_bytes(&mut key);
|
|
||||||
let encoded = B64.encode(key);
|
|
||||||
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
|
|
||||||
Ok(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn encrypt_key_material(
|
|
||||||
master_key: &[u8; 32],
|
|
||||||
plaintext_key: &[u8],
|
|
||||||
) -> Result<String, CryptoError> {
|
|
||||||
let cipher = Aes256Gcm::new(master_key.into());
|
|
||||||
let mut nonce_bytes = [0u8; 12];
|
|
||||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
|
||||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
|
||||||
|
|
||||||
let ciphertext = cipher
|
|
||||||
.encrypt(nonce, plaintext_key)
|
|
||||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
|
||||||
|
|
||||||
let mut combined = Vec::with_capacity(12 + ciphertext.len());
|
|
||||||
combined.extend_from_slice(&nonce_bytes);
|
|
||||||
combined.extend_from_slice(&ciphertext);
|
|
||||||
Ok(B64.encode(&combined))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn decrypt_key_material(
|
|
||||||
master_key: &[u8; 32],
|
|
||||||
encrypted_b64: &str,
|
|
||||||
) -> Result<Vec<u8>, CryptoError> {
|
|
||||||
let combined = B64.decode(encrypted_b64).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
if combined.len() < 12 {
|
|
||||||
return Err(CryptoError::EncryptionFailed(
|
|
||||||
"Encrypted key material too short".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (nonce_bytes, ciphertext) = combined.split_at(12);
|
|
||||||
let cipher = Aes256Gcm::new(master_key.into());
|
|
||||||
let nonce = Nonce::from_slice(nonce_bytes);
|
|
||||||
|
|
||||||
cipher
|
|
||||||
.decrypt(nonce, ciphertext)
|
|
||||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn save(&self) -> Result<(), CryptoError> {
|
|
||||||
let keys = self.keys.read().await;
|
|
||||||
let store = KmsStore {
|
|
||||||
keys: keys.clone(),
|
|
||||||
};
|
|
||||||
let json = serde_json::to_string_pretty(&store)
|
|
||||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
|
||||||
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new symmetric KMS key: generate 32 random bytes of key
/// material, wrap it under the master key, persist the store, and return
/// the new key record.
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
    let key_id = uuid::Uuid::new_v4().to_string();
    // Local pseudo-ARN; the account id is a fixed placeholder.
    let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);

    let mut plaintext_key = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut plaintext_key);

    // Only the wrapped form is ever persisted; plaintext stays in memory.
    let master = self.master_key.read().await;
    let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;

    let kms_key = KmsKey {
        key_id: key_id.clone(),
        arn,
        description: description.to_string(),
        creation_date: Utc::now(),
        enabled: true,
        key_state: "Enabled".to_string(),
        key_usage: "ENCRYPT_DECRYPT".to_string(),
        key_spec: "SYMMETRIC_DEFAULT".to_string(),
        encrypted_key_material: encrypted,
    };

    self.keys.write().await.push(kms_key.clone());
    self.save().await?;
    Ok(kms_key)
}
|
|
||||||
|
|
||||||
/// Return a snapshot of every key, enabled or disabled.
pub async fn list_keys(&self) -> Vec<KmsKey> {
    let guard = self.keys.read().await;
    guard.clone()
}
|
|
||||||
|
|
||||||
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
|
|
||||||
let keys = self.keys.read().await;
|
|
||||||
keys.iter()
|
|
||||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
|
||||||
.cloned()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
|
||||||
let mut keys = self.keys.write().await;
|
|
||||||
let len_before = keys.len();
|
|
||||||
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
|
|
||||||
let removed = keys.len() < len_before;
|
|
||||||
drop(keys);
|
|
||||||
if removed {
|
|
||||||
self.save().await?;
|
|
||||||
}
|
|
||||||
Ok(removed)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
|
||||||
let mut keys = self.keys.write().await;
|
|
||||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
|
||||||
key.enabled = true;
|
|
||||||
key.key_state = "Enabled".to_string();
|
|
||||||
drop(keys);
|
|
||||||
self.save().await?;
|
|
||||||
Ok(true)
|
|
||||||
} else {
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
|
||||||
let mut keys = self.keys.write().await;
|
|
||||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
|
||||||
key.enabled = false;
|
|
||||||
key.key_state = "Disabled".to_string();
|
|
||||||
drop(keys);
|
|
||||||
self.save().await?;
|
|
||||||
Ok(true)
|
|
||||||
} else {
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Unwrap and return the plaintext data key for `key_id` (id or ARN).
///
/// Fails when the key is unknown or disabled; disabled keys must not be
/// usable for any cryptographic operation.
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
    let keys = self.keys.read().await;
    let key = keys
        .iter()
        .find(|k| k.key_id == key_id || k.arn == key_id)
        .ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;

    if !key.enabled {
        return Err(CryptoError::EncryptionFailed(
            "KMS key is disabled".to_string(),
        ));
    }

    let master = self.master_key.read().await;
    Self::decrypt_key_material(&master, &key.encrypted_key_material)
}
|
|
||||||
|
|
||||||
/// Encrypt `plaintext` under the data key of `key_id` with AES-256-GCM.
///
/// Output layout: nonce(12 bytes) || ciphertext+tag, matching
/// `decrypt_data`.
pub async fn encrypt_data(
    &self,
    key_id: &str,
    plaintext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
    let data_key = self.decrypt_data_key(key_id).await?;
    if data_key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(data_key.len()));
    }

    // Safe unwrap: length checked to be exactly 32 above.
    let key_arr: [u8; 32] = data_key.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());
    // Fresh random nonce per message — GCM nonces must never repeat per key.
    let mut nonce_bytes = [0u8; 12];
    rand::thread_rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);

    let ciphertext = cipher
        .encrypt(nonce, plaintext)
        .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

    let mut result = Vec::with_capacity(12 + ciphertext.len());
    result.extend_from_slice(&nonce_bytes);
    result.extend_from_slice(&ciphertext);
    Ok(result)
}
|
|
||||||
|
|
||||||
/// Decrypt a blob produced by `encrypt_data` (nonce(12) || ciphertext+tag)
/// using the data key of `key_id`.
///
/// Authentication failures are reported as `DecryptionFailed` without
/// leaking the underlying cipher error.
pub async fn decrypt_data(
    &self,
    key_id: &str,
    ciphertext: &[u8],
) -> Result<Vec<u8>, CryptoError> {
    // Need at least the 12-byte nonce prefix before splitting.
    if ciphertext.len() < 12 {
        return Err(CryptoError::EncryptionFailed(
            "Ciphertext too short".to_string(),
        ));
    }

    let data_key = self.decrypt_data_key(key_id).await?;
    if data_key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(data_key.len()));
    }

    // Safe unwrap: length checked to be exactly 32 above.
    let key_arr: [u8; 32] = data_key.try_into().unwrap();
    let (nonce_bytes, ct) = ciphertext.split_at(12);
    let cipher = Aes256Gcm::new(&key_arr.into());
    let nonce = Nonce::from_slice(nonce_bytes);

    cipher
        .decrypt(nonce, ct)
        .map_err(|_| CryptoError::DecryptionFailed(0))
}
|
|
||||||
|
|
||||||
/// Generate a fresh random data key of `num_bytes` and return
/// `(plaintext_key, wrapped_key)` where `wrapped_key` is
/// nonce(12) || AES-256-GCM ciphertext of the plaintext under the KMS key.
pub async fn generate_data_key(
    &self,
    key_id: &str,
    num_bytes: usize,
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
    let kms_key = self.decrypt_data_key(key_id).await?;
    if kms_key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(kms_key.len()));
    }

    let mut plaintext_key = vec![0u8; num_bytes];
    rand::thread_rng().fill_bytes(&mut plaintext_key);

    // Safe unwrap: length checked to be exactly 32 above.
    let key_arr: [u8; 32] = kms_key.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());
    // Fresh random nonce per wrap.
    let mut nonce_bytes = [0u8; 12];
    rand::thread_rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);

    let encrypted = cipher
        .encrypt(nonce, plaintext_key.as_slice())
        .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

    let mut wrapped = Vec::with_capacity(12 + encrypted.len());
    wrapped.extend_from_slice(&nonce_bytes);
    wrapped.extend_from_slice(&encrypted);

    Ok((plaintext_key, wrapped))
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
|
|
||||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
|
||||||
let path = keys_dir.join("master.key");
|
|
||||||
|
|
||||||
if path.exists() {
|
|
||||||
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
|
|
||||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
|
||||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
|
||||||
})?;
|
|
||||||
if decoded.len() != 32 {
|
|
||||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
|
||||||
}
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
key.copy_from_slice(&decoded);
|
|
||||||
Ok(key)
|
|
||||||
} else {
|
|
||||||
let mut key = [0u8; 32];
|
|
||||||
rand::thread_rng().fill_bytes(&mut key);
|
|
||||||
let encoded = B64.encode(key);
|
|
||||||
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
|
|
||||||
Ok(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;

    // Creating a key yields an enabled record and it appears in list_keys.
    #[tokio::test]
    async fn test_create_and_list_keys() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("test key").await.unwrap();
        assert!(key.enabled);
        assert_eq!(key.description, "test key");
        assert!(key.key_id.len() > 0);

        let keys = kms.list_keys().await;
        assert_eq!(keys.len(), 1);
        assert_eq!(keys[0].key_id, key.key_id);
    }

    // disable_key / enable_key round-trip the `enabled` flag.
    #[tokio::test]
    async fn test_enable_disable_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("toggle").await.unwrap();
        assert!(key.enabled);

        kms.disable_key(&key.key_id).await.unwrap();
        let k = kms.get_key(&key.key_id).await.unwrap();
        assert!(!k.enabled);

        kms.enable_key(&key.key_id).await.unwrap();
        let k = kms.get_key(&key.key_id).await.unwrap();
        assert!(k.enabled);
    }

    // delete_key removes the record from both lookup and listing.
    #[tokio::test]
    async fn test_delete_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("doomed").await.unwrap();
        assert!(kms.delete_key(&key.key_id).await.unwrap());
        assert!(kms.get_key(&key.key_id).await.is_none());
        assert_eq!(kms.list_keys().await.len(), 0);
    }

    // encrypt_data / decrypt_data round-trip and produce non-plaintext output.
    #[tokio::test]
    async fn test_encrypt_decrypt_data() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("enc-key").await.unwrap();
        let plaintext = b"Hello, KMS!";

        let ciphertext = kms.encrypt_data(&key.key_id, plaintext).await.unwrap();
        assert_ne!(&ciphertext, plaintext);

        let decrypted = kms.decrypt_data(&key.key_id, &ciphertext).await.unwrap();
        assert_eq!(decrypted, plaintext);
    }

    // generate_data_key returns a plaintext key of the requested size and a
    // strictly larger wrapped blob (nonce + tag overhead).
    #[tokio::test]
    async fn test_generate_data_key() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("data-key-gen").await.unwrap();
        let (plaintext, wrapped) = kms.generate_data_key(&key.key_id, 32).await.unwrap();

        assert_eq!(plaintext.len(), 32);
        assert!(wrapped.len() > 32);
    }

    // A disabled key must be rejected for encryption.
    #[tokio::test]
    async fn test_disabled_key_cannot_encrypt() {
        let dir = tempfile::tempdir().unwrap();
        let kms = KmsService::new(dir.path()).await.unwrap();

        let key = kms.create_key("disabled").await.unwrap();
        kms.disable_key(&key.key_id).await.unwrap();

        let result = kms.encrypt_data(&key.key_id, b"test").await;
        assert!(result.is_err());
    }

    // Keys created by one service instance are visible to a new instance
    // constructed over the same directory.
    #[tokio::test]
    async fn test_persistence_across_reload() {
        let dir = tempfile::tempdir().unwrap();

        let key_id = {
            let kms = KmsService::new(dir.path()).await.unwrap();
            let key = kms.create_key("persistent").await.unwrap();
            key.key_id
        };

        let kms2 = KmsService::new(dir.path()).await.unwrap();
        let key = kms2.get_key(&key_id).await;
        assert!(key.is_some());
        assert_eq!(key.unwrap().description, "persistent");
    }

    // Loading twice from the same directory returns the same master key.
    #[tokio::test]
    async fn test_master_key_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let key1 = load_or_create_master_key(dir.path()).await.unwrap();
        let key2 = load_or_create_master_key(dir.path()).await.unwrap();
        assert_eq!(key1, key2);
    }
}
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
// Public module layout for the crypto crate.
pub mod hashing;
pub mod aes_gcm;
pub mod kms;
pub mod encryption;
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
# myfsio-server: the HTTP server binary crate for the workspace.
[package]
name = "myfsio-server"
version = "0.1.0"
edition = "2021"

[dependencies]
# Sibling workspace crates.
myfsio-common = { path = "../myfsio-common" }
myfsio-auth = { path = "../myfsio-auth" }
myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
# Shared third-party dependencies pinned at the workspace level.
base64 = { workspace = true }
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
hyper = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
http-body-util = "0.1"
percent-encoding = { workspace = true }
quick-xml = { workspace = true }
mime_guess = "2"
crc32fast = { workspace = true }
duckdb = { workspace = true }
roxmltree = "0.20"
parking_lot = { workspace = true }
regex = "1"

[dev-dependencies]
tempfile = "3"
tower = { workspace = true, features = ["util"] }
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
use std::net::SocketAddr;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
|
|
||||||
/// Runtime configuration for the server, populated from environment
/// variables by `ServerConfig::from_env` (with development defaults).
#[derive(Debug, Clone)]
pub struct ServerConfig {
    /// Socket to listen on (HOST / PORT env vars).
    pub bind_addr: SocketAddr,
    /// Root directory for bucket/object storage (STORAGE_ROOT).
    pub storage_root: PathBuf,
    /// Default AWS-style region string (AWS_REGION).
    pub region: String,
    /// Path to the IAM JSON config (IAM_CONFIG, defaults under storage root).
    pub iam_config_path: PathBuf,
    /// Allowed clock skew for SigV4 request timestamps, in seconds.
    pub sigv4_timestamp_tolerance_secs: u64,
    /// Minimum allowed presigned-URL expiry, in seconds.
    pub presigned_url_min_expiry: u64,
    /// Maximum allowed presigned-URL expiry, in seconds.
    pub presigned_url_max_expiry: u64,
    /// Application secret; from SECRET_KEY or the on-disk `.secret` file.
    pub secret_key: Option<String>,
    // Feature flags, each driven by a *_ENABLED env var (default false).
    pub encryption_enabled: bool,
    pub kms_enabled: bool,
    pub gc_enabled: bool,
    pub integrity_enabled: bool,
    pub metrics_enabled: bool,
    pub lifecycle_enabled: bool,
    pub website_hosting_enabled: bool,
}
|
|
||||||
|
|
||||||
impl ServerConfig {
|
|
||||||
pub fn from_env() -> Self {
|
|
||||||
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
|
|
||||||
let port: u16 = std::env::var("PORT")
|
|
||||||
.unwrap_or_else(|_| "5000".to_string())
|
|
||||||
.parse()
|
|
||||||
.unwrap_or(5000);
|
|
||||||
let storage_root = std::env::var("STORAGE_ROOT")
|
|
||||||
.unwrap_or_else(|_| "./data".to_string());
|
|
||||||
let region = std::env::var("AWS_REGION")
|
|
||||||
.unwrap_or_else(|_| "us-east-1".to_string());
|
|
||||||
|
|
||||||
let storage_path = PathBuf::from(&storage_root);
|
|
||||||
let iam_config_path = std::env::var("IAM_CONFIG")
|
|
||||||
.map(PathBuf::from)
|
|
||||||
.unwrap_or_else(|_| {
|
|
||||||
storage_path.join(".myfsio.sys").join("config").join("iam.json")
|
|
||||||
});
|
|
||||||
|
|
||||||
let sigv4_timestamp_tolerance_secs: u64 = std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
|
|
||||||
.unwrap_or_else(|_| "900".to_string())
|
|
||||||
.parse()
|
|
||||||
.unwrap_or(900);
|
|
||||||
|
|
||||||
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
|
|
||||||
.unwrap_or_else(|_| "1".to_string())
|
|
||||||
.parse()
|
|
||||||
.unwrap_or(1);
|
|
||||||
|
|
||||||
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
|
|
||||||
.unwrap_or_else(|_| "604800".to_string())
|
|
||||||
.parse()
|
|
||||||
.unwrap_or(604800);
|
|
||||||
|
|
||||||
let secret_key = {
|
|
||||||
let env_key = std::env::var("SECRET_KEY").ok();
|
|
||||||
match env_key {
|
|
||||||
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
|
|
||||||
_ => {
|
|
||||||
let secret_file = storage_path
|
|
||||||
.join(".myfsio.sys")
|
|
||||||
.join("config")
|
|
||||||
.join(".secret");
|
|
||||||
std::fs::read_to_string(&secret_file).ok().map(|s| s.trim().to_string())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let encryption_enabled = std::env::var("ENCRYPTION_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let kms_enabled = std::env::var("KMS_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let gc_enabled = std::env::var("GC_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let integrity_enabled = std::env::var("INTEGRITY_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let metrics_enabled = std::env::var("OPERATION_METRICS_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let lifecycle_enabled = std::env::var("LIFECYCLE_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
let website_hosting_enabled = std::env::var("WEBSITE_HOSTING_ENABLED")
|
|
||||||
.unwrap_or_else(|_| "false".to_string())
|
|
||||||
.to_lowercase() == "true";
|
|
||||||
|
|
||||||
Self {
|
|
||||||
bind_addr: SocketAddr::new(host.parse().unwrap(), port),
|
|
||||||
storage_root: storage_path,
|
|
||||||
region,
|
|
||||||
iam_config_path,
|
|
||||||
sigv4_timestamp_tolerance_secs,
|
|
||||||
presigned_url_min_expiry,
|
|
||||||
presigned_url_max_expiry,
|
|
||||||
secret_key,
|
|
||||||
encryption_enabled,
|
|
||||||
kms_enabled,
|
|
||||||
gc_enabled,
|
|
||||||
integrity_enabled,
|
|
||||||
metrics_enabled,
|
|
||||||
lifecycle_enabled,
|
|
||||||
website_hosting_enabled,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,704 +0,0 @@
|
|||||||
use axum::body::Body;
|
|
||||||
use axum::extract::{Path, State};
|
|
||||||
use axum::http::StatusCode;
|
|
||||||
use axum::response::{IntoResponse, Response};
|
|
||||||
use axum::Extension;
|
|
||||||
use myfsio_common::types::Principal;
|
|
||||||
use myfsio_storage::traits::StorageEngine;
|
|
||||||
|
|
||||||
use crate::services::site_registry::{PeerSite, SiteInfo};
|
|
||||||
use crate::services::website_domains::{is_valid_domain, normalize_domain};
|
|
||||||
use crate::state::AppState;
|
|
||||||
|
|
||||||
/// Serialize `value` into a response body with the given status and an
/// `application/json` content type.
fn json_response(status: StatusCode, value: serde_json::Value) -> Response {
    let headers = [("content-type", "application/json")];
    (status, headers, value.to_string()).into_response()
}
|
|
||||||
|
|
||||||
fn json_error(code: &str, message: &str, status: StatusCode) -> Response {
|
|
||||||
json_response(
|
|
||||||
status,
|
|
||||||
serde_json::json!({"error": {"code": code, "message": message}}),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn require_admin(principal: &Principal) -> Option<Response> {
|
|
||||||
if !principal.is_admin {
|
|
||||||
return Some(json_error("AccessDenied", "Admin access required", StatusCode::FORBIDDEN));
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Buffer the whole request body and parse it as JSON.
///
/// Returns `None` on either a body-read failure or malformed JSON; callers
/// translate that into a 400 MalformedJSON response.
async fn read_json_body(body: Body) -> Option<serde_json::Value> {
    let bytes = http_body_util::BodyExt::collect(body).await.ok()?.to_bytes();
    serde_json::from_slice(&bytes).ok()
}
|
|
||||||
|
|
||||||
/// Validate a site identifier. Returns a human-readable error message, or
/// `None` when the id is acceptable.
///
/// Rules: 1-63 characters; first character alphanumeric; remainder limited
/// to ASCII alphanumerics, hyphens, and underscores.
fn validate_site_id(site_id: &str) -> Option<String> {
    if site_id.is_empty() || site_id.len() > 63 {
        return Some("site_id must be 1-63 characters".to_string());
    }
    // Non-empty was just checked, so next() cannot fail.
    if !site_id.chars().next().unwrap().is_ascii_alphanumeric() {
        return Some("site_id must start with alphanumeric".to_string());
    }
    let charset_ok = site_id
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_');
    if charset_ok {
        None
    } else {
        Some("site_id must contain only alphanumeric, hyphens, underscores".to_string())
    }
}
|
|
||||||
|
|
||||||
/// Validate that a peer endpoint is an absolute http(s) URL.
fn validate_endpoint(endpoint: &str) -> Option<String> {
    let has_scheme = endpoint.starts_with("http://") || endpoint.starts_with("https://");
    if has_scheme {
        None
    } else {
        Some("Endpoint must be http or https URL".to_string())
    }
}
|
|
||||||
|
|
||||||
/// Validate an AWS-style region name (e.g. `us-east-1`).
fn validate_region(region: &str) -> Option<String> {
    // Compile the pattern once per process instead of on every request
    // (the previous version rebuilt the regex on each call).
    static REGION_RE: std::sync::OnceLock<regex::Regex> = std::sync::OnceLock::new();
    let re = REGION_RE
        .get_or_init(|| regex::Regex::new(r"^[a-z]{2,}-[a-z]+-\d+$").unwrap());
    if re.is_match(region) {
        None
    } else {
        Some("Region must match format like us-east-1".to_string())
    }
}
|
|
||||||
|
|
||||||
/// Validate a replication priority; accepted range is 0..=1000 inclusive.
fn validate_priority(priority: i64) -> Option<String> {
    if (0..=1000).contains(&priority) {
        None
    } else {
        Some("Priority must be between 0 and 1000".to_string())
    }
}
|
|
||||||
|
|
||||||
/// GET handler: return the locally-configured site record (admin only).
///
/// 404 when no registry is configured or no local site has been set.
pub async fn get_local_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }

    if let Some(ref registry) = state.site_registry {
        if let Some(local) = registry.get_local_site() {
            return json_response(StatusCode::OK, serde_json::to_value(&local).unwrap());
        }
    }

    json_error("NotFound", "Local site not configured", StatusCode::NOT_FOUND)
}
|
|
||||||
|
|
||||||
/// PUT handler: create or replace the local site record (admin only).
///
/// Validates site_id (required), and endpoint/priority/region when present;
/// unspecified fields fall back to defaults, while `created_at` is carried
/// over from any existing record.
pub async fn update_local_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
    body: Body,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
    };

    let payload = match read_json_body(body).await {
        Some(v) => v,
        None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
    };

    let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
    };

    if let Some(err) = validate_site_id(&site_id) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    // Endpoint is optional for the local site; validate only when provided.
    let endpoint = payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or("").to_string();
    if !endpoint.is_empty() {
        if let Some(err) = validate_endpoint(&endpoint) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
        if let Some(err) = validate_priority(p) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
        if let Some(err) = validate_region(r) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    // Preserve the original creation timestamp across updates.
    let existing = registry.get_local_site();
    let site = SiteInfo {
        site_id: site_id.clone(),
        endpoint,
        region: payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string(),
        priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100) as i32,
        display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
        created_at: existing.and_then(|e| e.created_at),
    };

    registry.set_local_site(site.clone());
    json_response(StatusCode::OK, serde_json::to_value(&site).unwrap())
}
|
|
||||||
|
|
||||||
/// GET handler: list the local site plus all registered peers (admin only).
///
/// When no registry is configured, responds 200 with an empty topology
/// rather than an error.
pub async fn list_all_sites(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_response(StatusCode::OK, serde_json::json!({"local": null, "peers": [], "total_peers": 0})),
    };

    let local = registry.get_local_site();
    let peers = registry.list_peers();

    json_response(StatusCode::OK, serde_json::json!({
        "local": local,
        "peers": peers,
        "total_peers": peers.len(),
    }))
}
|
|
||||||
|
|
||||||
/// POST handler: register a new peer site (admin only).
///
/// Requires site_id and endpoint; region and priority default to
/// "us-east-1" and 100. 409 when a peer with the same id already exists;
/// new peers start unhealthy until a health check runs.
pub async fn register_peer_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
    body: Body,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_error("InvalidRequest", "Site registry not available", StatusCode::BAD_REQUEST),
    };

    let payload = match read_json_body(body).await {
        Some(v) => v,
        None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
    };

    let site_id = match payload.get("site_id").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return json_error("ValidationError", "site_id is required", StatusCode::BAD_REQUEST),
    };
    if let Some(err) = validate_site_id(&site_id) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    let endpoint = match payload.get("endpoint").and_then(|v| v.as_str()) {
        Some(e) => e.to_string(),
        None => return json_error("ValidationError", "endpoint is required", StatusCode::BAD_REQUEST),
    };
    if let Some(err) = validate_endpoint(&endpoint) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    let region = payload.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1").to_string();
    if let Some(err) = validate_region(&region) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    let priority = payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(100);
    if let Some(err) = validate_priority(priority) {
        return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
    }

    // Duplicate site ids are rejected rather than overwritten.
    if registry.get_peer(&site_id).is_some() {
        return json_error("AlreadyExists", &format!("Peer site '{}' already exists", site_id), StatusCode::CONFLICT);
    }

    let peer = PeerSite {
        site_id: site_id.clone(),
        endpoint,
        region,
        priority: priority as i32,
        display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&site_id).to_string(),
        connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()),
        created_at: Some(chrono::Utc::now().to_rfc3339()),
        // New peers start unhealthy until the first health check.
        is_healthy: false,
        last_health_check: None,
    };

    registry.add_peer(peer.clone());
    json_response(StatusCode::CREATED, serde_json::to_value(&peer).unwrap())
}
|
|
||||||
|
|
||||||
/// GET handler: fetch a single peer site by id (admin only); 404 when the
/// registry is unavailable or the peer is unknown.
pub async fn get_peer_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
    Path(site_id): Path<String>,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
    };

    match registry.get_peer(&site_id) {
        Some(peer) => json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap()),
        None => json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
    }
}
|
|
||||||
|
|
||||||
/// PATCH/PUT handler: partially update an existing peer site (admin only).
///
/// Each field present in the payload is validated and applied; absent
/// fields keep the existing peer's values, including health state and
/// creation timestamp.
pub async fn update_peer_site(
    State(state): State<AppState>,
    Extension(principal): Extension<Principal>,
    Path(site_id): Path<String>,
    body: Body,
) -> Response {
    if let Some(err) = require_admin(&principal) { return err; }
    let registry = match &state.site_registry {
        Some(r) => r,
        None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
    };

    let existing = match registry.get_peer(&site_id) {
        Some(p) => p,
        None => return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND),
    };

    let payload = match read_json_body(body).await {
        Some(v) => v,
        None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
    };

    // Validate only the fields the caller is changing.
    if let Some(ep) = payload.get("endpoint").and_then(|v| v.as_str()) {
        if let Some(err) = validate_endpoint(ep) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }
    if let Some(p) = payload.get("priority").and_then(|v| v.as_i64()) {
        if let Some(err) = validate_priority(p) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }
    if let Some(r) = payload.get("region").and_then(|v| v.as_str()) {
        if let Some(err) = validate_region(r) {
            return json_error("ValidationError", &err, StatusCode::BAD_REQUEST);
        }
    }

    // Merge: payload value wins, otherwise keep the existing field.
    let peer = PeerSite {
        site_id: site_id.clone(),
        endpoint: payload.get("endpoint").and_then(|v| v.as_str()).unwrap_or(&existing.endpoint).to_string(),
        region: payload.get("region").and_then(|v| v.as_str()).unwrap_or(&existing.region).to_string(),
        priority: payload.get("priority").and_then(|v| v.as_i64()).unwrap_or(existing.priority as i64) as i32,
        display_name: payload.get("display_name").and_then(|v| v.as_str()).unwrap_or(&existing.display_name).to_string(),
        connection_id: payload.get("connection_id").and_then(|v| v.as_str()).map(|s| s.to_string()).or(existing.connection_id),
        created_at: existing.created_at,
        is_healthy: existing.is_healthy,
        last_health_check: existing.last_health_check,
    };

    registry.update_peer(peer.clone());
    json_response(StatusCode::OK, serde_json::to_value(&peer).unwrap())
}
|
|
||||||
|
|
||||||
pub async fn delete_peer_site(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(site_id): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let registry = match &state.site_registry {
|
|
||||||
Some(r) => r,
|
|
||||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
|
||||||
};
|
|
||||||
|
|
||||||
if !registry.delete_peer(&site_id) {
|
|
||||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
|
||||||
}
|
|
||||||
StatusCode::NO_CONTENT.into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn check_peer_health(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(site_id): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let registry = match &state.site_registry {
|
|
||||||
Some(r) => r,
|
|
||||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
|
||||||
};
|
|
||||||
|
|
||||||
if registry.get_peer(&site_id).is_none() {
|
|
||||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
|
||||||
}
|
|
||||||
|
|
||||||
json_response(StatusCode::OK, serde_json::json!({
|
|
||||||
"site_id": site_id,
|
|
||||||
"is_healthy": false,
|
|
||||||
"error": "Health check not implemented in standalone mode",
|
|
||||||
"checked_at": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_topology(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let registry = match &state.site_registry {
|
|
||||||
Some(r) => r,
|
|
||||||
None => return json_response(StatusCode::OK, serde_json::json!({"sites": [], "total": 0, "healthy_count": 0})),
|
|
||||||
};
|
|
||||||
|
|
||||||
let local = registry.get_local_site();
|
|
||||||
let peers = registry.list_peers();
|
|
||||||
|
|
||||||
let mut sites: Vec<serde_json::Value> = Vec::new();
|
|
||||||
if let Some(l) = local {
|
|
||||||
let mut v = serde_json::to_value(&l).unwrap();
|
|
||||||
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(true));
|
|
||||||
v.as_object_mut().unwrap().insert("is_healthy".to_string(), serde_json::json!(true));
|
|
||||||
sites.push(v);
|
|
||||||
}
|
|
||||||
for p in &peers {
|
|
||||||
let mut v = serde_json::to_value(p).unwrap();
|
|
||||||
v.as_object_mut().unwrap().insert("is_local".to_string(), serde_json::json!(false));
|
|
||||||
sites.push(v);
|
|
||||||
}
|
|
||||||
|
|
||||||
sites.sort_by_key(|s| s.get("priority").and_then(|v| v.as_i64()).unwrap_or(100));
|
|
||||||
|
|
||||||
let healthy_count = sites.iter().filter(|s| s.get("is_healthy").and_then(|v| v.as_bool()).unwrap_or(false)).count();
|
|
||||||
|
|
||||||
json_response(StatusCode::OK, serde_json::json!({
|
|
||||||
"sites": sites,
|
|
||||||
"total": sites.len(),
|
|
||||||
"healthy_count": healthy_count,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn check_bidirectional_status(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(site_id): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let registry = match &state.site_registry {
|
|
||||||
Some(r) => r,
|
|
||||||
None => return json_error("NotFound", "Site registry not available", StatusCode::NOT_FOUND),
|
|
||||||
};
|
|
||||||
|
|
||||||
if registry.get_peer(&site_id).is_none() {
|
|
||||||
return json_error("NotFound", &format!("Peer site '{}' not found", site_id), StatusCode::NOT_FOUND);
|
|
||||||
}
|
|
||||||
|
|
||||||
let local = registry.get_local_site();
|
|
||||||
json_response(StatusCode::OK, serde_json::json!({
|
|
||||||
"site_id": site_id,
|
|
||||||
"local_site_id": local.as_ref().map(|l| &l.site_id),
|
|
||||||
"local_endpoint": local.as_ref().map(|l| &l.endpoint),
|
|
||||||
"local_bidirectional_rules": [],
|
|
||||||
"local_site_sync_enabled": false,
|
|
||||||
"remote_status": null,
|
|
||||||
"issues": [{"code": "NOT_IMPLEMENTED", "message": "Bidirectional status check not implemented in standalone mode", "severity": "warning"}],
|
|
||||||
"is_fully_configured": false,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_list_users(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let users = state.iam.list_users().await;
|
|
||||||
json_response(StatusCode::OK, serde_json::json!({"users": users}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_get_user(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(identifier): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.get_user(&identifier).await {
|
|
||||||
Some(user) => json_response(StatusCode::OK, user),
|
|
||||||
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_get_user_policies(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(identifier): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.get_user_policies(&identifier) {
|
|
||||||
Some(policies) => json_response(StatusCode::OK, serde_json::json!({"policies": policies})),
|
|
||||||
None => json_error("NotFound", &format!("User '{}' not found", identifier), StatusCode::NOT_FOUND),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_create_access_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(identifier): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.create_access_key(&identifier) {
|
|
||||||
Ok(result) => json_response(StatusCode::CREATED, result),
|
|
||||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_delete_access_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path((_identifier, access_key)): Path<(String, String)>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.delete_access_key(&access_key) {
|
|
||||||
Ok(()) => StatusCode::NO_CONTENT.into_response(),
|
|
||||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_disable_user(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(identifier): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.set_user_enabled(&identifier, false).await {
|
|
||||||
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "disabled"})),
|
|
||||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn iam_enable_user(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(identifier): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match state.iam.set_user_enabled(&identifier, true).await {
|
|
||||||
Ok(()) => json_response(StatusCode::OK, serde_json::json!({"status": "enabled"})),
|
|
||||||
Err(e) => json_error("InvalidRequest", &e, StatusCode::BAD_REQUEST),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn list_website_domains(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let store = match &state.website_domains {
|
|
||||||
Some(s) => s,
|
|
||||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
json_response(StatusCode::OK, serde_json::json!(store.list_all()))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn create_website_domain(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
body: Body,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let store = match &state.website_domains {
|
|
||||||
Some(s) => s,
|
|
||||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let payload = match read_json_body(body).await {
|
|
||||||
Some(v) => v,
|
|
||||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let domain = normalize_domain(payload.get("domain").and_then(|v| v.as_str()).unwrap_or(""));
|
|
||||||
if domain.is_empty() {
|
|
||||||
return json_error("ValidationError", "domain is required", StatusCode::BAD_REQUEST);
|
|
||||||
}
|
|
||||||
if !is_valid_domain(&domain) {
|
|
||||||
return json_error("ValidationError", &format!("Invalid domain: '{}'", domain), StatusCode::BAD_REQUEST);
|
|
||||||
}
|
|
||||||
|
|
||||||
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
|
|
||||||
if bucket.is_empty() {
|
|
||||||
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
|
|
||||||
}
|
|
||||||
|
|
||||||
match state.storage.bucket_exists(&bucket).await {
|
|
||||||
Ok(true) => {}
|
|
||||||
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
|
|
||||||
}
|
|
||||||
|
|
||||||
if store.get_bucket(&domain).is_some() {
|
|
||||||
return json_error("Conflict", &format!("Domain '{}' is already mapped", domain), StatusCode::CONFLICT);
|
|
||||||
}
|
|
||||||
|
|
||||||
store.set_mapping(&domain, &bucket);
|
|
||||||
json_response(StatusCode::CREATED, serde_json::json!({"domain": domain, "bucket": bucket}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_website_domain(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(domain): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let store = match &state.website_domains {
|
|
||||||
Some(s) => s,
|
|
||||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let domain = normalize_domain(&domain);
|
|
||||||
match store.get_bucket(&domain) {
|
|
||||||
Some(bucket) => json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket})),
|
|
||||||
None => json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn update_website_domain(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(domain): Path<String>,
|
|
||||||
body: Body,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let store = match &state.website_domains {
|
|
||||||
Some(s) => s,
|
|
||||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let domain = normalize_domain(&domain);
|
|
||||||
let payload = match read_json_body(body).await {
|
|
||||||
Some(v) => v,
|
|
||||||
None => return json_error("MalformedJSON", "Invalid JSON body", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let bucket = payload.get("bucket").and_then(|v| v.as_str()).unwrap_or("").trim().to_string();
|
|
||||||
if bucket.is_empty() {
|
|
||||||
return json_error("ValidationError", "bucket is required", StatusCode::BAD_REQUEST);
|
|
||||||
}
|
|
||||||
|
|
||||||
match state.storage.bucket_exists(&bucket).await {
|
|
||||||
Ok(true) => {}
|
|
||||||
_ => return json_error("NoSuchBucket", &format!("Bucket '{}' does not exist", bucket), StatusCode::NOT_FOUND),
|
|
||||||
}
|
|
||||||
|
|
||||||
if store.get_bucket(&domain).is_none() {
|
|
||||||
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
|
|
||||||
}
|
|
||||||
|
|
||||||
store.set_mapping(&domain, &bucket);
|
|
||||||
json_response(StatusCode::OK, serde_json::json!({"domain": domain, "bucket": bucket}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn delete_website_domain(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
Path(domain): Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let store = match &state.website_domains {
|
|
||||||
Some(s) => s,
|
|
||||||
None => return json_error("InvalidRequest", "Website hosting is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let domain = normalize_domain(&domain);
|
|
||||||
if !store.delete_mapping(&domain) {
|
|
||||||
return json_error("NotFound", &format!("No mapping found for domain '{}'", domain), StatusCode::NOT_FOUND);
|
|
||||||
}
|
|
||||||
StatusCode::NO_CONTENT.into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Query-string pagination parameters; both fields are optional and the
/// `Default` impl yields `None` for each (callers decide the fallback).
#[derive(serde::Deserialize, Default)]
pub struct PaginationQuery {
    // Maximum number of items to return, when given.
    pub limit: Option<usize>,
    // Number of items to skip from the start, when given.
    pub offset: Option<usize>,
}
|
|
||||||
|
|
||||||
pub async fn gc_status(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match &state.gc {
|
|
||||||
Some(gc) => json_response(StatusCode::OK, gc.status().await),
|
|
||||||
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn gc_run(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
body: Body,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let gc = match &state.gc {
|
|
||||||
Some(gc) => gc,
|
|
||||||
None => return json_error("InvalidRequest", "GC is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
|
|
||||||
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
|
|
||||||
|
|
||||||
match gc.run_now(dry_run).await {
|
|
||||||
Ok(result) => json_response(StatusCode::OK, result),
|
|
||||||
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn gc_history(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match &state.gc {
|
|
||||||
Some(gc) => json_response(StatusCode::OK, serde_json::json!({"executions": gc.history().await})),
|
|
||||||
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn integrity_status(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match &state.integrity {
|
|
||||||
Some(checker) => json_response(StatusCode::OK, checker.status().await),
|
|
||||||
None => json_response(StatusCode::OK, serde_json::json!({"enabled": false, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn integrity_run(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
body: Body,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
let checker = match &state.integrity {
|
|
||||||
Some(c) => c,
|
|
||||||
None => return json_error("InvalidRequest", "Integrity checker is not enabled", StatusCode::BAD_REQUEST),
|
|
||||||
};
|
|
||||||
|
|
||||||
let payload = read_json_body(body).await.unwrap_or(serde_json::json!({}));
|
|
||||||
let dry_run = payload.get("dry_run").and_then(|v| v.as_bool()).unwrap_or(false);
|
|
||||||
let auto_heal = payload.get("auto_heal").and_then(|v| v.as_bool()).unwrap_or(false);
|
|
||||||
|
|
||||||
match checker.run_now(dry_run, auto_heal).await {
|
|
||||||
Ok(result) => json_response(StatusCode::OK, result),
|
|
||||||
Err(e) => json_error("Conflict", &e, StatusCode::CONFLICT),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn integrity_history(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
Extension(principal): Extension<Principal>,
|
|
||||||
) -> Response {
|
|
||||||
if let Some(err) = require_admin(&principal) { return err; }
|
|
||||||
match &state.integrity {
|
|
||||||
Some(checker) => json_response(StatusCode::OK, serde_json::json!({"executions": checker.history().await})),
|
|
||||||
None => json_response(StatusCode::OK, serde_json::json!({"executions": []})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,278 +0,0 @@
|
|||||||
use axum::body::Body;
|
|
||||||
use axum::extract::State;
|
|
||||||
use axum::http::StatusCode;
|
|
||||||
use axum::response::{IntoResponse, Response};
|
|
||||||
use base64::engine::general_purpose::STANDARD as B64;
|
|
||||||
use base64::Engine;
|
|
||||||
use serde_json::json;
|
|
||||||
|
|
||||||
use crate::state::AppState;
|
|
||||||
|
|
||||||
fn json_ok(value: serde_json::Value) -> Response {
|
|
||||||
(
|
|
||||||
StatusCode::OK,
|
|
||||||
[("content-type", "application/json")],
|
|
||||||
value.to_string(),
|
|
||||||
)
|
|
||||||
.into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn json_err(status: StatusCode, msg: &str) -> Response {
|
|
||||||
(
|
|
||||||
status,
|
|
||||||
[("content-type", "application/json")],
|
|
||||||
json!({"error": msg}).to_string(),
|
|
||||||
)
|
|
||||||
.into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn list_keys(State(state): State<AppState>) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let keys = kms.list_keys().await;
|
|
||||||
let keys_json: Vec<serde_json::Value> = keys
|
|
||||||
.iter()
|
|
||||||
.map(|k| {
|
|
||||||
json!({
|
|
||||||
"KeyId": k.key_id,
|
|
||||||
"Arn": k.arn,
|
|
||||||
"Description": k.description,
|
|
||||||
"CreationDate": k.creation_date.to_rfc3339(),
|
|
||||||
"Enabled": k.enabled,
|
|
||||||
"KeyState": k.key_state,
|
|
||||||
"KeyUsage": k.key_usage,
|
|
||||||
"KeySpec": k.key_spec,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
json_ok(json!({"keys": keys_json}))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
|
||||||
Ok(c) => c.to_bytes(),
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let description = if body_bytes.is_empty() {
|
|
||||||
String::new()
|
|
||||||
} else {
|
|
||||||
match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
|
|
||||||
Ok(v) => v
|
|
||||||
.get("Description")
|
|
||||||
.or_else(|| v.get("description"))
|
|
||||||
.and_then(|d| d.as_str())
|
|
||||||
.unwrap_or("")
|
|
||||||
.to_string(),
|
|
||||||
Err(_) => String::new(),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.create_key(&description).await {
|
|
||||||
Ok(key) => json_ok(json!({
|
|
||||||
"KeyId": key.key_id,
|
|
||||||
"Arn": key.arn,
|
|
||||||
"Description": key.description,
|
|
||||||
"CreationDate": key.creation_date.to_rfc3339(),
|
|
||||||
"Enabled": key.enabled,
|
|
||||||
"KeyState": key.key_state,
|
|
||||||
})),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.get_key(&key_id).await {
|
|
||||||
Some(key) => json_ok(json!({
|
|
||||||
"KeyId": key.key_id,
|
|
||||||
"Arn": key.arn,
|
|
||||||
"Description": key.description,
|
|
||||||
"CreationDate": key.creation_date.to_rfc3339(),
|
|
||||||
"Enabled": key.enabled,
|
|
||||||
"KeyState": key.key_state,
|
|
||||||
"KeyUsage": key.key_usage,
|
|
||||||
"KeySpec": key.key_spec,
|
|
||||||
})),
|
|
||||||
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn delete_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.delete_key(&key_id).await {
|
|
||||||
Ok(true) => StatusCode::NO_CONTENT.into_response(),
|
|
||||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn enable_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.enable_key(&key_id).await {
|
|
||||||
Ok(true) => json_ok(json!({"status": "enabled"})),
|
|
||||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn disable_key(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
|
||||||
) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.disable_key(&key_id).await {
|
|
||||||
Ok(true) => json_ok(json!({"status": "disabled"})),
|
|
||||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
|
||||||
Ok(c) => c.to_bytes(),
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
|
||||||
};
|
|
||||||
let plaintext_b64 = match req.get("Plaintext").and_then(|v| v.as_str()) {
|
|
||||||
Some(p) => p,
|
|
||||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing Plaintext"),
|
|
||||||
};
|
|
||||||
let plaintext = match B64.decode(plaintext_b64) {
|
|
||||||
Ok(p) => p,
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64 Plaintext"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.encrypt_data(key_id, &plaintext).await {
|
|
||||||
Ok(ct) => json_ok(json!({
|
|
||||||
"KeyId": key_id,
|
|
||||||
"CiphertextBlob": B64.encode(&ct),
|
|
||||||
})),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
|
||||||
Ok(c) => c.to_bytes(),
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
|
||||||
};
|
|
||||||
let ct_b64 = match req.get("CiphertextBlob").and_then(|v| v.as_str()) {
|
|
||||||
Some(c) => c,
|
|
||||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing CiphertextBlob"),
|
|
||||||
};
|
|
||||||
let ciphertext = match B64.decode(ct_b64) {
|
|
||||||
Ok(c) => c,
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64"),
|
|
||||||
};
|
|
||||||
|
|
||||||
match kms.decrypt_data(key_id, &ciphertext).await {
|
|
||||||
Ok(pt) => json_ok(json!({
|
|
||||||
"KeyId": key_id,
|
|
||||||
"Plaintext": B64.encode(&pt),
|
|
||||||
})),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
|
|
||||||
let kms = match &state.kms {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let body_bytes = match http_body_util::BodyExt::collect(body).await {
|
|
||||||
Ok(c) => c.to_bytes(),
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
|
|
||||||
Some(k) => k,
|
|
||||||
None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
|
|
||||||
};
|
|
||||||
let num_bytes = req
|
|
||||||
.get("NumberOfBytes")
|
|
||||||
.and_then(|v| v.as_u64())
|
|
||||||
.unwrap_or(32) as usize;
|
|
||||||
|
|
||||||
if num_bytes < 1 || num_bytes > 1024 {
|
|
||||||
return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
|
|
||||||
}
|
|
||||||
|
|
||||||
match kms.generate_data_key(key_id, num_bytes).await {
|
|
||||||
Ok((plaintext, wrapped)) => json_ok(json!({
|
|
||||||
"KeyId": key_id,
|
|
||||||
"Plaintext": B64.encode(&plaintext),
|
|
||||||
"CiphertextBlob": B64.encode(&wrapped),
|
|
||||||
})),
|
|
||||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,552 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use axum::body::Body;
|
|
||||||
use axum::http::{HeaderMap, HeaderName, StatusCode};
|
|
||||||
use axum::response::{IntoResponse, Response};
|
|
||||||
use base64::Engine;
|
|
||||||
use bytes::Bytes;
|
|
||||||
use crc32fast::Hasher;
|
|
||||||
use duckdb::types::ValueRef;
|
|
||||||
use duckdb::Connection;
|
|
||||||
use futures::stream;
|
|
||||||
use http_body_util::BodyExt;
|
|
||||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
|
||||||
use myfsio_storage::traits::StorageEngine;
|
|
||||||
|
|
||||||
use crate::state::AppState;
|
|
||||||
|
|
||||||
// Empty extern block whose only effect is the #[link] attribute —
// presumably forces linking against Windows' Restart Manager
// (Rstrtmgr.dll) on that target. NOTE(review): no symbols are declared
// here; confirm this link directive is still required.
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}

// Size (bytes) of each Records payload chunk emitted by the select
// event stream: 64 KiB.
const CHUNK_SIZE: usize = 65_536;
|
|
||||||
/// Handle S3 SelectObjectContent: parse the XML request, run the query
/// against the object's on-disk file on a blocking thread, and stream the
/// results back as an AWS event stream (Records* → Stats → End).
///
/// Errors are returned as S3-style XML error responses: MalformedXML for an
/// unreadable body, NoSuchKey when the object path cannot be resolved,
/// InvalidRequest for query-execution failures, InternalError if the
/// blocking task panics or is cancelled.
pub async fn post_select_object_content(
    state: &AppState,
    bucket: &str,
    key: &str,
    headers: &HeaderMap,
    body: Body,
) -> Response {
    // The request body must be declared as XML.
    if let Some(resp) = require_xml_content_type(headers) {
        return resp;
    }

    let body_bytes = match body.collect().await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::MalformedXML,
                "Unable to parse XML document",
            ));
        }
    };

    let request = match parse_select_request(&body_bytes) {
        Ok(r) => r,
        Err(err) => return s3_error_response(err),
    };

    // Any storage-layer failure here is reported as a missing key.
    let object_path = match state.storage.get_object_path(bucket, key).await {
        Ok(path) => path,
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::NoSuchKey,
                "Object not found",
            ));
        }
    };

    // The query engine is synchronous, so run it off the async runtime.
    let join_res = tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
    let chunks = match join_res {
        Ok(Ok(chunks)) => chunks,
        // Query-level failure: forward the engine's message to the client.
        Ok(Err(message)) => {
            return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
        }
        // Join error (panic/cancellation) — report a generic internal error.
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::InternalError,
                "SelectObjectContent execution failed",
            ));
        }
    };

    let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
    // One Records event per result chunk, then Stats and End (hence +2).
    let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
    for chunk in chunks {
        events.push(Bytes::from(encode_select_event("Records", &chunk)));
    }

    // BytesScanned is reported as 0 here; only BytesReturned is tracked.
    let stats_payload = build_stats_xml(0, bytes_returned);
    events.push(Bytes::from(encode_select_event("Stats", stats_payload.as_bytes())));
    events.push(Bytes::from(encode_select_event("End", b"")));

    let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
    let body = Body::from_stream(stream);

    let mut response = (StatusCode::OK, body).into_response();
    response.headers_mut().insert(
        HeaderName::from_static("content-type"),
        "application/octet-stream".parse().unwrap(),
    );
    response.headers_mut().insert(
        HeaderName::from_static("x-amz-request-charged"),
        "requester".parse().unwrap(),
    );
    response
}
|
|
||||||
|
|
||||||
/// Parsed form of an S3 SelectObjectContent request body.
#[derive(Clone)]
struct SelectRequest {
    // SQL expression to run against the object.
    expression: String,
    input_format: InputFormat,
    output_format: OutputFormat,
}

/// How the stored object is deserialized before querying.
#[derive(Clone)]
enum InputFormat {
    Csv(CsvInputConfig),
    Json(JsonInputConfig),
    Parquet,
}

#[derive(Clone)]
struct CsvInputConfig {
    // Upper-cased during parsing; "NONE" when absent (see parse_input_format).
    file_header_info: String,
    field_delimiter: String,
    quote_character: String,
}

#[derive(Clone)]
struct JsonInputConfig {
    // Upper-cased during parsing; "DOCUMENT" when absent (see parse_input_format).
    json_type: String,
}

/// How query results are serialized back to the client.
#[derive(Clone)]
enum OutputFormat {
    Csv(CsvOutputConfig),
    Json(JsonOutputConfig),
}

#[derive(Clone)]
struct CsvOutputConfig {
    field_delimiter: String,
    record_delimiter: String,
    quote_character: String,
}

#[derive(Clone)]
struct JsonOutputConfig {
    record_delimiter: String,
}
|
|
||||||
|
|
||||||
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
|
|
||||||
let xml = String::from_utf8_lossy(payload);
|
|
||||||
let doc = roxmltree::Document::parse(&xml)
|
|
||||||
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
|
|
||||||
|
|
||||||
let root = doc.root_element();
|
|
||||||
if root.tag_name().name() != "SelectObjectContentRequest" {
|
|
||||||
return Err(S3Error::new(
|
|
||||||
S3ErrorCode::MalformedXML,
|
|
||||||
"Root element must be SelectObjectContentRequest",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let expression = child_text(&root, "Expression")
|
|
||||||
.filter(|v| !v.is_empty())
|
|
||||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
|
|
||||||
|
|
||||||
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
|
|
||||||
if !expression_type.eq_ignore_ascii_case("SQL") {
|
|
||||||
return Err(S3Error::new(
|
|
||||||
S3ErrorCode::InvalidRequest,
|
|
||||||
"Only SQL expression type is supported",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let input_node = child(&root, "InputSerialization")
|
|
||||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "InputSerialization is required"))?;
|
|
||||||
let output_node = child(&root, "OutputSerialization")
|
|
||||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "OutputSerialization is required"))?;
|
|
||||||
|
|
||||||
let input_format = parse_input_format(&input_node)?;
|
|
||||||
let output_format = parse_output_format(&output_node)?;
|
|
||||||
|
|
||||||
Ok(SelectRequest {
|
|
||||||
expression,
|
|
||||||
input_format,
|
|
||||||
output_format,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
|
|
||||||
if let Some(csv_node) = child(node, "CSV") {
|
|
||||||
return Ok(InputFormat::Csv(CsvInputConfig {
|
|
||||||
file_header_info: child_text(&csv_node, "FileHeaderInfo")
|
|
||||||
.unwrap_or_else(|| "NONE".to_string())
|
|
||||||
.to_ascii_uppercase(),
|
|
||||||
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
|
|
||||||
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(json_node) = child(node, "JSON") {
|
|
||||||
return Ok(InputFormat::Json(JsonInputConfig {
|
|
||||||
json_type: child_text(&json_node, "Type")
|
|
||||||
.unwrap_or_else(|| "DOCUMENT".to_string())
|
|
||||||
.to_ascii_uppercase(),
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
if child(node, "Parquet").is_some() {
|
|
||||||
return Ok(InputFormat::Parquet);
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(S3Error::new(
|
|
||||||
S3ErrorCode::InvalidRequest,
|
|
||||||
"InputSerialization must specify CSV, JSON, or Parquet",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
|
|
||||||
if let Some(csv_node) = child(node, "CSV") {
|
|
||||||
return Ok(OutputFormat::Csv(CsvOutputConfig {
|
|
||||||
field_delimiter: child_text(&csv_node, "FieldDelimiter").unwrap_or_else(|| ",".to_string()),
|
|
||||||
record_delimiter: child_text(&csv_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
|
|
||||||
quote_character: child_text(&csv_node, "QuoteCharacter").unwrap_or_else(|| "\"".to_string()),
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(json_node) = child(node, "JSON") {
|
|
||||||
return Ok(OutputFormat::Json(JsonOutputConfig {
|
|
||||||
record_delimiter: child_text(&json_node, "RecordDelimiter").unwrap_or_else(|| "\n".to_string()),
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(S3Error::new(
|
|
||||||
S3ErrorCode::InvalidRequest,
|
|
||||||
"OutputSerialization must specify CSV or JSON",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the first child element of `node` whose tag name equals `name`.
fn child<'a, 'input>(node: &'a roxmltree::Node<'a, 'input>, name: &str) -> Option<roxmltree::Node<'a, 'input>> {
    for candidate in node.children() {
        if candidate.is_element() && candidate.tag_name().name() == name {
            return Some(candidate);
        }
    }
    None
}
|
|
||||||
|
|
||||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
|
||||||
child(node, name)
|
|
||||||
.and_then(|n| n.text())
|
|
||||||
.map(|s| s.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Run the SELECT expression against the object at `path` and return the
/// serialized results as a list of byte chunks.
///
/// Runs synchronously (the caller wraps it in `spawn_blocking`). Errors are
/// returned as plain strings and mapped to S3 errors by the caller.
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
    let conn = Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;

    // Materialize the object into an in-memory table named `data`.
    load_input_table(&conn, &path, &request.input_format)?;

    // S3 Select queries reference the pseudo-table `S3Object`; rewrite it to
    // our `data` table. NOTE(review): plain substring replacement — other
    // casings (e.g. "S3OBJECT") are not rewritten, and matching text inside
    // string literals would be rewritten too. Confirm acceptable.
    let expression = request
        .expression
        .replace("s3object", "data")
        .replace("S3Object", "data");

    let mut stmt = conn
        .prepare(&expression)
        .map_err(|e| format!("SQL execution error: {}", e))?;
    let mut rows = stmt
        .query([])
        .map_err(|e| format!("SQL execution error: {}", e))?;
    // Column metadata is only reachable through the statement backing `rows`.
    let stmt_ref = rows
        .as_ref()
        .ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
    let col_count = stmt_ref.column_count();
    let mut columns: Vec<String> = Vec::with_capacity(col_count);
    for i in 0..col_count {
        // Fall back to a positional name ("_0", "_1", ...) when unavailable.
        let name = stmt_ref
            .column_name(i)
            .map(|s| s.to_string())
            .unwrap_or_else(|_| format!("_{}", i));
        columns.push(name);
    }

    match request.output_format {
        OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
        OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
    }
}
|
|
||||||
|
|
||||||
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
|
|
||||||
let path_str = path.to_string_lossy().replace('\\', "/");
|
|
||||||
match input {
|
|
||||||
InputFormat::Csv(cfg) => {
|
|
||||||
let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
|
|
||||||
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
|
|
||||||
let quote = normalize_single_char(&cfg.quote_character, '"');
|
|
||||||
|
|
||||||
let sql = format!(
|
|
||||||
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
|
|
||||||
sql_escape(&path_str),
|
|
||||||
if header { "true" } else { "false" },
|
|
||||||
sql_escape(&delimiter),
|
|
||||||
sql_escape("e)
|
|
||||||
);
|
|
||||||
conn.execute_batch(&sql)
|
|
||||||
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
|
|
||||||
}
|
|
||||||
InputFormat::Json(cfg) => {
|
|
||||||
let format = if cfg.json_type == "LINES" {
|
|
||||||
"newline_delimited"
|
|
||||||
} else {
|
|
||||||
"array"
|
|
||||||
};
|
|
||||||
let sql = format!(
|
|
||||||
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
|
|
||||||
sql_escape(&path_str),
|
|
||||||
format
|
|
||||||
);
|
|
||||||
conn.execute_batch(&sql)
|
|
||||||
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
|
|
||||||
}
|
|
||||||
InputFormat::Parquet => {
|
|
||||||
let sql = format!(
|
|
||||||
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
|
|
||||||
sql_escape(&path_str)
|
|
||||||
);
|
|
||||||
conn.execute_batch(&sql)
|
|
||||||
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Escape single quotes for safe embedding inside a single-quoted SQL
/// string literal (each `'` becomes `''`).
fn sql_escape(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        escaped.push(ch);
        if ch == '\'' {
            escaped.push('\'');
        }
    }
    escaped
}
|
|
||||||
|
|
||||||
/// Reduce a (possibly multi-character) config value to its first character,
/// falling back to `default_char` when the value is empty.
fn normalize_single_char(value: &str, default_char: char) -> String {
    match value.chars().next() {
        Some(first) => first.to_string(),
        None => default_char.to_string(),
    }
}
|
|
||||||
|
|
||||||
fn collect_csv_chunks(
|
|
||||||
rows: &mut duckdb::Rows<'_>,
|
|
||||||
col_count: usize,
|
|
||||||
cfg: CsvOutputConfig,
|
|
||||||
) -> Result<Vec<Vec<u8>>, String> {
|
|
||||||
let delimiter = cfg.field_delimiter;
|
|
||||||
let record_delimiter = cfg.record_delimiter;
|
|
||||||
let quote = cfg.quote_character;
|
|
||||||
|
|
||||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
|
||||||
let mut buffer = String::new();
|
|
||||||
|
|
||||||
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
|
|
||||||
let mut fields: Vec<String> = Vec::with_capacity(col_count);
|
|
||||||
for i in 0..col_count {
|
|
||||||
let value = row
|
|
||||||
.get_ref(i)
|
|
||||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
|
||||||
if matches!(value, ValueRef::Null) {
|
|
||||||
fields.push(String::new());
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut text = value_ref_to_string(value);
|
|
||||||
if text.contains(&delimiter) || text.contains("e) || text.contains(&record_delimiter) {
|
|
||||||
text = text.replace("e, &(quote.clone() + "e));
|
|
||||||
text = format!("{}{}{}", quote, text, quote);
|
|
||||||
}
|
|
||||||
fields.push(text);
|
|
||||||
}
|
|
||||||
buffer.push_str(&fields.join(&delimiter));
|
|
||||||
buffer.push_str(&record_delimiter);
|
|
||||||
|
|
||||||
while buffer.len() >= CHUNK_SIZE {
|
|
||||||
let rest = buffer.split_off(CHUNK_SIZE);
|
|
||||||
chunks.push(buffer.into_bytes());
|
|
||||||
buffer = rest;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !buffer.is_empty() {
|
|
||||||
chunks.push(buffer.into_bytes());
|
|
||||||
}
|
|
||||||
Ok(chunks)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn collect_json_chunks(
|
|
||||||
rows: &mut duckdb::Rows<'_>,
|
|
||||||
col_count: usize,
|
|
||||||
columns: &[String],
|
|
||||||
cfg: JsonOutputConfig,
|
|
||||||
) -> Result<Vec<Vec<u8>>, String> {
|
|
||||||
let record_delimiter = cfg.record_delimiter;
|
|
||||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
|
||||||
let mut buffer = String::new();
|
|
||||||
|
|
||||||
while let Some(row) = rows.next().map_err(|e| format!("SQL execution error: {}", e))? {
|
|
||||||
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
|
|
||||||
for i in 0..col_count {
|
|
||||||
let value = row
|
|
||||||
.get_ref(i)
|
|
||||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
|
||||||
let key = columns
|
|
||||||
.get(i)
|
|
||||||
.cloned()
|
|
||||||
.unwrap_or_else(|| format!("_{}", i));
|
|
||||||
record.insert(key, value_ref_to_json(value));
|
|
||||||
}
|
|
||||||
let line = serde_json::to_string(&record)
|
|
||||||
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
|
|
||||||
buffer.push_str(&line);
|
|
||||||
buffer.push_str(&record_delimiter);
|
|
||||||
|
|
||||||
while buffer.len() >= CHUNK_SIZE {
|
|
||||||
let rest = buffer.split_off(CHUNK_SIZE);
|
|
||||||
chunks.push(buffer.into_bytes());
|
|
||||||
buffer = rest;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !buffer.is_empty() {
|
|
||||||
chunks.push(buffer.into_bytes());
|
|
||||||
}
|
|
||||||
Ok(chunks)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Render a DuckDB value as the string used for CSV output.
///
/// NULL becomes the empty string, BLOBs are base64-encoded, and any variant
/// not explicitly matched falls back to its Debug representation.
fn value_ref_to_string(value: ValueRef<'_>) -> String {
    match value {
        ValueRef::Null => String::new(),
        ValueRef::Boolean(v) => v.to_string(),
        ValueRef::TinyInt(v) => v.to_string(),
        ValueRef::SmallInt(v) => v.to_string(),
        ValueRef::Int(v) => v.to_string(),
        ValueRef::BigInt(v) => v.to_string(),
        ValueRef::UTinyInt(v) => v.to_string(),
        ValueRef::USmallInt(v) => v.to_string(),
        ValueRef::UInt(v) => v.to_string(),
        ValueRef::UBigInt(v) => v.to_string(),
        ValueRef::Float(v) => v.to_string(),
        ValueRef::Double(v) => v.to_string(),
        ValueRef::Decimal(v) => v.to_string(),
        // Invalid UTF-8 is replaced rather than rejected.
        ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
        ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
        _ => format!("{:?}", value),
    }
}
|
|
||||||
|
|
||||||
/// Convert a DuckDB value into a `serde_json::Value` for JSON output.
///
/// Decimals are emitted as strings; BLOBs are base64-encoded strings; any
/// variant not explicitly matched falls back to its Debug representation
/// wrapped in a string.
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
    match value {
        ValueRef::Null => serde_json::Value::Null,
        ValueRef::Boolean(v) => serde_json::Value::Bool(v),
        ValueRef::TinyInt(v) => serde_json::json!(v),
        ValueRef::SmallInt(v) => serde_json::json!(v),
        ValueRef::Int(v) => serde_json::json!(v),
        ValueRef::BigInt(v) => serde_json::json!(v),
        ValueRef::UTinyInt(v) => serde_json::json!(v),
        ValueRef::USmallInt(v) => serde_json::json!(v),
        ValueRef::UInt(v) => serde_json::json!(v),
        ValueRef::UBigInt(v) => serde_json::json!(v),
        ValueRef::Float(v) => serde_json::json!(v),
        ValueRef::Double(v) => serde_json::json!(v),
        ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
        // Invalid UTF-8 is replaced rather than rejected.
        ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
        ValueRef::Blob(v) => serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v)),
        _ => serde_json::Value::String(format!("{:?}", value)),
    }
}
|
|
||||||
|
|
||||||
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
|
|
||||||
let value = headers
|
|
||||||
.get("content-type")
|
|
||||||
.and_then(|v| v.to_str().ok())
|
|
||||||
.unwrap_or("")
|
|
||||||
.trim();
|
|
||||||
if value.is_empty() {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
let lowered = value.to_ascii_lowercase();
|
|
||||||
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
Some(s3_error_response(S3Error::new(
|
|
||||||
S3ErrorCode::InvalidRequest,
|
|
||||||
"Content-Type must be application/xml or text/xml",
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn s3_error_response(err: S3Error) -> Response {
|
|
||||||
let status = StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
|
|
||||||
let resource = if err.resource.is_empty() {
|
|
||||||
"/".to_string()
|
|
||||||
} else {
|
|
||||||
err.resource.clone()
|
|
||||||
};
|
|
||||||
let body = err
|
|
||||||
.with_resource(resource)
|
|
||||||
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
|
|
||||||
.to_xml();
|
|
||||||
(
|
|
||||||
status,
|
|
||||||
[("content-type", "application/xml")],
|
|
||||||
body,
|
|
||||||
)
|
|
||||||
.into_response()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the `<Stats>` payload for the SelectObjectContent event stream.
/// BytesProcessed is reported with the same value as BytesScanned.
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
    format!(
        "<Stats><BytesScanned>{}</BytesScanned><BytesProcessed>{}</BytesProcessed><BytesReturned>{}</BytesReturned></Stats>",
        bytes_scanned, bytes_scanned, bytes_returned,
    )
}
|
|
||||||
|
|
||||||
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
|
|
||||||
let mut headers = Vec::new();
|
|
||||||
headers.extend(encode_select_header(":event-type", event_type));
|
|
||||||
if event_type == "Records" {
|
|
||||||
headers.extend(encode_select_header(":content-type", "application/octet-stream"));
|
|
||||||
} else if event_type == "Stats" {
|
|
||||||
headers.extend(encode_select_header(":content-type", "text/xml"));
|
|
||||||
}
|
|
||||||
headers.extend(encode_select_header(":message-type", "event"));
|
|
||||||
|
|
||||||
let headers_len = headers.len() as u32;
|
|
||||||
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
|
|
||||||
|
|
||||||
let mut message = Vec::with_capacity(total_len);
|
|
||||||
let mut prelude = Vec::with_capacity(8);
|
|
||||||
prelude.extend((total_len as u32).to_be_bytes());
|
|
||||||
prelude.extend(headers_len.to_be_bytes());
|
|
||||||
|
|
||||||
let prelude_crc = crc32(&prelude);
|
|
||||||
message.extend(prelude);
|
|
||||||
message.extend(prelude_crc.to_be_bytes());
|
|
||||||
message.extend(headers);
|
|
||||||
message.extend(payload);
|
|
||||||
|
|
||||||
let msg_crc = crc32(&message);
|
|
||||||
message.extend(msg_crc.to_be_bytes());
|
|
||||||
message
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Encode one event-stream header: name length (u8), name bytes, value type
/// tag 7 (string), value length (u16 BE), value bytes.
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
    let name_bytes = name.as_bytes();
    let value_bytes = value.as_bytes();
    let mut header = Vec::with_capacity(4 + name_bytes.len() + value_bytes.len());
    header.push(name_bytes.len() as u8);
    header.extend(name_bytes);
    header.push(7); // value type 7 = string
    header.extend((value_bytes.len() as u16).to_be_bytes());
    header.extend(value_bytes);
    header
}
|
|
||||||
|
|
||||||
/// Compute the CRC32 checksum of `data` (via the `Hasher` already imported
/// by this file) as required by the event-stream framing.
fn crc32(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}
|
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
pub mod config;
|
|
||||||
pub mod handlers;
|
|
||||||
pub mod middleware;
|
|
||||||
pub mod services;
|
|
||||||
pub mod state;
|
|
||||||
|
|
||||||
use axum::Router;
|
|
||||||
|
|
||||||
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
|
|
||||||
|
|
||||||
/// Assemble the application router: S3 bucket/object routes, optional KMS
/// routes (gated by config), admin routes, then the auth and server-header
/// middleware layers.
pub fn create_router(state: state::AppState) -> Router {
    // Core S3 surface: service listing, bucket ops, object ops.
    let mut router = Router::new()
        .route("/", axum::routing::get(handlers::list_buckets))
        .route(
            "/{bucket}",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        .route(
            "/{bucket}/{*key}",
            axum::routing::put(handlers::put_object)
                .get(handlers::get_object)
                .delete(handlers::delete_object)
                .head(handlers::head_object)
                .post(handlers::post_object),
        );

    // KMS endpoints are only mounted when enabled in configuration.
    if state.config.kms_enabled {
        router = router
            .route("/kms/keys", axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key))
            .route("/kms/keys/{key_id}", axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key))
            .route("/kms/keys/{key_id}/enable", axum::routing::post(handlers::kms::enable_key))
            .route("/kms/keys/{key_id}/disable", axum::routing::post(handlers::kms::disable_key))
            .route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
            .route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
            .route("/kms/generate-data-key", axum::routing::post(handlers::kms::generate_data_key));
    }

    // Admin surface: site federation, IAM, website domains, GC, integrity.
    router = router
        .route("/admin/site/local", axum::routing::get(handlers::admin::get_local_site).put(handlers::admin::update_local_site))
        .route("/admin/site/all", axum::routing::get(handlers::admin::list_all_sites))
        .route("/admin/site/peers", axum::routing::post(handlers::admin::register_peer_site))
        .route("/admin/site/peers/{site_id}", axum::routing::get(handlers::admin::get_peer_site).put(handlers::admin::update_peer_site).delete(handlers::admin::delete_peer_site))
        .route("/admin/site/peers/{site_id}/health", axum::routing::post(handlers::admin::check_peer_health))
        .route("/admin/site/topology", axum::routing::get(handlers::admin::get_topology))
        .route("/admin/site/peers/{site_id}/bidirectional-status", axum::routing::get(handlers::admin::check_bidirectional_status))
        .route("/admin/iam/users", axum::routing::get(handlers::admin::iam_list_users))
        .route("/admin/iam/users/{identifier}", axum::routing::get(handlers::admin::iam_get_user))
        .route("/admin/iam/users/{identifier}/policies", axum::routing::get(handlers::admin::iam_get_user_policies))
        .route("/admin/iam/users/{identifier}/access-keys", axum::routing::post(handlers::admin::iam_create_access_key))
        .route("/admin/iam/users/{identifier}/access-keys/{access_key}", axum::routing::delete(handlers::admin::iam_delete_access_key))
        .route("/admin/iam/users/{identifier}/disable", axum::routing::post(handlers::admin::iam_disable_user))
        .route("/admin/iam/users/{identifier}/enable", axum::routing::post(handlers::admin::iam_enable_user))
        .route("/admin/website-domains", axum::routing::get(handlers::admin::list_website_domains).post(handlers::admin::create_website_domain))
        .route("/admin/website-domains/{domain}", axum::routing::get(handlers::admin::get_website_domain).put(handlers::admin::update_website_domain).delete(handlers::admin::delete_website_domain))
        .route("/admin/gc/status", axum::routing::get(handlers::admin::gc_status))
        .route("/admin/gc/run", axum::routing::post(handlers::admin::gc_run))
        .route("/admin/gc/history", axum::routing::get(handlers::admin::gc_history))
        .route("/admin/integrity/status", axum::routing::get(handlers::admin::integrity_status))
        .route("/admin/integrity/run", axum::routing::post(handlers::admin::integrity_run))
        .route("/admin/integrity/history", axum::routing::get(handlers::admin::integrity_history));

    // Auth runs on every route (including admin/KMS); server header is added
    // outermost.
    router
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn(middleware::server_header))
        .with_state(state)
}
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
use myfsio_server::config::ServerConfig;
|
|
||||||
use myfsio_server::state::AppState;
|
|
||||||
|
|
||||||
#[tokio::main]
/// Server entry point: load configuration, build application state, start
/// optional background services, bind the listener, and serve until a
/// shutdown signal arrives. All exit paths abort the background tasks.
async fn main() {
    tracing_subscriber::fmt::init();

    let config = ServerConfig::from_env();
    let bind_addr = config.bind_addr;

    tracing::info!("MyFSIO Rust Engine starting on {}", bind_addr);
    tracing::info!("Storage root: {}", config.storage_root.display());
    tracing::info!("Region: {}", config.region);
    tracing::info!(
        "Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics: {}",
        config.encryption_enabled,
        config.kms_enabled,
        config.gc_enabled,
        config.lifecycle_enabled,
        config.integrity_enabled,
        config.metrics_enabled
    );

    // Encryption/KMS state construction is async; the plain path is not.
    let state = if config.encryption_enabled || config.kms_enabled {
        AppState::new_with_encryption(config.clone()).await
    } else {
        AppState::new(config.clone())
    };

    // Handles for background services; aborted on every exit path below.
    let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();

    if let Some(ref gc) = state.gc {
        bg_handles.push(gc.clone().start_background());
        tracing::info!("GC background service started");
    }

    if let Some(ref integrity) = state.integrity {
        bg_handles.push(integrity.clone().start_background());
        tracing::info!("Integrity checker background service started");
    }

    if let Some(ref metrics) = state.metrics {
        bg_handles.push(metrics.clone().start_background());
        tracing::info!("Metrics collector background service started");
    }

    if config.lifecycle_enabled {
        let lifecycle = std::sync::Arc::new(
            myfsio_server::services::lifecycle::LifecycleService::new(
                state.storage.clone(),
                myfsio_server::services::lifecycle::LifecycleConfig::default(),
            ),
        );
        bg_handles.push(lifecycle.start_background());
        tracing::info!("Lifecycle manager background service started");
    }

    let app = myfsio_server::create_router(state);

    let listener = match tokio::net::TcpListener::bind(bind_addr).await {
        Ok(listener) => listener,
        Err(err) => {
            // Friendlier message for the common port-conflict case.
            if err.kind() == std::io::ErrorKind::AddrInUse {
                tracing::error!("Port already in use: {}", bind_addr);
            } else {
                tracing::error!("Failed to bind {}: {}", bind_addr, err);
            }
            for handle in bg_handles {
                handle.abort();
            }
            std::process::exit(1);
        }
    };
    tracing::info!("Listening on {}", bind_addr);

    if let Err(err) = axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
    {
        tracing::error!("Server exited with error: {}", err);
        for handle in bg_handles {
            handle.abort();
        }
        std::process::exit(1);
    }

    // Normal shutdown: stop background services before exiting.
    for handle in bg_handles {
        handle.abort();
    }
}
|
|
||||||
|
|
||||||
/// Resolve when Ctrl+C is received, triggering axum's graceful shutdown.
async fn shutdown_signal() {
    tokio::signal::ctrl_c()
        .await
        .expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutdown signal received");
}
|
|
||||||
@@ -1,569 +0,0 @@
|
|||||||
use axum::extract::{Request, State};
|
|
||||||
use axum::http::{Method, StatusCode};
|
|
||||||
use axum::middleware::Next;
|
|
||||||
use axum::response::{IntoResponse, Response};
|
|
||||||
|
|
||||||
use chrono::{NaiveDateTime, Utc};
|
|
||||||
use myfsio_auth::sigv4;
|
|
||||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
|
||||||
use myfsio_common::types::Principal;
|
|
||||||
|
|
||||||
use crate::state::AppState;
|
|
||||||
|
|
||||||
pub async fn auth_layer(
|
|
||||||
State(state): State<AppState>,
|
|
||||||
mut req: Request,
|
|
||||||
next: Next,
|
|
||||||
) -> Response {
|
|
||||||
let uri = req.uri().clone();
|
|
||||||
let path = uri.path().to_string();
|
|
||||||
|
|
||||||
if path == "/" && req.method() == axum::http::Method::GET {
|
|
||||||
match try_auth(&state, &req) {
|
|
||||||
AuthResult::Ok(principal) => {
|
|
||||||
if let Err(err) = authorize_request(&state, &principal, &req) {
|
|
||||||
return error_response(err, &path);
|
|
||||||
}
|
|
||||||
req.extensions_mut().insert(principal);
|
|
||||||
}
|
|
||||||
AuthResult::Denied(err) => return error_response(err, &path),
|
|
||||||
AuthResult::NoAuth => {
|
|
||||||
return error_response(
|
|
||||||
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
|
|
||||||
&path,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return next.run(req).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
match try_auth(&state, &req) {
|
|
||||||
AuthResult::Ok(principal) => {
|
|
||||||
if let Err(err) = authorize_request(&state, &principal, &req) {
|
|
||||||
return error_response(err, &path);
|
|
||||||
}
|
|
||||||
req.extensions_mut().insert(principal);
|
|
||||||
next.run(req).await
|
|
||||||
}
|
|
||||||
AuthResult::Denied(err) => error_response(err, &path),
|
|
||||||
AuthResult::NoAuth => {
|
|
||||||
error_response(
|
|
||||||
S3Error::new(S3ErrorCode::AccessDenied, "Missing credentials"),
|
|
||||||
&path,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Outcome of credential verification (`try_auth`) for a request.
enum AuthResult {
    // Credentials verified; carries the resolved principal.
    Ok(Principal),
    // Credentials were presented but rejected; carries the error to return.
    Denied(S3Error),
    // No credentials were supplied at all.
    NoAuth,
}
|
|
||||||
|
|
||||||
/// Authorize an authenticated principal for the requested path and method.
///
/// Resolution order: service-level listing (`/`), admin/KMS paths,
/// bucket-level operations, copy-object (needs read on the source object and
/// write on the destination), then plain object operations.
fn authorize_request(state: &AppState, principal: &Principal, req: &Request) -> Result<(), S3Error> {
    let path = req.uri().path();
    // GET / — service-level bucket listing.
    if path == "/" {
        if state.iam.authorize(principal, None, "list", None) {
            return Ok(());
        }
        return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
    }

    // NOTE(review): admin and KMS paths are allowed for ANY authenticated
    // principal with no further IAM check — confirm this is intentional.
    if path.starts_with("/admin/") || path.starts_with("/kms/") {
        return Ok(());
    }

    // First path segment is the bucket; the rest (if any) form the key.
    let mut segments = path.trim_start_matches('/').split('/').filter(|s| !s.is_empty());
    let bucket = match segments.next() {
        Some(b) => b,
        None => {
            return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
        }
    };
    let remaining: Vec<&str> = segments.collect();
    let query = req.uri().query().unwrap_or("");

    // No key segments -> bucket-level operation.
    if remaining.is_empty() {
        let action = resolve_bucket_action(req.method(), query);
        if state.iam.authorize(principal, Some(bucket), action, None) {
            return Ok(());
        }
        return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
    }

    let object_key = remaining.join("/");
    // CopyObject: a PUT carrying x-amz-copy-source requires "read" on the
    // source object and "write" on the destination object.
    if req.method() == Method::PUT {
        if let Some(copy_source) = req
            .headers()
            .get("x-amz-copy-source")
            .and_then(|v| v.to_str().ok())
        {
            let source = copy_source.strip_prefix('/').unwrap_or(copy_source);
            if let Some((src_bucket, src_key)) = source.split_once('/') {
                let source_allowed =
                    state.iam.authorize(principal, Some(src_bucket), "read", Some(src_key));
                let dest_allowed =
                    state.iam.authorize(principal, Some(bucket), "write", Some(&object_key));
                if source_allowed && dest_allowed {
                    return Ok(());
                }
                return Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"));
            }
        }
    }

    // Plain object operation.
    let action = resolve_object_action(req.method(), query);
    if state
        .iam
        .authorize(principal, Some(bucket), action, Some(&object_key))
    {
        return Ok(());
    }

    Err(S3Error::new(S3ErrorCode::AccessDenied, "Access denied"))
}
|
|
||||||
|
|
||||||
fn resolve_bucket_action(method: &Method, query: &str) -> &'static str {
|
|
||||||
if has_query_key(query, "versioning") {
|
|
||||||
return "versioning";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "tagging") {
|
|
||||||
return "tagging";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "cors") {
|
|
||||||
return "cors";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "location") {
|
|
||||||
return "list";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "encryption") {
|
|
||||||
return "encryption";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "lifecycle") {
|
|
||||||
return "lifecycle";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "acl") {
|
|
||||||
return "share";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "policy") || has_query_key(query, "policyStatus") {
|
|
||||||
return "policy";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "replication") {
|
|
||||||
return "replication";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "quota") {
|
|
||||||
return "quota";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "website") {
|
|
||||||
return "website";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "object-lock") {
|
|
||||||
return "object_lock";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "notification") {
|
|
||||||
return "notification";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "logging") {
|
|
||||||
return "logging";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "versions") || has_query_key(query, "uploads") {
|
|
||||||
return "list";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "delete") {
|
|
||||||
return "delete";
|
|
||||||
}
|
|
||||||
|
|
||||||
match *method {
|
|
||||||
Method::GET => "list",
|
|
||||||
Method::HEAD => "read",
|
|
||||||
Method::PUT => "create_bucket",
|
|
||||||
Method::DELETE => "delete_bucket",
|
|
||||||
Method::POST => "write",
|
|
||||||
_ => "list",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn resolve_object_action(method: &Method, query: &str) -> &'static str {
|
|
||||||
if has_query_key(query, "tagging") {
|
|
||||||
return if *method == Method::GET { "read" } else { "write" };
|
|
||||||
}
|
|
||||||
if has_query_key(query, "acl") {
|
|
||||||
return if *method == Method::GET { "read" } else { "write" };
|
|
||||||
}
|
|
||||||
if has_query_key(query, "retention") || has_query_key(query, "legal-hold") {
|
|
||||||
return "object_lock";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "attributes") {
|
|
||||||
return "read";
|
|
||||||
}
|
|
||||||
if has_query_key(query, "uploads") || has_query_key(query, "uploadId") {
|
|
||||||
return match *method {
|
|
||||||
Method::GET => "read",
|
|
||||||
_ => "write",
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if has_query_key(query, "select") {
|
|
||||||
return "read";
|
|
||||||
}
|
|
||||||
|
|
||||||
match *method {
|
|
||||||
Method::GET | Method::HEAD => "read",
|
|
||||||
Method::PUT => "write",
|
|
||||||
Method::DELETE => "delete",
|
|
||||||
Method::POST => "write",
|
|
||||||
_ => "read",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return `true` when `key` appears as a parameter name in the raw query
/// string, either bare (`?acl`) or with a value (`?acl=private`).
///
/// Operates on the undecoded query; names are compared byte-for-byte.
/// The previous implementation allocated a `String` via `format!` for every
/// query part; comparing the segment before the first `=` avoids that, and
/// the empty-query special case falls out naturally (the lone empty part is
/// filtered).
fn has_query_key(query: &str, key: &str) -> bool {
    query
        .split('&')
        .filter(|part| !part.is_empty())
        .any(|part| part.splitn(2, '=').next() == Some(key))
}
|
|
||||||
|
|
||||||
fn try_auth(state: &AppState, req: &Request) -> AuthResult {
|
|
||||||
if let Some(auth_header) = req.headers().get("authorization") {
|
|
||||||
if let Ok(auth_str) = auth_header.to_str() {
|
|
||||||
if auth_str.starts_with("AWS4-HMAC-SHA256 ") {
|
|
||||||
return verify_sigv4_header(state, req, auth_str);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let query = req.uri().query().unwrap_or("");
|
|
||||||
if query.contains("X-Amz-Algorithm=AWS4-HMAC-SHA256") {
|
|
||||||
return verify_sigv4_query(state, req);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let (Some(ak), Some(sk)) = (
|
|
||||||
req.headers().get("x-access-key").and_then(|v| v.to_str().ok()),
|
|
||||||
req.headers().get("x-secret-key").and_then(|v| v.to_str().ok()),
|
|
||||||
) {
|
|
||||||
return match state.iam.authenticate(ak, sk) {
|
|
||||||
Some(principal) => AuthResult::Ok(principal),
|
|
||||||
None => AuthResult::Denied(
|
|
||||||
S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
|
|
||||||
),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
AuthResult::NoAuth
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Verify an AWS SigV4 signature supplied in the `Authorization` header.
///
/// Parses the `Credential=`, `SignedHeaders=` and `Signature=` components,
/// checks timestamp freshness against the configured tolerance, recomputes
/// the signature with the stored secret key, and resolves the principal on
/// success. Any failure is returned as `AuthResult::Denied`.
///
/// NOTE(review): `cred_parts[4]` (the literal `aws4_request` terminator) is
/// never validated here — presumably `sigv4::verify_sigv4_signature` folds it
/// into the scope; confirm.
fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthResult {
    // Caller guarantees the prefix is present, so unwrap() cannot panic here.
    let parts: Vec<&str> = auth_str
        .strip_prefix("AWS4-HMAC-SHA256 ")
        .unwrap()
        .split(", ")
        .collect();

    // Exactly three comma-separated components are expected:
    // Credential, SignedHeaders, Signature.
    if parts.len() != 3 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed Authorization header"),
        );
    }

    let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
    let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
    let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");

    // Credential scope: access-key/date/region/service/aws4_request.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }

    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];

    // Prefer x-amz-date; fall back to the standard Date header.
    let amz_date = req
        .headers()
        .get("x-amz-date")
        .or_else(|| req.headers().get("date"))
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");

    if amz_date.is_empty() {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::AccessDenied, "Missing Date header"),
        );
    }

    // Replay protection: reject requests whose timestamp drifts too far.
    if let Some(err) = check_timestamp_freshness(amz_date, state.config.sigv4_timestamp_tolerance_secs) {
        return AuthResult::Denied(err);
    }

    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };

    let method = req.method().as_str();
    let canonical_uri = req.uri().path();

    let query_params = parse_query_params(req.uri().query().unwrap_or(""));

    // Absent x-amz-content-sha256 falls back to UNSIGNED-PAYLOAD.
    // NOTE(review): confirm sigv4::verify rejects a mismatch between this
    // value and what the client actually signed.
    let payload_hash = req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("UNSIGNED-PAYLOAD");

    // Collect (name, value) for every signed header, in the order listed.
    // A header that is missing from the request contributes an empty value.
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();

    // Delegate canonical-request construction and HMAC comparison.
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );

    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }

    // Signature is valid; resolve the principal for authorization.
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
|
|
||||||
|
|
||||||
/// Verify an AWS SigV4 presigned-URL request (signature in query parameters).
///
/// Validates the presence of the required `X-Amz-*` parameters, enforces the
/// configured expiry window and clock-skew tolerance, recomputes the
/// signature over all query parameters except `X-Amz-Signature`, and resolves
/// the principal on success.
///
/// NOTE(review): if `X-Amz-Date` fails to parse, the expiry check is skipped
/// entirely (the `if let Ok(...)` silently falls through) — the signature
/// check still runs, but confirm this is intended.
fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
    let query = req.uri().query().unwrap_or("");
    let params = parse_query_params(query);
    // Borrowed map for convenient required-parameter lookups.
    let param_map: std::collections::HashMap<&str, &str> = params
        .iter()
        .map(|(k, v)| (k.as_str(), v.as_str()))
        .collect();

    let credential = match param_map.get("X-Amz-Credential") {
        Some(c) => *c,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Credential"),
            );
        }
    };

    // SignedHeaders defaults to "host" per the presigned-URL convention.
    let signed_headers_str = param_map
        .get("X-Amz-SignedHeaders")
        .copied()
        .unwrap_or("host");
    let provided_signature = match param_map.get("X-Amz-Signature") {
        Some(s) => *s,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Signature"),
            );
        }
    };
    let amz_date = match param_map.get("X-Amz-Date") {
        Some(d) => *d,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Date"),
            );
        }
    };
    let expires_str = match param_map.get("X-Amz-Expires") {
        Some(e) => *e,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Expires"),
            );
        }
    };

    // Credential scope: access-key/date/region/service/aws4_request.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }

    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];

    let expires: u64 = match expires_str.parse() {
        Ok(e) => e,
        Err(_) => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Invalid X-Amz-Expires"),
            );
        }
    };

    // Enforce the server-configured bounds on presigned-URL lifetime.
    if expires < state.config.presigned_url_min_expiry
        || expires > state.config.presigned_url_max_expiry
    {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "X-Amz-Expires out of range"),
        );
    }

    // Expiry and clock-skew checks, relative to the signed X-Amz-Date.
    if let Ok(request_time) =
        NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
    {
        let request_utc = request_time.and_utc();
        let now = Utc::now();
        let elapsed = (now - request_utc).num_seconds();
        if elapsed > expires as i64 {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request has expired"),
            );
        }
        // A strongly negative elapsed time means a future-dated request.
        if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request is too far in the future"),
            );
        }
    }

    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };

    let method = req.method().as_str();
    let canonical_uri = req.uri().path();

    // The signature itself is excluded from the canonical query string.
    let query_params_no_sig: Vec<(String, String)> = params
        .iter()
        .filter(|(k, _)| k != "X-Amz-Signature")
        .cloned()
        .collect();

    // Presigned URLs always sign an unsigned payload.
    let payload_hash = "UNSIGNED-PAYLOAD";

    // Collect (name, value) for every signed header, in the order listed.
    // A header that is missing from the request contributes an empty value.
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();

    // Delegate canonical-request construction and HMAC comparison.
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params_no_sig,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );

    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }

    // Signature is valid; resolve the principal for authorization.
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
|
|
||||||
|
|
||||||
fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Error> {
|
|
||||||
let request_time = NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ").ok()?;
|
|
||||||
let request_utc = request_time.and_utc();
|
|
||||||
let now = Utc::now();
|
|
||||||
let diff = (now - request_utc).num_seconds().unsigned_abs();
|
|
||||||
|
|
||||||
if diff > tolerance_secs {
|
|
||||||
return Some(S3Error::new(
|
|
||||||
S3ErrorCode::AccessDenied,
|
|
||||||
"Request timestamp too old or too far in the future",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_query_params(query: &str) -> Vec<(String, String)> {
|
|
||||||
if query.is_empty() {
|
|
||||||
return Vec::new();
|
|
||||||
}
|
|
||||||
query
|
|
||||||
.split('&')
|
|
||||||
.filter_map(|pair| {
|
|
||||||
let mut parts = pair.splitn(2, '=');
|
|
||||||
let key = parts.next()?;
|
|
||||||
let value = parts.next().unwrap_or("");
|
|
||||||
Some((
|
|
||||||
urlencoding_decode(key),
|
|
||||||
urlencoding_decode(value),
|
|
||||||
))
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn urlencoding_decode(s: &str) -> String {
|
|
||||||
percent_encoding::percent_decode_str(s)
|
|
||||||
.decode_utf8_lossy()
|
|
||||||
.into_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Render an `S3Error` as an S3-style XML error response.
///
/// Attaches the request resource and a freshly generated request id, then
/// serializes the error body; an out-of-range status code degrades to 500.
fn error_response(err: S3Error, resource: &str) -> Response {
    let status = StatusCode::from_u16(err.http_status())
        .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    // Every error response carries a unique request id, as real S3 does.
    let request_id = uuid::Uuid::new_v4().simple().to_string();
    let enriched = err
        .with_resource(resource.to_string())
        .with_request_id(request_id);
    let body = enriched.to_xml();
    (status, [("content-type", "application/xml")], body).into_response()
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
mod auth;
|
|
||||||
|
|
||||||
pub use auth::auth_layer;
|
|
||||||
|
|
||||||
use axum::extract::Request;
|
|
||||||
use axum::middleware::Next;
|
|
||||||
use axum::response::Response;
|
|
||||||
|
|
||||||
pub async fn server_header(req: Request, next: Next) -> Response {
|
|
||||||
let mut resp = next.run(req).await;
|
|
||||||
resp.headers_mut().insert(
|
|
||||||
"server",
|
|
||||||
crate::SERVER_HEADER.parse().unwrap(),
|
|
||||||
);
|
|
||||||
resp
|
|
||||||
}
|
|
||||||
@@ -1,263 +0,0 @@
|
|||||||
use serde_json::{json, Value};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Instant;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
/// Tunables for the background garbage collector.
pub struct GcConfig {
    /// How often a GC cycle runs, in hours.
    pub interval_hours: f64,
    /// Temp files older than this many hours are deleted.
    pub temp_file_max_age_hours: f64,
    /// Multipart upload directories older than this many days are deleted.
    pub multipart_max_age_days: u64,
    /// Lock files older than this many hours are deleted.
    pub lock_file_max_age_hours: f64,
    /// When true, cycles report what *would* be deleted without deleting.
    pub dry_run: bool,
}
|
|
||||||
|
|
||||||
impl Default for GcConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
interval_hours: 6.0,
|
|
||||||
temp_file_max_age_hours: 24.0,
|
|
||||||
multipart_max_age_days: 7,
|
|
||||||
lock_file_max_age_hours: 1.0,
|
|
||||||
dry_run: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Background garbage collector for on-disk runtime artifacts
/// (temp files, stale multipart uploads, abandoned lock files).
pub struct GcService {
    /// Root of the storage tree containing the `.myfsio.sys` directory.
    storage_root: PathBuf,
    config: GcConfig,
    /// Guards against overlapping GC cycles.
    running: Arc<RwLock<bool>>,
    /// Recent execution records, persisted to `history_path`.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
|
|
||||||
|
|
||||||
impl GcService {
    /// Create a GC service rooted at `storage_root`, loading any previously
    /// persisted execution history from
    /// `.myfsio.sys/config/gc_history.json` (best-effort: unreadable or
    /// malformed history is silently treated as empty).
    pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("gc_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage_root,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// Current configuration and run state as a JSON object.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "temp_file_max_age_hours": self.config.temp_file_max_age_hours,
            "multipart_max_age_days": self.config.multipart_max_age_days,
            "lock_file_max_age_hours": self.config.lock_file_max_age_hours,
            "dry_run": self.config.dry_run,
        })
    }

    /// Recent execution records as a JSON object.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }

    /// Run one GC cycle immediately.
    ///
    /// Errors if a cycle is already in progress. A cycle runs in dry-run
    /// mode if either this call or the service config requests it. The
    /// result is timed, recorded in the (50-entry, oldest-dropped) history,
    /// and persisted before being returned.
    pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
        // Take the "running" flag in its own scope so the write lock is
        // released before the (potentially long) cycle executes.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("GC already running".to_string());
            }
            *running = true;
        }

        let start = Instant::now();
        let result = self.execute_gc(dry_run || self.config.dry_run).await;
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;

        // Annotate a copy of the result with the wall-clock duration.
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }

        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run || self.config.dry_run,
            "result": result_json,
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Cap the in-memory/persisted history at the 50 newest records.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result)
    }

    /// Perform one sweep over the storage tree and return a JSON summary.
    ///
    /// Sweeps, in order: age-expired temp files under `.myfsio.sys/tmp`,
    /// age-expired multipart upload dirs under `.myfsio.sys/multipart`,
    /// age-expired lock files under each bucket's `locks` dir, then (unless
    /// dry-run) empty subdirectories of tmp/multipart. In dry-run mode the
    /// counters reflect what *would* be deleted. Individual filesystem
    /// failures are collected into `errors` rather than aborting the sweep.
    async fn execute_gc(&self, dry_run: bool) -> Value {
        let mut temp_files_deleted = 0u64;
        let mut temp_bytes_freed = 0u64;
        let mut multipart_uploads_deleted = 0u64;
        let mut lock_files_deleted = 0u64;
        let mut empty_dirs_removed = 0u64;
        let mut errors: Vec<String> = Vec::new();

        // Snapshot "now" once so all age comparisons share one reference.
        let now = std::time::SystemTime::now();
        let temp_max_age = std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
        let multipart_max_age = std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
        let lock_max_age = std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);

        // --- Pass 1: stale temp files (counts bytes freed). ---
        let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
        if tmp_dir.exists() {
            match std::fs::read_dir(&tmp_dir) {
                Ok(entries) => {
                    for entry in entries.flatten() {
                        if let Ok(metadata) = entry.metadata() {
                            if let Ok(modified) = metadata.modified() {
                                if let Ok(age) = now.duration_since(modified) {
                                    if age > temp_max_age {
                                        let size = metadata.len();
                                        if !dry_run {
                                            if let Err(e) = std::fs::remove_file(entry.path()) {
                                                errors.push(format!("Failed to remove temp file: {}", e));
                                                continue;
                                            }
                                        }
                                        temp_files_deleted += 1;
                                        temp_bytes_freed += size;
                                    }
                                }
                            }
                        }
                    }
                }
                Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
            }
        }

        // --- Pass 2: abandoned multipart upload directories (per bucket). ---
        let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
        if multipart_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
                        for upload in uploads.flatten() {
                            if let Ok(metadata) = upload.metadata() {
                                if let Ok(modified) = metadata.modified() {
                                    if let Ok(age) = now.duration_since(modified) {
                                        if age > multipart_max_age {
                                            if !dry_run {
                                                // Best-effort: removal errors are ignored here.
                                                let _ = std::fs::remove_dir_all(upload.path());
                                            }
                                            multipart_uploads_deleted += 1;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // --- Pass 3: stale lock files in each bucket's locks dir. ---
        let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
        if buckets_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    let locks_dir = bucket_entry.path().join("locks");
                    if locks_dir.exists() {
                        if let Ok(locks) = std::fs::read_dir(&locks_dir) {
                            for lock in locks.flatten() {
                                if let Ok(metadata) = lock.metadata() {
                                    if let Ok(modified) = metadata.modified() {
                                        if let Ok(age) = now.duration_since(modified) {
                                            if age > lock_max_age {
                                                if !dry_run {
                                                    // Best-effort: removal errors are ignored here.
                                                    let _ = std::fs::remove_file(lock.path());
                                                }
                                                lock_files_deleted += 1;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // --- Pass 4: prune now-empty subdirectories (never in dry-run). ---
        if !dry_run {
            for dir in [&tmp_dir, &multipart_dir] {
                if dir.exists() {
                    if let Ok(entries) = std::fs::read_dir(dir) {
                        for entry in entries.flatten() {
                            if entry.path().is_dir() {
                                if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
                                    if contents.next().is_none() {
                                        let _ = std::fs::remove_dir(entry.path());
                                        empty_dirs_removed += 1;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        json!({
            "temp_files_deleted": temp_files_deleted,
            "temp_bytes_freed": temp_bytes_freed,
            "multipart_uploads_deleted": multipart_uploads_deleted,
            "lock_files_deleted": lock_files_deleted,
            "empty_dirs_removed": empty_dirs_removed,
            "errors": errors,
        })
    }

    /// Persist the current history to disk (best-effort; failures ignored).
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(&self.history_path, serde_json::to_string_pretty(&data).unwrap_or_default());
    }

    /// Spawn the periodic GC loop on the current Tokio runtime.
    ///
    /// The first tick fires immediately and is consumed before the loop, so
    /// the first real cycle happens one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("GC cycle starting");
                match self.run_now(false).await {
                    Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("GC cycle failed: {}", e),
                }
            }
        })
    }
}
|
|
||||||
@@ -1,204 +0,0 @@
|
|||||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
|
||||||
use myfsio_storage::traits::StorageEngine;
|
|
||||||
use serde_json::{json, Value};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Instant;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
/// Tunables for the background object-integrity scanner.
pub struct IntegrityConfig {
    /// How often a full integrity check runs, in hours.
    pub interval_hours: f64,
    /// Maximum number of objects listed per bucket per scan.
    pub batch_size: usize,
    /// When true, background runs may attempt repairs.
    /// NOTE(review): `check_integrity` currently ignores this flag — confirm.
    pub auto_heal: bool,
    /// When true, report findings without acting on them.
    pub dry_run: bool,
}
|
|
||||||
|
|
||||||
impl Default for IntegrityConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
interval_hours: 24.0,
|
|
||||||
batch_size: 1000,
|
|
||||||
auto_heal: false,
|
|
||||||
dry_run: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Background scanner that verifies stored objects against their recorded
/// etags and detects metadata entries whose data files are missing.
pub struct IntegrityService {
    /// Filesystem storage backend used to enumerate buckets and objects.
    storage: Arc<FsStorageBackend>,
    config: IntegrityConfig,
    /// Guards against overlapping integrity checks.
    running: Arc<RwLock<bool>>,
    /// Recent execution records, persisted to `history_path`.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
|
|
||||||
|
|
||||||
impl IntegrityService {
    /// Create an integrity service, loading any previously persisted
    /// execution history from `.myfsio.sys/config/integrity_history.json`
    /// (best-effort: unreadable or malformed history is treated as empty).
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: &std::path::Path,
        config: IntegrityConfig,
    ) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("integrity_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// Current configuration and run state as a JSON object.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "batch_size": self.config.batch_size,
            "auto_heal": self.config.auto_heal,
            "dry_run": self.config.dry_run,
        })
    }

    /// Recent execution records as a JSON object.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }

    /// Run one integrity check immediately.
    ///
    /// Errors if a check is already in progress. The result is timed,
    /// recorded in the (50-entry, oldest-dropped) history, and persisted
    /// before being returned.
    pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
        // Take the "running" flag in its own scope so the write lock is
        // released before the (potentially long) scan executes.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Integrity check already running".to_string());
            }
            *running = true;
        }

        let start = Instant::now();
        let result = self.check_integrity(dry_run, auto_heal).await;
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;

        // Annotate a copy of the result with the wall-clock duration.
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }

        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run,
            "auto_heal": auto_heal,
            "result": result_json,
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Cap the in-memory/persisted history at the 50 newest records.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result)
    }

    /// Scan up to `batch_size` objects per bucket, comparing each file's
    /// MD5 against its recorded etag and flagging metadata entries whose
    /// data file no longer exists. Returns a JSON summary.
    ///
    /// NOTE(review): `_dry_run` and `_auto_heal` are accepted but unused —
    /// the scan is read-only and never heals; confirm this is intended.
    async fn check_integrity(&self, _dry_run: bool, _auto_heal: bool) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(b) => b,
            Err(e) => return json!({"error": e.to_string()}),
        };

        let mut objects_scanned = 0u64;
        let mut corrupted = 0u64;
        // Metadata entries whose backing data file is missing on disk.
        let mut phantom_metadata = 0u64;
        let mut errors: Vec<String> = Vec::new();

        for bucket in &buckets {
            // Only the first `batch_size` objects per bucket are inspected.
            let params = myfsio_common::types::ListParams {
                max_keys: self.config.batch_size,
                ..Default::default()
            };
            let objects = match self.storage.list_objects(&bucket.name, &params).await {
                Ok(r) => r.objects,
                Err(e) => {
                    errors.push(format!("{}: {}", bucket.name, e));
                    continue;
                }
            };

            for obj in &objects {
                objects_scanned += 1;
                match self.storage.get_object_path(&bucket.name, &obj.key).await {
                    Ok(path) => {
                        if !path.exists() {
                            phantom_metadata += 1;
                        } else if let Some(ref expected_etag) = obj.etag {
                            // Recompute the file's MD5 and compare to the
                            // stored etag; a mismatch marks corruption.
                            match myfsio_crypto::hashing::md5_file(&path) {
                                Ok(actual_etag) => {
                                    if &actual_etag != expected_etag {
                                        corrupted += 1;
                                    }
                                }
                                Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                            }
                        }
                    }
                    Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                }
            }
        }

        json!({
            "objects_scanned": objects_scanned,
            "buckets_scanned": buckets.len(),
            "corrupted_objects": corrupted,
            "phantom_metadata": phantom_metadata,
            "errors": errors,
        })
    }

    /// Persist the current history to disk (best-effort; failures ignored).
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }

    /// Spawn the periodic integrity-check loop on the current Tokio runtime.
    ///
    /// The first tick fires immediately and is consumed before the loop, so
    /// the first real check happens one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Integrity check starting");
                match self.run_now(false, false).await {
                    Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
                    Err(e) => tracing::warn!("Integrity check failed: {}", e),
                }
            }
        })
    }
}
|
|
||||||
@@ -1,153 +0,0 @@
|
|||||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
|
||||||
use myfsio_storage::traits::StorageEngine;
|
|
||||||
use serde_json::{json, Value};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
pub struct LifecycleConfig {
|
|
||||||
pub interval_seconds: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for LifecycleConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
interval_seconds: 3600,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct LifecycleService {
|
|
||||||
storage: Arc<FsStorageBackend>,
|
|
||||||
config: LifecycleConfig,
|
|
||||||
running: Arc<RwLock<bool>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LifecycleService {
|
|
||||||
pub fn new(storage: Arc<FsStorageBackend>, config: LifecycleConfig) -> Self {
|
|
||||||
Self {
|
|
||||||
storage,
|
|
||||||
config,
|
|
||||||
running: Arc::new(RwLock::new(false)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_cycle(&self) -> Result<Value, String> {
|
|
||||||
{
|
|
||||||
let mut running = self.running.write().await;
|
|
||||||
if *running {
|
|
||||||
return Err("Lifecycle already running".to_string());
|
|
||||||
}
|
|
||||||
*running = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = self.evaluate_rules().await;
|
|
||||||
*self.running.write().await = false;
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn evaluate_rules(&self) -> Value {
|
|
||||||
let buckets = match self.storage.list_buckets().await {
|
|
||||||
Ok(b) => b,
|
|
||||||
Err(e) => return json!({"error": e.to_string()}),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut total_expired = 0u64;
|
|
||||||
let mut total_multipart_aborted = 0u64;
|
|
||||||
let mut errors: Vec<String> = Vec::new();
|
|
||||||
|
|
||||||
for bucket in &buckets {
|
|
||||||
let config = match self.storage.get_bucket_config(&bucket.name).await {
|
|
||||||
Ok(c) => c,
|
|
||||||
Err(_) => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
let lifecycle = match &config.lifecycle {
|
|
||||||
Some(lc) => lc,
|
|
||||||
None => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
let rules = match lifecycle.as_str().and_then(|s| serde_json::from_str::<Value>(s).ok()) {
|
|
||||||
Some(v) => v,
|
|
||||||
None => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
let rules_arr = match rules.get("Rules").and_then(|r| r.as_array()) {
|
|
||||||
Some(a) => a.clone(),
|
|
||||||
None => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
for rule in &rules_arr {
|
|
||||||
if rule.get("Status").and_then(|s| s.as_str()) != Some("Enabled") {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let prefix = rule
|
|
||||||
.get("Filter")
|
|
||||||
.and_then(|f| f.get("Prefix"))
|
|
||||||
.and_then(|p| p.as_str())
|
|
||||||
.or_else(|| rule.get("Prefix").and_then(|p| p.as_str()))
|
|
||||||
.unwrap_or("");
|
|
||||||
|
|
||||||
if let Some(exp) = rule.get("Expiration") {
|
|
||||||
if let Some(days) = exp.get("Days").and_then(|d| d.as_u64()) {
|
|
||||||
let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
|
|
||||||
let params = myfsio_common::types::ListParams {
|
|
||||||
max_keys: 1000,
|
|
||||||
prefix: if prefix.is_empty() { None } else { Some(prefix.to_string()) },
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
if let Ok(result) = self.storage.list_objects(&bucket.name, ¶ms).await {
|
|
||||||
for obj in &result.objects {
|
|
||||||
if obj.last_modified < cutoff {
|
|
||||||
match self.storage.delete_object(&bucket.name, &obj.key).await {
|
|
||||||
Ok(()) => total_expired += 1,
|
|
||||||
Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(abort) = rule.get("AbortIncompleteMultipartUpload") {
|
|
||||||
if let Some(days) = abort.get("DaysAfterInitiation").and_then(|d| d.as_u64()) {
|
|
||||||
let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
|
|
||||||
if let Ok(uploads) = self.storage.list_multipart_uploads(&bucket.name).await {
|
|
||||||
for upload in &uploads {
|
|
||||||
if upload.initiated < cutoff {
|
|
||||||
match self.storage.abort_multipart(&bucket.name, &upload.upload_id).await {
|
|
||||||
Ok(()) => total_multipart_aborted += 1,
|
|
||||||
Err(e) => errors.push(format!("abort {}: {}", upload.upload_id, e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
json!({
|
|
||||||
"objects_expired": total_expired,
|
|
||||||
"multipart_aborted": total_multipart_aborted,
|
|
||||||
"buckets_evaluated": buckets.len(),
|
|
||||||
"errors": errors,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
|
|
||||||
let interval = std::time::Duration::from_secs(self.config.interval_seconds);
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let mut timer = tokio::time::interval(interval);
|
|
||||||
timer.tick().await;
|
|
||||||
loop {
|
|
||||||
timer.tick().await;
|
|
||||||
tracing::info!("Lifecycle evaluation starting");
|
|
||||||
match self.run_cycle().await {
|
|
||||||
Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
|
|
||||||
Err(e) => tracing::warn!("Lifecycle cycle failed: {}", e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,219 +0,0 @@
|
|||||||
use serde_json::{json, Value};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Instant;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
pub struct MetricsConfig {
|
|
||||||
pub interval_minutes: u64,
|
|
||||||
pub retention_hours: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for MetricsConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
interval_minutes: 5,
|
|
||||||
retention_hours: 24,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct MethodStats {
|
|
||||||
count: u64,
|
|
||||||
success_count: u64,
|
|
||||||
error_count: u64,
|
|
||||||
bytes_in: u64,
|
|
||||||
bytes_out: u64,
|
|
||||||
latencies: Vec<f64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MethodStats {
|
|
||||||
fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
count: 0,
|
|
||||||
success_count: 0,
|
|
||||||
error_count: 0,
|
|
||||||
bytes_in: 0,
|
|
||||||
bytes_out: 0,
|
|
||||||
latencies: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn to_json(&self) -> Value {
|
|
||||||
let (min, max, avg, p50, p95, p99) = if self.latencies.is_empty() {
|
|
||||||
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
|
|
||||||
} else {
|
|
||||||
let mut sorted = self.latencies.clone();
|
|
||||||
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
|
|
||||||
let len = sorted.len();
|
|
||||||
let sum: f64 = sorted.iter().sum();
|
|
||||||
(
|
|
||||||
sorted[0],
|
|
||||||
sorted[len - 1],
|
|
||||||
sum / len as f64,
|
|
||||||
sorted[len / 2],
|
|
||||||
sorted[((len as f64 * 0.95) as usize).min(len - 1)],
|
|
||||||
sorted[((len as f64 * 0.99) as usize).min(len - 1)],
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
json!({
|
|
||||||
"count": self.count,
|
|
||||||
"success_count": self.success_count,
|
|
||||||
"error_count": self.error_count,
|
|
||||||
"bytes_in": self.bytes_in,
|
|
||||||
"bytes_out": self.bytes_out,
|
|
||||||
"latency_min_ms": min,
|
|
||||||
"latency_max_ms": max,
|
|
||||||
"latency_avg_ms": avg,
|
|
||||||
"latency_p50_ms": p50,
|
|
||||||
"latency_p95_ms": p95,
|
|
||||||
"latency_p99_ms": p99,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct CurrentWindow {
|
|
||||||
by_method: HashMap<String, MethodStats>,
|
|
||||||
by_status_class: HashMap<String, u64>,
|
|
||||||
start_time: Instant,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CurrentWindow {
|
|
||||||
fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
by_method: HashMap::new(),
|
|
||||||
by_status_class: HashMap::new(),
|
|
||||||
start_time: Instant::now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reset(&mut self) {
|
|
||||||
self.by_method.clear();
|
|
||||||
self.by_status_class.clear();
|
|
||||||
self.start_time = Instant::now();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct MetricsService {
|
|
||||||
config: MetricsConfig,
|
|
||||||
current: Arc<RwLock<CurrentWindow>>,
|
|
||||||
snapshots: Arc<RwLock<Vec<Value>>>,
|
|
||||||
snapshots_path: PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MetricsService {
|
|
||||||
pub fn new(storage_root: &std::path::Path, config: MetricsConfig) -> Self {
|
|
||||||
let snapshots_path = storage_root
|
|
||||||
.join(".myfsio.sys")
|
|
||||||
.join("config")
|
|
||||||
.join("operation_metrics.json");
|
|
||||||
|
|
||||||
let snapshots = if snapshots_path.exists() {
|
|
||||||
std::fs::read_to_string(&snapshots_path)
|
|
||||||
.ok()
|
|
||||||
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
|
|
||||||
.and_then(|v| v.get("snapshots").and_then(|s| s.as_array().cloned()))
|
|
||||||
.unwrap_or_default()
|
|
||||||
} else {
|
|
||||||
Vec::new()
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
config,
|
|
||||||
current: Arc::new(RwLock::new(CurrentWindow::new())),
|
|
||||||
snapshots: Arc::new(RwLock::new(snapshots)),
|
|
||||||
snapshots_path,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn record(&self, method: &str, status: u16, latency_ms: f64, bytes_in: u64, bytes_out: u64) {
|
|
||||||
let mut window = self.current.write().await;
|
|
||||||
let stats = window.by_method.entry(method.to_string()).or_insert_with(MethodStats::new);
|
|
||||||
stats.count += 1;
|
|
||||||
if status < 400 {
|
|
||||||
stats.success_count += 1;
|
|
||||||
} else {
|
|
||||||
stats.error_count += 1;
|
|
||||||
}
|
|
||||||
stats.bytes_in += bytes_in;
|
|
||||||
stats.bytes_out += bytes_out;
|
|
||||||
stats.latencies.push(latency_ms);
|
|
||||||
|
|
||||||
let class = format!("{}xx", status / 100);
|
|
||||||
*window.by_status_class.entry(class).or_insert(0) += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn snapshot(&self) -> Value {
|
|
||||||
let window = self.current.read().await;
|
|
||||||
let mut by_method = serde_json::Map::new();
|
|
||||||
for (method, stats) in &window.by_method {
|
|
||||||
by_method.insert(method.clone(), stats.to_json());
|
|
||||||
}
|
|
||||||
|
|
||||||
let snapshots = self.snapshots.read().await;
|
|
||||||
json!({
|
|
||||||
"enabled": true,
|
|
||||||
"current_window": {
|
|
||||||
"by_method": by_method,
|
|
||||||
"by_status_class": window.by_status_class,
|
|
||||||
"window_start_elapsed_secs": window.start_time.elapsed().as_secs_f64(),
|
|
||||||
},
|
|
||||||
"snapshots": *snapshots,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn flush_window(&self) {
|
|
||||||
let snap = {
|
|
||||||
let mut window = self.current.write().await;
|
|
||||||
let mut by_method = serde_json::Map::new();
|
|
||||||
for (method, stats) in &window.by_method {
|
|
||||||
by_method.insert(method.clone(), stats.to_json());
|
|
||||||
}
|
|
||||||
let snap = json!({
|
|
||||||
"timestamp": chrono::Utc::now().to_rfc3339(),
|
|
||||||
"window_seconds": self.config.interval_minutes * 60,
|
|
||||||
"by_method": by_method,
|
|
||||||
"by_status_class": window.by_status_class,
|
|
||||||
});
|
|
||||||
window.reset();
|
|
||||||
snap
|
|
||||||
};
|
|
||||||
|
|
||||||
let max_snapshots = (self.config.retention_hours * 60 / self.config.interval_minutes) as usize;
|
|
||||||
{
|
|
||||||
let mut snapshots = self.snapshots.write().await;
|
|
||||||
snapshots.push(snap);
|
|
||||||
if snapshots.len() > max_snapshots {
|
|
||||||
let excess = snapshots.len() - max_snapshots;
|
|
||||||
snapshots.drain(..excess);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
self.save_snapshots().await;
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn save_snapshots(&self) {
|
|
||||||
let snapshots = self.snapshots.read().await;
|
|
||||||
let data = json!({ "snapshots": *snapshots });
|
|
||||||
if let Some(parent) = self.snapshots_path.parent() {
|
|
||||||
let _ = std::fs::create_dir_all(parent);
|
|
||||||
}
|
|
||||||
let _ = std::fs::write(
|
|
||||||
&self.snapshots_path,
|
|
||||||
serde_json::to_string_pretty(&data).unwrap_or_default(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
|
|
||||||
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let mut timer = tokio::time::interval(interval);
|
|
||||||
timer.tick().await;
|
|
||||||
loop {
|
|
||||||
timer.tick().await;
|
|
||||||
self.flush_window().await;
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
pub mod gc;
|
|
||||||
pub mod lifecycle;
|
|
||||||
pub mod integrity;
|
|
||||||
pub mod metrics;
|
|
||||||
pub mod site_registry;
|
|
||||||
pub mod website_domains;
|
|
||||||
@@ -1,143 +0,0 @@
|
|||||||
use chrono::Utc;
|
|
||||||
use parking_lot::RwLock;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct SiteInfo {
|
|
||||||
pub site_id: String,
|
|
||||||
pub endpoint: String,
|
|
||||||
#[serde(default = "default_region")]
|
|
||||||
pub region: String,
|
|
||||||
#[serde(default = "default_priority")]
|
|
||||||
pub priority: i32,
|
|
||||||
#[serde(default)]
|
|
||||||
pub display_name: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub created_at: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_region() -> String {
|
|
||||||
"us-east-1".to_string()
|
|
||||||
}
|
|
||||||
fn default_priority() -> i32 {
|
|
||||||
100
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct PeerSite {
|
|
||||||
pub site_id: String,
|
|
||||||
pub endpoint: String,
|
|
||||||
#[serde(default = "default_region")]
|
|
||||||
pub region: String,
|
|
||||||
#[serde(default = "default_priority")]
|
|
||||||
pub priority: i32,
|
|
||||||
#[serde(default)]
|
|
||||||
pub display_name: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub connection_id: Option<String>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub created_at: Option<String>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub is_healthy: bool,
|
|
||||||
#[serde(default)]
|
|
||||||
pub last_health_check: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
|
||||||
struct RegistryData {
|
|
||||||
#[serde(default)]
|
|
||||||
local: Option<SiteInfo>,
|
|
||||||
#[serde(default)]
|
|
||||||
peers: Vec<PeerSite>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct SiteRegistry {
|
|
||||||
path: PathBuf,
|
|
||||||
data: Arc<RwLock<RegistryData>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SiteRegistry {
|
|
||||||
pub fn new(storage_root: &std::path::Path) -> Self {
|
|
||||||
let path = storage_root
|
|
||||||
.join(".myfsio.sys")
|
|
||||||
.join("config")
|
|
||||||
.join("site_registry.json");
|
|
||||||
let data = if path.exists() {
|
|
||||||
std::fs::read_to_string(&path)
|
|
||||||
.ok()
|
|
||||||
.and_then(|s| serde_json::from_str(&s).ok())
|
|
||||||
.unwrap_or_default()
|
|
||||||
} else {
|
|
||||||
RegistryData::default()
|
|
||||||
};
|
|
||||||
Self {
|
|
||||||
path,
|
|
||||||
data: Arc::new(RwLock::new(data)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn save(&self) {
|
|
||||||
let data = self.data.read();
|
|
||||||
if let Some(parent) = self.path.parent() {
|
|
||||||
let _ = std::fs::create_dir_all(parent);
|
|
||||||
}
|
|
||||||
if let Ok(json) = serde_json::to_string_pretty(&*data) {
|
|
||||||
let _ = std::fs::write(&self.path, json);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_local_site(&self) -> Option<SiteInfo> {
|
|
||||||
self.data.read().local.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_local_site(&self, site: SiteInfo) {
|
|
||||||
self.data.write().local = Some(site);
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn list_peers(&self) -> Vec<PeerSite> {
|
|
||||||
self.data.read().peers.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_peer(&self, site_id: &str) -> Option<PeerSite> {
|
|
||||||
self.data.read().peers.iter().find(|p| p.site_id == site_id).cloned()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_peer(&self, peer: PeerSite) {
|
|
||||||
self.data.write().peers.push(peer);
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn update_peer(&self, peer: PeerSite) {
|
|
||||||
let mut data = self.data.write();
|
|
||||||
if let Some(existing) = data.peers.iter_mut().find(|p| p.site_id == peer.site_id) {
|
|
||||||
*existing = peer;
|
|
||||||
}
|
|
||||||
drop(data);
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn delete_peer(&self, site_id: &str) -> bool {
|
|
||||||
let mut data = self.data.write();
|
|
||||||
let len_before = data.peers.len();
|
|
||||||
data.peers.retain(|p| p.site_id != site_id);
|
|
||||||
let removed = data.peers.len() < len_before;
|
|
||||||
drop(data);
|
|
||||||
if removed {
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
removed
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn update_health(&self, site_id: &str, is_healthy: bool) {
|
|
||||||
let mut data = self.data.write();
|
|
||||||
if let Some(peer) = data.peers.iter_mut().find(|p| p.site_id == site_id) {
|
|
||||||
peer.is_healthy = is_healthy;
|
|
||||||
peer.last_health_check = Some(Utc::now().to_rfc3339());
|
|
||||||
}
|
|
||||||
drop(data);
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,104 +0,0 @@
|
|||||||
use parking_lot::RwLock;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
|
||||||
struct DomainData {
|
|
||||||
#[serde(default)]
|
|
||||||
mappings: HashMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct WebsiteDomainStore {
|
|
||||||
path: PathBuf,
|
|
||||||
data: Arc<RwLock<DomainData>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WebsiteDomainStore {
|
|
||||||
pub fn new(storage_root: &std::path::Path) -> Self {
|
|
||||||
let path = storage_root
|
|
||||||
.join(".myfsio.sys")
|
|
||||||
.join("config")
|
|
||||||
.join("website_domains.json");
|
|
||||||
let data = if path.exists() {
|
|
||||||
std::fs::read_to_string(&path)
|
|
||||||
.ok()
|
|
||||||
.and_then(|s| serde_json::from_str(&s).ok())
|
|
||||||
.unwrap_or_default()
|
|
||||||
} else {
|
|
||||||
DomainData::default()
|
|
||||||
};
|
|
||||||
Self {
|
|
||||||
path,
|
|
||||||
data: Arc::new(RwLock::new(data)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn save(&self) {
|
|
||||||
let data = self.data.read();
|
|
||||||
if let Some(parent) = self.path.parent() {
|
|
||||||
let _ = std::fs::create_dir_all(parent);
|
|
||||||
}
|
|
||||||
if let Ok(json) = serde_json::to_string_pretty(&*data) {
|
|
||||||
let _ = std::fs::write(&self.path, json);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn list_all(&self) -> Vec<serde_json::Value> {
|
|
||||||
self.data
|
|
||||||
.read()
|
|
||||||
.mappings
|
|
||||||
.iter()
|
|
||||||
.map(|(domain, bucket)| {
|
|
||||||
serde_json::json!({
|
|
||||||
"domain": domain,
|
|
||||||
"bucket": bucket,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_bucket(&self, domain: &str) -> Option<String> {
|
|
||||||
self.data.read().mappings.get(domain).cloned()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_mapping(&self, domain: &str, bucket: &str) {
|
|
||||||
self.data.write().mappings.insert(domain.to_string(), bucket.to_string());
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn delete_mapping(&self, domain: &str) -> bool {
|
|
||||||
let removed = self.data.write().mappings.remove(domain).is_some();
|
|
||||||
if removed {
|
|
||||||
self.save();
|
|
||||||
}
|
|
||||||
removed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn normalize_domain(domain: &str) -> String {
|
|
||||||
domain.trim().to_ascii_lowercase()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn is_valid_domain(domain: &str) -> bool {
|
|
||||||
if domain.is_empty() || domain.len() > 253 {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
let labels: Vec<&str> = domain.split('.').collect();
|
|
||||||
if labels.len() < 2 {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
for label in &labels {
|
|
||||||
if label.is_empty() || label.len() > 63 {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if !label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if label.starts_with('-') || label.ends_with('-') {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
true
|
|
||||||
}
|
|
||||||
@@ -1,121 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use crate::config::ServerConfig;
|
|
||||||
use crate::services::gc::GcService;
|
|
||||||
use crate::services::integrity::IntegrityService;
|
|
||||||
use crate::services::metrics::MetricsService;
|
|
||||||
use crate::services::site_registry::SiteRegistry;
|
|
||||||
use crate::services::website_domains::WebsiteDomainStore;
|
|
||||||
use myfsio_auth::iam::IamService;
|
|
||||||
use myfsio_crypto::encryption::EncryptionService;
|
|
||||||
use myfsio_crypto::kms::KmsService;
|
|
||||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct AppState {
|
|
||||||
pub config: ServerConfig,
|
|
||||||
pub storage: Arc<FsStorageBackend>,
|
|
||||||
pub iam: Arc<IamService>,
|
|
||||||
pub encryption: Option<Arc<EncryptionService>>,
|
|
||||||
pub kms: Option<Arc<KmsService>>,
|
|
||||||
pub gc: Option<Arc<GcService>>,
|
|
||||||
pub integrity: Option<Arc<IntegrityService>>,
|
|
||||||
pub metrics: Option<Arc<MetricsService>>,
|
|
||||||
pub site_registry: Option<Arc<SiteRegistry>>,
|
|
||||||
pub website_domains: Option<Arc<WebsiteDomainStore>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AppState {
|
|
||||||
pub fn new(config: ServerConfig) -> Self {
|
|
||||||
let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
|
|
||||||
let iam = Arc::new(IamService::new_with_secret(
|
|
||||||
config.iam_config_path.clone(),
|
|
||||||
config.secret_key.clone(),
|
|
||||||
));
|
|
||||||
|
|
||||||
let gc = if config.gc_enabled {
|
|
||||||
Some(Arc::new(GcService::new(
|
|
||||||
config.storage_root.clone(),
|
|
||||||
crate::services::gc::GcConfig::default(),
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let integrity = if config.integrity_enabled {
|
|
||||||
Some(Arc::new(IntegrityService::new(
|
|
||||||
storage.clone(),
|
|
||||||
&config.storage_root,
|
|
||||||
crate::services::integrity::IntegrityConfig::default(),
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let metrics = if config.metrics_enabled {
|
|
||||||
Some(Arc::new(MetricsService::new(
|
|
||||||
&config.storage_root,
|
|
||||||
crate::services::metrics::MetricsConfig::default(),
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let site_registry = Some(Arc::new(SiteRegistry::new(&config.storage_root)));
|
|
||||||
|
|
||||||
let website_domains = if config.website_hosting_enabled {
|
|
||||||
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
config,
|
|
||||||
storage,
|
|
||||||
iam,
|
|
||||||
encryption: None,
|
|
||||||
kms: None,
|
|
||||||
gc,
|
|
||||||
integrity,
|
|
||||||
metrics,
|
|
||||||
site_registry,
|
|
||||||
website_domains,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn new_with_encryption(config: ServerConfig) -> Self {
|
|
||||||
let mut state = Self::new(config.clone());
|
|
||||||
|
|
||||||
let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");
|
|
||||||
|
|
||||||
let kms = if config.kms_enabled {
|
|
||||||
match KmsService::new(&keys_dir).await {
|
|
||||||
Ok(k) => Some(Arc::new(k)),
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Failed to initialize KMS: {}", e);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let encryption = if config.encryption_enabled {
|
|
||||||
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
|
|
||||||
Ok(master_key) => {
|
|
||||||
Some(Arc::new(EncryptionService::new(master_key, kms.clone())))
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Failed to initialize encryption: {}", e);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
state.encryption = encryption;
|
|
||||||
state.kms = kms;
|
|
||||||
state
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,26 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "myfsio-storage"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
myfsio-common = { path = "../myfsio-common" }
|
|
||||||
myfsio-crypto = { path = "../myfsio-crypto" }
|
|
||||||
serde = { workspace = true }
|
|
||||||
serde_json = { workspace = true }
|
|
||||||
tokio = { workspace = true }
|
|
||||||
dashmap = { workspace = true }
|
|
||||||
parking_lot = { workspace = true }
|
|
||||||
uuid = { workspace = true }
|
|
||||||
chrono = { workspace = true }
|
|
||||||
thiserror = { workspace = true }
|
|
||||||
tracing = { workspace = true }
|
|
||||||
regex = { workspace = true }
|
|
||||||
unicode-normalization = { workspace = true }
|
|
||||||
md-5 = { workspace = true }
|
|
||||||
sha2 = { workspace = true }
|
|
||||||
hex = { workspace = true }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
|
|
||||||
tempfile = "3"
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
|
||||||
use thiserror::Error;
|
|
||||||
|
|
||||||
#[derive(Debug, Error)]
|
|
||||||
pub enum StorageError {
|
|
||||||
#[error("Bucket not found: {0}")]
|
|
||||||
BucketNotFound(String),
|
|
||||||
#[error("Bucket already exists: {0}")]
|
|
||||||
BucketAlreadyExists(String),
|
|
||||||
#[error("Bucket not empty: {0}")]
|
|
||||||
BucketNotEmpty(String),
|
|
||||||
#[error("Object not found: {bucket}/{key}")]
|
|
||||||
ObjectNotFound { bucket: String, key: String },
|
|
||||||
#[error("Invalid bucket name: {0}")]
|
|
||||||
InvalidBucketName(String),
|
|
||||||
#[error("Invalid object key: {0}")]
|
|
||||||
InvalidObjectKey(String),
|
|
||||||
#[error("Upload not found: {0}")]
|
|
||||||
UploadNotFound(String),
|
|
||||||
#[error("Quota exceeded: {0}")]
|
|
||||||
QuotaExceeded(String),
|
|
||||||
#[error("IO error: {0}")]
|
|
||||||
Io(#[from] std::io::Error),
|
|
||||||
#[error("JSON error: {0}")]
|
|
||||||
Json(#[from] serde_json::Error),
|
|
||||||
#[error("Internal error: {0}")]
|
|
||||||
Internal(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<StorageError> for S3Error {
|
|
||||||
fn from(err: StorageError) -> Self {
|
|
||||||
match err {
|
|
||||||
StorageError::BucketNotFound(name) => {
|
|
||||||
S3Error::from_code(S3ErrorCode::NoSuchBucket).with_resource(format!("/{}", name))
|
|
||||||
}
|
|
||||||
StorageError::BucketAlreadyExists(name) => {
|
|
||||||
S3Error::from_code(S3ErrorCode::BucketAlreadyExists)
|
|
||||||
.with_resource(format!("/{}", name))
|
|
||||||
}
|
|
||||||
StorageError::BucketNotEmpty(name) => {
|
|
||||||
S3Error::from_code(S3ErrorCode::BucketNotEmpty)
|
|
||||||
.with_resource(format!("/{}", name))
|
|
||||||
}
|
|
||||||
StorageError::ObjectNotFound { bucket, key } => {
|
|
||||||
S3Error::from_code(S3ErrorCode::NoSuchKey)
|
|
||||||
.with_resource(format!("/{}/{}", bucket, key))
|
|
||||||
}
|
|
||||||
StorageError::InvalidBucketName(msg) => S3Error::new(S3ErrorCode::InvalidBucketName, msg),
|
|
||||||
StorageError::InvalidObjectKey(msg) => S3Error::new(S3ErrorCode::InvalidKey, msg),
|
|
||||||
StorageError::UploadNotFound(id) => {
|
|
||||||
S3Error::new(S3ErrorCode::NoSuchUpload, format!("Upload {} not found", id))
|
|
||||||
}
|
|
||||||
StorageError::QuotaExceeded(msg) => S3Error::new(S3ErrorCode::QuotaExceeded, msg),
|
|
||||||
StorageError::Io(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
|
|
||||||
StorageError::Json(e) => S3Error::new(S3ErrorCode::InternalError, e.to_string()),
|
|
||||||
StorageError::Internal(msg) => S3Error::new(S3ErrorCode::InternalError, msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +0,0 @@
|
|||||||
pub mod validation;
|
|
||||||
pub mod traits;
|
|
||||||
pub mod error;
|
|
||||||
pub mod fs_backend;
|
|
||||||
@@ -1,125 +0,0 @@
|
|||||||
use crate::error::StorageError;
|
|
||||||
use myfsio_common::types::*;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use tokio::io::AsyncRead;
|
|
||||||
|
|
||||||
pub type StorageResult<T> = Result<T, StorageError>;
|
|
||||||
pub type AsyncReadStream = Pin<Box<dyn AsyncRead + Send>>;
|
|
||||||
|
|
||||||
#[allow(async_fn_in_trait)]
|
|
||||||
pub trait StorageEngine: Send + Sync {
|
|
||||||
async fn list_buckets(&self) -> StorageResult<Vec<BucketMeta>>;
|
|
||||||
async fn create_bucket(&self, name: &str) -> StorageResult<()>;
|
|
||||||
async fn delete_bucket(&self, name: &str) -> StorageResult<()>;
|
|
||||||
async fn bucket_exists(&self, name: &str) -> StorageResult<bool>;
|
|
||||||
async fn bucket_stats(&self, name: &str) -> StorageResult<BucketStats>;
|
|
||||||
|
|
||||||
async fn put_object(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
stream: AsyncReadStream,
|
|
||||||
metadata: Option<HashMap<String, String>>,
|
|
||||||
) -> StorageResult<ObjectMeta>;
|
|
||||||
|
|
||||||
async fn get_object(&self, bucket: &str, key: &str) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
|
|
||||||
|
|
||||||
async fn get_object_path(&self, bucket: &str, key: &str) -> StorageResult<PathBuf>;
|
|
||||||
|
|
||||||
async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;
|
|
||||||
|
|
||||||
async fn delete_object(&self, bucket: &str, key: &str) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn copy_object(
|
|
||||||
&self,
|
|
||||||
src_bucket: &str,
|
|
||||||
src_key: &str,
|
|
||||||
dst_bucket: &str,
|
|
||||||
dst_key: &str,
|
|
||||||
) -> StorageResult<ObjectMeta>;
|
|
||||||
|
|
||||||
async fn get_object_metadata(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
) -> StorageResult<HashMap<String, String>>;
|
|
||||||
|
|
||||||
async fn put_object_metadata(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
metadata: &HashMap<String, String>,
|
|
||||||
) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn list_objects(&self, bucket: &str, params: &ListParams) -> StorageResult<ListObjectsResult>;
|
|
||||||
|
|
||||||
async fn list_objects_shallow(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
params: &ShallowListParams,
|
|
||||||
) -> StorageResult<ShallowListResult>;
|
|
||||||
|
|
||||||
async fn initiate_multipart(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
metadata: Option<HashMap<String, String>>,
|
|
||||||
) -> StorageResult<String>;
|
|
||||||
|
|
||||||
async fn upload_part(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
upload_id: &str,
|
|
||||||
part_number: u32,
|
|
||||||
stream: AsyncReadStream,
|
|
||||||
) -> StorageResult<String>;
|
|
||||||
|
|
||||||
async fn complete_multipart(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
upload_id: &str,
|
|
||||||
parts: &[PartInfo],
|
|
||||||
) -> StorageResult<ObjectMeta>;
|
|
||||||
|
|
||||||
async fn abort_multipart(&self, bucket: &str, upload_id: &str) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn list_parts(&self, bucket: &str, upload_id: &str) -> StorageResult<Vec<PartMeta>>;
|
|
||||||
|
|
||||||
async fn list_multipart_uploads(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
) -> StorageResult<Vec<MultipartUploadInfo>>;
|
|
||||||
|
|
||||||
async fn get_bucket_config(&self, bucket: &str) -> StorageResult<BucketConfig>;
|
|
||||||
async fn set_bucket_config(&self, bucket: &str, config: &BucketConfig) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn is_versioning_enabled(&self, bucket: &str) -> StorageResult<bool>;
|
|
||||||
async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn list_object_versions(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
) -> StorageResult<Vec<VersionInfo>>;
|
|
||||||
|
|
||||||
async fn get_object_tags(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
) -> StorageResult<Vec<Tag>>;
|
|
||||||
|
|
||||||
async fn set_object_tags(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
tags: &[Tag],
|
|
||||||
) -> StorageResult<()>;
|
|
||||||
|
|
||||||
async fn delete_object_tags(
|
|
||||||
&self,
|
|
||||||
bucket: &str,
|
|
||||||
key: &str,
|
|
||||||
) -> StorageResult<()>;
|
|
||||||
}
|
|
||||||
@@ -1,194 +0,0 @@
|
|||||||
use std::sync::LazyLock;
|
|
||||||
use unicode_normalization::UnicodeNormalization;
|
|
||||||
|
|
||||||
// Filename stems that Windows reserves as device names regardless of
// extension (e.g. "CON.txt" is still invalid). COM0/LPT0 are included
// defensively alongside the classic COM1-9/LPT1-9 set.
const WINDOWS_RESERVED: &[&str] = &[
    "CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
    "COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
    "LPT9",
];

// Characters that cannot appear anywhere in a path segment on Windows
// filesystems.
const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];

// Top-level folder names used for internal bookkeeping; object keys may not
// start with any of them.
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

// Reserved system root folder; object keys may not start with it either.
const SYSTEM_ROOT: &str = ".myfsio.sys";

// Dotted-quad matcher (e.g. "192.168.1.1") used to reject bucket names that
// look like IPv4 addresses. Compiled lazily, once.
static IP_REGEX: LazyLock<regex::Regex> =
    LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());
|
|
||||||
|
|
||||||
pub fn validate_object_key(
|
|
||||||
object_key: &str,
|
|
||||||
max_length_bytes: usize,
|
|
||||||
is_windows: bool,
|
|
||||||
reserved_prefixes: Option<&[&str]>,
|
|
||||||
) -> Option<String> {
|
|
||||||
if object_key.is_empty() {
|
|
||||||
return Some("Object key required".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if object_key.contains('\0') {
|
|
||||||
return Some("Object key contains null bytes".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let normalized: String = object_key.nfc().collect();
|
|
||||||
|
|
||||||
if normalized.len() > max_length_bytes {
|
|
||||||
return Some(format!(
|
|
||||||
"Object key exceeds maximum length of {} bytes",
|
|
||||||
max_length_bytes
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
if normalized.starts_with('/') || normalized.starts_with('\\') {
|
|
||||||
return Some("Object key cannot start with a slash".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let parts: Vec<&str> = if cfg!(windows) || is_windows {
|
|
||||||
normalized.split(['/', '\\']).collect()
|
|
||||||
} else {
|
|
||||||
normalized.split('/').collect()
|
|
||||||
};
|
|
||||||
|
|
||||||
for part in &parts {
|
|
||||||
if part.is_empty() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if *part == ".." {
|
|
||||||
return Some("Object key contains parent directory references".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if *part == "." {
|
|
||||||
return Some("Object key contains invalid segments".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if part.chars().any(|c| (c as u32) < 32) {
|
|
||||||
return Some("Object key contains control characters".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if is_windows {
|
|
||||||
if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
|
|
||||||
return Some(
|
|
||||||
"Object key contains characters not supported on Windows filesystems"
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if part.ends_with(' ') || part.ends_with('.') {
|
|
||||||
return Some(
|
|
||||||
"Object key segments cannot end with spaces or periods on Windows".to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
|
|
||||||
if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
|
|
||||||
return Some(format!("Invalid filename segment: {}", part));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
|
|
||||||
if let Some(top) = non_empty_parts.first() {
|
|
||||||
if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
|
|
||||||
return Some("Object key uses a reserved prefix".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(prefixes) = reserved_prefixes {
|
|
||||||
for prefix in prefixes {
|
|
||||||
if *top == *prefix {
|
|
||||||
return Some("Object key uses a reserved prefix".to_string());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
|
|
||||||
let len = bucket_name.len();
|
|
||||||
if len < 3 || len > 63 {
|
|
||||||
return Some("Bucket name must be between 3 and 63 characters".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
let bytes = bucket_name.as_bytes();
|
|
||||||
if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
|
|
||||||
return Some(
|
|
||||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
|
|
||||||
return Some(
|
|
||||||
"Bucket name must start and end with a lowercase letter or digit".to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
for &b in bytes {
|
|
||||||
if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
|
|
||||||
return Some(
|
|
||||||
"Bucket name can only contain lowercase letters, digits, dots, and hyphens"
|
|
||||||
.to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if bucket_name.contains("..") {
|
|
||||||
return Some("Bucket name must not contain consecutive periods".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
if IP_REGEX.is_match(bucket_name) {
|
|
||||||
return Some("Bucket name must not be formatted as an IP address".to_string());
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;

    // Names that satisfy every bucket-name rule.
    #[test]
    fn test_valid_bucket_names() {
        assert!(validate_bucket_name("my-bucket").is_none());
        assert!(validate_bucket_name("test123").is_none());
        assert!(validate_bucket_name("my.bucket.name").is_none());
    }

    // Each case breaks one rule: too short, uppercase, bad leading/trailing
    // character, consecutive dots, IPv4-shaped.
    #[test]
    fn test_invalid_bucket_names() {
        assert!(validate_bucket_name("ab").is_some());
        assert!(validate_bucket_name("My-Bucket").is_some());
        assert!(validate_bucket_name("-bucket").is_some());
        assert!(validate_bucket_name("bucket-").is_some());
        assert!(validate_bucket_name("my..bucket").is_some());
        assert!(validate_bucket_name("192.168.1.1").is_some());
    }

    // Plain and nested keys pass on non-Windows with default limits.
    #[test]
    fn test_valid_object_keys() {
        assert!(validate_object_key("file.txt", 1024, false, None).is_none());
        assert!(validate_object_key("path/to/file.txt", 1024, false, None).is_none());
        assert!(validate_object_key("a", 1024, false, None).is_none());
    }

    // Empty keys, absolute paths, traversal, and reserved internal
    // top-level folders are all rejected.
    #[test]
    fn test_invalid_object_keys() {
        assert!(validate_object_key("", 1024, false, None).is_some());
        assert!(validate_object_key("/leading-slash", 1024, false, None).is_some());
        assert!(validate_object_key("path/../escape", 1024, false, None).is_some());
        assert!(validate_object_key(".myfsio.sys/secret", 1024, false, None).is_some());
        assert!(validate_object_key(".meta/data", 1024, false, None).is_some());
    }

    // The byte limit is inclusive: exactly the limit passes, one over fails.
    #[test]
    fn test_object_key_max_length() {
        let long_key = "a".repeat(1025);
        assert!(validate_object_key(&long_key, 1024, false, None).is_some());
        let ok_key = "a".repeat(1024);
        assert!(validate_object_key(&ok_key, 1024, false, None).is_none());
    }

    // Windows mode rejects reserved device names, illegal characters, and
    // trailing spaces.
    #[test]
    fn test_windows_validation() {
        assert!(validate_object_key("CON", 1024, true, None).is_some());
        assert!(validate_object_key("file<name", 1024, true, None).is_some());
        assert!(validate_object_key("file.txt ", 1024, true, None).is_some());
    }
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "myfsio-xml"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
myfsio-common = { path = "../myfsio-common" }
|
|
||||||
quick-xml = { workspace = true }
|
|
||||||
serde = { workspace = true }
|
|
||||||
chrono = { workspace = true }
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
pub mod response;
|
|
||||||
pub mod request;
|
|
||||||
|
|
||||||
use quick_xml::Writer;
|
|
||||||
use std::io::Cursor;
|
|
||||||
|
|
||||||
pub fn write_xml_element(tag: &str, text: &str) -> String {
|
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
|
||||||
writer
|
|
||||||
.create_element(tag)
|
|
||||||
.write_text_content(quick_xml::events::BytesText::new(text))
|
|
||||||
.unwrap();
|
|
||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
|
||||||
}
|
|
||||||
@@ -1,159 +0,0 @@
|
|||||||
use quick_xml::events::Event;
|
|
||||||
use quick_xml::Reader;
|
|
||||||
|
|
||||||
/// Parsed body of an S3 multi-object delete (`DeleteObjects`) request.
#[derive(Debug, Default)]
pub struct DeleteObjectsRequest {
    // Objects the client asked to delete.
    pub objects: Vec<ObjectIdentifier>,
    // When true, the response omits per-object success entries.
    pub quiet: bool,
}

/// One object (optionally version-qualified) within a delete request.
#[derive(Debug)]
pub struct ObjectIdentifier {
    pub key: String,
    pub version_id: Option<String>,
}

/// Parsed body of an S3 `CompleteMultipartUpload` request.
#[derive(Debug, Default)]
pub struct CompleteMultipartUpload {
    // The parts the client uploaded, as listed in the request body.
    pub parts: Vec<CompletedPart>,
}

/// A single `<Part>` entry of a `CompleteMultipartUpload` body.
#[derive(Debug)]
pub struct CompletedPart {
    pub part_number: u32,
    // ETag with surrounding quotes already stripped by the parser.
    pub etag: String,
}
|
|
||||||
|
|
||||||
pub fn parse_complete_multipart_upload(xml: &str) -> Result<CompleteMultipartUpload, String> {
|
|
||||||
let mut reader = Reader::from_str(xml);
|
|
||||||
let mut result = CompleteMultipartUpload::default();
|
|
||||||
let mut buf = Vec::new();
|
|
||||||
let mut current_tag = String::new();
|
|
||||||
let mut part_number: Option<u32> = None;
|
|
||||||
let mut etag: Option<String> = None;
|
|
||||||
let mut in_part = false;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
match reader.read_event_into(&mut buf) {
|
|
||||||
Ok(Event::Start(ref e)) => {
|
|
||||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
|
||||||
current_tag = name.clone();
|
|
||||||
if name == "Part" {
|
|
||||||
in_part = true;
|
|
||||||
part_number = None;
|
|
||||||
etag = None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::Text(ref e)) => {
|
|
||||||
if in_part {
|
|
||||||
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
|
||||||
match current_tag.as_str() {
|
|
||||||
"PartNumber" => {
|
|
||||||
part_number = Some(text.trim().parse().map_err(|e: std::num::ParseIntError| e.to_string())?);
|
|
||||||
}
|
|
||||||
"ETag" => {
|
|
||||||
etag = Some(text.trim().trim_matches('"').to_string());
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::End(ref e)) => {
|
|
||||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
|
||||||
if name == "Part" && in_part {
|
|
||||||
if let (Some(pn), Some(et)) = (part_number.take(), etag.take()) {
|
|
||||||
result.parts.push(CompletedPart {
|
|
||||||
part_number: pn,
|
|
||||||
etag: et,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
in_part = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::Eof) => break,
|
|
||||||
Err(e) => return Err(format!("XML parse error: {}", e)),
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
buf.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
result.parts.sort_by_key(|p| p.part_number);
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn parse_delete_objects(xml: &str) -> Result<DeleteObjectsRequest, String> {
|
|
||||||
let mut reader = Reader::from_str(xml);
|
|
||||||
let mut result = DeleteObjectsRequest::default();
|
|
||||||
let mut buf = Vec::new();
|
|
||||||
let mut current_tag = String::new();
|
|
||||||
let mut current_key: Option<String> = None;
|
|
||||||
let mut current_version_id: Option<String> = None;
|
|
||||||
let mut in_object = false;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
match reader.read_event_into(&mut buf) {
|
|
||||||
Ok(Event::Start(ref e)) => {
|
|
||||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
|
||||||
current_tag = name.clone();
|
|
||||||
if name == "Object" {
|
|
||||||
in_object = true;
|
|
||||||
current_key = None;
|
|
||||||
current_version_id = None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::Text(ref e)) => {
|
|
||||||
let text = e.unescape().map_err(|e| e.to_string())?.to_string();
|
|
||||||
match current_tag.as_str() {
|
|
||||||
"Key" if in_object => {
|
|
||||||
current_key = Some(text.trim().to_string());
|
|
||||||
}
|
|
||||||
"VersionId" if in_object => {
|
|
||||||
current_version_id = Some(text.trim().to_string());
|
|
||||||
}
|
|
||||||
"Quiet" => {
|
|
||||||
result.quiet = text.trim() == "true";
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::End(ref e)) => {
|
|
||||||
let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
|
|
||||||
if name == "Object" && in_object {
|
|
||||||
if let Some(key) = current_key.take() {
|
|
||||||
result.objects.push(ObjectIdentifier {
|
|
||||||
key,
|
|
||||||
version_id: current_version_id.take(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
in_object = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Event::Eof) => break,
|
|
||||||
Err(e) => return Err(format!("XML parse error: {}", e)),
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
buf.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;

    // Parts arrive out of order in the XML but come back sorted by part
    // number, with the ETag quotes stripped.
    #[test]
    fn test_parse_complete_multipart() {
        let xml = r#"<CompleteMultipartUpload>
            <Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
            <Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
        </CompleteMultipartUpload>"#;

        let result = parse_complete_multipart_upload(xml).unwrap();
        assert_eq!(result.parts.len(), 2);
        assert_eq!(result.parts[0].part_number, 1);
        assert_eq!(result.parts[0].etag, "etag1");
        assert_eq!(result.parts[1].part_number, 2);
        assert_eq!(result.parts[1].etag, "etag2");
    }
}
|
|
||||||
@@ -1,363 +0,0 @@
|
|||||||
use chrono::{DateTime, Utc};
|
|
||||||
use myfsio_common::types::{BucketMeta, ObjectMeta};
|
|
||||||
use quick_xml::events::{BytesDecl, BytesEnd, BytesStart, BytesText, Event};
|
|
||||||
use quick_xml::Writer;
|
|
||||||
use std::io::Cursor;
|
|
||||||
|
|
||||||
/// Format a UTC timestamp in the S3 wire format,
/// `YYYY-MM-DDTHH:MM:SS.mmmZ` (millisecond precision).
pub fn format_s3_datetime(dt: &DateTime<Utc>) -> String {
    dt.format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()
}
|
|
||||||
|
|
||||||
pub fn list_buckets_xml(owner_id: &str, owner_name: &str, buckets: &[BucketMeta]) -> String {
|
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
|
||||||
|
|
||||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
|
||||||
|
|
||||||
let start = BytesStart::new("ListAllMyBucketsResult")
|
|
||||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
|
||||||
writer.write_event(Event::Start(start)).unwrap();
|
|
||||||
|
|
||||||
writer.write_event(Event::Start(BytesStart::new("Owner"))).unwrap();
|
|
||||||
write_text_element(&mut writer, "ID", owner_id);
|
|
||||||
write_text_element(&mut writer, "DisplayName", owner_name);
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("Owner"))).unwrap();
|
|
||||||
|
|
||||||
writer.write_event(Event::Start(BytesStart::new("Buckets"))).unwrap();
|
|
||||||
for bucket in buckets {
|
|
||||||
writer.write_event(Event::Start(BytesStart::new("Bucket"))).unwrap();
|
|
||||||
write_text_element(&mut writer, "Name", &bucket.name);
|
|
||||||
write_text_element(&mut writer, "CreationDate", &format_s3_datetime(&bucket.creation_date));
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("Bucket"))).unwrap();
|
|
||||||
}
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("Buckets"))).unwrap();
|
|
||||||
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("ListAllMyBucketsResult"))).unwrap();
|
|
||||||
|
|
||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the `ListBucketResult` XML for a ListObjectsV2 response.
///
/// `objects` become `<Contents>` entries and `common_prefixes` become
/// `<CommonPrefixes>` entries. `<Delimiter>` is emitted only when non-empty,
/// continuation tokens only when present, and `<ETag>` only when the object
/// has one. `key_count`, `max_keys`, and `is_truncated` are reported as-is.
pub fn list_objects_v2_xml(
    bucket_name: &str,
    prefix: &str,
    delimiter: &str,
    max_keys: usize,
    objects: &[ObjectMeta],
    common_prefixes: &[String],
    is_truncated: bool,
    continuation_token: Option<&str>,
    next_continuation_token: Option<&str>,
    key_count: usize,
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));

    writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();

    let start = BytesStart::new("ListBucketResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();

    write_text_element(&mut writer, "Name", bucket_name);
    write_text_element(&mut writer, "Prefix", prefix);
    if !delimiter.is_empty() {
        write_text_element(&mut writer, "Delimiter", delimiter);
    }
    write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
    write_text_element(&mut writer, "KeyCount", &key_count.to_string());
    write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());

    if let Some(token) = continuation_token {
        write_text_element(&mut writer, "ContinuationToken", token);
    }
    if let Some(token) = next_continuation_token {
        write_text_element(&mut writer, "NextContinuationToken", token);
    }

    // One <Contents> entry per object.
    for obj in objects {
        writer.write_event(Event::Start(BytesStart::new("Contents"))).unwrap();
        write_text_element(&mut writer, "Key", &obj.key);
        write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
        if let Some(ref etag) = obj.etag {
            // ETags are wire-formatted with surrounding double quotes.
            write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
        }
        write_text_element(&mut writer, "Size", &obj.size.to_string());
        write_text_element(&mut writer, "StorageClass", obj.storage_class.as_deref().unwrap_or("STANDARD"));
        writer.write_event(Event::End(BytesEnd::new("Contents"))).unwrap();
    }

    // One <CommonPrefixes> wrapper per collapsed prefix.
    for prefix in common_prefixes {
        writer.write_event(Event::Start(BytesStart::new("CommonPrefixes"))).unwrap();
        write_text_element(&mut writer, "Prefix", prefix);
        writer.write_event(Event::End(BytesEnd::new("CommonPrefixes"))).unwrap();
    }

    writer.write_event(Event::End(BytesEnd::new("ListBucketResult"))).unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
/// Build the `ListBucketResult` XML for a ListObjects (v1) response.
///
/// Unlike the v2 variant, `<Marker>` is always emitted (even when empty)
/// and there is no `KeyCount` or `StorageClass`. `<NextMarker>` appears only
/// when a delimiter was supplied, the listing is truncated, and a non-empty
/// marker value is available.
pub fn list_objects_v1_xml(
    bucket_name: &str,
    prefix: &str,
    marker: &str,
    delimiter: &str,
    max_keys: usize,
    objects: &[ObjectMeta],
    common_prefixes: &[String],
    is_truncated: bool,
    next_marker: Option<&str>,
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));

    writer
        .write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None)))
        .unwrap();

    let start = BytesStart::new("ListBucketResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();

    write_text_element(&mut writer, "Name", bucket_name);
    write_text_element(&mut writer, "Prefix", prefix);
    write_text_element(&mut writer, "Marker", marker);
    write_text_element(&mut writer, "MaxKeys", &max_keys.to_string());
    write_text_element(&mut writer, "IsTruncated", &is_truncated.to_string());

    if !delimiter.is_empty() {
        write_text_element(&mut writer, "Delimiter", delimiter);
    }
    // NextMarker is only meaningful for truncated, delimited listings.
    if !delimiter.is_empty() && is_truncated {
        if let Some(nm) = next_marker {
            if !nm.is_empty() {
                write_text_element(&mut writer, "NextMarker", nm);
            }
        }
    }

    // One <Contents> entry per object.
    for obj in objects {
        writer
            .write_event(Event::Start(BytesStart::new("Contents")))
            .unwrap();
        write_text_element(&mut writer, "Key", &obj.key);
        write_text_element(&mut writer, "LastModified", &format_s3_datetime(&obj.last_modified));
        if let Some(ref etag) = obj.etag {
            // ETags are wire-formatted with surrounding double quotes.
            write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
        }
        write_text_element(&mut writer, "Size", &obj.size.to_string());
        writer
            .write_event(Event::End(BytesEnd::new("Contents")))
            .unwrap();
    }

    // One <CommonPrefixes> wrapper per collapsed prefix.
    for cp in common_prefixes {
        writer
            .write_event(Event::Start(BytesStart::new("CommonPrefixes")))
            .unwrap();
        write_text_element(&mut writer, "Prefix", cp);
        writer
            .write_event(Event::End(BytesEnd::new("CommonPrefixes")))
            .unwrap();
    }

    writer
        .write_event(Event::End(BytesEnd::new("ListBucketResult")))
        .unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
/// Emit `<tag>text</tag>` (start, escaped text, end) through `writer`.
fn write_text_element(writer: &mut Writer<Cursor<Vec<u8>>>, tag: &str, text: &str) {
    let events = [
        Event::Start(BytesStart::new(tag)),
        Event::Text(BytesText::new(text)),
        Event::End(BytesEnd::new(tag)),
    ];
    for ev in events {
        writer.write_event(ev).unwrap();
    }
}
|
|
||||||
|
|
||||||
pub fn initiate_multipart_upload_xml(bucket: &str, key: &str, upload_id: &str) -> String {
|
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
|
||||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
|
||||||
|
|
||||||
let start = BytesStart::new("InitiateMultipartUploadResult")
|
|
||||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
|
||||||
writer.write_event(Event::Start(start)).unwrap();
|
|
||||||
write_text_element(&mut writer, "Bucket", bucket);
|
|
||||||
write_text_element(&mut writer, "Key", key);
|
|
||||||
write_text_element(&mut writer, "UploadId", upload_id);
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("InitiateMultipartUploadResult"))).unwrap();
|
|
||||||
|
|
||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the `CompleteMultipartUploadResult` XML response: the final object
/// location, bucket, key, and combined ETag (quoted on the wire).
pub fn complete_multipart_upload_xml(
    bucket: &str,
    key: &str,
    etag: &str,
    location: &str,
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));
    writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();

    let start = BytesStart::new("CompleteMultipartUploadResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();
    write_text_element(&mut writer, "Location", location);
    write_text_element(&mut writer, "Bucket", bucket);
    write_text_element(&mut writer, "Key", key);
    // ETag is wire-formatted with surrounding double quotes.
    write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
    writer.write_event(Event::End(BytesEnd::new("CompleteMultipartUploadResult"))).unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
pub fn copy_object_result_xml(etag: &str, last_modified: &str) -> String {
|
|
||||||
let mut writer = Writer::new(Cursor::new(Vec::new()));
|
|
||||||
writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();
|
|
||||||
|
|
||||||
let start = BytesStart::new("CopyObjectResult")
|
|
||||||
.with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
|
|
||||||
writer.write_event(Event::Start(start)).unwrap();
|
|
||||||
write_text_element(&mut writer, "ETag", &format!("\"{}\"", etag));
|
|
||||||
write_text_element(&mut writer, "LastModified", last_modified);
|
|
||||||
writer.write_event(Event::End(BytesEnd::new("CopyObjectResult"))).unwrap();
|
|
||||||
|
|
||||||
String::from_utf8(writer.into_inner().into_inner()).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Build the `DeleteResult` XML for a multi-object delete response.
///
/// `deleted` is a list of (key, optional version id) successes; `errors` is
/// a list of (key, error code, error message) failures. In quiet mode the
/// `<Deleted>` entries are suppressed, but errors are always reported.
pub fn delete_result_xml(
    deleted: &[(String, Option<String>)],
    errors: &[(String, String, String)],
    quiet: bool,
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));
    writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();

    let start = BytesStart::new("DeleteResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();

    // Per-object success entries are only emitted in verbose (non-quiet) mode.
    if !quiet {
        for (key, version_id) in deleted {
            writer.write_event(Event::Start(BytesStart::new("Deleted"))).unwrap();
            write_text_element(&mut writer, "Key", key);
            if let Some(vid) = version_id {
                write_text_element(&mut writer, "VersionId", vid);
            }
            writer.write_event(Event::End(BytesEnd::new("Deleted"))).unwrap();
        }
    }

    // Failures are always reported, regardless of quiet mode.
    for (key, code, message) in errors {
        writer.write_event(Event::Start(BytesStart::new("Error"))).unwrap();
        write_text_element(&mut writer, "Key", key);
        write_text_element(&mut writer, "Code", code);
        write_text_element(&mut writer, "Message", message);
        writer.write_event(Event::End(BytesEnd::new("Error"))).unwrap();
    }

    writer.write_event(Event::End(BytesEnd::new("DeleteResult"))).unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
/// Build the `ListMultipartUploadsResult` XML response: one `<Upload>` per
/// in-progress multipart upload, with its key, id, and start time.
pub fn list_multipart_uploads_xml(
    bucket: &str,
    uploads: &[myfsio_common::types::MultipartUploadInfo],
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));
    writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();

    let start = BytesStart::new("ListMultipartUploadsResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();
    write_text_element(&mut writer, "Bucket", bucket);

    for upload in uploads {
        writer.write_event(Event::Start(BytesStart::new("Upload"))).unwrap();
        write_text_element(&mut writer, "Key", &upload.key);
        write_text_element(&mut writer, "UploadId", &upload.upload_id);
        write_text_element(&mut writer, "Initiated", &format_s3_datetime(&upload.initiated));
        writer.write_event(Event::End(BytesEnd::new("Upload"))).unwrap();
    }

    writer.write_event(Event::End(BytesEnd::new("ListMultipartUploadsResult"))).unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
/// Build the `ListPartsResult` XML response: upload identification followed
/// by one `<Part>` per uploaded part (number, quoted ETag, size, and the
/// last-modified timestamp when known).
pub fn list_parts_xml(
    bucket: &str,
    key: &str,
    upload_id: &str,
    parts: &[myfsio_common::types::PartMeta],
) -> String {
    let mut writer = Writer::new(Cursor::new(Vec::new()));
    writer.write_event(Event::Decl(BytesDecl::new("1.0", Some("UTF-8"), None))).unwrap();

    let start = BytesStart::new("ListPartsResult")
        .with_attributes([("xmlns", "http://s3.amazonaws.com/doc/2006-03-01/")]);
    writer.write_event(Event::Start(start)).unwrap();
    write_text_element(&mut writer, "Bucket", bucket);
    write_text_element(&mut writer, "Key", key);
    write_text_element(&mut writer, "UploadId", upload_id);

    for part in parts {
        writer.write_event(Event::Start(BytesStart::new("Part"))).unwrap();
        write_text_element(&mut writer, "PartNumber", &part.part_number.to_string());
        // ETags are wire-formatted with surrounding double quotes.
        write_text_element(&mut writer, "ETag", &format!("\"{}\"", part.etag));
        write_text_element(&mut writer, "Size", &part.size.to_string());
        if let Some(ref lm) = part.last_modified {
            write_text_element(&mut writer, "LastModified", &format_s3_datetime(lm));
        }
        writer.write_event(Event::End(BytesEnd::new("Part"))).unwrap();
    }

    writer.write_event(Event::End(BytesEnd::new("ListPartsResult"))).unwrap();

    String::from_utf8(writer.into_inner().into_inner()).unwrap()
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;

    // The ListBuckets document carries the owner block and each bucket name.
    #[test]
    fn test_list_buckets_xml() {
        let buckets = vec![BucketMeta {
            name: "test-bucket".to_string(),
            creation_date: Utc::now(),
        }];
        let xml = list_buckets_xml("owner-id", "owner-name", &buckets);
        assert!(xml.contains("<Name>test-bucket</Name>"));
        assert!(xml.contains("<ID>owner-id</ID>"));
        assert!(xml.contains("ListAllMyBucketsResult"));
    }

    // V2 listing emits key, size, and the IsTruncated flag.
    #[test]
    fn test_list_objects_v2_xml() {
        let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
        let xml = list_objects_v2_xml(
            "my-bucket", "", "/", 1000, &objects, &[], false, None, None, 1,
        );
        assert!(xml.contains("<Key>file.txt</Key>"));
        assert!(xml.contains("<Size>1024</Size>"));
        assert!(xml.contains("<IsTruncated>false</IsTruncated>"));
    }

    // V1 listing additionally always emits a Marker element, even when empty.
    #[test]
    fn test_list_objects_v1_xml() {
        let objects = vec![ObjectMeta::new("file.txt".to_string(), 1024, Utc::now())];
        let xml = list_objects_v1_xml(
            "my-bucket",
            "",
            "",
            "/",
            1000,
            &objects,
            &[],
            false,
            None,
        );
        assert!(xml.contains("<Key>file.txt</Key>"));
        assert!(xml.contains("<Size>1024</Size>"));
        assert!(xml.contains("<Marker></Marker>"));
    }
}
|
|
||||||
@@ -125,7 +125,7 @@ pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyRes
|
|||||||
fs::write(&path_owned, serialized)
|
fs::write(&path_owned, serialized)
|
||||||
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
|
.map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;
|
||||||
|
|
||||||
Ok(true)
|
Ok(false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
57
run.py
57
run.py
@@ -5,7 +5,6 @@ import argparse
|
|||||||
import atexit
|
import atexit
|
||||||
import os
|
import os
|
||||||
import signal
|
import signal
|
||||||
import subprocess
|
|
||||||
import sys
|
import sys
|
||||||
import warnings
|
import warnings
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
@@ -75,49 +74,6 @@ def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -
|
|||||||
server.serve()
|
server.serve()
|
||||||
|
|
||||||
|
|
||||||
def _find_rust_binary() -> Optional[Path]:
|
|
||||||
candidates = [
|
|
||||||
Path("/usr/local/bin/myfsio-server"),
|
|
||||||
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server.exe",
|
|
||||||
Path(__file__).parent / "myfsio-engine" / "target" / "release" / "myfsio-server",
|
|
||||||
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server.exe",
|
|
||||||
Path(__file__).parent / "myfsio-engine" / "target" / "debug" / "myfsio-server",
|
|
||||||
]
|
|
||||||
for p in candidates:
|
|
||||||
if p.exists():
|
|
||||||
return p
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def serve_rust_api(port: int, config: Optional[AppConfig] = None) -> None:
|
|
||||||
binary = _find_rust_binary()
|
|
||||||
if binary is None:
|
|
||||||
print("ERROR: Rust engine binary not found. Build it first:")
|
|
||||||
print(" cd myfsio-engine && cargo build --release")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
env = os.environ.copy()
|
|
||||||
env["PORT"] = str(port)
|
|
||||||
env["HOST"] = _server_host()
|
|
||||||
if config:
|
|
||||||
env["STORAGE_ROOT"] = str(config.storage_root)
|
|
||||||
env["AWS_REGION"] = config.aws_region
|
|
||||||
if config.secret_key:
|
|
||||||
env["SECRET_KEY"] = config.secret_key
|
|
||||||
env.setdefault("ENCRYPTION_ENABLED", str(config.encryption_enabled).lower())
|
|
||||||
env.setdefault("KMS_ENABLED", str(config.kms_enabled).lower())
|
|
||||||
env.setdefault("LIFECYCLE_ENABLED", str(config.lifecycle_enabled).lower())
|
|
||||||
env.setdefault("RUST_LOG", "info")
|
|
||||||
|
|
||||||
print(f"Starting Rust S3 engine: {binary}")
|
|
||||||
proc = subprocess.Popen([str(binary)], env=env)
|
|
||||||
try:
|
|
||||||
proc.wait()
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
proc.terminate()
|
|
||||||
proc.wait(timeout=5)
|
|
||||||
|
|
||||||
|
|
||||||
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
|
||||||
if prod:
|
if prod:
|
||||||
_serve_granian("app:create_api_app", port, config)
|
_serve_granian("app:create_api_app", port, config)
|
||||||
@@ -271,7 +227,6 @@ if __name__ == "__main__":
|
|||||||
parser.add_argument("--ui-port", type=int, default=5100)
|
parser.add_argument("--ui-port", type=int, default=5100)
|
||||||
parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
|
parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
|
||||||
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
|
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
|
||||||
parser.add_argument("--engine", choices=["python", "rust"], default=os.getenv("ENGINE", "python"), help="API engine: python (Flask) or rust (myfsio-engine)")
|
|
||||||
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
|
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
|
||||||
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
|
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
|
||||||
parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
|
parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
|
||||||
@@ -325,17 +280,9 @@ if __name__ == "__main__":
|
|||||||
else:
|
else:
|
||||||
print("Running in development mode (Flask dev server)")
|
print("Running in development mode (Flask dev server)")
|
||||||
|
|
||||||
use_rust = args.engine == "rust"
|
|
||||||
|
|
||||||
if args.mode in {"api", "both"}:
|
if args.mode in {"api", "both"}:
|
||||||
if use_rust:
|
print(f"Starting API server on port {args.api_port}...")
|
||||||
print(f"Starting Rust API engine on port {args.api_port}...")
|
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
|
||||||
else:
|
|
||||||
print(f"Starting API server on port {args.api_port}...")
|
|
||||||
if use_rust:
|
|
||||||
api_proc = Process(target=serve_rust_api, args=(args.api_port, config))
|
|
||||||
else:
|
|
||||||
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
|
|
||||||
api_proc.start()
|
api_proc.start()
|
||||||
else:
|
else:
|
||||||
api_proc = None
|
api_proc = None
|
||||||
|
|||||||
Reference in New Issue
Block a user