Full migration and transition to Rust; remove Python artifacts

2026-04-22 17:19:19 +08:00
parent 51d54b42ac
commit 217af6d1c6
204 changed files with 30 additions and 54451 deletions

View File

@@ -0,0 +1,564 @@
use std::net::SocketAddr;
use std::path::PathBuf;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RateLimitSetting {
pub max_requests: u32,
pub window_seconds: u64,
}
impl RateLimitSetting {
pub const fn new(max_requests: u32, window_seconds: u64) -> Self {
Self {
max_requests,
window_seconds,
}
}
}
#[derive(Debug, Clone)]
pub struct ServerConfig {
pub bind_addr: SocketAddr,
pub ui_bind_addr: SocketAddr,
pub storage_root: PathBuf,
pub region: String,
pub iam_config_path: PathBuf,
pub sigv4_timestamp_tolerance_secs: u64,
pub presigned_url_min_expiry: u64,
pub presigned_url_max_expiry: u64,
pub secret_key: Option<String>,
pub encryption_enabled: bool,
pub encryption_chunk_size_bytes: usize,
pub kms_enabled: bool,
pub kms_generate_data_key_min_bytes: usize,
pub kms_generate_data_key_max_bytes: usize,
pub gc_enabled: bool,
pub gc_interval_hours: f64,
pub gc_temp_file_max_age_hours: f64,
pub gc_multipart_max_age_days: u64,
pub gc_lock_file_max_age_hours: f64,
pub gc_dry_run: bool,
pub integrity_enabled: bool,
pub metrics_enabled: bool,
pub metrics_history_enabled: bool,
pub metrics_interval_minutes: u64,
pub metrics_retention_hours: u64,
pub metrics_history_interval_minutes: u64,
pub metrics_history_retention_hours: u64,
pub lifecycle_enabled: bool,
pub lifecycle_max_history_per_bucket: usize,
pub website_hosting_enabled: bool,
pub object_key_max_length_bytes: usize,
pub object_tag_limit: usize,
pub object_cache_max_size: usize,
pub bucket_config_cache_ttl_seconds: f64,
pub replication_connect_timeout_secs: u64,
pub replication_read_timeout_secs: u64,
pub replication_max_retries: u32,
pub replication_streaming_threshold_bytes: u64,
pub replication_max_failures_per_bucket: usize,
pub site_sync_enabled: bool,
pub site_sync_interval_secs: u64,
pub site_sync_batch_size: usize,
pub site_sync_connect_timeout_secs: u64,
pub site_sync_read_timeout_secs: u64,
pub site_sync_max_retries: u32,
pub site_sync_clock_skew_tolerance: f64,
pub site_id: Option<String>,
pub site_endpoint: Option<String>,
pub site_region: String,
pub site_priority: i32,
pub api_base_url: String,
pub num_trusted_proxies: usize,
pub allowed_redirect_hosts: Vec<String>,
pub allow_internal_endpoints: bool,
pub cors_origins: Vec<String>,
pub cors_methods: Vec<String>,
pub cors_allow_headers: Vec<String>,
pub cors_expose_headers: Vec<String>,
pub session_lifetime_days: u64,
pub log_level: String,
pub multipart_min_part_size: u64,
pub bulk_delete_max_keys: usize,
pub stream_chunk_size: usize,
pub ratelimit_default: RateLimitSetting,
pub ratelimit_admin: RateLimitSetting,
pub ratelimit_storage_uri: String,
pub ui_enabled: bool,
pub templates_dir: PathBuf,
pub static_dir: PathBuf,
}
impl ServerConfig {
pub fn from_env() -> Self {
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
let port: u16 = std::env::var("PORT")
.unwrap_or_else(|_| "5000".to_string())
.parse()
.unwrap_or(5000);
// Fall back to loopback rather than panicking on an unparsable HOST value.
let host_ip: std::net::IpAddr = host
.parse()
.unwrap_or(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST));
let bind_addr = SocketAddr::new(host_ip, port);
let ui_port: u16 = std::env::var("UI_PORT")
.unwrap_or_else(|_| "5100".to_string())
.parse()
.unwrap_or(5100);
let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
let storage_path = PathBuf::from(&storage_root);
let iam_config_path = std::env::var("IAM_CONFIG")
.map(PathBuf::from)
.unwrap_or_else(|_| {
storage_path
.join(".myfsio.sys")
.join("config")
.join("iam.json")
});
let sigv4_timestamp_tolerance_secs: u64 =
std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
.unwrap_or_else(|_| "900".to_string())
.parse()
.unwrap_or(900);
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
.unwrap_or_else(|_| "1".to_string())
.parse()
.unwrap_or(1);
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
.unwrap_or_else(|_| "604800".to_string())
.parse()
.unwrap_or(604800);
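// Prefer SECRET_KEY from the environment, but treat an empty value or the
// well-known dev default as unset and fall back to the on-disk secret file.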
let secret_key = {
let env_key = std::env::var("SECRET_KEY").ok();
match env_key {
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
_ => {
let secret_file = storage_path
.join(".myfsio.sys")
.join("config")
.join(".secret");
std::fs::read_to_string(&secret_file)
.ok()
.map(|s| s.trim().to_string())
}
}
};
let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
let encryption_chunk_size_bytes = parse_usize_env("ENCRYPTION_CHUNK_SIZE_BYTES", 65_536);
let kms_enabled = parse_bool_env("KMS_ENABLED", false);
let kms_generate_data_key_min_bytes = parse_usize_env("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1);
let kms_generate_data_key_max_bytes =
parse_usize_env("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024);
let gc_enabled = parse_bool_env("GC_ENABLED", false);
let gc_interval_hours = parse_f64_env("GC_INTERVAL_HOURS", 6.0);
let gc_temp_file_max_age_hours = parse_f64_env("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0);
let gc_multipart_max_age_days = parse_u64_env("GC_MULTIPART_MAX_AGE_DAYS", 7);
let gc_lock_file_max_age_hours = parse_f64_env("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0);
let gc_dry_run = parse_bool_env("GC_DRY_RUN", false);
let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
let lifecycle_max_history_per_bucket =
parse_usize_env("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50);
let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
let object_key_max_length_bytes = parse_usize_env("OBJECT_KEY_MAX_LENGTH_BYTES", 1024);
let object_tag_limit = parse_usize_env("OBJECT_TAG_LIMIT", 50);
let object_cache_max_size = parse_usize_env("OBJECT_CACHE_MAX_SIZE", 100);
let bucket_config_cache_ttl_seconds =
parse_f64_env("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0);
let replication_connect_timeout_secs =
parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
let replication_streaming_threshold_bytes =
parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
let replication_max_failures_per_bucket =
parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
let site_sync_clock_skew_tolerance: f64 =
std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(1.0);
let site_id = parse_optional_string_env("SITE_ID");
let site_endpoint = parse_optional_string_env("SITE_ENDPOINT");
let site_region = std::env::var("SITE_REGION").unwrap_or_else(|_| region.clone());
let site_priority = parse_i32_env("SITE_PRIORITY", 100);
let api_base_url = std::env::var("API_BASE_URL")
.unwrap_or_else(|_| format!("http://{}", bind_addr))
.trim_end_matches('/')
.to_string();
let num_trusted_proxies = parse_usize_env("NUM_TRUSTED_PROXIES", 0);
let allowed_redirect_hosts = parse_list_env("ALLOWED_REDIRECT_HOSTS", "");
let allow_internal_endpoints = parse_bool_env("ALLOW_INTERNAL_ENDPOINTS", false);
let cors_origins = parse_list_env("CORS_ORIGINS", "*");
let cors_methods = parse_list_env("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD");
let cors_allow_headers = parse_list_env("CORS_ALLOW_HEADERS", "*");
let cors_expose_headers = parse_list_env("CORS_EXPOSE_HEADERS", "*");
let session_lifetime_days = parse_u64_env("SESSION_LIFETIME_DAYS", 1);
let log_level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string());
let multipart_min_part_size = parse_u64_env("MULTIPART_MIN_PART_SIZE", 5_242_880);
let bulk_delete_max_keys = parse_usize_env("BULK_DELETE_MAX_KEYS", 1000);
let stream_chunk_size = parse_usize_env("STREAM_CHUNK_SIZE", 1_048_576);
let ratelimit_default =
parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(200, 60));
let ratelimit_admin =
parse_rate_limit_env("RATE_LIMIT_ADMIN", RateLimitSetting::new(60, 60));
let ratelimit_storage_uri =
std::env::var("RATE_LIMIT_STORAGE_URI").unwrap_or_else(|_| "memory://".to_string());
let ui_enabled = parse_bool_env("UI_ENABLED", true);
let templates_dir = std::env::var("TEMPLATES_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| default_templates_dir());
let static_dir = std::env::var("STATIC_DIR")
.map(PathBuf::from)
.unwrap_or_else(|_| default_static_dir());
Self {
bind_addr,
ui_bind_addr: SocketAddr::new(host_ip, ui_port),
storage_root: storage_path,
region,
iam_config_path,
sigv4_timestamp_tolerance_secs,
presigned_url_min_expiry,
presigned_url_max_expiry,
secret_key,
encryption_enabled,
encryption_chunk_size_bytes,
kms_enabled,
kms_generate_data_key_min_bytes,
kms_generate_data_key_max_bytes,
gc_enabled,
gc_interval_hours,
gc_temp_file_max_age_hours,
gc_multipart_max_age_days,
gc_lock_file_max_age_hours,
gc_dry_run,
integrity_enabled,
metrics_enabled,
metrics_history_enabled,
metrics_interval_minutes,
metrics_retention_hours,
metrics_history_interval_minutes,
metrics_history_retention_hours,
lifecycle_enabled,
lifecycle_max_history_per_bucket,
website_hosting_enabled,
object_key_max_length_bytes,
object_tag_limit,
object_cache_max_size,
bucket_config_cache_ttl_seconds,
replication_connect_timeout_secs,
replication_read_timeout_secs,
replication_max_retries,
replication_streaming_threshold_bytes,
replication_max_failures_per_bucket,
site_sync_enabled,
site_sync_interval_secs,
site_sync_batch_size,
site_sync_connect_timeout_secs,
site_sync_read_timeout_secs,
site_sync_max_retries,
site_sync_clock_skew_tolerance,
site_id,
site_endpoint,
site_region,
site_priority,
api_base_url,
num_trusted_proxies,
allowed_redirect_hosts,
allow_internal_endpoints,
cors_origins,
cors_methods,
cors_allow_headers,
cors_expose_headers,
session_lifetime_days,
log_level,
multipart_min_part_size,
bulk_delete_max_keys,
stream_chunk_size,
ratelimit_default,
ratelimit_admin,
ratelimit_storage_uri,
ui_enabled,
templates_dir,
static_dir,
}
}
}
impl Default for ServerConfig {
fn default() -> Self {
Self {
bind_addr: "127.0.0.1:5000".parse().unwrap(),
ui_bind_addr: "127.0.0.1:5100".parse().unwrap(),
storage_root: PathBuf::from("./data"),
region: "us-east-1".to_string(),
iam_config_path: PathBuf::from("./data/.myfsio.sys/config/iam.json"),
sigv4_timestamp_tolerance_secs: 900,
presigned_url_min_expiry: 1,
presigned_url_max_expiry: 604_800,
secret_key: None,
encryption_enabled: false,
encryption_chunk_size_bytes: 65_536,
kms_enabled: false,
kms_generate_data_key_min_bytes: 1,
kms_generate_data_key_max_bytes: 1024,
gc_enabled: false,
gc_interval_hours: 6.0,
gc_temp_file_max_age_hours: 24.0,
gc_multipart_max_age_days: 7,
gc_lock_file_max_age_hours: 1.0,
gc_dry_run: false,
integrity_enabled: false,
metrics_enabled: false,
metrics_history_enabled: false,
metrics_interval_minutes: 5,
metrics_retention_hours: 24,
metrics_history_interval_minutes: 5,
metrics_history_retention_hours: 24,
lifecycle_enabled: false,
lifecycle_max_history_per_bucket: 50,
website_hosting_enabled: false,
object_key_max_length_bytes: 1024,
object_tag_limit: 50,
object_cache_max_size: 100,
bucket_config_cache_ttl_seconds: 30.0,
replication_connect_timeout_secs: 5,
replication_read_timeout_secs: 30,
replication_max_retries: 2,
replication_streaming_threshold_bytes: 10_485_760,
replication_max_failures_per_bucket: 50,
site_sync_enabled: false,
site_sync_interval_secs: 60,
site_sync_batch_size: 100,
site_sync_connect_timeout_secs: 10,
site_sync_read_timeout_secs: 120,
site_sync_max_retries: 2,
site_sync_clock_skew_tolerance: 1.0,
site_id: None,
site_endpoint: None,
site_region: "us-east-1".to_string(),
site_priority: 100,
api_base_url: "http://127.0.0.1:5000".to_string(),
num_trusted_proxies: 0,
allowed_redirect_hosts: Vec::new(),
allow_internal_endpoints: false,
cors_origins: vec!["*".to_string()],
cors_methods: vec![
"GET".to_string(),
"PUT".to_string(),
"POST".to_string(),
"DELETE".to_string(),
"OPTIONS".to_string(),
"HEAD".to_string(),
],
cors_allow_headers: vec!["*".to_string()],
cors_expose_headers: vec!["*".to_string()],
session_lifetime_days: 1,
log_level: "INFO".to_string(),
multipart_min_part_size: 5_242_880,
bulk_delete_max_keys: 1000,
stream_chunk_size: 1_048_576,
ratelimit_default: RateLimitSetting::new(200, 60),
ratelimit_admin: RateLimitSetting::new(60, 60),
ratelimit_storage_uri: "memory://".to_string(),
ui_enabled: true,
templates_dir: default_templates_dir(),
static_dir: default_static_dir(),
}
}
}
fn default_templates_dir() -> PathBuf {
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
manifest_dir.join("templates")
}
fn default_static_dir() -> PathBuf {
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
for candidate in [
manifest_dir.join("static"),
manifest_dir.join("..").join("..").join("..").join("static"),
] {
if candidate.exists() {
return candidate;
}
}
manifest_dir.join("static")
}
fn parse_u64_env(key: &str, default: u64) -> u64 {
std::env::var(key)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(default)
}
fn parse_usize_env(key: &str, default: usize) -> usize {
std::env::var(key)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(default)
}
fn parse_i32_env(key: &str, default: i32) -> i32 {
std::env::var(key)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(default)
}
fn parse_f64_env(key: &str, default: f64) -> f64 {
std::env::var(key)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(default)
}
fn parse_bool_env(key: &str, default: bool) -> bool {
std::env::var(key)
.ok()
.map(|value| {
matches!(
value.trim().to_ascii_lowercase().as_str(),
"1" | "true" | "yes" | "on"
)
})
.unwrap_or(default)
}
fn parse_optional_string_env(key: &str) -> Option<String> {
std::env::var(key)
.ok()
.map(|value| value.trim().to_string())
.filter(|value| !value.is_empty())
}
fn parse_list_env(key: &str, default: &str) -> Vec<String> {
std::env::var(key)
.unwrap_or_else(|_| default.to_string())
.split(',')
.map(|value| value.trim().to_string())
.filter(|value| !value.is_empty())
.collect()
}
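// Parses limit strings of the form "<count> per <unit>", e.g. "200 per minute";
// presumably kept compatible with the rate-limit syntax of the replaced Python service.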
pub fn parse_rate_limit(value: &str) -> Option<RateLimitSetting> {
let parts = value.split_whitespace().collect::<Vec<_>>();
if parts.len() != 3 || !parts[1].eq_ignore_ascii_case("per") {
return None;
}
let max_requests = parts[0].parse::<u32>().ok()?;
if max_requests == 0 {
return None;
}
let window_seconds = match parts[2].to_ascii_lowercase().as_str() {
"second" | "seconds" => 1,
"minute" | "minutes" => 60,
"hour" | "hours" => 3600,
"day" | "days" => 86_400,
_ => return None,
};
Some(RateLimitSetting::new(max_requests, window_seconds))
}
fn parse_rate_limit_env(key: &str, default: RateLimitSetting) -> RateLimitSetting {
std::env::var(key)
.ok()
.and_then(|value| parse_rate_limit(&value))
.unwrap_or(default)
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Mutex, OnceLock};
fn env_lock() -> &'static Mutex<()> {
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
LOCK.get_or_init(|| Mutex::new(()))
}
#[test]
fn parses_rate_limit_text() {
assert_eq!(
parse_rate_limit("200 per minute"),
Some(RateLimitSetting::new(200, 60))
);
assert_eq!(
parse_rate_limit("3 per hours"),
Some(RateLimitSetting::new(3, 3600))
);
assert_eq!(parse_rate_limit("0 per minute"), None);
assert_eq!(parse_rate_limit("bad"), None);
}
#[test]
fn env_defaults_and_invalid_values_fall_back() {
let _guard = env_lock().lock().unwrap();
std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
std::env::set_var("OBJECT_TAG_LIMIT", "not-a-number");
std::env::set_var("RATE_LIMIT_DEFAULT", "invalid");
let config = ServerConfig::from_env();
assert_eq!(config.object_key_max_length_bytes, 1024);
assert_eq!(config.object_tag_limit, 50);
assert_eq!(config.ratelimit_default, RateLimitSetting::new(200, 60));
std::env::remove_var("OBJECT_TAG_LIMIT");
std::env::remove_var("RATE_LIMIT_DEFAULT");
}
#[test]
fn env_overrides_new_values() {
let _guard = env_lock().lock().unwrap();
std::env::set_var("OBJECT_KEY_MAX_LENGTH_BYTES", "2048");
std::env::set_var("GC_DRY_RUN", "true");
std::env::set_var("RATE_LIMIT_ADMIN", "7 per second");
std::env::set_var("HOST", "127.0.0.1");
std::env::set_var("PORT", "5501");
std::env::remove_var("API_BASE_URL");
let config = ServerConfig::from_env();
assert_eq!(config.object_key_max_length_bytes, 2048);
assert!(config.gc_dry_run);
assert_eq!(config.ratelimit_admin, RateLimitSetting::new(7, 1));
assert_eq!(config.api_base_url, "http://127.0.0.1:5501");
std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
std::env::remove_var("GC_DRY_RUN");
std::env::remove_var("RATE_LIMIT_ADMIN");
std::env::remove_var("HOST");
std::env::remove_var("PORT");
}
}
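A minimal consumer sketch for the config above (the function name is illustrative; the real binary's startup wiring sits in a suppressed diff):
// Sketch only: build the config from the environment and report the
// effective bind addresses. Invalid numeric values fall back to the
// defaults exercised in the tests above.
fn report_config() {
std::env::set_var("HOST", "0.0.0.0");
std::env::set_var("PORT", "9000");
let cfg = ServerConfig::from_env();
assert_eq!(cfg.bind_addr.to_string(), "0.0.0.0:9000");
println!("S3 API on {}, UI on {}", cfg.bind_addr, cfg.ui_bind_addr);
}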

File diff suppressed because it is too large

View File

@@ -0,0 +1,184 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::{Buf, BytesMut};
use tokio::io::{AsyncRead, ReadBuf};
enum State {
ReadSize,
ReadData(u64),
ReadTrailer,
Finished,
}
pub struct AwsChunkedStream<S> {
inner: S,
buffer: BytesMut,
state: State,
pending: BytesMut,
eof: bool,
}
impl<S> AwsChunkedStream<S> {
pub fn new(inner: S) -> Self {
Self {
inner,
buffer: BytesMut::with_capacity(8192),
state: State::ReadSize,
pending: BytesMut::new(),
eof: false,
}
}
fn find_crlf(&self) -> Option<usize> {
for i in 0..self.buffer.len().saturating_sub(1) {
if self.buffer[i] == b'\r' && self.buffer[i + 1] == b'\n' {
return Some(i);
}
}
None
}
fn parse_chunk_size(line: &[u8]) -> std::io::Result<u64> {
let text = std::str::from_utf8(line).map_err(|_| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
"invalid chunk size encoding",
)
})?;
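// aws-chunked size lines may carry extensions (e.g. ";chunk-signature=...");
// keep only the leading hex size.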
let head = text.split(';').next().unwrap_or("").trim();
u64::from_str_radix(head, 16).map_err(|_| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("invalid chunk size: {}", head),
)
})
}
fn try_advance(&mut self, out: &mut ReadBuf<'_>) -> std::io::Result<bool> {
loop {
if out.remaining() == 0 {
return Ok(true);
}
if !self.pending.is_empty() {
let take = std::cmp::min(self.pending.len(), out.remaining());
out.put_slice(&self.pending[..take]);
self.pending.advance(take);
continue;
}
match self.state {
State::Finished => return Ok(true),
State::ReadSize => {
let idx = match self.find_crlf() {
Some(i) => i,
None => return Ok(false),
};
let line = self.buffer.split_to(idx);
self.buffer.advance(2);
let size = Self::parse_chunk_size(&line)?;
if size == 0 {
self.state = State::ReadTrailer;
} else {
self.state = State::ReadData(size);
}
}
State::ReadData(remaining) => {
if self.buffer.is_empty() {
return Ok(false);
}
let avail = std::cmp::min(self.buffer.len() as u64, remaining) as usize;
let take = std::cmp::min(avail, out.remaining());
out.put_slice(&self.buffer[..take]);
self.buffer.advance(take);
let new_remaining = remaining - take as u64;
if new_remaining == 0 {
if self.buffer.len() < 2 {
self.state = State::ReadData(0);
return Ok(false);
}
if &self.buffer[..2] != b"\r\n" {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"malformed chunk terminator",
));
}
self.buffer.advance(2);
self.state = State::ReadSize;
} else {
self.state = State::ReadData(new_remaining);
}
}
State::ReadTrailer => {
let idx = match self.find_crlf() {
Some(i) => i,
None => return Ok(false),
};
if idx == 0 {
self.buffer.advance(2);
self.state = State::Finished;
} else {
self.buffer.advance(idx + 2);
}
}
}
}
}
}
impl<S> AsyncRead for AwsChunkedStream<S>
where
S: AsyncRead + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
loop {
let before = buf.filled().len();
let done = match self.try_advance(buf) {
Ok(v) => v,
Err(e) => return Poll::Ready(Err(e)),
};
if buf.filled().len() > before {
return Poll::Ready(Ok(()));
}
if done {
return Poll::Ready(Ok(()));
}
if self.eof {
return Poll::Ready(Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
"unexpected EOF in aws-chunked stream",
)));
}
let mut tmp = [0u8; 8192];
let mut rb = ReadBuf::new(&mut tmp);
match Pin::new(&mut self.inner).poll_read(cx, &mut rb) {
Poll::Ready(Ok(())) => {
let n = rb.filled().len();
if n == 0 {
self.eof = true;
continue;
}
self.buffer.extend_from_slice(rb.filled());
}
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Pending => return Poll::Pending,
}
}
}
}
pub fn decode_body(body: axum::body::Body) -> impl AsyncRead + Send + Unpin {
use futures::TryStreamExt;
let stream = tokio_util::io::StreamReader::new(
http_body_util::BodyStream::new(body)
.map_ok(|frame| frame.into_data().unwrap_or_default())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
);
AwsChunkedStream::new(stream)
}
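A decoding sketch for the stream above (assumes a tokio runtime; the byte literal is an illustrative aws-chunked payload with one trailer header):
// Sketch: frames are `<hex-size>\r\n<data>\r\n`, terminated by a zero-size
// chunk and optional trailers.
async fn decode_example() -> std::io::Result<()> {
use tokio::io::AsyncReadExt;
let raw: &[u8] = b"5\r\nhello\r\n0\r\nx-amz-checksum-crc32:AAAA\r\n\r\n";
let mut reader = AwsChunkedStream::new(raw);
let mut out = Vec::new();
reader.read_to_end(&mut out).await?;
assert_eq!(out, b"hello".to_vec());
Ok(())
}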

File diff suppressed because it is too large

View File

@@ -0,0 +1,559 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use axum::body::Body;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use rand::RngCore;
use serde_json::{json, Value};
use crate::state::AppState;
fn json_ok(value: Value) -> Response {
(
StatusCode::OK,
[("content-type", "application/json")],
value.to_string(),
)
.into_response()
}
fn json_err(status: StatusCode, msg: &str) -> Response {
(
status,
[("content-type", "application/json")],
json!({"error": msg}).to_string(),
)
.into_response()
}
async fn read_json(body: Body) -> Result<Value, Response> {
let body_bytes = http_body_util::BodyExt::collect(body)
.await
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid request body"))?
.to_bytes();
if body_bytes.is_empty() {
Ok(json!({}))
} else {
serde_json::from_slice(&body_bytes)
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid JSON"))
}
}
fn require_kms(
state: &AppState,
) -> Result<&std::sync::Arc<myfsio_crypto::kms::KmsService>, Response> {
state
.kms
.as_ref()
.ok_or_else(|| json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"))
}
fn decode_b64(value: &str, field: &str) -> Result<Vec<u8>, Response> {
B64.decode(value).map_err(|_| {
json_err(
StatusCode::BAD_REQUEST,
&format!("Invalid base64 {}", field),
)
})
}
fn require_str<'a>(value: &'a Value, names: &[&str], message: &str) -> Result<&'a str, Response> {
for name in names {
if let Some(found) = value.get(*name).and_then(|v| v.as_str()) {
return Ok(found);
}
}
Err(json_err(StatusCode::BAD_REQUEST, message))
}
pub async fn list_keys(State(state): State<AppState>) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let keys = kms.list_keys().await;
let keys_json: Vec<Value> = keys
.iter()
.map(|k| {
json!({
"KeyId": k.key_id,
"Arn": k.arn,
"Description": k.description,
"CreationDate": k.creation_date.to_rfc3339(),
"Enabled": k.enabled,
"KeyState": k.key_state,
"KeyUsage": k.key_usage,
"KeySpec": k.key_spec,
})
})
.collect();
json_ok(json!({"keys": keys_json}))
}
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let description = req
.get("Description")
.or_else(|| req.get("description"))
.and_then(|d| d.as_str())
.unwrap_or("");
match kms.create_key(description).await {
Ok(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn get_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.get_key(&key_id).await {
Some(key) => json_ok(json!({
"KeyId": key.key_id,
"Arn": key.arn,
"Description": key.description,
"CreationDate": key.creation_date.to_rfc3339(),
"Enabled": key.enabled,
"KeyState": key.key_state,
"KeyUsage": key.key_usage,
"KeySpec": key.key_spec,
})),
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
}
}
pub async fn delete_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.delete_key(&key_id).await {
Ok(true) => StatusCode::NO_CONTENT.into_response(),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn enable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.enable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "enabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn disable_key(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
match kms.disable_key(&key_id).await {
Ok(true) => json_ok(json!({"status": "disabled"})),
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext_b64 = match require_str(&req, &["Plaintext", "plaintext"], "Missing Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
match kms.encrypt_data(key_id, &plaintext).await {
Ok(ct) => json_ok(json!({
"KeyId": key_id,
"CiphertextBlob": B64.encode(&ct),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["CiphertextBlob", "ciphertext_blob"],
"Missing CiphertextBlob",
) {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
Ok(value) => value,
Err(response) => return response,
};
match kms.decrypt_data(key_id, &ciphertext).await {
Ok(pt) => json_ok(json!({
"KeyId": key_id,
"Plaintext": B64.encode(&pt),
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
generate_data_key_inner(state, body, true).await
}
pub async fn generate_data_key_without_plaintext(
State(state): State<AppState>,
body: Body,
) -> Response {
generate_data_key_inner(state, body, false).await
}
async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext: bool) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
Ok(value) => value,
Err(response) => return response,
};
let num_bytes = req
.get("NumberOfBytes")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if num_bytes < state.config.kms_generate_data_key_min_bytes
|| num_bytes > state.config.kms_generate_data_key_max_bytes
{
return json_err(
StatusCode::BAD_REQUEST,
&format!(
"NumberOfBytes must be {}-{}",
state.config.kms_generate_data_key_min_bytes,
state.config.kms_generate_data_key_max_bytes
),
);
}
match kms.generate_data_key(key_id, num_bytes).await {
Ok((plaintext, wrapped)) => {
let mut value = json!({
"KeyId": key_id,
"CiphertextBlob": B64.encode(&wrapped),
});
if include_plaintext {
value["Plaintext"] = json!(B64.encode(&plaintext));
}
json_ok(value)
}
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn re_encrypt(State(state): State<AppState>, body: Body) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["CiphertextBlob", "ciphertext_blob"],
"CiphertextBlob is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let destination_key_id = match require_str(
&req,
&["DestinationKeyId", "destination_key_id"],
"DestinationKeyId is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
Ok(value) => value,
Err(response) => return response,
};
let keys = kms.list_keys().await;
let mut source_key_id: Option<String> = None;
let mut plaintext: Option<Vec<u8>> = None;
for key in keys {
if !key.enabled {
continue;
}
if let Ok(value) = kms.decrypt_data(&key.key_id, &ciphertext).await {
source_key_id = Some(key.key_id);
plaintext = Some(value);
break;
}
}
let Some(source_key_id) = source_key_id else {
return json_err(
StatusCode::BAD_REQUEST,
"Could not determine source key for CiphertextBlob",
);
};
let plaintext = plaintext.unwrap_or_default();
match kms.encrypt_data(destination_key_id, &plaintext).await {
Ok(new_ciphertext) => json_ok(json!({
"CiphertextBlob": B64.encode(&new_ciphertext),
"SourceKeyId": source_key_id,
"KeyId": destination_key_id,
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
pub async fn generate_random(State(state): State<AppState>, body: Body) -> Response {
if let Err(response) = require_kms(&state) {
return response;
}
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let num_bytes = req
.get("NumberOfBytes")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize;
if num_bytes < state.config.kms_generate_data_key_min_bytes
|| num_bytes > state.config.kms_generate_data_key_max_bytes
{
return json_err(
StatusCode::BAD_REQUEST,
&format!(
"NumberOfBytes must be {}-{}",
state.config.kms_generate_data_key_min_bytes,
state.config.kms_generate_data_key_max_bytes
),
);
}
let mut bytes = vec![0u8; num_bytes];
rand::thread_rng().fill_bytes(&mut bytes);
json_ok(json!({
"Plaintext": B64.encode(bytes),
}))
}
pub async fn client_generate_key(State(state): State<AppState>) -> Response {
let _ = state;
let mut key = [0u8; 32];
rand::thread_rng().fill_bytes(&mut key);
json_ok(json!({
"Key": B64.encode(key),
"Algorithm": "AES-256-GCM",
"KeySize": 32,
}))
}
pub async fn client_encrypt(State(state): State<AppState>, body: Body) -> Response {
let _ = state;
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let plaintext_b64 =
match require_str(&req, &["Plaintext", "plaintext"], "Plaintext is required") {
Ok(value) => value,
Err(response) => return response,
};
let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
Ok(value) => value,
Err(response) => return response,
};
let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
Ok(value) => value,
Err(response) => return response,
};
let key_bytes = match decode_b64(key_b64, "Key") {
Ok(value) => value,
Err(response) => return response,
};
if key_bytes.len() != 32 {
return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
}
let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
Ok(cipher) => cipher,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
};
let mut nonce_bytes = [0u8; 12];
rand::thread_rng().fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
match cipher.encrypt(nonce, plaintext.as_ref()) {
Ok(ciphertext) => json_ok(json!({
"Ciphertext": B64.encode(ciphertext),
"Nonce": B64.encode(nonce_bytes),
"Algorithm": "AES-256-GCM",
})),
Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
}
}
pub async fn client_decrypt(State(state): State<AppState>, body: Body) -> Response {
let _ = state;
let req = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
let ciphertext_b64 = match require_str(
&req,
&["Ciphertext", "ciphertext"],
"Ciphertext is required",
) {
Ok(value) => value,
Err(response) => return response,
};
let nonce_b64 = match require_str(&req, &["Nonce", "nonce"], "Nonce is required") {
Ok(value) => value,
Err(response) => return response,
};
let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
Ok(value) => value,
Err(response) => return response,
};
let ciphertext = match decode_b64(ciphertext_b64, "Ciphertext") {
Ok(value) => value,
Err(response) => return response,
};
let nonce_bytes = match decode_b64(nonce_b64, "Nonce") {
Ok(value) => value,
Err(response) => return response,
};
let key_bytes = match decode_b64(key_b64, "Key") {
Ok(value) => value,
Err(response) => return response,
};
if key_bytes.len() != 32 {
return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
}
if nonce_bytes.len() != 12 {
return json_err(StatusCode::BAD_REQUEST, "Nonce must decode to 12 bytes");
}
let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
Ok(cipher) => cipher,
Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
};
let nonce = Nonce::from_slice(&nonce_bytes);
match cipher.decrypt(nonce, ciphertext.as_ref()) {
Ok(plaintext) => json_ok(json!({
"Plaintext": B64.encode(plaintext),
})),
Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
}
}
pub async fn materials(
State(state): State<AppState>,
axum::extract::Path(key_id): axum::extract::Path<String>,
body: Body,
) -> Response {
let kms = match require_kms(&state) {
Ok(kms) => kms,
Err(response) => return response,
};
let _ = match read_json(body).await {
Ok(req) => req,
Err(response) => return response,
};
match kms.generate_data_key(&key_id, 32).await {
Ok((plaintext, wrapped)) => json_ok(json!({
"PlaintextKey": B64.encode(plaintext),
"EncryptedKey": B64.encode(wrapped),
"KeyId": key_id,
"Algorithm": "AES-256-GCM",
"KeyWrapAlgorithm": "kms",
})),
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
}
}
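The route mounting for these handlers lives in a suppressed diff; as a sketch only, they would wire up roughly like this (paths are assumptions, and the `{key_id}` capture syntax matches the axum 0.8 style used elsewhere in this commit):
use axum::routing::{get, post};
use axum::Router;
fn kms_router(state: AppState) -> Router {
Router::new()
.route("/kms/keys", get(list_keys).post(create_key))
.route("/kms/keys/{key_id}", get(get_key).delete(delete_key))
.route("/kms/keys/{key_id}/enable", post(enable_key))
.route("/kms/keys/{key_id}/disable", post(disable_key))
.route("/kms/encrypt", post(encrypt))
.route("/kms/decrypt", post(decrypt))
.route("/kms/generate-data-key", post(generate_data_key))
.route("/kms/generate-random", post(generate_random))
.with_state(state)
}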

File diff suppressed because it is too large

View File

@@ -0,0 +1,578 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use axum::body::Body;
use axum::http::{HeaderMap, HeaderName, StatusCode};
use axum::response::{IntoResponse, Response};
use base64::Engine;
use bytes::Bytes;
use crc32fast::Hasher;
use duckdb::types::ValueRef;
use duckdb::Connection;
use futures::stream;
use http_body_util::BodyExt;
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
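// Windows-only: explicitly link the Restart Manager library (Rstrtmgr.dll);
// presumably needed by the bundled DuckDB build used below.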
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}
const CHUNK_SIZE: usize = 65_536;
pub async fn post_select_object_content(
state: &AppState,
bucket: &str,
key: &str,
headers: &HeaderMap,
body: Body,
) -> Response {
if let Some(resp) = require_xml_content_type(headers) {
return resp;
}
let body_bytes = match body.collect().await {
Ok(collected) => collected.to_bytes(),
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::MalformedXML,
"Unable to parse XML document",
));
}
};
let request = match parse_select_request(&body_bytes) {
Ok(r) => r,
Err(err) => return s3_error_response(err),
};
let object_path = match state.storage.get_object_path(bucket, key).await {
Ok(path) => path,
Err(_) => {
return s3_error_response(S3Error::new(S3ErrorCode::NoSuchKey, "Object not found"));
}
};
let join_res =
tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
let chunks = match join_res {
Ok(Ok(chunks)) => chunks,
Ok(Err(message)) => {
return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
}
Err(_) => {
return s3_error_response(S3Error::new(
S3ErrorCode::InternalError,
"SelectObjectContent execution failed",
));
}
};
let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
for chunk in chunks {
events.push(Bytes::from(encode_select_event("Records", &chunk)));
}
let stats_payload = build_stats_xml(0, bytes_returned);
events.push(Bytes::from(encode_select_event(
"Stats",
stats_payload.as_bytes(),
)));
events.push(Bytes::from(encode_select_event("End", b"")));
let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
let body = Body::from_stream(stream);
let mut response = (StatusCode::OK, body).into_response();
response.headers_mut().insert(
HeaderName::from_static("content-type"),
"application/octet-stream".parse().unwrap(),
);
response.headers_mut().insert(
HeaderName::from_static("x-amz-request-charged"),
"requester".parse().unwrap(),
);
response
}
#[derive(Clone)]
struct SelectRequest {
expression: String,
input_format: InputFormat,
output_format: OutputFormat,
}
#[derive(Clone)]
enum InputFormat {
Csv(CsvInputConfig),
Json(JsonInputConfig),
Parquet,
}
#[derive(Clone)]
struct CsvInputConfig {
file_header_info: String,
field_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonInputConfig {
json_type: String,
}
#[derive(Clone)]
enum OutputFormat {
Csv(CsvOutputConfig),
Json(JsonOutputConfig),
}
#[derive(Clone)]
struct CsvOutputConfig {
field_delimiter: String,
record_delimiter: String,
quote_character: String,
}
#[derive(Clone)]
struct JsonOutputConfig {
record_delimiter: String,
}
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
let xml = String::from_utf8_lossy(payload);
let doc = roxmltree::Document::parse(&xml)
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
let root = doc.root_element();
if root.tag_name().name() != "SelectObjectContentRequest" {
return Err(S3Error::new(
S3ErrorCode::MalformedXML,
"Root element must be SelectObjectContentRequest",
));
}
let expression = child_text(&root, "Expression")
.filter(|v| !v.is_empty())
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
if !expression_type.eq_ignore_ascii_case("SQL") {
return Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"Only SQL expression type is supported",
));
}
let input_node = child(&root, "InputSerialization").ok_or_else(|| {
S3Error::new(
S3ErrorCode::InvalidRequest,
"InputSerialization is required",
)
})?;
let output_node = child(&root, "OutputSerialization").ok_or_else(|| {
S3Error::new(
S3ErrorCode::InvalidRequest,
"OutputSerialization is required",
)
})?;
let input_format = parse_input_format(&input_node)?;
let output_format = parse_output_format(&output_node)?;
Ok(SelectRequest {
expression,
input_format,
output_format,
})
}
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(InputFormat::Csv(CsvInputConfig {
file_header_info: child_text(&csv_node, "FileHeaderInfo")
.unwrap_or_else(|| "NONE".to_string())
.to_ascii_uppercase(),
field_delimiter: child_text(&csv_node, "FieldDelimiter")
.unwrap_or_else(|| ",".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter")
.unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(InputFormat::Json(JsonInputConfig {
json_type: child_text(&json_node, "Type")
.unwrap_or_else(|| "DOCUMENT".to_string())
.to_ascii_uppercase(),
}));
}
if child(node, "Parquet").is_some() {
return Ok(InputFormat::Parquet);
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"InputSerialization must specify CSV, JSON, or Parquet",
))
}
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
if let Some(csv_node) = child(node, "CSV") {
return Ok(OutputFormat::Csv(CsvOutputConfig {
field_delimiter: child_text(&csv_node, "FieldDelimiter")
.unwrap_or_else(|| ",".to_string()),
record_delimiter: child_text(&csv_node, "RecordDelimiter")
.unwrap_or_else(|| "\n".to_string()),
quote_character: child_text(&csv_node, "QuoteCharacter")
.unwrap_or_else(|| "\"".to_string()),
}));
}
if let Some(json_node) = child(node, "JSON") {
return Ok(OutputFormat::Json(JsonOutputConfig {
record_delimiter: child_text(&json_node, "RecordDelimiter")
.unwrap_or_else(|| "\n".to_string()),
}));
}
Err(S3Error::new(
S3ErrorCode::InvalidRequest,
"OutputSerialization must specify CSV or JSON",
))
}
fn child<'a, 'input>(
node: &'a roxmltree::Node<'a, 'input>,
name: &str,
) -> Option<roxmltree::Node<'a, 'input>> {
node.children()
.find(|n| n.is_element() && n.tag_name().name() == name)
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
child(node, name)
.and_then(|n| n.text())
.map(|s| s.to_string())
}
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
let conn =
Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;
load_input_table(&conn, &path, &request.input_format)?;
let expression = request
.expression
.replace("s3object", "data")
.replace("S3Object", "data");
let mut stmt = conn
.prepare(&expression)
.map_err(|e| format!("SQL execution error: {}", e))?;
let mut rows = stmt
.query([])
.map_err(|e| format!("SQL execution error: {}", e))?;
let stmt_ref = rows
.as_ref()
.ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
let col_count = stmt_ref.column_count();
let mut columns: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let name = stmt_ref
.column_name(i)
.map(|s| s.to_string())
.unwrap_or_else(|_| format!("_{}", i));
columns.push(name);
}
match request.output_format {
OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
}
}
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
let path_str = path.to_string_lossy().replace('\\', "/");
match input {
InputFormat::Csv(cfg) => {
let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
let quote = normalize_single_char(&cfg.quote_character, '"');
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
sql_escape(&path_str),
if header { "true" } else { "false" },
sql_escape(&delimiter),
sql_escape(&quote)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
}
InputFormat::Json(cfg) => {
let format = if cfg.json_type == "LINES" {
"newline_delimited"
} else {
"array"
};
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
sql_escape(&path_str),
format
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
}
InputFormat::Parquet => {
let sql = format!(
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
sql_escape(&path_str)
);
conn.execute_batch(&sql)
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
}
}
Ok(())
}
fn sql_escape(value: &str) -> String {
value.replace('\'', "''")
}
fn normalize_single_char(value: &str, default_char: char) -> String {
value.chars().next().unwrap_or(default_char).to_string()
}
fn collect_csv_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
cfg: CsvOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let delimiter = cfg.field_delimiter;
let record_delimiter = cfg.record_delimiter;
let quote = cfg.quote_character;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows
.next()
.map_err(|e| format!("SQL execution error: {}", e))?
{
let mut fields: Vec<String> = Vec::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
if matches!(value, ValueRef::Null) {
fields.push(String::new());
continue;
}
let mut text = value_ref_to_string(value);
if text.contains(&delimiter)
|| text.contains(&quote)
|| text.contains(&record_delimiter)
{
text = text.replace(&quote, &(quote.clone() + &quote));
text = format!("{}{}{}", quote, text, quote);
}
fields.push(text);
}
buffer.push_str(&fields.join(&delimiter));
buffer.push_str(&record_delimiter);
while buffer.len() >= CHUNK_SIZE {
// String::split_off panics mid-UTF-8; advance to a char boundary first.
let mut split_at = CHUNK_SIZE;
while !buffer.is_char_boundary(split_at) {
split_at += 1;
}
let rest = buffer.split_off(split_at);
chunks.push(buffer.into_bytes());
buffer = rest;
}
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn collect_json_chunks(
rows: &mut duckdb::Rows<'_>,
col_count: usize,
columns: &[String],
cfg: JsonOutputConfig,
) -> Result<Vec<Vec<u8>>, String> {
let record_delimiter = cfg.record_delimiter;
let mut chunks: Vec<Vec<u8>> = Vec::new();
let mut buffer = String::new();
while let Some(row) = rows
.next()
.map_err(|e| format!("SQL execution error: {}", e))?
{
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
for i in 0..col_count {
let value = row
.get_ref(i)
.map_err(|e| format!("SQL execution error: {}", e))?;
let key = columns.get(i).cloned().unwrap_or_else(|| format!("_{}", i));
record.insert(key, value_ref_to_json(value));
}
let line = serde_json::to_string(&record)
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
buffer.push_str(&line);
buffer.push_str(&record_delimiter);
while buffer.len() >= CHUNK_SIZE {
// String::split_off panics mid-UTF-8; advance to a char boundary first.
let mut split_at = CHUNK_SIZE;
while !buffer.is_char_boundary(split_at) {
split_at += 1;
}
let rest = buffer.split_off(split_at);
chunks.push(buffer.into_bytes());
buffer = rest;
}
}
if !buffer.is_empty() {
chunks.push(buffer.into_bytes());
}
Ok(chunks)
}
fn value_ref_to_string(value: ValueRef<'_>) -> String {
match value {
ValueRef::Null => String::new(),
ValueRef::Boolean(v) => v.to_string(),
ValueRef::TinyInt(v) => v.to_string(),
ValueRef::SmallInt(v) => v.to_string(),
ValueRef::Int(v) => v.to_string(),
ValueRef::BigInt(v) => v.to_string(),
ValueRef::UTinyInt(v) => v.to_string(),
ValueRef::USmallInt(v) => v.to_string(),
ValueRef::UInt(v) => v.to_string(),
ValueRef::UBigInt(v) => v.to_string(),
ValueRef::Float(v) => v.to_string(),
ValueRef::Double(v) => v.to_string(),
ValueRef::Decimal(v) => v.to_string(),
ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
_ => format!("{:?}", value),
}
}
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
match value {
ValueRef::Null => serde_json::Value::Null,
ValueRef::Boolean(v) => serde_json::Value::Bool(v),
ValueRef::TinyInt(v) => serde_json::json!(v),
ValueRef::SmallInt(v) => serde_json::json!(v),
ValueRef::Int(v) => serde_json::json!(v),
ValueRef::BigInt(v) => serde_json::json!(v),
ValueRef::UTinyInt(v) => serde_json::json!(v),
ValueRef::USmallInt(v) => serde_json::json!(v),
ValueRef::UInt(v) => serde_json::json!(v),
ValueRef::UBigInt(v) => serde_json::json!(v),
ValueRef::Float(v) => serde_json::json!(v),
ValueRef::Double(v) => serde_json::json!(v),
ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
ValueRef::Blob(v) => {
serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v))
}
_ => serde_json::Value::String(format!("{:?}", value)),
}
}
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
let value = headers
.get("content-type")
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.trim();
if value.is_empty() {
return None;
}
let lowered = value.to_ascii_lowercase();
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
return None;
}
Some(s3_error_response(S3Error::new(
S3ErrorCode::InvalidRequest,
"Content-Type must be application/xml or text/xml",
)))
}
fn s3_error_response(err: S3Error) -> Response {
let status =
StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let resource = if err.resource.is_empty() {
"/".to_string()
} else {
err.resource.clone()
};
let body = err
.with_resource(resource)
.with_request_id(uuid::Uuid::new_v4().simple().to_string())
.to_xml();
(status, [("content-type", "application/xml")], body).into_response()
}
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
format!(
"<Stats><BytesScanned>{}</BytesScanned><BytesProcessed>{}</BytesProcessed><BytesReturned>{}</BytesReturned></Stats>",
bytes_scanned,
bytes_scanned,
bytes_returned
)
}
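// Frame layout follows the AWS event-stream encoding: 4-byte total length,
// 4-byte headers length, 4-byte CRC32 of that 8-byte prelude, then headers,
// payload, and a trailing CRC32 over everything preceding it.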
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
let mut headers = Vec::new();
headers.extend(encode_select_header(":event-type", event_type));
if event_type == "Records" {
headers.extend(encode_select_header(
":content-type",
"application/octet-stream",
));
} else if event_type == "Stats" {
headers.extend(encode_select_header(":content-type", "text/xml"));
}
headers.extend(encode_select_header(":message-type", "event"));
let headers_len = headers.len() as u32;
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
let mut message = Vec::with_capacity(total_len);
let mut prelude = Vec::with_capacity(8);
prelude.extend((total_len as u32).to_be_bytes());
prelude.extend(headers_len.to_be_bytes());
let prelude_crc = crc32(&prelude);
message.extend(prelude);
message.extend(prelude_crc.to_be_bytes());
message.extend(headers);
message.extend(payload);
let msg_crc = crc32(&message);
message.extend(msg_crc.to_be_bytes());
message
}
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
let name_bytes = name.as_bytes();
let value_bytes = value.as_bytes();
let mut header = Vec::with_capacity(1 + name_bytes.len() + 1 + 2 + value_bytes.len());
header.push(name_bytes.len() as u8);
header.extend(name_bytes);
header.push(7);
header.extend((value_bytes.len() as u16).to_be_bytes());
header.extend(value_bytes);
header
}
fn crc32(data: &[u8]) -> u32 {
let mut hasher = Hasher::new();
hasher.update(data);
hasher.finalize()
}
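A sketch unit test for the request parser above, in the same style as the other test modules in this commit (XML values are illustrative):
#[cfg(test)]
mod select_request_tests {
use super::*;
#[test]
fn parses_minimal_request() {
// Minimal CSV-in / JSON-out request; parser defaults fill the rest.
let xml = br#"<SelectObjectContentRequest>
<Expression>SELECT * FROM S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization><CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV></InputSerialization>
<OutputSerialization><JSON/></OutputSerialization>
</SelectObjectContentRequest>"#;
let req = parse_select_request(xml).expect("request should parse");
assert_eq!(req.expression, "SELECT * FROM S3Object");
assert!(matches!(req.input_format, InputFormat::Csv(_)));
assert!(matches!(req.output_format, OutputFormat::Json(_)));
}
}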

View File

@@ -0,0 +1,236 @@
use std::collections::HashMap;
use std::error::Error as StdError;
use axum::extract::{Extension, Form, State};
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::{IntoResponse, Redirect, Response};
use tera::Context;
use crate::middleware::session::SessionHandle;
use crate::session::FlashMessage;
use crate::state::AppState;
pub async fn login_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
if session.read(|s| s.is_authenticated()) {
return Redirect::to("/ui/buckets").into_response();
}
let mut ctx = base_context(&session, None);
let flashed = session.write(|s| s.take_flash());
inject_flash(&mut ctx, flashed);
render(&state, "login.html", &ctx)
}
#[derive(serde::Deserialize)]
pub struct LoginForm {
pub access_key: String,
pub secret_key: String,
#[serde(default)]
pub csrf_token: String,
#[serde(default)]
pub next: Option<String>,
}
pub async fn login_submit(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
Form(form): Form<LoginForm>,
) -> Response {
let access_key = form.access_key.trim();
let secret_key = form.secret_key.trim();
match state.iam.get_secret_key(access_key) {
Some(expected) if constant_time_eq_str(&expected, secret_key) => {
let display = state
.iam
.get_user(access_key)
.await
.and_then(|v| {
v.get("display_name")
.and_then(|d| d.as_str())
.map(|s| s.to_string())
})
.unwrap_or_else(|| access_key.to_string());
session.write(|s| {
s.user_id = Some(access_key.to_string());
s.display_name = Some(display);
s.rotate_csrf();
s.push_flash("success", "Signed in successfully.");
});
let next = form
.next
.as_deref()
.filter(|n| is_allowed_redirect(n, &state.config.allowed_redirect_hosts))
.unwrap_or("/ui/buckets")
.to_string();
Redirect::to(&next).into_response()
}
_ => {
session.write(|s| {
s.push_flash("danger", "Invalid access key or secret key.");
});
Redirect::to("/login").into_response()
}
}
}
fn is_allowed_redirect(target: &str, allowed_hosts: &[String]) -> bool {
if target == "/ui" || target.starts_with("/ui/") {
return true;
}
let Some(rest) = target
.strip_prefix("https://")
.or_else(|| target.strip_prefix("http://"))
else {
return false;
};
let host = rest
.split('/')
.next()
.unwrap_or_default()
.split('@')
.last()
.unwrap_or_default()
.split(':')
.next()
.unwrap_or_default()
.to_ascii_lowercase();
allowed_hosts
.iter()
.any(|allowed| allowed.eq_ignore_ascii_case(&host))
}
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
session.write(|s| {
s.user_id = None;
s.display_name = None;
s.flash.clear();
s.rotate_csrf();
s.push_flash("info", "Signed out.");
});
Redirect::to("/login").into_response()
}
pub async fn csrf_error_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
let ctx = base_context(&session, None);
let mut resp = render(&state, "csrf_error.html", &ctx);
*resp.status_mut() = StatusCode::FORBIDDEN;
resp
}
pub async fn root_redirect() -> Response {
Redirect::to("/ui/buckets").into_response()
}
pub async fn not_found_page(
State(state): State<AppState>,
Extension(session): Extension<SessionHandle>,
) -> Response {
let ctx = base_context(&session, None);
let mut resp = render(&state, "404.html", &ctx);
*resp.status_mut() = StatusCode::NOT_FOUND;
resp
}
pub async fn require_login(
Extension(session): Extension<SessionHandle>,
req: axum::extract::Request,
next: axum::middleware::Next,
) -> Response {
if session.read(|s| s.is_authenticated()) {
return next.run(req).await;
}
let path = req.uri().path().to_string();
let query = req
.uri()
.query()
.map(|q| format!("?{}", q))
.unwrap_or_default();
let next_url = format!("{}{}", path, query);
let encoded =
percent_encoding::utf8_percent_encode(&next_url, percent_encoding::NON_ALPHANUMERIC)
.to_string();
let target = format!("/login?next={}", encoded);
Redirect::to(&target).into_response()
}
pub fn render(state: &AppState, template: &str, ctx: &Context) -> Response {
let engine = match &state.templates {
Some(e) => e,
None => {
return (
StatusCode::INTERNAL_SERVER_ERROR,
"Templates not configured",
)
.into_response();
}
};
match engine.render(template, ctx) {
Ok(html) => {
let mut headers = HeaderMap::new();
headers.insert(
header::CONTENT_TYPE,
"text/html; charset=utf-8".parse().unwrap(),
);
(StatusCode::OK, headers, html).into_response()
}
Err(e) => {
let mut detail = format!("{}", e);
let mut src = StdError::source(&e);
while let Some(s) = src {
detail.push_str(" | ");
detail.push_str(&s.to_string());
src = s.source();
}
tracing::error!("Template render failed ({}): {}", template, detail);
let fallback_ctx = Context::new();
let body = if template != "500.html" {
engine
.render("500.html", &fallback_ctx)
.unwrap_or_else(|_| "Internal Server Error".to_string())
} else {
"Internal Server Error".to_string()
};
let mut headers = HeaderMap::new();
headers.insert(
header::CONTENT_TYPE,
"text/html; charset=utf-8".parse().unwrap(),
);
(StatusCode::INTERNAL_SERVER_ERROR, headers, body).into_response()
}
}
}
pub fn base_context(session: &SessionHandle, endpoint: Option<&str>) -> Context {
let mut ctx = Context::new();
let snapshot = session.snapshot();
ctx.insert("csrf_token_value", &snapshot.csrf_token);
ctx.insert("is_authenticated", &snapshot.user_id.is_some());
ctx.insert("current_user", &snapshot.user_id);
ctx.insert("current_user_display_name", &snapshot.display_name);
ctx.insert("current_endpoint", &endpoint.unwrap_or(""));
ctx.insert("request_args", &HashMap::<String, String>::new());
ctx.insert("null", &serde_json::Value::Null);
ctx.insert("none", &serde_json::Value::Null);
ctx
}
pub fn inject_flash(ctx: &mut Context, flashed: Vec<FlashMessage>) {
ctx.insert("flashed_messages", &flashed);
}
fn constant_time_eq_str(a: &str, b: &str) -> bool {
if a.len() != b.len() {
return false;
}
subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}
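// Usage sketch (added illustration, not part of the original commit): the
// length check above short-circuits, so timing can reveal the length of the
// compared strings, while subtle's branch-free comparison hides their content.
#[cfg(test)]
mod constant_time_tests {
    use super::constant_time_eq_str;

    #[test]
    fn compares_tokens_without_leaking_content() {
        assert!(constant_time_eq_str("tok-123", "tok-123"));
        assert!(!constant_time_eq_str("tok-123", "tok-124"));
        assert!(!constant_time_eq_str("short", "longer-token"));
    }
}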

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,645 @@
pub mod config;
pub mod handlers;
pub mod middleware;
pub mod services;
pub mod session;
pub mod state;
pub mod stores;
pub mod templates;
use axum::Router;
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
pub fn create_ui_router(state: state::AppState) -> Router {
use axum::routing::{delete, get, post, put};
use handlers::ui;
use handlers::ui_api;
use handlers::ui_pages;
let protected = Router::new()
.route("/", get(ui::root_redirect))
.route("/ui", get(ui::root_redirect))
.route("/ui/", get(ui::root_redirect))
.route(
"/ui/buckets",
get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
)
.route("/ui/buckets/create", post(ui_pages::create_bucket))
.route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
.route(
"/ui/buckets/{bucket_name}/delete",
post(ui_pages::delete_bucket),
)
.route(
"/ui/buckets/{bucket_name}/versioning",
post(ui_pages::update_bucket_versioning),
)
.route(
"/ui/buckets/{bucket_name}/quota",
post(ui_pages::update_bucket_quota),
)
.route(
"/ui/buckets/{bucket_name}/encryption",
post(ui_pages::update_bucket_encryption),
)
.route(
"/ui/buckets/{bucket_name}/policy",
post(ui_pages::update_bucket_policy),
)
.route(
"/ui/buckets/{bucket_name}/replication",
post(ui_pages::update_bucket_replication),
)
.route(
"/ui/buckets/{bucket_name}/website",
post(ui_pages::update_bucket_website),
)
.route(
"/ui/buckets/{bucket_name}/upload",
post(ui_api::upload_object),
)
.route(
"/ui/buckets/{bucket_name}/multipart/initiate",
post(ui_api::initiate_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
put(ui_api::upload_multipart_part),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
post(ui_api::complete_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/multipart/{upload_id}",
delete(ui_api::abort_multipart_upload),
)
.route(
"/ui/buckets/{bucket_name}/objects",
get(ui_api::list_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/stream",
get(ui_api::stream_bucket_objects),
)
.route(
"/ui/buckets/{bucket_name}/folders",
get(ui_api::list_bucket_folders),
)
.route(
"/ui/buckets/{bucket_name}/copy-targets",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/list-for-copy",
get(ui_api::list_copy_targets),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-delete",
post(ui_api::bulk_delete_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/bulk-download",
post(ui_api::bulk_download_objects),
)
.route(
"/ui/buckets/{bucket_name}/objects/{*rest}",
get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
)
.route(
"/ui/buckets/{bucket_name}/acl",
get(ui_api::bucket_acl).post(ui_api::update_bucket_acl),
)
.route(
"/ui/buckets/{bucket_name}/cors",
get(ui_api::bucket_cors).post(ui_api::update_bucket_cors),
)
.route(
"/ui/buckets/{bucket_name}/lifecycle",
get(ui_api::bucket_lifecycle).post(ui_api::update_bucket_lifecycle),
)
.route(
"/ui/buckets/{bucket_name}/lifecycle/history",
get(ui_api::lifecycle_history),
)
.route(
"/ui/buckets/{bucket_name}/replication/status",
get(ui_api::replication_status),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures",
get(ui_api::replication_failures).delete(ui_api::clear_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/retry",
post(ui_api::retry_replication_failure),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/retry-all",
post(ui_api::retry_all_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/dismiss",
delete(ui_api::dismiss_replication_failure),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/clear",
delete(ui_api::clear_replication_failures),
)
.route(
"/ui/buckets/{bucket_name}/replication/failures/{*rest}",
post(ui_api::retry_replication_failure_path)
.delete(ui_api::dismiss_replication_failure_path),
)
.route(
"/ui/buckets/{bucket_name}/bulk-delete",
post(ui_api::bulk_delete_objects),
)
.route(
"/ui/buckets/{bucket_name}/bulk-download",
post(ui_api::bulk_download_objects),
)
.route(
"/ui/buckets/{bucket_name}/archived",
get(ui_api::archived_objects),
)
.route(
"/ui/buckets/{bucket_name}/archived/{*rest}",
post(ui_api::archived_post_dispatch),
)
.route("/ui/iam", get(ui_pages::iam_dashboard))
.route("/ui/iam/users", post(ui_pages::create_iam_user))
.route("/ui/iam/users/{user_id}", post(ui_pages::update_iam_user))
.route(
"/ui/iam/users/{user_id}/delete",
post(ui_pages::delete_iam_user),
)
.route(
"/ui/iam/users/{user_id}/update",
post(ui_pages::update_iam_user),
)
.route(
"/ui/iam/users/{user_id}/policies",
post(ui_pages::update_iam_policies),
)
.route(
"/ui/iam/users/{user_id}/expiry",
post(ui_pages::update_iam_expiry),
)
.route(
"/ui/iam/users/{user_id}/rotate-secret",
post(ui_pages::rotate_iam_secret),
)
.route(
"/ui/iam/users/{user_id}/rotate",
post(ui_pages::rotate_iam_secret),
)
.route("/ui/connections/create", post(ui_pages::create_connection))
.route("/ui/connections/test", post(ui_api::test_connection))
.route(
"/ui/connections/{connection_id}",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/update",
post(ui_pages::update_connection),
)
.route(
"/ui/connections/{connection_id}/delete",
post(ui_pages::delete_connection),
)
.route(
"/ui/connections/{connection_id}/health",
get(ui_api::connection_health),
)
.route("/ui/sites", get(ui_pages::sites_dashboard))
.route("/ui/sites/local", post(ui_pages::update_local_site))
.route("/ui/sites/peers", post(ui_pages::add_peer_site))
.route(
"/ui/sites/peers/{site_id}/update",
post(ui_pages::update_peer_site),
)
.route(
"/ui/sites/peers/{site_id}/delete",
post(ui_pages::delete_peer_site),
)
.route("/ui/sites/peers/{site_id}/health", get(ui_api::peer_health))
.route(
"/ui/sites/peers/{site_id}/sync-stats",
get(ui_api::peer_sync_stats),
)
.route(
"/ui/sites/peers/{site_id}/bidirectional-status",
get(ui_api::peer_bidirectional_status),
)
.route(
"/ui/connections",
get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
)
.route("/ui/metrics", get(ui_pages::metrics_dashboard))
.route(
"/ui/metrics/settings",
get(ui_api::metrics_settings).put(ui_api::update_metrics_settings),
)
.route("/ui/metrics/api", get(ui_api::metrics_api))
.route("/ui/metrics/history", get(ui_api::metrics_history))
.route("/ui/metrics/operations", get(ui_api::metrics_operations))
.route(
"/ui/metrics/operations/history",
get(ui_api::metrics_operations_history),
)
.route("/ui/system", get(ui_pages::system_dashboard))
.route("/ui/system/gc/status", get(ui_api::gc_status_ui))
.route("/ui/system/gc/run", post(ui_api::gc_run_ui))
.route("/ui/system/gc/history", get(ui_api::gc_history_ui))
.route(
"/ui/system/integrity/status",
get(ui_api::integrity_status_ui),
)
.route("/ui/system/integrity/run", post(ui_api::integrity_run_ui))
.route(
"/ui/system/integrity/history",
get(ui_api::integrity_history_ui),
)
.route(
"/ui/website-domains",
get(ui_pages::website_domains_dashboard),
)
.route(
"/ui/website-domains/create",
post(ui_pages::create_website_domain),
)
.route(
"/ui/website-domains/{domain}",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/update",
post(ui_pages::update_website_domain),
)
.route(
"/ui/website-domains/{domain}/delete",
post(ui_pages::delete_website_domain),
)
.route("/ui/replication/new", get(ui_pages::replication_wizard))
.route(
"/ui/replication/create",
post(ui_pages::create_peer_replication_rules_from_query),
)
.route(
"/ui/sites/peers/{site_id}/replication-rules",
post(ui_pages::create_peer_replication_rules),
)
.route("/ui/docs", get(ui_pages::docs_page))
.layer(axum::middleware::from_fn(ui::require_login));
let public = Router::new()
.route("/login", get(ui::login_page).post(ui::login_submit))
.route("/logout", post(ui::logout).get(ui::logout))
.route("/csrf-error", get(ui::csrf_error_page));
let session_state = middleware::SessionLayerState {
store: state.sessions.clone(),
secure: false,
};
let static_service = tower_http::services::ServeDir::new(&state.config.static_dir);
protected
.merge(public)
.fallback(ui::not_found_page)
.layer(axum::middleware::from_fn(middleware::csrf_layer))
.layer(axum::middleware::from_fn_with_state(
session_state,
middleware::session_layer,
))
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::ui_metrics_layer,
))
.with_state(state)
.nest_service("/static", static_service)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(tower_http::compression::CompressionLayer::new())
}
pub fn create_router(state: state::AppState) -> Router {
let default_rate_limit = middleware::RateLimitLayerState::new(
state.config.ratelimit_default,
state.config.num_trusted_proxies,
);
let admin_rate_limit = middleware::RateLimitLayerState::new(
state.config.ratelimit_admin,
state.config.num_trusted_proxies,
);
let mut api_router = Router::new()
.route("/myfsio/health", axum::routing::get(handlers::health_check))
.route("/", axum::routing::get(handlers::list_buckets))
.route(
"/{bucket}",
axum::routing::put(handlers::create_bucket)
.get(handlers::get_bucket)
.delete(handlers::delete_bucket)
.head(handlers::head_bucket)
.post(handlers::post_bucket),
)
.route(
"/{bucket}/",
axum::routing::put(handlers::create_bucket)
.get(handlers::get_bucket)
.delete(handlers::delete_bucket)
.head(handlers::head_bucket)
.post(handlers::post_bucket),
)
.route(
"/{bucket}/{*key}",
axum::routing::put(handlers::put_object)
.get(handlers::get_object)
.delete(handlers::delete_object)
.head(handlers::head_object)
.post(handlers::post_object),
);
if state.config.kms_enabled {
api_router = api_router
.route(
"/kms/keys",
axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
)
.route(
"/kms/keys/{key_id}",
axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key),
)
.route(
"/kms/keys/{key_id}/enable",
axum::routing::post(handlers::kms::enable_key),
)
.route(
"/kms/keys/{key_id}/disable",
axum::routing::post(handlers::kms::disable_key),
)
.route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
.route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
.route(
"/kms/generate-data-key",
axum::routing::post(handlers::kms::generate_data_key),
)
.route(
"/kms/generate-data-key-without-plaintext",
axum::routing::post(handlers::kms::generate_data_key_without_plaintext),
)
.route(
"/kms/re-encrypt",
axum::routing::post(handlers::kms::re_encrypt),
)
.route(
"/kms/generate-random",
axum::routing::post(handlers::kms::generate_random),
)
.route(
"/kms/client/generate-key",
axum::routing::post(handlers::kms::client_generate_key),
)
.route(
"/kms/client/encrypt",
axum::routing::post(handlers::kms::client_encrypt),
)
.route(
"/kms/client/decrypt",
axum::routing::post(handlers::kms::client_decrypt),
)
.route(
"/kms/materials/{key_id}",
axum::routing::post(handlers::kms::materials),
);
}
api_router = api_router
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn_with_state(
default_rate_limit,
middleware::rate_limit_layer,
));
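    // Ordering note (clarifying comment, not in the original commit): axum
    // layers wrap outside-in, so the layer added last runs first. Requests
    // therefore hit the rate limiter before SigV4 authentication, which keeps
    // unauthenticated floods from paying the signature-verification cost.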
let admin_router = Router::new()
.route(
"/admin/site",
axum::routing::get(handlers::admin::get_local_site)
.put(handlers::admin::update_local_site),
)
.route(
"/admin/sites",
axum::routing::get(handlers::admin::list_all_sites)
.post(handlers::admin::register_peer_site),
)
.route(
"/admin/sites/{site_id}",
axum::routing::get(handlers::admin::get_peer_site)
.put(handlers::admin::update_peer_site)
.delete(handlers::admin::delete_peer_site),
)
.route(
"/admin/sites/{site_id}/health",
axum::routing::get(handlers::admin::check_peer_health)
.post(handlers::admin::check_peer_health),
)
.route(
"/admin/sites/{site_id}/bidirectional-status",
axum::routing::get(handlers::admin::check_bidirectional_status),
)
.route(
"/admin/topology",
axum::routing::get(handlers::admin::get_topology),
)
.route(
"/admin/site/local",
axum::routing::get(handlers::admin::get_local_site)
.put(handlers::admin::update_local_site),
)
.route(
"/admin/site/all",
axum::routing::get(handlers::admin::list_all_sites),
)
.route(
"/admin/site/peers",
axum::routing::post(handlers::admin::register_peer_site),
)
.route(
"/admin/site/peers/{site_id}",
axum::routing::get(handlers::admin::get_peer_site)
.put(handlers::admin::update_peer_site)
.delete(handlers::admin::delete_peer_site),
)
.route(
"/admin/site/peers/{site_id}/health",
axum::routing::post(handlers::admin::check_peer_health),
)
.route(
"/admin/site/topology",
axum::routing::get(handlers::admin::get_topology),
)
.route(
"/admin/site/peers/{site_id}/bidirectional-status",
axum::routing::get(handlers::admin::check_bidirectional_status),
)
.route(
"/admin/iam/users",
axum::routing::get(handlers::admin::iam_list_users),
)
.route(
"/admin/iam/users/{identifier}",
axum::routing::get(handlers::admin::iam_get_user),
)
.route(
"/admin/iam/users/{identifier}/policies",
axum::routing::get(handlers::admin::iam_get_user_policies),
)
.route(
"/admin/iam/users/{identifier}/access-keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys",
axum::routing::post(handlers::admin::iam_create_access_key),
)
.route(
"/admin/iam/users/{identifier}/access-keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/keys/{access_key}",
axum::routing::delete(handlers::admin::iam_delete_access_key),
)
.route(
"/admin/iam/users/{identifier}/disable",
axum::routing::post(handlers::admin::iam_disable_user),
)
.route(
"/admin/iam/users/{identifier}/enable",
axum::routing::post(handlers::admin::iam_enable_user),
)
.route(
"/admin/website-domains",
axum::routing::get(handlers::admin::list_website_domains)
.post(handlers::admin::create_website_domain),
)
.route(
"/admin/website-domains/{domain}",
axum::routing::get(handlers::admin::get_website_domain)
.put(handlers::admin::update_website_domain)
.delete(handlers::admin::delete_website_domain),
)
.route(
"/admin/gc/status",
axum::routing::get(handlers::admin::gc_status),
)
.route(
"/admin/gc/run",
axum::routing::post(handlers::admin::gc_run),
)
.route(
"/admin/gc/history",
axum::routing::get(handlers::admin::gc_history),
)
.route(
"/admin/integrity/status",
axum::routing::get(handlers::admin::integrity_status),
)
.route(
"/admin/integrity/run",
axum::routing::post(handlers::admin::integrity_run),
)
.route(
"/admin/integrity/history",
axum::routing::get(handlers::admin::integrity_history),
)
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::auth_layer,
))
.layer(axum::middleware::from_fn_with_state(
admin_rate_limit,
middleware::rate_limit_layer,
));
api_router
.merge(admin_router)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(cors_layer(&state.config))
.layer(tower_http::compression::CompressionLayer::new())
.with_state(state)
}
fn cors_layer(config: &config::ServerConfig) -> tower_http::cors::CorsLayer {
use axum::http::{HeaderName, HeaderValue, Method};
use tower_http::cors::{Any, CorsLayer};
let mut layer = CorsLayer::new();
if config.cors_origins.iter().any(|origin| origin == "*") {
layer = layer.allow_origin(Any);
} else {
let origins = config
.cors_origins
.iter()
.filter_map(|origin| HeaderValue::from_str(origin).ok())
.collect::<Vec<_>>();
if !origins.is_empty() {
layer = layer.allow_origin(origins);
}
}
let methods = config
.cors_methods
.iter()
.filter_map(|method| method.parse::<Method>().ok())
.collect::<Vec<_>>();
if !methods.is_empty() {
layer = layer.allow_methods(methods);
}
if config.cors_allow_headers.iter().any(|header| header == "*") {
layer = layer.allow_headers(Any);
} else {
let headers = config
.cors_allow_headers
.iter()
.filter_map(|header| header.parse::<HeaderName>().ok())
.collect::<Vec<_>>();
if !headers.is_empty() {
layer = layer.allow_headers(headers);
}
}
if config
.cors_expose_headers
.iter()
.any(|header| header == "*")
{
layer = layer.expose_headers(Any);
} else {
let headers = config
.cors_expose_headers
.iter()
.filter_map(|header| header.parse::<HeaderName>().ok())
.collect::<Vec<_>>();
if !headers.is_empty() {
layer = layer.expose_headers(headers);
}
}
layer
}
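// Illustrative sketch (added, not part of the original commit) of the
// wildcard-versus-list rule the builder above applies: a literal "*" enables
// Any, while explicit values are parsed individually and invalid entries are
// silently dropped.
#[cfg(test)]
mod cors_tests {
    use axum::http::HeaderValue;

    #[test]
    fn wildcard_wins_and_invalid_origins_are_dropped() {
        let origins = vec!["*".to_string(), "https://a.example".to_string()];
        assert!(origins.iter().any(|o| o == "*"));
        // Invalid header values (here: an embedded NUL) are filtered out.
        let parsed: Vec<HeaderValue> = ["https://a.example", "bad\u{0}origin"]
            .iter()
            .filter_map(|o| HeaderValue::from_str(o).ok())
            .collect();
        assert_eq!(parsed.len(), 1);
    }
}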

View File

@@ -0,0 +1,547 @@
use clap::{Parser, Subcommand};
use myfsio_server::config::ServerConfig;
use myfsio_server::state::AppState;
#[derive(Parser)]
#[command(
name = "myfsio",
version,
about = "MyFSIO S3-compatible storage engine"
)]
struct Cli {
#[arg(long, help = "Validate configuration and exit")]
check_config: bool,
#[arg(long, help = "Show configuration summary and exit")]
show_config: bool,
#[arg(long, help = "Reset admin credentials and exit")]
reset_cred: bool,
#[command(subcommand)]
command: Option<Command>,
}
#[derive(Subcommand)]
enum Command {
Serve,
Version,
}
#[tokio::main]
async fn main() {
load_env_files();
init_tracing();
let cli = Cli::parse();
let config = ServerConfig::from_env();
if !config
.ratelimit_storage_uri
.eq_ignore_ascii_case("memory://")
{
tracing::warn!(
"RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory rate limits",
config.ratelimit_storage_uri
);
}
if cli.reset_cred {
reset_admin_credentials(&config);
return;
}
if cli.check_config || cli.show_config {
print_config_summary(&config);
if cli.check_config {
let issues = validate_config(&config);
for issue in &issues {
println!("{issue}");
}
if issues.iter().any(|issue| issue.starts_with("CRITICAL:")) {
std::process::exit(1);
}
}
return;
}
match cli.command.unwrap_or(Command::Serve) {
Command::Version => {
println!("myfsio {}", env!("CARGO_PKG_VERSION"));
return;
}
Command::Serve => {}
}
ensure_iam_bootstrap(&config);
let bind_addr = config.bind_addr;
let ui_bind_addr = config.ui_bind_addr;
tracing::info!("MyFSIO Rust Engine starting — API on {}", bind_addr);
if config.ui_enabled {
tracing::info!("UI will bind on {}", ui_bind_addr);
}
tracing::info!("Storage root: {}", config.storage_root.display());
tracing::info!("Region: {}", config.region);
tracing::info!(
"Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics History: {}, Operation Metrics: {}, UI: {}",
config.encryption_enabled,
config.kms_enabled,
config.gc_enabled,
config.lifecycle_enabled,
config.integrity_enabled,
config.metrics_history_enabled,
config.metrics_enabled,
config.ui_enabled
);
let state = if config.encryption_enabled || config.kms_enabled {
AppState::new_with_encryption(config.clone()).await
} else {
AppState::new(config.clone())
};
let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();
if let Some(ref gc) = state.gc {
bg_handles.push(gc.clone().start_background());
tracing::info!("GC background service started");
}
if let Some(ref integrity) = state.integrity {
bg_handles.push(integrity.clone().start_background());
tracing::info!("Integrity checker background service started");
}
if let Some(ref metrics) = state.metrics {
bg_handles.push(metrics.clone().start_background());
tracing::info!("Metrics collector background service started");
}
if let Some(ref system_metrics) = state.system_metrics {
bg_handles.push(system_metrics.clone().start_background());
tracing::info!("System metrics history collector started");
}
if config.lifecycle_enabled {
let lifecycle =
std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
state.storage.clone(),
config.storage_root.clone(),
myfsio_server::services::lifecycle::LifecycleConfig {
interval_seconds: 3600,
max_history_per_bucket: config.lifecycle_max_history_per_bucket,
},
));
bg_handles.push(lifecycle.start_background());
tracing::info!("Lifecycle manager background service started");
}
if let Some(ref site_sync) = state.site_sync {
let worker = site_sync.clone();
bg_handles.push(tokio::spawn(async move {
worker.run().await;
}));
tracing::info!("Site sync worker started");
}
let ui_enabled = config.ui_enabled;
let api_app = myfsio_server::create_router(state.clone());
let ui_app = if ui_enabled {
Some(myfsio_server::create_ui_router(state.clone()))
} else {
None
};
let api_listener = match tokio::net::TcpListener::bind(bind_addr).await {
Ok(listener) => listener,
Err(err) => {
if err.kind() == std::io::ErrorKind::AddrInUse {
tracing::error!("API port already in use: {}", bind_addr);
} else {
tracing::error!("Failed to bind API {}: {}", bind_addr, err);
}
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
};
tracing::info!("API listening on {}", bind_addr);
    let ui_listener = if ui_app.is_some() {
match tokio::net::TcpListener::bind(ui_bind_addr).await {
Ok(listener) => {
tracing::info!("UI listening on {}", ui_bind_addr);
Some(listener)
}
Err(err) => {
if err.kind() == std::io::ErrorKind::AddrInUse {
tracing::error!("UI port already in use: {}", ui_bind_addr);
} else {
tracing::error!("Failed to bind UI {}: {}", ui_bind_addr, err);
}
for handle in bg_handles {
handle.abort();
}
std::process::exit(1);
}
}
} else {
None
};
let shutdown = shutdown_signal_shared();
let api_shutdown = shutdown.clone();
let api_task = tokio::spawn(async move {
axum::serve(
api_listener,
api_app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
)
.with_graceful_shutdown(async move {
api_shutdown.notified().await;
})
.await
});
let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
let ui_shutdown = shutdown.clone();
Some(tokio::spawn(async move {
axum::serve(listener, app)
.with_graceful_shutdown(async move {
ui_shutdown.notified().await;
})
.await
}))
} else {
None
};
tokio::signal::ctrl_c()
.await
.expect("Failed to listen for Ctrl+C");
tracing::info!("Shutdown signal received");
shutdown.notify_waiters();
if let Err(err) = api_task.await.unwrap_or(Ok(())) {
tracing::error!("API server exited with error: {}", err);
}
if let Some(task) = ui_task {
if let Err(err) = task.await.unwrap_or(Ok(())) {
tracing::error!("UI server exited with error: {}", err);
}
}
for handle in bg_handles {
handle.abort();
}
}
fn print_config_summary(config: &ServerConfig) {
println!("MyFSIO Rust Configuration");
println!("Version: {}", env!("CARGO_PKG_VERSION"));
println!("API bind: {}", config.bind_addr);
println!("UI bind: {}", config.ui_bind_addr);
println!("UI enabled: {}", config.ui_enabled);
println!("Storage root: {}", config.storage_root.display());
println!("IAM config: {}", config.iam_config_path.display());
println!("Region: {}", config.region);
println!("Encryption enabled: {}", config.encryption_enabled);
println!(
"Encryption chunk size: {} bytes",
config.encryption_chunk_size_bytes
);
println!("KMS enabled: {}", config.kms_enabled);
println!(
"KMS data key bounds: {}-{} bytes",
config.kms_generate_data_key_min_bytes, config.kms_generate_data_key_max_bytes
);
println!("GC enabled: {}", config.gc_enabled);
println!(
"GC interval: {} hours, dry run: {}",
config.gc_interval_hours, config.gc_dry_run
);
println!("Integrity enabled: {}", config.integrity_enabled);
println!("Lifecycle enabled: {}", config.lifecycle_enabled);
println!(
"Lifecycle history limit: {}",
config.lifecycle_max_history_per_bucket
);
println!(
"Website hosting enabled: {}",
config.website_hosting_enabled
);
println!("Site sync enabled: {}", config.site_sync_enabled);
println!("API base URL: {}", config.api_base_url);
println!(
"Object key max: {} bytes, tag limit: {}",
config.object_key_max_length_bytes, config.object_tag_limit
);
println!(
"Rate limits: default {} per {}s, admin {} per {}s",
config.ratelimit_default.max_requests,
config.ratelimit_default.window_seconds,
config.ratelimit_admin.max_requests,
config.ratelimit_admin.window_seconds
);
println!(
"Metrics history enabled: {}",
config.metrics_history_enabled
);
println!("Operation metrics enabled: {}", config.metrics_enabled);
}
fn validate_config(config: &ServerConfig) -> Vec<String> {
let mut issues = Vec::new();
if config.ui_enabled && config.bind_addr == config.ui_bind_addr {
issues.push(
"CRITICAL: API and UI bind addresses cannot be identical when UI is enabled."
.to_string(),
);
}
if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
}
if config.encryption_chunk_size_bytes == 0 {
issues.push("CRITICAL: ENCRYPTION_CHUNK_SIZE_BYTES must be greater than zero.".to_string());
}
if config.kms_generate_data_key_min_bytes == 0 {
issues.push(
"CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES must be greater than zero.".to_string(),
);
}
if config.kms_generate_data_key_min_bytes > config.kms_generate_data_key_max_bytes {
issues.push("CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES cannot exceed KMS_GENERATE_DATA_KEY_MAX_BYTES.".to_string());
}
if config.gc_interval_hours <= 0.0 {
issues.push("CRITICAL: GC_INTERVAL_HOURS must be greater than zero.".to_string());
}
if config.bucket_config_cache_ttl_seconds < 0.0 {
issues.push("CRITICAL: BUCKET_CONFIG_CACHE_TTL_SECONDS cannot be negative.".to_string());
}
if !config
.ratelimit_storage_uri
.eq_ignore_ascii_case("memory://")
{
issues.push(format!(
"WARNING: RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory limits.",
config.ratelimit_storage_uri
));
}
if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
issues.push(format!(
"CRITICAL: Cannot create storage root {}: {}",
config.storage_root.display(),
err
));
}
if let Some(parent) = config.iam_config_path.parent() {
if let Err(err) = std::fs::create_dir_all(parent) {
issues.push(format!(
"CRITICAL: Cannot create IAM config directory {}: {}",
parent.display(),
err
));
}
}
if config.encryption_enabled && config.secret_key.is_none() {
issues.push(
"WARNING: ENCRYPTION_ENABLED=true but SECRET_KEY is not configured; secure-at-rest config encryption is unavailable.".to_string(),
);
}
if config.site_sync_enabled && !config.website_hosting_enabled {
issues.push(
"INFO: SITE_SYNC_ENABLED=true without WEBSITE_HOSTING_ENABLED; this is valid but unrelated.".to_string(),
);
}
issues
}
fn init_tracing() {
use tracing_subscriber::EnvFilter;
let filter = EnvFilter::try_from_env("RUST_LOG")
.or_else(|_| {
EnvFilter::try_new(std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string()))
})
.unwrap_or_else(|_| EnvFilter::new("INFO"));
tracing_subscriber::fmt().with_env_filter(filter).init();
}
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
std::sync::Arc::new(tokio::sync::Notify::new())
}
fn load_env_files() {
let cwd = std::env::current_dir().ok();
let mut candidates: Vec<std::path::PathBuf> = Vec::new();
candidates.push(std::path::PathBuf::from("/opt/myfsio/myfsio.env"));
if let Some(ref dir) = cwd {
candidates.push(dir.join(".env"));
candidates.push(dir.join("myfsio.env"));
for ancestor in dir.ancestors().skip(1).take(4) {
candidates.push(ancestor.join(".env"));
candidates.push(ancestor.join("myfsio.env"));
}
}
let mut seen = std::collections::HashSet::new();
for path in candidates {
if !seen.insert(path.clone()) {
continue;
}
if path.is_file() {
match dotenvy::from_path_override(&path) {
Ok(()) => eprintln!("Loaded env file: {}", path.display()),
Err(e) => eprintln!("Failed to load env file {}: {}", path.display(), e),
}
}
}
}
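// Precedence note (clarifying comment, not in the original commit): because
// each candidate is loaded with dotenvy::from_path_override, a file loaded
// later in the list overrides keys set by earlier ones, so ancestor-directory
// env files win over the current directory, which in turn wins over
// /opt/myfsio/myfsio.env.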
fn ensure_iam_bootstrap(config: &ServerConfig) {
let iam_path = &config.iam_config_path;
if iam_path.exists() {
return;
}
let access_key = std::env::var("ADMIN_ACCESS_KEY")
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.unwrap_or_else(|| format!("AK{}", uuid::Uuid::new_v4().simple()));
let secret_key = std::env::var("ADMIN_SECRET_KEY")
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.unwrap_or_else(|| format!("SK{}", uuid::Uuid::new_v4().simple()));
let user_id = format!("u-{}", &uuid::Uuid::new_v4().simple().to_string()[..16]);
let created_at = chrono::Utc::now().to_rfc3339();
let body = serde_json::json!({
"version": 2,
"users": [{
"user_id": user_id,
"display_name": "Local Admin",
"enabled": true,
"access_keys": [{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": created_at,
}],
"policies": [{
"bucket": "*",
"actions": ["*"],
"prefix": "*",
}]
}]
});
let json = match serde_json::to_string_pretty(&body) {
Ok(s) => s,
Err(e) => {
tracing::error!("Failed to serialize IAM bootstrap config: {}", e);
return;
}
};
if let Some(parent) = iam_path.parent() {
if let Err(e) = std::fs::create_dir_all(parent) {
tracing::error!(
"Failed to create IAM config dir {}: {}",
parent.display(),
e
);
return;
}
}
if let Err(e) = std::fs::write(iam_path, json) {
tracing::error!(
"Failed to write IAM bootstrap config {}: {}",
iam_path.display(),
e
);
return;
}
tracing::info!("============================================================");
tracing::info!("MYFSIO - ADMIN CREDENTIALS INITIALIZED");
tracing::info!("============================================================");
tracing::info!("Access Key: {}", access_key);
tracing::info!("Secret Key: {}", secret_key);
tracing::info!("Saved to: {}", iam_path.display());
tracing::info!("============================================================");
}
fn reset_admin_credentials(config: &ServerConfig) {
if let Some(parent) = config.iam_config_path.parent() {
if let Err(err) = std::fs::create_dir_all(parent) {
eprintln!(
"Failed to create IAM config directory {}: {}",
parent.display(),
err
);
std::process::exit(1);
}
}
if config.iam_config_path.exists() {
let backup = config
.iam_config_path
.with_extension(format!("bak-{}", chrono::Utc::now().timestamp()));
if let Err(err) = std::fs::rename(&config.iam_config_path, &backup) {
eprintln!(
"Failed to back up existing IAM config {}: {}",
config.iam_config_path.display(),
err
);
std::process::exit(1);
}
println!("Backed up existing IAM config to {}", backup.display());
prune_iam_backups(&config.iam_config_path, 5);
}
ensure_iam_bootstrap(config);
println!("Admin credentials reset.");
}
fn prune_iam_backups(iam_path: &std::path::Path, keep: usize) {
let parent = match iam_path.parent() {
Some(p) => p,
None => return,
};
let stem = match iam_path.file_stem().and_then(|s| s.to_str()) {
Some(s) => s,
None => return,
};
let prefix = format!("{}.bak-", stem);
let entries = match std::fs::read_dir(parent) {
Ok(entries) => entries,
Err(_) => return,
};
let mut backups: Vec<(i64, std::path::PathBuf)> = entries
.filter_map(|e| e.ok())
.filter_map(|e| {
let path = e.path();
let name = path.file_name()?.to_str()?;
let rest = name.strip_prefix(&prefix)?;
let ts: i64 = rest.parse().ok()?;
Some((ts, path))
})
.collect();
backups.sort_by(|a, b| b.0.cmp(&a.0));
for (_, path) in backups.into_iter().skip(keep) {
if let Err(err) = std::fs::remove_file(&path) {
eprintln!(
"Failed to remove old IAM backup {}: {}",
path.display(),
err
);
} else {
println!("Pruned old IAM backup {}", path.display());
}
}
}
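// Behavior sketch (added, not part of the original commit; assumes the same
// tempfile dev-dependency the library tests use): backups are named
// "<stem>.bak-<unix-timestamp>" and pruning keeps only the `keep` newest.
#[cfg(test)]
mod prune_tests {
    use super::prune_iam_backups;

    #[test]
    fn keeps_only_newest_backups() {
        let tmp = tempfile::tempdir().unwrap();
        let iam_path = tmp.path().join("iam.json");
        for ts in [100, 200, 300] {
            std::fs::write(tmp.path().join(format!("iam.bak-{ts}")), b"{}").unwrap();
        }
        prune_iam_backups(&iam_path, 2);
        assert!(!tmp.path().join("iam.bak-100").exists());
        assert!(tmp.path().join("iam.bak-200").exists());
        assert!(tmp.path().join("iam.bak-300").exists());
    }
}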

File diff suppressed because it is too large

View File

@@ -0,0 +1,87 @@
mod auth;
pub mod ratelimit;
pub mod session;
pub use auth::auth_layer;
pub use ratelimit::{rate_limit_layer, RateLimitLayerState};
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
use axum::extract::{Request, State};
use axum::middleware::Next;
use axum::response::Response;
use std::time::Instant;
use crate::state::AppState;
pub async fn server_header(req: Request, next: Next) -> Response {
let mut resp = next.run(req).await;
resp.headers_mut()
.insert("server", crate::SERVER_HEADER.parse().unwrap());
resp
}
pub async fn ui_metrics_layer(State(state): State<AppState>, req: Request, next: Next) -> Response {
let metrics = match state.metrics.clone() {
Some(m) => m,
None => return next.run(req).await,
};
let start = Instant::now();
let method = req.method().clone();
let path = req.uri().path().to_string();
let endpoint_type = classify_ui_endpoint(&path);
let bytes_in = req
.headers()
.get(axum::http::header::CONTENT_LENGTH)
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(0);
let response = next.run(req).await;
let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
let status = response.status().as_u16();
let bytes_out = response
.headers()
.get(axum::http::header::CONTENT_LENGTH)
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(0);
let error_code = if status >= 400 { Some("UIError") } else { None };
metrics.record_request(
method.as_str(),
endpoint_type,
status,
latency_ms,
bytes_in,
bytes_out,
error_code,
);
response
}
fn classify_ui_endpoint(path: &str) -> &'static str {
if path.contains("/upload") {
"ui_upload"
} else if path.starts_with("/ui/buckets/") {
"ui_bucket"
} else if path.starts_with("/ui/iam") {
"ui_iam"
} else if path.starts_with("/ui/sites") {
"ui_sites"
} else if path.starts_with("/ui/connections") {
"ui_connections"
} else if path.starts_with("/ui/metrics") {
"ui_metrics"
} else if path.starts_with("/ui/system") {
"ui_system"
} else if path.starts_with("/ui/website-domains") {
"ui_website_domains"
} else if path.starts_with("/ui/replication") {
"ui_replication"
} else if path.starts_with("/login") || path.starts_with("/logout") {
"ui_auth"
} else {
"ui_other"
}
}
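// A quick classification sketch (added, not part of the original commit)
// showing precedence: the `/upload` substring check runs before the
// path-prefix checks, so uploads under /ui/buckets/... are tagged ui_upload.
#[cfg(test)]
mod classify_tests {
    use super::classify_ui_endpoint;

    #[test]
    fn upload_substring_takes_precedence() {
        assert_eq!(classify_ui_endpoint("/ui/buckets/photos/upload"), "ui_upload");
        assert_eq!(classify_ui_endpoint("/ui/buckets/photos"), "ui_bucket");
        assert_eq!(classify_ui_endpoint("/login"), "ui_auth");
        assert_eq!(classify_ui_endpoint("/static/app.js"), "ui_other");
    }
}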

View File

@@ -0,0 +1,241 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use axum::extract::{ConnectInfo, Request, State};
use axum::http::{header, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use parking_lot::Mutex;
use crate::config::RateLimitSetting;
#[derive(Clone)]
pub struct RateLimitLayerState {
limiter: Arc<FixedWindowLimiter>,
num_trusted_proxies: usize,
}
impl RateLimitLayerState {
pub fn new(setting: RateLimitSetting, num_trusted_proxies: usize) -> Self {
Self {
limiter: Arc::new(FixedWindowLimiter::new(setting)),
num_trusted_proxies,
}
}
}
#[derive(Debug)]
struct FixedWindowLimiter {
setting: RateLimitSetting,
state: Mutex<LimiterState>,
}
#[derive(Debug)]
struct LimiterState {
entries: HashMap<String, LimitEntry>,
last_sweep: Instant,
}
#[derive(Debug, Clone, Copy)]
struct LimitEntry {
window_started: Instant,
count: u32,
}
const SWEEP_MIN_INTERVAL: Duration = Duration::from_secs(60);
const SWEEP_ENTRY_THRESHOLD: usize = 1024;
impl FixedWindowLimiter {
fn new(setting: RateLimitSetting) -> Self {
Self {
setting,
state: Mutex::new(LimiterState {
entries: HashMap::new(),
last_sweep: Instant::now(),
}),
}
}
fn check(&self, key: &str) -> Result<(), u64> {
let now = Instant::now();
let window = Duration::from_secs(self.setting.window_seconds.max(1));
let mut state = self.state.lock();
if state.entries.len() >= SWEEP_ENTRY_THRESHOLD
&& now.duration_since(state.last_sweep) >= SWEEP_MIN_INTERVAL
{
state
.entries
.retain(|_, entry| now.duration_since(entry.window_started) < window);
state.last_sweep = now;
}
let entry = state.entries.entry(key.to_string()).or_insert(LimitEntry {
window_started: now,
count: 0,
});
if now.duration_since(entry.window_started) >= window {
entry.window_started = now;
entry.count = 0;
}
if entry.count >= self.setting.max_requests {
let elapsed = now.duration_since(entry.window_started);
let retry_after = window.saturating_sub(elapsed).as_secs().max(1);
return Err(retry_after);
}
entry.count += 1;
Ok(())
}
}
pub async fn rate_limit_layer(
State(state): State<RateLimitLayerState>,
req: Request,
next: Next,
) -> Response {
let key = rate_limit_key(&req, state.num_trusted_proxies);
match state.limiter.check(&key) {
Ok(()) => next.run(req).await,
Err(retry_after) => too_many_requests(retry_after),
}
}
fn too_many_requests(retry_after: u64) -> Response {
(
StatusCode::TOO_MANY_REQUESTS,
[
(header::CONTENT_TYPE, "application/xml".to_string()),
(header::RETRY_AFTER, retry_after.to_string()),
],
myfsio_xml::response::rate_limit_exceeded_xml(),
)
.into_response()
}
fn rate_limit_key(req: &Request, num_trusted_proxies: usize) -> String {
format!("ip:{}", client_ip(req, num_trusted_proxies))
}
fn client_ip(req: &Request, num_trusted_proxies: usize) -> String {
if num_trusted_proxies > 0 {
if let Some(value) = req
.headers()
.get("x-forwarded-for")
.and_then(|v| v.to_str().ok())
{
let parts = value
.split(',')
.map(|part| part.trim())
.filter(|part| !part.is_empty())
.collect::<Vec<_>>();
if parts.len() > num_trusted_proxies {
let index = parts.len() - num_trusted_proxies - 1;
return parts[index].to_string();
}
}
if let Some(value) = req.headers().get("x-real-ip").and_then(|v| v.to_str().ok()) {
if !value.trim().is_empty() {
return value.trim().to_string();
}
}
}
req.extensions()
.get::<ConnectInfo<SocketAddr>>()
.map(|ConnectInfo(addr)| addr.ip().to_string())
.unwrap_or_else(|| "unknown".to_string())
}
#[cfg(test)]
mod tests {
use super::*;
use axum::body::Body;
#[test]
fn honors_trusted_proxy_count_for_forwarded_for() {
let req = Request::builder()
.header("x-forwarded-for", "198.51.100.1, 10.0.0.1, 10.0.0.2")
.body(Body::empty())
.unwrap();
assert_eq!(rate_limit_key(&req, 2), "ip:198.51.100.1");
assert_eq!(rate_limit_key(&req, 1), "ip:10.0.0.1");
}
#[test]
fn falls_back_to_connect_info_when_forwarded_for_has_too_few_hops() {
let mut req = Request::builder()
.header("x-forwarded-for", "198.51.100.1")
.body(Body::empty())
.unwrap();
req.extensions_mut()
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
assert_eq!(rate_limit_key(&req, 2), "ip:203.0.113.9");
}
#[test]
fn ignores_forwarded_headers_when_no_proxies_are_trusted() {
let mut req = Request::builder()
.header("x-forwarded-for", "198.51.100.1")
.header("x-real-ip", "198.51.100.2")
.body(Body::empty())
.unwrap();
req.extensions_mut()
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));
assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.9");
}
#[test]
fn uses_connect_info_for_direct_clients() {
let mut req = Request::builder().body(Body::empty()).unwrap();
req.extensions_mut()
.insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 10], 443))));
assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.10");
}
#[test]
fn fixed_window_rejects_after_quota() {
let limiter = FixedWindowLimiter::new(RateLimitSetting::new(2, 60));
assert!(limiter.check("k").is_ok());
assert!(limiter.check("k").is_ok());
assert!(limiter.check("k").is_err());
}
#[test]
fn sweep_removes_expired_entries() {
let limiter = FixedWindowLimiter::new(RateLimitSetting::new(10, 1));
let far_past = Instant::now() - (SWEEP_MIN_INTERVAL + Duration::from_secs(5));
{
let mut state = limiter.state.lock();
for i in 0..(SWEEP_ENTRY_THRESHOLD + 1024) {
state.entries.insert(
format!("stale-{}", i),
LimitEntry {
window_started: far_past,
count: 5,
},
);
}
state.last_sweep = far_past;
}
let seeded = limiter.state.lock().entries.len();
assert_eq!(seeded, SWEEP_ENTRY_THRESHOLD + 1024);
assert!(limiter.check("fresh").is_ok());
let remaining = limiter.state.lock().entries.len();
assert_eq!(
remaining, 1,
"expected sweep to leave only the fresh entry, got {}",
remaining
);
}
}

View File

@@ -0,0 +1,228 @@
use std::sync::Arc;
use axum::extract::{Request, State};
use axum::http::{header, HeaderValue, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use cookie::{Cookie, SameSite};
use parking_lot::Mutex;
use crate::session::{
csrf_tokens_match, SessionData, SessionStore, CSRF_FIELD_NAME, CSRF_HEADER_NAME,
SESSION_COOKIE_NAME,
};
#[derive(Clone)]
pub struct SessionLayerState {
pub store: Arc<SessionStore>,
pub secure: bool,
}
#[derive(Clone)]
pub struct SessionHandle {
pub id: String,
inner: Arc<Mutex<SessionData>>,
dirty: Arc<Mutex<bool>>,
}
impl SessionHandle {
pub fn new(id: String, data: SessionData) -> Self {
Self {
id,
inner: Arc::new(Mutex::new(data)),
dirty: Arc::new(Mutex::new(false)),
}
}
pub fn read<R>(&self, f: impl FnOnce(&SessionData) -> R) -> R {
let guard = self.inner.lock();
f(&guard)
}
pub fn write<R>(&self, f: impl FnOnce(&mut SessionData) -> R) -> R {
let mut guard = self.inner.lock();
let out = f(&mut guard);
*self.dirty.lock() = true;
out
}
pub fn snapshot(&self) -> SessionData {
self.inner.lock().clone()
}
pub fn is_dirty(&self) -> bool {
*self.dirty.lock()
}
}
pub async fn session_layer(
State(state): State<SessionLayerState>,
mut req: Request,
next: Next,
) -> Response {
let cookie_id = extract_session_cookie(&req);
let (session_id, session_data, is_new) =
match cookie_id.and_then(|id| state.store.get(&id).map(|data| (id.clone(), data))) {
Some((id, data)) => (id, data, false),
None => {
let (id, data) = state.store.create();
(id, data, true)
}
};
let handle = SessionHandle::new(session_id.clone(), session_data);
req.extensions_mut().insert(handle.clone());
let mut resp = next.run(req).await;
if handle.is_dirty() {
state.store.save(&handle.id, handle.snapshot());
}
if is_new {
let cookie = build_session_cookie(&session_id, state.secure);
if let Ok(value) = HeaderValue::from_str(&cookie.to_string()) {
resp.headers_mut().append(header::SET_COOKIE, value);
}
}
resp
}
pub async fn csrf_layer(req: Request, next: Next) -> Response {
const CSRF_HEADER_ALIAS: &str = "x-csrftoken";
let method = req.method().clone();
let needs_check = matches!(
method,
axum::http::Method::POST
| axum::http::Method::PUT
| axum::http::Method::PATCH
| axum::http::Method::DELETE
);
if !needs_check {
return next.run(req).await;
}
let is_ui = req.uri().path().starts_with("/ui/")
|| req.uri().path() == "/ui"
|| req.uri().path() == "/login"
|| req.uri().path() == "/logout";
if !is_ui {
return next.run(req).await;
}
let handle = match req.extensions().get::<SessionHandle>() {
Some(h) => h.clone(),
None => return (StatusCode::FORBIDDEN, "Missing session").into_response(),
};
let expected = handle.read(|s| s.csrf_token.clone());
let header_token = req
.headers()
.get(CSRF_HEADER_NAME)
.or_else(|| req.headers().get(CSRF_HEADER_ALIAS))
.and_then(|v| v.to_str().ok())
.map(|s| s.to_string());
if let Some(token) = header_token.as_deref() {
if csrf_tokens_match(&expected, token) {
return next.run(req).await;
}
}
let content_type = req
.headers()
.get(header::CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or("")
.to_string();
let (parts, body) = req.into_parts();
let bytes = match axum::body::to_bytes(body, usize::MAX).await {
Ok(b) => b,
Err(_) => return (StatusCode::BAD_REQUEST, "Body read failed").into_response(),
};
let form_token = if content_type.starts_with("application/x-www-form-urlencoded") {
extract_form_token(&bytes)
} else if content_type.starts_with("multipart/form-data") {
extract_multipart_token(&content_type, &bytes)
} else {
None
};
if let Some(token) = form_token {
if csrf_tokens_match(&expected, &token) {
let req = Request::from_parts(parts, axum::body::Body::from(bytes));
return next.run(req).await;
}
}
tracing::warn!(
path = %parts.uri.path(),
content_type = %content_type,
expected_len = expected.len(),
header_present = header_token.is_some(),
"CSRF token mismatch"
);
(StatusCode::FORBIDDEN, "Invalid CSRF token").into_response()
}
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
let boundary = multer::parse_boundary(content_type).ok()?;
let prefix = format!("--{}", boundary);
let text = std::str::from_utf8(body).ok()?;
let needle = "name=\"csrf_token\"";
let idx = text.find(needle)?;
let after = &text[idx + needle.len()..];
let body_start = after.find("\r\n\r\n")? + 4;
let tail = &after[body_start..];
let end = tail
.find(&format!("\r\n--{}", prefix.trim_start_matches("--")))
.or_else(|| tail.find("\r\n--"))
.unwrap_or(tail.len());
Some(tail[..end].trim().to_string())
}
fn extract_session_cookie(req: &Request) -> Option<String> {
let raw = req.headers().get(header::COOKIE)?.to_str().ok()?;
for pair in raw.split(';') {
if let Ok(cookie) = Cookie::parse(pair.trim().to_string()) {
if cookie.name() == SESSION_COOKIE_NAME {
return Some(cookie.value().to_string());
}
}
}
None
}
fn build_session_cookie(id: &str, secure: bool) -> Cookie<'static> {
let mut cookie = Cookie::new(SESSION_COOKIE_NAME, id.to_string());
cookie.set_http_only(true);
cookie.set_same_site(SameSite::Lax);
cookie.set_secure(secure);
cookie.set_path("/");
cookie
}
fn extract_form_token(body: &[u8]) -> Option<String> {
let text = std::str::from_utf8(body).ok()?;
let prefix = format!("{}=", CSRF_FIELD_NAME);
for pair in text.split('&') {
if let Some(rest) = pair.strip_prefix(&prefix) {
return urldecode(rest);
}
}
None
}
fn urldecode(s: &str) -> Option<String> {
percent_encoding::percent_decode_str(&s.replace('+', " "))
.decode_utf8()
.ok()
.map(|c| c.into_owned())
}
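// Decoding sketch (added, not part of the original commit; assumes
// CSRF_FIELD_NAME is "csrf_token", matching the multipart extractor above):
// form bodies split on '&', '+' decodes to a space, and percent-escapes are
// resolved, mirroring application/x-www-form-urlencoded serialization.
#[cfg(test)]
mod form_token_tests {
    use super::{extract_form_token, urldecode};

    #[test]
    fn decodes_csrf_field_from_form_body() {
        let body = b"a=1&csrf_token=abc%2B123&b=2";
        assert_eq!(extract_form_token(body).as_deref(), Some("abc+123"));
        assert_eq!(urldecode("hello+world").as_deref(), Some("hello world"));
    }
}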

View File

@@ -0,0 +1,105 @@
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfiguration {
pub target_bucket: String,
#[serde(default)]
pub target_prefix: String,
#[serde(default = "default_enabled")]
pub enabled: bool,
}
fn default_enabled() -> bool {
true
}
#[derive(Serialize, Deserialize)]
struct StoredLoggingFile {
#[serde(rename = "LoggingEnabled")]
logging_enabled: Option<StoredLoggingEnabled>,
}
#[derive(Serialize, Deserialize)]
struct StoredLoggingEnabled {
#[serde(rename = "TargetBucket")]
target_bucket: String,
#[serde(rename = "TargetPrefix", default)]
target_prefix: String,
}
pub struct AccessLoggingService {
storage_root: PathBuf,
cache: RwLock<HashMap<String, Option<LoggingConfiguration>>>,
}
impl AccessLoggingService {
pub fn new(storage_root: &Path) -> Self {
Self {
storage_root: storage_root.to_path_buf(),
cache: RwLock::new(HashMap::new()),
}
}
fn config_path(&self, bucket: &str) -> PathBuf {
self.storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket)
.join("logging.json")
}
pub fn get(&self, bucket: &str) -> Option<LoggingConfiguration> {
if let Some(cached) = self.cache.read().get(bucket).cloned() {
return cached;
}
let path = self.config_path(bucket);
let config = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str::<StoredLoggingFile>(&s).ok())
.and_then(|f| f.logging_enabled)
.map(|e| LoggingConfiguration {
target_bucket: e.target_bucket,
target_prefix: e.target_prefix,
enabled: true,
})
} else {
None
};
self.cache
.write()
.insert(bucket.to_string(), config.clone());
config
}
pub fn set(&self, bucket: &str, config: LoggingConfiguration) -> std::io::Result<()> {
let path = self.config_path(bucket);
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)?;
}
let stored = StoredLoggingFile {
logging_enabled: Some(StoredLoggingEnabled {
target_bucket: config.target_bucket.clone(),
target_prefix: config.target_prefix.clone(),
}),
};
let json = serde_json::to_string_pretty(&stored)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
std::fs::write(&path, json)?;
self.cache.write().insert(bucket.to_string(), Some(config));
Ok(())
}
pub fn delete(&self, bucket: &str) {
let path = self.config_path(bucket);
if path.exists() {
let _ = std::fs::remove_file(&path);
}
self.cache.write().insert(bucket.to_string(), None);
}
}
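// Illustrative round-trip sketch (added, not part of the original commit;
// assumes the tempfile dev-dependency already used by the GC tests): set()
// persists the S3-style LoggingEnabled JSON under the bucket's system
// directory, get() serves it from the in-memory cache afterwards, and
// delete() caches the absence.
#[cfg(test)]
mod logging_tests {
    use super::*;

    #[test]
    fn set_then_get_round_trips() {
        let tmp = tempfile::tempdir().unwrap();
        let svc = AccessLoggingService::new(tmp.path());
        svc.set(
            "photos",
            LoggingConfiguration {
                target_bucket: "logs".to_string(),
                target_prefix: "photos/".to_string(),
                enabled: true,
            },
        )
        .unwrap();
        let cfg = svc.get("photos").unwrap();
        assert_eq!(cfg.target_bucket, "logs");
        svc.delete("photos");
        assert!(svc.get("photos").is_none());
    }
}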

View File

@@ -0,0 +1,276 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
pub const ACL_METADATA_KEY: &str = "__acl__";
pub const GRANTEE_ALL_USERS: &str = "*";
pub const GRANTEE_AUTHENTICATED_USERS: &str = "authenticated";
const ACL_PERMISSION_FULL_CONTROL: &str = "FULL_CONTROL";
const ACL_PERMISSION_WRITE: &str = "WRITE";
const ACL_PERMISSION_WRITE_ACP: &str = "WRITE_ACP";
const ACL_PERMISSION_READ: &str = "READ";
const ACL_PERMISSION_READ_ACP: &str = "READ_ACP";
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AclGrant {
pub grantee: String,
pub permission: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Acl {
pub owner: String,
#[serde(default)]
pub grants: Vec<AclGrant>,
}
impl Acl {
pub fn allowed_actions(
&self,
principal_id: Option<&str>,
is_authenticated: bool,
) -> HashSet<&'static str> {
let mut actions = HashSet::new();
if let Some(principal_id) = principal_id {
if principal_id == self.owner {
actions.extend(permission_to_actions(ACL_PERMISSION_FULL_CONTROL));
}
}
for grant in &self.grants {
if grant.grantee == GRANTEE_ALL_USERS {
actions.extend(permission_to_actions(&grant.permission));
} else if grant.grantee == GRANTEE_AUTHENTICATED_USERS && is_authenticated {
actions.extend(permission_to_actions(&grant.permission));
} else if let Some(principal_id) = principal_id {
if grant.grantee == principal_id {
actions.extend(permission_to_actions(&grant.permission));
}
}
}
actions
}
}
pub fn create_canned_acl(canned_acl: &str, owner: &str) -> Acl {
let owner_grant = AclGrant {
grantee: owner.to_string(),
permission: ACL_PERMISSION_FULL_CONTROL.to_string(),
};
match canned_acl {
"public-read" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
],
},
"public-read-write" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
AclGrant {
grantee: GRANTEE_ALL_USERS.to_string(),
permission: ACL_PERMISSION_WRITE.to_string(),
},
],
},
"authenticated-read" => Acl {
owner: owner.to_string(),
grants: vec![
owner_grant,
AclGrant {
grantee: GRANTEE_AUTHENTICATED_USERS.to_string(),
permission: ACL_PERMISSION_READ.to_string(),
},
],
},
"bucket-owner-read" | "bucket-owner-full-control" | "private" | _ => Acl {
owner: owner.to_string(),
grants: vec![owner_grant],
},
}
}
pub fn acl_to_xml(acl: &Acl) -> String {
let mut xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Owner><ID>{}</ID><DisplayName>{}</DisplayName></Owner>\
<AccessControlList>",
xml_escape(&acl.owner),
xml_escape(&acl.owner),
);
for grant in &acl.grants {
xml.push_str("<Grant>");
match grant.grantee.as_str() {
GRANTEE_ALL_USERS => {
xml.push_str(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
<URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>\
</Grantee>",
);
}
GRANTEE_AUTHENTICATED_USERS => {
xml.push_str(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
<URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>\
</Grantee>",
);
}
other => {
xml.push_str(&format!(
"<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
<ID>{}</ID><DisplayName>{}</DisplayName>\
</Grantee>",
xml_escape(other),
xml_escape(other),
));
}
}
xml.push_str(&format!(
"<Permission>{}</Permission></Grant>",
xml_escape(&grant.permission)
));
}
xml.push_str("</AccessControlList></AccessControlPolicy>");
xml
}
pub fn acl_from_bucket_config(value: &Value) -> Option<Acl> {
match value {
Value::String(raw) => acl_from_xml(raw).or_else(|| serde_json::from_str(raw).ok()),
Value::Object(_) => serde_json::from_value(value.clone()).ok(),
_ => None,
}
}
pub fn acl_from_object_metadata(metadata: &HashMap<String, String>) -> Option<Acl> {
metadata
.get(ACL_METADATA_KEY)
.and_then(|raw| serde_json::from_str::<Acl>(raw).ok())
}
pub fn store_object_acl(metadata: &mut HashMap<String, String>, acl: &Acl) {
if let Ok(serialized) = serde_json::to_string(acl) {
metadata.insert(ACL_METADATA_KEY.to_string(), serialized);
}
}
fn acl_from_xml(xml: &str) -> Option<Acl> {
let doc = roxmltree::Document::parse(xml).ok()?;
let owner = doc
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Owner")
.and_then(|node| {
node.children()
.find(|child| child.is_element() && child.tag_name().name() == "ID")
.and_then(|child| child.text())
})
.unwrap_or("myfsio")
.trim()
.to_string();
let mut grants = Vec::new();
for grant in doc
.descendants()
.filter(|node| node.is_element() && node.tag_name().name() == "Grant")
{
let permission = grant
.children()
.find(|child| child.is_element() && child.tag_name().name() == "Permission")
.and_then(|child| child.text())
.unwrap_or_default()
.trim()
.to_string();
if permission.is_empty() {
continue;
}
let grantee_node = grant
.children()
.find(|child| child.is_element() && child.tag_name().name() == "Grantee");
let grantee = grantee_node
.and_then(|node| {
let uri = node
.children()
.find(|child| child.is_element() && child.tag_name().name() == "URI")
.and_then(|child| child.text())
.map(|text| text.trim().to_string());
match uri.as_deref() {
Some("http://acs.amazonaws.com/groups/global/AllUsers") => {
Some(GRANTEE_ALL_USERS.to_string())
}
Some("http://acs.amazonaws.com/groups/global/AuthenticatedUsers") => {
Some(GRANTEE_AUTHENTICATED_USERS.to_string())
}
_ => node
.children()
.find(|child| child.is_element() && child.tag_name().name() == "ID")
.and_then(|child| child.text())
.map(|text| text.trim().to_string()),
}
})
.unwrap_or_default();
if grantee.is_empty() {
continue;
}
grants.push(AclGrant {
grantee,
permission,
});
}
Some(Acl { owner, grants })
}
fn permission_to_actions(permission: &str) -> &'static [&'static str] {
match permission {
ACL_PERMISSION_FULL_CONTROL => &["read", "write", "delete", "list", "share"],
ACL_PERMISSION_WRITE => &["write", "delete"],
ACL_PERMISSION_WRITE_ACP => &["share"],
ACL_PERMISSION_READ => &["read", "list"],
ACL_PERMISSION_READ_ACP => &["share"],
_ => &[],
}
}
fn xml_escape(s: &str) -> String {
s.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('"', "&quot;")
.replace('\'', "&apos;")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn canned_acl_grants_public_read() {
let acl = create_canned_acl("public-read", "owner");
let actions = acl.allowed_actions(None, false);
assert!(actions.contains("read"));
assert!(actions.contains("list"));
assert!(!actions.contains("write"));
}
#[test]
fn xml_round_trip_preserves_grants() {
let acl = create_canned_acl("authenticated-read", "owner");
let parsed = acl_from_bucket_config(&Value::String(acl_to_xml(&acl))).unwrap();
assert_eq!(parsed.owner, "owner");
assert_eq!(parsed.grants.len(), 2);
assert!(parsed
.grants
.iter()
.any(|grant| grant.grantee == GRANTEE_AUTHENTICATED_USERS));
}
}
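// Round-trip sketch (added, not part of the original commit): object ACLs
// travel through the metadata map as JSON under ACL_METADATA_KEY, so a
// store followed by a load yields the original ACL.
#[cfg(test)]
mod metadata_round_trip_tests {
    use super::*;

    #[test]
    fn object_acl_metadata_round_trip() {
        let acl = create_canned_acl("private", "owner");
        let mut metadata = HashMap::new();
        store_object_acl(&mut metadata, &acl);
        let restored = acl_from_object_metadata(&metadata).unwrap();
        assert_eq!(restored, acl);
    }
}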

View File

@@ -0,0 +1,315 @@
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
pub struct GcConfig {
pub interval_hours: f64,
pub temp_file_max_age_hours: f64,
pub multipart_max_age_days: u64,
pub lock_file_max_age_hours: f64,
pub dry_run: bool,
}
impl Default for GcConfig {
fn default() -> Self {
Self {
interval_hours: 6.0,
temp_file_max_age_hours: 24.0,
multipart_max_age_days: 7,
lock_file_max_age_hours: 1.0,
dry_run: false,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn dry_run_reports_but_does_not_delete_temp_files() {
let tmp = tempfile::tempdir().unwrap();
let tmp_dir = tmp.path().join(".myfsio.sys").join("tmp");
std::fs::create_dir_all(&tmp_dir).unwrap();
let file_path = tmp_dir.join("stale.tmp");
std::fs::write(&file_path, b"temporary").unwrap();
tokio::time::sleep(std::time::Duration::from_millis(5)).await;
let service = GcService::new(
tmp.path().to_path_buf(),
GcConfig {
temp_file_max_age_hours: 0.0,
dry_run: true,
..GcConfig::default()
},
);
let result = service.run_now(false).await.unwrap();
assert_eq!(result["temp_files_deleted"], 1);
assert!(file_path.exists());
}
}
pub struct GcService {
storage_root: PathBuf,
config: GcConfig,
running: Arc<RwLock<bool>>,
started_at: Arc<RwLock<Option<Instant>>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
impl GcService {
pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
let history_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("gc_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage_root,
config,
running: Arc::new(RwLock::new(false)),
started_at: Arc::new(RwLock::new(None)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
let scan_elapsed_seconds = self
.started_at
.read()
.await
.as_ref()
.map(|started| started.elapsed().as_secs_f64());
json!({
"enabled": true,
"running": running,
"scanning": running,
"scan_elapsed_seconds": scan_elapsed_seconds,
"interval_hours": self.config.interval_hours,
"temp_file_max_age_hours": self.config.temp_file_max_age_hours,
"multipart_max_age_days": self.config.multipart_max_age_days,
"lock_file_max_age_hours": self.config.lock_file_max_age_hours,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
let mut executions: Vec<Value> = history.iter().cloned().collect();
executions.reverse();
json!({ "executions": executions })
}
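    /// Runs a single GC pass immediately. The `running` flag guards against
    /// overlapping passes, and the `dry_run` argument is OR-ed with the
    /// configured value, so a config-level dry run cannot be overridden.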
pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("GC already running".to_string());
}
*running = true;
}
*self.started_at.write().await = Some(Instant::now());
let start = Instant::now();
let result = self.execute_gc(dry_run || self.config.dry_run).await;
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
*self.started_at.write().await = None;
let mut result_json = result.clone();
if let Some(obj) = result_json.as_object_mut() {
obj.insert("execution_time_seconds".to_string(), json!(elapsed));
}
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run || self.config.dry_run,
"result": result_json,
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result)
}
async fn execute_gc(&self, dry_run: bool) -> Value {
let mut temp_files_deleted = 0u64;
let mut temp_bytes_freed = 0u64;
let mut multipart_uploads_deleted = 0u64;
let mut lock_files_deleted = 0u64;
let mut empty_dirs_removed = 0u64;
let mut errors: Vec<String> = Vec::new();
let now = std::time::SystemTime::now();
let temp_max_age =
std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
let multipart_max_age =
std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
let lock_max_age =
std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);
let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
if tmp_dir.exists() {
match std::fs::read_dir(&tmp_dir) {
Ok(entries) => {
for entry in entries.flatten() {
if let Ok(metadata) = entry.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
if age > temp_max_age {
let size = metadata.len();
if !dry_run {
if let Err(e) = std::fs::remove_file(entry.path()) {
errors.push(format!(
"Failed to remove temp file: {}",
e
));
continue;
}
}
temp_files_deleted += 1;
temp_bytes_freed += size;
}
}
}
}
}
}
Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
}
}
let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
if multipart_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
for bucket_entry in bucket_dirs.flatten() {
if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
for upload in uploads.flatten() {
if let Ok(metadata) = upload.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
                                        if age > multipart_max_age {
                                            if !dry_run {
                                                if let Err(e) =
                                                    std::fs::remove_dir_all(upload.path())
                                                {
                                                    errors.push(format!(
                                                        "Failed to remove multipart upload: {}",
                                                        e
                                                    ));
                                                    continue;
                                                }
                                            }
                                            multipart_uploads_deleted += 1;
}
}
}
}
}
}
}
}
}
let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
if buckets_dir.exists() {
if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
for bucket_entry in bucket_dirs.flatten() {
let locks_dir = bucket_entry.path().join("locks");
if locks_dir.exists() {
if let Ok(locks) = std::fs::read_dir(&locks_dir) {
for lock in locks.flatten() {
if let Ok(metadata) = lock.metadata() {
if let Ok(modified) = metadata.modified() {
if let Ok(age) = now.duration_since(modified) {
                                            if age > lock_max_age {
                                                if !dry_run {
                                                    if let Err(e) =
                                                        std::fs::remove_file(lock.path())
                                                    {
                                                        errors.push(format!(
                                                            "Failed to remove lock file: {}",
                                                            e
                                                        ));
                                                        continue;
                                                    }
                                                }
                                                lock_files_deleted += 1;
}
}
}
}
}
}
}
}
}
}
if !dry_run {
for dir in [&tmp_dir, &multipart_dir] {
if dir.exists() {
if let Ok(entries) = std::fs::read_dir(dir) {
for entry in entries.flatten() {
if entry.path().is_dir() {
if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
if contents.next().is_none() {
                                        if std::fs::remove_dir(entry.path()).is_ok() {
                                            empty_dirs_removed += 1;
                                        }
}
}
}
}
}
}
}
}
json!({
"temp_files_deleted": temp_files_deleted,
"temp_bytes_freed": temp_bytes_freed,
"multipart_uploads_deleted": multipart_uploads_deleted,
"lock_files_deleted": lock_files_deleted,
"empty_dirs_removed": empty_dirs_removed,
"errors": errors,
})
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("GC cycle starting");
match self.run_now(false).await {
Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
Err(e) => tracing::warn!("GC cycle failed: {}", e),
}
}
})
}
}
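// Minimal wiring sketch (illustrative only; `GcService::new`, `run_now`, and
// `start_background` above are the real entry points, the surrounding setup
// is assumed):
//
//     let gc = Arc::new(GcService::new(PathBuf::from("./data"), GcConfig::default()));
//     let _task = gc.clone().start_background(); // periodic cycles
//     let report = gc.run_now(true).await?;      // one-off dry run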

View File

@@ -0,0 +1,732 @@
use myfsio_common::constants::{
BUCKET_META_DIR, BUCKET_VERSIONS_DIR, INDEX_FILE, SYSTEM_BUCKETS_DIR, SYSTEM_ROOT,
};
use myfsio_storage::fs_backend::FsStorageBackend;
use serde_json::{json, Map, Value};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
const MAX_ISSUES: usize = 500;
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
pub struct IntegrityConfig {
pub interval_hours: f64,
pub batch_size: usize,
pub auto_heal: bool,
pub dry_run: bool,
}
impl Default for IntegrityConfig {
fn default() -> Self {
Self {
interval_hours: 24.0,
batch_size: 10_000,
auto_heal: false,
dry_run: false,
}
}
}
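/// Periodic integrity scanner. Walks every bucket and cross-checks data files
/// on disk against the metadata index, version manifests, and the etag cache.
/// `auto_heal` is accepted and recorded, but this scanner only reports issues
/// (`issues_healed` is always 0).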
pub struct IntegrityService {
#[allow(dead_code)]
storage: Arc<FsStorageBackend>,
storage_root: PathBuf,
config: IntegrityConfig,
running: Arc<RwLock<bool>>,
started_at: Arc<RwLock<Option<Instant>>>,
history: Arc<RwLock<Vec<Value>>>,
history_path: PathBuf,
}
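/// Counters accumulated over a single scan. `objects_scanned` doubles as the
/// batch budget: every checker bails out once it reaches `batch_size`.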
#[derive(Default)]
struct ScanState {
objects_scanned: u64,
buckets_scanned: u64,
corrupted_objects: u64,
orphaned_objects: u64,
phantom_metadata: u64,
stale_versions: u64,
etag_cache_inconsistencies: u64,
issues: Vec<Value>,
errors: Vec<String>,
}
impl ScanState {
fn batch_exhausted(&self, batch_size: usize) -> bool {
self.objects_scanned >= batch_size as u64
}
fn push_issue(&mut self, issue_type: &str, bucket: &str, key: &str, detail: String) {
if self.issues.len() < MAX_ISSUES {
self.issues.push(json!({
"issue_type": issue_type,
"bucket": bucket,
"key": key,
"detail": detail,
}));
}
}
fn into_json(self, elapsed: f64) -> Value {
json!({
"objects_scanned": self.objects_scanned,
"buckets_scanned": self.buckets_scanned,
"corrupted_objects": self.corrupted_objects,
"orphaned_objects": self.orphaned_objects,
"phantom_metadata": self.phantom_metadata,
"stale_versions": self.stale_versions,
"etag_cache_inconsistencies": self.etag_cache_inconsistencies,
"issues_healed": 0,
"issues": self.issues,
"errors": self.errors,
"execution_time_seconds": elapsed,
})
}
}
impl IntegrityService {
pub fn new(
storage: Arc<FsStorageBackend>,
storage_root: &Path,
config: IntegrityConfig,
) -> Self {
let history_path = storage_root
.join(SYSTEM_ROOT)
.join("config")
.join("integrity_history.json");
let history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
.unwrap_or_default()
} else {
Vec::new()
};
Self {
storage,
storage_root: storage_root.to_path_buf(),
config,
running: Arc::new(RwLock::new(false)),
started_at: Arc::new(RwLock::new(None)),
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn status(&self) -> Value {
let running = *self.running.read().await;
let scan_elapsed_seconds = self
.started_at
.read()
.await
.as_ref()
.map(|started| started.elapsed().as_secs_f64());
json!({
"enabled": true,
"running": running,
"scanning": running,
"scan_elapsed_seconds": scan_elapsed_seconds,
"interval_hours": self.config.interval_hours,
"batch_size": self.config.batch_size,
"auto_heal": self.config.auto_heal,
"dry_run": self.config.dry_run,
})
}
pub async fn history(&self) -> Value {
let history = self.history.read().await;
let mut executions: Vec<Value> = history.iter().cloned().collect();
executions.reverse();
json!({ "executions": executions })
}
pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Integrity check already running".to_string());
}
*running = true;
}
*self.started_at.write().await = Some(Instant::now());
let start = Instant::now();
let storage_root = self.storage_root.clone();
let batch_size = self.config.batch_size;
let result =
tokio::task::spawn_blocking(move || scan_all_buckets(&storage_root, batch_size))
.await
.unwrap_or_else(|e| {
let mut st = ScanState::default();
st.errors.push(format!("scan task failed: {}", e));
st
});
let elapsed = start.elapsed().as_secs_f64();
*self.running.write().await = false;
*self.started_at.write().await = None;
let result_json = result.into_json(elapsed);
let record = json!({
"timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
"dry_run": dry_run,
"auto_heal": auto_heal,
"result": result_json.clone(),
});
{
let mut history = self.history.write().await;
history.push(record);
if history.len() > 50 {
let excess = history.len() - 50;
history.drain(..excess);
}
}
self.save_history().await;
Ok(result_json)
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "executions": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Integrity check starting");
match self.run_now(false, false).await {
Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
Err(e) => tracing::warn!("Integrity check failed: {}", e),
}
}
})
}
}
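/// Scans every bucket under `storage_root`, running the five checkers in a
/// fixed order until the shared batch budget is exhausted.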
fn scan_all_buckets(storage_root: &Path, batch_size: usize) -> ScanState {
let mut state = ScanState::default();
let buckets = match list_bucket_names(storage_root) {
Ok(b) => b,
Err(e) => {
state.errors.push(format!("list buckets: {}", e));
return state;
}
};
for bucket in &buckets {
if state.batch_exhausted(batch_size) {
break;
}
state.buckets_scanned += 1;
let bucket_path = storage_root.join(bucket);
let meta_root = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_META_DIR);
let index_entries = collect_index_entries(&meta_root);
check_corrupted(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_phantom(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_orphaned(&mut state, bucket, &bucket_path, &index_entries, batch_size);
check_stale_versions(&mut state, storage_root, bucket, batch_size);
check_etag_cache(&mut state, storage_root, bucket, &index_entries, batch_size);
}
state
}
fn list_bucket_names(storage_root: &Path) -> std::io::Result<Vec<String>> {
let mut names = Vec::new();
if !storage_root.exists() {
return Ok(names);
}
for entry in std::fs::read_dir(storage_root)? {
let entry = entry?;
let name = entry.file_name().to_string_lossy().to_string();
if name == SYSTEM_ROOT {
continue;
}
if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
names.push(name);
}
}
Ok(names)
}
#[allow(dead_code)]
struct IndexEntryInfo {
entry: Value,
index_file: PathBuf,
key_name: String,
}
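/// Walks the per-bucket metadata tree and flattens every index file into one
/// map keyed by the full object key (directory prefix plus entry name).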
fn collect_index_entries(meta_root: &Path) -> HashMap<String, IndexEntryInfo> {
let mut out: HashMap<String, IndexEntryInfo> = HashMap::new();
if !meta_root.exists() {
return out;
}
let mut stack: Vec<PathBuf> = vec![meta_root.to_path_buf()];
while let Some(dir) = stack.pop() {
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
for entry in rd.flatten() {
let path = entry.path();
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
if ft.is_dir() {
stack.push(path);
continue;
}
if entry.file_name().to_string_lossy() != INDEX_FILE {
continue;
}
let rel_dir = match path.parent().and_then(|p| p.strip_prefix(meta_root).ok()) {
Some(p) => p.to_path_buf(),
None => continue,
};
let dir_prefix = if rel_dir.as_os_str().is_empty() {
String::new()
} else {
rel_dir
.components()
.map(|c| c.as_os_str().to_string_lossy().to_string())
.collect::<Vec<_>>()
.join("/")
};
let content = match std::fs::read_to_string(&path) {
Ok(c) => c,
Err(_) => continue,
};
let index_data: Map<String, Value> = match serde_json::from_str(&content) {
Ok(Value::Object(m)) => m,
_ => continue,
};
for (key_name, entry_val) in index_data {
let full_key = if dir_prefix.is_empty() {
key_name.clone()
} else {
format!("{}/{}", dir_prefix, key_name)
};
out.insert(
full_key,
IndexEntryInfo {
entry: entry_val,
index_file: path.clone(),
key_name,
},
);
}
}
}
out
}
fn stored_etag(entry: &Value) -> Option<String> {
entry
.get("metadata")
.and_then(|m| m.get("__etag__"))
.and_then(|v| v.as_str())
.map(|s| s.to_string())
}
fn check_corrupted(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let mut keys: Vec<&String> = entries.keys().collect();
keys.sort();
for full_key in keys {
if state.batch_exhausted(batch_size) {
return;
}
let info = &entries[full_key];
let object_path = bucket_path.join(full_key);
if !object_path.exists() {
continue;
}
state.objects_scanned += 1;
let Some(stored) = stored_etag(&info.entry) else {
continue;
};
match myfsio_crypto::hashing::md5_file(&object_path) {
Ok(actual) => {
if actual != stored {
state.corrupted_objects += 1;
state.push_issue(
"corrupted_object",
bucket,
full_key,
format!("stored_etag={} actual_etag={}", stored, actual),
);
}
}
Err(e) => state
.errors
.push(format!("hash {}/{}: {}", bucket, full_key, e)),
}
}
}
fn check_phantom(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let mut keys: Vec<&String> = entries.keys().collect();
keys.sort();
for full_key in keys {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
let object_path = bucket_path.join(full_key);
if !object_path.exists() {
state.phantom_metadata += 1;
state.push_issue(
"phantom_metadata",
bucket,
full_key,
"metadata entry without file on disk".to_string(),
);
}
}
}
fn check_orphaned(
state: &mut ScanState,
bucket: &str,
bucket_path: &Path,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let indexed: HashSet<&String> = entries.keys().collect();
let mut stack: Vec<(PathBuf, String)> = vec![(bucket_path.to_path_buf(), String::new())];
while let Some((dir, prefix)) = stack.pop() {
if state.batch_exhausted(batch_size) {
return;
}
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
for entry in rd.flatten() {
if state.batch_exhausted(batch_size) {
return;
}
let name = entry.file_name().to_string_lossy().to_string();
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
if ft.is_dir() {
if prefix.is_empty() && INTERNAL_FOLDERS.contains(&name.as_str()) {
continue;
}
let new_prefix = if prefix.is_empty() {
name
} else {
format!("{}/{}", prefix, name)
};
stack.push((entry.path(), new_prefix));
} else if ft.is_file() {
let full_key = if prefix.is_empty() {
name
} else {
format!("{}/{}", prefix, name)
};
state.objects_scanned += 1;
if !indexed.contains(&full_key) {
state.orphaned_objects += 1;
state.push_issue(
"orphaned_object",
bucket,
&full_key,
"file exists without metadata entry".to_string(),
);
}
}
}
}
}
fn check_stale_versions(
state: &mut ScanState,
storage_root: &Path,
bucket: &str,
batch_size: usize,
) {
let versions_root = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_VERSIONS_DIR);
if !versions_root.exists() {
return;
}
let mut stack: Vec<PathBuf> = vec![versions_root.clone()];
while let Some(dir) = stack.pop() {
if state.batch_exhausted(batch_size) {
return;
}
let rd = match std::fs::read_dir(&dir) {
Ok(r) => r,
Err(_) => continue,
};
let mut bin_stems: HashMap<String, PathBuf> = HashMap::new();
let mut json_stems: HashMap<String, PathBuf> = HashMap::new();
let mut subdirs: Vec<PathBuf> = Vec::new();
for entry in rd.flatten() {
let ft = match entry.file_type() {
Ok(t) => t,
Err(_) => continue,
};
let path = entry.path();
if ft.is_dir() {
subdirs.push(path);
continue;
}
let name = entry.file_name().to_string_lossy().to_string();
if let Some(stem) = name.strip_suffix(".bin") {
bin_stems.insert(stem.to_string(), path);
} else if let Some(stem) = name.strip_suffix(".json") {
json_stems.insert(stem.to_string(), path);
}
}
for (stem, path) in &bin_stems {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
if !json_stems.contains_key(stem) {
state.stale_versions += 1;
let key = path
.strip_prefix(&versions_root)
.map(|p| p.to_string_lossy().replace('\\', "/"))
.unwrap_or_else(|_| path.display().to_string());
state.push_issue(
"stale_version",
bucket,
&key,
"version data without manifest".to_string(),
);
}
}
for (stem, path) in &json_stems {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
if !bin_stems.contains_key(stem) {
state.stale_versions += 1;
let key = path
.strip_prefix(&versions_root)
.map(|p| p.to_string_lossy().replace('\\', "/"))
.unwrap_or_else(|_| path.display().to_string());
state.push_issue(
"stale_version",
bucket,
&key,
"version manifest without data".to_string(),
);
}
}
stack.extend(subdirs);
}
}
fn check_etag_cache(
state: &mut ScanState,
storage_root: &Path,
bucket: &str,
entries: &HashMap<String, IndexEntryInfo>,
batch_size: usize,
) {
let etag_index_path = storage_root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join("etag_index.json");
if !etag_index_path.exists() {
return;
}
let cache: HashMap<String, Value> = match std::fs::read_to_string(&etag_index_path)
.ok()
.and_then(|s| serde_json::from_str(&s).ok())
{
Some(Value::Object(m)) => m.into_iter().collect(),
_ => return,
};
for (full_key, cached_val) in cache {
if state.batch_exhausted(batch_size) {
return;
}
state.objects_scanned += 1;
let Some(cached_etag) = cached_val.as_str() else {
continue;
};
let Some(info) = entries.get(&full_key) else {
continue;
};
let Some(stored) = stored_etag(&info.entry) else {
continue;
};
if cached_etag != stored {
state.etag_cache_inconsistencies += 1;
state.push_issue(
"etag_cache_inconsistency",
bucket,
&full_key,
format!("cached_etag={} index_etag={}", cached_etag, stored),
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
fn md5_hex(bytes: &[u8]) -> String {
myfsio_crypto::hashing::md5_bytes(bytes)
}
fn write_index(meta_dir: &Path, entries: &[(&str, &str)]) {
fs::create_dir_all(meta_dir).unwrap();
let mut map = Map::new();
for (name, etag) in entries {
map.insert(
name.to_string(),
json!({ "metadata": { "__etag__": etag } }),
);
}
fs::write(
meta_dir.join(INDEX_FILE),
serde_json::to_string(&Value::Object(map)).unwrap(),
)
.unwrap();
}
#[test]
fn scan_detects_each_issue_type() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
let bucket = "testbucket";
let bucket_path = root.join(bucket);
let meta_root = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_META_DIR);
fs::create_dir_all(&bucket_path).unwrap();
let clean_bytes = b"clean file contents";
let clean_etag = md5_hex(clean_bytes);
fs::write(bucket_path.join("clean.txt"), clean_bytes).unwrap();
let corrupted_bytes = b"actual content";
fs::write(bucket_path.join("corrupted.txt"), corrupted_bytes).unwrap();
fs::write(bucket_path.join("orphan.txt"), b"no metadata").unwrap();
write_index(
&meta_root,
&[
("clean.txt", &clean_etag),
("corrupted.txt", "00000000000000000000000000000000"),
("phantom.txt", "deadbeefdeadbeefdeadbeefdeadbeef"),
],
);
let versions_root = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join(BUCKET_VERSIONS_DIR)
.join("someobject");
fs::create_dir_all(&versions_root).unwrap();
fs::write(versions_root.join("v1.bin"), b"orphan bin").unwrap();
fs::write(versions_root.join("v2.json"), b"{}").unwrap();
let etag_index = root
.join(SYSTEM_ROOT)
.join(SYSTEM_BUCKETS_DIR)
.join(bucket)
.join("etag_index.json");
fs::write(
&etag_index,
serde_json::to_string(&json!({ "clean.txt": "stale-cached-etag" })).unwrap(),
)
.unwrap();
let state = scan_all_buckets(root, 10_000);
assert_eq!(state.corrupted_objects, 1, "corrupted");
assert_eq!(state.phantom_metadata, 1, "phantom");
assert_eq!(state.orphaned_objects, 1, "orphaned");
assert_eq!(state.stale_versions, 2, "stale versions");
assert_eq!(state.etag_cache_inconsistencies, 1, "etag cache");
assert_eq!(state.buckets_scanned, 1);
assert!(
state.errors.is_empty(),
"unexpected errors: {:?}",
state.errors
);
}
#[test]
fn skips_system_root_as_bucket() {
let tmp = tempfile::tempdir().unwrap();
fs::create_dir_all(tmp.path().join(SYSTEM_ROOT).join("config")).unwrap();
let state = scan_all_buckets(tmp.path(), 100);
assert_eq!(state.buckets_scanned, 0);
}
}

View File

@@ -0,0 +1,637 @@
use chrono::{DateTime, Duration, Utc};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
pub struct LifecycleConfig {
pub interval_seconds: u64,
pub max_history_per_bucket: usize,
}
impl Default for LifecycleConfig {
fn default() -> Self {
Self {
interval_seconds: 3600,
max_history_per_bucket: 50,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleExecutionRecord {
pub timestamp: f64,
pub bucket_name: String,
pub objects_deleted: u64,
pub versions_deleted: u64,
pub uploads_aborted: u64,
#[serde(default)]
pub errors: Vec<String>,
pub execution_time_seconds: f64,
}
#[derive(Debug, Clone, Default)]
struct BucketLifecycleResult {
bucket_name: String,
objects_deleted: u64,
versions_deleted: u64,
uploads_aborted: u64,
errors: Vec<String>,
execution_time_seconds: f64,
}
#[derive(Debug, Clone, Default)]
struct ParsedLifecycleRule {
status: String,
prefix: String,
expiration_days: Option<u64>,
expiration_date: Option<DateTime<Utc>>,
noncurrent_days: Option<u64>,
abort_incomplete_multipart_days: Option<u64>,
}
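/// Applies bucket lifecycle rules on a schedule: expiring current objects,
/// expiring noncurrent versions, and aborting stale multipart uploads.
/// Results are persisted per bucket in `lifecycle_history.json`.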
pub struct LifecycleService {
storage: Arc<FsStorageBackend>,
storage_root: PathBuf,
config: LifecycleConfig,
running: Arc<RwLock<bool>>,
}
impl LifecycleService {
pub fn new(
storage: Arc<FsStorageBackend>,
storage_root: impl Into<PathBuf>,
config: LifecycleConfig,
) -> Self {
Self {
storage,
storage_root: storage_root.into(),
config,
running: Arc::new(RwLock::new(false)),
}
}
pub async fn run_cycle(&self) -> Result<Value, String> {
{
let mut running = self.running.write().await;
if *running {
return Err("Lifecycle already running".to_string());
}
*running = true;
}
let result = self.evaluate_rules().await;
*self.running.write().await = false;
Ok(result)
}
async fn evaluate_rules(&self) -> Value {
let buckets = match self.storage.list_buckets().await {
Ok(buckets) => buckets,
Err(err) => return json!({ "error": err.to_string() }),
};
let mut bucket_results = Vec::new();
let mut total_objects_deleted = 0u64;
let mut total_versions_deleted = 0u64;
let mut total_uploads_aborted = 0u64;
let mut errors = Vec::new();
for bucket in &buckets {
let started_at = std::time::Instant::now();
let mut result = BucketLifecycleResult {
bucket_name: bucket.name.clone(),
..Default::default()
};
let config = match self.storage.get_bucket_config(&bucket.name).await {
Ok(config) => config,
Err(err) => {
result.errors.push(err.to_string());
result.execution_time_seconds = started_at.elapsed().as_secs_f64();
self.append_history(&result);
errors.extend(result.errors.clone());
bucket_results.push(result);
continue;
}
};
let Some(lifecycle) = config.lifecycle.as_ref() else {
continue;
};
let rules = parse_lifecycle_rules(lifecycle);
if rules.is_empty() {
continue;
}
for rule in &rules {
if rule.status != "Enabled" {
continue;
}
if let Some(err) = self
.apply_expiration_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
if let Some(err) = self
.apply_noncurrent_expiration_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
if let Some(err) = self
.apply_abort_incomplete_multipart_rule(&bucket.name, rule, &mut result)
.await
{
result.errors.push(err);
}
}
result.execution_time_seconds = started_at.elapsed().as_secs_f64();
if result.objects_deleted > 0
|| result.versions_deleted > 0
|| result.uploads_aborted > 0
|| !result.errors.is_empty()
{
total_objects_deleted += result.objects_deleted;
total_versions_deleted += result.versions_deleted;
total_uploads_aborted += result.uploads_aborted;
errors.extend(result.errors.clone());
self.append_history(&result);
bucket_results.push(result);
}
}
json!({
"objects_deleted": total_objects_deleted,
"versions_deleted": total_versions_deleted,
"multipart_aborted": total_uploads_aborted,
"buckets_evaluated": buckets.len(),
"results": bucket_results.iter().map(result_to_json).collect::<Vec<_>>(),
"errors": errors,
})
}
async fn apply_expiration_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let cutoff = if let Some(days) = rule.expiration_days {
Some(Utc::now() - Duration::days(days as i64))
} else {
rule.expiration_date
};
let Some(cutoff) = cutoff else {
return None;
};
let params = myfsio_common::types::ListParams {
max_keys: 10_000,
prefix: if rule.prefix.is_empty() {
None
} else {
Some(rule.prefix.clone())
},
..Default::default()
};
match self.storage.list_objects(bucket, &params).await {
Ok(objects) => {
for object in &objects.objects {
if object.last_modified < cutoff {
if let Err(err) = self.storage.delete_object(bucket, &object.key).await {
result
.errors
.push(format!("{}:{}: {}", bucket, object.key, err));
} else {
result.objects_deleted += 1;
}
}
}
None
}
Err(err) => Some(format!("Failed to list objects for {}: {}", bucket, err)),
}
}
async fn apply_noncurrent_expiration_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let Some(days) = rule.noncurrent_days else {
return None;
};
let cutoff = Utc::now() - Duration::days(days as i64);
let versions_root = version_root_for_bucket(&self.storage_root, bucket);
if !versions_root.exists() {
return None;
}
let mut stack = VecDeque::from([versions_root]);
while let Some(current) = stack.pop_front() {
let entries = match std::fs::read_dir(&current) {
Ok(entries) => entries,
Err(err) => return Some(err.to_string()),
};
for entry in entries.flatten() {
let file_type = match entry.file_type() {
Ok(file_type) => file_type,
Err(_) => continue,
};
if file_type.is_dir() {
stack.push_back(entry.path());
continue;
}
if entry.path().extension().and_then(|ext| ext.to_str()) != Some("json") {
continue;
}
let contents = match std::fs::read_to_string(entry.path()) {
Ok(contents) => contents,
Err(_) => continue,
};
let Ok(manifest) = serde_json::from_str::<Value>(&contents) else {
continue;
};
let key = manifest
.get("key")
.and_then(|value| value.as_str())
.unwrap_or_default()
.to_string();
if !rule.prefix.is_empty() && !key.starts_with(&rule.prefix) {
continue;
}
let archived_at = manifest
.get("archived_at")
.and_then(|value| value.as_str())
.and_then(|value| DateTime::parse_from_rfc3339(value).ok())
.map(|value| value.with_timezone(&Utc));
                let Some(archived_at) = archived_at else {
                    continue;
                };
                if archived_at >= cutoff {
                    continue;
                }
let version_id = manifest
.get("version_id")
.and_then(|value| value.as_str())
.unwrap_or_default();
let data_path = entry.path().with_file_name(format!("{}.bin", version_id));
let _ = std::fs::remove_file(&data_path);
let _ = std::fs::remove_file(entry.path());
result.versions_deleted += 1;
}
}
None
}
async fn apply_abort_incomplete_multipart_rule(
&self,
bucket: &str,
rule: &ParsedLifecycleRule,
result: &mut BucketLifecycleResult,
) -> Option<String> {
let Some(days) = rule.abort_incomplete_multipart_days else {
return None;
};
let cutoff = Utc::now() - Duration::days(days as i64);
match self.storage.list_multipart_uploads(bucket).await {
Ok(uploads) => {
for upload in &uploads {
if upload.initiated < cutoff {
if let Err(err) = self
.storage
.abort_multipart(bucket, &upload.upload_id)
.await
{
result
.errors
.push(format!("abort {}: {}", upload.upload_id, err));
} else {
result.uploads_aborted += 1;
}
}
}
None
}
Err(err) => Some(format!(
"Failed to list multipart uploads for {}: {}",
bucket, err
)),
}
}
fn append_history(&self, result: &BucketLifecycleResult) {
let path = lifecycle_history_path(&self.storage_root, &result.bucket_name);
let mut history = load_history(&path);
history.insert(
0,
LifecycleExecutionRecord {
timestamp: Utc::now().timestamp_millis() as f64 / 1000.0,
bucket_name: result.bucket_name.clone(),
objects_deleted: result.objects_deleted,
versions_deleted: result.versions_deleted,
uploads_aborted: result.uploads_aborted,
errors: result.errors.clone(),
execution_time_seconds: result.execution_time_seconds,
},
);
history.truncate(self.config.max_history_per_bucket);
let payload = json!({
"executions": history,
});
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&path,
serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string()),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_seconds);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
tracing::info!("Lifecycle evaluation starting");
match self.run_cycle().await {
Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
Err(err) => tracing::warn!("Lifecycle cycle failed: {}", err),
}
}
})
}
}
pub fn read_history(storage_root: &Path, bucket_name: &str, limit: usize, offset: usize) -> Value {
let path = lifecycle_history_path(storage_root, bucket_name);
let mut history = load_history(&path);
let total = history.len();
let executions = history
.drain(offset.min(total)..)
.take(limit)
.collect::<Vec<_>>();
json!({
"executions": executions,
"total": total,
"limit": limit,
"offset": offset,
"enabled": true,
})
}
fn load_history(path: &Path) -> Vec<LifecycleExecutionRecord> {
if !path.exists() {
return Vec::new();
}
std::fs::read_to_string(path)
.ok()
.and_then(|contents| serde_json::from_str::<Value>(&contents).ok())
.and_then(|value| value.get("executions").cloned())
.and_then(|value| serde_json::from_value::<Vec<LifecycleExecutionRecord>>(value).ok())
.unwrap_or_default()
}
fn lifecycle_history_path(storage_root: &Path, bucket_name: &str) -> PathBuf {
storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket_name)
.join("lifecycle_history.json")
}
fn version_root_for_bucket(storage_root: &Path, bucket_name: &str) -> PathBuf {
storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket_name)
.join("versions")
}
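/// Accepts lifecycle configuration in any of the stored shapes: raw XML or
/// JSON text, a JSON array of rules, or an object with a `Rules` array.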
fn parse_lifecycle_rules(value: &Value) -> Vec<ParsedLifecycleRule> {
match value {
Value::String(raw) => parse_lifecycle_rules_from_string(raw),
Value::Array(items) => items.iter().filter_map(parse_lifecycle_rule).collect(),
Value::Object(map) => map
.get("Rules")
.and_then(|rules| rules.as_array())
.map(|rules| rules.iter().filter_map(parse_lifecycle_rule).collect())
.unwrap_or_default(),
_ => Vec::new(),
}
}
fn parse_lifecycle_rules_from_string(raw: &str) -> Vec<ParsedLifecycleRule> {
if let Ok(json) = serde_json::from_str::<Value>(raw) {
return parse_lifecycle_rules(&json);
}
let Ok(doc) = roxmltree::Document::parse(raw) else {
return Vec::new();
};
doc.descendants()
.filter(|node| node.is_element() && node.tag_name().name() == "Rule")
.map(|rule| ParsedLifecycleRule {
status: child_text(&rule, "Status").unwrap_or_else(|| "Enabled".to_string()),
prefix: child_text(&rule, "Prefix")
.or_else(|| {
rule.descendants()
.find(|node| {
node.is_element()
&& node.tag_name().name() == "Filter"
&& node.children().any(|child| {
child.is_element() && child.tag_name().name() == "Prefix"
})
})
.and_then(|filter| child_text(&filter, "Prefix"))
})
.unwrap_or_default(),
expiration_days: rule
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Expiration")
.and_then(|expiration| child_text(&expiration, "Days"))
.and_then(|value| value.parse::<u64>().ok()),
expiration_date: rule
.descendants()
.find(|node| node.is_element() && node.tag_name().name() == "Expiration")
.and_then(|expiration| child_text(&expiration, "Date"))
.as_deref()
.and_then(parse_datetime),
noncurrent_days: rule
.descendants()
.find(|node| {
node.is_element() && node.tag_name().name() == "NoncurrentVersionExpiration"
})
.and_then(|node| child_text(&node, "NoncurrentDays"))
.and_then(|value| value.parse::<u64>().ok()),
abort_incomplete_multipart_days: rule
.descendants()
.find(|node| {
node.is_element() && node.tag_name().name() == "AbortIncompleteMultipartUpload"
})
.and_then(|node| child_text(&node, "DaysAfterInitiation"))
.and_then(|value| value.parse::<u64>().ok()),
})
.collect()
}
fn parse_lifecycle_rule(value: &Value) -> Option<ParsedLifecycleRule> {
let map = value.as_object()?;
Some(ParsedLifecycleRule {
status: map
.get("Status")
.and_then(|value| value.as_str())
.unwrap_or("Enabled")
.to_string(),
prefix: map
.get("Prefix")
.and_then(|value| value.as_str())
.or_else(|| {
map.get("Filter")
.and_then(|value| value.get("Prefix"))
.and_then(|value| value.as_str())
})
.unwrap_or_default()
.to_string(),
expiration_days: map
.get("Expiration")
.and_then(|value| value.get("Days"))
.and_then(|value| value.as_u64()),
expiration_date: map
.get("Expiration")
.and_then(|value| value.get("Date"))
.and_then(|value| value.as_str())
.and_then(parse_datetime),
noncurrent_days: map
.get("NoncurrentVersionExpiration")
.and_then(|value| value.get("NoncurrentDays"))
.and_then(|value| value.as_u64()),
abort_incomplete_multipart_days: map
.get("AbortIncompleteMultipartUpload")
.and_then(|value| value.get("DaysAfterInitiation"))
.and_then(|value| value.as_u64()),
})
}
fn parse_datetime(value: &str) -> Option<DateTime<Utc>> {
DateTime::parse_from_rfc3339(value)
.ok()
.map(|value| value.with_timezone(&Utc))
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
node.children()
.find(|child| child.is_element() && child.tag_name().name() == name)
.and_then(|child| child.text())
.map(|text| text.trim().to_string())
.filter(|text| !text.is_empty())
}
fn result_to_json(result: &BucketLifecycleResult) -> Value {
json!({
"bucket_name": result.bucket_name,
"objects_deleted": result.objects_deleted,
"versions_deleted": result.versions_deleted,
"uploads_aborted": result.uploads_aborted,
"errors": result.errors,
"execution_time_seconds": result.execution_time_seconds,
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration;
#[test]
fn parses_rules_from_xml() {
let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<Expiration><Days>10</Days></Expiration>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
<AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
</Rule>
</LifecycleConfiguration>"#;
let rules = parse_lifecycle_rules(&Value::String(xml.to_string()));
assert_eq!(rules.len(), 1);
assert_eq!(rules[0].prefix, "logs/");
assert_eq!(rules[0].expiration_days, Some(10));
assert_eq!(rules[0].noncurrent_days, Some(30));
assert_eq!(rules[0].abort_incomplete_multipart_days, Some(7));
}
#[tokio::test]
async fn run_cycle_writes_history_and_deletes_noncurrent_versions() {
let tmp = tempfile::tempdir().unwrap();
let storage = Arc::new(FsStorageBackend::new(tmp.path().to_path_buf()));
storage.create_bucket("docs").await.unwrap();
storage.set_versioning("docs", true).await.unwrap();
storage
.put_object(
"docs",
"logs/file.txt",
Box::pin(std::io::Cursor::new(b"old".to_vec())),
None,
)
.await
.unwrap();
storage
.put_object(
"docs",
"logs/file.txt",
Box::pin(std::io::Cursor::new(b"new".to_vec())),
None,
)
.await
.unwrap();
let versions_root = version_root_for_bucket(tmp.path(), "docs")
.join("logs")
.join("file.txt");
let manifest = std::fs::read_dir(&versions_root)
.unwrap()
.flatten()
.find(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json"))
.unwrap()
.path();
let old_manifest = json!({
"version_id": "ver-1",
"key": "logs/file.txt",
"size": 3,
"archived_at": (Utc::now() - Duration::days(45)).to_rfc3339(),
"etag": "etag",
});
std::fs::write(&manifest, serde_json::to_string(&old_manifest).unwrap()).unwrap();
std::fs::write(manifest.with_file_name("ver-1.bin"), b"old").unwrap();
let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Filter><Prefix>logs/</Prefix></Filter>
<NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
</Rule>
</LifecycleConfiguration>"#;
let mut config = storage.get_bucket_config("docs").await.unwrap();
config.lifecycle = Some(Value::String(lifecycle_xml.to_string()));
storage.set_bucket_config("docs", &config).await.unwrap();
let service =
LifecycleService::new(storage.clone(), tmp.path(), LifecycleConfig::default());
let result = service.run_cycle().await.unwrap();
assert_eq!(result["versions_deleted"], 1);
let history = read_history(tmp.path(), "docs", 50, 0);
assert_eq!(history["total"], 1);
assert_eq!(history["executions"][0]["versions_deleted"], 1);
}
}

View File

@@ -0,0 +1,368 @@
use chrono::{DateTime, Utc};
use parking_lot::Mutex;
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
const MAX_LATENCY_SAMPLES: usize = 5000;
pub struct MetricsConfig {
pub interval_minutes: u64,
pub retention_hours: u64,
}
impl Default for MetricsConfig {
fn default() -> Self {
Self {
interval_minutes: 5,
retention_hours: 24,
}
}
}
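/// Rolling per-window counters plus a bounded reservoir of latency samples,
/// used to estimate p50/p95/p99 without retaining every observation.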
#[derive(Debug, Clone)]
struct OperationStats {
count: u64,
success_count: u64,
error_count: u64,
latency_sum_ms: f64,
latency_min_ms: f64,
latency_max_ms: f64,
bytes_in: u64,
bytes_out: u64,
latency_samples: Vec<f64>,
}
impl Default for OperationStats {
fn default() -> Self {
Self {
count: 0,
success_count: 0,
error_count: 0,
latency_sum_ms: 0.0,
latency_min_ms: f64::INFINITY,
latency_max_ms: 0.0,
bytes_in: 0,
bytes_out: 0,
latency_samples: Vec::new(),
}
}
}
impl OperationStats {
fn record(&mut self, latency_ms: f64, success: bool, bytes_in: u64, bytes_out: u64) {
self.count += 1;
if success {
self.success_count += 1;
} else {
self.error_count += 1;
}
self.latency_sum_ms += latency_ms;
if latency_ms < self.latency_min_ms {
self.latency_min_ms = latency_ms;
}
if latency_ms > self.latency_max_ms {
self.latency_max_ms = latency_ms;
}
self.bytes_in += bytes_in;
self.bytes_out += bytes_out;
if self.latency_samples.len() < MAX_LATENCY_SAMPLES {
self.latency_samples.push(latency_ms);
} else {
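            // Reservoir sampling: the i-th observation replaces a random slot
            // with probability k/i, keeping the sample uniform over the window.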
let mut rng = rand::thread_rng();
let j = rng.gen_range(0..self.count as usize);
if j < MAX_LATENCY_SAMPLES {
self.latency_samples[j] = latency_ms;
}
}
}
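    /// Nearest-rank percentile with linear interpolation over a pre-sorted
    /// sample; returns 0.0 for an empty sample.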
fn compute_percentile(sorted: &[f64], p: f64) -> f64 {
if sorted.is_empty() {
return 0.0;
}
let k = (sorted.len() - 1) as f64 * (p / 100.0);
let f = k.floor() as usize;
let c = (f + 1).min(sorted.len() - 1);
let d = k - f as f64;
sorted[f] + d * (sorted[c] - sorted[f])
}
fn to_json(&self) -> Value {
let avg = if self.count > 0 {
self.latency_sum_ms / self.count as f64
} else {
0.0
};
let min = if self.latency_min_ms.is_infinite() {
0.0
} else {
self.latency_min_ms
};
let mut sorted = self.latency_samples.clone();
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
json!({
"count": self.count,
"success_count": self.success_count,
"error_count": self.error_count,
"latency_avg_ms": round2(avg),
"latency_min_ms": round2(min),
"latency_max_ms": round2(self.latency_max_ms),
"latency_p50_ms": round2(Self::compute_percentile(&sorted, 50.0)),
"latency_p95_ms": round2(Self::compute_percentile(&sorted, 95.0)),
"latency_p99_ms": round2(Self::compute_percentile(&sorted, 99.0)),
"bytes_in": self.bytes_in,
"bytes_out": self.bytes_out,
})
}
}
fn round2(v: f64) -> f64 {
(v * 100.0).round() / 100.0
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSnapshot {
pub timestamp: DateTime<Utc>,
pub window_seconds: u64,
pub by_method: HashMap<String, Value>,
pub by_endpoint: HashMap<String, Value>,
pub by_status_class: HashMap<String, u64>,
pub error_codes: HashMap<String, u64>,
pub totals: Value,
}
struct Inner {
by_method: HashMap<String, OperationStats>,
by_endpoint: HashMap<String, OperationStats>,
by_status_class: HashMap<String, u64>,
error_codes: HashMap<String, u64>,
totals: OperationStats,
window_start: f64,
snapshots: Vec<MetricsSnapshot>,
}
pub struct MetricsService {
config: MetricsConfig,
inner: Arc<Mutex<Inner>>,
snapshots_path: PathBuf,
}
impl MetricsService {
pub fn new(storage_root: &Path, config: MetricsConfig) -> Self {
let snapshots_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("operation_metrics.json");
let mut snapshots: Vec<MetricsSnapshot> = if snapshots_path.exists() {
std::fs::read_to_string(&snapshots_path)
.ok()
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
.and_then(|v| {
v.get("snapshots").and_then(|s| {
serde_json::from_value::<Vec<MetricsSnapshot>>(s.clone()).ok()
})
})
.unwrap_or_default()
} else {
Vec::new()
};
let cutoff = now_secs() - (config.retention_hours * 3600) as f64;
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
Self {
config,
inner: Arc::new(Mutex::new(Inner {
by_method: HashMap::new(),
by_endpoint: HashMap::new(),
by_status_class: HashMap::new(),
error_codes: HashMap::new(),
totals: OperationStats::default(),
window_start: now_secs(),
snapshots,
})),
snapshots_path,
}
}
pub fn record_request(
&self,
method: &str,
endpoint_type: &str,
status_code: u16,
latency_ms: f64,
bytes_in: u64,
bytes_out: u64,
error_code: Option<&str>,
) {
let success = (200..400).contains(&status_code);
let status_class = format!("{}xx", status_code / 100);
let mut inner = self.inner.lock();
inner
.by_method
.entry(method.to_string())
.or_default()
.record(latency_ms, success, bytes_in, bytes_out);
inner
.by_endpoint
.entry(endpoint_type.to_string())
.or_default()
.record(latency_ms, success, bytes_in, bytes_out);
*inner.by_status_class.entry(status_class).or_insert(0) += 1;
if let Some(code) = error_code {
*inner.error_codes.entry(code.to_string()).or_insert(0) += 1;
}
inner
.totals
.record(latency_ms, success, bytes_in, bytes_out);
}
pub fn get_current_stats(&self) -> Value {
let inner = self.inner.lock();
let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
let by_method: HashMap<String, Value> = inner
.by_method
.iter()
.map(|(k, v)| (k.clone(), v.to_json()))
.collect();
let by_endpoint: HashMap<String, Value> = inner
.by_endpoint
.iter()
.map(|(k, v)| (k.clone(), v.to_json()))
.collect();
json!({
"timestamp": Utc::now().to_rfc3339(),
"window_seconds": window_seconds,
"by_method": by_method,
"by_endpoint": by_endpoint,
"by_status_class": inner.by_status_class,
"error_codes": inner.error_codes,
"totals": inner.totals.to_json(),
})
}
pub fn get_history(&self, hours: Option<u64>) -> Vec<MetricsSnapshot> {
let inner = self.inner.lock();
let mut snapshots = inner.snapshots.clone();
if let Some(h) = hours {
let cutoff = now_secs() - (h * 3600) as f64;
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
}
snapshots
}
pub fn snapshot(&self) -> Value {
let current = self.get_current_stats();
let history = self.get_history(None);
json!({
"enabled": true,
"current": current,
"snapshots": history,
})
}
    fn take_snapshot(&self) {
        // Fold the live window into a persisted snapshot, then reset the
        // counters; the lock is released before touching the filesystem.
        {
            let mut inner = self.inner.lock();
            let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
            let by_method: HashMap<String, Value> = inner
                .by_method
                .iter()
                .map(|(k, v)| (k.clone(), v.to_json()))
                .collect();
            let by_endpoint: HashMap<String, Value> = inner
                .by_endpoint
                .iter()
                .map(|(k, v)| (k.clone(), v.to_json()))
                .collect();
            let snap = MetricsSnapshot {
                timestamp: Utc::now(),
                window_seconds,
                by_method,
                by_endpoint,
                by_status_class: inner.by_status_class.clone(),
                error_codes: inner.error_codes.clone(),
                totals: inner.totals.to_json(),
            };
            inner.snapshots.push(snap);
            let cutoff = now_secs() - (self.config.retention_hours * 3600) as f64;
            inner
                .snapshots
                .retain(|s| s.timestamp.timestamp() as f64 > cutoff);
            inner.by_method.clear();
            inner.by_endpoint.clear();
            inner.by_status_class.clear();
            inner.error_codes.clear();
            inner.totals = OperationStats::default();
            inner.window_start = now_secs();
        }
        self.save_snapshots();
    }
fn save_snapshots(&self) {
let snapshots = { self.inner.lock().snapshots.clone() };
if let Some(parent) = self.snapshots_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let data = json!({ "snapshots": snapshots });
let _ = std::fs::write(
&self.snapshots_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
tokio::spawn(async move {
let mut timer = tokio::time::interval(interval);
timer.tick().await;
loop {
timer.tick().await;
self.take_snapshot();
}
})
}
}
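/// Coarse endpoint classifier for per-endpoint metrics: UI, KMS, and service
/// routes match by prefix; otherwise one path segment is a bucket operation
/// and anything deeper an object operation.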
pub fn classify_endpoint(path: &str) -> &'static str {
if path.is_empty() || path == "/" {
return "service";
}
let trimmed = path.trim_end_matches('/');
if trimmed.starts_with("/ui") {
return "ui";
}
if trimmed.starts_with("/kms") {
return "kms";
}
if trimmed.starts_with("/myfsio") {
return "service";
}
let parts: Vec<&str> = trimmed.trim_start_matches('/').split('/').collect();
match parts.len() {
0 => "service",
1 => "bucket",
_ => "object",
}
}
fn now_secs() -> f64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs_f64())
.unwrap_or(0.0)
}

View File

@@ -0,0 +1,14 @@
pub mod access_logging;
pub mod acl;
pub mod gc;
pub mod integrity;
pub mod lifecycle;
pub mod metrics;
pub mod notifications;
pub mod object_lock;
pub mod replication;
pub mod s3_client;
pub mod site_registry;
pub mod site_sync;
pub mod system_metrics;
pub mod website_domains;

View File

@@ -0,0 +1,296 @@
use crate::state::AppState;
use chrono::{DateTime, Utc};
use myfsio_storage::traits::StorageEngine;
use serde::Serialize;
use serde_json::json;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WebhookDestination {
pub url: String,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NotificationConfiguration {
pub id: String,
pub events: Vec<String>,
pub destination: WebhookDestination,
pub prefix_filter: String,
pub suffix_filter: String,
}
#[derive(Debug, Clone, Serialize)]
pub struct NotificationEvent {
#[serde(rename = "eventVersion")]
event_version: &'static str,
#[serde(rename = "eventSource")]
event_source: &'static str,
#[serde(rename = "awsRegion")]
aws_region: &'static str,
#[serde(rename = "eventTime")]
event_time: String,
#[serde(rename = "eventName")]
event_name: String,
#[serde(rename = "userIdentity")]
user_identity: serde_json::Value,
#[serde(rename = "requestParameters")]
request_parameters: serde_json::Value,
#[serde(rename = "responseElements")]
response_elements: serde_json::Value,
s3: serde_json::Value,
}
impl NotificationConfiguration {
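    /// An event matches when its name satisfies one of the configured
    /// patterns (a trailing `*` acts as a prefix wildcard) and the object key
    /// passes both the prefix and suffix filters.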
pub fn matches_event(&self, event_name: &str, object_key: &str) -> bool {
let event_match = self.events.iter().any(|pattern| {
if let Some(prefix) = pattern.strip_suffix('*') {
event_name.starts_with(prefix)
} else {
pattern == event_name
}
});
if !event_match {
return false;
}
if !self.prefix_filter.is_empty() && !object_key.starts_with(&self.prefix_filter) {
return false;
}
if !self.suffix_filter.is_empty() && !object_key.ends_with(&self.suffix_filter) {
return false;
}
true
}
}
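/// Parses an S3-style `NotificationConfiguration` document. Only
/// `<WebhookConfiguration>` blocks are honoured; a missing destination URL is
/// rejected outright.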
pub fn parse_notification_configurations(
xml: &str,
) -> Result<Vec<NotificationConfiguration>, String> {
let doc = roxmltree::Document::parse(xml).map_err(|err| err.to_string())?;
let mut configs = Vec::new();
for webhook in doc
.descendants()
.filter(|node| node.is_element() && node.tag_name().name() == "WebhookConfiguration")
{
let id = child_text(&webhook, "Id").unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
let events = webhook
.children()
.filter(|node| node.is_element() && node.tag_name().name() == "Event")
.filter_map(|node| node.text())
.map(|text| text.trim().to_string())
.filter(|text| !text.is_empty())
.collect::<Vec<_>>();
let destination = webhook
.children()
.find(|node| node.is_element() && node.tag_name().name() == "Destination");
let url = destination
.as_ref()
.and_then(|node| child_text(node, "Url"))
.unwrap_or_default();
if url.trim().is_empty() {
return Err("Destination URL is required".to_string());
}
let mut prefix_filter = String::new();
let mut suffix_filter = String::new();
if let Some(filter) = webhook
.children()
.find(|node| node.is_element() && node.tag_name().name() == "Filter")
{
if let Some(key) = filter
.children()
.find(|node| node.is_element() && node.tag_name().name() == "S3Key")
{
for rule in key
.children()
.filter(|node| node.is_element() && node.tag_name().name() == "FilterRule")
{
let name = child_text(&rule, "Name").unwrap_or_default();
let value = child_text(&rule, "Value").unwrap_or_default();
if name == "prefix" {
prefix_filter = value;
} else if name == "suffix" {
suffix_filter = value;
}
}
}
}
configs.push(NotificationConfiguration {
id,
events,
destination: WebhookDestination { url },
prefix_filter,
suffix_filter,
});
}
Ok(configs)
}
pub fn emit_object_created(
state: &AppState,
bucket: &str,
key: &str,
size: u64,
etag: Option<&str>,
request_id: &str,
source_ip: &str,
user_identity: &str,
operation: &str,
) {
emit_notifications(
state.clone(),
bucket.to_string(),
key.to_string(),
format!("s3:ObjectCreated:{}", operation),
size,
etag.unwrap_or_default().to_string(),
request_id.to_string(),
source_ip.to_string(),
user_identity.to_string(),
);
}
pub fn emit_object_removed(
state: &AppState,
bucket: &str,
key: &str,
request_id: &str,
source_ip: &str,
user_identity: &str,
operation: &str,
) {
emit_notifications(
state.clone(),
bucket.to_string(),
key.to_string(),
format!("s3:ObjectRemoved:{}", operation),
0,
String::new(),
request_id.to_string(),
source_ip.to_string(),
user_identity.to_string(),
);
}
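/// Fire-and-forget delivery: the event record is built once and POSTed to
/// every matching webhook from a spawned task, so request handlers never
/// block on notification delivery.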
fn emit_notifications(
state: AppState,
bucket: String,
key: String,
event_name: String,
size: u64,
etag: String,
request_id: String,
source_ip: String,
user_identity: String,
) {
tokio::spawn(async move {
let config = match state.storage.get_bucket_config(&bucket).await {
Ok(config) => config,
Err(_) => return,
};
let raw = match config.notification {
Some(serde_json::Value::String(raw)) => raw,
_ => return,
};
let configs = match parse_notification_configurations(&raw) {
Ok(configs) => configs,
Err(err) => {
tracing::warn!("Invalid notification config for bucket {}: {}", bucket, err);
return;
}
};
let record = NotificationEvent {
event_version: "2.1",
event_source: "myfsio:s3",
aws_region: "local",
event_time: format_event_time(Utc::now()),
event_name: event_name.clone(),
user_identity: json!({ "principalId": if user_identity.is_empty() { "ANONYMOUS" } else { &user_identity } }),
request_parameters: json!({ "sourceIPAddress": if source_ip.is_empty() { "127.0.0.1" } else { &source_ip } }),
response_elements: json!({
"x-amz-request-id": request_id,
"x-amz-id-2": request_id,
}),
s3: json!({
"s3SchemaVersion": "1.0",
"configurationId": "notification",
"bucket": {
"name": bucket,
"ownerIdentity": { "principalId": "local" },
"arn": format!("arn:aws:s3:::{}", bucket),
},
"object": {
"key": key,
"size": size,
"eTag": etag,
"versionId": "null",
"sequencer": format!("{:016X}", Utc::now().timestamp_millis()),
}
}),
};
let payload = json!({ "Records": [record] });
let client = reqwest::Client::new();
for config in configs {
if !config.matches_event(&event_name, &key) {
continue;
}
let result = client
.post(&config.destination.url)
.header("content-type", "application/json")
.json(&payload)
.send()
.await;
if let Err(err) = result {
tracing::warn!(
"Failed to deliver notification for {} to {}: {}",
event_name,
config.destination.url,
err
);
}
}
});
}
fn format_event_time(value: DateTime<Utc>) -> String {
value.format("%Y-%m-%dT%H:%M:%S.000Z").to_string()
}
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
node.children()
.find(|child| child.is_element() && child.tag_name().name() == name)
.and_then(|child| child.text())
.map(|text| text.trim().to_string())
.filter(|text| !text.is_empty())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_webhook_configuration() {
let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<WebhookConfiguration>
<Id>upload</Id>
<Event>s3:ObjectCreated:*</Event>
<Destination><Url>https://example.com/hook</Url></Destination>
<Filter>
<S3Key>
<FilterRule><Name>prefix</Name><Value>logs/</Value></FilterRule>
<FilterRule><Name>suffix</Name><Value>.txt</Value></FilterRule>
</S3Key>
</Filter>
</WebhookConfiguration>
</NotificationConfiguration>"#;
let configs = parse_notification_configurations(xml).unwrap();
assert_eq!(configs.len(), 1);
assert!(configs[0].matches_event("s3:ObjectCreated:Put", "logs/test.txt"));
assert!(!configs[0].matches_event("s3:ObjectRemoved:Delete", "logs/test.txt"));
}
}

View File

@@ -0,0 +1,128 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
pub const LEGAL_HOLD_METADATA_KEY: &str = "__legal_hold__";
pub const RETENTION_METADATA_KEY: &str = "__object_retention__";
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RetentionMode {
GOVERNANCE,
COMPLIANCE,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ObjectLockRetention {
pub mode: RetentionMode,
pub retain_until_date: DateTime<Utc>,
}
impl ObjectLockRetention {
pub fn is_expired(&self) -> bool {
Utc::now() > self.retain_until_date
}
}
pub fn get_object_retention(metadata: &HashMap<String, String>) -> Option<ObjectLockRetention> {
metadata
.get(RETENTION_METADATA_KEY)
.and_then(|raw| serde_json::from_str::<ObjectLockRetention>(raw).ok())
}
pub fn set_object_retention(
metadata: &mut HashMap<String, String>,
retention: &ObjectLockRetention,
) -> Result<(), String> {
let encoded = serde_json::to_string(retention).map_err(|err| err.to_string())?;
metadata.insert(RETENTION_METADATA_KEY.to_string(), encoded);
Ok(())
}
pub fn get_legal_hold(metadata: &HashMap<String, String>) -> bool {
metadata
.get(LEGAL_HOLD_METADATA_KEY)
.map(|value| value.eq_ignore_ascii_case("ON") || value.eq_ignore_ascii_case("true"))
.unwrap_or(false)
}
pub fn set_legal_hold(metadata: &mut HashMap<String, String>, enabled: bool) {
metadata.insert(
LEGAL_HOLD_METADATA_KEY.to_string(),
if enabled { "ON" } else { "OFF" }.to_string(),
);
}
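/// Expired retention is always mutable. Active COMPLIANCE retention can never
/// be modified; active GOVERNANCE retention requires the bypass-governance
/// permission.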
pub fn ensure_retention_mutable(
metadata: &HashMap<String, String>,
bypass_governance: bool,
) -> Result<(), String> {
let Some(existing) = get_object_retention(metadata) else {
return Ok(());
};
if existing.is_expired() {
return Ok(());
}
match existing.mode {
        RetentionMode::COMPLIANCE => Err(
            "Cannot modify retention on object with COMPLIANCE mode until retention expires"
                .to_string(),
        ),
RetentionMode::GOVERNANCE if !bypass_governance => Err(
"Cannot modify GOVERNANCE retention without bypass-governance permission".to_string(),
),
RetentionMode::GOVERNANCE => Ok(()),
}
}
pub fn can_delete_object(
metadata: &HashMap<String, String>,
bypass_governance: bool,
) -> Result<(), String> {
if get_legal_hold(metadata) {
return Err("Object is under legal hold".to_string());
}
if let Some(retention) = get_object_retention(metadata) {
if !retention.is_expired() {
return match retention.mode {
RetentionMode::COMPLIANCE => Err(format!(
"Object is locked in COMPLIANCE mode until {}",
retention.retain_until_date.to_rfc3339()
)),
RetentionMode::GOVERNANCE if !bypass_governance => Err(format!(
"Object is locked in GOVERNANCE mode until {}",
retention.retain_until_date.to_rfc3339()
)),
RetentionMode::GOVERNANCE => Ok(()),
};
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration;
#[test]
fn legal_hold_blocks_delete() {
let mut metadata = HashMap::new();
set_legal_hold(&mut metadata, true);
let err = can_delete_object(&metadata, false).unwrap_err();
assert!(err.contains("legal hold"));
}
#[test]
fn governance_requires_bypass() {
let mut metadata = HashMap::new();
set_object_retention(
&mut metadata,
&ObjectLockRetention {
mode: RetentionMode::GOVERNANCE,
retain_until_date: Utc::now() + Duration::hours(1),
},
)
.unwrap();
assert!(can_delete_object(&metadata, false).is_err());
assert!(can_delete_object(&metadata, true).is_ok());
}
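    // Illustrative addition for symmetry with the GOVERNANCE test above:
    // COMPLIANCE mode must reject changes and deletes even with bypass.
    #[test]
    fn compliance_blocks_changes_even_with_bypass() {
        let mut metadata = HashMap::new();
        set_object_retention(
            &mut metadata,
            &ObjectLockRetention {
                mode: RetentionMode::COMPLIANCE,
                retain_until_date: Utc::now() + Duration::hours(1),
            },
        )
        .unwrap();
        assert!(ensure_retention_mutable(&metadata, false).is_err());
        assert!(ensure_retention_mutable(&metadata, true).is_err());
        assert!(can_delete_object(&metadata, true).is_err());
    }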
}

View File

@@ -0,0 +1,713 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use aws_sdk_s3::primitives::ByteStream;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use tokio::sync::Semaphore;
use myfsio_common::types::ListParams;
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use crate::services::s3_client::{build_client, check_endpoint_health, ClientOptions};
use crate::stores::connections::{ConnectionStore, RemoteConnection};
pub const MODE_NEW_ONLY: &str = "new_only";
pub const MODE_ALL: &str = "all";
pub const MODE_BIDIRECTIONAL: &str = "bidirectional";
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReplicationStats {
#[serde(default)]
pub objects_synced: u64,
#[serde(default)]
pub objects_pending: u64,
#[serde(default)]
pub objects_orphaned: u64,
#[serde(default)]
pub bytes_synced: u64,
#[serde(default)]
pub last_sync_at: Option<f64>,
#[serde(default)]
pub last_sync_key: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicationRule {
pub bucket_name: String,
pub target_connection_id: String,
pub target_bucket: String,
#[serde(default = "default_true")]
pub enabled: bool,
#[serde(default = "default_mode")]
pub mode: String,
#[serde(default)]
pub created_at: Option<f64>,
#[serde(default)]
pub stats: ReplicationStats,
#[serde(default = "default_true")]
pub sync_deletions: bool,
#[serde(default)]
pub last_pull_at: Option<f64>,
#[serde(default)]
pub filter_prefix: Option<String>,
}
fn default_true() -> bool {
true
}
fn default_mode() -> String {
MODE_NEW_ONLY.to_string()
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicationFailure {
pub object_key: String,
pub error_message: String,
pub timestamp: f64,
pub failure_count: u32,
pub bucket_name: String,
pub action: String,
#[serde(default)]
pub last_error_code: Option<String>,
}
pub struct ReplicationFailureStore {
storage_root: PathBuf,
max_failures_per_bucket: usize,
cache: Mutex<HashMap<String, Vec<ReplicationFailure>>>,
}
impl ReplicationFailureStore {
pub fn new(storage_root: PathBuf, max_failures_per_bucket: usize) -> Self {
Self {
storage_root,
max_failures_per_bucket,
cache: Mutex::new(HashMap::new()),
}
}
fn path(&self, bucket: &str) -> PathBuf {
self.storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket)
.join("replication_failures.json")
}
fn load_from_disk(&self, bucket: &str) -> Vec<ReplicationFailure> {
let path = self.path(bucket);
if !path.exists() {
return Vec::new();
}
match std::fs::read_to_string(&path) {
Ok(text) => {
let parsed: serde_json::Value = match serde_json::from_str(&text) {
Ok(v) => v,
Err(_) => return Vec::new(),
};
parsed
.get("failures")
.and_then(|v| serde_json::from_value(v.clone()).ok())
.unwrap_or_default()
}
Err(_) => Vec::new(),
}
}
fn save_to_disk(&self, bucket: &str, failures: &[ReplicationFailure]) {
let path = self.path(bucket);
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let trimmed = &failures[..failures.len().min(self.max_failures_per_bucket)];
let data = serde_json::json!({ "failures": trimmed });
let _ = std::fs::write(
&path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn load(&self, bucket: &str) -> Vec<ReplicationFailure> {
let mut cache = self.cache.lock();
if let Some(existing) = cache.get(bucket) {
return existing.clone();
}
let loaded = self.load_from_disk(bucket);
cache.insert(bucket.to_string(), loaded.clone());
loaded
}
pub fn save(&self, bucket: &str, failures: Vec<ReplicationFailure>) {
let trimmed: Vec<ReplicationFailure> = failures
.into_iter()
.take(self.max_failures_per_bucket)
.collect();
self.save_to_disk(bucket, &trimmed);
self.cache.lock().insert(bucket.to_string(), trimmed);
}
pub fn add(&self, bucket: &str, failure: ReplicationFailure) {
let mut failures = self.load(bucket);
if let Some(existing) = failures
.iter_mut()
.find(|f| f.object_key == failure.object_key)
{
existing.failure_count += 1;
existing.timestamp = failure.timestamp;
existing.error_message = failure.error_message.clone();
existing.last_error_code = failure.last_error_code.clone();
} else {
failures.insert(0, failure);
}
self.save(bucket, failures);
}
pub fn remove(&self, bucket: &str, object_key: &str) -> bool {
let failures = self.load(bucket);
let before = failures.len();
let after: Vec<_> = failures
.into_iter()
.filter(|f| f.object_key != object_key)
.collect();
if after.len() != before {
self.save(bucket, after);
true
} else {
false
}
}
pub fn clear(&self, bucket: &str) {
self.cache.lock().remove(bucket);
let path = self.path(bucket);
let _ = std::fs::remove_file(path);
}
pub fn get(&self, bucket: &str, object_key: &str) -> Option<ReplicationFailure> {
self.load(bucket)
.into_iter()
.find(|f| f.object_key == object_key)
}
pub fn count(&self, bucket: &str) -> usize {
self.load(bucket).len()
}
}
pub struct ReplicationManager {
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
rules_path: PathBuf,
rules: Mutex<HashMap<String, ReplicationRule>>,
client_options: ClientOptions,
streaming_threshold_bytes: u64,
pub failures: Arc<ReplicationFailureStore>,
semaphore: Arc<Semaphore>,
}
impl ReplicationManager {
pub fn new(
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
storage_root: &Path,
connect_timeout: Duration,
read_timeout: Duration,
max_retries: u32,
streaming_threshold_bytes: u64,
max_failures_per_bucket: usize,
) -> Self {
let rules_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("replication_rules.json");
let rules = load_rules(&rules_path);
let failures = Arc::new(ReplicationFailureStore::new(
storage_root.to_path_buf(),
max_failures_per_bucket,
));
let client_options = ClientOptions {
connect_timeout,
read_timeout,
max_attempts: max_retries,
};
Self {
storage,
connections,
rules_path,
rules: Mutex::new(rules),
client_options,
streaming_threshold_bytes,
failures,
semaphore: Arc::new(Semaphore::new(4)),
}
}
pub fn reload_rules(&self) {
*self.rules.lock() = load_rules(&self.rules_path);
}
pub fn list_rules(&self) -> Vec<ReplicationRule> {
self.rules.lock().values().cloned().collect()
}
pub fn get_rule(&self, bucket: &str) -> Option<ReplicationRule> {
self.rules.lock().get(bucket).cloned()
}
pub fn set_rule(&self, rule: ReplicationRule) {
{
let mut guard = self.rules.lock();
guard.insert(rule.bucket_name.clone(), rule);
}
self.save_rules();
}
pub fn delete_rule(&self, bucket: &str) {
{
let mut guard = self.rules.lock();
guard.remove(bucket);
}
self.save_rules();
}
pub fn save_rules(&self) {
let snapshot: HashMap<String, ReplicationRule> = self.rules.lock().clone();
if let Some(parent) = self.rules_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(text) = serde_json::to_string_pretty(&snapshot) {
let _ = std::fs::write(&self.rules_path, text);
}
}
fn update_last_sync(&self, bucket: &str, key: &str) {
{
let mut guard = self.rules.lock();
if let Some(rule) = guard.get_mut(bucket) {
rule.stats.last_sync_at = Some(now_secs());
rule.stats.last_sync_key = Some(key.to_string());
}
}
self.save_rules();
}
pub async fn trigger(self: Arc<Self>, bucket: String, key: String, action: String) {
let rule = match self.get_rule(&bucket) {
Some(r) if r.enabled => r,
_ => return,
};
let connection = match self.connections.get(&rule.target_connection_id) {
Some(c) => c,
None => {
tracing::warn!(
"Replication skipped for {}/{}: connection {} not found",
bucket,
key,
rule.target_connection_id
);
return;
}
};
        // Prefer an immediate permit; otherwise wait, so bursts of triggers
        // are throttled to the semaphore's concurrency limit.
        let permit = match self.semaphore.clone().try_acquire_owned() {
Ok(p) => p,
Err(_) => {
let sem = self.semaphore.clone();
match sem.acquire_owned().await {
Ok(p) => p,
Err(_) => return,
}
}
};
let manager = self.clone();
tokio::spawn(async move {
let _permit = permit;
manager
.replicate_task(&bucket, &key, &rule, &connection, &action)
.await;
});
}
pub async fn replicate_existing_objects(self: Arc<Self>, bucket: String) -> usize {
let rule = match self.get_rule(&bucket) {
Some(r) if r.enabled => r,
_ => return 0,
};
let connection = match self.connections.get(&rule.target_connection_id) {
Some(c) => c,
None => {
tracing::warn!(
"Cannot replicate existing objects for {}: connection {} not found",
bucket,
rule.target_connection_id
);
return 0;
}
};
if !self.check_endpoint(&connection).await {
tracing::warn!(
"Cannot replicate existing objects for {}: endpoint {} is unreachable",
bucket,
connection.endpoint_url
);
return 0;
}
let mut continuation_token: Option<String> = None;
let mut submitted = 0usize;
loop {
let page = match self
.storage
.list_objects(
&bucket,
&ListParams {
max_keys: 1000,
continuation_token: continuation_token.clone(),
prefix: rule.filter_prefix.clone(),
start_after: None,
},
)
.await
{
Ok(page) => page,
Err(err) => {
tracing::error!(
"Failed to list existing objects for replication in {}: {}",
bucket,
err
);
break;
}
};
let next_token = page.next_continuation_token.clone();
let is_truncated = page.is_truncated;
for object in page.objects {
submitted += 1;
self.clone()
.trigger(bucket.clone(), object.key, "write".to_string())
.await;
}
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
break;
}
}
submitted
}
pub fn schedule_existing_objects_sync(self: Arc<Self>, bucket: String) {
tokio::spawn(async move {
let submitted = self
.clone()
.replicate_existing_objects(bucket.clone())
.await;
if submitted > 0 {
tracing::info!(
"Scheduled {} existing object(s) for replication in {}",
submitted,
bucket
);
}
});
}
async fn replicate_task(
&self,
bucket: &str,
object_key: &str,
rule: &ReplicationRule,
conn: &RemoteConnection,
action: &str,
) {
if object_key.contains("..") || object_key.starts_with('/') || object_key.starts_with('\\')
{
tracing::error!("Invalid object key (path traversal): {}", object_key);
return;
}
let client = build_client(conn, &self.client_options);
if action == "delete" {
match client
.delete_object()
.bucket(&rule.target_bucket)
.key(object_key)
.send()
.await
{
Ok(_) => {
tracing::info!(
"Replicated DELETE {}/{} to {} ({})",
bucket,
object_key,
conn.name,
rule.target_bucket
);
self.update_last_sync(bucket, object_key);
self.failures.remove(bucket, object_key);
}
Err(err) => {
let msg = format!("{:?}", err);
tracing::error!(
"Replication DELETE failed {}/{}: {}",
bucket,
object_key,
msg
);
self.failures.add(
bucket,
ReplicationFailure {
object_key: object_key.to_string(),
error_message: msg,
timestamp: now_secs(),
failure_count: 1,
bucket_name: bucket.to_string(),
action: "delete".to_string(),
last_error_code: None,
},
);
}
}
return;
}
let src_path = match self.storage.get_object_path(bucket, object_key).await {
Ok(p) => p,
Err(_) => {
tracing::error!("Source object not found: {}/{}", bucket, object_key);
return;
}
};
let file_size = match tokio::fs::metadata(&src_path).await {
Ok(m) => m.len(),
Err(_) => 0,
};
let content_type = mime_guess::from_path(&src_path)
.first_raw()
.map(|s| s.to_string());
let upload_result = upload_object(
&client,
&rule.target_bucket,
object_key,
&src_path,
file_size,
self.streaming_threshold_bytes,
content_type.as_deref(),
)
.await;
let final_result = match upload_result {
Err(err) if is_no_such_bucket(&err) => {
tracing::info!(
"Target bucket {} not found, creating it",
rule.target_bucket
);
match client
.create_bucket()
.bucket(&rule.target_bucket)
.send()
.await
{
                    // Retry the upload regardless of the create_bucket result:
                    // an error here usually means a concurrent creator won.
                    Ok(_) | Err(_) => {
upload_object(
&client,
&rule.target_bucket,
object_key,
&src_path,
file_size,
self.streaming_threshold_bytes,
content_type.as_deref(),
)
.await
}
}
}
other => other,
};
match final_result {
Ok(()) => {
tracing::info!(
"Replicated {}/{} to {} ({})",
bucket,
object_key,
conn.name,
rule.target_bucket
);
self.update_last_sync(bucket, object_key);
self.failures.remove(bucket, object_key);
}
Err(err) => {
let msg = err.to_string();
tracing::error!("Replication failed {}/{}: {}", bucket, object_key, msg);
self.failures.add(
bucket,
ReplicationFailure {
object_key: object_key.to_string(),
error_message: msg,
timestamp: now_secs(),
failure_count: 1,
bucket_name: bucket.to_string(),
action: action.to_string(),
last_error_code: None,
},
);
}
}
}
pub async fn check_endpoint(&self, conn: &RemoteConnection) -> bool {
let client = build_client(conn, &self.client_options);
check_endpoint_health(&client).await
}
pub async fn retry_failed(&self, bucket: &str, object_key: &str) -> bool {
let failure = match self.failures.get(bucket, object_key) {
Some(f) => f,
None => return false,
};
let rule = match self.get_rule(bucket) {
Some(r) if r.enabled => r,
_ => return false,
};
let conn = match self.connections.get(&rule.target_connection_id) {
Some(c) => c,
None => return false,
};
self.replicate_task(bucket, object_key, &rule, &conn, &failure.action)
.await;
true
}
pub async fn retry_all(&self, bucket: &str) -> (usize, usize) {
let failures = self.failures.load(bucket);
if failures.is_empty() {
return (0, 0);
}
let rule = match self.get_rule(bucket) {
Some(r) if r.enabled => r,
_ => return (0, failures.len()),
};
let conn = match self.connections.get(&rule.target_connection_id) {
Some(c) => c,
None => return (0, failures.len()),
};
let mut submitted = 0;
for failure in failures {
self.replicate_task(bucket, &failure.object_key, &rule, &conn, &failure.action)
.await;
submitted += 1;
}
(submitted, 0)
}
pub fn get_failure_count(&self, bucket: &str) -> usize {
self.failures.count(bucket)
}
pub fn get_failed_items(
&self,
bucket: &str,
limit: usize,
offset: usize,
) -> Vec<ReplicationFailure> {
self.failures
.load(bucket)
.into_iter()
.skip(offset)
.take(limit)
.collect()
}
pub fn dismiss_failure(&self, bucket: &str, key: &str) -> bool {
self.failures.remove(bucket, key)
}
pub fn clear_failures(&self, bucket: &str) {
self.failures.clear(bucket);
}
pub fn rules_snapshot(&self) -> HashMap<String, ReplicationRule> {
self.rules.lock().clone()
}
pub fn update_last_pull(&self, bucket: &str, at: f64) {
{
let mut guard = self.rules.lock();
if let Some(rule) = guard.get_mut(bucket) {
rule.last_pull_at = Some(at);
}
}
self.save_rules();
}
pub fn client_options(&self) -> &ClientOptions {
&self.client_options
}
}
fn is_no_such_bucket<E: std::fmt::Debug>(err: &E) -> bool {
    // Heuristic: the typed error is nested several SdkError layers deep, so
    // match on the debug rendering instead of downcasting.
    let text = format!("{:?}", err);
    text.contains("NoSuchBucket")
}
async fn upload_object(
client: &aws_sdk_s3::Client,
bucket: &str,
key: &str,
path: &Path,
file_size: u64,
streaming_threshold: u64,
content_type: Option<&str>,
) -> Result<(), aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::put_object::PutObjectError>> {
let mut req = client.put_object().bucket(bucket).key(key);
if let Some(ct) = content_type {
req = req.content_type(ct);
}
let body = if file_size >= streaming_threshold {
ByteStream::from_path(path).await.map_err(|e| {
aws_sdk_s3::error::SdkError::construction_failure(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
e,
)))
})?
} else {
let bytes = tokio::fs::read(path)
.await
.map_err(|e| aws_sdk_s3::error::SdkError::construction_failure(Box::new(e)))?;
ByteStream::from(bytes)
};
req.body(body).send().await.map(|_| ())
}
fn load_rules(path: &Path) -> HashMap<String, ReplicationRule> {
if !path.exists() {
return HashMap::new();
}
match std::fs::read_to_string(path) {
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
Err(_) => HashMap::new(),
}
}
fn now_secs() -> f64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs_f64())
.unwrap_or(0.0)
}
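#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch: exercises the failure store's de-duplication and
    // per-bucket cap against a temp directory (tempfile assumed available as
    // a dev-dependency, as in the other test modules in this crate).
    #[test]
    fn failure_store_dedupes_and_caps() {
        let tmp = tempfile::tempdir().unwrap();
        let store = ReplicationFailureStore::new(tmp.path().to_path_buf(), 2);
        let failure = |key: &str| ReplicationFailure {
            object_key: key.to_string(),
            error_message: "timeout".to_string(),
            timestamp: 0.0,
            failure_count: 1,
            bucket_name: "b".to_string(),
            action: "write".to_string(),
            last_error_code: None,
        };
        store.add("b", failure("a"));
        store.add("b", failure("a"));
        assert_eq!(store.get("b", "a").unwrap().failure_count, 2);
        store.add("b", failure("x"));
        store.add("b", failure("y"));
        assert_eq!(store.count("b"), 2);
        assert!(store.remove("b", "y"));
        assert!(!store.remove("b", "missing"));
    }
}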

View File

@@ -0,0 +1,64 @@
use std::time::Duration;
use aws_config::BehaviorVersion;
use aws_credential_types::Credentials;
use aws_sdk_s3::config::{Region, SharedCredentialsProvider};
use aws_sdk_s3::Client;
use crate::stores::connections::RemoteConnection;
pub struct ClientOptions {
pub connect_timeout: Duration,
pub read_timeout: Duration,
pub max_attempts: u32,
}
impl Default for ClientOptions {
fn default() -> Self {
Self {
connect_timeout: Duration::from_secs(5),
read_timeout: Duration::from_secs(30),
max_attempts: 2,
}
}
}
pub fn build_client(connection: &RemoteConnection, options: &ClientOptions) -> Client {
let credentials = Credentials::new(
connection.access_key.clone(),
connection.secret_key.clone(),
None,
None,
"myfsio-replication",
);
let timeout_config = aws_smithy_types::timeout::TimeoutConfig::builder()
.connect_timeout(options.connect_timeout)
.read_timeout(options.read_timeout)
.build();
let retry_config =
aws_smithy_types::retry::RetryConfig::standard().with_max_attempts(options.max_attempts);
let config = aws_sdk_s3::config::Builder::new()
.behavior_version(BehaviorVersion::latest())
.credentials_provider(SharedCredentialsProvider::new(credentials))
.region(Region::new(connection.region.clone()))
.endpoint_url(connection.endpoint_url.clone())
.force_path_style(true)
.timeout_config(timeout_config)
.retry_config(retry_config)
.build();
Client::from_conf(config)
}
pub async fn check_endpoint_health(client: &Client) -> bool {
match client.list_buckets().send().await {
Ok(_) => true,
Err(err) => {
tracing::warn!("Endpoint health check failed: {:?}", err);
false
}
}
}
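// Illustrative usage sketch (not wired into the server): builds a client
// against a hypothetical endpoint and probes it. The endpoint URL and
// credentials below are placeholders, not real values.
#[allow(dead_code)]
async fn example_probe() -> bool {
    let conn = RemoteConnection {
        id: "example".to_string(),
        name: "local".to_string(),
        endpoint_url: "http://127.0.0.1:5000".to_string(),
        access_key: "placeholder-key".to_string(),
        secret_key: "placeholder-secret".to_string(),
        region: "us-east-1".to_string(),
    };
    let client = build_client(&conn, &ClientOptions::default());
    check_endpoint_health(&client).await
}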

View File

@@ -0,0 +1,148 @@
use chrono::Utc;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SiteInfo {
pub site_id: String,
pub endpoint: String,
#[serde(default = "default_region")]
pub region: String,
#[serde(default = "default_priority")]
pub priority: i32,
#[serde(default)]
pub display_name: String,
#[serde(default)]
pub created_at: Option<String>,
}
fn default_region() -> String {
"us-east-1".to_string()
}
fn default_priority() -> i32 {
100
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerSite {
pub site_id: String,
pub endpoint: String,
#[serde(default = "default_region")]
pub region: String,
#[serde(default = "default_priority")]
pub priority: i32,
#[serde(default)]
pub display_name: String,
#[serde(default)]
pub connection_id: Option<String>,
#[serde(default)]
pub created_at: Option<String>,
#[serde(default)]
pub is_healthy: bool,
#[serde(default)]
pub last_health_check: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct RegistryData {
#[serde(default)]
local: Option<SiteInfo>,
#[serde(default)]
peers: Vec<PeerSite>,
}
pub struct SiteRegistry {
path: PathBuf,
data: Arc<RwLock<RegistryData>>,
}
impl SiteRegistry {
pub fn new(storage_root: &std::path::Path) -> Self {
let path = storage_root
.join(".myfsio.sys")
.join("config")
.join("site_registry.json");
let data = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str(&s).ok())
.unwrap_or_default()
} else {
RegistryData::default()
};
Self {
path,
data: Arc::new(RwLock::new(data)),
}
}
fn save(&self) {
let data = self.data.read();
if let Some(parent) = self.path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(json) = serde_json::to_string_pretty(&*data) {
let _ = std::fs::write(&self.path, json);
}
}
pub fn get_local_site(&self) -> Option<SiteInfo> {
self.data.read().local.clone()
}
pub fn set_local_site(&self, site: SiteInfo) {
self.data.write().local = Some(site);
self.save();
}
pub fn list_peers(&self) -> Vec<PeerSite> {
self.data.read().peers.clone()
}
pub fn get_peer(&self, site_id: &str) -> Option<PeerSite> {
self.data
.read()
.peers
.iter()
.find(|p| p.site_id == site_id)
.cloned()
}
pub fn add_peer(&self, peer: PeerSite) {
self.data.write().peers.push(peer);
self.save();
}
pub fn update_peer(&self, peer: PeerSite) {
let mut data = self.data.write();
if let Some(existing) = data.peers.iter_mut().find(|p| p.site_id == peer.site_id) {
*existing = peer;
}
drop(data);
self.save();
}
pub fn delete_peer(&self, site_id: &str) -> bool {
let mut data = self.data.write();
let len_before = data.peers.len();
data.peers.retain(|p| p.site_id != site_id);
let removed = data.peers.len() < len_before;
drop(data);
if removed {
self.save();
}
removed
}
pub fn update_health(&self, site_id: &str, is_healthy: bool) {
let mut data = self.data.write();
if let Some(peer) = data.peers.iter_mut().find(|p| p.site_id == site_id) {
peer.is_healthy = is_healthy;
peer.last_health_check = Some(Utc::now().to_rfc3339());
}
drop(data);
self.save();
}
}
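#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch: round-trips a peer through the registry on a
    // temp directory (tempfile assumed as dev-dep).
    #[test]
    fn peer_roundtrip() {
        let tmp = tempfile::tempdir().unwrap();
        let registry = SiteRegistry::new(tmp.path());
        registry.add_peer(PeerSite {
            site_id: "site-b".to_string(),
            endpoint: "http://peer:5000".to_string(),
            region: default_region(),
            priority: default_priority(),
            display_name: "Site B".to_string(),
            connection_id: None,
            created_at: None,
            is_healthy: false,
            last_health_check: None,
        });
        registry.update_health("site-b", true);
        assert!(registry.get_peer("site-b").unwrap().is_healthy);
        assert!(registry.delete_peer("site-b"));
        assert!(!registry.delete_peer("site-b"));
    }
}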

View File

@@ -0,0 +1,498 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use aws_sdk_s3::Client;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncRead;
use tokio::sync::Notify;
use myfsio_common::types::{ListParams, ObjectMeta};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use crate::services::replication::{ReplicationManager, ReplicationRule, MODE_BIDIRECTIONAL};
use crate::services::s3_client::{build_client, ClientOptions};
use crate::stores::connections::ConnectionStore;
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SyncedObjectInfo {
pub last_synced_at: f64,
pub remote_etag: String,
pub source: String,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SyncState {
#[serde(default)]
pub synced_objects: HashMap<String, SyncedObjectInfo>,
#[serde(default)]
pub last_full_sync: Option<f64>,
}
#[derive(Debug, Clone, Default, Serialize)]
pub struct SiteSyncStats {
pub last_sync_at: Option<f64>,
pub objects_pulled: u64,
pub objects_skipped: u64,
pub conflicts_resolved: u64,
pub deletions_applied: u64,
pub errors: u64,
}
#[derive(Debug, Clone)]
struct RemoteObjectMeta {
last_modified: f64,
etag: String,
}
pub struct SiteSyncWorker {
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
replication: Arc<ReplicationManager>,
storage_root: PathBuf,
interval: Duration,
batch_size: usize,
clock_skew_tolerance: f64,
client_options: ClientOptions,
bucket_stats: Mutex<HashMap<String, SiteSyncStats>>,
shutdown: Arc<Notify>,
}
impl SiteSyncWorker {
pub fn new(
storage: Arc<FsStorageBackend>,
connections: Arc<ConnectionStore>,
replication: Arc<ReplicationManager>,
storage_root: PathBuf,
interval_seconds: u64,
batch_size: usize,
connect_timeout: Duration,
read_timeout: Duration,
max_retries: u32,
clock_skew_tolerance: f64,
) -> Self {
Self {
storage,
connections,
replication,
storage_root,
interval: Duration::from_secs(interval_seconds),
batch_size,
clock_skew_tolerance,
client_options: ClientOptions {
connect_timeout,
read_timeout,
max_attempts: max_retries,
},
bucket_stats: Mutex::new(HashMap::new()),
shutdown: Arc::new(Notify::new()),
}
}
pub fn shutdown(&self) {
self.shutdown.notify_waiters();
}
pub fn get_stats(&self, bucket: &str) -> Option<SiteSyncStats> {
self.bucket_stats.lock().get(bucket).cloned()
}
pub async fn run(self: Arc<Self>) {
tracing::info!(
"Site sync worker started (interval={}s)",
self.interval.as_secs()
);
loop {
tokio::select! {
_ = tokio::time::sleep(self.interval) => {}
_ = self.shutdown.notified() => {
tracing::info!("Site sync worker shutting down");
return;
}
}
self.run_cycle().await;
}
}
async fn run_cycle(&self) {
let rules = self.replication.rules_snapshot();
for (bucket, rule) in rules {
if rule.mode != MODE_BIDIRECTIONAL || !rule.enabled {
continue;
}
match self.sync_bucket(&rule).await {
Ok(stats) => {
self.bucket_stats.lock().insert(bucket, stats);
}
Err(e) => {
tracing::error!("Site sync failed for bucket {}: {}", bucket, e);
}
}
}
}
pub async fn trigger_sync(&self, bucket: &str) -> Option<SiteSyncStats> {
let rule = self.replication.get_rule(bucket)?;
if rule.mode != MODE_BIDIRECTIONAL || !rule.enabled {
return None;
}
match self.sync_bucket(&rule).await {
Ok(stats) => {
self.bucket_stats
.lock()
.insert(bucket.to_string(), stats.clone());
Some(stats)
}
Err(e) => {
tracing::error!("Site sync trigger failed for {}: {}", bucket, e);
None
}
}
}
async fn sync_bucket(&self, rule: &ReplicationRule) -> Result<SiteSyncStats, String> {
let mut stats = SiteSyncStats::default();
let connection = self
.connections
.get(&rule.target_connection_id)
.ok_or_else(|| format!("connection {} not found", rule.target_connection_id))?;
let local_objects = self
.list_local_objects(&rule.bucket_name)
.await
.map_err(|e| format!("list local failed: {}", e))?;
let client = build_client(&connection, &self.client_options);
let remote_objects = self
.list_remote_objects(&client, &rule.target_bucket)
.await
.map_err(|e| format!("list remote failed: {}", e))?;
let mut sync_state = self.load_sync_state(&rule.bucket_name);
let mut to_pull: Vec<String> = Vec::new();
for (key, remote_meta) in &remote_objects {
if let Some(local_meta) = local_objects.get(key) {
match self.resolve_conflict(local_meta, remote_meta) {
"pull" => {
to_pull.push(key.clone());
stats.conflicts_resolved += 1;
}
_ => {
stats.objects_skipped += 1;
}
}
} else {
to_pull.push(key.clone());
}
}
let mut pulled = 0usize;
for key in &to_pull {
if pulled >= self.batch_size {
break;
}
let remote_meta = match remote_objects.get(key) {
Some(m) => m,
None => continue,
};
if self
.pull_object(&client, &rule.target_bucket, &rule.bucket_name, key)
.await
{
stats.objects_pulled += 1;
pulled += 1;
sync_state.synced_objects.insert(
key.clone(),
SyncedObjectInfo {
last_synced_at: now_secs(),
remote_etag: remote_meta.etag.clone(),
source: "remote".to_string(),
},
);
} else {
stats.errors += 1;
}
}
if rule.sync_deletions {
let tracked_keys: Vec<String> = sync_state.synced_objects.keys().cloned().collect();
for key in tracked_keys {
if remote_objects.contains_key(&key) {
continue;
}
let local_meta = match local_objects.get(&key) {
Some(m) => m,
None => continue,
};
let tracked = match sync_state.synced_objects.get(&key) {
Some(t) => t.clone(),
None => continue,
};
if tracked.source != "remote" {
continue;
}
                // Whole-second precision suffices here: deletion only applies
                // when the local copy has not changed since the recorded sync.
                let local_ts = local_meta.last_modified.timestamp() as f64;
if local_ts <= tracked.last_synced_at
&& self.apply_remote_deletion(&rule.bucket_name, &key).await
{
stats.deletions_applied += 1;
sync_state.synced_objects.remove(&key);
}
}
}
sync_state.last_full_sync = Some(now_secs());
self.save_sync_state(&rule.bucket_name, &sync_state);
self.replication
.update_last_pull(&rule.bucket_name, now_secs());
stats.last_sync_at = Some(now_secs());
tracing::info!(
"Site sync completed for {}: pulled={}, skipped={}, conflicts={}, deletions={}, errors={}",
rule.bucket_name,
stats.objects_pulled,
stats.objects_skipped,
stats.conflicts_resolved,
stats.deletions_applied,
stats.errors,
);
Ok(stats)
}
async fn list_local_objects(
&self,
bucket: &str,
) -> Result<HashMap<String, ObjectMeta>, String> {
let mut result = HashMap::new();
let mut token: Option<String> = None;
loop {
let params = ListParams {
max_keys: 1000,
continuation_token: token.clone(),
prefix: None,
start_after: None,
};
let page = self
.storage
.list_objects(bucket, &params)
.await
.map_err(|e| e.to_string())?;
for obj in page.objects {
result.insert(obj.key.clone(), obj);
}
if !page.is_truncated {
break;
}
token = page.next_continuation_token;
if token.is_none() {
break;
}
}
Ok(result)
}
async fn list_remote_objects(
&self,
client: &Client,
bucket: &str,
) -> Result<HashMap<String, RemoteObjectMeta>, String> {
let mut result = HashMap::new();
let mut continuation: Option<String> = None;
loop {
let mut req = client.list_objects_v2().bucket(bucket);
if let Some(ref t) = continuation {
req = req.continuation_token(t);
}
let resp = match req.send().await {
Ok(r) => r,
Err(err) => {
if is_not_found_error(&err) {
return Ok(result);
}
return Err(format!("{:?}", err));
}
};
for obj in resp.contents() {
let key = match obj.key() {
Some(k) => k.to_string(),
None => continue,
};
                let last_modified = obj
                    .last_modified()
                    .map(|t| t.secs() as f64 + t.subsec_nanos() as f64 / 1_000_000_000.0)
                    .unwrap_or(0.0);
let etag = obj.e_tag().unwrap_or("").trim_matches('"').to_string();
result.insert(
key,
RemoteObjectMeta {
last_modified,
etag,
},
);
}
if resp.is_truncated().unwrap_or(false) {
continuation = resp.next_continuation_token().map(|s| s.to_string());
if continuation.is_none() {
break;
}
} else {
break;
}
}
Ok(result)
}
fn resolve_conflict(&self, local: &ObjectMeta, remote: &RemoteObjectMeta) -> &'static str {
let local_ts = local.last_modified.timestamp() as f64
+ local.last_modified.timestamp_subsec_nanos() as f64 / 1_000_000_000.0;
let remote_ts = remote.last_modified;
        // Within the skew tolerance, timestamps cannot order the writes, so
        // fall back to ETags: identical content is a skip, and otherwise the
        // lexicographically larger ETag wins so both sites converge on the
        // same copy.
        if (remote_ts - local_ts).abs() < self.clock_skew_tolerance {
            let local_etag = local.etag.clone().unwrap_or_default();
            let local_etag_trim = local_etag.trim_matches('"');
            if remote.etag == local_etag_trim {
                return "skip";
            }
            if remote.etag.as_str() > local_etag_trim {
                return "pull";
            }
            return "keep";
        }
}
if remote_ts > local_ts {
"pull"
} else {
"keep"
}
}
async fn pull_object(
&self,
client: &Client,
remote_bucket: &str,
local_bucket: &str,
key: &str,
) -> bool {
let resp = match client
.get_object()
.bucket(remote_bucket)
.key(key)
.send()
.await
{
Ok(r) => r,
Err(err) => {
tracing::error!("Pull GetObject failed {}/{}: {:?}", local_bucket, key, err);
return false;
}
};
        // GetObject already returns the object's user metadata; reuse it
        // rather than issuing a second HeadObject round-trip.
        let metadata: Option<HashMap<String, String>> = resp
            .metadata()
            .map(|m| m.iter().map(|(k, v)| (k.clone(), v.clone())).collect());
        let stream = resp.body.into_async_read();
let boxed: Pin<Box<dyn AsyncRead + Send>> = Box::pin(stream);
match self
.storage
.put_object(local_bucket, key, boxed, metadata)
.await
{
Ok(_) => {
tracing::debug!("Pulled object {}/{} from remote", local_bucket, key);
true
}
Err(err) => {
tracing::error!(
"Store pulled object failed {}/{}: {}",
local_bucket,
key,
err
);
false
}
}
}
async fn apply_remote_deletion(&self, bucket: &str, key: &str) -> bool {
match self.storage.delete_object(bucket, key).await {
Ok(_) => {
tracing::debug!("Applied remote deletion for {}/{}", bucket, key);
true
}
Err(err) => {
tracing::error!("Remote deletion failed {}/{}: {}", bucket, key, err);
false
}
}
}
fn sync_state_path(&self, bucket: &str) -> PathBuf {
self.storage_root
.join(".myfsio.sys")
.join("buckets")
.join(bucket)
.join("site_sync_state.json")
}
fn load_sync_state(&self, bucket: &str) -> SyncState {
let path = self.sync_state_path(bucket);
if !path.exists() {
return SyncState::default();
}
match std::fs::read_to_string(&path) {
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
Err(_) => SyncState::default(),
}
}
fn save_sync_state(&self, bucket: &str, state: &SyncState) {
let path = self.sync_state_path(bucket);
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(text) = serde_json::to_string_pretty(state) {
let _ = std::fs::write(&path, text);
}
}
}
fn now_secs() -> f64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs_f64())
.unwrap_or(0.0)
}
fn is_not_found_error<E: std::fmt::Debug>(err: &aws_sdk_s3::error::SdkError<E>) -> bool {
let msg = format!("{:?}", err);
msg.contains("NoSuchBucket")
|| msg.contains("code: Some(\"NotFound\")")
|| msg.contains("code: Some(\"NoSuchBucket\")")
|| msg.contains("status: 404")
}
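#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch: a construction failure carries no S3 error code,
    // so the string heuristic must not classify it as a missing bucket.
    #[test]
    fn construction_failure_is_not_missing_bucket() {
        let err: aws_sdk_s3::error::SdkError<
            aws_sdk_s3::operation::get_object::GetObjectError,
        > = aws_sdk_s3::error::SdkError::construction_failure(Box::new(
            std::io::Error::new(std::io::ErrorKind::Other, "boom"),
        ));
        assert!(!is_not_found_error(&err));
    }
}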

View File

@@ -0,0 +1,203 @@
use chrono::{DateTime, Utc};
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use sysinfo::{Disks, System};
use tokio::sync::RwLock;
#[derive(Debug, Clone)]
pub struct SystemMetricsConfig {
pub interval_minutes: u64,
pub retention_hours: u64,
}
impl Default for SystemMetricsConfig {
fn default() -> Self {
Self {
interval_minutes: 5,
retention_hours: 24,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemMetricsSnapshot {
pub timestamp: DateTime<Utc>,
pub cpu_percent: f64,
pub memory_percent: f64,
pub disk_percent: f64,
pub storage_bytes: u64,
}
pub struct SystemMetricsService {
storage_root: PathBuf,
storage: Arc<FsStorageBackend>,
config: SystemMetricsConfig,
history: Arc<RwLock<Vec<SystemMetricsSnapshot>>>,
history_path: PathBuf,
}
impl SystemMetricsService {
pub fn new(
storage_root: &Path,
storage: Arc<FsStorageBackend>,
config: SystemMetricsConfig,
) -> Self {
let history_path = storage_root
.join(".myfsio.sys")
.join("config")
.join("metrics_history.json");
let mut history = if history_path.exists() {
std::fs::read_to_string(&history_path)
.ok()
.and_then(|s| serde_json::from_str::<serde_json::Value>(&s).ok())
.and_then(|v| {
v.get("history").and_then(|h| {
serde_json::from_value::<Vec<SystemMetricsSnapshot>>(h.clone()).ok()
})
})
.unwrap_or_default()
} else {
Vec::new()
};
prune_history(&mut history, config.retention_hours);
Self {
storage_root: storage_root.to_path_buf(),
storage,
config,
history: Arc::new(RwLock::new(history)),
history_path,
}
}
pub async fn get_history(&self, hours: Option<u64>) -> Vec<SystemMetricsSnapshot> {
let mut history = self.history.read().await.clone();
prune_history(&mut history, hours.unwrap_or(self.config.retention_hours));
history
}
async fn take_snapshot(&self) {
let snapshot = collect_snapshot(&self.storage_root, &self.storage).await;
let mut history = self.history.write().await;
history.push(snapshot);
prune_history(&mut history, self.config.retention_hours);
drop(history);
self.save_history().await;
}
async fn save_history(&self) {
let history = self.history.read().await;
let data = json!({ "history": *history });
if let Some(parent) = self.history_path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(
&self.history_path,
serde_json::to_string_pretty(&data).unwrap_or_default(),
);
}
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
let interval =
std::time::Duration::from_secs(self.config.interval_minutes.saturating_mul(60));
tokio::spawn(async move {
self.take_snapshot().await;
let mut timer = tokio::time::interval(interval);
loop {
timer.tick().await;
self.take_snapshot().await;
}
})
}
}
fn prune_history(history: &mut Vec<SystemMetricsSnapshot>, retention_hours: u64) {
let cutoff = Utc::now() - chrono::Duration::hours(retention_hours as i64);
history.retain(|item| item.timestamp > cutoff);
}
fn sample_system_now() -> (f64, f64) {
    let mut system = System::new();
    // sysinfo derives CPU usage from the delta between two refreshes, so a
    // short blocking sleep (MINIMUM_CPU_UPDATE_INTERVAL) is unavoidable here.
    system.refresh_cpu_usage();
    std::thread::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL);
    system.refresh_cpu_usage();
    system.refresh_memory();
let cpu_percent = system.global_cpu_usage() as f64;
let memory_percent = if system.total_memory() > 0 {
(system.used_memory() as f64 / system.total_memory() as f64) * 100.0
} else {
0.0
};
(cpu_percent, memory_percent)
}
fn normalize_path_for_mount(path: &Path) -> String {
    let canonical = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
    let raw = canonical.to_string_lossy().to_string();
    // Drop the Windows verbatim prefix (\\?\) added by canonicalize so the
    // lowercased path compares cleanly against mount points.
    let stripped = raw.strip_prefix(r"\\?\").unwrap_or(&raw);
    stripped.to_lowercase()
}
fn sample_disk(path: &Path) -> (u64, u64) {
let disks = Disks::new_with_refreshed_list();
let path_str = normalize_path_for_mount(path);
let mut best: Option<(usize, u64, u64)> = None;
for disk in disks.list() {
let mount_raw = disk.mount_point().to_string_lossy().to_string();
let mount = mount_raw
.strip_prefix(r"\\?\")
.unwrap_or(&mount_raw)
.to_lowercase();
let total = disk.total_space();
let free = disk.available_space();
if path_str.starts_with(&mount) {
let len = mount.len();
match best {
Some((best_len, _, _)) if len <= best_len => {}
_ => best = Some((len, total, free)),
}
}
}
best.map(|(_, total, free)| (total, free)).unwrap_or((0, 0))
}
async fn collect_snapshot(
storage_root: &Path,
storage: &Arc<FsStorageBackend>,
) -> SystemMetricsSnapshot {
let (cpu_percent, memory_percent) = sample_system_now();
let (disk_total, disk_free) = sample_disk(storage_root);
let disk_percent = if disk_total > 0 {
((disk_total - disk_free) as f64 / disk_total as f64) * 100.0
} else {
0.0
};
let mut storage_bytes = 0u64;
let buckets = storage.list_buckets().await.unwrap_or_default();
for bucket in buckets {
if let Ok(stats) = storage.bucket_stats(&bucket.name).await {
storage_bytes += stats.total_bytes();
}
}
SystemMetricsSnapshot {
timestamp: Utc::now(),
cpu_percent: round2(cpu_percent),
memory_percent: round2(memory_percent),
disk_percent: round2(disk_percent),
storage_bytes,
}
}
fn round2(value: f64) -> f64 {
(value * 100.0).round() / 100.0
}
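#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch of the pure helpers: retention pruning and the
    // two-decimal rounding used for percentages.
    #[test]
    fn prunes_old_snapshots_and_rounds() {
        let old = SystemMetricsSnapshot {
            timestamp: Utc::now() - chrono::Duration::hours(48),
            cpu_percent: 0.0,
            memory_percent: 0.0,
            disk_percent: 0.0,
            storage_bytes: 0,
        };
        let fresh = SystemMetricsSnapshot {
            timestamp: Utc::now(),
            ..old.clone()
        };
        let mut history = vec![old, fresh];
        prune_history(&mut history, 24);
        assert_eq!(history.len(), 1);
        assert_eq!(round2(1.23456), 1.23);
    }
}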

View File

@@ -0,0 +1,197 @@
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
struct DomainData {
#[serde(default)]
mappings: HashMap<String, String>,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum DomainDataFile {
Wrapped(DomainData),
Flat(HashMap<String, String>),
}
impl DomainDataFile {
    fn into_domain_data(self) -> DomainData {
        let mappings = match self {
            Self::Wrapped(data) => data.mappings,
            Self::Flat(mappings) => mappings,
        };
        // Normalize keys from either on-disk format so lookups (which
        // normalize the queried domain) always match.
        DomainData {
            mappings: mappings
                .into_iter()
                .map(|(domain, bucket)| (normalize_domain(&domain), bucket))
                .collect(),
        }
    }
}
pub struct WebsiteDomainStore {
path: PathBuf,
data: Arc<RwLock<DomainData>>,
}
impl WebsiteDomainStore {
pub fn new(storage_root: &std::path::Path) -> Self {
let path = storage_root
.join(".myfsio.sys")
.join("config")
.join("website_domains.json");
let data = if path.exists() {
std::fs::read_to_string(&path)
.ok()
.and_then(|s| serde_json::from_str::<DomainDataFile>(&s).ok())
.map(DomainDataFile::into_domain_data)
.unwrap_or_default()
} else {
DomainData::default()
};
Self {
path,
data: Arc::new(RwLock::new(data)),
}
}
fn save(&self) {
let data = self.data.read();
if let Some(parent) = self.path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(json) = serde_json::to_string_pretty(&data.mappings) {
let _ = std::fs::write(&self.path, json);
}
}
pub fn list_all(&self) -> Vec<serde_json::Value> {
self.data
.read()
.mappings
.iter()
.map(|(domain, bucket)| {
serde_json::json!({
"domain": domain,
"bucket": bucket,
})
})
.collect()
}
pub fn get_bucket(&self, domain: &str) -> Option<String> {
let domain = normalize_domain(domain);
self.data.read().mappings.get(&domain).cloned()
}
pub fn set_mapping(&self, domain: &str, bucket: &str) {
let domain = normalize_domain(domain);
self.data
.write()
.mappings
.insert(domain, bucket.to_string());
self.save();
}
pub fn delete_mapping(&self, domain: &str) -> bool {
let domain = normalize_domain(domain);
let removed = self.data.write().mappings.remove(&domain).is_some();
if removed {
self.save();
}
removed
}
}
pub fn normalize_domain(domain: &str) -> String {
domain.trim().to_ascii_lowercase()
}
pub fn is_valid_domain(domain: &str) -> bool {
if domain.is_empty() || domain.len() > 253 {
return false;
}
let labels: Vec<&str> = domain.split('.').collect();
if labels.len() < 2 {
return false;
}
for label in &labels {
if label.is_empty() || label.len() > 63 {
return false;
}
if !label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') {
return false;
}
if label.starts_with('-') || label.ends_with('-') {
return false;
}
}
true
}
#[cfg(test)]
mod tests {
use super::WebsiteDomainStore;
use serde_json::json;
use tempfile::tempdir;
#[test]
fn loads_legacy_flat_mapping_file() {
let tmp = tempdir().expect("tempdir");
let config_dir = tmp.path().join(".myfsio.sys").join("config");
std::fs::create_dir_all(&config_dir).expect("create config dir");
std::fs::write(
config_dir.join("website_domains.json"),
r#"{"Example.COM":"site-bucket"}"#,
)
.expect("write config");
let store = WebsiteDomainStore::new(tmp.path());
assert_eq!(
store.get_bucket("example.com"),
Some("site-bucket".to_string())
);
}
#[test]
fn loads_wrapped_mapping_file() {
let tmp = tempdir().expect("tempdir");
let config_dir = tmp.path().join(".myfsio.sys").join("config");
std::fs::create_dir_all(&config_dir).expect("create config dir");
std::fs::write(
config_dir.join("website_domains.json"),
r#"{"mappings":{"example.com":"site-bucket"}}"#,
)
.expect("write config");
let store = WebsiteDomainStore::new(tmp.path());
assert_eq!(
store.get_bucket("example.com"),
Some("site-bucket".to_string())
);
}
#[test]
fn saves_in_shared_plain_mapping_format() {
let tmp = tempdir().expect("tempdir");
let store = WebsiteDomainStore::new(tmp.path());
store.set_mapping("Example.COM", "site-bucket");
let saved = std::fs::read_to_string(
tmp.path()
.join(".myfsio.sys")
.join("config")
.join("website_domains.json"),
)
.expect("read config");
let json: serde_json::Value = serde_json::from_str(&saved).expect("parse config");
assert_eq!(json, json!({"example.com": "site-bucket"}));
}
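    // Illustrative addition: pins the RFC 1035-style label rules enforced
    // by `is_valid_domain`.
    #[test]
    fn validates_domain_labels() {
        use super::is_valid_domain;
        assert!(is_valid_domain("example.com"));
        assert!(is_valid_domain("sub.example-site.com"));
        assert!(!is_valid_domain("localhost"));
        assert!(!is_valid_domain("-bad.example.com"));
        assert!(!is_valid_domain("bad-.example.com"));
        assert!(!is_valid_domain(""));
    }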
}

View File

@@ -0,0 +1,133 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
use parking_lot::RwLock;
use rand::RngCore;
use serde::{Deserialize, Serialize};
pub const SESSION_COOKIE_NAME: &str = "myfsio_session";
pub const CSRF_FIELD_NAME: &str = "csrf_token";
pub const CSRF_HEADER_NAME: &str = "x-csrf-token";
const SESSION_ID_BYTES: usize = 32;
const CSRF_TOKEN_BYTES: usize = 32;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FlashMessage {
pub category: String,
pub message: String,
}
#[derive(Clone, Debug)]
pub struct SessionData {
pub user_id: Option<String>,
pub display_name: Option<String>,
pub csrf_token: String,
pub flash: Vec<FlashMessage>,
pub extra: HashMap<String, String>,
last_accessed: Instant,
}
impl SessionData {
pub fn new() -> Self {
Self {
user_id: None,
display_name: None,
csrf_token: generate_token(CSRF_TOKEN_BYTES),
flash: Vec::new(),
extra: HashMap::new(),
last_accessed: Instant::now(),
}
}
pub fn is_authenticated(&self) -> bool {
self.user_id.is_some()
}
pub fn push_flash(&mut self, category: impl Into<String>, message: impl Into<String>) {
self.flash.push(FlashMessage {
category: category.into(),
message: message.into(),
});
}
pub fn take_flash(&mut self) -> Vec<FlashMessage> {
std::mem::take(&mut self.flash)
}
pub fn rotate_csrf(&mut self) {
self.csrf_token = generate_token(CSRF_TOKEN_BYTES);
}
}
impl Default for SessionData {
fn default() -> Self {
Self::new()
}
}
pub struct SessionStore {
sessions: RwLock<HashMap<String, SessionData>>,
ttl: Duration,
}
impl SessionStore {
pub fn new(ttl: Duration) -> Self {
Self {
sessions: RwLock::new(HashMap::new()),
ttl,
}
}
pub fn create(&self) -> (String, SessionData) {
let id = generate_token(SESSION_ID_BYTES);
let data = SessionData::new();
self.sessions.write().insert(id.clone(), data.clone());
(id, data)
}
pub fn get(&self, id: &str) -> Option<SessionData> {
let mut guard = self.sessions.write();
let entry = guard.get_mut(id)?;
if entry.last_accessed.elapsed() > self.ttl {
guard.remove(id);
return None;
}
entry.last_accessed = Instant::now();
Some(entry.clone())
}
pub fn save(&self, id: &str, data: SessionData) {
let mut guard = self.sessions.write();
let mut updated = data;
updated.last_accessed = Instant::now();
guard.insert(id.to_string(), updated);
}
pub fn destroy(&self, id: &str) {
self.sessions.write().remove(id);
}
pub fn sweep(&self) {
let ttl = self.ttl;
let mut guard = self.sessions.write();
guard.retain(|_, data| data.last_accessed.elapsed() <= ttl);
}
}
pub type SharedSessionStore = Arc<SessionStore>;
pub fn generate_token(bytes: usize) -> String {
let mut buf = vec![0u8; bytes];
rand::thread_rng().fill_bytes(&mut buf);
URL_SAFE_NO_PAD.encode(&buf)
}
pub fn csrf_tokens_match(a: &str, b: &str) -> bool {
if a.len() != b.len() {
return false;
}
subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}
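#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch: TTL-based expiry of the in-memory session store.
    // The short sleep makes this timing-dependent; acceptable for a demo.
    #[test]
    fn sessions_expire_after_ttl() {
        let store = SessionStore::new(Duration::from_millis(10));
        let (id, _) = store.create();
        assert!(store.get(&id).is_some());
        std::thread::sleep(Duration::from_millis(50));
        assert!(store.get(&id).is_none());
    }
    #[test]
    fn csrf_comparison_requires_equal_length() {
        let token = generate_token(32);
        assert!(csrf_tokens_match(&token, &token));
        assert!(!csrf_tokens_match(&token, "short"));
    }
}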

View File

@@ -0,0 +1,240 @@
use std::sync::Arc;
use std::time::Duration;
use crate::config::ServerConfig;
use crate::services::access_logging::AccessLoggingService;
use crate::services::gc::GcService;
use crate::services::integrity::IntegrityService;
use crate::services::metrics::MetricsService;
use crate::services::replication::ReplicationManager;
use crate::services::site_registry::SiteRegistry;
use crate::services::site_sync::SiteSyncWorker;
use crate::services::system_metrics::SystemMetricsService;
use crate::services::website_domains::WebsiteDomainStore;
use crate::session::SessionStore;
use crate::stores::connections::ConnectionStore;
use crate::templates::TemplateEngine;
use myfsio_auth::iam::IamService;
use myfsio_crypto::encryption::{EncryptionConfig, EncryptionService};
use myfsio_crypto::kms::KmsService;
use myfsio_storage::fs_backend::{FsStorageBackend, FsStorageBackendConfig};
#[derive(Clone)]
pub struct AppState {
pub config: ServerConfig,
pub storage: Arc<FsStorageBackend>,
pub iam: Arc<IamService>,
pub encryption: Option<Arc<EncryptionService>>,
pub kms: Option<Arc<KmsService>>,
pub gc: Option<Arc<GcService>>,
pub integrity: Option<Arc<IntegrityService>>,
pub metrics: Option<Arc<MetricsService>>,
pub system_metrics: Option<Arc<SystemMetricsService>>,
pub site_registry: Option<Arc<SiteRegistry>>,
pub website_domains: Option<Arc<WebsiteDomainStore>>,
pub connections: Arc<ConnectionStore>,
pub replication: Arc<ReplicationManager>,
pub site_sync: Option<Arc<SiteSyncWorker>>,
pub templates: Option<Arc<TemplateEngine>>,
pub sessions: Arc<SessionStore>,
pub access_logging: Arc<AccessLoggingService>,
}
impl AppState {
pub fn new(config: ServerConfig) -> Self {
let storage = Arc::new(FsStorageBackend::new_with_config(
config.storage_root.clone(),
FsStorageBackendConfig {
object_key_max_length_bytes: config.object_key_max_length_bytes,
object_cache_max_size: config.object_cache_max_size,
bucket_config_cache_ttl: Duration::from_secs_f64(
config.bucket_config_cache_ttl_seconds,
),
},
));
let iam = Arc::new(IamService::new_with_secret(
config.iam_config_path.clone(),
config.secret_key.clone(),
));
let gc = if config.gc_enabled {
Some(Arc::new(GcService::new(
config.storage_root.clone(),
crate::services::gc::GcConfig {
interval_hours: config.gc_interval_hours,
temp_file_max_age_hours: config.gc_temp_file_max_age_hours,
multipart_max_age_days: config.gc_multipart_max_age_days,
lock_file_max_age_hours: config.gc_lock_file_max_age_hours,
dry_run: config.gc_dry_run,
},
)))
} else {
None
};
let integrity = if config.integrity_enabled {
Some(Arc::new(IntegrityService::new(
storage.clone(),
&config.storage_root,
crate::services::integrity::IntegrityConfig::default(),
)))
} else {
None
};
let metrics = if config.metrics_enabled {
Some(Arc::new(MetricsService::new(
&config.storage_root,
crate::services::metrics::MetricsConfig {
interval_minutes: config.metrics_interval_minutes,
retention_hours: config.metrics_retention_hours,
},
)))
} else {
None
};
let system_metrics = if config.metrics_history_enabled {
Some(Arc::new(SystemMetricsService::new(
&config.storage_root,
storage.clone(),
crate::services::system_metrics::SystemMetricsConfig {
interval_minutes: config.metrics_history_interval_minutes,
retention_hours: config.metrics_history_retention_hours,
},
)))
} else {
None
};
let site_registry = {
let registry = SiteRegistry::new(&config.storage_root);
if let (Some(site_id), Some(endpoint)) =
(config.site_id.as_deref(), config.site_endpoint.as_deref())
{
registry.set_local_site(crate::services::site_registry::SiteInfo {
site_id: site_id.to_string(),
endpoint: endpoint.to_string(),
region: config.site_region.clone(),
priority: config.site_priority,
display_name: site_id.to_string(),
created_at: Some(chrono::Utc::now().to_rfc3339()),
});
}
Some(Arc::new(registry))
};
let website_domains = if config.website_hosting_enabled {
Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
} else {
None
};
let connections = Arc::new(ConnectionStore::new(&config.storage_root));
let replication = Arc::new(ReplicationManager::new(
storage.clone(),
connections.clone(),
&config.storage_root,
Duration::from_secs(config.replication_connect_timeout_secs),
Duration::from_secs(config.replication_read_timeout_secs),
config.replication_max_retries,
config.replication_streaming_threshold_bytes,
config.replication_max_failures_per_bucket,
));
let site_sync = if config.site_sync_enabled {
Some(Arc::new(SiteSyncWorker::new(
storage.clone(),
connections.clone(),
replication.clone(),
config.storage_root.clone(),
config.site_sync_interval_secs,
config.site_sync_batch_size,
Duration::from_secs(config.site_sync_connect_timeout_secs),
Duration::from_secs(config.site_sync_read_timeout_secs),
config.site_sync_max_retries,
config.site_sync_clock_skew_tolerance,
)))
} else {
None
};
let templates = init_templates(&config.templates_dir);
let access_logging = Arc::new(AccessLoggingService::new(&config.storage_root));
let session_ttl = Duration::from_secs(config.session_lifetime_days.saturating_mul(86_400));
Self {
config,
storage,
iam,
encryption: None,
kms: None,
gc,
integrity,
metrics,
system_metrics,
site_registry,
website_domains,
connections,
replication,
site_sync,
templates,
sessions: Arc::new(SessionStore::new(session_ttl)),
access_logging,
}
}
    // KMS and encryption read or create key material on disk asynchronously,
    // so they are wired up in this async constructor rather than in `new`.
    pub async fn new_with_encryption(config: ServerConfig) -> Self {
let mut state = Self::new(config.clone());
let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");
let kms = if config.kms_enabled {
match KmsService::new(&keys_dir).await {
Ok(k) => Some(Arc::new(k)),
Err(e) => {
tracing::error!("Failed to initialize KMS: {}", e);
None
}
}
} else {
None
};
let encryption = if config.encryption_enabled {
match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
Ok(master_key) => Some(Arc::new(EncryptionService::with_config(
master_key,
kms.clone(),
EncryptionConfig {
chunk_size: config.encryption_chunk_size_bytes,
},
))),
Err(e) => {
tracing::error!("Failed to initialize encryption: {}", e);
None
}
}
} else {
None
};
state.encryption = encryption;
state.kms = kms;
state
}
}
fn init_templates(templates_dir: &std::path::Path) -> Option<Arc<TemplateEngine>> {
let glob = format!("{}/*.html", templates_dir.display()).replace('\\', "/");
match TemplateEngine::new(&glob) {
Ok(engine) => {
crate::handlers::ui_pages::register_ui_endpoints(&engine);
Some(Arc::new(engine))
}
Err(e) => {
tracing::error!("Template engine init failed: {}", e);
None
}
}
}

View File

@@ -0,0 +1,94 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RemoteConnection {
pub id: String,
pub name: String,
pub endpoint_url: String,
pub access_key: String,
pub secret_key: String,
#[serde(default = "default_region")]
pub region: String,
}
fn default_region() -> String {
"us-east-1".to_string()
}
pub struct ConnectionStore {
path: PathBuf,
inner: Arc<RwLock<Vec<RemoteConnection>>>,
}
impl ConnectionStore {
pub fn new(storage_root: &Path) -> Self {
let path = storage_root
.join(".myfsio.sys")
.join("config")
.join("connections.json");
let inner = Arc::new(RwLock::new(load_from_disk(&path)));
Self { path, inner }
}
pub fn reload(&self) {
let loaded = load_from_disk(&self.path);
*self.inner.write() = loaded;
}
pub fn list(&self) -> Vec<RemoteConnection> {
self.inner.read().clone()
}
pub fn get(&self, id: &str) -> Option<RemoteConnection> {
self.inner.read().iter().find(|c| c.id == id).cloned()
}
pub fn add(&self, connection: RemoteConnection) -> std::io::Result<()> {
{
let mut guard = self.inner.write();
if let Some(existing) = guard.iter_mut().find(|c| c.id == connection.id) {
*existing = connection;
} else {
guard.push(connection);
}
}
self.save()
}
pub fn delete(&self, id: &str) -> std::io::Result<bool> {
let removed = {
let mut guard = self.inner.write();
let before = guard.len();
guard.retain(|c| c.id != id);
guard.len() != before
};
if removed {
self.save()?;
}
Ok(removed)
}
fn save(&self) -> std::io::Result<()> {
if let Some(parent) = self.path.parent() {
std::fs::create_dir_all(parent)?;
}
let snapshot = self.inner.read().clone();
let bytes = serde_json::to_vec_pretty(&snapshot)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
std::fs::write(&self.path, bytes)
}
}
fn load_from_disk(path: &Path) -> Vec<RemoteConnection> {
if !path.exists() {
return Vec::new();
}
match std::fs::read_to_string(path) {
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
Err(_) => Vec::new(),
}
}
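#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative sketch: `add` upserts by id and the store survives a
    // reload from disk (tempfile assumed as dev-dep).
    #[test]
    fn upsert_and_reload() {
        let tmp = tempfile::tempdir().unwrap();
        let store = ConnectionStore::new(tmp.path());
        let conn = |name: &str| RemoteConnection {
            id: "c1".to_string(),
            name: name.to_string(),
            endpoint_url: "http://peer:5000".to_string(),
            access_key: "key".to_string(),
            secret_key: "secret".to_string(),
            region: default_region(),
        };
        store.add(conn("first")).unwrap();
        store.add(conn("second")).unwrap();
        assert_eq!(store.list().len(), 1);
        store.reload();
        assert_eq!(store.get("c1").unwrap().name, "second");
        assert!(store.delete("c1").unwrap());
    }
}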

View File

@@ -0,0 +1 @@
pub mod connections;

View File

@@ -0,0 +1,355 @@
use std::collections::HashMap;
use std::sync::Arc;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde_json::Value;
use tera::{Context, Error as TeraError, Tera};
pub type EndpointResolver =
Arc<dyn Fn(&str, &HashMap<String, Value>) -> Option<String> + Send + Sync>;
#[derive(Clone)]
pub struct TemplateEngine {
tera: Arc<RwLock<Tera>>,
endpoints: Arc<RwLock<HashMap<String, String>>>,
}
impl TemplateEngine {
pub fn new(template_glob: &str) -> Result<Self, TeraError> {
let mut tera = Tera::new(template_glob)?;
tera.set_escape_fn(html_escape);
register_filters(&mut tera);
let endpoints: Arc<RwLock<HashMap<String, String>>> = Arc::new(RwLock::new(HashMap::new()));
register_functions(&mut tera, endpoints.clone());
Ok(Self {
tera: Arc::new(RwLock::new(tera)),
endpoints,
})
}
pub fn register_endpoint(&self, name: &str, path_template: &str) {
self.endpoints
.write()
.insert(name.to_string(), path_template.to_string());
}
pub fn register_endpoints(&self, pairs: &[(&str, &str)]) {
let mut guard = self.endpoints.write();
for (n, p) in pairs {
guard.insert((*n).to_string(), (*p).to_string());
}
}
pub fn render(&self, name: &str, context: &Context) -> Result<String, TeraError> {
self.tera.read().render(name, context)
}
pub fn reload(&self) -> Result<(), TeraError> {
self.tera.write().full_reload()
}
}
fn html_escape(input: &str) -> String {
let mut out = String::with_capacity(input.len());
for c in input.chars() {
match c {
'&' => out.push_str("&amp;"),
'<' => out.push_str("&lt;"),
'>' => out.push_str("&gt;"),
'"' => out.push_str("&quot;"),
'\'' => out.push_str("&#x27;"),
_ => out.push(c),
}
}
out
}
fn register_filters(tera: &mut Tera) {
tera.register_filter("format_datetime", format_datetime_filter);
tera.register_filter("filesizeformat", filesizeformat_filter);
tera.register_filter("slice", slice_filter);
}
fn register_functions(tera: &mut Tera, endpoints: Arc<RwLock<HashMap<String, String>>>) {
let endpoints_for_url = endpoints.clone();
tera.register_function(
"url_for",
move |args: &HashMap<String, Value>| -> tera::Result<Value> {
let endpoint = args
.get("endpoint")
.and_then(|v| v.as_str())
.ok_or_else(|| tera::Error::msg("url_for requires endpoint"))?;
if endpoint == "static" {
let filename = args.get("filename").and_then(|v| v.as_str()).unwrap_or("");
return Ok(Value::String(format!("/static/{}", filename)));
}
let path = match endpoints_for_url.read().get(endpoint) {
Some(p) => p.clone(),
None => {
return Ok(Value::String(format!("/__missing__/{}", endpoint)));
}
};
Ok(Value::String(substitute_path_params(&path, args)))
},
);
tera.register_function(
"csrf_token",
|args: &HashMap<String, Value>| -> tera::Result<Value> {
if let Some(token) = args.get("token").and_then(|v| v.as_str()) {
return Ok(Value::String(token.to_string()));
}
Ok(Value::String(String::new()))
},
);
}
fn substitute_path_params(template: &str, args: &HashMap<String, Value>) -> String {
let mut path = template.to_string();
let mut query: Vec<(String, String)> = Vec::new();
for (k, v) in args {
if k == "endpoint" || k == "filename" {
continue;
}
let value_str = value_to_string(v);
let placeholder = format!("{{{}}}", k);
if path.contains(&placeholder) {
let encoded = urlencode_path(&value_str);
path = path.replace(&placeholder, &encoded);
} else {
query.push((k.clone(), value_str));
}
}
if !query.is_empty() {
let qs: Vec<String> = query
.into_iter()
.map(|(k, v)| format!("{}={}", urlencode_query(&k), urlencode_query(&v)))
.collect();
path.push('?');
path.push_str(&qs.join("&"));
}
path
}
fn value_to_string(v: &Value) -> String {
match v {
Value::String(s) => s.clone(),
Value::Number(n) => n.to_string(),
Value::Bool(b) => b.to_string(),
Value::Null => String::new(),
other => other.to_string(),
}
}
// `percent_encoding` takes the set of bytes that *should* be escaped, so this
// is the complement of the RFC 3986 "unreserved" set (ALPHA / DIGIT / "-" /
// "_" / "." / "~").
const NON_UNRESERVED: &percent_encoding::AsciiSet = &percent_encoding::NON_ALPHANUMERIC
    .remove(b'-')
    .remove(b'_')
    .remove(b'.')
    .remove(b'~');
fn urlencode_path(s: &str) -> String {
    percent_encoding::utf8_percent_encode(s, NON_UNRESERVED).to_string()
}
// Kept separate from `urlencode_path` for call-site clarity; both currently
// escape with the same conservative set.
fn urlencode_query(s: &str) -> String {
    percent_encoding::utf8_percent_encode(s, NON_UNRESERVED).to_string()
}
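/// Formats RFC 3339 / RFC 2822 strings or Unix-epoch numbers using a chrono
/// format string (default `%Y-%m-%d %H:%M:%S UTC`); values that fail to
/// parse pass through unchanged instead of failing the render.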
fn format_datetime_filter(value: &Value, args: &HashMap<String, Value>) -> tera::Result<Value> {
let format = args
.get("format")
.and_then(|v| v.as_str())
.unwrap_or("%Y-%m-%d %H:%M:%S UTC");
let dt: Option<DateTime<Utc>> = match value {
Value::String(s) => DateTime::parse_from_rfc3339(s)
.ok()
.map(|d| d.with_timezone(&Utc))
.or_else(|| {
DateTime::parse_from_rfc2822(s)
.ok()
.map(|d| d.with_timezone(&Utc))
}),
Value::Number(n) => n.as_f64().and_then(|f| {
let secs = f as i64;
let nanos = ((f - secs as f64) * 1_000_000_000.0) as u32;
DateTime::<Utc>::from_timestamp(secs, nanos)
}),
_ => None,
};
match dt {
Some(d) => Ok(Value::String(d.format(format).to_string())),
None => Ok(value.clone()),
}
}
/// Clamps a possibly negative, Python-style slice index into `0..=len`.
fn slice_bounds(start: i64, end: Option<i64>, len: i64) -> (usize, usize) {
    let norm = |i: i64| -> usize {
        if i < 0 {
            (len + i).max(0) as usize
        } else {
            i.min(len) as usize
        }
    };
    let s_idx = norm(start);
    let e_idx = end.map_or(len as usize, norm);
    (s_idx, e_idx.max(s_idx))
}
/// Python-style slicing for strings (by Unicode scalar value) and arrays,
/// with negative-index support; `null` slices to an empty string.
fn slice_filter(value: &Value, args: &HashMap<String, Value>) -> tera::Result<Value> {
    let start = args.get("start").and_then(|v| v.as_i64()).unwrap_or(0);
    let end = args.get("end").and_then(|v| v.as_i64());
    match value {
        Value::String(s) => {
            let chars: Vec<char> = s.chars().collect();
            let (s_idx, e_idx) = slice_bounds(start, end, chars.len() as i64);
            Ok(Value::String(chars[s_idx..e_idx].iter().collect()))
        }
        Value::Array(arr) => {
            let (s_idx, e_idx) = slice_bounds(start, end, arr.len() as i64);
            Ok(Value::Array(arr[s_idx..e_idx].to_vec()))
        }
        Value::Null => Ok(Value::String(String::new())),
        _ => Err(tera::Error::msg("slice: unsupported value type")),
    }
}
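/// Human-readable byte counts in 1024-based units from B through PB; whole
/// bytes print without a decimal, larger units with one decimal place.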
fn filesizeformat_filter(value: &Value, _args: &HashMap<String, Value>) -> tera::Result<Value> {
let bytes = match value {
Value::Number(n) => n.as_f64().unwrap_or(0.0),
Value::String(s) => s.parse::<f64>().unwrap_or(0.0),
_ => 0.0,
};
const UNITS: [&str; 6] = ["B", "KB", "MB", "GB", "TB", "PB"];
let mut size = bytes;
let mut unit = 0;
while size >= 1024.0 && unit < UNITS.len() - 1 {
size /= 1024.0;
unit += 1;
}
let formatted = if unit == 0 {
format!("{} {}", size as u64, UNITS[unit])
} else {
format!("{:.1} {}", size, UNITS[unit])
};
Ok(Value::String(formatted))
}
#[cfg(test)]
mod tests {
use super::*;
fn test_engine() -> TemplateEngine {
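        // The TempDir is dropped when this helper returns; Tera has already
        // parsed the (empty) template eagerly, so rendering keeps working,
        // though `reload()` would fail in these tests.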
let tmp = tempfile::TempDir::new().unwrap();
let tpl = tmp.path().join("t.html");
std::fs::write(&tpl, "").unwrap();
let glob = format!("{}/*.html", tmp.path().display());
let engine = TemplateEngine::new(&glob).unwrap();
engine.register_endpoints(&[
("ui.buckets_overview", "/ui/buckets"),
("ui.bucket_detail", "/ui/buckets/{bucket_name}"),
(
"ui.abort_multipart_upload",
"/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
),
]);
engine
}
fn render_inline(engine: &TemplateEngine, tpl: &str) -> String {
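        // Hold the write guard only while registering the template: `render`
        // re-acquires the lock for reading, and the lock is not reentrant.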
let mut tera = engine.tera.write();
tera.add_raw_template("__inline__", tpl).unwrap();
drop(tera);
engine.render("__inline__", &Context::new()).unwrap()
}
#[test]
fn static_url() {
let e = test_engine();
let out = render_inline(
&e,
"{{ url_for(endpoint='static', filename='css/main.css') }}",
);
assert_eq!(out, "/static/css/main.css");
}
#[test]
fn path_param_substitution() {
let e = test_engine();
let out = render_inline(
&e,
"{{ url_for(endpoint='ui.bucket_detail', bucket_name='my-bucket') }}",
);
assert_eq!(out, "/ui/buckets/my-bucket");
}
#[test]
fn extra_args_become_query() {
let e = test_engine();
let out = render_inline(
&e,
"{{ url_for(endpoint='ui.bucket_detail', bucket_name='b', tab='replication') }}",
);
assert_eq!(out, "/ui/buckets/b?tab=replication");
}
#[test]
fn filesizeformat_basic() {
let v = filesizeformat_filter(&Value::Number(1024.into()), &HashMap::new()).unwrap();
assert_eq!(v, Value::String("1.0 KB".into()));
let v = filesizeformat_filter(&Value::Number(1_048_576.into()), &HashMap::new()).unwrap();
assert_eq!(v, Value::String("1.0 MB".into()));
let v = filesizeformat_filter(&Value::Number(500.into()), &HashMap::new()).unwrap();
assert_eq!(v, Value::String("500 B".into()));
}
#[test]
fn project_templates_parse() {
let mut path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("templates");
path.push("*.html");
let glob = path.to_string_lossy().replace('\\', "/");
let engine = TemplateEngine::new(&glob).expect("Tera parse failed");
let names: Vec<String> = engine
.tera
.read()
.get_template_names()
.map(|s| s.to_string())
.collect();
assert!(
names.len() >= 10,
"expected 10+ templates, got {}",
names.len()
);
}
#[test]
fn format_datetime_rfc3339() {
let v = format_datetime_filter(
&Value::String("2024-06-15T12:34:56Z".into()),
&HashMap::new(),
)
.unwrap();
assert_eq!(v, Value::String("2024-06-15 12:34:56 UTC".into()));
}
}