Add Rust storage engine foundation

This commit is contained in:
2026-04-02 17:00:58 +08:00
parent 1eadc7b75c
commit 926a7e6366
48 changed files with 12189 additions and 3 deletions

View File

@@ -0,0 +1,33 @@
# Manifest for the myfsio-server crate: the S3-compatible HTTP server that
# ties together the sibling myfsio-* workspace crates.
[package]
name = "myfsio-server"
version = "0.1.0"
edition = "2021"
# External dependency versions are inherited from the workspace root
# (`workspace = true`); only crate-local additions pin a version here.
[dependencies]
# Sibling workspace crates.
myfsio-common = { path = "../myfsio-common" }
myfsio-auth = { path = "../myfsio-auth" }
myfsio-crypto = { path = "../myfsio-crypto" }
myfsio-storage = { path = "../myfsio-storage" }
myfsio-xml = { path = "../myfsio-xml" }
base64 = { workspace = true }
axum = { workspace = true }
tokio = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
hyper = { workspace = true }
bytes = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tokio-util = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
futures = { workspace = true }
# Pinned directly; not declared at the workspace level.
http-body-util = "0.1"
percent-encoding = { workspace = true }
quick-xml = { workspace = true }
[dev-dependencies]
tempfile = "3"
# Re-declared so tests get the `util` feature (e.g. `ServiceExt::oneshot`).
tower = { workspace = true, features = ["util"] }

View File

@@ -0,0 +1,111 @@
use std::net::SocketAddr;
use std::path::PathBuf;
/// Runtime configuration for the server.
///
/// All values are resolved once at startup from environment variables via
/// [`ServerConfig::from_env`]; unset or unparsable variables fall back to
/// safe defaults instead of aborting startup.
#[derive(Debug, Clone)]
pub struct ServerConfig {
    /// Address the HTTP server listens on (`HOST`/`PORT`, default 127.0.0.1:5000).
    pub bind_addr: SocketAddr,
    /// Root directory for object storage (`STORAGE_ROOT`, default `./data`).
    pub storage_root: PathBuf,
    /// Region reported to S3 clients (`AWS_REGION`, default `us-east-1`).
    pub region: String,
    /// Path to the IAM config file (`IAM_CONFIG`,
    /// default `<storage_root>/.myfsio.sys/config/iam.json`).
    pub iam_config_path: PathBuf,
    /// Allowed clock skew for SigV4 request timestamps, in seconds (default 900).
    pub sigv4_timestamp_tolerance_secs: u64,
    /// Minimum accepted presigned-URL expiry, in seconds (default 1).
    pub presigned_url_min_expiry: u64,
    /// Maximum accepted presigned-URL expiry, in seconds (default 7 days).
    pub presigned_url_max_expiry: u64,
    /// Signing secret; `None` when neither `SECRET_KEY` nor the on-disk
    /// secret file provides a usable (non-empty, non-placeholder) value.
    pub secret_key: Option<String>,
    // Feature toggles, each driven by a `*_ENABLED` env var (default false).
    pub encryption_enabled: bool,
    pub kms_enabled: bool,
    pub gc_enabled: bool,
    pub integrity_enabled: bool,
    pub metrics_enabled: bool,
    pub lifecycle_enabled: bool,
}

impl ServerConfig {
    /// Builds the configuration from the process environment.
    ///
    /// Fixes over the previous version: an invalid `HOST` no longer panics
    /// (it falls back to 127.0.0.1, matching how `PORT` and the numeric
    /// variables already degrade), and an empty or whitespace-only secret
    /// file is treated as "no secret" instead of `Some("")`.
    pub fn from_env() -> Self {
        /// Returns the env var's value, or `default` when unset.
        fn env_or(name: &str, default: &str) -> String {
            std::env::var(name).unwrap_or_else(|_| default.to_string())
        }
        /// Parses the env var as u64; `default` when unset or unparsable.
        fn env_u64(name: &str, default: u64) -> u64 {
            std::env::var(name)
                .ok()
                .and_then(|v| v.parse().ok())
                .unwrap_or(default)
        }
        /// True only when the env var is set to "true" (case-insensitive).
        fn env_bool(name: &str) -> bool {
            std::env::var(name)
                .map(|v| v.to_lowercase() == "true")
                .unwrap_or(false)
        }

        let host = env_or("HOST", "127.0.0.1");
        let port: u16 = env_or("PORT", "5000").parse().unwrap_or(5000);
        // An unparsable HOST falls back to loopback instead of panicking.
        let ip: std::net::IpAddr = host
            .parse()
            .unwrap_or_else(|_| std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST));

        let storage_path = PathBuf::from(env_or("STORAGE_ROOT", "./data"));
        let region = env_or("AWS_REGION", "us-east-1");
        let iam_config_path = std::env::var("IAM_CONFIG")
            .map(PathBuf::from)
            .unwrap_or_else(|_| {
                storage_path.join(".myfsio.sys").join("config").join("iam.json")
            });

        // The placeholder "dev-secret-key" is deliberately rejected so a
        // copied-from-docs value never becomes the production secret; in that
        // case (or when unset/empty) the on-disk secret file is consulted.
        let secret_key = match std::env::var("SECRET_KEY").ok() {
            Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
            _ => {
                let secret_file = storage_path
                    .join(".myfsio.sys")
                    .join("config")
                    .join(".secret");
                std::fs::read_to_string(&secret_file)
                    .ok()
                    .map(|s| s.trim().to_string())
                    // An empty secret file is no secret at all.
                    .filter(|s| !s.is_empty())
            }
        };

        Self {
            bind_addr: SocketAddr::new(ip, port),
            storage_root: storage_path,
            region,
            iam_config_path,
            sigv4_timestamp_tolerance_secs: env_u64("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900),
            presigned_url_min_expiry: env_u64("PRESIGNED_URL_MIN_EXPIRY_SECONDS", 1),
            presigned_url_max_expiry: env_u64("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604_800),
            secret_key,
            encryption_enabled: env_bool("ENCRYPTION_ENABLED"),
            kms_enabled: env_bool("KMS_ENABLED"),
            gc_enabled: env_bool("GC_ENABLED"),
            integrity_enabled: env_bool("INTEGRITY_ENABLED"),
            metrics_enabled: env_bool("OPERATION_METRICS_ENABLED"),
            lifecycle_enabled: env_bool("LIFECYCLE_ENABLED"),
        }
    }
}

View File

@@ -0,0 +1,623 @@
use axum::body::Body;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
/// Wraps an XML payload in a response with the given status and an
/// `application/xml` content type.
fn xml_response(status: StatusCode, xml: String) -> Response {
    (status, [("content-type", "application/xml")], xml).into_response()
}

/// Converts a storage-layer error into an S3-style XML error response,
/// using the error's mapped HTTP status (500 if the code is out of range).
fn storage_err(err: myfsio_storage::error::StorageError) -> Response {
    let s3err = S3Error::from(err);
    let status = StatusCode::from_u16(s3err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    (status, [("content-type", "application/xml")], s3err.to_xml()).into_response()
}
/// `GET ?versioning` — reports the bucket's versioning state.
///
/// NOTE(review): a bucket that never had versioning configured is reported
/// as `Suspended`, whereas real S3 returns an empty configuration in that
/// case. `is_versioning_enabled` only yields a bool, so the two states are
/// indistinguishable here — confirm whether clients depend on this.
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
    match state.storage.is_versioning_enabled(bucket).await {
        Ok(enabled) => {
            let status_str = if enabled { "Enabled" } else { "Suspended" };
            let xml = format!(
                "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
                <Status>{}</Status>\
                </VersioningConfiguration>",
                status_str
            );
            xml_response(StatusCode::OK, xml)
        }
        Err(e) => storage_err(e),
    }
}
/// `PUT ?versioning` — enables or suspends bucket versioning.
///
/// Previously this required the exact byte sequence
/// `<Status>Enabled</Status>`, so pretty-printed request bodies (newlines or
/// indentation inside the element) silently suspended versioning. The text
/// of the first `<Status>` element is now extracted and trimmed; anything
/// other than `Enabled` still suspends, preserving the lenient behavior.
pub async fn put_versioning(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return xml_response(
                StatusCode::BAD_REQUEST,
                S3Error::from_code(S3ErrorCode::MalformedXML).to_xml(),
            );
        }
    };
    let xml_str = String::from_utf8_lossy(&body_bytes);
    // Extract the first <Status>...</Status> text and compare it trimmed.
    let enabled = xml_str
        .split("<Status>")
        .nth(1)
        .and_then(|rest| rest.split("</Status>").next())
        .map(|text| text.trim() == "Enabled")
        .unwrap_or(false);
    match state.storage.set_versioning(bucket, enabled).await {
        Ok(()) => StatusCode::OK.into_response(),
        Err(e) => storage_err(e),
    }
}
/// `GET ?tagging` — renders the bucket's tag set as S3 `Tagging` XML.
///
/// Tag keys and values are XML-escaped before interpolation; previously a
/// stored value containing `<` or `&` produced malformed XML and allowed
/// markup injection into the response document.
pub async fn get_tagging(state: &AppState, bucket: &str) -> Response {
    /// Minimal XML text escape for user-controlled content.
    fn xml_escape(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '&' => out.push_str("&amp;"),
                '<' => out.push_str("&lt;"),
                '>' => out.push_str("&gt;"),
                '"' => out.push_str("&quot;"),
                '\'' => out.push_str("&apos;"),
                _ => out.push(c),
            }
        }
        out
    }
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            let mut xml = String::from(
                "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><TagSet>"
            );
            for tag in &config.tags {
                xml.push_str(&format!(
                    "<Tag><Key>{}</Key><Value>{}</Value></Tag>",
                    xml_escape(&tag.key),
                    xml_escape(&tag.value)
                ));
            }
            xml.push_str("</TagSet></Tagging>");
            xml_response(StatusCode::OK, xml)
        }
        Err(e) => storage_err(e),
    }
}
/// `PUT ?tagging` — replaces the bucket's entire tag set with the tags
/// parsed from the request body (read-modify-write of the bucket config).
pub async fn put_tagging(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return xml_response(
                StatusCode::BAD_REQUEST,
                S3Error::from_code(S3ErrorCode::MalformedXML).to_xml(),
            );
        }
    };
    let xml_str = String::from_utf8_lossy(&body_bytes);
    // NOTE(review): an unparsable body yields an empty tag list (clearing
    // all tags) rather than a MalformedXML error — confirm intended.
    let tags = parse_tagging_xml(&xml_str);
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.tags = tags;
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `DELETE ?tagging` — clears all bucket tags; 204 No Content on success.
pub async fn delete_tagging(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.tags.clear();
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::NO_CONTENT.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?cors` — returns the stored CORS document, or 404 when none is set.
///
/// NOTE(review): the 404 uses the `NoSuchKey` code; real S3 responds with
/// `NoSuchCORSConfiguration` — confirm whether the error enum has a closer
/// variant.
pub async fn get_cors(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(cors) = &config.cors {
                xml_response(StatusCode::OK, cors.to_string())
            } else {
                xml_response(
                    StatusCode::NOT_FOUND,
                    S3Error::new(S3ErrorCode::NoSuchKey, "The CORS configuration does not exist").to_xml(),
                )
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT ?cors` — stores the raw request body (unvalidated XML) as a JSON
/// string inside the bucket config.
pub async fn put_cors(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };
    let body_str = String::from_utf8_lossy(&body_bytes);
    let value = serde_json::Value::String(body_str.to_string());
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.cors = Some(value);
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `DELETE ?cors` — removes the stored CORS document; 204 on success.
pub async fn delete_cors(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.cors = None;
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::NO_CONTENT.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `GET ?location` — echoes the server's configured region for any bucket.
///
/// NOTE(review): real S3 returns an *empty* LocationConstraint element for
/// us-east-1; this always echoes the region string. Confirm whether any
/// client parses the us-east-1 case strictly.
pub async fn get_location(state: &AppState, _bucket: &str) -> Response {
    let xml = format!(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">{}</LocationConstraint>",
        state.config.region
    );
    xml_response(StatusCode::OK, xml)
}
/// `GET ?encryption` — returns the stored SSE document, or 404 when unset.
pub async fn get_encryption(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(enc) = &config.encryption {
                xml_response(StatusCode::OK, enc.to_string())
            } else {
                xml_response(
                    StatusCode::NOT_FOUND,
                    S3Error::new(
                        S3ErrorCode::InvalidRequest,
                        "The server side encryption configuration was not found",
                    ).to_xml(),
                )
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT ?encryption` — stores the raw request body (unvalidated XML) as a
/// JSON string inside the bucket config.
pub async fn put_encryption(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };
    let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.encryption = Some(value);
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `DELETE ?encryption` — removes the stored SSE document; 204 on success.
pub async fn delete_encryption(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.encryption = None;
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::NO_CONTENT.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?lifecycle` — returns the stored lifecycle document, or 404.
///
/// NOTE(review): uses `NoSuchKey`; S3's code for this case is
/// `NoSuchLifecycleConfiguration` — confirm the error enum's options.
pub async fn get_lifecycle(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(lc) = &config.lifecycle {
                xml_response(StatusCode::OK, lc.to_string())
            } else {
                xml_response(
                    StatusCode::NOT_FOUND,
                    S3Error::new(S3ErrorCode::NoSuchKey, "The lifecycle configuration does not exist").to_xml(),
                )
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT ?lifecycle` — stores the raw request body (unvalidated XML) as a
/// JSON string inside the bucket config.
pub async fn put_lifecycle(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };
    let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.lifecycle = Some(value);
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `DELETE ?lifecycle` — removes the stored lifecycle document; 204.
pub async fn delete_lifecycle(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.lifecycle = None;
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::NO_CONTENT.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?acl` — returns the stored ACL document when present, otherwise a
/// canned FULL_CONTROL policy for the fixed "myfsio" owner.
pub async fn get_acl(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(acl) = &config.acl {
                xml_response(StatusCode::OK, acl.to_string())
            } else {
                let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
                <Owner><ID>myfsio</ID><DisplayName>myfsio</DisplayName></Owner>\
                <AccessControlList>\
                <Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
                <ID>myfsio</ID><DisplayName>myfsio</DisplayName></Grantee>\
                <Permission>FULL_CONTROL</Permission></Grant>\
                </AccessControlList></AccessControlPolicy>";
                xml_response(StatusCode::OK, xml.to_string())
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT ?acl` — stores the raw request body (unvalidated) as a JSON string
/// inside the bucket config. The ACL is not enforced anywhere in this file.
pub async fn put_acl(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };
    let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.acl = Some(value);
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?website` — returns the stored website document, or 404 when unset.
pub async fn get_website(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(ws) = &config.website {
                xml_response(StatusCode::OK, ws.to_string())
            } else {
                xml_response(
                    StatusCode::NOT_FOUND,
                    S3Error::new(S3ErrorCode::NoSuchKey, "The website configuration does not exist").to_xml(),
                )
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT ?website` — stores the raw request body (unvalidated XML) as a JSON
/// string inside the bucket config.
pub async fn put_website(state: &AppState, bucket: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => return StatusCode::BAD_REQUEST.into_response(),
    };
    let value = serde_json::Value::String(String::from_utf8_lossy(&body_bytes).to_string());
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.website = Some(value);
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::OK.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `DELETE ?website` — removes the stored website document; 204 on success.
pub async fn delete_website(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(mut config) => {
            config.website = None;
            match state.storage.set_bucket_config(bucket, &config).await {
                Ok(()) => StatusCode::NO_CONTENT.into_response(),
                Err(e) => storage_err(e),
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?object-lock` — returns the stored configuration, or a canned
/// "Disabled" document when none is set.
pub async fn get_object_lock(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(ol) = &config.object_lock {
                xml_response(StatusCode::OK, ol.to_string())
            } else {
                let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
                <ObjectLockEnabled>Disabled</ObjectLockEnabled>\
                </ObjectLockConfiguration>";
                xml_response(StatusCode::OK, xml.to_string())
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `GET ?notification` — returns the stored configuration, or an empty
/// document when none is set (matching S3's default).
pub async fn get_notification(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(n) = &config.notification {
                xml_response(StatusCode::OK, n.to_string())
            } else {
                let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
                </NotificationConfiguration>";
                xml_response(StatusCode::OK, xml.to_string())
            }
        }
        Err(e) => storage_err(e),
    }
}

/// `GET ?logging` — returns the stored configuration, or an empty
/// `BucketLoggingStatus` (logging disabled) when none is set.
pub async fn get_logging(state: &AppState, bucket: &str) -> Response {
    match state.storage.get_bucket_config(bucket).await {
        Ok(config) => {
            if let Some(l) = &config.logging {
                xml_response(StatusCode::OK, l.to_string())
            } else {
                let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <BucketLoggingStatus xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
                </BucketLoggingStatus>";
                xml_response(StatusCode::OK, xml.to_string())
            }
        }
        Err(e) => storage_err(e),
    }
}
/// `GET ?versions` — lists object versions for a bucket.
///
/// This engine does not expose historical versions here, so every current
/// object is reported as a single `null` version marked latest. The bucket
/// name and object keys are XML-escaped before interpolation; previously a
/// key containing `<` or `&` produced malformed/injectable XML.
pub async fn list_object_versions(state: &AppState, bucket: &str) -> Response {
    /// Minimal XML text escape for user-controlled content.
    fn xml_escape(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '&' => out.push_str("&amp;"),
                '<' => out.push_str("&lt;"),
                '>' => out.push_str("&gt;"),
                '"' => out.push_str("&quot;"),
                '\'' => out.push_str("&apos;"),
                _ => out.push(c),
            }
        }
        out
    }
    // Existence check via a full bucket listing. NOTE(review): O(#buckets)
    // per request — a dedicated exists/head call would be cheaper if the
    // storage trait grows one.
    match state.storage.list_buckets().await {
        Ok(buckets) => {
            if !buckets.iter().any(|b| b.name == bucket) {
                return storage_err(myfsio_storage::error::StorageError::BucketNotFound(
                    bucket.to_string(),
                ));
            }
        }
        Err(e) => return storage_err(e),
    }
    let params = myfsio_common::types::ListParams {
        max_keys: 1000,
        ..Default::default()
    };
    let objects = match state.storage.list_objects(bucket, &params).await {
        Ok(result) => result.objects,
        Err(e) => return storage_err(e),
    };
    let mut xml = String::from(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">"
    );
    xml.push_str(&format!("<Name>{}</Name>", xml_escape(bucket)));
    for obj in &objects {
        xml.push_str("<Version>");
        xml.push_str(&format!("<Key>{}</Key>", xml_escape(&obj.key)));
        // No real version history: a single "null" version, always latest.
        xml.push_str("<VersionId>null</VersionId>");
        xml.push_str("<IsLatest>true</IsLatest>");
        xml.push_str(&format!(
            "<LastModified>{}</LastModified>",
            obj.last_modified.to_rfc3339()
        ));
        if let Some(ref etag) = obj.etag {
            xml.push_str(&format!("<ETag>\"{}\"</ETag>", etag));
        }
        xml.push_str(&format!("<Size>{}</Size>", obj.size));
        xml.push_str("<StorageClass>STANDARD</StorageClass>");
        xml.push_str("</Version>");
    }
    xml.push_str("</ListVersionsResult>");
    xml_response(StatusCode::OK, xml)
}
/// `GET /{bucket}/{key}?tagging` — renders an object's tags as S3 XML.
///
/// Tag keys and values are XML-escaped before interpolation; previously raw
/// values could inject markup into the response document.
pub async fn get_object_tagging(state: &AppState, bucket: &str, key: &str) -> Response {
    /// Minimal XML text escape for user-controlled content.
    fn xml_escape(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '&' => out.push_str("&amp;"),
                '<' => out.push_str("&lt;"),
                '>' => out.push_str("&gt;"),
                '"' => out.push_str("&quot;"),
                '\'' => out.push_str("&apos;"),
                _ => out.push(c),
            }
        }
        out
    }
    match state.storage.get_object_tags(bucket, key).await {
        Ok(tags) => {
            let mut xml = String::from(
                "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
                <Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><TagSet>"
            );
            for tag in &tags {
                xml.push_str(&format!(
                    "<Tag><Key>{}</Key><Value>{}</Value></Tag>",
                    xml_escape(&tag.key),
                    xml_escape(&tag.value)
                ));
            }
            xml.push_str("</TagSet></Tagging>");
            xml_response(StatusCode::OK, xml)
        }
        Err(e) => storage_err(e),
    }
}
/// `PUT /{bucket}/{key}?tagging` — replaces the object's tag set with the
/// tags parsed from the request body.
pub async fn put_object_tagging(state: &AppState, bucket: &str, key: &str, body: Body) -> Response {
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return xml_response(
                StatusCode::BAD_REQUEST,
                S3Error::from_code(S3ErrorCode::MalformedXML).to_xml(),
            );
        }
    };
    let xml_str = String::from_utf8_lossy(&body_bytes);
    // NOTE(review): an unparsable body yields an empty tag list (clearing
    // all tags) rather than a MalformedXML error — confirm intended.
    let tags = parse_tagging_xml(&xml_str);
    match state.storage.set_object_tags(bucket, key, &tags).await {
        Ok(()) => StatusCode::OK.into_response(),
        Err(e) => storage_err(e),
    }
}

/// `DELETE /{bucket}/{key}?tagging` — removes all object tags; 204.
pub async fn delete_object_tagging(state: &AppState, bucket: &str, key: &str) -> Response {
    match state.storage.delete_object_tags(bucket, key).await {
        Ok(()) => StatusCode::NO_CONTENT.into_response(),
        Err(e) => storage_err(e),
    }
}
/// `GET /{bucket}/{key}?acl` — existence-checked stub: returns a canned
/// FULL_CONTROL policy for the fixed "myfsio" owner if the object exists.
pub async fn get_object_acl(state: &AppState, bucket: &str, key: &str) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => {
            let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
            <AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
            <Owner><ID>myfsio</ID><DisplayName>myfsio</DisplayName></Owner>\
            <AccessControlList>\
            <Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
            <ID>myfsio</ID><DisplayName>myfsio</DisplayName></Grantee>\
            <Permission>FULL_CONTROL</Permission></Grant>\
            </AccessControlList></AccessControlPolicy>";
            xml_response(StatusCode::OK, xml.to_string())
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT /{bucket}/{key}?acl` — accept-and-discard stub: the body is ignored;
/// 200 if the object exists.
pub async fn put_object_acl(state: &AppState, bucket: &str, key: &str, _body: Body) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => StatusCode::OK.into_response(),
        Err(e) => storage_err(e),
    }
}

/// `GET /{bucket}/{key}?retention` — always reports "no retention policy"
/// (404) for existing objects; retention is not implemented.
pub async fn get_object_retention(state: &AppState, bucket: &str, key: &str) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => {
            xml_response(
                StatusCode::NOT_FOUND,
                S3Error::new(
                    S3ErrorCode::InvalidRequest,
                    "No retention policy configured",
                ).to_xml(),
            )
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT /{bucket}/{key}?retention` — accept-and-discard stub; body ignored.
pub async fn put_object_retention(state: &AppState, bucket: &str, key: &str, _body: Body) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => StatusCode::OK.into_response(),
        Err(e) => storage_err(e),
    }
}

/// `GET /{bucket}/{key}?legal-hold` — always reports legal hold OFF for
/// existing objects; legal hold is not implemented.
pub async fn get_object_legal_hold(state: &AppState, bucket: &str, key: &str) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => {
            let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
            <LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
            <Status>OFF</Status></LegalHold>";
            xml_response(StatusCode::OK, xml.to_string())
        }
        Err(e) => storage_err(e),
    }
}

/// `PUT /{bucket}/{key}?legal-hold` — accept-and-discard stub; body ignored.
pub async fn put_object_legal_hold(state: &AppState, bucket: &str, key: &str, _body: Body) -> Response {
    match state.storage.head_object(bucket, key).await {
        Ok(_) => StatusCode::OK.into_response(),
        Err(e) => storage_err(e),
    }
}
/// Parses an S3 `Tagging` document into a list of tags.
///
/// Bug fix: `current_element` was never reset when an element closed, so in
/// any pretty-printed body the whitespace Text event between `</Key>` and
/// `<Value>` was still attributed to `Key` and clobbered the parsed key with
/// "\n". The element name is now cleared on every End event, and text is
/// accumulated (quick-xml may split one element's text across events).
///
/// Malformed input degrades gracefully: parsing stops at the first reader
/// error and whatever was collected so far is returned.
fn parse_tagging_xml(xml: &str) -> Vec<myfsio_common::types::Tag> {
    let mut tags = Vec::new();
    let mut in_tag = false;
    let mut current_key = String::new();
    let mut current_value = String::new();
    let mut current_element = String::new();
    let mut reader = quick_xml::Reader::from_str(xml);
    let mut buf = Vec::new();
    loop {
        match reader.read_event_into(&mut buf) {
            Ok(quick_xml::events::Event::Start(ref e)) => {
                let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
                current_element = name.clone();
                if name == "Tag" {
                    in_tag = true;
                    current_key.clear();
                    current_value.clear();
                }
            }
            Ok(quick_xml::events::Event::Text(ref e)) => {
                if in_tag {
                    let text = e.unescape().unwrap_or_default();
                    // Append rather than assign: one element's text may be
                    // delivered as several events.
                    match current_element.as_str() {
                        "Key" => current_key.push_str(&text),
                        "Value" => current_value.push_str(&text),
                        _ => {}
                    }
                }
            }
            Ok(quick_xml::events::Event::End(ref e)) => {
                let name = String::from_utf8_lossy(e.name().as_ref()).to_string();
                if name == "Tag" && in_tag {
                    // Tags without a key are silently dropped.
                    if !current_key.is_empty() {
                        tags.push(myfsio_common::types::Tag {
                            key: current_key.clone(),
                            value: current_value.clone(),
                        });
                    }
                    in_tag = false;
                }
                // Reset so inter-element whitespace is not attributed to the
                // previously opened element (the pretty-printed-XML bug).
                current_element.clear();
            }
            Ok(quick_xml::events::Event::Eof) => break,
            Err(_) => break,
            _ => {}
        }
        buf.clear();
    }
    tags
}

View File

@@ -0,0 +1,278 @@
use axum::body::Body;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use base64::engine::general_purpose::STANDARD as B64;
use base64::Engine;
use serde_json::json;
use crate::state::AppState;
/// Wraps a JSON value in a 200 response with `application/json`.
fn json_ok(value: serde_json::Value) -> Response {
    (
        StatusCode::OK,
        [("content-type", "application/json")],
        value.to_string(),
    )
    .into_response()
}

/// Builds a JSON error response of the shape `{"error": "<msg>"}`.
fn json_err(status: StatusCode, msg: &str) -> Response {
    (
        status,
        [("content-type", "application/json")],
        json!({"error": msg}).to_string(),
    )
    .into_response()
}
/// `GET /kms/keys` — lists every KMS key as JSON under a `"keys"` array.
///
/// Responds 503 when the KMS subsystem is not enabled.
pub async fn list_keys(State(state): State<AppState>) -> Response {
    let Some(kms) = state.kms.as_ref() else {
        return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled");
    };
    let all_keys = kms.list_keys().await;
    let mut entries = Vec::with_capacity(all_keys.len());
    for key in &all_keys {
        entries.push(json!({
            "KeyId": key.key_id,
            "Arn": key.arn,
            "Description": key.description,
            "CreationDate": key.creation_date.to_rfc3339(),
            "Enabled": key.enabled,
            "KeyState": key.key_state,
            "KeyUsage": key.key_usage,
            "KeySpec": key.key_spec,
        }));
    }
    json_ok(json!({"keys": entries}))
}
/// `POST /kms/keys` — creates a new KMS key.
///
/// The optional JSON body may carry a description under `Description` or
/// `description`; an empty or unparsable body is tolerated and treated as
/// an empty description.
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(c) => c.to_bytes(),
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
    };
    let description = if body_bytes.is_empty() {
        String::new()
    } else {
        match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
            Ok(v) => v
                .get("Description")
                .or_else(|| v.get("description"))
                .and_then(|d| d.as_str())
                .unwrap_or("")
                .to_string(),
            Err(_) => String::new(),
        }
    };
    match kms.create_key(&description).await {
        Ok(key) => json_ok(json!({
            "KeyId": key.key_id,
            "Arn": key.arn,
            "Description": key.description,
            "CreationDate": key.creation_date.to_rfc3339(),
            "Enabled": key.enabled,
            "KeyState": key.key_state,
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
/// `GET /kms/keys/{key_id}` — returns one key's metadata, or 404.
pub async fn get_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    match kms.get_key(&key_id).await {
        Some(key) => json_ok(json!({
            "KeyId": key.key_id,
            "Arn": key.arn,
            "Description": key.description,
            "CreationDate": key.creation_date.to_rfc3339(),
            "Enabled": key.enabled,
            "KeyState": key.key_state,
            "KeyUsage": key.key_usage,
            "KeySpec": key.key_spec,
        })),
        None => json_err(StatusCode::NOT_FOUND, "Key not found"),
    }
}

/// `DELETE /kms/keys/{key_id}` — deletes a key; 204 on success, 404 when
/// the key does not exist.
pub async fn delete_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    match kms.delete_key(&key_id).await {
        Ok(true) => StatusCode::NO_CONTENT.into_response(),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
/// `POST /kms/keys/{key_id}/enable` — marks a key enabled; 404 when absent.
pub async fn enable_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    match kms.enable_key(&key_id).await {
        Ok(true) => json_ok(json!({"status": "enabled"})),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}

/// `POST /kms/keys/{key_id}/disable` — marks a key disabled; 404 when absent.
pub async fn disable_key(
    State(state): State<AppState>,
    axum::extract::Path(key_id): axum::extract::Path<String>,
) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    match kms.disable_key(&key_id).await {
        Ok(true) => json_ok(json!({"status": "disabled"})),
        Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
/// `POST /kms/encrypt` — encrypts base64 plaintext under the named key.
///
/// Request JSON: `{"KeyId": "...", "Plaintext": "<base64>"}`.
/// Response JSON: `{"KeyId": "...", "CiphertextBlob": "<base64>"}`.
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(c) => c.to_bytes(),
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
    };
    let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
        Ok(v) => v,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
    };
    let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
        Some(k) => k,
        None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
    };
    let plaintext_b64 = match req.get("Plaintext").and_then(|v| v.as_str()) {
        Some(p) => p,
        None => return json_err(StatusCode::BAD_REQUEST, "Missing Plaintext"),
    };
    let plaintext = match B64.decode(plaintext_b64) {
        Ok(p) => p,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64 Plaintext"),
    };
    match kms.encrypt_data(key_id, &plaintext).await {
        Ok(ct) => json_ok(json!({
            "KeyId": key_id,
            "CiphertextBlob": B64.encode(&ct),
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}

/// `POST /kms/decrypt` — decrypts a base64 ciphertext blob under the named
/// key; the inverse of [`encrypt`].
///
/// Request JSON: `{"KeyId": "...", "CiphertextBlob": "<base64>"}`.
/// Response JSON: `{"KeyId": "...", "Plaintext": "<base64>"}`.
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(c) => c.to_bytes(),
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
    };
    let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
        Ok(v) => v,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
    };
    let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
        Some(k) => k,
        None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
    };
    let ct_b64 = match req.get("CiphertextBlob").and_then(|v| v.as_str()) {
        Some(c) => c,
        None => return json_err(StatusCode::BAD_REQUEST, "Missing CiphertextBlob"),
    };
    let ciphertext = match B64.decode(ct_b64) {
        Ok(c) => c,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid base64"),
    };
    match kms.decrypt_data(key_id, &ciphertext).await {
        Ok(pt) => json_ok(json!({
            "KeyId": key_id,
            "Plaintext": B64.encode(&pt),
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
/// `POST /kms/generate-data-key` — generates a fresh data key under the
/// named key, returning both the plaintext key and the wrapped (encrypted)
/// copy, base64-encoded.
///
/// Fix: a `NumberOfBytes` that is present but not an unsigned integer
/// (negative, string, float) was silently replaced by the default 32; it is
/// now rejected with 400. The 1..=1024 range is also checked on the raw u64
/// *before* the `usize` cast, so a huge value cannot wrap on 32-bit targets.
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
    let kms = match &state.kms {
        Some(k) => k,
        None => return json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"),
    };
    let body_bytes = match http_body_util::BodyExt::collect(body).await {
        Ok(c) => c.to_bytes(),
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid request body"),
    };
    let req: serde_json::Value = match serde_json::from_slice(&body_bytes) {
        Ok(v) => v,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid JSON"),
    };
    let key_id = match req.get("KeyId").and_then(|v| v.as_str()) {
        Some(k) => k,
        None => return json_err(StatusCode::BAD_REQUEST, "Missing KeyId"),
    };
    // Absent => default 32 bytes (a 256-bit key); present but invalid => 400.
    let num_bytes: u64 = match req.get("NumberOfBytes") {
        None => 32,
        Some(v) => match v.as_u64() {
            Some(n) => n,
            None => return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024"),
        },
    };
    if !(1..=1024).contains(&num_bytes) {
        return json_err(StatusCode::BAD_REQUEST, "NumberOfBytes must be 1-1024");
    }
    match kms.generate_data_key(key_id, num_bytes as usize).await {
        Ok((plaintext, wrapped)) => json_ok(json!({
            "KeyId": key_id,
            "Plaintext": B64.encode(&plaintext),
            "CiphertextBlob": B64.encode(&wrapped),
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
pub mod config;
pub mod handlers;
pub mod middleware;
pub mod services;
pub mod state;
use axum::Router;
/// Value for the `Server` response header, e.g. `MyFSIO-Rust/0.1.0`.
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
/// Builds the application router: the S3 routes (list/create/get/delete
/// buckets and objects), optional KMS admin routes, and the auth and
/// server-header middleware layers.
///
/// NOTE(review): when KMS is enabled, the static `/kms/...` routes coexist
/// with the dynamic `/{bucket}` and `/{bucket}/{*key}` routes — a bucket
/// literally named `kms` then becomes partly unreachable; confirm routing
/// precedence against the axum version in use.
pub fn create_router(state: state::AppState) -> Router {
    let mut router = Router::new()
        .route("/", axum::routing::get(handlers::list_buckets))
        .route(
            "/{bucket}",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        .route(
            "/{bucket}/{*key}",
            axum::routing::put(handlers::put_object)
                .get(handlers::get_object)
                .delete(handlers::delete_object)
                .head(handlers::head_object)
                .post(handlers::post_object),
        );
    // KMS admin API is only mounted when enabled in the config.
    if state.config.kms_enabled {
        router = router
            .route("/kms/keys", axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key))
            .route("/kms/keys/{key_id}", axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key))
            .route("/kms/keys/{key_id}/enable", axum::routing::post(handlers::kms::enable_key))
            .route("/kms/keys/{key_id}/disable", axum::routing::post(handlers::kms::disable_key))
            .route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
            .route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
            .route("/kms/generate-data-key", axum::routing::post(handlers::kms::generate_data_key));
    }
    // Layers run outermost-last: auth wraps the routes, the server header
    // is applied outside of auth (so even auth failures carry it).
    router
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn(middleware::server_header))
        .with_state(state)
}

View File

@@ -0,0 +1,78 @@
use myfsio_server::config::ServerConfig;
use myfsio_server::state::AppState;
/// Server entry point: loads config from the environment, builds application
/// state (with optional encryption/KMS), spawns the enabled background
/// services, and serves HTTP until Ctrl+C.
#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();
    let config = ServerConfig::from_env();
    let bind_addr = config.bind_addr;
    tracing::info!("MyFSIO Rust Engine starting on {}", bind_addr);
    tracing::info!("Storage root: {}", config.storage_root.display());
    tracing::info!("Region: {}", config.region);
    tracing::info!(
        "Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics: {}",
        config.encryption_enabled,
        config.kms_enabled,
        config.gc_enabled,
        config.lifecycle_enabled,
        config.integrity_enabled,
        config.metrics_enabled
    );
    // The encryption-capable constructor is async (presumably loads/creates
    // key material — confirm in AppState); the plain one is synchronous.
    let state = if config.encryption_enabled || config.kms_enabled {
        AppState::new_with_encryption(config.clone()).await
    } else {
        AppState::new(config.clone())
    };
    // Handles are retained so the background tasks can be aborted after the
    // server has drained (graceful shutdown completes first).
    let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();
    if let Some(ref gc) = state.gc {
        bg_handles.push(gc.clone().start_background());
        tracing::info!("GC background service started");
    }
    if let Some(ref integrity) = state.integrity {
        bg_handles.push(integrity.clone().start_background());
        tracing::info!("Integrity checker background service started");
    }
    if let Some(ref metrics) = state.metrics {
        bg_handles.push(metrics.clone().start_background());
        tracing::info!("Metrics collector background service started");
    }
    if config.lifecycle_enabled {
        let lifecycle = std::sync::Arc::new(
            myfsio_server::services::lifecycle::LifecycleService::new(
                state.storage.clone(),
                myfsio_server::services::lifecycle::LifecycleConfig::default(),
            ),
        );
        bg_handles.push(lifecycle.start_background());
        tracing::info!("Lifecycle manager background service started");
    }
    let app = myfsio_server::create_router(state);
    // NOTE(review): bind/serve failures panic via unwrap — acceptable at the
    // top level, but the panic message will not name the address in use.
    let listener = tokio::net::TcpListener::bind(bind_addr).await.unwrap();
    tracing::info!("Listening on {}", bind_addr);
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();
    // Server has stopped accepting; tear down the background loops.
    for handle in bg_handles {
        handle.abort();
    }
}
/// Resolves once the process receives Ctrl+C (SIGINT); used as the graceful
/// shutdown trigger for `axum::serve`.
async fn shutdown_signal() {
    let ctrl_c = tokio::signal::ctrl_c();
    ctrl_c.await.expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutdown signal received");
}

View File

@@ -0,0 +1,390 @@
use axum::extract::{Request, State};
use axum::http::StatusCode;
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use chrono::{NaiveDateTime, Utc};
use myfsio_auth::sigv4;
use myfsio_common::error::{S3Error, S3ErrorCode};
use myfsio_common::types::Principal;
use crate::state::AppState;
/// Axum middleware that authenticates every request before it reaches a
/// handler.
///
/// On success the resolved [`Principal`] is attached to the request's
/// extensions for handlers to read; on failure an S3-style XML error response
/// is returned immediately. Requests presenting no credentials at all are
/// denied with `AccessDenied`.
///
/// The original implementation special-cased `GET /` with a branch that was
/// byte-for-byte identical to the general path (and cloned the URI solely to
/// detect it); the duplication has been removed with no behavior change.
pub async fn auth_layer(
    State(state): State<AppState>,
    mut req: Request,
    next: Next,
) -> Response {
    match try_auth(&state, &req) {
        AuthResult::Ok(principal) => {
            // Make the authenticated principal available to downstream
            // handlers via request extensions.
            req.extensions_mut().insert(principal);
            next.run(req).await
        }
        AuthResult::Denied(err) => error_response(err),
        AuthResult::NoAuth => {
            error_response(S3Error::from_code(S3ErrorCode::AccessDenied))
        }
    }
}
/// Outcome of an authentication attempt.
enum AuthResult {
    /// Credentials verified; carries the resolved principal.
    Ok(Principal),
    /// Credentials were presented but rejected (bad signature, unknown key,
    /// expired/malformed request).
    Denied(S3Error),
    /// No recognizable credentials were presented at all.
    NoAuth,
}
/// Attempts the supported authentication schemes in priority order:
/// 1. SigV4 `Authorization` header,
/// 2. SigV4 presigned-URL query parameters,
/// 3. legacy `x-access-key`/`x-secret-key` headers.
fn try_auth(state: &AppState, req: &Request) -> AuthResult {
    if let Some(auth_header) = req.headers().get("authorization") {
        if let Ok(auth_str) = auth_header.to_str() {
            if auth_str.starts_with("AWS4-HMAC-SHA256 ") {
                return verify_sigv4_header(state, req, auth_str);
            }
        }
    }
    // Presigned URLs advertise themselves via the X-Amz-Algorithm parameter.
    let query = req.uri().query().unwrap_or("");
    if query.contains("X-Amz-Algorithm=AWS4-HMAC-SHA256") {
        return verify_sigv4_query(state, req);
    }
    // NOTE(review): this fallback sends the secret key in a plaintext header;
    // presumably a dev/test convenience — confirm it is not exposed in
    // production deployments.
    if let (Some(ak), Some(sk)) = (
        req.headers().get("x-access-key").and_then(|v| v.to_str().ok()),
        req.headers().get("x-secret-key").and_then(|v| v.to_str().ok()),
    ) {
        return match state.iam.authenticate(ak, sk) {
            Some(principal) => AuthResult::Ok(principal),
            None => AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
            ),
        };
    }
    AuthResult::NoAuth
}
/// Verifies a SigV4 `Authorization`-header request.
///
/// Parses `Credential=`, `SignedHeaders=` and `Signature=` out of the header,
/// checks timestamp freshness, recomputes the signature with the stored
/// secret key, and resolves the principal on success.
fn verify_sigv4_header(state: &AppState, req: &Request, auth_str: &str) -> AuthResult {
    // Header shape: "AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=..."
    let parts: Vec<&str> = auth_str
        .strip_prefix("AWS4-HMAC-SHA256 ")
        .unwrap()
        .split(", ")
        .collect();
    if parts.len() != 3 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed Authorization header"),
        );
    }
    let credential = parts[0].strip_prefix("Credential=").unwrap_or("");
    let signed_headers_str = parts[1].strip_prefix("SignedHeaders=").unwrap_or("");
    let provided_signature = parts[2].strip_prefix("Signature=").unwrap_or("");
    // Credential scope: access_key/date/region/service/aws4_request.
    // NOTE(review): cred_parts[4] ("aws4_request") is not validated here —
    // confirm whether that is intentional.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }
    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];
    // Prefer x-amz-date, fall back to the plain Date header.
    let amz_date = req
        .headers()
        .get("x-amz-date")
        .or_else(|| req.headers().get("date"))
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");
    if amz_date.is_empty() {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::AccessDenied, "Missing Date header"),
        );
    }
    // Clock-skew / replay mitigation within the configured tolerance.
    if let Some(err) = check_timestamp_freshness(amz_date, state.config.sigv4_timestamp_tolerance_secs) {
        return AuthResult::Denied(err);
    }
    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };
    let method = req.method().as_str();
    let canonical_uri = req.uri().path();
    let query_params = parse_query_params(req.uri().query().unwrap_or(""));
    // Absent x-amz-content-sha256 is treated as an unsigned payload.
    let payload_hash = req
        .headers()
        .get("x-amz-content-sha256")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("UNSIGNED-PAYLOAD");
    // Collect the live request values for each signed header, in the order
    // the client listed them; a missing header contributes an empty string.
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );
    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
/// Verifies a SigV4 presigned-URL request (credentials carried in the query
/// string rather than the Authorization header).
///
/// Validates the presence of the required `X-Amz-*` parameters, enforces the
/// configured expiry window, recomputes the signature over the query string
/// minus `X-Amz-Signature`, and resolves the principal on success.
fn verify_sigv4_query(state: &AppState, req: &Request) -> AuthResult {
    let query = req.uri().query().unwrap_or("");
    let params = parse_query_params(query);
    let param_map: std::collections::HashMap<&str, &str> = params
        .iter()
        .map(|(k, v)| (k.as_str(), v.as_str()))
        .collect();
    let credential = match param_map.get("X-Amz-Credential") {
        Some(c) => *c,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Credential"),
            );
        }
    };
    // Presigned URLs typically sign only the host header.
    let signed_headers_str = param_map
        .get("X-Amz-SignedHeaders")
        .copied()
        .unwrap_or("host");
    let provided_signature = match param_map.get("X-Amz-Signature") {
        Some(s) => *s,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Signature"),
            );
        }
    };
    let amz_date = match param_map.get("X-Amz-Date") {
        Some(d) => *d,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Date"),
            );
        }
    };
    let expires_str = match param_map.get("X-Amz-Expires") {
        Some(e) => *e,
        None => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Missing X-Amz-Expires"),
            );
        }
    };
    // Credential scope: access_key/date/region/service/aws4_request.
    let cred_parts: Vec<&str> = credential.split('/').collect();
    if cred_parts.len() != 5 {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "Malformed credential"),
        );
    }
    let access_key = cred_parts[0];
    let date_stamp = cred_parts[1];
    let region = cred_parts[2];
    let service = cred_parts[3];
    let expires: u64 = match expires_str.parse() {
        Ok(e) => e,
        Err(_) => {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::InvalidArgument, "Invalid X-Amz-Expires"),
            );
        }
    };
    // Server-side policy bounds on how long a presigned URL may live.
    if expires < state.config.presigned_url_min_expiry
        || expires > state.config.presigned_url_max_expiry
    {
        return AuthResult::Denied(
            S3Error::new(S3ErrorCode::InvalidArgument, "X-Amz-Expires out of range"),
        );
    }
    // NOTE(review): if X-Amz-Date fails to parse, this whole expiry block is
    // silently skipped (fail-open); the signature check below still runs, but
    // confirm whether a malformed date should be rejected outright.
    if let Ok(request_time) =
        NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ")
    {
        let request_utc = request_time.and_utc();
        let now = Utc::now();
        let elapsed = (now - request_utc).num_seconds();
        if elapsed > expires as i64 {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request has expired"),
            );
        }
        // Allow small negative skew (client clock ahead of server).
        if elapsed < -(state.config.sigv4_timestamp_tolerance_secs as i64) {
            return AuthResult::Denied(
                S3Error::new(S3ErrorCode::AccessDenied, "Request is too far in the future"),
            );
        }
    }
    let secret_key = match state.iam.get_secret_key(access_key) {
        Some(sk) => sk,
        None => {
            return AuthResult::Denied(
                S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
            );
        }
    };
    let method = req.method().as_str();
    let canonical_uri = req.uri().path();
    // The signature itself is excluded from the canonical query string.
    let query_params_no_sig: Vec<(String, String)> = params
        .iter()
        .filter(|(k, _)| k != "X-Amz-Signature")
        .cloned()
        .collect();
    // Presigned URLs always use an unsigned payload.
    let payload_hash = "UNSIGNED-PAYLOAD";
    let signed_headers: Vec<&str> = signed_headers_str.split(';').collect();
    let header_values: Vec<(String, String)> = signed_headers
        .iter()
        .map(|&name| {
            let value = req
                .headers()
                .get(name)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            (name.to_string(), value.to_string())
        })
        .collect();
    let verified = sigv4::verify_sigv4_signature(
        method,
        canonical_uri,
        &query_params_no_sig,
        signed_headers_str,
        &header_values,
        payload_hash,
        amz_date,
        date_stamp,
        region,
        service,
        &secret_key,
        provided_signature,
    );
    if !verified {
        return AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::SignatureDoesNotMatch),
        );
    }
    match state.iam.get_principal(access_key) {
        Some(p) => AuthResult::Ok(p),
        None => AuthResult::Denied(
            S3Error::from_code(S3ErrorCode::InvalidAccessKeyId),
        ),
    }
}
/// Denies requests whose `x-amz-date` differs from server time by more than
/// `tolerance_secs` in either direction (clock-skew / replay mitigation).
///
/// NOTE(review): when `amz_date` is not in `YYYYMMDDTHHMMSSZ` form the
/// `.ok()?` returns `None`, i.e. the freshness check is silently skipped
/// (fail-open). This also covers the RFC 2822 `Date` header the caller
/// accepts as a fallback — confirm whether malformed/HTTP-date values should
/// instead be parsed or denied.
fn check_timestamp_freshness(amz_date: &str, tolerance_secs: u64) -> Option<S3Error> {
    let request_time = NaiveDateTime::parse_from_str(amz_date, "%Y%m%dT%H%M%SZ").ok()?;
    let request_utc = request_time.and_utc();
    let now = Utc::now();
    // Absolute difference covers both stale and future-dated requests.
    let diff = (now - request_utc).num_seconds().unsigned_abs();
    if diff > tolerance_secs {
        return Some(S3Error::new(
            S3ErrorCode::AccessDenied,
            "Request timestamp too old or too far in the future",
        ));
    }
    None
}
/// Splits a raw query string into percent-decoded key/value pairs,
/// preserving the original parameter order. A parameter without `=`
/// yields an empty value.
fn parse_query_params(query: &str) -> Vec<(String, String)> {
    let mut params = Vec::new();
    if query.is_empty() {
        return params;
    }
    for pair in query.split('&') {
        let (raw_key, raw_value) = match pair.split_once('=') {
            Some((k, v)) => (k, v),
            None => (pair, ""),
        };
        params.push((urlencoding_decode(raw_key), urlencoding_decode(raw_value)));
    }
    params
}
/// Percent-decodes `s`, substituting U+FFFD for any invalid UTF-8 sequences.
fn urlencoding_decode(s: &str) -> String {
    let decoded = percent_encoding::percent_decode_str(s);
    decoded.decode_utf8_lossy().to_string()
}
/// Renders an [`S3Error`] as an XML error response with the matching HTTP
/// status (falling back to 500 for out-of-range status codes).
fn error_response(err: S3Error) -> Response {
    let status = StatusCode::from_u16(err.http_status())
        .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    (status, [("content-type", "application/xml")], err.to_xml()).into_response()
}

View File

@@ -0,0 +1,16 @@
mod auth;
pub use auth::auth_layer;
use axum::extract::Request;
use axum::middleware::Next;
use axum::response::Response;
/// Middleware that stamps the service's `Server` header on every response.
///
/// The original body called `.parse().unwrap()`, which would panic on every
/// request if `SERVER_HEADER` ever contained bytes invalid in a header value;
/// this version skips the header in that (pathological) case instead of
/// crashing the request.
pub async fn server_header(req: Request, next: Next) -> Response {
    let mut resp = next.run(req).await;
    if let Ok(value) = crate::SERVER_HEADER.parse() {
        resp.headers_mut().insert("server", value);
    }
    resp
}

View File

@@ -0,0 +1,263 @@
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
/// Tunables for the background garbage collector.
pub struct GcConfig {
    // Hours between automatic GC cycles.
    pub interval_hours: f64,
    // Temp files older than this many hours are removed.
    pub temp_file_max_age_hours: f64,
    // Incomplete multipart uploads older than this many days are removed.
    pub multipart_max_age_days: u64,
    // Lock files older than this many hours are treated as stale and removed.
    pub lock_file_max_age_hours: f64,
    // When true, cycles only report what would be deleted.
    pub dry_run: bool,
}
impl Default for GcConfig {
    /// Defaults: run every 6h; temp files expire after 24h, multipart uploads
    /// after 7 days, lock files after 1h; deletions enabled (no dry run).
    fn default() -> Self {
        Self {
            interval_hours: 6.0,
            temp_file_max_age_hours: 24.0,
            multipart_max_age_days: 7,
            lock_file_max_age_hours: 1.0,
            dry_run: false,
        }
    }
}
/// Background garbage collector for temp files, abandoned multipart uploads,
/// stale lock files and empty scratch directories under the storage root.
pub struct GcService {
    storage_root: PathBuf,
    config: GcConfig,
    // Guards against overlapping GC runs.
    running: Arc<RwLock<bool>>,
    // Rolling record of past executions (capped at 50 entries), persisted to
    // `history_path` after every run.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
impl GcService {
    /// Creates a GC service rooted at `storage_root`, loading any persisted
    /// execution history from `.myfsio.sys/config/gc_history.json`.
    pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("gc_history.json");
        // Best-effort load: any read/parse failure starts with empty history.
        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };
        Self {
            storage_root,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }
    /// Snapshot of the current configuration plus whether a cycle is running.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "temp_file_max_age_hours": self.config.temp_file_max_age_hours,
            "multipart_max_age_days": self.config.multipart_max_age_days,
            "lock_file_max_age_hours": self.config.lock_file_max_age_hours,
            "dry_run": self.config.dry_run,
        })
    }
    /// Returns the persisted list of past executions.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }
    /// Runs one GC cycle immediately. `dry_run=true` (or the configured
    /// `dry_run`) reports deletions without performing them.
    ///
    /// Errors if a cycle is already in progress. NOTE(review): the value
    /// returned to the caller is the untimed `result`; only the history
    /// record carries `execution_time_seconds` — confirm that asymmetry is
    /// intended.
    pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
        // Claim the run flag in its own scope so the lock is not held across
        // the (long) GC execution.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("GC already running".to_string());
            }
            *running = true;
        }
        let start = Instant::now();
        let result = self.execute_gc(dry_run || self.config.dry_run).await;
        let elapsed = start.elapsed().as_secs_f64();
        *self.running.write().await = false;
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }
        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run || self.config.dry_run,
            "result": result_json,
        });
        // Keep only the most recent 50 executions.
        {
            let mut history = self.history.write().await;
            history.push(record);
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;
        Ok(result)
    }
    /// Performs the actual sweep: stale temp files, aged multipart uploads,
    /// stale lock files, then empty scratch directories. All failures are
    /// collected into the returned report rather than aborting the cycle.
    async fn execute_gc(&self, dry_run: bool) -> Value {
        let mut temp_files_deleted = 0u64;
        let mut temp_bytes_freed = 0u64;
        let mut multipart_uploads_deleted = 0u64;
        let mut lock_files_deleted = 0u64;
        let mut empty_dirs_removed = 0u64;
        let mut errors: Vec<String> = Vec::new();
        let now = std::time::SystemTime::now();
        let temp_max_age = std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
        let multipart_max_age = std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
        let lock_max_age = std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);
        // Pass 1: stale temp files under .myfsio.sys/tmp.
        let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
        if tmp_dir.exists() {
            match std::fs::read_dir(&tmp_dir) {
                Ok(entries) => {
                    for entry in entries.flatten() {
                        if let Ok(metadata) = entry.metadata() {
                            if let Ok(modified) = metadata.modified() {
                                // duration_since errs if mtime is in the
                                // future; such files are simply skipped.
                                if let Ok(age) = now.duration_since(modified) {
                                    if age > temp_max_age {
                                        let size = metadata.len();
                                        if !dry_run {
                                            if let Err(e) = std::fs::remove_file(entry.path()) {
                                                errors.push(format!("Failed to remove temp file: {}", e));
                                                continue;
                                            }
                                        }
                                        temp_files_deleted += 1;
                                        temp_bytes_freed += size;
                                    }
                                }
                            }
                        }
                    }
                }
                Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
            }
        }
        // Pass 2: aged multipart upload directories, one level per bucket.
        let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
        if multipart_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
                        for upload in uploads.flatten() {
                            if let Ok(metadata) = upload.metadata() {
                                if let Ok(modified) = metadata.modified() {
                                    if let Ok(age) = now.duration_since(modified) {
                                        if age > multipart_max_age {
                                            // NOTE(review): removal errors are
                                            // discarded and the counter still
                                            // increments — confirm intended.
                                            if !dry_run {
                                                let _ = std::fs::remove_dir_all(upload.path());
                                            }
                                            multipart_uploads_deleted += 1;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        // Pass 3: stale lock files under each bucket's locks/ directory.
        let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
        if buckets_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    let locks_dir = bucket_entry.path().join("locks");
                    if locks_dir.exists() {
                        if let Ok(locks) = std::fs::read_dir(&locks_dir) {
                            for lock in locks.flatten() {
                                if let Ok(metadata) = lock.metadata() {
                                    if let Ok(modified) = metadata.modified() {
                                        if let Ok(age) = now.duration_since(modified) {
                                            if age > lock_max_age {
                                                if !dry_run {
                                                    let _ = std::fs::remove_file(lock.path());
                                                }
                                                lock_files_deleted += 1;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        // Pass 4: remove now-empty first-level subdirectories of tmp/multipart.
        // Skipped entirely in dry-run mode, so the report shows 0 then.
        if !dry_run {
            for dir in [&tmp_dir, &multipart_dir] {
                if dir.exists() {
                    if let Ok(entries) = std::fs::read_dir(dir) {
                        for entry in entries.flatten() {
                            if entry.path().is_dir() {
                                if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
                                    if contents.next().is_none() {
                                        let _ = std::fs::remove_dir(entry.path());
                                        empty_dirs_removed += 1;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        json!({
            "temp_files_deleted": temp_files_deleted,
            "temp_bytes_freed": temp_bytes_freed,
            "multipart_uploads_deleted": multipart_uploads_deleted,
            "lock_files_deleted": lock_files_deleted,
            "empty_dirs_removed": empty_dirs_removed,
            "errors": errors,
        })
    }
    /// Best-effort persistence of the execution history; write errors are
    /// intentionally ignored (history is advisory).
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(&self.history_path, serde_json::to_string_pretty(&data).unwrap_or_default());
    }
    /// Spawns the periodic GC loop. The first tick of `interval` fires
    /// immediately, so it is consumed once before the loop to delay the
    /// first cycle by one full interval.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("GC cycle starting");
                match self.run_now(false).await {
                    Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("GC cycle failed: {}", e),
                }
            }
        })
    }
}

View File

@@ -0,0 +1,204 @@
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde_json::{json, Value};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
/// Tunables for the background integrity checker.
pub struct IntegrityConfig {
    // Hours between automatic integrity scans.
    pub interval_hours: f64,
    // Maximum objects listed (and therefore checked) per bucket per scan.
    pub batch_size: usize,
    // Reserved: automatically repair detected corruption (currently unused).
    pub auto_heal: bool,
    // Reserved: report-only mode (currently unused by the scan itself).
    pub dry_run: bool,
}
impl Default for IntegrityConfig {
    /// Defaults: scan daily, up to 1000 objects per bucket, no auto-heal,
    /// no dry run.
    fn default() -> Self {
        Self {
            interval_hours: 24.0,
            batch_size: 1000,
            auto_heal: false,
            dry_run: false,
        }
    }
}
/// Background service that verifies stored objects against their recorded
/// ETags and detects metadata entries whose data files are missing.
pub struct IntegrityService {
    storage: Arc<FsStorageBackend>,
    config: IntegrityConfig,
    // Guards against overlapping scans.
    running: Arc<RwLock<bool>>,
    // Rolling record of past scans (capped at 50), persisted to disk.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
impl IntegrityService {
    /// Creates the checker, loading persisted scan history from
    /// `.myfsio.sys/config/integrity_history.json` under `storage_root`.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: &std::path::Path,
        config: IntegrityConfig,
    ) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("integrity_history.json");
        // Best-effort load: any read/parse failure starts with empty history.
        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };
        Self {
            storage,
            config,
            running: Arc::new(RwLock::new(false)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }
    /// Snapshot of the configuration plus whether a scan is running.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        json!({
            "enabled": true,
            "running": running,
            "interval_hours": self.config.interval_hours,
            "batch_size": self.config.batch_size,
            "auto_heal": self.config.auto_heal,
            "dry_run": self.config.dry_run,
        })
    }
    /// Returns the persisted list of past scans.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        json!({ "executions": *history })
    }
    /// Runs one integrity scan immediately; errors if one is in progress.
    ///
    /// NOTE(review): `dry_run`/`auto_heal` are recorded in history but
    /// currently ignored by `check_integrity` (scan is read-only) — confirm.
    pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
        // Claim the run flag in its own scope so the lock is not held while
        // scanning.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Integrity check already running".to_string());
            }
            *running = true;
        }
        let start = Instant::now();
        let result = self.check_integrity(dry_run, auto_heal).await;
        let elapsed = start.elapsed().as_secs_f64();
        *self.running.write().await = false;
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }
        let record = json!({
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run,
            "auto_heal": auto_heal,
            "result": result_json,
        });
        // Keep only the most recent 50 scans.
        {
            let mut history = self.history.write().await;
            history.push(record);
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;
        Ok(result)
    }
    /// Scans every bucket: re-hashes each object's file (MD5) and compares it
    /// against the recorded ETag; counts metadata whose data file is missing.
    ///
    /// NOTE(review): only the first `batch_size` objects per bucket are
    /// listed, so objects beyond that page are never checked — confirm.
    async fn check_integrity(&self, _dry_run: bool, _auto_heal: bool) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(b) => b,
            Err(e) => return json!({"error": e.to_string()}),
        };
        let mut objects_scanned = 0u64;
        let mut corrupted = 0u64;
        let mut phantom_metadata = 0u64;
        let mut errors: Vec<String> = Vec::new();
        for bucket in &buckets {
            let params = myfsio_common::types::ListParams {
                max_keys: self.config.batch_size,
                ..Default::default()
            };
            let objects = match self.storage.list_objects(&bucket.name, &params).await {
                Ok(r) => r.objects,
                Err(e) => {
                    errors.push(format!("{}: {}", bucket.name, e));
                    continue;
                }
            };
            for obj in &objects {
                objects_scanned += 1;
                match self.storage.get_object_path(&bucket.name, &obj.key).await {
                    Ok(path) => {
                        if !path.exists() {
                            // Metadata exists but the data file is gone.
                            phantom_metadata += 1;
                        } else if let Some(ref expected_etag) = obj.etag {
                            // Objects without a recorded ETag are skipped.
                            match myfsio_crypto::hashing::md5_file(&path) {
                                Ok(actual_etag) => {
                                    if &actual_etag != expected_etag {
                                        corrupted += 1;
                                    }
                                }
                                Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                            }
                        }
                    }
                    Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                }
            }
        }
        json!({
            "objects_scanned": objects_scanned,
            "buckets_scanned": buckets.len(),
            "corrupted_objects": corrupted,
            "phantom_metadata": phantom_metadata,
            "errors": errors,
        })
    }
    /// Best-effort persistence of the scan history; write errors are ignored.
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }
    /// Spawns the periodic scan loop; the interval's immediate first tick is
    /// consumed so the first scan happens one full interval after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Integrity check starting");
                match self.run_now(false, false).await {
                    Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
                    Err(e) => tracing::warn!("Integrity check failed: {}", e),
                }
            }
        })
    }
}

View File

@@ -0,0 +1,153 @@
use myfsio_storage::fs_backend::FsStorageBackend;
use myfsio_storage::traits::StorageEngine;
use serde_json::{json, Value};
use std::sync::Arc;
use tokio::sync::RwLock;
/// Tunables for the lifecycle-rule evaluator.
pub struct LifecycleConfig {
    // Seconds between automatic evaluation cycles.
    pub interval_seconds: u64,
}
impl Default for LifecycleConfig {
    /// Default: evaluate lifecycle rules hourly.
    fn default() -> Self {
        Self {
            interval_seconds: 3600,
        }
    }
}
/// Background service that applies per-bucket S3 lifecycle rules
/// (object expiration and incomplete-multipart abort).
pub struct LifecycleService {
    storage: Arc<FsStorageBackend>,
    config: LifecycleConfig,
    // Guards against overlapping evaluation cycles.
    running: Arc<RwLock<bool>>,
}
impl LifecycleService {
    /// Creates an evaluator over the given storage backend.
    pub fn new(storage: Arc<FsStorageBackend>, config: LifecycleConfig) -> Self {
        Self {
            storage,
            config,
            running: Arc::new(RwLock::new(false)),
        }
    }
    /// Runs one evaluation cycle; errors if one is already in progress.
    pub async fn run_cycle(&self) -> Result<Value, String> {
        // Claim the run flag in its own scope so the lock is not held while
        // evaluating.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Lifecycle already running".to_string());
            }
            *running = true;
        }
        let result = self.evaluate_rules().await;
        *self.running.write().await = false;
        Ok(result)
    }
    /// Walks every bucket's lifecycle configuration and applies each Enabled
    /// rule: `Expiration.Days` deletes objects older than the cutoff, and
    /// `AbortIncompleteMultipartUpload.DaysAfterInitiation` aborts stale
    /// uploads. Buckets without (parseable) lifecycle config are skipped.
    async fn evaluate_rules(&self) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(b) => b,
            Err(e) => return json!({"error": e.to_string()}),
        };
        let mut total_expired = 0u64;
        let mut total_multipart_aborted = 0u64;
        let mut errors: Vec<String> = Vec::new();
        for bucket in &buckets {
            let config = match self.storage.get_bucket_config(&bucket.name).await {
                Ok(c) => c,
                Err(_) => continue,
            };
            let lifecycle = match &config.lifecycle {
                Some(lc) => lc,
                None => continue,
            };
            // The lifecycle document is stored as a JSON string inside the
            // bucket config; unparseable documents are silently skipped.
            let rules = match lifecycle.as_str().and_then(|s| serde_json::from_str::<Value>(s).ok()) {
                Some(v) => v,
                None => continue,
            };
            let rules_arr = match rules.get("Rules").and_then(|r| r.as_array()) {
                Some(a) => a.clone(),
                None => continue,
            };
            for rule in &rules_arr {
                if rule.get("Status").and_then(|s| s.as_str()) != Some("Enabled") {
                    continue;
                }
                // Accept both the modern Filter.Prefix and the legacy
                // top-level Prefix forms.
                let prefix = rule
                    .get("Filter")
                    .and_then(|f| f.get("Prefix"))
                    .and_then(|p| p.as_str())
                    .or_else(|| rule.get("Prefix").and_then(|p| p.as_str()))
                    .unwrap_or("");
                if let Some(exp) = rule.get("Expiration") {
                    if let Some(days) = exp.get("Days").and_then(|d| d.as_u64()) {
                        let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
                        // NOTE(review): only the first 1000 keys per rule are
                        // listed; objects beyond that page are not expired in
                        // this cycle — confirm whether pagination is intended.
                        let params = myfsio_common::types::ListParams {
                            max_keys: 1000,
                            prefix: if prefix.is_empty() { None } else { Some(prefix.to_string()) },
                            ..Default::default()
                        };
                        if let Ok(result) = self.storage.list_objects(&bucket.name, &params).await {
                            for obj in &result.objects {
                                if obj.last_modified < cutoff {
                                    match self.storage.delete_object(&bucket.name, &obj.key).await {
                                        Ok(()) => total_expired += 1,
                                        Err(e) => errors.push(format!("{}:{}: {}", bucket.name, obj.key, e)),
                                    }
                                }
                            }
                        }
                    }
                }
                if let Some(abort) = rule.get("AbortIncompleteMultipartUpload") {
                    if let Some(days) = abort.get("DaysAfterInitiation").and_then(|d| d.as_u64()) {
                        let cutoff = chrono::Utc::now() - chrono::Duration::days(days as i64);
                        if let Ok(uploads) = self.storage.list_multipart_uploads(&bucket.name).await {
                            for upload in &uploads {
                                if upload.initiated < cutoff {
                                    match self.storage.abort_multipart(&bucket.name, &upload.upload_id).await {
                                        Ok(()) => total_multipart_aborted += 1,
                                        Err(e) => errors.push(format!("abort {}: {}", upload.upload_id, e)),
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        json!({
            "objects_expired": total_expired,
            "multipart_aborted": total_multipart_aborted,
            "buckets_evaluated": buckets.len(),
            "errors": errors,
        })
    }
    /// Spawns the periodic evaluation loop; the interval's immediate first
    /// tick is consumed so the first cycle runs one full interval after start.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_seconds);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Lifecycle evaluation starting");
                match self.run_cycle().await {
                    Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("Lifecycle cycle failed: {}", e),
                }
            }
        })
    }
}

View File

@@ -0,0 +1,219 @@
use serde_json::{json, Value};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
/// Tunables for the operation-metrics collector.
pub struct MetricsConfig {
    // Minutes per aggregation window; each window is flushed as one snapshot.
    pub interval_minutes: u64,
    // Hours of snapshots retained before the oldest are dropped.
    pub retention_hours: u64,
}
impl Default for MetricsConfig {
    /// Defaults: 5-minute windows, 24 hours of retention (288 snapshots).
    fn default() -> Self {
        Self {
            interval_minutes: 5,
            retention_hours: 24,
        }
    }
}
/// Per-HTTP-method counters accumulated within one metrics window.
struct MethodStats {
    // Total requests observed.
    count: u64,
    // Requests with status < 400.
    success_count: u64,
    // Requests with status >= 400.
    error_count: u64,
    // Request body bytes received.
    bytes_in: u64,
    // Response body bytes sent.
    bytes_out: u64,
    // Raw per-request latencies (ms); summarized at snapshot time.
    latencies: Vec<f64>,
}
impl MethodStats {
    /// Zeroed stats for a method first seen in the current window.
    fn new() -> Self {
        Self {
            count: 0,
            success_count: 0,
            error_count: 0,
            bytes_in: 0,
            bytes_out: 0,
            latencies: Vec::new(),
        }
    }
    /// Summarizes the counters plus latency min/max/avg and nearest-rank
    /// percentiles into a JSON object; all latency figures are 0.0 when no
    /// samples were recorded.
    fn to_json(&self) -> Value {
        let (min, max, avg, p50, p95, p99) = if self.latencies.is_empty() {
            (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        } else {
            // Sort a copy; NaN-safe comparison treats incomparable pairs as
            // equal.
            let mut sorted = self.latencies.clone();
            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
            let len = sorted.len();
            let sum: f64 = sorted.iter().sum();
            // Percentiles are approximate (index truncation, no
            // interpolation): p50 uses len/2, p95/p99 clamp to the last
            // element for tiny samples.
            (
                sorted[0],
                sorted[len - 1],
                sum / len as f64,
                sorted[len / 2],
                sorted[((len as f64 * 0.95) as usize).min(len - 1)],
                sorted[((len as f64 * 0.99) as usize).min(len - 1)],
            )
        };
        json!({
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
            "latency_min_ms": min,
            "latency_max_ms": max,
            "latency_avg_ms": avg,
            "latency_p50_ms": p50,
            "latency_p95_ms": p95,
            "latency_p99_ms": p99,
        })
    }
}
/// Mutable accumulation state for the in-progress metrics window.
struct CurrentWindow {
    // Stats keyed by HTTP method name.
    by_method: HashMap<String, MethodStats>,
    // Request counts keyed by status class ("2xx", "4xx", ...).
    by_status_class: HashMap<String, u64>,
    // When this window started accumulating.
    start_time: Instant,
}
impl CurrentWindow {
fn new() -> Self {
Self {
by_method: HashMap::new(),
by_status_class: HashMap::new(),
start_time: Instant::now(),
}
}
fn reset(&mut self) {
self.by_method.clear();
self.by_status_class.clear();
self.start_time = Instant::now();
}
}
/// Collects per-request operation metrics in fixed windows and periodically
/// flushes each window as a snapshot persisted to disk.
pub struct MetricsService {
    config: MetricsConfig,
    // The window currently accumulating.
    current: Arc<RwLock<CurrentWindow>>,
    // Flushed window snapshots, capped by the retention policy.
    snapshots: Arc<RwLock<Vec<Value>>>,
    snapshots_path: PathBuf,
}
impl MetricsService {
    /// Creates the collector, loading persisted snapshots from
    /// `.myfsio.sys/config/operation_metrics.json` under `storage_root`.
    pub fn new(storage_root: &std::path::Path, config: MetricsConfig) -> Self {
        let snapshots_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("operation_metrics.json");
        // Best-effort load: any read/parse failure starts with no snapshots.
        let snapshots = if snapshots_path.exists() {
            std::fs::read_to_string(&snapshots_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("snapshots").and_then(|s| s.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };
        Self {
            config,
            current: Arc::new(RwLock::new(CurrentWindow::new())),
            snapshots: Arc::new(RwLock::new(snapshots)),
            snapshots_path,
        }
    }
    /// Records one completed request into the current window.
    pub async fn record(&self, method: &str, status: u16, latency_ms: f64, bytes_in: u64, bytes_out: u64) {
        let mut window = self.current.write().await;
        let stats = window.by_method.entry(method.to_string()).or_insert_with(MethodStats::new);
        stats.count += 1;
        // Anything below 400 counts as success.
        if status < 400 {
            stats.success_count += 1;
        } else {
            stats.error_count += 1;
        }
        stats.bytes_in += bytes_in;
        stats.bytes_out += bytes_out;
        stats.latencies.push(latency_ms);
        // Bucket by status class, e.g. 204 -> "2xx".
        let class = format!("{}xx", status / 100);
        *window.by_status_class.entry(class).or_insert(0) += 1;
    }
    /// Read-only view: the live (unflushed) window plus all retained
    /// snapshots.
    pub async fn snapshot(&self) -> Value {
        let window = self.current.read().await;
        let mut by_method = serde_json::Map::new();
        for (method, stats) in &window.by_method {
            by_method.insert(method.clone(), stats.to_json());
        }
        let snapshots = self.snapshots.read().await;
        json!({
            "enabled": true,
            "current_window": {
                "by_method": by_method,
                "by_status_class": window.by_status_class,
                "window_start_elapsed_secs": window.start_time.elapsed().as_secs_f64(),
            },
            "snapshots": *snapshots,
        })
    }
    /// Converts the current window into a timestamped snapshot, resets the
    /// window, applies the retention cap, and persists to disk.
    async fn flush_window(&self) {
        let snap = {
            let mut window = self.current.write().await;
            let mut by_method = serde_json::Map::new();
            for (method, stats) in &window.by_method {
                by_method.insert(method.clone(), stats.to_json());
            }
            let snap = json!({
                "timestamp": chrono::Utc::now().to_rfc3339(),
                "window_seconds": self.config.interval_minutes * 60,
                "by_method": by_method,
                "by_status_class": window.by_status_class,
            });
            window.reset();
            snap
        };
        // NOTE(review): panics (divide by zero) if interval_minutes == 0;
        // presumably configs always set it >= 1 — confirm.
        let max_snapshots = (self.config.retention_hours * 60 / self.config.interval_minutes) as usize;
        {
            let mut snapshots = self.snapshots.write().await;
            snapshots.push(snap);
            if snapshots.len() > max_snapshots {
                let excess = snapshots.len() - max_snapshots;
                snapshots.drain(..excess);
            }
        }
        self.save_snapshots().await;
    }
    /// Best-effort persistence of the snapshots; write errors are ignored.
    async fn save_snapshots(&self) {
        let snapshots = self.snapshots.read().await;
        let data = json!({ "snapshots": *snapshots });
        if let Some(parent) = self.snapshots_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.snapshots_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }
    /// Spawns the periodic flush loop; the interval's immediate first tick is
    /// consumed so the first flush happens one full window after startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                self.flush_window().await;
            }
        })
    }
}

View File

@@ -0,0 +1,4 @@
pub mod gc;
pub mod lifecycle;
pub mod integrity;
pub mod metrics;

View File

@@ -0,0 +1,107 @@
use std::sync::Arc;
use crate::config::ServerConfig;
use crate::services::gc::GcService;
use crate::services::integrity::IntegrityService;
use crate::services::metrics::MetricsService;
use myfsio_auth::iam::IamService;
use myfsio_crypto::encryption::EncryptionService;
use myfsio_crypto::kms::KmsService;
use myfsio_storage::fs_backend::FsStorageBackend;
/// Shared, cheaply-cloneable application state handed to every handler and
/// middleware (all heavyweight members are behind `Arc`).
#[derive(Clone)]
pub struct AppState {
    pub config: ServerConfig,
    pub storage: Arc<FsStorageBackend>,
    pub iam: Arc<IamService>,
    // The following are `None` when the corresponding feature flag is off
    // (or, for crypto services, when initialization failed at startup).
    pub encryption: Option<Arc<EncryptionService>>,
    pub kms: Option<Arc<KmsService>>,
    pub gc: Option<Arc<GcService>>,
    pub integrity: Option<Arc<IntegrityService>>,
    pub metrics: Option<Arc<MetricsService>>,
}
impl AppState {
    /// Builds state without crypto services: storage + IAM always, and the
    /// GC/integrity/metrics services only when their flags are enabled.
    pub fn new(config: ServerConfig) -> Self {
        let storage = Arc::new(FsStorageBackend::new(config.storage_root.clone()));
        let iam = Arc::new(IamService::new_with_secret(
            config.iam_config_path.clone(),
            config.secret_key.clone(),
        ));
        let gc = if config.gc_enabled {
            Some(Arc::new(GcService::new(
                config.storage_root.clone(),
                crate::services::gc::GcConfig::default(),
            )))
        } else {
            None
        };
        let integrity = if config.integrity_enabled {
            Some(Arc::new(IntegrityService::new(
                storage.clone(),
                &config.storage_root,
                crate::services::integrity::IntegrityConfig::default(),
            )))
        } else {
            None
        };
        let metrics = if config.metrics_enabled {
            Some(Arc::new(MetricsService::new(
                &config.storage_root,
                crate::services::metrics::MetricsConfig::default(),
            )))
        } else {
            None
        };
        Self {
            config,
            storage,
            iam,
            encryption: None,
            kms: None,
            gc,
            integrity,
            metrics,
        }
    }
    /// Builds state as [`AppState::new`] does, then initializes KMS and/or
    /// envelope encryption from key material under
    /// `.myfsio.sys/keys`.
    ///
    /// Crypto init failures are logged and leave the corresponding field
    /// `None` rather than aborting startup (the server degrades to running
    /// without that feature).
    pub async fn new_with_encryption(config: ServerConfig) -> Self {
        let mut state = Self::new(config.clone());
        let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");
        let kms = if config.kms_enabled {
            match KmsService::new(&keys_dir).await {
                Ok(k) => Some(Arc::new(k)),
                Err(e) => {
                    tracing::error!("Failed to initialize KMS: {}", e);
                    None
                }
            }
        } else {
            None
        };
        let encryption = if config.encryption_enabled {
            match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
                Ok(master_key) => {
                    // Encryption can delegate key operations to KMS when both
                    // features are enabled.
                    Some(Arc::new(EncryptionService::new(master_key, kms.clone())))
                }
                Err(e) => {
                    tracing::error!("Failed to initialize encryption: {}", e);
                    None
                }
            }
        } else {
            None
        };
        state.encryption = encryption;
        state.kms = kms;
        state
    }
}

File diff suppressed because it is too large Load Diff