Fix S3 conformance: XML config round-trip, Suspended versioning, ListVersions pagination, per-bucket CORS, canned ACL/SSE rejection, checksum attrs, request logging

This commit is contained in:
2026-04-24 13:09:30 +08:00
parent f2df64479c
commit 1ea6dfae07
10 changed files with 898 additions and 161 deletions

View File

@@ -135,11 +135,31 @@ pub struct Tag {
pub value: String,
}
/// Bucket versioning state as exposed by the S3 `?versioning` API.
///
/// `Disabled` means versioning was never enabled; `Suspended` means it was
/// turned on at some point and later switched off, which S3 treats
/// differently from never-enabled.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
pub enum VersioningStatus {
    #[default]
    Disabled,
    Enabled,
    Suspended,
}

impl VersioningStatus {
    /// True only while versioning is currently `Enabled`.
    pub fn is_enabled(self) -> bool {
        self == VersioningStatus::Enabled
    }

    /// True when versioning has ever been turned on (`Enabled` or
    /// `Suspended`); stored version records may exist in either state.
    pub fn is_active(self) -> bool {
        !matches!(self, VersioningStatus::Disabled)
    }
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
#[serde(default)]
pub versioning_enabled: bool,
#[serde(default)]
pub versioning_suspended: bool,
#[serde(default)]
pub tags: Vec<Tag>,
#[serde(default)]
pub cors: Option<serde_json::Value>,
@@ -165,6 +185,35 @@ pub struct BucketConfig {
pub replication: Option<serde_json::Value>,
}
impl BucketConfig {
    /// Derives the tri-state versioning status from the two persisted
    /// boolean flags; `versioning_enabled` wins when both happen to be set.
    pub fn versioning_status(&self) -> VersioningStatus {
        match (self.versioning_enabled, self.versioning_suspended) {
            (true, _) => VersioningStatus::Enabled,
            (false, true) => VersioningStatus::Suspended,
            (false, false) => VersioningStatus::Disabled,
        }
    }

    /// Writes `status` back into the boolean flag pair, keeping the two
    /// fields mutually exclusive.
    pub fn set_versioning_status(&mut self, status: VersioningStatus) {
        let (enabled, suspended) = match status {
            VersioningStatus::Enabled => (true, false),
            VersioningStatus::Suspended => (false, true),
            VersioningStatus::Disabled => (false, false),
        };
        self.versioning_enabled = enabled;
        self.versioning_suspended = suspended;
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
pub max_bytes: Option<u64>,

View File

@@ -232,7 +232,7 @@ impl ServerConfig {
let stream_chunk_size = parse_usize_env("STREAM_CHUNK_SIZE", 1_048_576);
let request_body_timeout_secs = parse_u64_env("REQUEST_BODY_TIMEOUT_SECONDS", 60);
let ratelimit_default =
parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(500, 60));
parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(5000, 60));
let ratelimit_list_buckets =
parse_rate_limit_env("RATE_LIMIT_LIST_BUCKETS", ratelimit_default);
let ratelimit_bucket_ops =
@@ -407,11 +407,11 @@ impl Default for ServerConfig {
bulk_delete_max_keys: 1000,
stream_chunk_size: 1_048_576,
request_body_timeout_secs: 60,
ratelimit_default: RateLimitSetting::new(500, 60),
ratelimit_list_buckets: RateLimitSetting::new(500, 60),
ratelimit_bucket_ops: RateLimitSetting::new(500, 60),
ratelimit_object_ops: RateLimitSetting::new(500, 60),
ratelimit_head_ops: RateLimitSetting::new(500, 60),
ratelimit_default: RateLimitSetting::new(5000, 60),
ratelimit_list_buckets: RateLimitSetting::new(5000, 60),
ratelimit_bucket_ops: RateLimitSetting::new(5000, 60),
ratelimit_object_ops: RateLimitSetting::new(5000, 60),
ratelimit_head_ops: RateLimitSetting::new(5000, 60),
ratelimit_admin: RateLimitSetting::new(60, 60),
ratelimit_storage_uri: "memory://".to_string(),
ui_enabled: true,
@@ -589,7 +589,7 @@ mod tests {
assert_eq!(config.object_key_max_length_bytes, 1024);
assert_eq!(config.object_tag_limit, 50);
assert_eq!(config.ratelimit_default, RateLimitSetting::new(500, 60));
assert_eq!(config.ratelimit_default, RateLimitSetting::new(5000, 60));
std::env::remove_var("OBJECT_TAG_LIMIT");
std::env::remove_var("RATE_LIMIT_DEFAULT");

View File

@@ -20,6 +20,13 @@ fn xml_response(status: StatusCode, xml: String) -> Response {
(status, [("content-type", "application/xml")], xml).into_response()
}
fn stored_xml(value: &serde_json::Value) -> String {
match value {
serde_json::Value::String(s) => s.clone(),
other => other.to_string(),
}
}
fn storage_err(err: myfsio_storage::error::StorageError) -> Response {
let s3err = S3Error::from(err);
let status =
@@ -52,17 +59,31 @@ fn custom_xml_error(status: StatusCode, code: &str, message: &str) -> Response {
}
pub async fn get_versioning(state: &AppState, bucket: &str) -> Response {
match state.storage.is_versioning_enabled(bucket).await {
Ok(enabled) => {
let status_str = if enabled { "Enabled" } else { "Suspended" };
let xml = format!(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>{}</Status>\
</VersioningConfiguration>",
status_str
);
xml_response(StatusCode::OK, xml)
match state.storage.get_versioning_status(bucket).await {
Ok(status) => {
let body = match status {
myfsio_common::types::VersioningStatus::Enabled => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>Enabled</Status>\
</VersioningConfiguration>"
.to_string()
}
myfsio_common::types::VersioningStatus::Suspended => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<Status>Suspended</Status>\
</VersioningConfiguration>"
.to_string()
}
myfsio_common::types::VersioningStatus::Disabled => {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
</VersioningConfiguration>"
.to_string()
}
};
xml_response(StatusCode::OK, body)
}
Err(e) => storage_err(e),
}
@@ -80,9 +101,22 @@ pub async fn put_versioning(state: &AppState, bucket: &str, body: Body) -> Respo
};
let xml_str = String::from_utf8_lossy(&body_bytes);
let enabled = xml_str.contains("<Status>Enabled</Status>");
let status = if xml_str.contains("<Status>Enabled</Status>") {
myfsio_common::types::VersioningStatus::Enabled
} else if xml_str.contains("<Status>Suspended</Status>") {
myfsio_common::types::VersioningStatus::Suspended
} else {
return xml_response(
StatusCode::BAD_REQUEST,
S3Error::new(
S3ErrorCode::MalformedXML,
"VersioningConfiguration Status must be Enabled or Suspended",
)
.to_xml(),
);
};
match state.storage.set_versioning(bucket, enabled).await {
match state.storage.set_versioning_status(bucket, status).await {
Ok(()) => StatusCode::OK.into_response(),
Err(e) => storage_err(e),
}
@@ -151,7 +185,7 @@ pub async fn get_cors(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(cors) = &config.cors {
xml_response(StatusCode::OK, cors.to_string())
xml_response(StatusCode::OK, stored_xml(cors))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -214,7 +248,7 @@ pub async fn get_encryption(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(enc) = &config.encryption {
xml_response(StatusCode::OK, enc.to_string())
xml_response(StatusCode::OK, stored_xml(enc))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -263,7 +297,7 @@ pub async fn get_lifecycle(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(lc) = &config.lifecycle {
xml_response(StatusCode::OK, lc.to_string())
xml_response(StatusCode::OK, stored_xml(lc))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -490,10 +524,7 @@ pub async fn get_replication(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(replication) = &config.replication {
match replication {
serde_json::Value::String(s) => xml_response(StatusCode::OK, s.clone()),
other => xml_response(StatusCode::OK, other.to_string()),
}
xml_response(StatusCode::OK, stored_xml(replication))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -586,7 +617,7 @@ pub async fn get_acl(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(acl) = &config.acl {
xml_response(StatusCode::OK, acl.to_string())
xml_response(StatusCode::OK, stored_xml(acl))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -626,7 +657,7 @@ pub async fn get_website(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(ws) = &config.website {
xml_response(StatusCode::OK, ws.to_string())
xml_response(StatusCode::OK, stored_xml(ws))
} else {
xml_response(
StatusCode::NOT_FOUND,
@@ -678,7 +709,7 @@ pub async fn get_object_lock(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(ol) = &config.object_lock {
xml_response(StatusCode::OK, ol.to_string())
xml_response(StatusCode::OK, stored_xml(ol))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -695,7 +726,7 @@ pub async fn get_notification(state: &AppState, bucket: &str) -> Response {
match state.storage.get_bucket_config(bucket).await {
Ok(config) => {
if let Some(n) = &config.notification {
xml_response(StatusCode::OK, n.to_string())
xml_response(StatusCode::OK, stored_xml(n))
} else {
let xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
@@ -1035,22 +1066,23 @@ pub async fn list_object_versions(
state: &AppState,
bucket: &str,
prefix: Option<&str>,
delimiter: Option<&str>,
key_marker: Option<&str>,
version_id_marker: Option<&str>,
max_keys: usize,
) -> Response {
match state.storage.list_buckets().await {
Ok(buckets) => {
if !buckets.iter().any(|b| b.name == bucket) {
return storage_err(myfsio_storage::error::StorageError::BucketNotFound(
bucket.to_string(),
));
}
match state.storage.bucket_exists(bucket).await {
Ok(true) => {}
Ok(false) => {
return storage_err(myfsio_storage::error::StorageError::BucketNotFound(
bucket.to_string(),
));
}
Err(e) => return storage_err(e),
}
let fetch_limit = max_keys.saturating_add(1).max(1);
let params = myfsio_common::types::ListParams {
max_keys: fetch_limit,
max_keys: usize::MAX,
prefix: prefix.map(ToOwned::to_owned),
..Default::default()
};
@@ -1059,7 +1091,8 @@ pub async fn list_object_versions(
Ok(result) => result,
Err(e) => return storage_err(e),
};
let objects = object_result.objects;
let live_objects = object_result.objects;
let archived_versions = match state
.storage
.list_bucket_object_versions(bucket, prefix)
@@ -1069,107 +1102,215 @@ pub async fn list_object_versions(
Err(e) => return storage_err(e),
};
#[derive(Clone)]
struct Entry {
key: String,
version_id: String,
last_modified: chrono::DateTime<chrono::Utc>,
etag: Option<String>,
size: u64,
storage_class: String,
is_delete_marker: bool,
}
let mut entries: Vec<Entry> = Vec::with_capacity(live_objects.len() + archived_versions.len());
for obj in &live_objects {
entries.push(Entry {
key: obj.key.clone(),
version_id: obj.version_id.clone().unwrap_or_else(|| "null".to_string()),
last_modified: obj.last_modified,
etag: obj.etag.clone(),
size: obj.size,
storage_class: obj
.storage_class
.clone()
.unwrap_or_else(|| "STANDARD".to_string()),
is_delete_marker: false,
});
}
for version in &archived_versions {
entries.push(Entry {
key: version.key.clone(),
version_id: version.version_id.clone(),
last_modified: version.last_modified,
etag: version.etag.clone(),
size: version.size,
storage_class: "STANDARD".to_string(),
is_delete_marker: version.is_delete_marker,
});
}
entries.sort_by(|a, b| {
a.key
.cmp(&b.key)
.then_with(|| b.last_modified.cmp(&a.last_modified))
.then_with(|| a.version_id.cmp(&b.version_id))
});
let mut latest_marked: std::collections::HashSet<String> = std::collections::HashSet::new();
let mut is_latest_flags: Vec<bool> = Vec::with_capacity(entries.len());
for entry in &entries {
if latest_marked.insert(entry.key.clone()) {
is_latest_flags.push(true);
} else {
is_latest_flags.push(false);
}
}
let km = key_marker.unwrap_or("");
let vim = version_id_marker.unwrap_or("");
let start_index = if km.is_empty() {
0
} else if vim.is_empty() {
entries
.iter()
.position(|e| e.key.as_str() > km)
.unwrap_or(entries.len())
} else if let Some(pos) = entries
.iter()
.position(|e| e.key == km && e.version_id == vim)
{
pos + 1
} else {
entries
.iter()
.position(|e| e.key.as_str() > km)
.unwrap_or(entries.len())
};
let delim = delimiter.unwrap_or("");
let prefix_str = prefix.unwrap_or("");
let mut common_prefixes: Vec<String> = Vec::new();
let mut seen_prefixes: std::collections::HashSet<String> = std::collections::HashSet::new();
let mut rendered = String::new();
let mut count = 0usize;
let mut is_truncated = false;
let mut next_key_marker: Option<String> = None;
let mut next_version_id_marker: Option<String> = None;
let mut last_emitted: Option<(String, String)> = None;
let mut idx = start_index;
while idx < entries.len() {
let entry = &entries[idx];
let is_latest = is_latest_flags[idx];
if !delim.is_empty() {
let rest = entry.key.strip_prefix(prefix_str).unwrap_or(&entry.key);
if let Some(delim_pos) = rest.find(delim) {
let grouped = entry.key[..prefix_str.len() + delim_pos + delim.len()].to_string();
if seen_prefixes.contains(&grouped) {
idx += 1;
continue;
}
if count >= max_keys {
is_truncated = true;
if let Some((k, v)) = last_emitted.clone() {
next_key_marker = Some(k);
next_version_id_marker = Some(v);
}
break;
}
common_prefixes.push(grouped.clone());
seen_prefixes.insert(grouped.clone());
count += 1;
let mut group_last = (entry.key.clone(), entry.version_id.clone());
idx += 1;
while idx < entries.len() && entries[idx].key.starts_with(&grouped) {
group_last = (entries[idx].key.clone(), entries[idx].version_id.clone());
idx += 1;
}
last_emitted = Some(group_last);
continue;
}
}
if count >= max_keys {
is_truncated = true;
if let Some((k, v)) = last_emitted.clone() {
next_key_marker = Some(k);
next_version_id_marker = Some(v);
}
break;
}
let tag = if entry.is_delete_marker {
"DeleteMarker"
} else {
"Version"
};
rendered.push_str(&format!("<{}>", tag));
rendered.push_str(&format!("<Key>{}</Key>", xml_escape(&entry.key)));
rendered.push_str(&format!(
"<VersionId>{}</VersionId>",
xml_escape(&entry.version_id)
));
rendered.push_str(&format!("<IsLatest>{}</IsLatest>", is_latest));
rendered.push_str(&format!(
"<LastModified>{}</LastModified>",
myfsio_xml::response::format_s3_datetime(&entry.last_modified)
));
if !entry.is_delete_marker {
if let Some(ref etag) = entry.etag {
rendered.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
}
rendered.push_str(&format!("<Size>{}</Size>", entry.size));
rendered.push_str(&format!(
"<StorageClass>{}</StorageClass>",
xml_escape(&entry.storage_class)
));
}
rendered.push_str(&format!("</{}>", tag));
last_emitted = Some((entry.key.clone(), entry.version_id.clone()));
count += 1;
idx += 1;
}
let mut xml = String::from(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">",
);
xml.push_str(&format!("<Name>{}</Name>", xml_escape(bucket)));
xml.push_str(&format!(
"<Prefix>{}</Prefix>",
xml_escape(prefix.unwrap_or(""))
));
xml.push_str(&format!("<Prefix>{}</Prefix>", xml_escape(prefix_str)));
if !km.is_empty() {
xml.push_str(&format!("<KeyMarker>{}</KeyMarker>", xml_escape(km)));
} else {
xml.push_str("<KeyMarker></KeyMarker>");
}
if !vim.is_empty() {
xml.push_str(&format!(
"<VersionIdMarker>{}</VersionIdMarker>",
xml_escape(vim)
));
} else {
xml.push_str("<VersionIdMarker></VersionIdMarker>");
}
xml.push_str(&format!("<MaxKeys>{}</MaxKeys>", max_keys));
let current_count = objects.len().min(max_keys);
let remaining = max_keys.saturating_sub(current_count);
let archived_count = archived_versions.len().min(remaining);
let is_truncated = object_result.is_truncated
|| objects.len() > current_count
|| archived_versions.len() > archived_count;
if !delim.is_empty() {
xml.push_str(&format!("<Delimiter>{}</Delimiter>", xml_escape(delim)));
}
xml.push_str(&format!("<IsTruncated>{}</IsTruncated>", is_truncated));
let current_keys: std::collections::HashSet<String> = objects
.iter()
.take(current_count)
.map(|o| o.key.clone())
.collect();
let mut latest_archived_per_key: std::collections::HashMap<String, String> =
std::collections::HashMap::new();
for v in archived_versions.iter().take(archived_count) {
if current_keys.contains(&v.key) {
continue;
}
let existing = latest_archived_per_key.get(&v.key).cloned();
match existing {
None => {
latest_archived_per_key.insert(v.key.clone(), v.version_id.clone());
}
Some(existing_id) => {
let existing_ts = archived_versions
.iter()
.find(|x| x.key == v.key && x.version_id == existing_id)
.map(|x| x.last_modified)
.unwrap_or(v.last_modified);
if v.last_modified > existing_ts {
latest_archived_per_key.insert(v.key.clone(), v.version_id.clone());
}
}
}
if let Some(ref nk) = next_key_marker {
xml.push_str(&format!(
"<NextKeyMarker>{}</NextKeyMarker>",
xml_escape(nk)
));
}
if let Some(ref nv) = next_version_id_marker {
xml.push_str(&format!(
"<NextVersionIdMarker>{}</NextVersionIdMarker>",
xml_escape(nv)
));
}
for obj in objects.iter().take(current_count) {
let version_id = obj.version_id.clone().unwrap_or_else(|| "null".to_string());
xml.push_str("<Version>");
xml.push_str(&format!("<Key>{}</Key>", xml_escape(&obj.key)));
xml.push_str(&rendered);
for cp in &common_prefixes {
xml.push_str(&format!(
"<VersionId>{}</VersionId>",
xml_escape(&version_id)
"<CommonPrefixes><Prefix>{}</Prefix></CommonPrefixes>",
xml_escape(cp)
));
xml.push_str("<IsLatest>true</IsLatest>");
xml.push_str(&format!(
"<LastModified>{}</LastModified>",
myfsio_xml::response::format_s3_datetime(&obj.last_modified)
));
if let Some(ref etag) = obj.etag {
xml.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
}
xml.push_str(&format!("<Size>{}</Size>", obj.size));
xml.push_str(&format!(
"<StorageClass>{}</StorageClass>",
xml_escape(obj.storage_class.as_deref().unwrap_or("STANDARD"))
));
xml.push_str("</Version>");
}
for version in archived_versions.iter().take(archived_count) {
let is_latest = latest_archived_per_key
.get(&version.key)
.map(|id| id == &version.version_id)
.unwrap_or(false);
let tag = if version.is_delete_marker {
"DeleteMarker"
} else {
"Version"
};
xml.push_str(&format!("<{}>", tag));
xml.push_str(&format!("<Key>{}</Key>", xml_escape(&version.key)));
xml.push_str(&format!(
"<VersionId>{}</VersionId>",
xml_escape(&version.version_id)
));
xml.push_str(&format!("<IsLatest>{}</IsLatest>", is_latest));
xml.push_str(&format!(
"<LastModified>{}</LastModified>",
myfsio_xml::response::format_s3_datetime(&version.last_modified)
));
if !version.is_delete_marker {
if let Some(ref etag) = version.etag {
xml.push_str(&format!("<ETag>\"{}\"</ETag>", xml_escape(etag)));
}
xml.push_str(&format!("<Size>{}</Size>", version.size));
xml.push_str("<StorageClass>STANDARD</StorageClass>");
}
xml.push_str(&format!("</{}>", tag));
}
xml.push_str("</ListVersionsResult>");

View File

@@ -319,6 +319,10 @@ pub struct BucketQuery {
pub notification: Option<String>,
pub logging: Option<String>,
pub versions: Option<String>,
#[serde(rename = "key-marker")]
pub key_marker: Option<String>,
#[serde(rename = "version-id-marker")]
pub version_id_marker: Option<String>,
}
async fn virtual_host_bucket_from_headers(state: &AppState, headers: &HeaderMap) -> Option<String> {
@@ -410,6 +414,9 @@ pub async fn get_bucket(
&state,
&bucket,
query.prefix.as_deref(),
query.delimiter.as_deref(),
query.key_marker.as_deref(),
query.version_id_marker.as_deref(),
query.max_keys.unwrap_or(1000),
)
.await;
@@ -966,6 +973,71 @@ fn insert_standard_object_metadata(
Ok(())
}
// Canned ACL names accepted on the `x-amz-acl` request header; requests with
// any other value are rejected with InvalidArgument (see
// `apply_canned_acl_header`). NOTE(review): AWS also documents
// "log-delivery-write" as a canned ACL — confirm whether it should be
// accepted here.
const CANNED_ACL_VALUES: &[&str] = &[
    "private",
    "public-read",
    "public-read-write",
    "authenticated-read",
    "bucket-owner-read",
    "bucket-owner-full-control",
    "aws-exec-read",
];
/// Applies an `x-amz-acl` canned ACL request header to the object metadata.
///
/// A missing or blank header is a no-op. An unrecognized ACL name yields an
/// `InvalidArgument` S3 error response. On success the resolved ACL document
/// is stored into `metadata`.
fn apply_canned_acl_header(
    headers: &HeaderMap,
    metadata: &mut HashMap<String, String>,
) -> Result<(), Response> {
    let header = headers
        .get("x-amz-acl")
        .and_then(|v| v.to_str().ok())
        .map(str::trim)
        .filter(|v| !v.is_empty());
    let Some(value) = header else {
        return Ok(());
    };
    // Accepted names are matched case-insensitively against the known set.
    let known = CANNED_ACL_VALUES
        .iter()
        .any(|candidate| candidate.eq_ignore_ascii_case(value));
    if !known {
        return Err(s3_error_response(S3Error::new(
            S3ErrorCode::InvalidArgument,
            format!("Unsupported canned ACL: {}", value),
        )));
    }
    let acl = crate::services::acl::create_canned_acl(value, "myfsio");
    crate::services::acl::store_object_acl(metadata, &acl);
    Ok(())
}
/// Rejects requests whose `x-amz-server-side-encryption` header asks for
/// something this server cannot honor.
///
/// Only `AES256` and `aws:kms` are accepted; `aws:kms` additionally requires
/// KMS to be enabled in the server config, and any SSE request requires the
/// encryption subsystem to be configured. Absent or blank headers pass.
fn validate_sse_request(state: &AppState, headers: &HeaderMap) -> Result<(), Response> {
    let requested = headers
        .get("x-amz-server-side-encryption")
        .and_then(|v| v.to_str().ok())
        .map(str::trim)
        .filter(|s| !s.is_empty());
    let Some(alg) = requested else {
        return Ok(());
    };
    if !matches!(alg, "AES256" | "aws:kms") {
        return Err(s3_error_response(S3Error::new(
            S3ErrorCode::InvalidArgument,
            format!("Unsupported server-side encryption algorithm: {}", alg),
        )));
    }
    if alg == "aws:kms" && !state.config.kms_enabled {
        return Err(s3_error_response(S3Error::new(
            S3ErrorCode::InvalidArgument,
            "KMS is not enabled on this server",
        )));
    }
    if state.encryption.is_none() {
        return Err(s3_error_response(S3Error::new(
            S3ErrorCode::InvalidArgument,
            "Server-side encryption is not enabled on this server",
        )));
    }
    Ok(())
}
fn apply_stored_response_headers(headers: &mut HeaderMap, metadata: &HashMap<String, String>) {
for (_, metadata_key, response_header) in internal_header_pairs() {
if let Some(value) = metadata
@@ -1289,6 +1361,12 @@ pub async fn put_object(
if let Err(response) = insert_standard_object_metadata(&headers, &mut metadata) {
return response;
}
if let Err(response) = apply_canned_acl_header(&headers, &mut metadata) {
return response;
}
if let Err(response) = validate_sse_request(&state, &headers) {
return response;
}
for (name, value) in headers.iter() {
let name_str = name.as_str();
@@ -2143,6 +2221,12 @@ async fn object_attributes_handler(
.collect();
let all = attrs.is_empty();
let stored_meta = state
.storage
.get_object_metadata(bucket, key)
.await
.unwrap_or_default();
let mut xml = String::from("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
xml.push_str("<GetObjectAttributesResponse xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
@@ -2159,8 +2243,32 @@ async fn object_attributes_handler(
if all || attrs.contains("objectsize") {
xml.push_str(&format!("<ObjectSize>{}</ObjectSize>", meta.size));
}
if attrs.contains("checksum") {
xml.push_str("<Checksum></Checksum>");
if all || attrs.contains("checksum") {
let mut checksum_xml = String::new();
for (algo, tag) in [
("sha256", "ChecksumSHA256"),
("sha1", "ChecksumSHA1"),
("crc32", "ChecksumCRC32"),
("crc32c", "ChecksumCRC32C"),
("crc64nvme", "ChecksumCRC64NVME"),
] {
let key_name = format!("__checksum_{}__", algo);
if let Some(value) = stored_meta.get(&key_name) {
let trimmed = value.trim();
if !trimmed.is_empty() {
checksum_xml.push_str(&format!(
"<{tag}>{}</{tag}>",
xml_escape(trimmed),
tag = tag
));
}
}
}
if !checksum_xml.is_empty() {
xml.push_str("<Checksum>");
xml.push_str(&checksum_xml);
xml.push_str("</Checksum>");
}
}
if attrs.contains("objectparts") {
xml.push_str("<ObjectParts></ObjectParts>");

View File

@@ -588,6 +588,11 @@ pub fn create_router(state: state::AppState) -> Router {
.merge(admin_router)
.layer(axum::middleware::from_fn(middleware::server_header))
.layer(cors_layer(&state.config))
.layer(axum::middleware::from_fn_with_state(
state.clone(),
middleware::bucket_cors_layer,
))
.layer(axum::middleware::from_fn(middleware::request_log_layer))
.layer(tower_http::compression::CompressionLayer::new())
.layer(tower_http::timeout::RequestBodyTimeoutLayer::new(
request_body_timeout,

View File

@@ -0,0 +1,281 @@
use axum::extract::{Request, State};
use axum::http::{HeaderMap, HeaderValue, Method, StatusCode};
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use myfsio_storage::traits::StorageEngine;
use crate::state::AppState;
/// One `<CORSRule>` parsed from a bucket's stored CORS configuration XML.
#[derive(Debug, Default, Clone)]
struct CorsRule {
    // Origin patterns; "*" or a wildcard pattern (see `match_origin`).
    allowed_origins: Vec<String>,
    // HTTP methods, normalized to upper case at parse time.
    allowed_methods: Vec<String>,
    // Header patterns matched case-insensitively; "*" allows any header.
    allowed_headers: Vec<String>,
    // Values emitted as `Access-Control-Expose-Headers` on matched responses.
    expose_headers: Vec<String>,
    // Value emitted as `Access-Control-Max-Age` on preflight responses.
    max_age_seconds: Option<u64>,
}
/// Parses a CORS configuration XML document into a list of rules.
///
/// Unparseable XML yields an empty rule list (treated by callers as "no CORS
/// configuration"); unknown child elements inside a `<CORSRule>` are ignored.
fn parse_cors_config(xml: &str) -> Vec<CorsRule> {
    let Ok(doc) = roxmltree::Document::parse(xml) else {
        return Vec::new();
    };
    doc.descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "CORSRule")
        .map(|rule_node| {
            let mut rule = CorsRule::default();
            for child in rule_node.children().filter(|n| n.is_element()) {
                let text = child.text().unwrap_or("").trim().to_string();
                match child.tag_name().name() {
                    "AllowedOrigin" => rule.allowed_origins.push(text),
                    // Methods are compared case-insensitively later; store upper.
                    "AllowedMethod" => rule.allowed_methods.push(text.to_ascii_uppercase()),
                    "AllowedHeader" => rule.allowed_headers.push(text),
                    "ExposeHeader" => rule.expose_headers.push(text),
                    "MaxAgeSeconds" => {
                        if let Ok(secs) = text.parse::<u64>() {
                            rule.max_age_seconds = Some(secs);
                        }
                    }
                    _ => {}
                }
            }
            rule
        })
        .collect()
}
/// Matches a request `Origin` against an AllowedOrigin pattern.
///
/// S3 permits at most one `*` wildcard anywhere in the pattern — including
/// the common `http://*.example.com` form, which the previous implementation
/// (leading/trailing `*` only) never matched. The wildcard matches any
/// substring, including the empty string, so a bare `*` matches every origin
/// and a pattern without `*` must match exactly.
fn match_origin(pattern: &str, origin: &str) -> bool {
    match pattern.find('*') {
        // No wildcard: require an exact match.
        None => pattern == origin,
        Some(star) => {
            let prefix = &pattern[..star];
            let suffix = &pattern[star + 1..];
            // The origin must be long enough that the fixed prefix and
            // suffix do not overlap, then both ends must match around the
            // wildcard.
            origin.len() >= prefix.len() + suffix.len()
                && origin.starts_with(prefix)
                && origin.ends_with(suffix)
        }
    }
}
/// Matches a request header name against an AllowedHeader pattern; `*`
/// allows any header, otherwise the comparison is ASCII case-insensitive.
fn match_header(pattern: &str, header: &str) -> bool {
    pattern == "*" || pattern.eq_ignore_ascii_case(header)
}
/// Finds the first CORS rule that permits a preflight request: the origin,
/// the requested method, and every requested header must all be allowed.
fn find_matching_rule<'a>(
    rules: &'a [CorsRule],
    origin: &str,
    method: &str,
    request_headers: &[&str],
) -> Option<&'a CorsRule> {
    rules.iter().find(|rule| {
        rule.allowed_origins.iter().any(|p| match_origin(p, origin))
            && rule
                .allowed_methods
                .iter()
                .any(|m| m.eq_ignore_ascii_case(method))
            && request_headers.iter().all(|header| {
                rule.allowed_headers
                    .iter()
                    .any(|pattern| match_header(pattern, header))
            })
    })
}
/// Finds the first CORS rule matching an actual (non-preflight) request by
/// origin and method only; header checks apply solely to preflights.
fn find_matching_rule_for_actual<'a>(
    rules: &'a [CorsRule],
    origin: &str,
    method: &str,
) -> Option<&'a CorsRule> {
    rules.iter().find(|rule| {
        let origin_ok = rule.allowed_origins.iter().any(|p| match_origin(p, origin));
        let method_ok = rule
            .allowed_methods
            .iter()
            .any(|m| m.eq_ignore_ascii_case(method));
        origin_ok && method_ok
    })
}
/// Extracts a candidate bucket name from a path-style request URI.
///
/// Returns `None` for the root path, for reserved admin/UI/KMS prefixes, and
/// for first segments that fail bucket-name validation.
fn bucket_from_path(path: &str) -> Option<&str> {
    let trimmed = path.trim_start_matches('/');
    if trimmed.is_empty() {
        return None;
    }
    let reserved = ["admin/", "myfsio/", "kms/"];
    if reserved.iter().any(|prefix| trimmed.starts_with(prefix)) {
        return None;
    }
    let first = trimmed.split('/').next().unwrap_or("");
    // `validate_bucket_name` yielding Some is treated as "invalid name" here.
    if myfsio_storage::validation::validate_bucket_name(first).is_some() {
        return None;
    }
    Some(first)
}
/// Resolves a bucket from a virtual-host style `Host` header
/// (`<bucket>.<domain>`), returning it only when the bucket actually exists.
async fn bucket_from_host(state: &AppState, headers: &HeaderMap) -> Option<String> {
    let raw_host = headers.get("host").and_then(|value| value.to_str().ok())?;
    // Drop any `:port` suffix, then normalize whitespace and case.
    let host = raw_host.split(':').next()?.trim().to_ascii_lowercase();
    // Virtual-host requests must have at least one dot; the first label is
    // the bucket candidate.
    let (candidate, _rest) = host.split_once('.')?;
    if myfsio_storage::validation::validate_bucket_name(candidate).is_some() {
        return None;
    }
    match state.storage.bucket_exists(candidate).await {
        Ok(true) => Some(candidate.to_string()),
        _ => None,
    }
}
/// Resolves the bucket a request targets: virtual-host style wins over
/// path-style.
async fn resolve_bucket(state: &AppState, headers: &HeaderMap, path: &str) -> Option<String> {
    match bucket_from_host(state, headers).await {
        Some(name) => Some(name),
        None => bucket_from_path(path).map(str::to_string),
    }
}
/// Stamps the origin-specific CORS response headers for a matched rule.
///
/// Replaces any pre-existing `Access-Control-Allow-Origin`/`Vary` values and
/// adds `Access-Control-Expose-Headers` when the rule lists any.
fn apply_rule_headers(headers: &mut axum::http::HeaderMap, rule: &CorsRule, origin: &str) {
    headers.remove("access-control-allow-origin");
    headers.remove("vary");
    // The echoed origin may contain bytes invalid in a header value; skip it
    // silently in that case, mirroring the lenient insert below.
    if let Ok(value) = HeaderValue::from_str(origin) {
        headers.insert("access-control-allow-origin", value);
    }
    headers.insert("vary", HeaderValue::from_static("Origin"));
    if rule.expose_headers.is_empty() {
        return;
    }
    let joined = rule.expose_headers.join(", ");
    if let Ok(value) = HeaderValue::from_str(&joined) {
        headers.remove("access-control-expose-headers");
        headers.insert("access-control-expose-headers", value);
    }
}
/// Removes every CORS response header; used when a cross-origin request's
/// origin/method combination matches no configured rule.
fn strip_cors_response_headers(headers: &mut HeaderMap) {
    const CORS_HEADERS: [&str; 6] = [
        "access-control-allow-origin",
        "access-control-allow-credentials",
        "access-control-expose-headers",
        "access-control-allow-methods",
        "access-control-allow-headers",
        "access-control-max-age",
    ];
    for name in CORS_HEADERS {
        headers.remove(name);
    }
}
/// Per-bucket CORS middleware.
///
/// Resolves the target bucket (virtual-host or path style), loads its stored
/// CORS configuration when the request carries an `Origin` header, answers
/// preflight OPTIONS requests directly, and stamps or strips CORS headers on
/// actual responses according to the first matching rule.
pub async fn bucket_cors_layer(
    State(state): State<AppState>,
    req: Request,
    next: Next,
) -> Response {
    let path = req.uri().path().to_string();
    // No bucket in the request → not a bucket operation; pass straight through.
    let bucket = match resolve_bucket(&state, req.headers(), &path).await {
        Some(name) => name,
        None => return next.run(req).await,
    };
    let origin = req
        .headers()
        .get("origin")
        .and_then(|v| v.to_str().ok())
        .map(|s| s.to_string());
    // Only fetch/parse the bucket config for cross-origin requests; an empty
    // or unparseable rule set is treated the same as no configuration.
    let bucket_rules = if origin.is_some() {
        match state.storage.get_bucket_config(&bucket).await {
            Ok(cfg) => cfg
                .cors
                .as_ref()
                .map(|v| match v {
                    // Configs are stored either as raw XML text or as JSON.
                    serde_json::Value::String(s) => s.clone(),
                    other => other.to_string(),
                })
                .map(|xml| parse_cors_config(&xml))
                .filter(|rules| !rules.is_empty()),
            Err(_) => None,
        }
    } else {
        None
    };
    // A CORS preflight is OPTIONS plus Access-Control-Request-Method.
    let is_preflight = req.method() == Method::OPTIONS
        && req.headers().contains_key("access-control-request-method");
    if is_preflight {
        if let (Some(origin), Some(rules)) = (origin.as_deref(), bucket_rules.as_ref()) {
            let req_method = req
                .headers()
                .get("access-control-request-method")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            let req_headers_raw = req
                .headers()
                .get("access-control-request-headers")
                .and_then(|v| v.to_str().ok())
                .unwrap_or("");
            let req_headers: Vec<&str> = req_headers_raw
                .split(',')
                .map(str::trim)
                .filter(|s| !s.is_empty())
                .collect();
            if let Some(rule) = find_matching_rule(rules, origin, req_method, &req_headers) {
                // Matched: answer the preflight ourselves with 204 and the
                // rule's allow-lists.
                let mut resp = StatusCode::NO_CONTENT.into_response();
                apply_rule_headers(resp.headers_mut(), rule, origin);
                let methods_value = rule.allowed_methods.join(", ");
                if let Ok(val) = HeaderValue::from_str(&methods_value) {
                    resp.headers_mut()
                        .insert("access-control-allow-methods", val);
                }
                // With a "*" header rule, echo back exactly what was requested.
                let headers_value = if rule.allowed_headers.iter().any(|h| h == "*") {
                    req_headers_raw.to_string()
                } else {
                    rule.allowed_headers.join(", ")
                };
                if !headers_value.is_empty() {
                    if let Ok(val) = HeaderValue::from_str(&headers_value) {
                        resp.headers_mut()
                            .insert("access-control-allow-headers", val);
                    }
                }
                if let Some(max_age) = rule.max_age_seconds {
                    if let Ok(val) = HeaderValue::from_str(&max_age.to_string()) {
                        resp.headers_mut().insert("access-control-max-age", val);
                    }
                }
                return resp;
            }
            // CORS is configured but no rule allows this preflight.
            return (StatusCode::FORBIDDEN, "CORSResponse: CORS is not enabled").into_response();
        }
        // NOTE(review): a preflight against a bucket with no stored CORS
        // config falls through to the regular handler instead of returning
        // 403 — confirm this is intended.
    }
    let method = req.method().clone();
    let mut resp = next.run(req).await;
    // Actual (non-preflight) request: decorate the response on a match,
    // otherwise strip any CORS headers a lower layer may have added.
    if let (Some(origin), Some(rules)) = (origin.as_deref(), bucket_rules.as_ref()) {
        if let Some(rule) = find_matching_rule_for_actual(rules, origin, method.as_str()) {
            apply_rule_headers(resp.headers_mut(), rule, origin);
        } else {
            strip_cors_response_headers(resp.headers_mut());
        }
    }
    resp
}

View File

@@ -1,9 +1,11 @@
mod auth;
mod bucket_cors;
pub mod ratelimit;
pub mod session;
pub(crate) mod sha_body;
pub use auth::auth_layer;
pub use bucket_cors::bucket_cors_layer;
pub use ratelimit::{rate_limit_layer, RateLimitLayerState};
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
@@ -21,6 +23,42 @@ pub async fn server_header(req: Request, next: Next) -> Response {
resp
}
/// Access-log middleware: records the remote IP, method, URI, HTTP version,
/// status, response size and latency of every request under the
/// `myfsio::access` tracing target.
pub async fn request_log_layer(req: Request, next: Next) -> Response {
    let started = Instant::now();
    // Capture request facts before the handler consumes `req`.
    let method = req.method().clone();
    let uri = req.uri().clone();
    let version = req.version();
    let remote = match req
        .extensions()
        .get::<axum::extract::ConnectInfo<std::net::SocketAddr>>()
    {
        Some(info) => info.0.ip().to_string(),
        None => "-".to_string(),
    };
    let response = next.run(req).await;
    let status = response.status().as_u16();
    let elapsed_ms = started.elapsed().as_secs_f64() * 1000.0;
    // Content-Length is absent on streamed/chunked bodies; log 0 then.
    let bytes_out = response
        .headers()
        .get(axum::http::header::CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())
        .and_then(|v| v.parse::<u64>().ok())
        .unwrap_or(0);
    tracing::info!(
        target: "myfsio::access",
        remote = %remote,
        method = %method,
        uri = %uri,
        version = ?version,
        status,
        bytes_out,
        elapsed_ms = format!("{:.3}", elapsed_ms),
        "request"
    );
    response
}
pub async fn ui_metrics_layer(State(state): State<AppState>, req: Request, next: Next) -> Response {
let metrics = match state.metrics.clone() {
Some(m) => m,

View File

@@ -2209,6 +2209,42 @@ async fn test_bucket_versioning() {
)
.unwrap();
assert!(body.contains("VersioningConfiguration"));
assert!(!body.contains("<Status>Enabled</Status>"));
assert!(!body.contains("<Status>Suspended</Status>"));
app.clone()
.oneshot(
Request::builder()
.method(Method::PUT)
.uri("/ver-bucket?versioning")
.header("x-access-key", TEST_ACCESS_KEY)
.header("x-secret-key", TEST_SECRET_KEY)
.body(Body::from(
"<VersioningConfiguration><Status>Suspended</Status></VersioningConfiguration>",
))
.unwrap(),
)
.await
.unwrap();
let resp = app
.clone()
.oneshot(signed_request(
Method::GET,
"/ver-bucket?versioning",
Body::empty(),
))
.await
.unwrap();
let body = String::from_utf8(
resp.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.to_vec(),
)
.unwrap();
assert!(body.contains("<Status>Suspended</Status>"));
app.clone()

View File

@@ -928,11 +928,33 @@ impl FsStorageBackend {
let etag = Self::compute_etag_sync(&source).unwrap_or_default();
let live_last_modified = metadata
.get("__last_modified__")
.and_then(|value| value.parse::<f64>().ok())
.map(|mtime| {
Utc.timestamp_opt(mtime as i64, ((mtime % 1.0) * 1_000_000_000.0) as u32)
.single()
.unwrap_or_else(Utc::now)
})
.or_else(|| {
source_meta
.modified()
.ok()
.and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
.map(|d| {
Utc.timestamp_opt(d.as_secs() as i64, d.subsec_nanos())
.single()
.unwrap_or_else(Utc::now)
})
})
.unwrap_or(now);
let record = serde_json::json!({
"version_id": version_id,
"key": key,
"size": source_size,
"archived_at": now.to_rfc3339(),
"last_modified": live_last_modified.to_rfc3339(),
"etag": etag,
"metadata": metadata,
"reason": reason,
@@ -1214,8 +1236,9 @@ impl FsStorageBackend {
.and_then(Value::as_u64)
.unwrap_or(data_len);
let last_modified = record
.get("archived_at")
.get("last_modified")
.and_then(Value::as_str)
.or_else(|| record.get("archived_at").and_then(Value::as_str))
.and_then(|value| DateTime::parse_from_rfc3339(value).ok())
.map(|value| value.with_timezone(&Utc))
.unwrap_or_else(Utc::now);
@@ -1262,9 +1285,10 @@ impl FsStorageBackend {
.unwrap_or(fallback_key)
.to_string();
let size = record.get("size").and_then(Value::as_u64).unwrap_or(0);
let archived_at = record
.get("archived_at")
let last_modified = record
.get("last_modified")
.and_then(Value::as_str)
.or_else(|| record.get("archived_at").and_then(Value::as_str))
.and_then(|s| DateTime::parse_from_rfc3339(s).ok())
.map(|d| d.with_timezone(&Utc))
.unwrap_or_else(Utc::now);
@@ -1281,7 +1305,7 @@ impl FsStorageBackend {
version_id,
key,
size,
last_modified: archived_at,
last_modified,
etag,
is_latest: false,
is_delete_marker,
@@ -1570,11 +1594,9 @@ impl FsStorageBackend {
let rel_dir_prefix = if rel_dir.as_os_str().is_empty() {
String::new()
} else {
let mut s = rel_dir.to_string_lossy().into_owned();
let s = rel_dir.to_string_lossy().into_owned();
#[cfg(windows)]
{
s = s.replace('\\', "/");
}
let s = s.replace('\\', "/");
let mut decoded = fs_decode_key(&s);
if !decoded.ends_with('/') {
decoded.push('/');
@@ -1840,10 +1862,26 @@ impl FsStorageBackend {
let lock_dir = self.system_bucket_root(bucket_name).join("locks");
std::fs::create_dir_all(&lock_dir).map_err(StorageError::Io)?;
let versioning_enabled = bucket_config.versioning_enabled;
if versioning_enabled && is_overwrite {
self.archive_current_version_sync(bucket_name, key, "overwrite")
.map_err(StorageError::Io)?;
let versioning_status = bucket_config.versioning_status();
if is_overwrite {
match versioning_status {
VersioningStatus::Enabled => {
self.archive_current_version_sync(bucket_name, key, "overwrite")
.map_err(StorageError::Io)?;
}
VersioningStatus::Suspended => {
let existing_meta = self.read_metadata_sync(bucket_name, key);
let existing_vid = existing_meta
.get("__version_id__")
.map(String::as_str)
.unwrap_or("");
if !existing_vid.is_empty() && existing_vid != "null" {
self.archive_current_version_sync(bucket_name, key, "overwrite")
.map_err(StorageError::Io)?;
}
}
VersioningStatus::Disabled => {}
}
}
std::fs::rename(tmp_path, &destination).map_err(|e| {
@@ -1861,10 +1899,10 @@ impl FsStorageBackend {
.map(|d| d.as_secs_f64())
.unwrap_or(0.0);
let new_version_id = if versioning_enabled {
Some(Self::new_version_id_sync())
} else {
None
let new_version_id = match versioning_status {
VersioningStatus::Enabled => Some(Self::new_version_id_sync()),
VersioningStatus::Suspended => Some("null".to_string()),
VersioningStatus::Disabled => None,
};
let mut internal_meta = HashMap::new();
@@ -1884,7 +1922,7 @@ impl FsStorageBackend {
self.write_metadata_sync(bucket_name, key, &internal_meta)
.map_err(StorageError::Io)?;
if versioning_enabled {
if versioning_status.is_active() {
self.clear_delete_marker_sync(bucket_name, key);
}
@@ -2072,7 +2110,7 @@ impl crate::traits::StorageEngine for FsStorageBackend {
self.require_bucket(bucket)?;
let path = self.object_path(bucket, key)?;
if !path.is_file() {
if self.read_bucket_config_sync(bucket).versioning_enabled {
if self.read_bucket_config_sync(bucket).versioning_status().is_active() {
if let Some((dm_version_id, _)) = self.read_delete_marker_sync(bucket, key) {
return Err(StorageError::DeleteMarker {
bucket: bucket.to_string(),
@@ -2148,7 +2186,7 @@ impl crate::traits::StorageEngine for FsStorageBackend {
self.require_bucket(bucket)?;
let path = self.object_path(bucket, key)?;
if !path.is_file() {
if self.read_bucket_config_sync(bucket).versioning_enabled {
if self.read_bucket_config_sync(bucket).versioning_status().is_active() {
if let Some((dm_version_id, _)) = self.read_delete_marker_sync(bucket, key) {
return Err(StorageError::DeleteMarker {
bucket: bucket.to_string(),
@@ -2268,12 +2306,26 @@ impl crate::traits::StorageEngine for FsStorageBackend {
run_blocking(|| {
let bucket_path = self.require_bucket(bucket)?;
let path = self.object_path(bucket, key)?;
let versioning_enabled = self.read_bucket_config_sync(bucket).versioning_enabled;
let versioning_status = self.read_bucket_config_sync(bucket).versioning_status();
if versioning_enabled {
if versioning_status.is_active() {
if path.exists() {
self.archive_current_version_sync(bucket, key, "delete")
.map_err(StorageError::Io)?;
let existing_meta = self.read_metadata_sync(bucket, key);
let existing_vid = existing_meta
.get("__version_id__")
.map(String::as_str)
.unwrap_or("");
let should_archive = match versioning_status {
VersioningStatus::Enabled => true,
VersioningStatus::Suspended => {
!existing_vid.is_empty() && existing_vid != "null"
}
VersioningStatus::Disabled => false,
};
if should_archive {
self.archive_current_version_sync(bucket, key, "delete")
.map_err(StorageError::Io)?;
}
Self::safe_unlink(&path).map_err(StorageError::Io)?;
self.delete_metadata_sync(bucket, key)
.map_err(StorageError::Io)?;
@@ -2867,7 +2919,30 @@ impl crate::traits::StorageEngine for FsStorageBackend {
/// Legacy boolean versioning toggle, mapped onto the three-state model.
///
/// `enabled == true` always yields `Enabled`. `enabled == false` follows
/// S3 semantics: a bucket that was Enabled (or is already Suspended)
/// becomes `Suspended` rather than `Disabled`, because an S3 bucket can
/// never return to the never-versioned state; a bucket that was never
/// versioned stays `Disabled`.
///
/// # Errors
/// Fails if the bucket does not exist or its config cannot be persisted.
async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()> {
    self.require_bucket(bucket)?;
    let mut config = self.read_bucket_config_sync(bucket);
    // Decide the new status from the *previous* flags; assigning
    // `config.versioning_enabled = enabled` first would clobber the old
    // state and break the Enabled -> Suspended transition on disable.
    let new_status = if enabled {
        VersioningStatus::Enabled
    } else if config.versioning_enabled || config.versioning_suspended {
        VersioningStatus::Suspended
    } else {
        VersioningStatus::Disabled
    };
    config.set_versioning_status(new_status);
    self.write_bucket_config_sync(bucket, &config)
        .map_err(StorageError::Io)
}
/// Returns the bucket's effective versioning state (Disabled, Enabled,
/// or Suspended) derived from its persisted configuration flags.
///
/// NOTE(review): unlike `set_versioning_status`, this does not call
/// `require_bucket` first — a missing bucket presumably falls back to the
/// default config (Disabled); confirm that is the intended behavior.
async fn get_versioning_status(&self, bucket: &str) -> StorageResult<VersioningStatus> {
    Ok(self.read_bucket_config_sync(bucket).versioning_status())
}
/// Persists an explicit three-state versioning status on the bucket,
/// allowing callers to request `Enabled` or `Suspended` directly rather
/// than going through the legacy boolean toggle.
///
/// # Errors
/// Returns an error if the bucket does not exist or the updated
/// configuration cannot be written to disk.
async fn set_versioning_status(
    &self,
    bucket: &str,
    status: VersioningStatus,
) -> StorageResult<()> {
    self.require_bucket(bucket)?;
    let mut config = self.read_bucket_config_sync(bucket);
    // Map the tri-state status onto the two persisted boolean flags.
    config.set_versioning_status(status);
    self.write_bucket_config_sync(bucket, &config)
        .map_err(StorageError::Io)
}
@@ -2945,11 +3020,9 @@ impl crate::traits::StorageEngine for FsStorageBackend {
.parent()
.and_then(|parent| parent.strip_prefix(&root).ok())
.map(|rel| {
let mut s = rel.to_string_lossy().into_owned();
let s = rel.to_string_lossy().into_owned();
#[cfg(windows)]
{
s = s.replace('\\', "/");
}
let s = s.replace('\\', "/");
fs_decode_key(&s)
})
.unwrap_or_default();

View File

@@ -148,6 +148,12 @@ pub trait StorageEngine: Send + Sync {
async fn is_versioning_enabled(&self, bucket: &str) -> StorageResult<bool>;
async fn set_versioning(&self, bucket: &str, enabled: bool) -> StorageResult<()>;
async fn get_versioning_status(&self, bucket: &str) -> StorageResult<VersioningStatus>;
async fn set_versioning_status(
&self,
bucket: &str,
status: VersioningStatus,
) -> StorageResult<()>;
async fn list_object_versions(
&self,