Add snapshot/range storage primitives, gate GET preconditions on served snapshot, support partial-decrypt Range GET for SSE-encrypted objects
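The heart of the partial-decrypt Range GET in this commit is mapping a requested plaintext byte range onto AES-GCM chunk boundaries. A minimal standalone sketch of that arithmetic, mirroring decrypt_stream_chunked_range further down (the 4-byte chunk-count header, 4-byte per-chunk length prefix, and 16-byte GCM tag come from that function; the names here are illustrative only):

// Sketch: map a plaintext range onto chunk indices and a ciphertext offset.
// Constants mirror the on-disk format used by decrypt_stream_chunked_range.
const HEADER_SIZE: u64 = 4; // u32 chunk count / per-chunk length prefix
const GCM_TAG_LEN: u64 = 16;

fn range_to_chunks(chunk_plain: u64, start: u64, end_incl: u64) -> (u64, u64, u64) {
    let first_chunk = start / chunk_plain;
    let last_chunk = end_incl / chunk_plain;
    // Each stored chunk is [4-byte len][plaintext + 16-byte tag].
    let stride = chunk_plain + GCM_TAG_LEN + HEADER_SIZE;
    // Skip the file header, then the whole chunks before the range.
    let first_offset = HEADER_SIZE + first_chunk * stride;
    (first_chunk, last_chunk, first_offset)
}

fn main() {
    // bytes=60000-140000 with 64 KiB chunks touches chunks 0..=2.
    let (f, l, off) = range_to_chunks(65_536, 60_000, 140_000);
    assert_eq!((f, l, off), (0, 2, 4));
    println!("decrypt chunks {f}..={l}, first seek at byte {off}");
}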
@@ -2639,7 +2639,7 @@ dependencies = [

[[package]]
name = "myfsio-auth"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "aes",
 "base64",
@@ -2664,7 +2664,7 @@ dependencies = [

[[package]]
name = "myfsio-common"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "chrono",
 "serde",
@@ -2675,7 +2675,7 @@ dependencies = [

[[package]]
name = "myfsio-crypto"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "aes-gcm",
 "base64",
@@ -2696,7 +2696,7 @@ dependencies = [

[[package]]
name = "myfsio-server"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "aes-gcm",
 "async-trait",
@@ -2753,7 +2753,7 @@ dependencies = [

[[package]]
name = "myfsio-storage"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "chrono",
 "dashmap",
@@ -2769,6 +2769,7 @@ dependencies = [
 "tempfile",
 "thiserror",
 "tokio",
+ "tokio-util",
 "tracing",
 "unicode-normalization",
 "uuid",
@@ -2776,7 +2777,7 @@ dependencies = [

[[package]]
name = "myfsio-xml"
-version = "0.4.4"
+version = "0.4.5"
dependencies = [
 "chrono",
 "myfsio-common",
@@ -10,7 +10,7 @@ members = [
]

[workspace.package]
-version = "0.4.4"
+version = "0.4.5"
edition = "2021"

[workspace.dependencies]
@@ -42,7 +42,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
thiserror = "2"
chrono = { version = "0.4", features = ["serde"] }
base64 = "0.22"
-tokio-util = { version = "0.7", features = ["io"] }
+tokio-util = { version = "0.7", features = ["io", "io-util"] }
tokio-stream = "0.1"
futures = "0.3"
dashmap = "6"
@@ -16,6 +16,8 @@ pub struct ObjectMeta {
    pub version_id: Option<String>,
    #[serde(default)]
    pub is_delete_marker: bool,
+   #[serde(default, skip_serializing)]
+   pub internal_metadata: HashMap<String, String>,
}

impl ObjectMeta {
@@ -30,6 +32,7 @@ impl ObjectMeta {
            metadata: HashMap::new(),
            version_id: None,
            is_delete_marker: false,
+           internal_metadata: HashMap::new(),
        }
    }
}
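The #[serde(default, skip_serializing)] pair keeps internal_metadata out of every serialized ObjectMeta while records written before this field existed still deserialize cleanly. A minimal sketch of that behavior (hypothetical two-field struct; serde and serde_json assumed as dependencies):

use std::collections::HashMap;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Meta {
    size: u64,
    // Never written out; defaults to empty when absent in old records.
    #[serde(default, skip_serializing)]
    internal_metadata: HashMap<String, String>,
}

fn main() {
    let mut m = Meta { size: 1, internal_metadata: HashMap::new() };
    m.internal_metadata.insert("x-amz-encryption-nonce".into(), "abc".into());
    // skip_serializing: the map is absent from the JSON.
    assert_eq!(serde_json::to_string(&m).unwrap(), r#"{"size":1}"#);
    // default: legacy JSON without the field still deserializes.
    let old: Meta = serde_json::from_str(r#"{"size":2}"#).unwrap();
    assert!(old.internal_metadata.is_empty());
}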
@@ -145,6 +145,113 @@ pub fn decrypt_stream_chunked(
    Ok(chunk_count)
}

+const GCM_TAG_LEN: usize = 16;
+
+pub fn decrypt_stream_chunked_range(
+    input_path: &Path,
+    output_path: &Path,
+    key: &[u8],
+    base_nonce: &[u8],
+    chunk_plain_size: usize,
+    plaintext_size: u64,
+    plain_start: u64,
+    plain_end_inclusive: u64,
+) -> Result<u64, CryptoError> {
+    if key.len() != 32 {
+        return Err(CryptoError::InvalidKeySize(key.len()));
+    }
+    if base_nonce.len() != 12 {
+        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
+    }
+    if chunk_plain_size == 0 {
+        return Err(CryptoError::EncryptionFailed(
+            "chunk_plain_size must be > 0".into(),
+        ));
+    }
+    if plaintext_size == 0 {
+        let _ = File::create(output_path)?;
+        return Ok(0);
+    }
+    if plain_start > plain_end_inclusive || plain_end_inclusive >= plaintext_size {
+        return Err(CryptoError::EncryptionFailed(format!(
+            "range [{}, {}] invalid for plaintext size {}",
+            plain_start, plain_end_inclusive, plaintext_size
+        )));
+    }
+
+    let key_arr: [u8; 32] = key.try_into().unwrap();
+    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
+    let cipher = Aes256Gcm::new(&key_arr.into());
+
+    let n = chunk_plain_size as u64;
+    let first_chunk = (plain_start / n) as u32;
+    let last_chunk = (plain_end_inclusive / n) as u32;
+    let total_chunks = plaintext_size.div_ceil(n) as u32;
+    let final_chunk_plain = plaintext_size - (total_chunks as u64 - 1) * n;
+
+    let mut infile = File::open(input_path)?;
+
+    let mut header = [0u8; HEADER_SIZE];
+    infile.read_exact(&mut header)?;
+    let stored_chunk_count = u32::from_be_bytes(header);
+    if stored_chunk_count != total_chunks {
+        return Err(CryptoError::EncryptionFailed(format!(
+            "chunk count mismatch: header says {}, plaintext_size implies {}",
+            stored_chunk_count, total_chunks
+        )));
+    }
+
+    let mut outfile = File::create(output_path)?;
+
+    let stride = n + GCM_TAG_LEN as u64 + HEADER_SIZE as u64;
+    let first_offset = HEADER_SIZE as u64 + first_chunk as u64 * stride;
+    infile.seek(SeekFrom::Start(first_offset))?;
+
+    let mut size_buf = [0u8; HEADER_SIZE];
+    let mut bytes_written: u64 = 0;
+
+    for chunk_index in first_chunk..=last_chunk {
+        infile.read_exact(&mut size_buf)?;
+        let ct_len = u32::from_be_bytes(size_buf) as usize;
+
+        let expected_plain = if chunk_index + 1 == total_chunks {
+            final_chunk_plain as usize
+        } else {
+            chunk_plain_size
+        };
+        let expected_ct = expected_plain + GCM_TAG_LEN;
+        if ct_len != expected_ct {
+            return Err(CryptoError::EncryptionFailed(format!(
+                "chunk {} stored length {} != expected {} (corrupt file or chunk_size mismatch)",
+                chunk_index, ct_len, expected_ct
+            )));
+        }
+
+        let mut encrypted = vec![0u8; ct_len];
+        infile.read_exact(&mut encrypted)?;
+
+        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
+        let nonce = Nonce::from_slice(&nonce_bytes);
+        let decrypted = cipher
+            .decrypt(nonce, encrypted.as_ref())
+            .map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;
+
+        let chunk_plain_start = chunk_index as u64 * n;
+        let chunk_plain_end_exclusive = chunk_plain_start + decrypted.len() as u64;
+
+        let slice_start = plain_start.saturating_sub(chunk_plain_start) as usize;
+        let slice_end = (plain_end_inclusive + 1).min(chunk_plain_end_exclusive);
+        let slice_end_local = (slice_end - chunk_plain_start) as usize;
+
+        if slice_end_local > slice_start {
+            outfile.write_all(&decrypted[slice_start..slice_end_local])?;
+            bytes_written += (slice_end_local - slice_start) as u64;
+        }
+    }
+
+    Ok(bytes_written)
+}
+
pub async fn encrypt_stream_chunked_async(
    input_path: &Path,
    output_path: &Path,
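To make the seek arithmetic above concrete: with chunk_plain_size = 1024, each stored chunk occupies 4 + 1024 + 16 = 1044 bytes, so chunk 3's length prefix sits at 4 + 3 * 1044 = 3136. A request starting at plaintext byte 3500 seeks there, decrypts only chunk 3 (plaintext bytes 3072..4095), and writes out from local offset 3500 - 3072 = 428, so chunks outside the range are never read or authenticated.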
@@ -230,6 +337,191 @@ mod tests {
        assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
    }

+    fn write_file(path: &Path, data: &[u8]) {
+        std::fs::File::create(path).unwrap().write_all(data).unwrap();
+    }
+
+    fn make_encrypted_file(
+        dir: &Path,
+        data: &[u8],
+        key: &[u8; 32],
+        nonce: &[u8; 12],
+        chunk: usize,
+    ) -> std::path::PathBuf {
+        let input = dir.join("input.bin");
+        let encrypted = dir.join("encrypted.bin");
+        write_file(&input, data);
+        encrypt_stream_chunked(&input, &encrypted, key, nonce, Some(chunk)).unwrap();
+        encrypted
+    }
+
+    #[test]
+    fn test_range_within_single_chunk() {
+        let dir = tempfile::tempdir().unwrap();
+        let data: Vec<u8> = (0u8..=255).cycle().take(4096).collect();
+        let key = [0x33u8; 32];
+        let nonce = [0x07u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 1024);
+        let out = dir.path().join("range.bin");
+
+        let n = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            1024,
+            data.len() as u64,
+            200,
+            399,
+        )
+        .unwrap();
+        assert_eq!(n, 200);
+        let got = std::fs::read(&out).unwrap();
+        assert_eq!(got, &data[200..400]);
+    }
+
+    #[test]
+    fn test_range_spanning_multiple_chunks() {
+        let dir = tempfile::tempdir().unwrap();
+        let data: Vec<u8> = (0..5000u32).map(|i| (i % 251) as u8).collect();
+        let key = [0x44u8; 32];
+        let nonce = [0x02u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
+        let out = dir.path().join("range.bin");
+
+        let n = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            512,
+            data.len() as u64,
+            100,
+            2999,
+        )
+        .unwrap();
+        assert_eq!(n, 2900);
+        let got = std::fs::read(&out).unwrap();
+        assert_eq!(got, &data[100..3000]);
+    }
+
+    #[test]
+    fn test_range_covers_final_partial_chunk() {
+        let dir = tempfile::tempdir().unwrap();
+        let data: Vec<u8> = (0..1300u32).map(|i| (i % 71) as u8).collect();
+        let key = [0x55u8; 32];
+        let nonce = [0x0au8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
+        let out = dir.path().join("range.bin");
+
+        let n = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            512,
+            data.len() as u64,
+            900,
+            1299,
+        )
+        .unwrap();
+        assert_eq!(n, 400);
+        let got = std::fs::read(&out).unwrap();
+        assert_eq!(got, &data[900..1300]);
+    }
+
+    #[test]
+    fn test_range_full_object() {
+        let dir = tempfile::tempdir().unwrap();
+        let data: Vec<u8> = (0..2048u32).map(|i| (i % 13) as u8).collect();
+        let key = [0x11u8; 32];
+        let nonce = [0x33u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
+        let out = dir.path().join("range.bin");
+
+        let n = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            512,
+            data.len() as u64,
+            0,
+            data.len() as u64 - 1,
+        )
+        .unwrap();
+        assert_eq!(n, data.len() as u64);
+        let got = std::fs::read(&out).unwrap();
+        assert_eq!(got, data);
+    }
+
+    #[test]
+    fn test_range_wrong_key_fails() {
+        let dir = tempfile::tempdir().unwrap();
+        let data = b"range-auth-check".repeat(100);
+        let key = [0x66u8; 32];
+        let nonce = [0x09u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 256);
+        let out = dir.path().join("range.bin");
+
+        let wrong = [0x67u8; 32];
+        let r = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &wrong,
+            &nonce,
+            256,
+            data.len() as u64,
+            0,
+            data.len() as u64 - 1,
+        );
+        assert!(matches!(r, Err(CryptoError::DecryptionFailed(_))));
+    }
+
+    #[test]
+    fn test_range_out_of_bounds_rejected() {
+        let dir = tempfile::tempdir().unwrap();
+        let data = vec![0u8; 100];
+        let key = [0x22u8; 32];
+        let nonce = [0x44u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 64);
+        let out = dir.path().join("range.bin");
+
+        let r = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            64,
+            data.len() as u64,
+            50,
+            200,
+        );
+        assert!(r.is_err());
+    }
+
+    #[test]
+    fn test_range_mismatched_chunk_size_detected() {
+        let dir = tempfile::tempdir().unwrap();
+        let data: Vec<u8> = (0..2048u32).map(|i| i as u8).collect();
+        let key = [0x77u8; 32];
+        let nonce = [0x88u8; 12];
+        let encrypted = make_encrypted_file(dir.path(), &data, &key, &nonce, 512);
+        let out = dir.path().join("range.bin");
+
+        let r = decrypt_stream_chunked_range(
+            &encrypted,
+            &out,
+            &key,
+            &nonce,
+            1024,
+            data.len() as u64,
+            0,
+            1023,
+        );
+        assert!(r.is_err());
+    }
+
    #[test]
    fn test_wrong_key_fails_decrypt() {
        let dir = tempfile::tempdir().unwrap();
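derive_chunk_nonce is not part of this diff. One common construction for per-chunk nonces, shown here purely as an assumption rather than this crate's actual scheme, XORs the big-endian chunk index into the tail of the 12-byte base nonce, which keeps nonces unique per chunk under a single data key:

// Hypothetical sketch only: the real derive_chunk_nonce is defined elsewhere
// in myfsio-crypto and may use a different construction.
fn derive_chunk_nonce_sketch(base: &[u8; 12], chunk_index: u32) -> [u8; 12] {
    let mut nonce = *base;
    for (n, c) in nonce[8..].iter_mut().zip(chunk_index.to_be_bytes()) {
        *n ^= c; // distinct nonce per chunk, recoverable from the index
    }
    nonce
}

fn main() {
    let base = [7u8; 12];
    assert_ne!(derive_chunk_nonce_sketch(&base, 0), derive_chunk_nonce_sketch(&base, 1));
}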
@@ -4,7 +4,9 @@ use rand::RngCore;
use std::collections::HashMap;
use std::path::Path;

-use crate::aes_gcm::{decrypt_stream_chunked, encrypt_stream_chunked, CryptoError};
+use crate::aes_gcm::{
+    decrypt_stream_chunked, decrypt_stream_chunked_range, encrypt_stream_chunked, CryptoError,
+};
use crate::kms::KmsService;

#[derive(Debug, Clone, PartialEq)]
@@ -37,6 +39,8 @@ pub struct EncryptionMetadata {
    pub nonce: String,
    pub encrypted_data_key: Option<String>,
    pub kms_key_id: Option<String>,
+   pub chunk_size: Option<usize>,
+   pub plaintext_size: Option<u64>,
}

impl EncryptionMetadata {
@@ -53,6 +57,15 @@ impl EncryptionMetadata {
        if let Some(ref kid) = self.kms_key_id {
            map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
        }
+       if let Some(cs) = self.chunk_size {
+           map.insert("x-amz-encryption-chunk-size".to_string(), cs.to_string());
+       }
+       if let Some(ps) = self.plaintext_size {
+           map.insert(
+               "x-amz-encryption-plaintext-size".to_string(),
+               ps.to_string(),
+           );
+       }
        map
    }

@@ -64,6 +77,12 @@ impl EncryptionMetadata {
            nonce: nonce.clone(),
            encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
            kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
+           chunk_size: meta
+               .get("x-amz-encryption-chunk-size")
+               .and_then(|s| s.parse().ok()),
+           plaintext_size: meta
+               .get("x-amz-encryption-plaintext-size")
+               .and_then(|s| s.parse().ok()),
        })
    }

@@ -76,6 +95,8 @@ impl EncryptionMetadata {
        meta.remove("x-amz-encryption-nonce");
        meta.remove("x-amz-encrypted-data-key");
        meta.remove("x-amz-encryption-key-id");
+       meta.remove("x-amz-encryption-chunk-size");
+       meta.remove("x-amz-encryption-plaintext-size");
    }
}

@@ -212,6 +233,11 @@ impl EncryptionService {
            data_key
        };

+       let plaintext_size = tokio::fs::metadata(input_path)
+           .await
+           .map_err(CryptoError::Io)?
+           .len();
+
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        let ak = actual_key;
@@ -228,22 +254,23 @@ impl EncryptionService {
            nonce: B64.encode(nonce),
            encrypted_data_key,
            kms_key_id,
+           chunk_size: Some(chunk_size),
+           plaintext_size: Some(plaintext_size),
        })
    }

-   pub async fn decrypt_object(
+   async fn resolve_data_key(
        &self,
-       input_path: &Path,
-       output_path: &Path,
        enc_meta: &EncryptionMetadata,
        customer_key: Option<&[u8]>,
-   ) -> Result<(), CryptoError> {
+   ) -> Result<([u8; 32], [u8; 12]), CryptoError> {
        let nonce_bytes = B64
            .decode(&enc_meta.nonce)
            .map_err(|e| CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e)))?;
        if nonce_bytes.len() != 12 {
            return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
        }
+       let nonce: [u8; 12] = nonce_bytes.try_into().unwrap();

        let data_key: [u8; 32] = if let Some(ck) = customer_key {
            if ck.len() != 32 {
@@ -281,15 +308,62 @@ impl EncryptionService {
            self.unwrap_data_key(wrapped)?
        };

+       Ok((data_key, nonce))
+   }
+
+   pub async fn decrypt_object(
+       &self,
+       input_path: &Path,
+       output_path: &Path,
+       enc_meta: &EncryptionMetadata,
+       customer_key: Option<&[u8]>,
+   ) -> Result<(), CryptoError> {
+       let (data_key, nonce) = self.resolve_data_key(enc_meta, customer_key).await?;
+
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
-       let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
-       tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nb))
+       tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nonce))
            .await
            .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(())
    }

+   pub async fn decrypt_object_range(
+       &self,
+       input_path: &Path,
+       output_path: &Path,
+       enc_meta: &EncryptionMetadata,
+       customer_key: Option<&[u8]>,
+       plain_start: u64,
+       plain_end_inclusive: u64,
+   ) -> Result<u64, CryptoError> {
+       let chunk_size = enc_meta.chunk_size.ok_or_else(|| {
+           CryptoError::EncryptionFailed("chunk_size missing from encryption metadata".into())
+       })?;
+       let plaintext_size = enc_meta.plaintext_size.ok_or_else(|| {
+           CryptoError::EncryptionFailed("plaintext_size missing from encryption metadata".into())
+       })?;
+
+       let (data_key, nonce) = self.resolve_data_key(enc_meta, customer_key).await?;
+
+       let ip = input_path.to_owned();
+       let op = output_path.to_owned();
+       tokio::task::spawn_blocking(move || {
+           decrypt_stream_chunked_range(
+               &ip,
+               &op,
+               &data_key,
+               &nonce,
+               chunk_size,
+               plaintext_size,
+               plain_start,
+               plain_end_inclusive,
+           )
+       })
+       .await
+       .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
+   }
}

#[cfg(test)]
@@ -383,12 +457,26 @@ mod tests {
        nonce: "dGVzdG5vbmNlMTI=".to_string(),
        encrypted_data_key: Some("c29tZWtleQ==".to_string()),
        kms_key_id: None,
+       chunk_size: Some(65_536),
+       plaintext_size: Some(1_234_567),
    };
    let map = meta.to_metadata_map();
    let restored = EncryptionMetadata::from_metadata(&map).unwrap();
    assert_eq!(restored.algorithm, "AES256");
    assert_eq!(restored.nonce, meta.nonce);
    assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
+   assert_eq!(restored.chunk_size, Some(65_536));
+   assert_eq!(restored.plaintext_size, Some(1_234_567));
}

+#[test]
+fn test_encryption_metadata_legacy_missing_sizes() {
+   let mut map = HashMap::new();
+   map.insert("x-amz-server-side-encryption".to_string(), "AES256".into());
+   map.insert("x-amz-encryption-nonce".to_string(), "aGVsbG8=".into());
+   let restored = EncryptionMetadata::from_metadata(&map).unwrap();
+   assert_eq!(restored.chunk_size, None);
+   assert_eq!(restored.plaintext_size, None);
+}
+
#[test]
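A note on the metadata round-trip above: an encrypted object now carries, for example, x-amz-encryption-chunk-size: 65536 and x-amz-encryption-plaintext-size: 1234567 in its internal metadata. from_metadata parses both with .parse().ok(), so a missing or corrupted value degrades to None rather than an error; decrypt_object_range then refuses with an explicit CryptoError, and the HTTP handler below uses the same None check to route legacy objects through full-object decryption instead of the chunk-aligned fast path.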
@@ -31,6 +31,33 @@ use crate::services::notifications;
use crate::services::object_lock;
use crate::state::AppState;

+async fn open_self_deleting(path: std::path::PathBuf) -> std::io::Result<tokio::fs::File> {
+    #[cfg(unix)]
+    {
+        let file = tokio::fs::File::open(&path).await?;
+        let _ = tokio::fs::remove_file(&path).await;
+        Ok(file)
+    }
+    #[cfg(windows)]
+    {
+        use std::os::windows::fs::OpenOptionsExt;
+        const FILE_FLAG_DELETE_ON_CLOSE: u32 = 0x0400_0000;
+        const FILE_SHARE_READ: u32 = 0x0000_0001;
+        const FILE_SHARE_WRITE: u32 = 0x0000_0002;
+        const FILE_SHARE_DELETE: u32 = 0x0000_0004;
+        let file = tokio::task::spawn_blocking(move || {
+            std::fs::OpenOptions::new()
+                .read(true)
+                .custom_flags(FILE_FLAG_DELETE_ON_CLOSE)
+                .share_mode(FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE)
+                .open(&path)
+        })
+        .await
+        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;
+        Ok(tokio::fs::File::from_std(file))
+    }
+}
+
fn s3_error_response(err: S3Error) -> Response {
    let status =
        StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
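The Unix arm of open_self_deleting relies on POSIX unlink semantics: removing the directory entry leaves the inode readable through any open descriptor, and the kernel reclaims it when the last descriptor closes. A minimal standalone illustration of that behavior (tempfile assumed as a dev dependency, as in the tests; this only holds on Unix, which is exactly why the Windows arm needs FILE_FLAG_DELETE_ON_CLOSE instead):

use std::io::Read;

fn main() -> std::io::Result<()> {
    let dir = tempfile::tempdir()?;
    let path = dir.path().join("scratch.bin");
    std::fs::write(&path, b"still readable")?;
    let mut f = std::fs::File::open(&path)?;
    std::fs::remove_file(&path)?; // unlink: directory entry gone, inode stays
    let mut buf = String::new();
    f.read_to_string(&mut buf)?;
    assert_eq!(buf, "still readable");
    Ok(()) // fd drops here; the kernel reclaims the inode
}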
@@ -1568,23 +1595,6 @@ pub async fn get_object(
        .version_id
        .as_deref()
        .filter(|value| !is_null_version(Some(*value)));
-   let head_meta = match version_id {
-       Some(version_id) => match state
-           .storage
-           .head_object_version(&bucket, &key, version_id)
-           .await
-       {
-           Ok(m) => m,
-           Err(e) => return storage_err_response(e),
-       },
-       None => match state.storage.head_object(&bucket, &key).await {
-           Ok(m) => m,
-           Err(e) => return storage_err_response(e),
-       },
-   };
-   if let Some(resp) = evaluate_get_preconditions(&headers, &head_meta) {
-       return resp;
-   }

    let range_header = headers
        .get("range")
@@ -1592,163 +1602,144 @@ pub async fn get_object(
        .map(|s| s.to_string());

    if let Some(ref range_str) = range_header {
-       return range_get_handler(&state, &bucket, &key, range_str, &query).await;
+       return range_get_handler(&state, &bucket, &key, range_str, &query, &headers).await;
    }

-   let all_meta = match version_id {
-       Some(version_id) => state
-           .storage
-           .get_object_version_metadata(&bucket, &key, version_id)
-           .await
-           .unwrap_or_default(),
-       None => state
-           .storage
-           .get_object_metadata(&bucket, &key)
-           .await
-           .unwrap_or_default(),
-   };
-   let enc_meta = myfsio_crypto::encryption::EncryptionMetadata::from_metadata(&all_meta);
-
-   if let (Some(ref enc_info), Some(ref enc_svc)) = (&enc_meta, &state.encryption) {
-       let obj_path = match version_id {
-           Some(version_id) => match state
-               .storage
-               .get_object_version_path(&bucket, &key, version_id)
-               .await
-           {
-               Ok(p) => p,
-               Err(e) => return storage_err_response(e),
-           },
-           None => match state.storage.get_object_path(&bucket, &key).await {
-               Ok(p) => p,
-               Err(e) => return storage_err_response(e),
-           },
-       };
-       let tmp_dir = state.config.storage_root.join(".myfsio.sys").join("tmp");
-       let _ = tokio::fs::create_dir_all(&tmp_dir).await;
-       let dec_tmp = tmp_dir.join(format!("dec-{}", uuid::Uuid::new_v4()));
-
-       let customer_key = extract_sse_c_key(&headers);
-       let ck_ref = customer_key.as_deref();
-
-       if let Err(e) = enc_svc
-           .decrypt_object(&obj_path, &dec_tmp, enc_info, ck_ref)
-           .await
-       {
-           let _ = tokio::fs::remove_file(&dec_tmp).await;
-           return s3_error_response(S3Error::new(
-               myfsio_common::error::S3ErrorCode::InternalError,
-               format!("Decryption failed: {}", e),
-           ));
-       }
-
-       let file = match tokio::fs::File::open(&dec_tmp).await {
-           Ok(f) => f,
-           Err(e) => {
-               let _ = tokio::fs::remove_file(&dec_tmp).await;
-               return storage_err_response(myfsio_storage::error::StorageError::Io(e));
-           }
-       };
-       let file_size = file.metadata().await.map(|m| m.len()).unwrap_or(0);
-       let stream = ReaderStream::with_capacity(file, 256 * 1024);
-       let body = Body::from_stream(stream);
-
-       let meta = head_meta.clone();
-
-       let tmp_path = dec_tmp.clone();
-       tokio::spawn(async move {
-           tokio::time::sleep(std::time::Duration::from_secs(5)).await;
-           let _ = tokio::fs::remove_file(&tmp_path).await;
-       });
-
-       let mut resp_headers = HeaderMap::new();
-       resp_headers.insert("content-length", file_size.to_string().parse().unwrap());
-       if let Some(ref etag) = meta.etag {
-           resp_headers.insert("etag", format!("\"{}\"", etag).parse().unwrap());
-       }
-       insert_content_type(&mut resp_headers, &key, meta.content_type.as_deref());
-       resp_headers.insert(
-           "last-modified",
-           meta.last_modified
-               .format("%a, %d %b %Y %H:%M:%S GMT")
-               .to_string()
-               .parse()
-               .unwrap(),
-       );
-       resp_headers.insert("accept-ranges", "bytes".parse().unwrap());
-       resp_headers.insert(
-           "x-amz-server-side-encryption",
-           enc_info.algorithm.parse().unwrap(),
-       );
-       apply_stored_response_headers(&mut resp_headers, &all_meta);
-       apply_stored_checksum_headers(&mut resp_headers, &all_meta);
-       if let Some(ref requested_version) = query.version_id {
-           if let Ok(value) = requested_version.parse() {
-               resp_headers.insert("x-amz-version-id", value);
-           }
-       } else if let Some(vid) = all_meta.get("__version_id__") {
-           if let Ok(value) = vid.parse() {
-               resp_headers.insert("x-amz-version-id", value);
-           }
-       }
-
-       apply_user_metadata(&mut resp_headers, &meta.metadata);
-
-       apply_response_overrides(&mut resp_headers, &query);
-
-       return (StatusCode::OK, resp_headers, body).into_response();
-   }
-
-   let object_result = match version_id {
-       Some(version_id) => {
-           state
-               .storage
-               .get_object_version(&bucket, &key, version_id)
-               .await
-       }
-       None => state.storage.get_object(&bucket, &key).await,
-   };
-
-   match object_result {
-       Ok((meta, reader)) => {
-           let stream = ReaderStream::with_capacity(reader, 256 * 1024);
-           let body = Body::from_stream(stream);
-
-           let mut headers = HeaderMap::new();
-           headers.insert("content-length", meta.size.to_string().parse().unwrap());
-           if let Some(ref etag) = meta.etag {
-               headers.insert("etag", format!("\"{}\"", etag).parse().unwrap());
-           }
-           insert_content_type(&mut headers, &key, meta.content_type.as_deref());
-           headers.insert(
-               "last-modified",
-               meta.last_modified
-                   .format("%a, %d %b %Y %H:%M:%S GMT")
-                   .to_string()
-                   .parse()
-                   .unwrap(),
-           );
-           headers.insert("accept-ranges", "bytes".parse().unwrap());
-           apply_stored_response_headers(&mut headers, &all_meta);
-           apply_stored_checksum_headers(&mut headers, &all_meta);
-           if let Some(ref requested_version) = query.version_id {
-               if let Ok(value) = requested_version.parse() {
-                   headers.insert("x-amz-version-id", value);
-               }
-           } else if let Some(ref vid) = meta.version_id {
-               if let Ok(value) = vid.parse() {
-                   headers.insert("x-amz-version-id", value);
-               }
-           }
-
-           apply_user_metadata(&mut headers, &meta.metadata);
-
-           apply_response_overrides(&mut headers, &query);
-
-           (StatusCode::OK, headers, body).into_response()
-       }
-       Err(e) => storage_err_response(e),
-   }
+   let stream_cap = state.config.stream_chunk_size.max(64 * 1024);
+
+   // Take a single snapshot of the live object BEFORE deciding whether it's
+   // encrypted. If we sniffed encryption from head_meta first, a PUT could
+   // flip the object's encryption state between head and snapshot — leaving
+   // us either serving ciphertext through the raw path or failing because
+   // the snapshot no longer has encryption metadata. All decisions must
+   // come from this snapshot.
+   let tmp_dir = state.config.storage_root.join(".myfsio.sys").join("tmp");
+   let _ = tokio::fs::create_dir_all(&tmp_dir).await;
+   let snap_link = tmp_dir.join(format!("src-{}", uuid::Uuid::new_v4()));
+   let snap_res = match version_id {
+       Some(v) => {
+           state
+               .storage
+               .snapshot_object_version_to_link(&bucket, &key, v, &snap_link)
+               .await
+       }
+       None => {
+           state
+               .storage
+               .snapshot_object_to_link(&bucket, &key, &snap_link)
+               .await
+       }
+   };
+   let snap_meta = match snap_res {
+       Ok(m) => m,
+       Err(e) => return storage_err_response(e),
+   };
+
+   // Evaluate preconditions against the served snapshot's metadata. A HEAD
+   // taken earlier could disagree with the snapshot if a concurrent PUT
+   // landed in between, causing us to serve a body that doesn't satisfy
+   // the caller's If-Match / If-None-Match / time conditions.
+   if let Some(resp) = evaluate_get_preconditions(&headers, &snap_meta) {
+       let _ = tokio::fs::remove_file(&snap_link).await;
+       return resp;
+   }
+
+   let enc_info = myfsio_crypto::encryption::EncryptionMetadata::from_metadata(
+       &snap_meta.internal_metadata,
+   );
+
+   let (file, file_size, enc_header): (tokio::fs::File, u64, Option<&str>) = match (
+       enc_info.as_ref(),
+       state.encryption.as_ref(),
+   ) {
+       (Some(enc_info), Some(enc_svc)) => {
+           let dec_tmp = tmp_dir.join(format!("dec-{}", uuid::Uuid::new_v4()));
+           let customer_key = extract_sse_c_key(&headers);
+           let decrypt_res = enc_svc
+               .decrypt_object(&snap_link, &dec_tmp, enc_info, customer_key.as_deref())
+               .await;
+           // Hardlink served its purpose; the decrypted plaintext is in
+           // dec_tmp now.
+           let _ = tokio::fs::remove_file(&snap_link).await;
+           if let Err(e) = decrypt_res {
+               let _ = tokio::fs::remove_file(&dec_tmp).await;
+               return s3_error_response(S3Error::new(
+                   myfsio_common::error::S3ErrorCode::InternalError,
+                   format!("Decryption failed: {}", e),
+               ));
+           }
+           let file = match open_self_deleting(dec_tmp.clone()).await {
+               Ok(f) => f,
+               Err(e) => {
+                   let _ = tokio::fs::remove_file(&dec_tmp).await;
+                   return storage_err_response(myfsio_storage::error::StorageError::Io(e));
+               }
+           };
+           let file_size = file.metadata().await.map(|m| m.len()).unwrap_or(0);
+           (file, file_size, Some(enc_info.algorithm.as_str()))
+       }
+       (Some(_), None) => {
+           // Snapshot is encrypted but the server has no encryption
+           // service configured to decrypt it. Serving ciphertext as
+           // plaintext would be actively wrong; refuse explicitly.
+           let _ = tokio::fs::remove_file(&snap_link).await;
+           return s3_error_response(S3Error::new(
+               myfsio_common::error::S3ErrorCode::InternalError,
+               "Object is encrypted but encryption service is disabled".to_string(),
+           ));
+       }
+       (None, _) => {
+           // Raw path: stream directly from the hardlink, which becomes
+           // self-deleting on open (kernel keeps the inode alive via our
+           // fd).
+           let file = match open_self_deleting(snap_link.clone()).await {
+               Ok(f) => f,
+               Err(e) => {
+                   let _ = tokio::fs::remove_file(&snap_link).await;
+                   return storage_err_response(myfsio_storage::error::StorageError::Io(e));
+               }
+           };
+           (file, snap_meta.size, None)
+       }
+   };
+
+   let stream = ReaderStream::with_capacity(file, stream_cap);
+   let body = Body::from_stream(stream);
+
+   let meta = &snap_meta;
+   let mut resp_headers = HeaderMap::new();
+   resp_headers.insert("content-length", file_size.to_string().parse().unwrap());
+   if let Some(ref etag) = meta.etag {
+       resp_headers.insert("etag", format!("\"{}\"", etag).parse().unwrap());
+   }
+   insert_content_type(&mut resp_headers, &key, meta.content_type.as_deref());
+   resp_headers.insert(
+       "last-modified",
+       meta.last_modified
+           .format("%a, %d %b %Y %H:%M:%S GMT")
+           .to_string()
+           .parse()
+           .unwrap(),
+   );
+   resp_headers.insert("accept-ranges", "bytes".parse().unwrap());
+   if let Some(alg) = enc_header {
+       resp_headers.insert("x-amz-server-side-encryption", alg.parse().unwrap());
+   }
+   apply_stored_response_headers(&mut resp_headers, &meta.internal_metadata);
+   apply_stored_checksum_headers(&mut resp_headers, &meta.internal_metadata);
+   if let Some(ref requested_version) = query.version_id {
+       if let Ok(value) = requested_version.parse() {
+           resp_headers.insert("x-amz-version-id", value);
+       }
+   } else if let Some(ref vid) = meta.version_id {
+       if let Ok(value) = vid.parse() {
+           resp_headers.insert("x-amz-version-id", value);
+       }
+   }
+   apply_user_metadata(&mut resp_headers, &meta.metadata);
+   apply_response_overrides(&mut resp_headers, &query);
+
+   (StatusCode::OK, resp_headers, body).into_response()
}

pub async fn post_object(
@@ -1874,18 +1865,6 @@ pub async fn head_object(
    if let Some(resp) = evaluate_get_preconditions(&headers, &meta) {
        return resp;
    }
-   let all_meta = match version_id {
-       Some(version_id) => state
-           .storage
-           .get_object_version_metadata(&bucket, &key, version_id)
-           .await
-           .unwrap_or_default(),
-       None => state
-           .storage
-           .get_object_metadata(&bucket, &key)
-           .await
-           .unwrap_or_default(),
-   };
    let mut headers = HeaderMap::new();
    headers.insert("content-length", meta.size.to_string().parse().unwrap());
    if let Some(ref etag) = meta.etag {
@@ -1901,8 +1880,8 @@ pub async fn head_object(
            .unwrap(),
    );
    headers.insert("accept-ranges", "bytes".parse().unwrap());
-   apply_stored_response_headers(&mut headers, &all_meta);
-   apply_stored_checksum_headers(&mut headers, &all_meta);
+   apply_stored_response_headers(&mut headers, &meta.internal_metadata);
+   apply_stored_checksum_headers(&mut headers, &meta.internal_metadata);
    if let Some(ref requested_version) = query.version_id {
        if let Ok(value) = requested_version.parse() {
            headers.insert("x-amz-version-id", value);
@@ -2607,71 +2586,191 @@ async fn range_get_handler(
    key: &str,
    range_str: &str,
    query: &ObjectQuery,
+   headers: &HeaderMap,
) -> Response {
    let version_id = query
        .version_id
        .as_deref()
        .filter(|value| !is_null_version(Some(*value)));
-   let meta = match version_id {
-       Some(version_id) => match state
-           .storage
-           .head_object_version(bucket, key, version_id)
-           .await
-       {
-           Ok(m) => m,
-           Err(e) => return storage_err_response(e),
-       },
-       None => match state.storage.head_object(bucket, key).await {
-           Ok(m) => m,
-           Err(e) => return storage_err_response(e),
-       },
-   };
-
-   let total_size = meta.size;
-   let (start, end) = match parse_range(range_str, total_size) {
-       Some(r) => r,
-       None => {
-           return s3_error_response(S3Error::new(
-               myfsio_common::error::S3ErrorCode::InvalidRange,
-               format!("Range not satisfiable for size {}", total_size),
-           ));
-       }
-   };
-
-   let path = match version_id {
-       Some(version_id) => match state
-           .storage
-           .get_object_version_path(bucket, key, version_id)
-           .await
-       {
-           Ok(p) => p,
-           Err(e) => return storage_err_response(e),
-       },
-       None => match state.storage.get_object_path(bucket, key).await {
-           Ok(p) => p,
-           Err(e) => return storage_err_response(e),
-       },
-   };
-
-   let mut file = match tokio::fs::File::open(&path).await {
-       Ok(f) => f,
-       Err(e) => return storage_err_response(myfsio_storage::error::StorageError::Io(e)),
-   };
-
-   if let Err(e) = file.seek(std::io::SeekFrom::Start(start)).await {
-       return storage_err_response(myfsio_storage::error::StorageError::Io(e));
-   }
+   let tmp_dir = state.config.storage_root.join(".myfsio.sys").join("tmp");
+   let _ = tokio::fs::create_dir_all(&tmp_dir).await;
+   let snap_link = tmp_dir.join(format!("rsrc-{}", uuid::Uuid::new_v4()));
+
+   let snap_meta = match version_id {
+       Some(v) => {
+           state
+               .storage
+               .snapshot_object_version_to_link(bucket, key, v, &snap_link)
+               .await
+       }
+       None => {
+           state
+               .storage
+               .snapshot_object_to_link(bucket, key, &snap_link)
+               .await
+       }
+   };
+   let meta = match snap_meta {
+       Ok(m) => m,
+       Err(e) => return storage_err_response(e),
+   };
+
+   if let Some(resp) = evaluate_get_preconditions(headers, &meta) {
+       let _ = tokio::fs::remove_file(&snap_link).await;
+       return resp;
+   }
+
+   let enc_info =
+       myfsio_crypto::encryption::EncryptionMetadata::from_metadata(&meta.internal_metadata);
+
+   let (body_path, plaintext_size, enc_header): (std::path::PathBuf, u64, Option<&str>) =
+       match (enc_info.as_ref(), state.encryption.as_ref()) {
+           (Some(enc_info), Some(enc_svc)) => {
+               let customer_key = extract_sse_c_key(headers);
+               let has_fast_path = enc_info.chunk_size.is_some()
+                   && enc_info.plaintext_size.is_some();
+
+               if has_fast_path {
+                   let plaintext_size = enc_info.plaintext_size.unwrap();
+                   let (start, end) = match parse_range(range_str, plaintext_size) {
+                       Some(r) => r,
+                       None => {
+                           let _ = tokio::fs::remove_file(&snap_link).await;
+                           return s3_error_response(S3Error::new(
+                               myfsio_common::error::S3ErrorCode::InvalidRange,
+                               format!("Range not satisfiable for size {}", plaintext_size),
+                           ));
+                       }
+                   };
+
+                   let dec_tmp = tmp_dir.join(format!("rdec-{}", uuid::Uuid::new_v4()));
+                   let res = enc_svc
+                       .decrypt_object_range(
+                           &snap_link,
+                           &dec_tmp,
+                           enc_info,
+                           customer_key.as_deref(),
+                           start,
+                           end,
+                       )
+                       .await;
+                   let _ = tokio::fs::remove_file(&snap_link).await;
+                   if let Err(e) = res {
+                       let _ = tokio::fs::remove_file(&dec_tmp).await;
+                       return s3_error_response(S3Error::new(
+                           myfsio_common::error::S3ErrorCode::InternalError,
+                           format!("Decryption failed: {}", e),
+                       ));
+                   }
+
+                   return stream_partial_content(
+                       state,
+                       &dec_tmp,
+                       start,
+                       end,
+                       plaintext_size,
+                       &meta,
+                       key,
+                       query,
+                       Some(enc_info.algorithm.as_str()),
+                       /* already_trimmed */ true,
+                   )
+                   .await;
+               }
+
+               let dec_tmp = tmp_dir.join(format!("rdec-{}", uuid::Uuid::new_v4()));
+               let res = enc_svc
+                   .decrypt_object(&snap_link, &dec_tmp, enc_info, customer_key.as_deref())
+                   .await;
+               let _ = tokio::fs::remove_file(&snap_link).await;
+               if let Err(e) = res {
+                   let _ = tokio::fs::remove_file(&dec_tmp).await;
+                   return s3_error_response(S3Error::new(
+                       myfsio_common::error::S3ErrorCode::InternalError,
+                       format!("Decryption failed: {}", e),
+                   ));
+               }
+               let plaintext_size = tokio::fs::metadata(&dec_tmp)
+                   .await
+                   .map(|m| m.len())
+                   .unwrap_or(0);
+               (dec_tmp, plaintext_size, Some(enc_info.algorithm.as_str()))
+           }
+           (Some(_), None) => {
+               let _ = tokio::fs::remove_file(&snap_link).await;
+               return s3_error_response(S3Error::new(
+                   myfsio_common::error::S3ErrorCode::InternalError,
+                   "Object is encrypted but encryption service is disabled".to_string(),
+               ));
+           }
+           (None, _) => (snap_link.clone(), meta.size, None),
+       };
+
+   let (start, end) = match parse_range(range_str, plaintext_size) {
+       Some(r) => r,
+       None => {
+           let _ = tokio::fs::remove_file(&body_path).await;
+           return s3_error_response(S3Error::new(
+               myfsio_common::error::S3ErrorCode::InvalidRange,
+               format!("Range not satisfiable for size {}", plaintext_size),
+           ));
+       }
+   };
+
+   stream_partial_content(
+       state,
+       &body_path,
+       start,
+       end,
+       plaintext_size,
+       &meta,
+       key,
+       query,
+       enc_header,
+       /* already_trimmed */ false,
+   )
+   .await
+}
+
+async fn stream_partial_content(
+   state: &AppState,
+   body_path: &std::path::Path,
+   start: u64,
+   end: u64,
+   plaintext_size: u64,
+   meta: &myfsio_common::types::ObjectMeta,
+   key: &str,
+   query: &ObjectQuery,
+   enc_header: Option<&str>,
+   already_trimmed: bool,
+) -> Response {
    let length = end - start + 1;

+   let mut file = match open_self_deleting(body_path.to_path_buf()).await {
+       Ok(f) => f,
+       Err(e) => {
+           let _ = tokio::fs::remove_file(body_path).await;
+           return storage_err_response(myfsio_storage::error::StorageError::Io(e));
+       }
+   };
+
+   if !already_trimmed {
+       if let Err(e) = file.seek(std::io::SeekFrom::Start(start)).await {
+           return storage_err_response(myfsio_storage::error::StorageError::Io(e));
+       }
+   }
    let limited = file.take(length);
-   let stream = ReaderStream::with_capacity(limited, 256 * 1024);
+   let stream_cap = state.config.stream_chunk_size.max(64 * 1024);
+   let stream = ReaderStream::with_capacity(limited, stream_cap);
    let body = Body::from_stream(stream);

    let mut headers = HeaderMap::new();
    headers.insert("content-length", length.to_string().parse().unwrap());
    headers.insert(
        "content-range",
-       format!("bytes {}-{}/{}", start, end, total_size)
+       format!("bytes {}-{}/{}", start, end, plaintext_size)
            .parse()
            .unwrap(),
    );
@@ -2680,10 +2779,19 @@ async fn range_get_handler(
    }
    insert_content_type(&mut headers, key, meta.content_type.as_deref());
    headers.insert("accept-ranges", "bytes".parse().unwrap());
+   if let Some(alg) = enc_header {
+       headers.insert("x-amz-server-side-encryption", alg.parse().unwrap());
+   }
+   apply_stored_response_headers(&mut headers, &meta.internal_metadata);
+   apply_stored_checksum_headers(&mut headers, &meta.internal_metadata);
    if let Some(ref requested_version) = query.version_id {
        if let Ok(value) = requested_version.parse() {
            headers.insert("x-amz-version-id", value);
        }
    } else if let Some(ref vid) = meta.version_id {
        if let Ok(value) = vid.parse() {
            headers.insert("x-amz-version-id", value);
        }
    }

    apply_response_overrides(&mut headers, query);
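parse_range itself is outside this diff; the tests further below pin down the three RFC 7233 byte-range forms the handler must accept. A hedged sketch of the assumed semantics, written only to make the test expectations concrete (the real parse_range lives elsewhere in the server and may differ in details):

// Assumed semantics only. Returns an inclusive (start, end), or None when
// the range cannot be satisfied against the given size.
fn parse_range_sketch(spec: &str, size: u64) -> Option<(u64, u64)> {
    let spec = spec.strip_prefix("bytes=")?;
    let (a, b) = spec.split_once('-')?;
    match (a, b) {
        ("", suf) => {                       // bytes=-1024: last N bytes
            let n: u64 = suf.parse().ok()?;
            (n > 0 && size > 0).then(|| (size.saturating_sub(n), size - 1))
        }
        (s, "") => {                         // bytes=90000-: open ended
            let start: u64 = s.parse().ok()?;
            (start < size).then(|| (start, size - 1))
        }
        (s, e) => {                          // bytes=60000-140000: two sided
            let (start, end): (u64, u64) = (s.parse().ok()?, e.parse().ok()?);
            (start <= end && start < size).then(|| (start, end.min(size - 1)))
        }
    }
}

fn main() {
    assert_eq!(parse_range_sketch("bytes=-1024", 200_000), Some((198_976, 199_999)));
    assert_eq!(parse_range_sketch("bytes=90000-", 100_000), Some((90_000, 99_999)));
    assert_eq!(parse_range_sketch("bytes=20000-30000", 10_000), None);
}

Note that for encrypted objects the size fed to parse_range is the stored plaintext_size, not the ciphertext file size, which is exactly what the fast path above relies on.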
@@ -50,6 +50,7 @@ impl AppState {
            bucket_config_cache_ttl: Duration::from_secs_f64(
                config.bucket_config_cache_ttl_seconds,
            ),
+           stream_chunk_size: config.stream_chunk_size,
        },
    ));
    let iam = Arc::new(IamService::new_with_secret(
@@ -4853,3 +4853,197 @@ async fn test_kms_encrypt_decrypt() {
    let result = B64.decode(pt_b64).unwrap();
    assert_eq!(result, plaintext);
}
+
+fn deterministic_payload(len: usize) -> Vec<u8> {
+    (0..len).map(|i| ((i * 2654435761usize) >> 16) as u8).collect()
+}
+
+async fn put_sse_s3(
+    app: &axum::routing::RouterIntoService<Body>,
+    bucket: &str,
+    key: &str,
+    body: Vec<u8>,
+) {
+    let req = Request::builder()
+        .method(Method::PUT)
+        .uri(format!("/{}", bucket))
+        .header("x-access-key", TEST_ACCESS_KEY)
+        .header("x-secret-key", TEST_SECRET_KEY)
+        .body(Body::empty())
+        .unwrap();
+    let _ = tower::ServiceExt::oneshot(app.clone(), req).await.unwrap();
+
+    let req = Request::builder()
+        .method(Method::PUT)
+        .uri(format!("/{}/{}", bucket, key))
+        .header("x-access-key", TEST_ACCESS_KEY)
+        .header("x-secret-key", TEST_SECRET_KEY)
+        .header("x-amz-server-side-encryption", "AES256")
+        .header("content-type", "application/octet-stream")
+        .body(Body::from(body))
+        .unwrap();
+    let resp = tower::ServiceExt::oneshot(app.clone(), req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::OK);
+}
+
+async fn range_get(
+    app: &axum::routing::RouterIntoService<Body>,
+    uri: &str,
+    range: &str,
+    extra_headers: &[(&str, &str)],
+) -> axum::http::Response<Body> {
+    let mut builder = Request::builder()
+        .method(Method::GET)
+        .uri(uri)
+        .header("x-access-key", TEST_ACCESS_KEY)
+        .header("x-secret-key", TEST_SECRET_KEY)
+        .header("range", range);
+    for (k, v) in extra_headers {
+        builder = builder.header(*k, *v);
+    }
+    tower::ServiceExt::oneshot(app.clone(), builder.body(Body::empty()).unwrap())
+        .await
+        .unwrap()
+}
+
+async fn body_bytes(resp: axum::http::Response<Body>) -> Vec<u8> {
+    resp.into_body().collect().await.unwrap().to_bytes().to_vec()
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_get_multi_chunk() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let payload = deterministic_payload(200_000);
+    put_sse_s3(&app, "rng-mc", "obj.bin", payload.clone()).await;
+
+    let resp = range_get(&app, "/rng-mc/obj.bin", "bytes=60000-140000", &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    assert_eq!(resp.headers().get("content-length").unwrap(), "80001");
+    assert_eq!(
+        resp.headers().get("content-range").unwrap(),
+        "bytes 60000-140000/200000"
+    );
+    assert_eq!(
+        resp.headers().get("x-amz-server-side-encryption").unwrap(),
+        "AES256"
+    );
+    assert_eq!(body_bytes(resp).await, payload[60000..=140000]);
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_get_within_single_chunk() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let payload = deterministic_payload(200_000);
+    put_sse_s3(&app, "rng-sc", "obj.bin", payload.clone()).await;
+
+    let resp = range_get(&app, "/rng-sc/obj.bin", "bytes=100-4999", &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    assert_eq!(resp.headers().get("content-length").unwrap(), "4900");
+    assert_eq!(body_bytes(resp).await, payload[100..=4999]);
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_get_suffix() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let payload = deterministic_payload(200_000);
+    put_sse_s3(&app, "rng-sx", "obj.bin", payload.clone()).await;
+
+    let resp = range_get(&app, "/rng-sx/obj.bin", "bytes=-1024", &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    assert_eq!(resp.headers().get("content-length").unwrap(), "1024");
+    assert_eq!(
+        resp.headers().get("content-range").unwrap(),
+        "bytes 198976-199999/200000"
+    );
+    assert_eq!(body_bytes(resp).await, payload[198_976..]);
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_get_final_partial_chunk() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let size = 65_536 + 12_345;
+    let payload = deterministic_payload(size);
+    put_sse_s3(&app, "rng-fp", "obj.bin", payload.clone()).await;
+
+    let last_start = 70_000;
+    let last_end = size as u64 - 1;
+    let range = format!("bytes={}-{}", last_start, last_end);
+    let resp = range_get(&app, "/rng-fp/obj.bin", &range, &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    let expected_len = (last_end - last_start + 1).to_string();
+    assert_eq!(
+        resp.headers().get("content-length").unwrap(),
+        &expected_len.as_str()
+    );
+    assert_eq!(
+        body_bytes(resp).await,
+        payload[last_start as usize..=last_end as usize]
+    );
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_get_open_ended() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let payload = deterministic_payload(100_000);
+    put_sse_s3(&app, "rng-oe", "obj.bin", payload.clone()).await;
+
+    let resp = range_get(&app, "/rng-oe/obj.bin", "bytes=90000-", &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    assert_eq!(resp.headers().get("content-length").unwrap(), "10000");
+    assert_eq!(
+        resp.headers().get("content-range").unwrap(),
+        "bytes 90000-99999/100000"
+    );
+    assert_eq!(body_bytes(resp).await, payload[90_000..]);
+}
+
+#[tokio::test]
+async fn test_sse_s3_range_unsatisfiable_for_plaintext_size() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let payload = deterministic_payload(10_000);
+    put_sse_s3(&app, "rng-un", "obj.bin", payload).await;
+
+    let resp = range_get(&app, "/rng-un/obj.bin", "bytes=20000-30000", &[]).await;
+    assert!(
+        resp.status() == StatusCode::RANGE_NOT_SATISFIABLE
+            || resp.status() == StatusCode::BAD_REQUEST,
+        "unexpected status: {}",
+        resp.status()
+    );
+}
+
+#[tokio::test]
+async fn test_plaintext_range_still_works() {
+    let (app, _tmp) = test_app_encrypted().await;
+    let app = app.into_service();
+    let req = Request::builder()
+        .method(Method::PUT)
+        .uri("/plain-rng")
+        .header("x-access-key", TEST_ACCESS_KEY)
+        .header("x-secret-key", TEST_SECRET_KEY)
+        .body(Body::empty())
+        .unwrap();
+    let _ = tower::ServiceExt::oneshot(app.clone(), req).await.unwrap();
+
+    let payload = deterministic_payload(8_000);
+    let req = Request::builder()
+        .method(Method::PUT)
+        .uri("/plain-rng/obj.bin")
+        .header("x-access-key", TEST_ACCESS_KEY)
+        .header("x-secret-key", TEST_SECRET_KEY)
+        .body(Body::from(payload.clone()))
+        .unwrap();
+    let resp = tower::ServiceExt::oneshot(app.clone(), req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::OK);
+
+    let resp = range_get(&app, "/plain-rng/obj.bin", "bytes=100-199", &[]).await;
+    assert_eq!(resp.status(), StatusCode::PARTIAL_CONTENT);
+    assert!(resp.headers().get("x-amz-server-side-encryption").is_none());
+    assert_eq!(body_bytes(resp).await, payload[100..=199]);
+}
@@ -9,6 +9,7 @@ myfsio-crypto = { path = "../myfsio-crypto" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
+tokio-util = { workspace = true }
dashmap = { workspace = true }
parking_lot = { workspace = true }
uuid = { workspace = true }
(File diff suppressed because it is too large.)
@@ -30,8 +30,44 @@ pub trait StorageEngine: Send + Sync {
        key: &str,
    ) -> StorageResult<(ObjectMeta, AsyncReadStream)>;

+   async fn get_object_range(
+       &self,
+       bucket: &str,
+       key: &str,
+       start: u64,
+       len: Option<u64>,
+   ) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
+
+   async fn get_object_snapshot(
+       &self,
+       bucket: &str,
+       key: &str,
+   ) -> StorageResult<(ObjectMeta, tokio::fs::File)>;
+
+   async fn get_object_version_snapshot(
+       &self,
+       bucket: &str,
+       key: &str,
+       version_id: &str,
+   ) -> StorageResult<(ObjectMeta, tokio::fs::File)>;
+
    async fn get_object_path(&self, bucket: &str, key: &str) -> StorageResult<PathBuf>;

+   async fn snapshot_object_to_link(
+       &self,
+       bucket: &str,
+       key: &str,
+       link_path: &std::path::Path,
+   ) -> StorageResult<ObjectMeta>;
+
+   async fn snapshot_object_version_to_link(
+       &self,
+       bucket: &str,
+       key: &str,
+       version_id: &str,
+       link_path: &std::path::Path,
+   ) -> StorageResult<ObjectMeta>;
+
    async fn head_object(&self, bucket: &str, key: &str) -> StorageResult<ObjectMeta>;

    async fn get_object_version(
@@ -41,6 +77,15 @@ pub trait StorageEngine: Send + Sync {
        version_id: &str,
    ) -> StorageResult<(ObjectMeta, AsyncReadStream)>;

+   async fn get_object_version_range(
+       &self,
+       bucket: &str,
+       key: &str,
+       version_id: &str,
+       start: u64,
+       len: Option<u64>,
+   ) -> StorageResult<(ObjectMeta, AsyncReadStream)>;
+
    async fn get_object_version_path(
        &self,
        bucket: &str,
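The filesystem implementation of these snapshot methods lives in the diff suppressed above, but the trait shape implies the mechanism the handlers rely on: materialize a stable path for the object's current bytes so a read cannot race a concurrent overwrite. A hedged sketch of the hardlink approach (illustrative only; the real implementation also resolves versioned paths, fills in ObjectMeta, and handles errors more carefully):

use std::path::Path;

// Sketch: pin the object's current bytes under link_path via a hardlink so a
// later PUT that replaces the original path cannot change what gets served.
// Falls back to a copy on filesystems without hardlink support.
async fn snapshot_to_link_sketch(object_path: &Path, link_path: &Path) -> std::io::Result<()> {
    match tokio::fs::hard_link(object_path, link_path).await {
        Ok(()) => Ok(()),
        Err(_) => tokio::fs::copy(object_path, link_path).await.map(|_| ()),
    }
}

Combined with open_self_deleting in the handlers, the hardlink never outlives the request: it is removed as soon as a file descriptor (or the decrypted temp file) has been obtained from it.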