Improve object storage performance via caching
app/s3_api.py
@@ -23,7 +23,7 @@ from .storage import ObjectStorage, StorageError, QuotaExceededError
 s3_api_bp = Blueprint("s3_api", __name__)
 
 
-# ---------------------- helpers ----------------------
+# Helper functions for accessing app extensions and generating responses
 def _storage() -> ObjectStorage:
     return current_app.extensions["object_storage"]
 
@@ -69,8 +69,7 @@ def _get_signature_key(key: str, date_stamp: str, region_name: str, service_name
 
 
 def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
-    # Parse Authorization header
-    # AWS4-HMAC-SHA256 Credential=AKIA.../20230101/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=...
+    # Parse Authorization header: AWS4-HMAC-SHA256 Credential=AKIA.../20230101/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=...
     match = re.match(
         r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)",
         auth_header,
@@ -79,17 +78,14 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
         return None
 
     access_key, date_stamp, region, service, signed_headers_str, signature = match.groups()
 
-    # Get secret key
     secret_key = _iam().get_secret_key(access_key)
     if not secret_key:
         raise IamError("Invalid access key")
 
-    # Canonical Request
+    # Build canonical request
     method = req.method
     canonical_uri = quote(req.path, safe="/-_.~")
-
-    # Canonical Query String
     query_args = []
     for key, value in req.args.items(multi=True):
         query_args.append((key, value))
@@ -100,7 +96,6 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
         canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
     canonical_query_string = "&".join(canonical_query_parts)
 
-    # Canonical Headers
     signed_headers_list = signed_headers_str.split(";")
     canonical_headers_parts = []
     for header in signed_headers_list:
@@ -112,18 +107,13 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
         canonical_headers_parts.append(f"{header.lower()}:{header_val}\n")
     canonical_headers = "".join(canonical_headers_parts)
 
-    # Payload Hash
     payload_hash = req.headers.get("X-Amz-Content-Sha256")
     if not payload_hash:
         payload_hash = hashlib.sha256(req.get_data()).hexdigest()
 
     canonical_request = f"{method}\n{canonical_uri}\n{canonical_query_string}\n{canonical_headers}\n{signed_headers_str}\n{payload_hash}"
 
-    # String to Sign
-    amz_date = req.headers.get("X-Amz-Date")
-    if not amz_date:
-        amz_date = req.headers.get("Date")
-
+    amz_date = req.headers.get("X-Amz-Date") or req.headers.get("Date")
     if not amz_date:
         raise IamError("Missing Date header")
 
@@ -134,13 +124,13 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
 
     now = datetime.now(timezone.utc)
     time_diff = abs((now - request_time).total_seconds())
-    if time_diff > 900:  # 15 minutes
+    if time_diff > 900:  # AWS standard: 15-minute request validity window
         raise IamError("Request timestamp too old or too far in the future")
 
     required_headers = {'host', 'x-amz-date'}
     signed_headers_set = set(signed_headers_str.split(';'))
     if not required_headers.issubset(signed_headers_set):
-        # Some clients might sign 'date' instead of 'x-amz-date'
+        # Some clients use 'date' instead of 'x-amz-date'
         if 'date' in signed_headers_set:
            required_headers.remove('x-amz-date')
            required_headers.add('date')
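The parse that produces request_time sits between these hunks and is not shown. As a hypothetical sketch (the variable names are assumptions), SigV4 timestamps use the fixed layout YYYYMMDD'T'HHMMSS'Z', so it presumably looks roughly like:

    from datetime import datetime, timezone

    # Hypothetical reconstruction of the elided parse; X-Amz-Date uses the
    # ISO-8601 basic format, e.g. 20230101T120000Z.
    request_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)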
@@ -187,11 +177,10 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
     if not secret_key:
         raise IamError("Invalid access key")
 
-    # Canonical Request
+    # Build canonical request
     method = req.method
     canonical_uri = quote(req.path, safe="/-_.~")
 
-    # Canonical Query String
     query_args = []
     for key, value in req.args.items(multi=True):
         if key != "X-Amz-Signature":
@@ -203,7 +192,6 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
         canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
     canonical_query_string = "&".join(canonical_query_parts)
 
-    # Canonical Headers
     signed_headers_list = signed_headers_str.split(";")
     canonical_headers_parts = []
     for header in signed_headers_list:
@@ -212,7 +200,6 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
         canonical_headers_parts.append(f"{header}:{val}\n")
     canonical_headers = "".join(canonical_headers_parts)
 
-    # Payload Hash
     payload_hash = "UNSIGNED-PAYLOAD"
 
     canonical_request = "\n".join([
@@ -224,7 +211,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
         payload_hash
     ])
 
-    # String to Sign
+    # Build signature
     algorithm = "AWS4-HMAC-SHA256"
     credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"
     hashed_request = hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
@@ -235,7 +222,6 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
         hashed_request
     ])
 
-    # Signature
     signing_key = _get_signature_key(secret_key, date_stamp, region, service)
     calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
 
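For context, the _get_signature_key helper used here is not part of this diff; the AWS-documented SigV4 derivation chains four HMAC-SHA256 steps over the credential scope components. A minimal illustrative sketch (a stand-in, not the repository's implementation):

    import hashlib
    import hmac

    def derive_signing_key(secret_key: str, date_stamp: str, region: str, service: str) -> bytes:
        # Each HMAC step folds in the next scope component, per the AWS SigV4 spec.
        k_date = hmac.new(("AWS4" + secret_key).encode("utf-8"), date_stamp.encode("utf-8"), hashlib.sha256).digest()
        k_region = hmac.new(k_date, region.encode("utf-8"), hashlib.sha256).digest()
        k_service = hmac.new(k_region, service.encode("utf-8"), hashlib.sha256).digest()
        return hmac.new(k_service, b"aws4_request", hashlib.sha256).digest()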
@@ -493,7 +479,7 @@ def _generate_presigned_url(
     }
     canonical_query = _encode_query_params(query_params)
 
-    # Determine host and scheme from config or request
+    # Get presigned URL host and scheme from config or request headers
     api_base = current_app.config.get("API_BASE_URL")
     if api_base:
         parsed = urlparse(api_base)
@@ -914,7 +900,7 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
     if error:
         return error
 
-    # For tagging, we use read permission for GET, write for PUT/DELETE
+    # Use read permission for GET, write for PUT/DELETE
     action = "read" if request.method == "GET" else "write"
     try:
         _authorize_action(principal, bucket_name, action, object_key=object_key)
@@ -1093,10 +1079,9 @@ def _bucket_location_handler(bucket_name: str) -> Response:
     if not storage.bucket_exists(bucket_name):
         return _error_response("NoSuchBucket", "Bucket does not exist", 404)
 
-    # Return the configured AWS_REGION
+    # Return bucket location (empty for us-east-1 per AWS spec)
     region = current_app.config.get("AWS_REGION", "us-east-1")
     root = Element("LocationConstraint")
-    # AWS returns empty for us-east-1, but we'll be explicit
     root.text = region if region != "us-east-1" else None
     return _xml_response(root)
 
@@ -1116,13 +1101,12 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
         return _error_response("NoSuchBucket", "Bucket does not exist", 404)
 
     if request.method == "PUT":
-        # We don't fully implement ACLs, but we accept the request for compatibility
-        # Check for canned ACL header
+        # Accept canned ACL headers for S3 compatibility (not fully implemented)
         canned_acl = request.headers.get("x-amz-acl", "private")
         current_app.logger.info("Bucket ACL set (canned)", extra={"bucket": bucket_name, "acl": canned_acl})
         return Response(status=200)
 
-    # GET - Return a basic ACL document showing full control for owner
+    # Return basic ACL document showing owner's full control
     root = Element("AccessControlPolicy")
     owner = SubElement(root, "Owner")
     SubElement(owner, "ID").text = principal.access_key if principal else "anonymous"
@@ -1188,10 +1172,10 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
             is_truncated = True
             break
 
-        # Current version
+        # Add current version to response
         version = SubElement(root, "Version")
         SubElement(version, "Key").text = obj.key
-        SubElement(version, "VersionId").text = "null"  # Current version ID
+        SubElement(version, "VersionId").text = "null"
         SubElement(version, "IsLatest").text = "true"
         SubElement(version, "LastModified").text = obj.last_modified.strftime("%Y-%m-%dT%H:%M:%S.000Z")
         SubElement(version, "ETag").text = f'"{obj.etag}"'
@@ -1296,7 +1280,7 @@ def _render_lifecycle_config(config: list) -> Element:
 
         SubElement(rule_el, "Status").text = rule.get("Status", "Enabled")
 
-        # Expiration
+        # Add expiration rule if present
         if "Expiration" in rule:
             exp = rule["Expiration"]
             exp_el = SubElement(rule_el, "Expiration")
@@ -1307,14 +1291,14 @@ def _render_lifecycle_config(config: list) -> Element:
             if exp.get("ExpiredObjectDeleteMarker"):
                 SubElement(exp_el, "ExpiredObjectDeleteMarker").text = "true"
 
-        # NoncurrentVersionExpiration
+        # Add noncurrent version expiration if present
         if "NoncurrentVersionExpiration" in rule:
             nve = rule["NoncurrentVersionExpiration"]
             nve_el = SubElement(rule_el, "NoncurrentVersionExpiration")
             if "NoncurrentDays" in nve:
                 SubElement(nve_el, "NoncurrentDays").text = str(nve["NoncurrentDays"])
 
-        # AbortIncompleteMultipartUpload
+        # Add incomplete multipart upload cleanup if present
        if "AbortIncompleteMultipartUpload" in rule:
            aimu = rule["AbortIncompleteMultipartUpload"]
            aimu_el = SubElement(rule_el, "AbortIncompleteMultipartUpload")
@@ -1338,29 +1322,29 @@ def _parse_lifecycle_config(payload: bytes) -> list:
     for rule_el in root.findall("{*}Rule") or root.findall("Rule"):
         rule: dict = {}
 
-        # ID
+        # Extract rule ID
         id_el = rule_el.find("{*}ID") or rule_el.find("ID")
         if id_el is not None and id_el.text:
             rule["ID"] = id_el.text.strip()
 
-        # Filter/Prefix
+        # Extract filter prefix
         filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter")
         if filter_el is not None:
             prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix")
             if prefix_el is not None and prefix_el.text:
                 rule["Prefix"] = prefix_el.text
 
-        # Legacy Prefix (outside Filter)
+        # Fall back to legacy Prefix element (outside Filter)
         if "Prefix" not in rule:
             prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix")
             if prefix_el is not None:
                 rule["Prefix"] = prefix_el.text or ""
 
-        # Status
+        # Extract status
         status_el = rule_el.find("{*}Status") or rule_el.find("Status")
         rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"
 
-        # Expiration
+        # Parse expiration rule
         exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration")
         if exp_el is not None:
             expiration: dict = {}
@@ -1564,7 +1548,7 @@ def _bulk_delete_handler(bucket_name: str) -> Response:
     return _xml_response(result, status=200)
 
 
-# ---------------------- routes ----------------------
+# Route handlers for S3 API endpoints
 @s3_api_bp.get("/")
 @limiter.limit("60 per minute")
 def list_buckets() -> Response:
@@ -1642,7 +1626,7 @@ def bucket_handler(bucket_name: str) -> Response:
         current_app.logger.info("Bucket deleted", extra={"bucket": bucket_name})
         return Response(status=204)
 
-    # GET - list objects (supports both ListObjects and ListObjectsV2)
+    # Handle GET - list objects (supports both ListObjects and ListObjectsV2)
     principal, error = _require_principal()
     try:
         _authorize_action(principal, bucket_name, "list")
@@ -1650,18 +1634,13 @@ def bucket_handler(bucket_name: str) -> Response:
         if error:
             return error
         return _error_response("AccessDenied", str(exc), 403)
-    try:
-        objects = storage.list_objects_all(bucket_name)
-    except StorageError as exc:
-        return _error_response("NoSuchBucket", str(exc), 404)
-
     # Check if this is ListObjectsV2 (list-type=2)
     list_type = request.args.get("list-type")
     prefix = request.args.get("prefix", "")
     delimiter = request.args.get("delimiter", "")
     max_keys = min(int(request.args.get("max-keys", current_app.config["UI_PAGE_SIZE"])), 1000)
 
-    # Pagination markers
+    # Use appropriate markers for pagination depending on API version
     marker = request.args.get("marker", "")  # ListObjects v1
     continuation_token = request.args.get("continuation-token", "")  # ListObjectsV2
     start_after = request.args.get("start-after", "")  # ListObjectsV2
@@ -1681,11 +1660,18 @@ def bucket_handler(bucket_name: str) -> Response:
     else:
         effective_start = marker
 
-    if prefix:
-        objects = [obj for obj in objects if obj.key.startswith(prefix)]
-
-    if effective_start:
-        objects = [obj for obj in objects if obj.key > effective_start]
+    # Fetch with buffer for delimiter processing; delimiter requires extra objects to compute prefixes
+    fetch_keys = max_keys * 10 if delimiter else max_keys
+    try:
+        list_result = storage.list_objects(
+            bucket_name,
+            max_keys=fetch_keys,
+            continuation_token=effective_start or None,
+            prefix=prefix or None,
+        )
+        objects = list_result.objects
+    except StorageError as exc:
+        return _error_response("NoSuchBucket", str(exc), 404)
 
     common_prefixes: list[str] = []
     filtered_objects: list = []
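The 10x fetch buffer compensates for delimiter grouping: many fetched keys can collapse into a single CommonPrefixes entry, so max_keys raw objects may yield far fewer than max_keys result items. A standalone illustration of that collapse (keys are made up):

    # Three "logs/" keys fold into one common prefix, leaving two result items.
    keys = ["logs/2023/a.txt", "logs/2023/b.txt", "logs/2024/c.txt", "readme.md"]
    prefix, delimiter = "", "/"
    prefixes: set[str] = set()
    contents: list[str] = []
    for key in keys:
        rest = key[len(prefix):]
        if delimiter in rest:
            prefixes.add(prefix + rest.split(delimiter)[0] + delimiter)
        else:
            contents.append(key)
    print(sorted(prefixes), contents)  # ['logs/'] ['readme.md']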
@@ -1694,7 +1680,7 @@ def bucket_handler(bucket_name: str) -> Response:
     for obj in objects:
         key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
         if delimiter in key_after_prefix:
-            # This is a "folder" - extract the common prefix
+            # Extract common prefix (folder-like structure)
             common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter
             if common_prefix not in seen_prefixes:
                 seen_prefixes.add(common_prefix)
@@ -1705,7 +1691,7 @@ def bucket_handler(bucket_name: str) -> Response:
     common_prefixes = sorted(common_prefixes)
 
     total_items = len(objects) + len(common_prefixes)
-    is_truncated = total_items > max_keys
+    is_truncated = total_items > max_keys or list_result.is_truncated
 
     if len(objects) >= max_keys:
         objects = objects[:max_keys]
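On the client side, the IsTruncated flag and NextContinuationToken drive the standard ListObjectsV2 pagination loop. A sketch against this endpoint using boto3 (bucket name and endpoint URL are placeholders):

    import boto3

    s3 = boto3.client("s3", endpoint_url="http://localhost:5000")  # placeholder endpoint
    token = None
    while True:
        kwargs = {"Bucket": "my-bucket", "MaxKeys": 100}  # placeholder bucket
        if token:
            kwargs["ContinuationToken"] = token
        page = s3.list_objects_v2(**kwargs)
        for obj in page.get("Contents", []):
            print(obj["Key"])
        if not page.get("IsTruncated"):
            break
        token = page["NextContinuationToken"]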
@@ -1845,9 +1831,8 @@ def object_handler(bucket_name: str, object_key: str):
         response = Response(status=200)
         response.headers["ETag"] = f'"{meta.etag}"'
 
-        # Trigger replication if not a replication request
-        user_agent = request.headers.get("User-Agent", "")
-        if "S3ReplicationAgent" not in user_agent:
+        # Trigger replication for non-replication requests
+        if "S3ReplicationAgent" not in request.headers.get("User-Agent", ""):
             _replication_manager().trigger_replication(bucket_name, object_key, action="write")
 
         return response
@@ -1866,17 +1851,15 @@ def object_handler(bucket_name: str, object_key: str):
     metadata = storage.get_object_metadata(bucket_name, object_key)
     mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
 
-    # Check if object is encrypted and needs decryption
+    # Decrypt encrypted objects
     is_encrypted = "x-amz-server-side-encryption" in metadata
 
     if request.method == "GET":
         if is_encrypted and hasattr(storage, 'get_object_data'):
-            # Use encrypted storage to decrypt
             try:
                 data, clean_metadata = storage.get_object_data(bucket_name, object_key)
                 response = Response(data, mimetype=mimetype)
                 logged_bytes = len(data)
-                # Use decrypted size for Content-Length
                 response.headers["Content-Length"] = len(data)
                 etag = hashlib.md5(data).hexdigest()
             except StorageError as exc: