MyFSIO v0.3.6 Release

Reviewed-on: #29
Merged in pull request #29 on 2026-03-08 04:46:31 +00:00
7 changed files with 154 additions and 81 deletions

View File

@@ -486,10 +486,6 @@ def _configure_logging(app: Flask) -> None:
         g.request_id = f"{os.getpid():x}{next(_request_counter):012x}"
         g.request_started_at = time.perf_counter()
         g.request_bytes_in = request.content_length or 0
-        app.logger.info(
-            "Request started",
-            extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
-        )
 
     @app.before_request
     def _maybe_serve_website():
@@ -620,14 +616,15 @@ def _configure_logging(app: Flask) -> None:
         duration_ms = (time.perf_counter() - g.request_started_at) * 1000
         request_id = getattr(g, "request_id", f"{os.getpid():x}{next(_request_counter):012x}")
         response.headers.setdefault("X-Request-ID", request_id)
-        app.logger.info(
-            "Request completed",
-            extra={
-                "path": request.path,
-                "method": request.method,
-                "remote_addr": request.remote_addr,
-            },
-        )
+        if app.logger.isEnabledFor(logging.INFO):
+            app.logger.info(
+                "Request completed",
+                extra={
+                    "path": request.path,
+                    "method": request.method,
+                    "remote_addr": request.remote_addr,
+                },
+            )
         response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
         operation_metrics = app.extensions.get("operation_metrics")
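Note on the logging change above: the `isEnabledFor` guard skips both the call and the construction of the `extra` dict when INFO is disabled, which matters on hot request paths. A minimal sketch of the idiom (standalone, not MyFSIO code):

    import logging

    logger = logging.getLogger("demo")
    logger.setLevel(logging.WARNING)  # INFO records would be discarded anyway

    def handle_request(path: str, nbytes: int) -> None:
        # Without the guard, the extra dict is still built before logging
        # drops the record; with it, the hot path pays only a level check.
        if logger.isEnabledFor(logging.INFO):
            logger.info("Request completed", extra={"path": path, "bytes": nbytes})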

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import ipaddress
 import json
+import os
 import re
 import time
 from dataclasses import dataclass, field
@@ -268,7 +269,7 @@ class BucketPolicyStore:
         self._last_mtime = self._current_mtime()
         # Performance: Avoid stat() on every request
         self._last_stat_check = 0.0
-        self._stat_check_interval = 1.0  # Only check mtime every 1 second
+        self._stat_check_interval = float(os.environ.get("BUCKET_POLICY_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
 
     def maybe_reload(self) -> None:
         # Performance: Skip stat check if we checked recently
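The interval above throttles how often `maybe_reload` is allowed to call `stat()`. A sketch of the throttled-reload pattern (the body of `maybe_reload` is not part of this diff, so the internals below are assumptions):

    import os
    import time

    class ReloadableConfig:
        def __init__(self, path: str) -> None:
            self._path = path
            self._last_mtime = os.stat(path).st_mtime
            self._last_stat_check = 0.0
            self._stat_check_interval = float(os.environ.get("STAT_CHECK_INTERVAL_SECONDS", "2.0"))

        def maybe_reload(self) -> None:
            now = time.monotonic()
            if now - self._last_stat_check < self._stat_check_interval:
                return  # skip the stat() entirely; we checked recently
            self._last_stat_check = now
            mtime = os.stat(self._path).st_mtime
            if mtime != self._last_mtime:
                self._last_mtime = mtime
                self._reload()  # hypothetical re-parse hook

        def _reload(self) -> None:
            ...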

View File

@@ -125,7 +125,7 @@ class IamService:
         self._secret_key_cache: Dict[str, Tuple[str, float]] = {}
         self._cache_ttl = float(os.environ.get("IAM_CACHE_TTL_SECONDS", "5.0"))
         self._last_stat_check = 0.0
-        self._stat_check_interval = 1.0
+        self._stat_check_interval = float(os.environ.get("IAM_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
         self._sessions: Dict[str, Dict[str, Any]] = {}
         self._session_lock = threading.Lock()
         self._load()

View File

@@ -85,6 +85,9 @@ def _bucket_policies() -> BucketPolicyStore:
 
 def _build_policy_context() -> Dict[str, Any]:
+    cached = getattr(g, "_policy_context", None)
+    if cached is not None:
+        return cached
     ctx: Dict[str, Any] = {}
     if request.headers.get("Referer"):
         ctx["aws:Referer"] = request.headers.get("Referer")
@@ -98,6 +101,7 @@ def _build_policy_context() -> Dict[str, Any]:
     ctx["aws:SecureTransport"] = str(request.is_secure).lower()
     if request.headers.get("User-Agent"):
         ctx["aws:UserAgent"] = request.headers.get("User-Agent")
+    g._policy_context = ctx
     return ctx
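Caching the context on `flask.g` memoizes it for the lifetime of a single request; `g` is torn down with the application context, so nothing leaks across requests. The idiom in isolation:

    from flask import Flask, g

    app = Flask(__name__)

    def expensive_context() -> dict:
        cached = getattr(g, "_ctx", None)
        if cached is not None:
            return cached          # second and later calls within the request are free
        ctx = {"computed": True}   # stands in for the header inspection above
        g._ctx = ctx
        return ctx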
@@ -1021,11 +1025,15 @@ def _apply_object_headers(
     file_stat,
     metadata: Dict[str, str] | None,
     etag: str,
+    size_override: int | None = None,
+    mtime_override: float | None = None,
 ) -> None:
-    if file_stat is not None:
-        if response.status_code != 206:
-            response.headers["Content-Length"] = str(file_stat.st_size)
-        response.headers["Last-Modified"] = http_date(file_stat.st_mtime)
+    effective_size = size_override if size_override is not None else (file_stat.st_size if file_stat is not None else None)
+    effective_mtime = mtime_override if mtime_override is not None else (file_stat.st_mtime if file_stat is not None else None)
+    if effective_size is not None and response.status_code != 206:
+        response.headers["Content-Length"] = str(effective_size)
+    if effective_mtime is not None:
+        response.headers["Last-Modified"] = http_date(effective_mtime)
     response.headers["ETag"] = f'"{etag}"'
     response.headers["Accept-Ranges"] = "bytes"
     for key, value in (metadata or {}).items():
@@ -2820,6 +2828,8 @@ def object_handler(bucket_name: str, object_key: str):
         if validation_error:
             return _error_response("InvalidArgument", validation_error, 400)
 
+        metadata["__content_type__"] = content_type or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+
         try:
             meta = storage.put_object(
                 bucket_name,
@@ -2834,10 +2844,23 @@ def object_handler(bucket_name: str, object_key: str):
             if "Bucket" in message:
                 return _error_response("NoSuchBucket", message, 404)
             return _error_response("InvalidArgument", message, 400)
-        current_app.logger.info(
-            "Object uploaded",
-            extra={"bucket": bucket_name, "key": object_key, "size": meta.size},
-        )
+
+        content_md5 = request.headers.get("Content-MD5")
+        if content_md5 and meta.etag:
+            try:
+                expected_md5 = base64.b64decode(content_md5).hex()
+            except Exception:
+                storage.delete_object(bucket_name, object_key)
+                return _error_response("InvalidDigest", "Content-MD5 header is not valid base64", 400)
+            if expected_md5 != meta.etag:
+                storage.delete_object(bucket_name, object_key)
+                return _error_response("BadDigest", "The Content-MD5 you specified did not match what we received", 400)
+
+        if current_app.logger.isEnabledFor(logging.INFO):
+            current_app.logger.info(
+                "Object uploaded",
+                extra={"bucket": bucket_name, "key": object_key, "size": meta.size},
+            )
         response = Response(status=200)
         if meta.etag:
             response.headers["ETag"] = f'"{meta.etag}"'
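Content-MD5 carries the base64 encoding of the raw 16-byte MD5 digest, which is why the handler base64-decodes it and compares hex against the ETag. A client-side sketch (URL and bucket are placeholders):

    import base64
    import hashlib
    import urllib.request

    body = b"hello world"
    content_md5 = base64.b64encode(hashlib.md5(body).digest()).decode()
    # -> "XrY7u+Ae7tCTyyK7j1rNww==" (base64 of the raw digest, not of the hex string)

    req = urllib.request.Request(
        "http://localhost:5000/my-bucket/hello.txt",  # hypothetical endpoint
        data=body,
        method="PUT",
        headers={"Content-MD5": content_md5},
    )
    # urllib.request.urlopen(req)  # a corrupted body now yields 400 BadDigest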
@@ -2871,7 +2894,7 @@ def object_handler(bucket_name: str, object_key: str):
         except StorageError as exc:
             return _error_response("NoSuchKey", str(exc), 404)
         metadata = storage.get_object_metadata(bucket_name, object_key)
-        mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+        mimetype = metadata.get("__content_type__") or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
         is_encrypted = "x-amz-server-side-encryption" in metadata
@@ -2963,10 +2986,7 @@ def object_handler(bucket_name: str, object_key: str):
         response.headers["Content-Type"] = mimetype
         logged_bytes = 0
-        try:
-            file_stat = path.stat() if not is_encrypted else None
-        except (PermissionError, OSError):
-            file_stat = None
+        file_stat = stat if not is_encrypted else None
         _apply_object_headers(response, file_stat=file_stat, metadata=metadata, etag=etag)
 
         if request.method == "GET":
@@ -2983,8 +3003,9 @@ def object_handler(bucket_name: str, object_key: str):
             if value:
                 response.headers[header] = _sanitize_header_value(value)
-        action = "Object read" if request.method == "GET" else "Object head"
-        current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes})
+        if current_app.logger.isEnabledFor(logging.INFO):
+            action = "Object read" if request.method == "GET" else "Object head"
+            current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes})
         return response
 
     if "uploadId" in request.args:
@@ -3002,7 +3023,8 @@ def object_handler(bucket_name: str, object_key: str):
         storage.delete_object(bucket_name, object_key)
         lock_service.delete_object_lock_metadata(bucket_name, object_key)
-        current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key})
+        if current_app.logger.isEnabledFor(logging.INFO):
+            current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key})
         principal, _ = _require_principal()
         _notifications().emit_object_removed(
@@ -3343,12 +3365,20 @@ def head_object(bucket_name: str, object_key: str) -> Response:
         _authorize_action(principal, bucket_name, "read", object_key=object_key)
         path = _storage().get_object_path(bucket_name, object_key)
         metadata = _storage().get_object_metadata(bucket_name, object_key)
-        stat = path.stat()
         etag = metadata.get("__etag__") or _storage()._compute_etag(path)
-        response = Response(status=200)
-        _apply_object_headers(response, file_stat=stat, metadata=metadata, etag=etag)
-        response.headers["Content-Type"] = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+        cached_size = metadata.get("__size__")
+        cached_mtime = metadata.get("__last_modified__")
+        if cached_size is not None and cached_mtime is not None:
+            size_val = int(cached_size)
+            mtime_val = float(cached_mtime)
+            response = Response(status=200)
+            _apply_object_headers(response, file_stat=None, metadata=metadata, etag=etag, size_override=size_val, mtime_override=mtime_val)
+        else:
+            stat = path.stat()
+            response = Response(status=200)
+            _apply_object_headers(response, file_stat=stat, metadata=metadata, etag=etag)
+        response.headers["Content-Type"] = metadata.get("__content_type__") or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
         return response
     except (StorageError, FileNotFoundError):
         return _error_response("NoSuchKey", "Object not found", 404)
@@ -3578,10 +3608,12 @@ def _initiate_multipart_upload(bucket_name: str, object_key: str) -> Response:
         return error
     metadata = _extract_request_metadata()
+    content_type = request.headers.get("Content-Type")
+    metadata["__content_type__"] = content_type or mimetypes.guess_type(object_key)[0] or "application/octet-stream"
     try:
         upload_id = _storage().initiate_multipart_upload(
             bucket_name,
             object_key,
             metadata=metadata or None
         )
     except StorageError as exc:
@@ -3630,6 +3662,15 @@ def _upload_part(bucket_name: str, object_key: str) -> Response:
             return _error_response("NoSuchUpload", str(exc), 404)
         return _error_response("InvalidArgument", str(exc), 400)
 
+    content_md5 = request.headers.get("Content-MD5")
+    if content_md5 and etag:
+        try:
+            expected_md5 = base64.b64decode(content_md5).hex()
+        except Exception:
+            return _error_response("InvalidDigest", "Content-MD5 header is not valid base64", 400)
+        if expected_md5 != etag:
+            return _error_response("BadDigest", "The Content-MD5 you specified did not match what we received", 400)
+
     response = Response(status=200)
     response.headers["ETag"] = f'"{etag}"'
     return response

View File

@@ -361,7 +361,7 @@ class ObjectStorage:
         try:
             cache_path.parent.mkdir(parents=True, exist_ok=True)
-            cache_path.write_text(json.dumps(stats), encoding="utf-8")
+            self._atomic_write_json(cache_path, stats)
         except OSError:
             pass
@@ -423,7 +423,7 @@ class ObjectStorage:
         cache_path = self._system_bucket_root(bucket_id) / "stats.json"
         try:
             cache_path.parent.mkdir(parents=True, exist_ok=True)
-            cache_path.write_text(json.dumps(data), encoding="utf-8")
+            self._atomic_write_json(cache_path, data)
         except OSError:
             pass
@@ -879,11 +879,6 @@ class ObjectStorage:
         is_overwrite = destination.exists()
         existing_size = destination.stat().st_size if is_overwrite else 0
 
-        archived_version_size = 0
-        if self._is_versioning_enabled(bucket_path) and is_overwrite:
-            archived_version_size = existing_size
-            self._archive_current_version(bucket_id, safe_key, reason="overwrite")
-
         tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
         tmp_dir.mkdir(parents=True, exist_ok=True)
@@ -910,19 +905,21 @@ class ObjectStorage:
                         quota_check["quota"],
                         quota_check["usage"],
                     )
-                shutil.move(str(tmp_path), str(destination))
-            finally:
+            except BaseException:
                 if tmp_path:
                     try:
                         tmp_path.unlink(missing_ok=True)
                     except OSError:
                         pass
+                raise
         else:
             tmp_path = tmp_dir / f"{uuid.uuid4().hex}.tmp"
             try:
+                checksum = hashlib.md5()
                 with tmp_path.open("wb") as target:
-                    shutil.copyfileobj(stream, target)
+                    shutil.copyfileobj(_HashingReader(stream, checksum), target)
+                    target.flush()
+                    os.fsync(target.fileno())
                 new_size = tmp_path.stat().st_size
                 size_delta = new_size - existing_size
@@ -941,27 +938,43 @@ class ObjectStorage:
                         quota_check["quota"],
                         quota_check["usage"],
                     )
-
-                checksum = hashlib.md5()
-                with tmp_path.open("rb") as f:
-                    while True:
-                        chunk = f.read(1048576)
-                        if not chunk:
-                            break
-                        checksum.update(chunk)
                 etag = checksum.hexdigest()
-                shutil.move(str(tmp_path), str(destination))
-            finally:
+            except BaseException:
                 try:
                     tmp_path.unlink(missing_ok=True)
                 except OSError:
                     pass
+                raise
 
-        stat = destination.stat()
-        internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
-        combined_meta = {**internal_meta, **(metadata or {})}
-        self._write_metadata(bucket_id, safe_key, combined_meta)
+        lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
+        try:
+            with _atomic_lock_file(lock_file_path):
+                archived_version_size = 0
+                if self._is_versioning_enabled(bucket_path) and is_overwrite:
+                    archived_version_size = existing_size
+                    self._archive_current_version(bucket_id, safe_key, reason="overwrite")
+                shutil.move(str(tmp_path), str(destination))
+                tmp_path = None
+                stat = destination.stat()
+                internal_meta = {"__etag__": etag, "__size__": str(stat.st_size), "__last_modified__": str(stat.st_mtime)}
+                combined_meta = {**internal_meta, **(metadata or {})}
+                self._write_metadata(bucket_id, safe_key, combined_meta)
+        except BlockingIOError:
+            try:
+                if tmp_path:
+                    tmp_path.unlink(missing_ok=True)
+            except OSError:
+                pass
+            raise StorageError("Another upload to this key is in progress")
+        finally:
+            if tmp_path:
+                try:
+                    tmp_path.unlink(missing_ok=True)
+                except OSError:
+                    pass
 
         self._update_bucket_stats_cache(
             bucket_id,
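`_atomic_lock_file` itself is not defined in this diff; the `except BlockingIOError` branch implies an exclusive, non-blocking advisory lock. A plausible sketch under that assumption (name from the diff, implementation inferred, POSIX-only):

    import fcntl
    import os
    from contextlib import contextmanager
    from pathlib import Path

    @contextmanager
    def _atomic_lock_file(lock_path: Path):
        # Hold an exclusive lock for the duration of the with-block.
        lock_path.parent.mkdir(parents=True, exist_ok=True)
        fd = os.open(lock_path, os.O_CREAT | os.O_RDWR)
        try:
            # LOCK_NB makes contention raise BlockingIOError immediately,
            # which put_object maps to StorageError above.
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yield
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)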
@@ -1553,18 +1566,16 @@ class ObjectStorage:
         temp_path = upload_root / f".{part_filename}.tmp"
         try:
-            with temp_path.open("wb") as target:
-                shutil.copyfileobj(stream, target)
             if _HAS_RUST:
+                with temp_path.open("wb") as target:
+                    shutil.copyfileobj(stream, target)
                 part_etag = _rc.md5_file(str(temp_path))
             else:
                 checksum = hashlib.md5()
-                with temp_path.open("rb") as f:
-                    while True:
-                        chunk = f.read(1048576)
-                        if not chunk:
-                            break
-                        checksum.update(chunk)
+                with temp_path.open("wb") as target:
+                    shutil.copyfileobj(_HashingReader(stream, checksum), target)
+                    target.flush()
+                    os.fsync(target.fileno())
                 part_etag = checksum.hexdigest()
             temp_path.replace(part_path)
         except OSError:
@@ -1598,7 +1609,7 @@ class ObjectStorage:
             parts = manifest.setdefault("parts", {})
             parts[str(part_number)] = record
-            manifest_path.write_text(json.dumps(manifest), encoding="utf-8")
+            self._atomic_write_json(manifest_path, manifest)
             break
         except OSError as exc:
             if attempt < max_retries - 1:
@@ -1691,7 +1702,7 @@ class ObjectStorage:
             parts = manifest.setdefault("parts", {})
             parts[str(part_number)] = record
-            manifest_path.write_text(json.dumps(manifest), encoding="utf-8")
+            self._atomic_write_json(manifest_path, manifest)
             break
         except OSError as exc:
             if attempt < max_retries - 1:
@@ -1797,6 +1808,8 @@ class ObjectStorage:
                         break
                     checksum.update(data)
                     target.write(data)
+                target.flush()
+                os.fsync(target.fileno())
             checksum_hex = checksum.hexdigest()
         except BlockingIOError:
             raise StorageError("Another upload to this key is in progress")
@@ -1815,7 +1828,7 @@ class ObjectStorage:
         etag = checksum_hex
         metadata = manifest.get("metadata")
-        internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
+        internal_meta = {"__etag__": etag, "__size__": str(stat.st_size), "__last_modified__": str(stat.st_mtime)}
         combined_meta = {**internal_meta, **(metadata or {})}
         self._write_metadata(bucket_id, safe_key, combined_meta)
@@ -2303,6 +2316,23 @@ class ObjectStorage:
         ):
             path.mkdir(parents=True, exist_ok=True)
 
+    @staticmethod
+    def _atomic_write_json(path: Path, data: Any) -> None:
+        path.parent.mkdir(parents=True, exist_ok=True)
+        tmp_path = path.with_suffix(".tmp")
+        try:
+            with tmp_path.open("w", encoding="utf-8") as f:
+                json.dump(data, f)
+                f.flush()
+                os.fsync(f.fileno())
+            tmp_path.replace(path)
+        except BaseException:
+            try:
+                tmp_path.unlink(missing_ok=True)
+            except OSError:
+                pass
+            raise
+
     def _multipart_dir(self, bucket_name: str, upload_id: str) -> Path:
         return self._multipart_bucket_root(bucket_name) / upload_id
@@ -2337,7 +2367,7 @@ class ObjectStorage:
     def _write_bucket_config(self, bucket_name: str, payload: dict[str, Any]) -> None:
         config_path = self._bucket_config_path(bucket_name)
         config_path.parent.mkdir(parents=True, exist_ok=True)
-        config_path.write_text(json.dumps(payload), encoding="utf-8")
+        self._atomic_write_json(config_path, payload)
         try:
             mtime = config_path.stat().st_mtime
         except OSError:
@@ -2371,8 +2401,7 @@ class ObjectStorage:
     def _write_multipart_manifest(self, upload_root: Path, manifest: dict[str, Any]) -> None:
         manifest_path = upload_root / self.MULTIPART_MANIFEST
-        manifest_path.parent.mkdir(parents=True, exist_ok=True)
-        manifest_path.write_text(json.dumps(manifest), encoding="utf-8")
+        self._atomic_write_json(manifest_path, manifest)
 
     def _metadata_file(self, bucket_name: str, key: Path) -> Path:
         meta_root = self._bucket_meta_root(bucket_name)
@@ -2442,7 +2471,7 @@ class ObjectStorage:
         except (OSError, json.JSONDecodeError):
             pass
         index_data[entry_name] = entry
-        index_path.write_text(json.dumps(index_data), encoding="utf-8")
+        self._atomic_write_json(index_path, index_data)
         self._invalidate_meta_read_cache(bucket_name, key)
 
     def _delete_index_entry(self, bucket_name: str, key: Path) -> None:
@@ -2463,7 +2492,7 @@ class ObjectStorage:
         if entry_name in index_data:
             del index_data[entry_name]
         if index_data:
-            index_path.write_text(json.dumps(index_data), encoding="utf-8")
+            self._atomic_write_json(index_path, index_data)
         else:
             try:
                 index_path.unlink()
@@ -2512,7 +2541,7 @@ class ObjectStorage:
             "reason": reason,
         }
         manifest_path = version_dir / f"{version_id}.json"
-        manifest_path.write_text(json.dumps(record), encoding="utf-8")
+        self._atomic_write_json(manifest_path, record)
 
     def _read_metadata(self, bucket_name: str, key: Path) -> Dict[str, str]:
         entry = self._read_index_entry(bucket_name, key)
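Both upload paths now hash while streaming through `_HashingReader`, whose definition is outside this diff; this removes the old second pass that re-read the temp file just to compute the MD5. A minimal sketch of a wrapper that works with `shutil.copyfileobj` (class name from the diff; the body is an assumption):

    import hashlib
    import shutil

    class _HashingReader:
        # Wrap a readable stream and feed every chunk through a hash object.
        def __init__(self, stream, hasher) -> None:
            self._stream = stream
            self._hasher = hasher

        def read(self, size: int = -1) -> bytes:
            # copyfileobj only ever calls read(), so this one method suffices.
            chunk = self._stream.read(size)
            if chunk:
                self._hasher.update(chunk)
            return chunk

    # Usage mirroring the streaming path in put_object:
    # checksum = hashlib.md5()
    # with open("dest.bin", "wb") as target:
    #     shutil.copyfileobj(_HashingReader(src, checksum), target)
    # etag = checksum.hexdigest()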

View File

@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-APP_VERSION = "0.3.5"
+APP_VERSION = "0.3.6"
 
 
 def get_version() -> str:

View File

@@ -46,6 +46,8 @@ pub fn stream_to_file_with_md5(
             py.check_signals()?;
         }
 
+        file.sync_all()
+            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
         Ok(())
     })();
@@ -102,6 +104,9 @@ pub fn assemble_parts_with_md5(
             }
         }
 
+        target.sync_all()
+            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
+
         Ok(format!("{:x}", hasher.finalize()))
     })
 }