Compare commits: v0.1.8...4ab58e59c2 (5 commits)

| SHA1 |
|---|
| 4ab58e59c2 |
| 32232211a1 |
| 1cacb80dd6 |
| e89bbb62dc |
| c8eb3de629 |
```diff
@@ -2,10 +2,12 @@
 from __future__ import annotations
 
 import json
+import re
+import time
 from dataclasses import dataclass
-from fnmatch import fnmatch
+from fnmatch import fnmatch, translate
 from pathlib import Path
-from typing import Any, Dict, Iterable, List, Optional, Sequence
+from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
 
 
 RESOURCE_PREFIX = "arn:aws:s3:::"
@@ -133,7 +135,22 @@ class BucketPolicyStatement:
     effect: str
     principals: List[str] | str
     actions: List[str]
-    resources: List[tuple[str | None, str | None]]
+    resources: List[Tuple[str | None, str | None]]
+    # Performance: Pre-compiled regex patterns for resource matching
+    _compiled_patterns: List[Tuple[str | None, Optional[Pattern[str]]]] | None = None
+
+    def _get_compiled_patterns(self) -> List[Tuple[str | None, Optional[Pattern[str]]]]:
+        """Lazily compile fnmatch patterns to regex for faster matching."""
+        if self._compiled_patterns is None:
+            self._compiled_patterns = []
+            for resource_bucket, key_pattern in self.resources:
+                if key_pattern is None:
+                    self._compiled_patterns.append((resource_bucket, None))
+                else:
+                    # Convert fnmatch pattern to regex
+                    regex_pattern = translate(key_pattern)
+                    self._compiled_patterns.append((resource_bucket, re.compile(regex_pattern)))
+        return self._compiled_patterns
 
     def matches_principal(self, access_key: Optional[str]) -> bool:
         if self.principals == "*":
@@ -149,15 +166,16 @@ class BucketPolicyStatement:
     def matches_resource(self, bucket: Optional[str], object_key: Optional[str]) -> bool:
         bucket = (bucket or "*").lower()
         key = object_key or ""
-        for resource_bucket, key_pattern in self.resources:
+        for resource_bucket, compiled_pattern in self._get_compiled_patterns():
             resource_bucket = (resource_bucket or "*").lower()
             if resource_bucket not in {"*", bucket}:
                 continue
-            if key_pattern is None:
+            if compiled_pattern is None:
                 if not key:
                     return True
                 continue
-            if fnmatch(key, key_pattern):
+            # Performance: Use pre-compiled regex instead of fnmatch
+            if compiled_pattern.match(key):
                 return True
         return False
 
@@ -174,8 +192,16 @@ class BucketPolicyStore:
         self._policies: Dict[str, List[BucketPolicyStatement]] = {}
         self._load()
         self._last_mtime = self._current_mtime()
+        # Performance: Avoid stat() on every request
+        self._last_stat_check = 0.0
+        self._stat_check_interval = 1.0  # Only check mtime every 1 second
 
     def maybe_reload(self) -> None:
+        # Performance: Skip stat check if we checked recently
+        now = time.time()
+        if now - self._last_stat_check < self._stat_check_interval:
+            return
+        self._last_stat_check = now
        current = self._current_mtime()
        if current is None or current == self._last_mtime:
            return
```
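
The `matches_resource` hunk swaps per-call `fnmatch` for patterns compiled once via `fnmatch.translate`. A standalone sketch of the equivalence (illustrative names, not repo code); the one behavioural difference is that plain `fnmatch()` case-folds on case-insensitive platforms, while the translated regex — like `fnmatchcase()` — does not:

```python
import re
from fnmatch import fnmatchcase, translate

pattern = "uploads/*.csv"
compiled = re.compile(translate(pattern))  # translate() anchors the match with \Z

for key in ["uploads/a.csv", "uploads/b.txt", "logs/c.csv"]:
    # match() of the translated regex agrees with fnmatchcase() everywhere;
    # plain fnmatch() may differ on Windows because of case folding.
    assert bool(compiled.match(key)) == fnmatchcase(key, pattern)
```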
```diff
@@ -90,6 +90,8 @@ class EncryptedObjectStorage:
 
         Returns:
             ObjectMeta with object information
+
+        Performance: Uses streaming encryption for large files to reduce memory usage.
         """
         should_encrypt, algorithm, detected_kms_key = self._should_encrypt(
             bucket_name, server_side_encryption
@@ -99,20 +101,17 @@ class EncryptedObjectStorage:
         kms_key_id = detected_kms_key
 
         if should_encrypt:
-            data = stream.read()
-
             try:
-                ciphertext, enc_metadata = self.encryption.encrypt_object(
-                    data,
+                # Performance: Use streaming encryption to avoid loading entire file into memory
+                encrypted_stream, enc_metadata = self.encryption.encrypt_stream(
+                    stream,
                     algorithm=algorithm,
-                    kms_key_id=kms_key_id,
                     context={"bucket": bucket_name, "key": object_key},
                 )
 
                 combined_metadata = metadata.copy() if metadata else {}
                 combined_metadata.update(enc_metadata.to_dict())
 
-                encrypted_stream = io.BytesIO(ciphertext)
                 result = self.storage.put_object(
                     bucket_name,
                     object_key,
@@ -138,23 +137,24 @@ class EncryptedObjectStorage:
 
         Returns:
             Tuple of (data, metadata)
+
+        Performance: Uses streaming decryption to reduce memory usage.
         """
         path = self.storage.get_object_path(bucket_name, object_key)
         metadata = self.storage.get_object_metadata(bucket_name, object_key)
 
-        with path.open("rb") as f:
-            data = f.read()
-
         enc_metadata = EncryptionMetadata.from_dict(metadata)
         if enc_metadata:
             try:
-                data = self.encryption.decrypt_object(
-                    data,
-                    enc_metadata,
-                    context={"bucket": bucket_name, "key": object_key},
-                )
+                # Performance: Use streaming decryption to avoid loading entire file into memory
+                with path.open("rb") as f:
+                    decrypted_stream = self.encryption.decrypt_stream(f, enc_metadata)
+                    data = decrypted_stream.read()
             except EncryptionError as exc:
                 raise StorageError(f"Decryption failed: {exc}") from exc
+        else:
+            with path.open("rb") as f:
+                data = f.read()
 
         clean_metadata = {
             k: v for k, v in metadata.items()
```
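
The two hunks above replace whole-object `read()` calls with `encrypt_stream`/`decrypt_stream`. Note that, as reconstructed below, `encrypt_stream` still buffers the full ciphertext in a `BytesIO`, so the immediate saving is the extra full plaintext copy rather than true constant-memory streaming. The underlying copy pattern, in a minimal stdlib-only sketch:

```python
import io
import shutil

def copy_all_at_once(src, dst) -> None:
    dst.write(src.read())  # peak memory roughly equals the object size

def copy_streaming(src, dst, chunk_size: int = 64 * 1024) -> None:
    shutil.copyfileobj(src, dst, chunk_size)  # peak memory roughly one chunk

src = io.BytesIO(b"x" * (1 << 20))
dst = io.BytesIO()
copy_streaming(src, dst)
assert dst.getvalue() == src.getvalue()
```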
```diff
@@ -183,21 +183,26 @@ class StreamingEncryptor:
         self.chunk_size = chunk_size
 
     def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
-        """Derive a unique nonce for each chunk."""
-        # XOR the base nonce with the chunk index
-        nonce_int = int.from_bytes(base_nonce, "big")
-        derived = nonce_int ^ chunk_index
-        return derived.to_bytes(12, "big")
+        """Derive a unique nonce for each chunk.
+
+        Performance: Use direct byte manipulation instead of full int conversion.
+        """
+        # Performance: Only modify last 4 bytes instead of full 12-byte conversion
+        return base_nonce[:8] + (chunk_index ^ int.from_bytes(base_nonce[8:], "big")).to_bytes(4, "big")
 
     def encrypt_stream(self, stream: BinaryIO,
                        context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
-        """Encrypt a stream and return encrypted stream + metadata."""
+        """Encrypt a stream and return encrypted stream + metadata.
+
+        Performance: Writes chunks directly to output buffer instead of accumulating in list.
+        """
         data_key, encrypted_data_key = self.provider.generate_data_key()
         base_nonce = secrets.token_bytes(12)
 
         aesgcm = AESGCM(data_key)
-        encrypted_chunks = []
+        # Performance: Write directly to BytesIO instead of accumulating chunks
+        output = io.BytesIO()
+        output.write(b"\x00\x00\x00\x00")  # Placeholder for chunk count
         chunk_index = 0
 
         while True:
```
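
The rewritten `_derive_chunk_nonce` touches only the last four bytes of the 96-bit nonce. That is equivalent to the old full-width XOR whenever `chunk_index` fits in 32 bits, which the 4-byte chunk-count header already implies. A standalone self-check (not repo code):

```python
import secrets

def old_derive(base_nonce: bytes, chunk_index: int) -> bytes:
    # Original: XOR the whole 96-bit nonce as one integer
    return (int.from_bytes(base_nonce, "big") ^ chunk_index).to_bytes(12, "big")

def new_derive(base_nonce: bytes, chunk_index: int) -> bytes:
    # New: XOR only the low 32 bits, leaving the first 8 bytes untouched
    return base_nonce[:8] + (chunk_index ^ int.from_bytes(base_nonce[8:], "big")).to_bytes(4, "big")

base = secrets.token_bytes(12)
for i in (0, 1, 2**20, 2**32 - 1):  # every index representable in the 4-byte header
    assert old_derive(base, i) == new_derive(base, i)
```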
```diff
@@ -208,12 +213,15 @@ class StreamingEncryptor:
             chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
             encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
 
-            size_prefix = len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big")
-            encrypted_chunks.append(size_prefix + encrypted_chunk)
+            # Write size prefix + encrypted chunk directly
+            output.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
+            output.write(encrypted_chunk)
             chunk_index += 1
 
-        header = chunk_index.to_bytes(4, "big")
-        encrypted_data = header + b"".join(encrypted_chunks)
+        # Write actual chunk count to header
+        output.seek(0)
+        output.write(chunk_index.to_bytes(4, "big"))
+        output.seek(0)
 
         metadata = EncryptionMetadata(
             algorithm="AES256",
@@ -222,10 +230,13 @@ class StreamingEncryptor:
             encrypted_data_key=encrypted_data_key,
         )
 
-        return io.BytesIO(encrypted_data), metadata
+        return output, metadata
 
     def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
-        """Decrypt a stream using the provided metadata."""
+        """Decrypt a stream using the provided metadata.
+
+        Performance: Writes chunks directly to output buffer instead of accumulating in list.
+        """
         if isinstance(self.provider, LocalKeyEncryption):
             data_key = self.provider._decrypt_data_key(metadata.encrypted_data_key)
         else:
@@ -239,7 +250,8 @@ class StreamingEncryptor:
             raise EncryptionError("Invalid encrypted stream: missing header")
         chunk_count = int.from_bytes(chunk_count_bytes, "big")
 
-        decrypted_chunks = []
+        # Performance: Write directly to BytesIO instead of accumulating chunks
+        output = io.BytesIO()
         for chunk_index in range(chunk_count):
             size_bytes = stream.read(self.HEADER_SIZE)
             if len(size_bytes) < self.HEADER_SIZE:
@@ -253,11 +265,12 @@ class StreamingEncryptor:
             chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
             try:
                 decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
-                decrypted_chunks.append(decrypted_chunk)
+                output.write(decrypted_chunk)  # Write directly instead of appending to list
             except Exception as exc:
                 raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc
 
-        return io.BytesIO(b"".join(decrypted_chunks))
+        output.seek(0)
+        return output
 
 
 class EncryptionManager:
```
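
Taken together, the encrypt hunks define this wire format: a 4-byte big-endian chunk count, then per chunk a `HEADER_SIZE`-byte length prefix followed by the AES-GCM ciphertext (plaintext plus 16-byte tag). A hedged walker for that framing — `HEADER_SIZE = 4` is an assumption, since the constant's value never appears in the diff:

```python
import io
from typing import Iterator

HEADER_SIZE = 4  # assumption: the constant is not shown in the diff

def iter_encrypted_chunks(stream: io.BufferedIOBase) -> Iterator[bytes]:
    """Walk the framing written by encrypt_stream(): count, then length-prefixed chunks."""
    chunk_count = int.from_bytes(stream.read(4), "big")
    for _ in range(chunk_count):
        size = int.from_bytes(stream.read(HEADER_SIZE), "big")
        yield stream.read(size)  # AES-GCM ciphertext + 16-byte tag
```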
**app/iam.py** (65 changed lines)
```diff
@@ -4,11 +4,12 @@ from __future__ import annotations
 import json
 import math
 import secrets
+import time
 from collections import deque
 from dataclasses import dataclass
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
-from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set
+from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
 
 
 class IamError(RuntimeError):
@@ -115,13 +116,24 @@ class IamService:
         self._raw_config: Dict[str, Any] = {}
         self._failed_attempts: Dict[str, Deque[datetime]] = {}
         self._last_load_time = 0.0
+        # Performance: credential cache with TTL
+        self._credential_cache: Dict[str, Tuple[str, Principal, float]] = {}
+        self._cache_ttl = 60.0  # Cache credentials for 60 seconds
+        self._last_stat_check = 0.0
+        self._stat_check_interval = 1.0  # Only stat() file every 1 second
         self._load()
 
     def _maybe_reload(self) -> None:
         """Reload configuration if the file has changed on disk."""
+        # Performance: Skip stat check if we checked recently
+        now = time.time()
+        if now - self._last_stat_check < self._stat_check_interval:
+            return
+        self._last_stat_check = now
         try:
             if self.config_path.stat().st_mtime > self._last_load_time:
                 self._load()
+                self._credential_cache.clear()  # Invalidate cache on reload
         except OSError:
             pass
 
@@ -181,17 +193,37 @@ class IamService:
         return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))
 
     def principal_for_key(self, access_key: str) -> Principal:
+        # Performance: Check cache first
+        now = time.time()
+        cached = self._credential_cache.get(access_key)
+        if cached:
+            secret, principal, cached_time = cached
+            if now - cached_time < self._cache_ttl:
+                return principal
+
         self._maybe_reload()
         record = self._users.get(access_key)
         if not record:
             raise IamError("Unknown access key")
-        return self._build_principal(access_key, record)
+        principal = self._build_principal(access_key, record)
+        self._credential_cache[access_key] = (record["secret_key"], principal, now)
+        return principal
 
     def secret_for_key(self, access_key: str) -> str:
+        # Performance: Check cache first
+        now = time.time()
+        cached = self._credential_cache.get(access_key)
+        if cached:
+            secret, principal, cached_time = cached
+            if now - cached_time < self._cache_ttl:
+                return secret
+
         self._maybe_reload()
         record = self._users.get(access_key)
         if not record:
             raise IamError("Unknown access key")
+        principal = self._build_principal(access_key, record)
+        self._credential_cache[access_key] = (record["secret_key"], principal, now)
         return record["secret_key"]
 
     def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
@@ -442,11 +474,36 @@ class IamService:
         raise IamError("User not found")
 
     def get_secret_key(self, access_key: str) -> str | None:
+        # Performance: Check cache first
+        now = time.time()
+        cached = self._credential_cache.get(access_key)
+        if cached:
+            secret, principal, cached_time = cached
+            if now - cached_time < self._cache_ttl:
+                return secret
+
         self._maybe_reload()
         record = self._users.get(access_key)
-        return record["secret_key"] if record else None
+        if record:
+            # Cache the result
+            principal = self._build_principal(access_key, record)
+            self._credential_cache[access_key] = (record["secret_key"], principal, now)
+            return record["secret_key"]
+        return None
 
     def get_principal(self, access_key: str) -> Principal | None:
+        # Performance: Check cache first
+        now = time.time()
+        cached = self._credential_cache.get(access_key)
+        if cached:
+            secret, principal, cached_time = cached
+            if now - cached_time < self._cache_ttl:
+                return principal
+
         self._maybe_reload()
         record = self._users.get(access_key)
-        return self._build_principal(access_key, record) if record else None
+        if record:
+            principal = self._build_principal(access_key, record)
+            self._credential_cache[access_key] = (record["secret_key"], principal, now)
+            return principal
+        return None
```
```diff
@@ -9,7 +9,7 @@ import time
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Optional
+from typing import Any, Dict, Optional
 
 import boto3
 from botocore.config import Config
```
```diff
@@ -24,11 +24,42 @@ logger = logging.getLogger(__name__)
 REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"
 REPLICATION_CONNECT_TIMEOUT = 5
 REPLICATION_READ_TIMEOUT = 30
+STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024  # 10 MiB - use streaming for larger files
 
 REPLICATION_MODE_NEW_ONLY = "new_only"
 REPLICATION_MODE_ALL = "all"
 
 
+def _create_s3_client(connection: RemoteConnection, *, health_check: bool = False) -> Any:
+    """Create a boto3 S3 client for the given connection.
+
+    Args:
+        connection: Remote S3 connection configuration
+        health_check: If True, use minimal retries for quick health checks
+
+    Returns:
+        Configured boto3 S3 client
+    """
+    config = Config(
+        user_agent_extra=REPLICATION_USER_AGENT,
+        connect_timeout=REPLICATION_CONNECT_TIMEOUT,
+        read_timeout=REPLICATION_READ_TIMEOUT,
+        retries={'max_attempts': 1 if health_check else 2},
+        signature_version='s3v4',
+        s3={'addressing_style': 'path'},
+        request_checksum_calculation='when_required',
+        response_checksum_validation='when_required',
+    )
+    return boto3.client(
+        "s3",
+        endpoint_url=connection.endpoint_url,
+        aws_access_key_id=connection.access_key,
+        aws_secret_access_key=connection.secret_key,
+        region_name=connection.region or 'us-east-1',
+        config=config,
+    )
+
+
 @dataclass
 class ReplicationStats:
     """Statistics for replication operations - computed dynamically."""
```
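
Usage sketch for the new factory. The `RemoteConnection` constructor shape is assumed from the four attributes the factory reads; the dataclass itself is outside this diff:

```python
conn = RemoteConnection(
    endpoint_url="https://s3.example.internal:9000",  # illustrative endpoint
    access_key="AKIAEXAMPLE",
    secret_key="example-secret",
    region=None,  # the factory falls back to 'us-east-1'
)
health_client = _create_s3_client(conn, health_check=True)  # retries: max_attempts=1
worker_client = _create_s3_client(conn)                     # retries: max_attempts=2
```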
```diff
@@ -102,8 +133,19 @@ class ReplicationManager:
         self._rules: Dict[str, ReplicationRule] = {}
         self._stats_lock = threading.Lock()
         self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
+        self._shutdown = False
         self.reload_rules()
 
+    def shutdown(self, wait: bool = True) -> None:
+        """Shutdown the replication executor gracefully.
+
+        Args:
+            wait: If True, wait for pending tasks to complete
+        """
+        self._shutdown = True
+        self._executor.shutdown(wait=wait)
+        logger.info("Replication manager shut down")
+
     def reload_rules(self) -> None:
         if not self.rules_path.exists():
             self._rules = {}
```
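
The diff adds `shutdown()` but no caller. One plausible call site (an assumption, not shown in the diff) is an exit hook so queued replication tasks drain on interpreter shutdown:

```python
import atexit

def install_shutdown_hook(manager: "ReplicationManager") -> None:
    # wait=True drains queued replication tasks; wait=False abandons them.
    atexit.register(manager.shutdown, wait=True)
```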
```diff
@@ -129,20 +171,7 @@ class ReplicationManager:
         Uses short timeouts to prevent blocking.
         """
         try:
-            config = Config(
-                user_agent_extra=REPLICATION_USER_AGENT,
-                connect_timeout=REPLICATION_CONNECT_TIMEOUT,
-                read_timeout=REPLICATION_READ_TIMEOUT,
-                retries={'max_attempts': 1}
-            )
-            s3 = boto3.client(
-                "s3",
-                endpoint_url=connection.endpoint_url,
-                aws_access_key_id=connection.access_key,
-                aws_secret_access_key=connection.secret_key,
-                region_name=connection.region,
-                config=config,
-            )
+            s3 = _create_s3_client(connection, health_check=True)
             s3.list_buckets()
             return True
         except Exception as e:
@@ -185,13 +214,7 @@ class ReplicationManager:
         source_objects = self.storage.list_objects_all(bucket_name)
         source_keys = {obj.key: obj.size for obj in source_objects}
 
-        s3 = boto3.client(
-            "s3",
-            endpoint_url=connection.endpoint_url,
-            aws_access_key_id=connection.access_key,
-            aws_secret_access_key=connection.secret_key,
-            region_name=connection.region,
-        )
+        s3 = _create_s3_client(connection)
 
         dest_keys = set()
         bytes_synced = 0
@@ -257,13 +280,7 @@ class ReplicationManager:
             raise ValueError(f"Connection {connection_id} not found")
 
         try:
-            s3 = boto3.client(
-                "s3",
-                endpoint_url=connection.endpoint_url,
-                aws_access_key_id=connection.access_key,
-                aws_secret_access_key=connection.secret_key,
-                region_name=connection.region,
-            )
+            s3 = _create_s3_client(connection)
             s3.create_bucket(Bucket=bucket_name)
         except ClientError as e:
             logger.error(f"Failed to create remote bucket {bucket_name}: {e}")
@@ -286,6 +303,15 @@ class ReplicationManager:
         self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)
 
     def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
+        if self._shutdown:
+            return
+
+        # Re-check if rule is still enabled (may have been paused after task was submitted)
+        current_rule = self.get_rule(bucket_name)
+        if not current_rule or not current_rule.enabled:
+            logger.debug(f"Replication skipped for {bucket_name}/{object_key}: rule disabled or removed")
+            return
+
         if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
             logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
             return
@@ -297,30 +323,8 @@ class ReplicationManager:
             logger.error(f"Object key validation failed in replication: {e}")
             return
 
-        file_size = 0
         try:
-            config = Config(
-                user_agent_extra=REPLICATION_USER_AGENT,
-                connect_timeout=REPLICATION_CONNECT_TIMEOUT,
-                read_timeout=REPLICATION_READ_TIMEOUT,
-                retries={'max_attempts': 2},
-                signature_version='s3v4',
-                s3={
-                    'addressing_style': 'path',
-                },
-                # Disable SDK automatic checksums - they cause SignatureDoesNotMatch errors
-                # with S3-compatible servers that don't support CRC32 checksum headers
-                request_checksum_calculation='when_required',
-                response_checksum_validation='when_required',
-            )
-            s3 = boto3.client(
-                "s3",
-                endpoint_url=conn.endpoint_url,
-                aws_access_key_id=conn.access_key,
-                aws_secret_access_key=conn.secret_key,
-                region_name=conn.region or 'us-east-1',
-                config=config,
-            )
+            s3 = _create_s3_client(conn)
 
             if action == "delete":
                 try:
@@ -337,34 +341,42 @@ class ReplicationManager:
                 logger.error(f"Source object not found: {bucket_name}/{object_key}")
                 return
 
-            # Don't replicate metadata - destination server will generate its own
-            # __etag__ and __size__. Replicating them causes signature mismatches when they have None/empty values.
-
             content_type, _ = mimetypes.guess_type(path)
             file_size = path.stat().st_size
 
             logger.info(f"Replicating {bucket_name}/{object_key}: Size={file_size}, ContentType={content_type}")
 
-            def do_put_object() -> None:
-                """Helper to upload object.
+            def do_upload() -> None:
+                """Upload object using appropriate method based on file size.
 
-                Reads the file content into memory first to avoid signature calculation
-                issues with certain binary file types (like GIFs) when streaming.
-                Do NOT set ContentLength explicitly - boto3 calculates it from the bytes
-                and setting it manually can cause SignatureDoesNotMatch errors.
+                For small files (< 10 MiB): Read into memory for simpler handling
+                For large files: Use streaming upload to avoid memory issues
                 """
-                file_content = path.read_bytes()
-                put_kwargs = {
-                    "Bucket": rule.target_bucket,
-                    "Key": object_key,
-                    "Body": file_content,
-                }
-                if content_type:
-                    put_kwargs["ContentType"] = content_type
-                s3.put_object(**put_kwargs)
+                extra_args = {}
+                if content_type:
+                    extra_args["ContentType"] = content_type
+
+                if file_size >= STREAMING_THRESHOLD_BYTES:
+                    # Use multipart upload for large files
+                    s3.upload_file(
+                        str(path),
+                        rule.target_bucket,
+                        object_key,
+                        ExtraArgs=extra_args if extra_args else None,
+                    )
+                else:
+                    # Read small files into memory
+                    file_content = path.read_bytes()
+                    put_kwargs = {
+                        "Bucket": rule.target_bucket,
+                        "Key": object_key,
+                        "Body": file_content,
+                        **extra_args,
+                    }
+                    s3.put_object(**put_kwargs)
 
             try:
-                do_put_object()
+                do_upload()
             except (ClientError, S3UploadFailedError) as e:
                 error_code = None
                 if isinstance(e, ClientError):
@@ -389,7 +401,7 @@ class ReplicationManager:
                     raise e
 
                 if bucket_ready:
-                    do_put_object()
+                    do_upload()
                 else:
                     raise e
 
```
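
The size-based dispatch in `do_upload()` reduces to a few lines. A hedged standalone version (`upload` is an illustrative helper name; `upload_file` streams from disk and handles multipart and concurrency internally, which is why no part-size tuning appears in the diff):

```python
from pathlib import Path

STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024  # mirrors the new module constant

def upload(s3, bucket: str, key: str, path: Path, content_type: str | None = None) -> None:
    """Sketch of do_upload()'s dispatch: multipart for big files, a single PUT for small."""
    extra = {"ContentType": content_type} if content_type else {}
    if path.stat().st_size >= STREAMING_THRESHOLD_BYTES:
        # upload_file streams the file and switches to multipart automatically
        s3.upload_file(str(path), bucket, key, ExtraArgs=extra or None)
    else:
        s3.put_object(Bucket=bucket, Key=key, Body=path.read_bytes(), **extra)
```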
**app/s3_api.py** (177 changed lines)
```diff
@@ -1,13 +1,15 @@
 """Flask blueprint exposing a subset of the S3 REST API."""
 from __future__ import annotations
 
+import base64
 import hashlib
 import hmac
+import logging
 import mimetypes
 import re
 import uuid
 from datetime import datetime, timedelta, timezone
-from typing import Any, Dict
+from typing import Any, Dict, Optional
 from urllib.parse import quote, urlencode, urlparse, unquote
 from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
 
@@ -20,6 +22,8 @@ from .iam import IamError, Principal
 from .replication import ReplicationManager
 from .storage import ObjectStorage, StorageError, QuotaExceededError
 
+logger = logging.getLogger(__name__)
+
 s3_api_bp = Blueprint("s3_api", __name__)
 
 def _storage() -> ObjectStorage:
@@ -118,6 +122,9 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
             if header_val is None:
                 header_val = ""
 
+            if header.lower() == 'expect' and header_val == "":
+                header_val = "100-continue"
+
             header_val = " ".join(header_val.split())
             canonical_headers_parts.append(f"{header.lower()}:{header_val}\n")
         canonical_headers = "".join(canonical_headers_parts)
```
```diff
@@ -128,15 +135,6 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
 
     canonical_request = f"{method}\n{canonical_uri}\n{canonical_query_string}\n{canonical_headers}\n{signed_headers_str}\n{payload_hash}"
 
-    # Debug logging for signature issues
-    import logging
-    logger = logging.getLogger(__name__)
-    logger.debug(f"SigV4 Debug - Method: {method}, URI: {canonical_uri}")
-    logger.debug(f"SigV4 Debug - Payload hash from header: {req.headers.get('X-Amz-Content-Sha256')}")
-    logger.debug(f"SigV4 Debug - Signed headers: {signed_headers_str}")
-    logger.debug(f"SigV4 Debug - Content-Type: {req.headers.get('Content-Type')}")
-    logger.debug(f"SigV4 Debug - Content-Length: {req.headers.get('Content-Length')}")
-
     amz_date = req.headers.get("X-Amz-Date") or req.headers.get("Date")
     if not amz_date:
         raise IamError("Missing Date header")
@@ -167,24 +165,18 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
     calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
 
     if not hmac.compare_digest(calculated_signature, signature):
-        # Debug logging for signature mismatch
-        import logging
-        logger = logging.getLogger(__name__)
-        logger.error(f"Signature mismatch for {req.path}")
-        logger.error(f"  Content-Type: {req.headers.get('Content-Type')}")
-        logger.error(f"  Content-Length: {req.headers.get('Content-Length')}")
-        logger.error(f"  X-Amz-Content-Sha256: {req.headers.get('X-Amz-Content-Sha256')}")
-        logger.error(f"  Canonical URI: {canonical_uri}")
-        logger.error(f"  Signed headers: {signed_headers_str}")
-        # Log each signed header's value
-        for h in signed_headers_list:
-            logger.error(f"  Header '{h}': {repr(req.headers.get(h))}")
-        logger.error(f"  Expected sig: {signature[:16]}...")
-        logger.error(f"  Calculated sig: {calculated_signature[:16]}...")
-        # Log first part of canonical request to compare
-        logger.error(f"  Canonical request hash: {hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()[:16]}...")
-        # Log the full canonical request for debugging
-        logger.error(f"  Canonical request:\n{canonical_request[:500]}...")
+        # Only log detailed signature debug info if DEBUG_SIGV4 is enabled
+        if current_app.config.get("DEBUG_SIGV4"):
+            logger.warning(
+                "SigV4 signature mismatch",
+                extra={
+                    "path": req.path,
+                    "method": method,
+                    "signed_headers": signed_headers_str,
+                    "content_type": req.headers.get("Content-Type"),
+                    "content_length": req.headers.get("Content-Length"),
+                },
+            )
         raise IamError("SignatureDoesNotMatch")
 
     return _iam().get_principal(access_key)
@@ -236,6 +228,8 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
     canonical_headers_parts = []
     for header in signed_headers_list:
         val = req.headers.get(header, "").strip()
+        if header.lower() == 'expect' and val == "":
+            val = "100-continue"
         val = " ".join(val.split())
         canonical_headers_parts.append(f"{header}:{val}\n")
     canonical_headers = "".join(canonical_headers_parts)
```
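
The mismatch log is now gated on a `DEBUG_SIGV4` config flag and carries structured fields via `extra=`. A sketch of enabling it — the logger name `app.s3_api` follows from `logging.getLogger(__name__)`, and treating `DEBUG_SIGV4` as a plain config key is an assumption:

```python
import logging
from flask import Flask

app = Flask(__name__)
app.config["DEBUG_SIGV4"] = True  # opt in to signature-mismatch warnings

handler = logging.StreamHandler()
# Caution: %-style formatters raise if a record lacks 'path'/'method', so attach
# a formatter like this only where every record carries those extra fields.
handler.setFormatter(logging.Formatter("%(levelname)s %(message)s path=%(path)s method=%(method)s"))
logging.getLogger("app.s3_api").addHandler(handler)  # logger name assumed from __name__
```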
```diff
@@ -569,6 +563,28 @@ def _strip_ns(tag: str | None) -> str:
     return tag.split("}")[-1]
 
 
+def _find_element(parent: Element, name: str) -> Optional[Element]:
+    """Find a child element by name, trying both namespaced and non-namespaced variants.
+
+    This handles XML documents that may or may not include namespace prefixes.
+    """
+    el = parent.find(f"{{*}}{name}")
+    if el is None:
+        el = parent.find(name)
+    return el
+
+
+def _find_element_text(parent: Element, name: str, default: str = "") -> str:
+    """Find a child element and return its text content.
+
+    Returns the default value if element not found or has no text.
+    """
+    el = _find_element(parent, name)
+    if el is None or el.text is None:
+        return default
+    return el.text.strip()
+
+
 def _parse_tagging_document(payload: bytes) -> list[dict[str, str]]:
     try:
         root = fromstring(payload)
@@ -585,17 +601,11 @@ def _parse_tagging_document(payload: bytes) -> list[dict[str, str]]:
     for tag_el in list(tagset):
         if _strip_ns(tag_el.tag) != "Tag":
             continue
-        key_el = tag_el.find("{*}Key")
-        if key_el is None:
-            key_el = tag_el.find("Key")
-        value_el = tag_el.find("{*}Value")
-        if value_el is None:
-            value_el = tag_el.find("Value")
-        key = (key_el.text or "").strip() if key_el is not None else ""
+        key = _find_element_text(tag_el, "Key")
         if not key:
             continue
-        value = value_el.text if value_el is not None else ""
-        tags.append({"Key": key, "Value": value or ""})
+        value = _find_element_text(tag_el, "Value")
+        tags.append({"Key": key, "Value": value})
     return tags
 
 
```
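
The two helpers make tag parsing namespace-agnostic. A runnable demonstration (helper bodies copied from the hunk above so the snippet is self-contained):

```python
from typing import Optional
from xml.etree.ElementTree import Element, fromstring

def _find_element(parent: Element, name: str) -> Optional[Element]:
    el = parent.find(f"{{*}}{name}")  # '{*}' wildcard namespace, Python 3.8+
    if el is None:
        el = parent.find(name)
    return el

def _find_element_text(parent: Element, name: str, default: str = "") -> str:
    el = _find_element(parent, name)
    if el is None or el.text is None:
        return default
    return el.text.strip()

payload = (b'<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
           b'<TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>')
root = fromstring(payload)
tag = _find_element(_find_element(root, "TagSet"), "Tag")
assert _find_element_text(tag, "Key") == "env"    # works with the xmlns present...
assert _find_element_text(tag, "Value") == "prod"  # ...and equally without it
```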
```diff
@@ -1439,7 +1449,7 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
 
     if request.method == "DELETE":
         try:
-            storage.set_bucket_quota(bucket_name, max_size_bytes=None, max_objects=None)
+            storage.set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
         except StorageError as exc:
             return _error_response("NoSuchBucket", str(exc), 404)
         current_app.logger.info("Bucket quota deleted", extra={"bucket": bucket_name})
@@ -1473,7 +1483,7 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
         return _error_response("InvalidArgument", f"max_objects {exc}", 400)
 
     try:
-        storage.set_bucket_quota(bucket_name, max_size_bytes=max_size_bytes, max_objects=max_objects)
+        storage.set_bucket_quota(bucket_name, max_bytes=max_size_bytes, max_objects=max_objects)
     except StorageError as exc:
         return _error_response("NoSuchBucket", str(exc), 404)
 
@@ -1665,7 +1675,6 @@ def bucket_handler(bucket_name: str) -> Response:
     effective_start = ""
     if list_type == "2":
         if continuation_token:
-            import base64
             try:
                 effective_start = base64.urlsafe_b64decode(continuation_token.encode()).decode("utf-8")
             except Exception:
@@ -1722,7 +1731,6 @@ def bucket_handler(bucket_name: str) -> Response:
         next_marker = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1]
 
     if list_type == "2" and next_marker:
-        import base64
         next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8")
 
     if list_type == "2":
```
```diff
@@ -2163,47 +2171,88 @@ def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response:
 
 
 class AwsChunkedDecoder:
-    """Decodes aws-chunked encoded streams."""
+    """Decodes aws-chunked encoded streams.
+
+    Performance optimized with buffered line reading instead of byte-by-byte.
+    """
 
     def __init__(self, stream):
         self.stream = stream
-        self.buffer = b""
+        self._read_buffer = bytearray()  # Performance: Pre-allocated buffer
         self.chunk_remaining = 0
         self.finished = False
 
+    def _read_line(self) -> bytes:
+        """Read until CRLF using buffered reads instead of byte-by-byte.
+
+        Performance: Reads in batches of 64-256 bytes instead of 1 byte at a time.
+        """
+        line = bytearray()
+        while True:
+            # Check if we have data in buffer
+            if self._read_buffer:
+                # Look for CRLF in buffer
+                idx = self._read_buffer.find(b"\r\n")
+                if idx != -1:
+                    # Found CRLF - extract line and update buffer
+                    line.extend(self._read_buffer[: idx + 2])
+                    del self._read_buffer[: idx + 2]
+                    return bytes(line)
+                # No CRLF yet - consume entire buffer
+                line.extend(self._read_buffer)
+                self._read_buffer.clear()
+
+            # Read more data in larger chunks (64 bytes is enough for chunk headers)
+            chunk = self.stream.read(64)
+            if not chunk:
+                return bytes(line) if line else b""
+            self._read_buffer.extend(chunk)
+
+    def _read_exact(self, n: int) -> bytes:
+        """Read exactly n bytes, using buffer first."""
+        result = bytearray()
+        # Use buffered data first
+        if self._read_buffer:
+            take = min(len(self._read_buffer), n)
+            result.extend(self._read_buffer[:take])
+            del self._read_buffer[:take]
+            n -= take
+
+        # Read remaining directly from stream
+        if n > 0:
+            data = self.stream.read(n)
+            if data:
+                result.extend(data)
+
+        return bytes(result)
+
     def read(self, size=-1):
         if self.finished:
             return b""
 
-        result = b""
+        result = bytearray()  # Performance: Use bytearray for building result
         while size == -1 or len(result) < size:
             if self.chunk_remaining > 0:
                 to_read = self.chunk_remaining
                 if size != -1:
                     to_read = min(to_read, size - len(result))
 
-                chunk = self.stream.read(to_read)
+                chunk = self._read_exact(to_read)
                 if not chunk:
                     raise IOError("Unexpected EOF in chunk data")
 
-                result += chunk
+                result.extend(chunk)
                 self.chunk_remaining -= len(chunk)
 
                 if self.chunk_remaining == 0:
-                    crlf = self.stream.read(2)
+                    crlf = self._read_exact(2)
                     if crlf != b"\r\n":
                         raise IOError("Malformed chunk: missing CRLF")
             else:
-                line = b""
-                while True:
-                    char = self.stream.read(1)
-                    if not char:
-                        if not line:
-                            self.finished = True
-                            return result
-                        raise IOError("Unexpected EOF in chunk size")
-                    line += char
-                    if line.endswith(b"\r\n"):
-                        break
+                line = self._read_line()
+                if not line:
+                    self.finished = True
+                    return bytes(result)
 
                 try:
                     line_str = line.decode("ascii").strip()
@@ -2215,22 +2264,16 @@ class AwsChunkedDecoder:
 
                 if chunk_size == 0:
                     self.finished = True
+                    # Skip trailing headers
                     while True:
-                        line = b""
-                        while True:
-                            char = self.stream.read(1)
-                            if not char:
-                                break
-                            line += char
-                            if line.endswith(b"\r\n"):
-                                break
-                        if line == b"\r\n" or not line:
-                            break
-                    return result
+                        trailer = self._read_line()
+                        if trailer == b"\r\n" or not trailer:
+                            break
+                    return bytes(result)
 
                 self.chunk_remaining = chunk_size
 
-        return result
+        return bytes(result)
 
 
 def _initiate_multipart_upload(bucket_name: str, object_key: str) -> Response:
```
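
For reference, the framing `AwsChunkedDecoder` consumes looks like the following. Real S3 clients append `;chunk-signature=...` extensions to each size line; how those are stripped is not visible in this diff, so the sketch uses bare hex sizes:

```python
import io

# aws-chunked body: hex chunk size + CRLF, chunk bytes + CRLF, repeated,
# then a zero-size chunk and (possibly empty) trailing headers ended by a blank line.
body = io.BytesIO(
    b"4\r\nWiki\r\n"
    b"5\r\npedia\r\n"
    b"0\r\n"
    b"\r\n"
)
# Under this framing, AwsChunkedDecoder(body).read() would yield b"Wikipedia".
```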
**app/storage.py** (236 changed lines)
```diff
@@ -7,9 +7,11 @@ import os
 import re
 import shutil
 import stat
+import threading
 import time
 import unicodedata
 import uuid
+from collections import OrderedDict
 from contextlib import contextmanager
 from dataclasses import dataclass
 from datetime import datetime, timezone
```
```diff
@@ -129,12 +131,29 @@ class ObjectStorage:
     MULTIPART_MANIFEST = "manifest.json"
     BUCKET_CONFIG_FILE = ".bucket.json"
     KEY_INDEX_CACHE_TTL = 30
+    OBJECT_CACHE_MAX_SIZE = 100  # Maximum number of buckets to cache
 
     def __init__(self, root: Path) -> None:
         self.root = Path(root)
         self.root.mkdir(parents=True, exist_ok=True)
         self._ensure_system_roots()
-        self._object_cache: Dict[str, tuple[Dict[str, ObjectMeta], float]] = {}
+        # LRU cache for object metadata with thread-safe access
+        self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float]] = OrderedDict()
+        self._cache_lock = threading.Lock()  # Global lock for cache structure
+        # Performance: Per-bucket locks to reduce contention
+        self._bucket_locks: Dict[str, threading.Lock] = {}
+        # Cache version counter for detecting stale reads
+        self._cache_version: Dict[str, int] = {}
+        # Performance: Bucket config cache with TTL
+        self._bucket_config_cache: Dict[str, tuple[dict[str, Any], float]] = {}
+        self._bucket_config_cache_ttl = 30.0  # 30 second TTL
+
+    def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
+        """Get or create a lock for a specific bucket. Reduces global lock contention."""
+        with self._cache_lock:
+            if bucket_id not in self._bucket_locks:
+                self._bucket_locks[bucket_id] = threading.Lock()
+            return self._bucket_locks[bucket_id]
 
     def list_buckets(self) -> List[BucketMeta]:
         buckets: List[BucketMeta] = []
```
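
`_get_bucket_lock` is the standard per-key lock map: a short-lived global lock guards only the table, and all real work contends on the per-bucket lock. The pattern in isolation (not repo code):

```python
import threading
from typing import Dict

class PerKeyLocks:
    """One lock per key; the global lock is held only for the table lookup."""

    def __init__(self) -> None:
        self._global = threading.Lock()
        self._locks: Dict[str, threading.Lock] = {}

    def lock_for(self, key: str) -> threading.Lock:
        with self._global:  # held only long enough to find or create the entry
            return self._locks.setdefault(key, threading.Lock())

locks = PerKeyLocks()
with locks.lock_for("bucket-a"):  # contends only with same-bucket work
    pass
```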
```diff
@@ -240,11 +259,13 @@ class ObjectStorage:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
             raise StorageError("Bucket does not exist")
-        if self._has_visible_objects(bucket_path):
+        # Performance: Single check instead of three separate traversals
+        has_objects, has_versions, has_multipart = self._check_bucket_contents(bucket_path)
+        if has_objects:
             raise StorageError("Bucket not empty")
-        if self._has_archived_versions(bucket_path):
+        if has_versions:
             raise StorageError("Bucket contains archived object versions")
-        if self._has_active_multipart_uploads(bucket_path):
+        if has_multipart:
             raise StorageError("Bucket has active multipart uploads")
         self._remove_tree(bucket_path)
         self._remove_tree(self._system_bucket_root(bucket_path.name))
@@ -388,15 +409,18 @@ class ObjectStorage:
         self._write_metadata(bucket_id, safe_key, combined_meta)
 
         self._invalidate_bucket_stats_cache(bucket_id)
-        self._invalidate_object_cache(bucket_id)
 
-        return ObjectMeta(
+        # Performance: Lazy update - only update the affected key instead of invalidating whole cache
+        obj_meta = ObjectMeta(
             key=safe_key.as_posix(),
             size=stat.st_size,
             last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
             etag=etag,
             metadata=metadata,
         )
+        self._update_object_cache_entry(bucket_id, safe_key.as_posix(), obj_meta)
+
+        return obj_meta
 
     def get_object_path(self, bucket_name: str, object_key: str) -> Path:
         path = self._object_path(bucket_name, object_key)
@@ -444,7 +468,8 @@ class ObjectStorage:
         self._delete_metadata(bucket_id, rel)
 
         self._invalidate_bucket_stats_cache(bucket_id)
-        self._invalidate_object_cache(bucket_id)
+        # Performance: Lazy update - only remove the affected key instead of invalidating whole cache
+        self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None)
         self._cleanup_empty_parents(path, bucket_path)
 
     def purge_object(self, bucket_name: str, object_key: str) -> None:
@@ -466,7 +491,8 @@ class ObjectStorage:
             shutil.rmtree(legacy_version_dir, ignore_errors=True)
 
         self._invalidate_bucket_stats_cache(bucket_id)
-        self._invalidate_object_cache(bucket_id)
+        # Performance: Lazy update - only remove the affected key instead of invalidating whole cache
+        self._update_object_cache_entry(bucket_id, rel.as_posix(), None)
         self._cleanup_empty_parents(target, bucket_path)
 
     def is_versioning_enabled(self, bucket_name: str) -> bool:
@@ -729,8 +755,6 @@ class ObjectStorage:
         bucket_id = bucket_path.name
         safe_key = self._sanitize_object_key(object_key)
         version_dir = self._version_dir(bucket_id, safe_key)
-        if not version_dir.exists():
-            version_dir = self._legacy_version_dir(bucket_id, safe_key)
         if not version_dir.exists():
             version_dir = self._legacy_version_dir(bucket_id, safe_key)
         if not version_dir.exists():
```
@@ -879,6 +903,10 @@ class ObjectStorage:
         part_number: int,
         stream: BinaryIO,
     ) -> str:
+        """Upload a part for a multipart upload.
+
+        Uses file locking to safely update the manifest and handle concurrent uploads.
+        """
         if part_number < 1:
             raise StorageError("part_number must be >= 1")
         bucket_path = self._bucket_path(bucket_name)
@@ -889,11 +917,26 @@ class ObjectStorage:
         if not upload_root.exists():
             raise StorageError("Multipart upload not found")

+        # Write part to temporary file first, then rename atomically
         checksum = hashlib.md5()
         part_filename = f"part-{part_number:05d}.part"
         part_path = upload_root / part_filename
-        with part_path.open("wb") as target:
+        temp_path = upload_root / f".{part_filename}.tmp"
+
+        try:
+            with temp_path.open("wb") as target:
                 shutil.copyfileobj(_HashingReader(stream, checksum), target)
+
+            # Atomic rename (or replace on Windows)
+            temp_path.replace(part_path)
+        except OSError:
+            # Clean up temp file on failure
+            try:
+                temp_path.unlink(missing_ok=True)
+            except OSError:
+                pass
+            raise

         record = {
             "etag": checksum.hexdigest(),
             "size": part_path.stat().st_size,
@@ -903,16 +946,29 @@ class ObjectStorage:
         manifest_path = upload_root / self.MULTIPART_MANIFEST
         lock_path = upload_root / ".manifest.lock"

+        # Retry loop for handling transient lock/read failures
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
                 with lock_path.open("w") as lock_file:
                     with _file_lock(lock_file):
                         try:
                             manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
                         except (OSError, json.JSONDecodeError) as exc:
+                            if attempt < max_retries - 1:
+                                time.sleep(0.1 * (attempt + 1))
+                                continue
                             raise StorageError("Multipart manifest unreadable") from exc

                         parts = manifest.setdefault("parts", {})
                         parts[str(part_number)] = record
                         manifest_path.write_text(json.dumps(manifest), encoding="utf-8")
+                break
+            except OSError as exc:
+                if attempt < max_retries - 1:
+                    time.sleep(0.1 * (attempt + 1))
+                    continue
+                raise StorageError(f"Failed to update multipart manifest: {exc}") from exc

         return record["etag"]

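Note on the temp-file change above: staging the part in a hidden `.tmp` file and finishing with `Path.replace` means a concurrent reader can never observe a half-written part. The same pattern in isolation, as a minimal sketch (the function name and the use of `tempfile` are illustrative, not from this codebase):

    import os
    import tempfile
    from pathlib import Path

    def atomic_write_bytes(dest: Path, data: bytes) -> None:
        # Stage the payload in a temp file inside the destination directory,
        # so the final rename stays on one filesystem and remains atomic.
        fd, tmp_name = tempfile.mkstemp(dir=dest.parent, prefix=f".{dest.name}.")
        tmp = Path(tmp_name)
        try:
            with os.fdopen(fd, "wb") as handle:
                handle.write(data)
            # os.replace/Path.replace is atomic on POSIX and overwrites on Windows.
            tmp.replace(dest)
        except OSError:
            tmp.unlink(missing_ok=True)
            raise

The temp directory must sit next to the destination: a rename across filesystems silently degrades to copy-plus-delete and loses atomicity.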
@@ -1019,13 +1075,17 @@ class ObjectStorage:
         self._invalidate_bucket_stats_cache(bucket_id)

         stat = destination.stat()
-        return ObjectMeta(
+        # Performance: Lazy update - only update the affected key instead of invalidating whole cache
+        obj_meta = ObjectMeta(
             key=safe_key.as_posix(),
             size=stat.st_size,
             last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
             etag=checksum.hexdigest(),
             metadata=metadata,
         )
+        self._update_object_cache_entry(bucket_id, safe_key.as_posix(), obj_meta)
+
+        return obj_meta

     def abort_multipart_upload(self, bucket_name: str, upload_id: str) -> None:
         bucket_path = self._bucket_path(bucket_name)
@@ -1264,28 +1324,85 @@ class ObjectStorage:
         return objects

     def _get_object_cache(self, bucket_id: str, bucket_path: Path) -> Dict[str, ObjectMeta]:
-        """Get cached object metadata for a bucket, refreshing if stale."""
-        now = time.time()
-        cached = self._object_cache.get(bucket_id)
+        """Get cached object metadata for a bucket, refreshing if stale.
+
+        Uses LRU eviction to prevent unbounded cache growth.
+        Thread-safe with per-bucket locks to reduce contention.
+        """
+        now = time.time()
+
+        # Quick check with global lock (brief)
+        with self._cache_lock:
+            cached = self._object_cache.get(bucket_id)
             if cached:
                 objects, timestamp = cached
                 if now - timestamp < self.KEY_INDEX_CACHE_TTL:
+                    self._object_cache.move_to_end(bucket_id)
+                    return objects
+            cache_version = self._cache_version.get(bucket_id, 0)
+
+        # Use per-bucket lock for cache building (allows parallel builds for different buckets)
+        bucket_lock = self._get_bucket_lock(bucket_id)
+        with bucket_lock:
+            # Double-check cache after acquiring per-bucket lock
+            with self._cache_lock:
+                cached = self._object_cache.get(bucket_id)
+                if cached:
+                    objects, timestamp = cached
+                    if now - timestamp < self.KEY_INDEX_CACHE_TTL:
+                        self._object_cache.move_to_end(bucket_id)
                         return objects
+
+            # Build cache with per-bucket lock held (prevents duplicate work)
             objects = self._build_object_cache(bucket_path)
-        self._object_cache[bucket_id] = (objects, now)
+
+            with self._cache_lock:
+                # Check if cache was invalidated while we were building
+                current_version = self._cache_version.get(bucket_id, 0)
+                if current_version != cache_version:
+                    objects = self._build_object_cache(bucket_path)
+
+                # Evict oldest entries if cache is full
+                while len(self._object_cache) >= self.OBJECT_CACHE_MAX_SIZE:
+                    self._object_cache.popitem(last=False)
+
+                self._object_cache[bucket_id] = (objects, time.time())
+                self._object_cache.move_to_end(bucket_id)
+
         return objects

     def _invalidate_object_cache(self, bucket_id: str) -> None:
-        """Invalidate the object cache and etag index for a bucket."""
+        """Invalidate the object cache and etag index for a bucket.
+
+        Increments version counter to signal stale reads.
+        """
+        with self._cache_lock:
             self._object_cache.pop(bucket_id, None)
+            self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1

         etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
         try:
             etag_index_path.unlink(missing_ok=True)
         except OSError:
             pass

+    def _update_object_cache_entry(self, bucket_id: str, key: str, meta: Optional[ObjectMeta]) -> None:
+        """Update a single entry in the object cache instead of invalidating the whole cache.
+
+        This is a performance optimization - lazy update instead of full invalidation.
+        """
+        with self._cache_lock:
+            cached = self._object_cache.get(bucket_id)
+            if cached:
+                objects, timestamp = cached
+                if meta is None:
+                    # Delete operation - remove key from cache
+                    objects.pop(key, None)
+                else:
+                    # Put operation - update/add key in cache
+                    objects[key] = meta
+                # Keep same timestamp - don't reset TTL for single key updates

     def _ensure_system_roots(self) -> None:
         for path in (
             self._system_root_path(),
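The rewritten `_get_object_cache` combines three ideas: a TTL for freshness, LRU eviction via `OrderedDict`, and a per-key version counter so a build that raced with an invalidation is detected and redone. A self-contained sketch of the same pattern, with the class name, TTL, and size limit being illustrative rather than the project's values:

    import threading
    import time
    from collections import OrderedDict

    class TtlLruCache:
        """Illustrative cache: TTL freshness, LRU eviction, version-checked rebuilds."""

        def __init__(self, ttl=30.0, max_size=128):
            self._ttl = ttl
            self._max_size = max_size
            self._lock = threading.Lock()
            self._entries = OrderedDict()   # key -> (value, timestamp)
            self._versions = {}             # key -> invalidation counter

        def get_or_build(self, key, build):
            with self._lock:
                hit = self._entries.get(key)
                if hit and time.time() - hit[1] < self._ttl:
                    self._entries.move_to_end(key)  # refresh LRU position
                    return hit[0]
                version = self._versions.get(key, 0)
            value = build()  # expensive work happens outside the lock
            with self._lock:
                if self._versions.get(key, 0) != version:
                    value = build()  # invalidated mid-build; rebuild before caching
                while len(self._entries) >= self._max_size:
                    self._entries.popitem(last=False)  # evict least recently used
                self._entries[key] = (value, time.time())
            return value

        def invalidate(self, key):
            with self._lock:
                self._entries.pop(key, None)
                self._versions[key] = self._versions.get(key, 0) + 1

The version counter is what makes the lazy `_update_object_cache_entry` path safe: a writer that invalidates while a reader is mid-build forces that reader to rebuild instead of caching a stale snapshot.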
@@ -1305,19 +1422,33 @@ class ObjectStorage:
         return self._system_bucket_root(bucket_name) / self.BUCKET_CONFIG_FILE

     def _read_bucket_config(self, bucket_name: str) -> dict[str, Any]:
+        # Performance: Check cache first
+        now = time.time()
+        cached = self._bucket_config_cache.get(bucket_name)
+        if cached:
+            config, cached_time = cached
+            if now - cached_time < self._bucket_config_cache_ttl:
+                return config.copy()  # Return copy to prevent mutation
+
         config_path = self._bucket_config_path(bucket_name)
         if not config_path.exists():
+            self._bucket_config_cache[bucket_name] = ({}, now)
             return {}
         try:
             data = json.loads(config_path.read_text(encoding="utf-8"))
-            return data if isinstance(data, dict) else {}
+            config = data if isinstance(data, dict) else {}
+            self._bucket_config_cache[bucket_name] = (config, now)
+            return config.copy()
         except (OSError, json.JSONDecodeError):
+            self._bucket_config_cache[bucket_name] = ({}, now)
             return {}

     def _write_bucket_config(self, bucket_name: str, payload: dict[str, Any]) -> None:
         config_path = self._bucket_config_path(bucket_name)
         config_path.parent.mkdir(parents=True, exist_ok=True)
         config_path.write_text(json.dumps(payload), encoding="utf-8")
+        # Performance: Update cache immediately after write
+        self._bucket_config_cache[bucket_name] = (payload.copy(), time.time())

     def _set_bucket_config_entry(self, bucket_name: str, key: str, value: Any | None) -> None:
         config = self._read_bucket_config(bucket_name)
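The `.copy()` calls in `_read_bucket_config` are load-bearing: callers such as `_set_bucket_config_entry` mutate the dict they get back, and returning the cached dict itself would silently rewrite the shared cache entry. A standalone illustration (not project code):

    cache = {"demo": {"versioning": "Enabled"}}

    alias = cache["demo"]              # hands out the cached dict itself
    alias["versioning"] = "Suspended"  # the cache entry is now corrupted

    safe = dict(cache["demo"])         # shallow copy, as _read_bucket_config returns
    safe["quota_bytes"] = 1024         # cache entry untouched

A shallow copy only protects the top level; if the config ever holds nested structures, those inner objects would still be shared between callers and the cache.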
@@ -1439,33 +1570,68 @@ class ObjectStorage:
         except OSError:
             continue

-    def _has_visible_objects(self, bucket_path: Path) -> bool:
+    def _check_bucket_contents(self, bucket_path: Path) -> tuple[bool, bool, bool]:
+        """Check bucket for objects, versions, and multipart uploads in a single pass.
+
+        Performance optimization: Combines three separate rglob traversals into one.
+        Returns (has_visible_objects, has_archived_versions, has_active_multipart_uploads).
+        Uses early exit when all three are found.
+        """
+        has_objects = False
+        has_versions = False
+        has_multipart = False
+        bucket_name = bucket_path.name
+
+        # Check visible objects in bucket
         for path in bucket_path.rglob("*"):
+            if has_objects:
+                break
             if not path.is_file():
                 continue
             rel = path.relative_to(bucket_path)
             if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
                 continue
-            return True
-        return False
+            has_objects = True
+
+        # Check archived versions (only if needed)
+        for version_root in (
+            self._bucket_versions_root(bucket_name),
+            self._legacy_versions_root(bucket_name),
+        ):
+            if has_versions:
+                break
+            if version_root.exists():
+                for path in version_root.rglob("*"):
+                    if path.is_file():
+                        has_versions = True
+                        break
+
+        # Check multipart uploads (only if needed)
+        for uploads_root in (
+            self._multipart_bucket_root(bucket_name),
+            self._legacy_multipart_bucket_root(bucket_name),
+        ):
+            if has_multipart:
+                break
+            if uploads_root.exists():
+                for path in uploads_root.rglob("*"):
+                    if path.is_file():
+                        has_multipart = True
+                        break
+
+        return has_objects, has_versions, has_multipart
+
+    def _has_visible_objects(self, bucket_path: Path) -> bool:
+        has_objects, _, _ = self._check_bucket_contents(bucket_path)
+        return has_objects

     def _has_archived_versions(self, bucket_path: Path) -> bool:
-        for version_root in (
-            self._bucket_versions_root(bucket_path.name),
-            self._legacy_versions_root(bucket_path.name),
-        ):
-            if version_root.exists() and any(path.is_file() for path in version_root.rglob("*")):
-                return True
-        return False
+        _, has_versions, _ = self._check_bucket_contents(bucket_path)
+        return has_versions

     def _has_active_multipart_uploads(self, bucket_path: Path) -> bool:
-        for uploads_root in (
-            self._multipart_bucket_root(bucket_path.name),
-            self._legacy_multipart_bucket_root(bucket_path.name),
-        ):
-            if uploads_root.exists() and any(path.is_file() for path in uploads_root.rglob("*")):
-                return True
-        return False
+        _, _, has_multipart = self._check_bucket_contents(bucket_path)
+        return has_multipart

     def _remove_tree(self, path: Path) -> None:
         if not path.exists():
app/ui.py (31 changes)
@@ -415,7 +415,7 @@ def list_bucket_objects(bucket_name: str):
    except IamError as exc:
        return jsonify({"error": str(exc)}), 403

-    max_keys = min(int(request.args.get("max_keys", 1000)), 10000)
+    max_keys = min(int(request.args.get("max_keys", 1000)), 100000)
    continuation_token = request.args.get("continuation_token") or None
    prefix = request.args.get("prefix") or None

@@ -434,6 +434,14 @@ def list_bucket_objects(bucket_name: str):
    except StorageError:
        versioning_enabled = False

+    # Pre-compute URL templates once (not per-object) for performance
+    # Frontend will construct actual URLs by replacing KEY_PLACEHOLDER
+    preview_template = url_for("ui.object_preview", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    delete_template = url_for("ui.delete_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    presign_template = url_for("ui.object_presign", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    versions_template = url_for("ui.object_versions", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    restore_template = url_for("ui.restore_object_version", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER", version_id="VERSION_ID_PLACEHOLDER")
+
    objects_data = []
    for obj in result.objects:
        objects_data.append({
@@ -442,13 +450,6 @@ def list_bucket_objects(bucket_name: str):
            "last_modified": obj.last_modified.isoformat(),
            "last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
            "etag": obj.etag,
-            "metadata": obj.metadata or {},
-            "preview_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key),
-            "download_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key) + "?download=1",
-            "presign_endpoint": url_for("ui.object_presign", bucket_name=bucket_name, object_key=obj.key),
-            "delete_endpoint": url_for("ui.delete_object", bucket_name=bucket_name, object_key=obj.key),
-            "versions_endpoint": url_for("ui.object_versions", bucket_name=bucket_name, object_key=obj.key),
-            "restore_template": url_for("ui.restore_object_version", bucket_name=bucket_name, object_key=obj.key, version_id="VERSION_ID_PLACEHOLDER"),
        })

    return jsonify({
@@ -457,6 +458,14 @@ def list_bucket_objects(bucket_name: str):
        "next_continuation_token": result.next_continuation_token,
        "total_count": result.total_count,
        "versioning_enabled": versioning_enabled,
+        "url_templates": {
+            "preview": preview_template,
+            "download": preview_template + "?download=1",
+            "presign": presign_template,
+            "delete": delete_template,
+            "versions": versions_template,
+            "restore": restore_template,
+        },
    })
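With seven per-object `url_for` calls replaced by one `url_templates` block per response, a 10,000-object page drops from tens of thousands of URL constructions to seven. The response now carries something like the following; shown as a Python literal, with the paths made up for the example since the real ones depend on the blueprint's routes:

    response = {
        "objects": [
            {"key": "photos/cat.jpg", "size": 52431, "etag": "9a0364b9...",
             "last_modified": "2024-01-01T00:00:00+00:00"},
        ],
        "url_templates": {
            "preview": "/buckets/demo/objects/KEY_PLACEHOLDER",         # illustrative
            "delete": "/buckets/demo/objects/KEY_PLACEHOLDER/delete",   # illustrative
        },
    }

    # A server-side mirror of the frontend helper would look like:
    from urllib.parse import quote

    def expand(template: str, key: str) -> str:
        # Encode the key but keep '/' so prefixed keys stay path-like.
        return template.replace("KEY_PLACEHOLDER", quote(key, safe="/"))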

@@ -1458,10 +1467,16 @@ def update_bucket_replication(bucket_name: str):
        else:
            flash("No replication configuration to pause", "warning")
    elif action == "resume":
+        from .replication import REPLICATION_MODE_ALL
        rule = _replication().get_rule(bucket_name)
        if rule:
            rule.enabled = True
            _replication().set_rule(rule)
+            # When resuming, sync any pending objects that accumulated while paused
+            if rule.mode == REPLICATION_MODE_ALL:
+                _replication().replicate_existing_objects(bucket_name)
+                flash("Replication resumed. Syncing pending objects in background.", "success")
+            else:
                flash("Replication resumed", "success")
        else:
            flash("No replication configuration to resume", "warning")
@@ -1,7 +1,7 @@
 """Central location for the application version string."""
 from __future__ import annotations

-APP_VERSION = "0.1.8"
+APP_VERSION = "0.2.0"


 def get_version() -> str:
@@ -362,6 +362,68 @@ code {
   color: #2563eb;
 }

+.docs-sidebar-mobile {
+  border-radius: 0.75rem;
+  border: 1px solid var(--myfsio-card-border);
+}
+
+.docs-sidebar-mobile .docs-toc {
+  display: flex;
+  flex-wrap: wrap;
+  gap: 0.5rem 1rem;
+  padding-top: 0.5rem;
+}
+
+.docs-sidebar-mobile .docs-toc li {
+  flex: 1 0 45%;
+}
+
+.min-width-0 {
+  min-width: 0;
+}
+
+/* Ensure pre blocks don't overflow on mobile */
+.alert pre {
+  max-width: 100%;
+  overflow-x: auto;
+  -webkit-overflow-scrolling: touch;
+}
+
+/* IAM User Cards */
+.iam-user-card {
+  border: 1px solid var(--myfsio-card-border);
+  border-radius: 0.75rem;
+  transition: box-shadow 0.2s ease, transform 0.2s ease;
+}
+
+.iam-user-card:hover {
+  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
+}
+
+[data-theme='dark'] .iam-user-card:hover {
+  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
+}
+
+.user-avatar-lg {
+  width: 48px;
+  height: 48px;
+  border-radius: 12px;
+}
+
+.btn-icon {
+  padding: 0.25rem;
+  line-height: 1;
+  border: none;
+  background: transparent;
+  color: var(--myfsio-muted);
+  border-radius: 0.375rem;
+}
+
+.btn-icon:hover {
+  background: var(--myfsio-hover-bg);
+  color: var(--myfsio-text);
+}
+
 .badge {
   font-weight: 500;
   padding: 0.35em 0.65em;
@@ -13,8 +13,7 @@
 <div class="d-flex align-items-center gap-3">
   <div class="bucket-icon" style="width: 48px; height: 48px; border-radius: 12px;">
     <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
-      <path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
-      <path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H11a.5.5 0 0 1 0 1h-1v1h1a.5.5 0 0 1 0 1h-1v1a.5.5 0 0 1-1 0v-1H6v1a.5.5 0 0 1-1 0v-1H4a.5.5 0 0 1 0-1h1v-1H4a.5.5 0 0 1 0-1h1.5A1.5 1.5 0 0 1 7 10.5V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1zm5 7.5v1h3v-1a.5.5 0 0 0-.5-.5h-2a.5.5 0 0 0-.5.5z"/>
+      <path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
     </svg>
   </div>
   <div>
@@ -173,14 +172,16 @@
 </div>
 <div class="d-flex align-items-center gap-1">
   <span class="text-muted">Batch</span>
-  <select id="page-size-select" class="form-select form-select-sm py-0" style="width: auto; font-size: 0.75rem;">
+  <select id="page-size-select" class="form-select form-select-sm py-0" style="width: auto; font-size: 0.75rem;" title="Number of objects to load per batch">
     <option value="1000">1K</option>
     <option value="5000" selected>5K</option>
     <option value="10000">10K</option>
     <option value="25000">25K</option>
     <option value="50000">50K</option>
+    <option value="75000">75K</option>
+    <option value="100000">100K</option>
   </select>
-  <span class="text-muted">objects</span>
+  <span class="text-muted">per batch</span>
 </div>
 </div>
 </div>
@@ -1144,13 +1145,18 @@
 </div>

 {% elif replication_rule and not replication_rule.enabled %}
-<div class="alert alert-warning d-flex align-items-center mb-4" role="alert">
-  <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="flex-shrink-0 me-2" viewBox="0 0 16 16">
+<div class="alert alert-warning d-flex align-items-start mb-4" role="alert">
+  <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="flex-shrink-0 me-2 mt-1" viewBox="0 0 16 16">
     <path d="M5.5 3.5A1.5 1.5 0 0 1 7 5v6a1.5 1.5 0 0 1-3 0V5a1.5 1.5 0 0 1 1.5-1.5zm5 0A1.5 1.5 0 0 1 12 5v6a1.5 1.5 0 0 1-3 0V5a1.5 1.5 0 0 1 1.5-1.5z"/>
   </svg>
   <div>
-    <strong>Replication Paused</strong> —
-    Replication is configured but currently paused. New uploads will not be replicated until resumed.
+    <strong>Replication Paused</strong>
+    <p class="mb-1">Replication is configured but currently paused. New uploads will not be replicated until resumed.</p>
+    {% if replication_rule.mode == 'all' %}
+    <p class="mb-0 small text-dark"><strong>Tip:</strong> When you resume, any objects uploaded while paused will be automatically synced to the target.</p>
+    {% else %}
+    <p class="mb-0 small text-dark"><strong>Note:</strong> Objects uploaded while paused will not be synced (mode: new_only). Consider switching to "All Objects" mode if you need to sync missed uploads.</p>
+    {% endif %}
   </div>
 </div>

@@ -1777,6 +1783,91 @@

 {% block extra_scripts %}
 <script>
+  // Auto-indent for JSON textareas
+  function setupJsonAutoIndent(textarea) {
+    if (!textarea) return;
+
+    textarea.addEventListener('keydown', function(e) {
+      if (e.key === 'Enter') {
+        e.preventDefault();
+
+        const start = this.selectionStart;
+        const end = this.selectionEnd;
+        const value = this.value;
+
+        // Get the current line
+        const lineStart = value.lastIndexOf('\n', start - 1) + 1;
+        const currentLine = value.substring(lineStart, start);
+
+        // Calculate base indentation (leading whitespace of current line)
+        const indentMatch = currentLine.match(/^(\s*)/);
+        let indent = indentMatch ? indentMatch[1] : '';
+
+        // Check if the line ends with { or [ (should increase indent)
+        const trimmedLine = currentLine.trim();
+        const lastChar = trimmedLine.slice(-1);
+
+        let newIndent = indent;
+        let insertAfter = '';
+
+        if (lastChar === '{' || lastChar === '[') {
+          // Add extra indentation
+          newIndent = indent + '  ';
+
+          // Check if we need to add closing bracket on new line
+          const charAfterCursor = value.substring(start, start + 1).trim();
+          if ((lastChar === '{' && charAfterCursor === '}') ||
+              (lastChar === '[' && charAfterCursor === ']')) {
+            insertAfter = '\n' + indent;
+          }
+        } else if (lastChar === ',' || lastChar === ':') {
+          // Keep same indentation for continuation
+          newIndent = indent;
+        }
+
+        // Insert newline with proper indentation
+        const insertion = '\n' + newIndent + insertAfter;
+        const newValue = value.substring(0, start) + insertion + value.substring(end);
+
+        this.value = newValue;
+
+        // Set cursor position after the indentation
+        const newCursorPos = start + 1 + newIndent.length;
+        this.selectionStart = this.selectionEnd = newCursorPos;
+
+        // Trigger input event for any listeners
+        this.dispatchEvent(new Event('input', { bubbles: true }));
+      }
+
+      // Handle Tab key for indentation
+      if (e.key === 'Tab') {
+        e.preventDefault();
+        const start = this.selectionStart;
+        const end = this.selectionEnd;
+
+        if (e.shiftKey) {
+          // Outdent: remove 2 spaces from start of line
+          const lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
+          const lineContent = this.value.substring(lineStart, start);
+          if (lineContent.startsWith('  ')) {
+            this.value = this.value.substring(0, lineStart) +
+              this.value.substring(lineStart + 2);
+            this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
+          }
+        } else {
+          // Indent: insert 2 spaces
+          this.value = this.value.substring(0, start) + '  ' + this.value.substring(end);
+          this.selectionStart = this.selectionEnd = start + 2;
+        }
+
+        this.dispatchEvent(new Event('input', { bubbles: true }));
+      }
+    });
+  }
+
+  // Apply auto-indent to policy editor textarea
+  setupJsonAutoIndent(document.getElementById('policyDocument'));

 const formatBytes = (bytes) => {
   if (!Number.isFinite(bytes)) return `${bytes} bytes`;
   const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
@@ -1882,6 +1973,13 @@
 let pageSize = 5000; // Load large batches for virtual scrolling
 let currentPrefix = ''; // Current folder prefix for navigation
 let allObjects = []; // All loaded object metadata (lightweight)
+let urlTemplates = null; // URL templates from API for constructing object URLs
+
+// Helper to build URL from template by replacing KEY_PLACEHOLDER with encoded key
+const buildUrlFromTemplate = (template, key) => {
+  if (!template) return '';
+  return template.replace('KEY_PLACEHOLDER', encodeURIComponent(key).replace(/%2F/g, '/'));
+};

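Worked example of the helper above: given a template of /preview/KEY_PLACEHOLDER (an illustrative path) and a key of photos/cat 1.jpg, encodeURIComponent yields photos%2Fcat%201.jpg; the global %2F replacement restores the path separators, so the final URL is /preview/photos/cat%201.jpg. The space stays escaped while the slashes survive, which matters assuming the server route uses a path-style converter that accepts embedded slashes in the key.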
 // Virtual scrolling state
 const ROW_HEIGHT = 53; // Height of each table row in pixels
@@ -1928,7 +2026,7 @@
   title="Download"
   aria-label="Download"
 >
-  <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-download" viewBox="0 0 16 16" aria-hidden="true">
+  <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="#0d6efd" class="bi bi-download" viewBox="0 0 16 16" aria-hidden="true">
     <path d="M.5 9.9a.5.5 0 0 1 .5.5v2.5a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1v-2.5a.5.5 0 0 1 1 0v2.5a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2v-2.5a.5.5 0 0 1 .5-.5z" />
     <path d="M7.646 11.854a.5.5 0 0 0 .708 0l3-3a.5.5 0 0 0-.708-.708L8.5 10.293V1.5a.5.5 0 0 0-1 0v8.793L5.354 8.146a.5.5 0 1 0-.708.708l3 3z" />
   </svg>
@@ -1940,7 +2038,7 @@
   title="Delete"
   aria-label="Delete"
 >
-  <svg xmlns="http://www.w3.org/2000/svg" width="13" height="13" fill="currentColor" class="bi bi-trash" viewBox="0 0 16 16" aria-hidden="true">
+  <svg xmlns="http://www.w3.org/2000/svg" width="13" height="13" fill="#dc3545" class="bi bi-trash" viewBox="0 0 16 16" aria-hidden="true">
     <path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z" />
     <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z" />
   </svg>
@@ -2223,22 +2321,26 @@
   objectsLoadingRow.remove();
 }

-// Store lightweight object metadata (no DOM elements!)
+if (data.url_templates && !urlTemplates) {
+  urlTemplates = data.url_templates;
+}
+
 data.objects.forEach(obj => {
   loadedObjectCount++;
+  const key = obj.key;
   allObjects.push({
-    key: obj.key,
+    key: key,
     size: obj.size,
     lastModified: obj.last_modified,
     lastModifiedDisplay: obj.last_modified_display,
     etag: obj.etag,
-    previewUrl: obj.preview_url,
-    downloadUrl: obj.download_url,
-    presignEndpoint: obj.presign_endpoint,
-    deleteEndpoint: obj.delete_endpoint,
-    metadata: JSON.stringify(obj.metadata || {}),
-    versionsEndpoint: obj.versions_endpoint,
-    restoreTemplate: obj.restore_template
+    previewUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.preview, key) : '',
+    downloadUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.download, key) : '',
+    presignEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.presign, key) : '',
+    deleteEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.delete, key) : '',
+    metadata: '{}',
+    versionsEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.versions, key) : '',
+    restoreTemplate: urlTemplates ? urlTemplates.restore.replace('KEY_PLACEHOLDER', encodeURIComponent(key).replace(/%2F/g, '/')) : ''
   });
 });

@@ -3784,40 +3886,39 @@
 selectAllCheckbox?.addEventListener('change', (event) => {
   const shouldSelect = Boolean(event.target?.checked);

-  if (hasFolders()) {
-    const objectsInCurrentView = allObjects.filter(obj => obj.key.startsWith(currentPrefix));
-    objectsInCurrentView.forEach(obj => {
-      const checkbox = obj.element.querySelector('[data-object-select]');
-      if (checkbox && !checkbox.disabled) {
-        checkbox.checked = shouldSelect;
+  // Get all file items in the current view (works with virtual scrolling)
+  const filesInView = visibleItems.filter(item => item.type === 'file');
+
+  // Update selectedRows directly using object keys (not DOM elements)
+  filesInView.forEach(item => {
+    if (shouldSelect) {
+      selectedRows.set(item.data.key, item.data);
+    } else {
+      selectedRows.delete(item.data.key);
     }
-      toggleRowSelection(obj.element, shouldSelect);
   });

+  // Update folder checkboxes in DOM (folders are always rendered)
   document.querySelectorAll('[data-folder-select]').forEach(cb => {
     cb.checked = shouldSelect;
   });
-  } else {

+  // Update any currently rendered object checkboxes
   document.querySelectorAll('[data-object-row]').forEach((row) => {
-    if (row.style.display === 'none') return;
     const checkbox = row.querySelector('[data-object-select]');
-    if (!checkbox || checkbox.disabled) {
-      return;
-    }
+    if (checkbox) {
       checkbox.checked = shouldSelect;
-    toggleRowSelection(row, shouldSelect);
-    });
-  }
     }
+  });

+  updateBulkDeleteState();
   setTimeout(updateBulkDownloadState, 0);
 });

 bulkDownloadButton?.addEventListener('click', async () => {
   if (!bulkDownloadEndpoint) return;
-  const selected = Array.from(document.querySelectorAll('[data-object-select]:checked')).map(
-    (cb) => cb.closest('tr').dataset.key
-  );
+  // Use selectedRows which tracks all selected objects (not just rendered ones)
+  const selected = Array.from(selectedRows.keys());
   if (selected.length === 0) return;

   bulkDownloadButton.disabled = true;
@@ -46,8 +46,7 @@
 <div class="d-flex align-items-center gap-3">
   <div class="bucket-icon">
     <svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
-      <path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
-      <path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H11a.5.5 0 0 1 0 1h-1v1h1a.5.5 0 0 1 0 1h-1v1a.5.5 0 0 1-1 0v-1H6v1a.5.5 0 0 1-1 0v-1H4a.5.5 0 0 1 0-1h1v-1H4a.5.5 0 0 1 0-1h1.5A1.5 1.5 0 0 1 7 10.5V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1zm5 7.5v1h3v-1a.5.5 0 0 0-.5-.5h-2a.5.5 0 0 0-.5.5z"/>
+      <path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
     </svg>
   </div>
   <div>
@@ -8,8 +8,8 @@
 <p class="text-uppercase text-muted small mb-1">Replication</p>
 <h1 class="h3 mb-1 d-flex align-items-center gap-2">
   <svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
-    <path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
-    <path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
+    <path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
+    <path d="M10.232 8.768l.546-.353a.25.25 0 0 0 0-.418l-.546-.354a.25.25 0 0 1-.116-.21V6.25a.25.25 0 0 0-.25-.25h-.5a.25.25 0 0 0-.25.25v1.183a.25.25 0 0 1-.116.21l-.546.354a.25.25 0 0 0 0 .418l.546.353a.25.25 0 0 1 .116.21v1.183a.25.25 0 0 0 .25.25h.5a.25.25 0 0 0 .25-.25V8.978a.25.25 0 0 1 .116-.21z"/>
   </svg>
   Remote Connections
 </h1>
@@ -124,8 +124,7 @@
 <div class="d-flex align-items-center gap-2">
   <div class="connection-icon">
     <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
-      <path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
-      <path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
+      <path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
     </svg>
   </div>
   <span class="fw-medium">{{ conn.name }}</span>
@@ -174,8 +173,7 @@
 <div class="empty-state text-center py-5">
   <div class="empty-state-icon mx-auto mb-3">
     <svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
-      <path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
-      <path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
+      <path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
     </svg>
   </div>
   <h5 class="fw-semibold mb-2">No connections yet</h5>
@@ -14,6 +14,36 @@
 </div>
 </section>
 <div class="row g-4">
+  <div class="col-12 d-xl-none">
+    <div class="card shadow-sm docs-sidebar-mobile mb-0">
+      <div class="card-body py-3">
+        <div class="d-flex align-items-center justify-content-between mb-2">
+          <h3 class="h6 text-uppercase text-muted mb-0">On this page</h3>
+          <button class="btn btn-sm btn-outline-secondary" type="button" data-bs-toggle="collapse" data-bs-target="#mobileDocsToc" aria-expanded="false" aria-controls="mobileDocsToc">
+            <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
+              <path fill-rule="evenodd" d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z"/>
+            </svg>
+          </button>
+        </div>
+        <div class="collapse" id="mobileDocsToc">
+          <ul class="list-unstyled docs-toc mb-0 small">
+            <li><a href="#setup">Set up & run</a></li>
+            <li><a href="#background">Running in background</a></li>
+            <li><a href="#auth">Authentication & IAM</a></li>
+            <li><a href="#console">Console tour</a></li>
+            <li><a href="#automation">Automation / CLI</a></li>
+            <li><a href="#api">REST endpoints</a></li>
+            <li><a href="#examples">API Examples</a></li>
+            <li><a href="#replication">Site Replication</a></li>
+            <li><a href="#versioning">Object Versioning</a></li>
+            <li><a href="#quotas">Bucket Quotas</a></li>
+            <li><a href="#encryption">Encryption</a></li>
+            <li><a href="#troubleshooting">Troubleshooting</a></li>
+          </ul>
+        </div>
+      </div>
+    </div>
+  </div>
 <div class="col-xl-8">
 <article id="setup" class="card shadow-sm docs-section">
 <div class="card-body">
@@ -407,10 +437,62 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
 <span class="docs-section-kicker">07</span>
 <h2 class="h4 mb-0">API Examples</h2>
 </div>
-<p class="text-muted">Common operations using boto3.</p>
+<p class="text-muted">Common operations using popular SDKs and tools.</p>

-<h5 class="mt-4">Multipart Upload</h5>
-<pre><code class="language-python">import boto3
+<h3 class="h6 text-uppercase text-muted mt-4">Python (boto3)</h3>
+<pre class="mb-4"><code class="language-python">import boto3
+
+s3 = boto3.client(
+    's3',
+    endpoint_url='{{ api_base }}',
+    aws_access_key_id='<access_key>',
+    aws_secret_access_key='<secret_key>'
+)
+
+# List buckets
+buckets = s3.list_buckets()['Buckets']
+
+# Create bucket
+s3.create_bucket(Bucket='mybucket')
+
+# Upload file
+s3.upload_file('local.txt', 'mybucket', 'remote.txt')
+
+# Download file
+s3.download_file('mybucket', 'remote.txt', 'downloaded.txt')
+
+# Generate presigned URL (valid 1 hour)
+url = s3.generate_presigned_url(
+    'get_object',
+    Params={'Bucket': 'mybucket', 'Key': 'remote.txt'},
+    ExpiresIn=3600
+)</code></pre>
+
+<h3 class="h6 text-uppercase text-muted mt-4">JavaScript (AWS SDK v3)</h3>
+<pre class="mb-4"><code class="language-javascript">import { S3Client, ListBucketsCommand, PutObjectCommand } from '@aws-sdk/client-s3';
+
+const s3 = new S3Client({
+  endpoint: '{{ api_base }}',
+  region: 'us-east-1',
+  credentials: {
+    accessKeyId: '<access_key>',
+    secretAccessKey: '<secret_key>'
+  },
+  forcePathStyle: true // Required for S3-compatible services
+});
+
+// List buckets
+const { Buckets } = await s3.send(new ListBucketsCommand({}));
+
+// Upload object
+await s3.send(new PutObjectCommand({
+  Bucket: 'mybucket',
+  Key: 'hello.txt',
+  Body: 'Hello, World!'
+}));</code></pre>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Multipart Upload (Python)</h3>
+<pre class="mb-4"><code class="language-python">import boto3

 s3 = boto3.client('s3', endpoint_url='{{ api_base }}')

@@ -418,9 +500,9 @@ s3 = boto3.client('s3', endpoint_url='{{ api_base }}')
 response = s3.create_multipart_upload(Bucket='mybucket', Key='large.bin')
 upload_id = response['UploadId']

-# Upload parts
+# Upload parts (minimum 5MB each, except last part)
 parts = []
-chunks = [b'chunk1', b'chunk2'] # Example data chunks
+chunks = [b'chunk1...', b'chunk2...']
 for part_number, chunk in enumerate(chunks, start=1):
     response = s3.upload_part(
         Bucket='mybucket',
@@ -438,6 +520,19 @@ s3.complete_multipart_upload(
     UploadId=upload_id,
     MultipartUpload={'Parts': parts}
 )</code></pre>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Presigned URLs for Sharing</h3>
+<pre class="mb-0"><code class="language-bash"># Generate a download link valid for 15 minutes
+curl -X POST "{{ api_base }}/presign/mybucket/photo.jpg" \
+  -H "Content-Type: application/json" \
+  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+  -d '{"method": "GET", "expires_in": 900}'
+
+# Generate an upload link (PUT) valid for 1 hour
+curl -X POST "{{ api_base }}/presign/mybucket/upload.bin" \
+  -H "Content-Type: application/json" \
+  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+  -d '{"method": "PUT", "expires_in": 3600}'</code></pre>
 </div>
 </article>
 <article id="replication" class="card shadow-sm docs-section">
@@ -461,15 +556,46 @@ s3.complete_multipart_upload(
 </li>
 </ol>

-<div class="alert alert-light border mb-0">
-  <div class="d-flex gap-2">
-    <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-terminal text-muted mt-1" viewBox="0 0 16 16">
+<div class="alert alert-light border mb-3 overflow-hidden">
+  <div class="d-flex flex-column flex-sm-row gap-2 mb-2">
+    <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-terminal text-muted mt-1 flex-shrink-0 d-none d-sm-block" viewBox="0 0 16 16">
       <path d="M6 9a.5.5 0 0 1 .5-.5h3a.5.5 0 0 1 0 1h-3A.5.5 0 0 1 6 9zM3.854 4.146a.5.5 0 1 0-.708.708L4.793 6.5 3.146 8.146a.5.5 0 1 0 .708.708l2-2a.5.5 0 0 0 0-.708l-2-2z"/>
       <path d="M2 1a2 2 0 0 0-2 2v10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V3a2 2 0 0 0-2-2H2zm12 1a1 1 0 0 1 1 1v10a1 1 0 0 1-1 1H2a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1h12z"/>
     </svg>
-    <div>
-      <strong>Headless Target Setup?</strong>
-      <p class="small text-muted mb-0">If your target server has no UI, use the Python API directly to bootstrap credentials. See <code>docs.md</code> in the project root for the <code>setup_target.py</code> script.</p>
+    <div class="flex-grow-1 min-width-0">
+      <strong>Headless Target Setup</strong>
+      <p class="small text-muted mb-2">If your target server has no UI, create a <code>setup_target.py</code> script to bootstrap credentials:</p>
+      <pre class="mb-0 overflow-auto" style="max-width: 100%;"><code class="language-python"># setup_target.py
+from pathlib import Path
+from app.iam import IamService
+from app.storage import ObjectStorage
+
+# Initialize services (paths match default config)
+data_dir = Path("data")
+iam = IamService(data_dir / ".myfsio.sys" / "config" / "iam.json")
+storage = ObjectStorage(data_dir)
+
+# 1. Create the bucket
+bucket_name = "backup-bucket"
+try:
+    storage.create_bucket(bucket_name)
+    print(f"Bucket '{bucket_name}' created.")
+except Exception as e:
+    print(f"Bucket creation skipped: {e}")
+
+# 2. Create the user
+try:
+    creds = iam.create_user(
+        display_name="Replication User",
+        policies=[{"bucket": bucket_name, "actions": ["write", "read", "list"]}]
+    )
+    print("\n--- CREDENTIALS GENERATED ---")
+    print(f"Access Key: {creds['access_key']}")
+    print(f"Secret Key: {creds['secret_key']}")
+    print("-----------------------------")
+except Exception as e:
+    print(f"User creation failed: {e}")</code></pre>
+      <p class="small text-muted mt-2 mb-0">Save and run: <code>python setup_target.py</code></p>
     </div>
   </div>
 </div>
@@ -487,6 +613,86 @@ s3.complete_multipart_upload(
 </p>
 </div>
 </article>
|
<article id="versioning" class="card shadow-sm docs-section">
|
||||||
|
<div class="card-body">
|
||||||
|
<div class="d-flex align-items-center gap-2 mb-3">
|
||||||
|
<span class="docs-section-kicker">09</span>
|
||||||
|
<h2 class="h4 mb-0">Object Versioning</h2>
|
||||||
|
</div>
|
||||||
|
<p class="text-muted">Keep multiple versions of objects to protect against accidental deletions and overwrites. Restore previous versions at any time.</p>
|
||||||
|
|
||||||
|
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Versioning</h3>
|
||||||
|
<ol class="docs-steps mb-3">
|
||||||
|
<li>Navigate to your bucket's <strong>Properties</strong> tab.</li>
|
||||||
|
<li>Find the <strong>Versioning</strong> card and click <strong>Enable</strong>.</li>
|
||||||
|
<li>All subsequent uploads will create new versions instead of overwriting.</li>
|
||||||
|
</ol>
|
||||||
|
|
||||||
|
<h3 class="h6 text-uppercase text-muted mt-4">Version Operations</h3>
|
||||||
|
<div class="table-responsive mb-3">
|
||||||
|
<table class="table table-sm table-bordered small">
|
||||||
|
<thead class="table-light">
|
||||||
|
<tr>
|
||||||
|
<th>Operation</th>
|
||||||
|
<th>Description</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td><strong>View Versions</strong></td>
|
||||||
|
<td>Click the version icon on any object to see all historical versions with timestamps and sizes.</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><strong>Restore Version</strong></td>
|
||||||
|
<td>Click <strong>Restore</strong> on any version to make it the current version (creates a copy).</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><strong>Delete Current</strong></td>
|
||||||
|
<td>Deleting an object archives it. Previous versions remain accessible.</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><strong>Purge All</strong></td>
|
||||||
|
<td>Permanently delete an object and all its versions. This cannot be undone.</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<h3 class="h6 text-uppercase text-muted mt-4">Archived Objects</h3>
|
||||||
|
<p class="small text-muted mb-3">When you delete a versioned object, it becomes "archived" - the current version is removed but historical versions remain. The <strong>Archived</strong> tab shows these objects so you can restore them.</p>
|
||||||
|
|
||||||
|
<h3 class="h6 text-uppercase text-muted mt-4">API Usage</h3>
|
||||||
|
<pre class="mb-3"><code class="language-bash"># Enable versioning
|
||||||
|
curl -X PUT "{{ api_base }}/<bucket>?versioning" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||||
|
-d '{"Status": "Enabled"}'
|
||||||
|
|
||||||
|
# Get versioning status
|
||||||
|
curl "{{ api_base }}/<bucket>?versioning" \
|
||||||
|
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||||
|
|
||||||
|
# List object versions
|
||||||
|
curl "{{ api_base }}/<bucket>?versions" \
|
||||||
|
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||||
|
|
||||||
|
# Get specific version
|
||||||
|
curl "{{ api_base }}/<bucket>/<key>?versionId=<version-id>" \
|
||||||
|
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
|
||||||
|
|
||||||
|
<div class="alert alert-light border mb-0">
|
||||||
|
<div class="d-flex gap-2">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||||
|
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
|
||||||
|
</svg>
|
||||||
|
<div>
|
||||||
|
<strong>Storage Impact:</strong> Each version consumes storage. Enable quotas to limit total bucket size including all versions.
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</article>
|
||||||
<article id="quotas" class="card shadow-sm docs-section">
|
<article id="quotas" class="card shadow-sm docs-section">
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<div class="d-flex align-items-center gap-2 mb-3">
|
<div class="d-flex align-items-center gap-2 mb-3">
|
||||||
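
The curl commands in the new docs section above map directly onto any HTTP client. Below is a minimal Python sketch of the same calls, assuming the requests package; the base URL, bucket name, object key, and credentials are placeholders, not values from this change:

    import requests

    BASE = "http://localhost:8000"  # placeholder for {{ api_base }}
    AUTH = {"X-Access-Key": "<key>", "X-Secret-Key": "<secret>"}

    # Enable versioning on a bucket
    requests.put(f"{BASE}/my-bucket?versioning", headers=AUTH, json={"Status": "Enabled"})

    # Read back the versioning status
    status = requests.get(f"{BASE}/my-bucket?versioning", headers=AUTH).json()

    # List every stored version of the bucket's objects
    versions = requests.get(f"{BASE}/my-bucket?versions", headers=AUTH).json()

    # Fetch one specific historical version of an object
    old = requests.get(f"{BASE}/my-bucket/report.csv", params={"versionId": "<version-id>"}, headers=AUTH)
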
@@ -709,6 +915,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
 <li><a href="#api">REST endpoints</a></li>
 <li><a href="#examples">API Examples</a></li>
 <li><a href="#replication">Site Replication</a></li>
+<li><a href="#versioning">Object Versioning</a></li>
 <li><a href="#quotas">Bucket Quotas</a></li>
 <li><a href="#encryption">Encryption</a></li>
 <li><a href="#troubleshooting">Troubleshooting</a></li>
@@ -10,6 +10,7 @@
 </svg>
 IAM Configuration
 </h1>
+<p class="text-muted mb-0 mt-1">Create and manage users with fine-grained bucket permissions.</p>
 </div>
 <div class="d-flex gap-2">
 {% if not iam_locked %}
@@ -109,35 +110,68 @@
 {% else %}
 <div class="card-body px-4 pb-4">
 {% if users %}
-<div class="table-responsive">
-<table class="table table-hover align-middle mb-0">
-<thead class="table-light">
-<tr>
-<th scope="col">User</th>
-<th scope="col">Policies</th>
-<th scope="col" class="text-end">Actions</th>
-</tr>
-</thead>
-<tbody>
+<div class="row g-3">
 {% for user in users %}
-<tr>
-<td>
+<div class="col-md-6 col-xl-4">
+<div class="card h-100 iam-user-card">
+<div class="card-body">
+<div class="d-flex align-items-start justify-content-between mb-3">
 <div class="d-flex align-items-center gap-3">
-<div class="user-avatar">
-<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
+<div class="user-avatar user-avatar-lg">
+<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
 <path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>
 </svg>
 </div>
-<div>
-<div class="fw-medium">{{ user.display_name }}</div>
-<code class="small text-muted">{{ user.access_key }}</code>
+<div class="min-width-0">
+<h6 class="fw-semibold mb-0 text-truncate" title="{{ user.display_name }}">{{ user.display_name }}</h6>
+<code class="small text-muted d-block text-truncate" title="{{ user.access_key }}">{{ user.access_key }}</code>
 </div>
 </div>
-</td>
-<td>
+<div class="dropdown">
+<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">
+<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
+<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
+</svg>
+</button>
+<ul class="dropdown-menu dropdown-menu-end">
+<li>
+<button class="dropdown-item" type="button" data-edit-user="{{ user.access_key }}" data-display-name="{{ user.display_name }}">
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
+<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
+</svg>
+Edit Name
+</button>
+</li>
+<li>
+<button class="dropdown-item" type="button" data-rotate-user="{{ user.access_key }}">
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
+<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
+<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
+</svg>
+Rotate Secret
+</button>
+</li>
+<li><hr class="dropdown-divider"></li>
+<li>
+<button class="dropdown-item text-danger" type="button" data-delete-user="{{ user.access_key }}">
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
+<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
+<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
+</svg>
+Delete User
+</button>
+</li>
+</ul>
+</div>
+</div>
+<div class="mb-3">
+<div class="small text-muted mb-2">Bucket Permissions</div>
 <div class="d-flex flex-wrap gap-1">
 {% for policy in user.policies %}
 <span class="badge bg-primary bg-opacity-10 text-primary">
+<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">
+<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
+</svg>
 {{ policy.bucket }}
 {% if '*' in policy.actions %}
 <span class="opacity-75">(full)</span>
@@ -149,38 +183,18 @@
 <span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>
 {% endfor %}
 </div>
-</td>
-<td class="text-end">
-<div class="btn-group btn-group-sm" role="group">
-<button class="btn btn-outline-primary" type="button" data-rotate-user="{{ user.access_key }}" title="Rotate Secret">
-<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
-<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
-<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
-</svg>
-</button>
-<button class="btn btn-outline-secondary" type="button" data-edit-user="{{ user.access_key }}" data-display-name="{{ user.display_name }}" title="Edit User">
-<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
-<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
-</svg>
-</button>
-<button class="btn btn-outline-secondary" type="button" data-policy-editor data-access-key="{{ user.access_key }}" title="Edit Policies">
-<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
+</div>
+<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-access-key="{{ user.access_key }}">
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
 <path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
 <path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/>
 </svg>
-</button>
-<button class="btn btn-outline-danger" type="button" data-delete-user="{{ user.access_key }}" title="Delete User">
-<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
-<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
-<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
-</svg>
+Manage Policies
 </button>
 </div>
-</td>
-</tr>
+</div>
+</div>
 {% endfor %}
-</tbody>
-</table>
 </div>
 {% else %}
 <div class="empty-state text-center py-5">
@@ -442,6 +456,95 @@
 {{ super() }}
 <script>
 (function () {
+  // Auto-indent for JSON textareas
+  function setupJsonAutoIndent(textarea) {
+    if (!textarea) return;
+
+    textarea.addEventListener('keydown', function(e) {
+      if (e.key === 'Enter') {
+        e.preventDefault();
+
+        const start = this.selectionStart;
+        const end = this.selectionEnd;
+        const value = this.value;
+
+        // Get the current line
+        const lineStart = value.lastIndexOf('\n', start - 1) + 1;
+        const currentLine = value.substring(lineStart, start);
+
+        // Calculate base indentation (leading whitespace of current line)
+        const indentMatch = currentLine.match(/^(\s*)/);
+        let indent = indentMatch ? indentMatch[1] : '';
+
+        // Check if the line ends with { or [ (should increase indent)
+        const trimmedLine = currentLine.trim();
+        const lastChar = trimmedLine.slice(-1);
+
+        // Check the character before cursor
+        const charBeforeCursor = value.substring(start - 1, start).trim();
+
+        let newIndent = indent;
+        let insertAfter = '';
+
+        if (lastChar === '{' || lastChar === '[') {
+          // Add extra indentation
+          newIndent = indent + '  ';
+
+          // Check if we need to add closing bracket on new line
+          const charAfterCursor = value.substring(start, start + 1).trim();
+          if ((lastChar === '{' && charAfterCursor === '}') ||
+              (lastChar === '[' && charAfterCursor === ']')) {
+            insertAfter = '\n' + indent;
+          }
+        } else if (lastChar === ',' || lastChar === ':') {
+          // Keep same indentation for continuation
+          newIndent = indent;
+        }
+
+        // Insert newline with proper indentation
+        const insertion = '\n' + newIndent + insertAfter;
+        const newValue = value.substring(0, start) + insertion + value.substring(end);
+
+        this.value = newValue;
+
+        // Set cursor position after the indentation
+        const newCursorPos = start + 1 + newIndent.length;
+        this.selectionStart = this.selectionEnd = newCursorPos;
+
+        // Trigger input event for any listeners
+        this.dispatchEvent(new Event('input', { bubbles: true }));
+      }
+
+      // Handle Tab key for indentation
+      if (e.key === 'Tab') {
+        e.preventDefault();
+        const start = this.selectionStart;
+        const end = this.selectionEnd;
+
+        if (e.shiftKey) {
+          // Outdent: remove 2 spaces from start of line
+          const lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
+          const lineContent = this.value.substring(lineStart, start);
+          if (lineContent.startsWith('  ')) {
+            this.value = this.value.substring(0, lineStart) +
+              this.value.substring(lineStart + 2);
+            this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
+          }
+        } else {
+          // Indent: insert 2 spaces
+          this.value = this.value.substring(0, start) + '  ' + this.value.substring(end);
+          this.selectionStart = this.selectionEnd = start + 2;
+        }
+
+        this.dispatchEvent(new Event('input', { bubbles: true }));
+      }
+    });
+  }
+
+  // Apply auto-indent to policy editor textareas
+  setupJsonAutoIndent(document.getElementById('policyEditorDocument'));
+  setupJsonAutoIndent(document.getElementById('createUserPolicies'));
+
 const currentUserKey = {{ principal.access_key | tojson }};
 const configCopyButtons = document.querySelectorAll('.config-copy');
 configCopyButtons.forEach((button) => {
@@ -219,24 +219,42 @@
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div class="col-lg-4">
|
<div class="col-lg-4">
|
||||||
<div class="card shadow-sm border-0 h-100 overflow-hidden" style="background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);">
|
{% set has_issues = (cpu_percent > 80) or (memory.percent > 85) or (disk.percent > 90) %}
|
||||||
|
<div class="card shadow-sm border-0 h-100 overflow-hidden" style="background: linear-gradient(135deg, {% if has_issues %}#ef4444 0%, #f97316{% else %}#3b82f6 0%, #8b5cf6{% endif %} 100%);">
|
||||||
<div class="card-body p-4 d-flex flex-column justify-content-center text-white position-relative">
|
<div class="card-body p-4 d-flex flex-column justify-content-center text-white position-relative">
|
||||||
<div class="position-absolute top-0 end-0 opacity-25" style="transform: translate(20%, -20%);">
|
<div class="position-absolute top-0 end-0 opacity-25" style="transform: translate(20%, -20%);">
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" width="160" height="160" fill="currentColor" class="bi bi-cloud-check" viewBox="0 0 16 16">
|
<svg xmlns="http://www.w3.org/2000/svg" width="160" height="160" fill="currentColor" class="bi bi-{% if has_issues %}exclamation-triangle{% else %}cloud-check{% endif %}" viewBox="0 0 16 16">
|
||||||
|
{% if has_issues %}
|
||||||
|
<path d="M7.938 2.016A.13.13 0 0 1 8.002 2a.13.13 0 0 1 .063.016.146.146 0 0 1 .054.057l6.857 11.667c.036.06.035.124.002.183a.163.163 0 0 1-.054.06.116.116 0 0 1-.066.017H1.146a.115.115 0 0 1-.066-.017.163.163 0 0 1-.054-.06.176.176 0 0 1 .002-.183L7.884 2.073a.147.147 0 0 1 .054-.057zm1.044-.45a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566z"/>
|
||||||
|
<path d="M7.002 12a1 1 0 1 1 2 0 1 1 0 0 1-2 0zM7.1 5.995a.905.905 0 1 1 1.8 0l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995z"/>
|
||||||
|
{% else %}
|
||||||
<path fill-rule="evenodd" d="M10.354 6.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7 8.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
<path fill-rule="evenodd" d="M10.354 6.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7 8.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
||||||
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
||||||
|
{% endif %}
|
||||||
</svg>
|
</svg>
|
||||||
</div>
|
</div>
|
||||||
<div class="mb-3">
|
<div class="mb-3">
|
||||||
<span class="badge bg-white text-primary fw-semibold px-3 py-2">
|
<span class="badge bg-white {% if has_issues %}text-danger{% else %}text-primary{% endif %} fw-semibold px-3 py-2">
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-check-circle-fill me-1" viewBox="0 0 16 16">
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-{% if has_issues %}exclamation-circle-fill{% else %}check-circle-fill{% endif %} me-1" viewBox="0 0 16 16">
|
||||||
|
{% if has_issues %}
|
||||||
|
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM8 4a.905.905 0 0 0-.9.995l.35 3.507a.552.552 0 0 0 1.1 0l.35-3.507A.905.905 0 0 0 8 4zm.002 6a1 1 0 1 0 0 2 1 1 0 0 0 0-2z"/>
|
||||||
|
{% else %}
|
||||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||||
|
{% endif %}
|
||||||
</svg>
|
</svg>
|
||||||
v{{ app.version }}
|
v{{ app.version }}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
<h4 class="card-title fw-bold mb-3">System Status</h4>
|
<h4 class="card-title fw-bold mb-3">System Health</h4>
|
||||||
<p class="card-text opacity-90 mb-4">All systems operational. Your storage infrastructure is running smoothly with no detected issues.</p>
|
{% if has_issues %}
|
||||||
|
<ul class="list-unstyled small mb-4 opacity-90">
|
||||||
|
{% if cpu_percent > 80 %}<li class="mb-1">CPU usage is high ({{ cpu_percent }}%)</li>{% endif %}
|
||||||
|
{% if memory.percent > 85 %}<li class="mb-1">Memory usage is high ({{ memory.percent }}%)</li>{% endif %}
|
||||||
|
{% if disk.percent > 90 %}<li class="mb-1">Disk space is critically low ({{ disk.percent }}% used)</li>{% endif %}
|
||||||
|
</ul>
|
||||||
|
{% else %}
|
||||||
|
<p class="card-text opacity-90 mb-4 small">All resources are within normal operating parameters.</p>
|
||||||
|
{% endif %}
|
||||||
<div class="d-flex gap-4">
|
<div class="d-flex gap-4">
|
||||||
<div>
|
<div>
|
||||||
<div class="h3 fw-bold mb-0">{{ app.uptime_days }}d</div>
|
<div class="h3 fw-bold mb-0">{{ app.uptime_days }}d</div>
|
||||||
|
|||||||
@@ -157,9 +157,14 @@ class TestPaginatedObjectListing:
         assert "last_modified" in obj
         assert "last_modified_display" in obj
         assert "etag" in obj
-        assert "preview_url" in obj
-        assert "download_url" in obj
-        assert "delete_endpoint" in obj
+        # URLs are now returned as templates (not per-object) for performance
+        assert "url_templates" in data
+        templates = data["url_templates"]
+        assert "preview" in templates
+        assert "download" in templates
+        assert "delete" in templates
+        assert "KEY_PLACEHOLDER" in templates["preview"]
+
 
     def test_bucket_detail_page_loads_without_objects(self, tmp_path):
         """Bucket detail page should load even with many objects."""
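
The assertions above pin down the new url_templates contract: one template per action, each carrying a literal KEY_PLACEHOLDER token. For illustration only, a hypothetical helper (not part of this change) showing how a client might consume it:

    from urllib.parse import quote

    def fill_template(template: str, object_key: str) -> str:
        # Swap the placeholder for the URL-encoded object key, so a single
        # template serves every object in the listing.
        return template.replace("KEY_PLACEHOLDER", quote(object_key, safe=""))

    # e.g. fill_template(templates["preview"], "photos/cat.png")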