Compare commits

13 Commits: c0603c592b ... v0.2.2

| SHA1 |
| --- |
| bb6590fc5e |
| 899db3421b |
| caf01d6ada |
| bb366cb4cd |
| a2745ff2ee |
| 28cb656d94 |
| 3c44152fc6 |
| 397515edce |
| 980fced7e4 |
| bae5009ec4 |
| 233780617f |
| fd8fb21517 |
| c6cbe822e1 |
```diff
@@ -197,17 +197,6 @@ def create_app(
     )
     app.extensions["operation_metrics"] = operation_metrics_collector
 
-    system_metrics_collector = None
-    if app.config.get("METRICS_HISTORY_ENABLED", False):
-        from .system_metrics import SystemMetricsCollector
-        system_metrics_collector = SystemMetricsCollector(
-            storage_root,
-            interval_minutes=app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
-            retention_hours=app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
-        )
-        system_metrics_collector.set_storage(storage)
-        app.extensions["system_metrics"] = system_metrics_collector
-
     @app.errorhandler(500)
     def internal_error(error):
         return render_template('500.html'), 500
```
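While it was wired in, the collector was exposed through `app.extensions["system_metrics"]`. A minimal sketch of how request-handling code could read it back out; the route and its path are illustrative, not part of this changeset:

```python
# Illustrative sketch only: reading the collector that create_app() stored
# in app.extensions. The route name and path here are hypothetical.
from flask import Flask, current_app, jsonify


def register_metrics_route(app: Flask) -> None:
    @app.get("/internal/system-metrics")  # hypothetical path
    def system_metrics_view():
        collector = current_app.extensions.get("system_metrics")
        if collector is None:  # METRICS_HISTORY_ENABLED was off
            return jsonify({"enabled": False})
        return jsonify({"enabled": True, "current": collector.get_current()})
```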
```diff
@@ -90,10 +90,6 @@ class AppConfig:
     operation_metrics_enabled: bool
     operation_metrics_interval_minutes: int
     operation_metrics_retention_hours: int
-    server_threads: int
-    server_connection_limit: int
-    server_backlog: int
-    server_channel_timeout: int
 
     @classmethod
     def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
```
```diff
@@ -197,11 +193,6 @@ class AppConfig:
         operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
         operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))
 
-        server_threads = int(_get("SERVER_THREADS", 4))
-        server_connection_limit = int(_get("SERVER_CONNECTION_LIMIT", 100))
-        server_backlog = int(_get("SERVER_BACKLOG", 1024))
-        server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
-
         return cls(storage_root=storage_root,
                    max_upload_size=max_upload_size,
                    ui_page_size=ui_page_size,
```
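`from_env` accepts an `overrides` mapping. Judging from the `_get(...)` calls in this hunk, each key appears to resolve from `overrides` before falling back to the environment; the sketch below assumes that behavior, which this diff does not confirm:

```python
# Sketch under an assumption: AppConfig.from_env(overrides=...) resolves keys
# from the overrides dict before os.environ. Key names mirror the _get()
# calls visible in the hunk above.
from app.config import AppConfig

cfg = AppConfig.from_env(overrides={
    "OPERATION_METRICS_INTERVAL_MINUTES": 1,
    "OPERATION_METRICS_RETENTION_HOURS": 48,
})
print(cfg.operation_metrics_retention_hours)  # expected 48, if the assumption holds
```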
```diff
@@ -245,11 +236,7 @@ class AppConfig:
                    metrics_history_interval_minutes=metrics_history_interval_minutes,
                    operation_metrics_enabled=operation_metrics_enabled,
                    operation_metrics_interval_minutes=operation_metrics_interval_minutes,
-                   operation_metrics_retention_hours=operation_metrics_retention_hours,
-                   server_threads=server_threads,
-                   server_connection_limit=server_connection_limit,
-                   server_backlog=server_backlog,
-                   server_channel_timeout=server_channel_timeout)
+                   operation_metrics_retention_hours=operation_metrics_retention_hours)
 
     def validate_and_report(self) -> list[str]:
         """Validate configuration and return a list of warnings/issues.
```
```diff
@@ -309,35 +296,7 @@ class AppConfig:
 
         if "*" in self.cors_origins:
             issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")
 
-        if not (1 <= self.server_threads <= 64):
-            issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
-        if not (10 <= self.server_connection_limit <= 1000):
-            issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
-        if not (64 <= self.server_backlog <= 4096):
-            issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
-        if not (10 <= self.server_channel_timeout <= 300):
-            issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
-
-        if sys.platform != "win32":
-            try:
-                import resource
-                soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
-                threshold = int(soft_limit * 0.8)
-                if self.server_connection_limit > threshold:
-                    issues.append(f"WARNING: SERVER_CONNECTION_LIMIT={self.server_connection_limit} exceeds 80% of system file descriptor limit (soft={soft_limit}). Consider running 'ulimit -n {self.server_connection_limit + 100}'.")
-            except (ImportError, OSError):
-                pass
-
-        try:
-            import psutil
-            available_mb = psutil.virtual_memory().available / (1024 * 1024)
-            estimated_mb = self.server_threads * 50
-            if estimated_mb > available_mb * 0.5:
-                issues.append(f"WARNING: SERVER_THREADS={self.server_threads} may require ~{estimated_mb}MB memory, exceeding 50% of available RAM ({int(available_mb)}MB).")
-        except ImportError:
-            pass
-
 
         return issues
 
     def print_startup_summary(self) -> None:
```
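The two environment checks removed here are easy to reproduce in isolation. A sketch of the same heuristics outside `AppConfig`, with illustrative values; note that `resource` is Unix-only, which is why the original guarded on `sys.platform`:

```python
# Standalone sketch of the removed heuristics: compare a connection limit
# against 80% of the soft RLIMIT_NOFILE, and estimate ~50 MB per worker
# thread against available RAM. Values are illustrative only.
import resource  # Unix-only; the original code skipped this on win32

import psutil

connection_limit, threads = 100, 4

soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if connection_limit > int(soft_limit * 0.8):
    print(f"WARNING: connection limit {connection_limit} is close to the fd limit {soft_limit}")

available_mb = psutil.virtual_memory().available / (1024 * 1024)
estimated_mb = threads * 50
if estimated_mb > available_mb * 0.5:
    print(f"WARNING: ~{estimated_mb}MB for threads exceeds half of available RAM")
```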
```diff
@@ -355,10 +314,6 @@ class AppConfig:
             print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
         if self.kms_enabled:
             print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
-        print(f" SERVER_THREADS: {self.server_threads}")
-        print(f" CONNECTION_LIMIT: {self.server_connection_limit}")
-        print(f" BACKLOG: {self.server_backlog}")
-        print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
         print("=" * 60)
 
         issues = self.validate_and_report()
```
```diff
@@ -416,8 +371,4 @@ class AppConfig:
             "OPERATION_METRICS_ENABLED": self.operation_metrics_enabled,
             "OPERATION_METRICS_INTERVAL_MINUTES": self.operation_metrics_interval_minutes,
             "OPERATION_METRICS_RETENTION_HOURS": self.operation_metrics_retention_hours,
-            "SERVER_THREADS": self.server_threads,
-            "SERVER_CONNECTION_LIMIT": self.server_connection_limit,
-            "SERVER_BACKLOG": self.server_backlog,
-            "SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
         }
```
@@ -1,215 +0,0 @@

```python
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import psutil

if TYPE_CHECKING:
    from .storage import ObjectStorage

logger = logging.getLogger(__name__)


@dataclass
class SystemMetricsSnapshot:
    timestamp: datetime
    cpu_percent: float
    memory_percent: float
    disk_percent: float
    storage_bytes: int

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "cpu_percent": round(self.cpu_percent, 2),
            "memory_percent": round(self.memory_percent, 2),
            "disk_percent": round(self.disk_percent, 2),
            "storage_bytes": self.storage_bytes,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SystemMetricsSnapshot":
        timestamp_str = data["timestamp"]
        if timestamp_str.endswith("Z"):
            timestamp_str = timestamp_str[:-1] + "+00:00"
        return cls(
            timestamp=datetime.fromisoformat(timestamp_str),
            cpu_percent=data.get("cpu_percent", 0.0),
            memory_percent=data.get("memory_percent", 0.0),
            disk_percent=data.get("disk_percent", 0.0),
            storage_bytes=data.get("storage_bytes", 0),
        )


class SystemMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._shutdown = threading.Event()
        self._snapshots: List[SystemMetricsSnapshot] = []
        self._storage_ref: Optional["ObjectStorage"] = None

        self._load_history()

        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop,
            name="system-metrics-snapshot",
            daemon=True,
        )
        self._snapshot_thread.start()

    def set_storage(self, storage: "ObjectStorage") -> None:
        with self._lock:
            self._storage_ref = storage

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "metrics_history.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            history_data = data.get("history", [])
            self._snapshots = [SystemMetricsSnapshot.from_dict(s) for s in history_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load system metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"history": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save system metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        try:
            cpu_percent = psutil.cpu_percent(interval=0.1)
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage(str(self.storage_root))

            storage_bytes = 0
            with self._lock:
                storage = self._storage_ref
                if storage:
                    try:
                        buckets = storage.list_buckets()
                        for bucket in buckets:
                            stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                            storage_bytes += stats.get("total_bytes", stats.get("bytes", 0))
                    except Exception as e:
                        logger.warning(f"Failed to collect bucket stats: {e}")

            snapshot = SystemMetricsSnapshot(
                timestamp=datetime.now(timezone.utc),
                cpu_percent=cpu_percent,
                memory_percent=memory.percent,
                disk_percent=disk.percent,
                storage_bytes=storage_bytes,
            )

            with self._lock:
                self._snapshots.append(snapshot)
                self._prune_old_snapshots()
                self._save_history()

            logger.debug(f"System metrics snapshot taken: CPU={cpu_percent:.1f}%, Memory={memory.percent:.1f}%")
        except Exception as e:
            logger.warning(f"Failed to take system metrics snapshot: {e}")

    def get_current(self) -> Dict[str, Any]:
        cpu_percent = psutil.cpu_percent(interval=0.1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage(str(self.storage_root))
        boot_time = psutil.boot_time()
        uptime_seconds = time.time() - boot_time
        uptime_days = int(uptime_seconds / 86400)

        total_buckets = 0
        total_objects = 0
        total_bytes_used = 0
        total_versions = 0

        with self._lock:
            storage = self._storage_ref
            if storage:
                try:
                    buckets = storage.list_buckets()
                    total_buckets = len(buckets)
                    for bucket in buckets:
                        stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                        total_objects += stats.get("total_objects", stats.get("objects", 0))
                        total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
                        total_versions += stats.get("version_count", 0)
                except Exception as e:
                    logger.warning(f"Failed to collect current bucket stats: {e}")

        return {
            "cpu_percent": round(cpu_percent, 2),
            "memory": {
                "total": memory.total,
                "available": memory.available,
                "used": memory.used,
                "percent": round(memory.percent, 2),
            },
            "disk": {
                "total": disk.total,
                "free": disk.free,
                "used": disk.used,
                "percent": round(disk.percent, 2),
            },
            "app": {
                "buckets": total_buckets,
                "objects": total_objects,
                "versions": total_versions,
                "storage_bytes": total_bytes_used,
                "uptime_days": uptime_days,
            },
        }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)

        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]

        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)
```
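Taken together, the deleted module exposed a small lifecycle: construct, attach storage, read, shut down. A sketch of how a host application could drive it, assuming the module were still present; the storage root path is illustrative:

```python
# Lifecycle sketch for the deleted collector, mirroring how create_app()
# wired it. Path("data") is an illustrative storage root.
from pathlib import Path

from app.system_metrics import SystemMetricsCollector  # module removed by this compare

collector = SystemMetricsCollector(
    Path("data"),
    interval_minutes=5,
    retention_hours=24,
)
# Optionally call collector.set_storage(storage) with an ObjectStorage
# instance so snapshots include per-bucket byte totals.
print(collector.get_current()["cpu_percent"])
print(len(collector.get_history(hours=6)))
collector.shutdown()  # takes a final snapshot and joins the background thread
```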
app/ui.py (79 changed lines)
```diff
@@ -158,6 +158,69 @@ def _format_bytes(num: int) -> str:
     return f"{value:.1f} PB"
 
 
+_metrics_last_save_time: float = 0.0
+
+
+def _get_metrics_history_path() -> Path:
+    storage_root = Path(current_app.config["STORAGE_ROOT"])
+    return storage_root / ".myfsio.sys" / "config" / "metrics_history.json"
+
+
+def _load_metrics_history() -> dict:
+    path = _get_metrics_history_path()
+    if not path.exists():
+        return {"history": []}
+    try:
+        return json.loads(path.read_text(encoding="utf-8"))
+    except (json.JSONDecodeError, OSError):
+        return {"history": []}
+
+
+def _save_metrics_snapshot(cpu_percent: float, memory_percent: float, disk_percent: float, storage_bytes: int) -> None:
+    global _metrics_last_save_time
+
+    if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
+        return
+
+    import time
+    from datetime import datetime, timezone
+
+    interval_minutes = current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5)
+    now_ts = time.time()
+    if now_ts - _metrics_last_save_time < interval_minutes * 60:
+        return
+
+    path = _get_metrics_history_path()
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+    data = _load_metrics_history()
+    history = data.get("history", [])
+    retention_hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
+
+    now = datetime.now(timezone.utc)
+    snapshot = {
+        "timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
+        "cpu_percent": round(cpu_percent, 2),
+        "memory_percent": round(memory_percent, 2),
+        "disk_percent": round(disk_percent, 2),
+        "storage_bytes": storage_bytes,
+    }
+    history.append(snapshot)
+
+    cutoff = now.timestamp() - (retention_hours * 3600)
+    history = [
+        h for h in history
+        if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
+    ]
+
+    data["history"] = history
+    try:
+        path.write_text(json.dumps(data, indent=2), encoding="utf-8")
+        _metrics_last_save_time = now_ts
+    except OSError:
+        pass
+
+
 def _friendly_error_message(exc: Exception) -> str:
     message = str(exc) or "An unexpected error occurred"
     if isinstance(exc, IamError):
```
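These helpers persist throttled snapshots to `metrics_history.json` under the storage root. A small sketch that reads the file back, assuming a storage root of `data/` (an illustrative path):

```python
# Illustrative only: read the history file that _save_metrics_snapshot()
# maintains and print the most recent entries. The path layout comes from
# _get_metrics_history_path() above; "data" is an assumed storage root.
import json
from pathlib import Path

history_file = Path("data") / ".myfsio.sys" / "config" / "metrics_history.json"
doc = json.loads(history_file.read_text(encoding="utf-8"))
for snap in doc.get("history", [])[-3:]:
    print(snap["timestamp"], f'{snap["cpu_percent"]}% CPU', snap["storage_bytes"], "bytes")
```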
```diff
@@ -2177,6 +2240,8 @@ def metrics_api():
     uptime_seconds = time.time() - boot_time
     uptime_days = int(uptime_seconds / 86400)
 
+    _save_metrics_snapshot(cpu_percent, memory.percent, disk.percent, total_bytes_used)
+
     return jsonify({
         "cpu_percent": round(cpu_percent, 2),
         "memory": {
```
```diff
@@ -2211,15 +2276,23 @@ def metrics_history():
     except IamError:
         return jsonify({"error": "Access denied"}), 403
 
-    system_metrics = current_app.extensions.get("system_metrics")
-    if not system_metrics:
+    if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
         return jsonify({"enabled": False, "history": []})
 
     hours = request.args.get("hours", type=int)
     if hours is None:
         hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
 
-    history = system_metrics.get_history(hours=hours)
+    data = _load_metrics_history()
+    history = data.get("history", [])
+
+    if hours:
+        from datetime import datetime, timezone
+        cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
+        history = [
+            h for h in history
+            if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
+        ]
 
     return jsonify({
         "enabled": True,
```
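A hedged way to smoke-test the rewritten `metrics_history` view with Flask's test client. The URL path is an assumption, since only the view function name appears in the diff, and an unauthenticated client may hit the 403 branch shown above:

```python
# Hypothetical smoke test for the metrics_history view. The path is a guess;
# only the function name is visible in this compare.
from app import create_ui_app

app = create_ui_app()
with app.test_client() as client:
    resp = client.get("/metrics/history?hours=6")  # hypothetical path
    if resp.status_code == 200:
        payload = resp.get_json()
        print(payload["enabled"], len(payload.get("history", [])))
```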
```diff
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-APP_VERSION = "0.2.3"
+APP_VERSION = "0.2.2"
 
 
 def get_version() -> str:
```
docs.md (9 changed lines)
```diff
@@ -168,15 +168,6 @@ All configuration is done via environment variables. The table below lists every
 | `RATE_LIMIT_DEFAULT` | `200 per minute` | Default rate limit for API endpoints. |
 | `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |
 
-### Server Configuration
-
-| Variable | Default | Notes |
-| --- | --- | --- |
-| `SERVER_THREADS` | `4` | Waitress worker threads (1-64). More threads handle more concurrent requests but use more memory. |
-| `SERVER_CONNECTION_LIMIT` | `100` | Maximum concurrent connections (10-1000). Ensure OS file descriptor limits support this value. |
-| `SERVER_BACKLOG` | `1024` | TCP listen backlog (64-4096). Connections queue here when all threads are busy. |
-| `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |
-
 ### Logging
 
 | Variable | Default | Notes |
```
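The removed table documented the Waitress tuning knobs. As a hedged illustration of how they were consumed, the values below are examples only, chosen inside the documented ranges:

```python
# Illustrative: set the documented variables before the config is read,
# then surface any validation warnings. Values are examples, not defaults.
import os

os.environ.update({
    "SERVER_THREADS": "8",             # valid range 1-64
    "SERVER_CONNECTION_LIMIT": "200",  # valid range 10-1000
    "SERVER_BACKLOG": "2048",          # valid range 64-4096
    "SERVER_CHANNEL_TIMEOUT": "60",    # valid range 10-300
})

from app.config import AppConfig  # import path as used in run.py

cfg = AppConfig.from_env()
for issue in cfg.validate_and_report():
    print(issue)
```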
run.py (56 changed lines)
```diff
@@ -18,8 +18,6 @@ for _env_file in [
     if _env_file.exists():
         load_dotenv(_env_file, override=True)
 
-from typing import Optional
-
 from app import create_api_app, create_ui_app
 from app.config import AppConfig
 
```
```diff
@@ -38,23 +36,11 @@ def _is_frozen() -> bool:
     return getattr(sys, 'frozen', False) or '__compiled__' in globals()
 
 
-def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
+def serve_api(port: int, prod: bool = False) -> None:
     app = create_api_app()
     if prod:
         from waitress import serve
-        if config:
-            serve(
-                app,
-                host=_server_host(),
-                port=port,
-                ident="MyFSIO",
-                threads=config.server_threads,
-                connection_limit=config.server_connection_limit,
-                backlog=config.server_backlog,
-                channel_timeout=config.server_channel_timeout,
-            )
-        else:
-            serve(app, host=_server_host(), port=port, ident="MyFSIO")
+        serve(app, host=_server_host(), port=port, ident="MyFSIO")
     else:
         debug = _is_debug_enabled()
         if debug:
```
```diff
@@ -62,23 +48,11 @@ def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None)
         app.run(host=_server_host(), port=port, debug=debug)
 
 
-def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
+def serve_ui(port: int, prod: bool = False) -> None:
     app = create_ui_app()
     if prod:
         from waitress import serve
-        if config:
-            serve(
-                app,
-                host=_server_host(),
-                port=port,
-                ident="MyFSIO",
-                threads=config.server_threads,
-                connection_limit=config.server_connection_limit,
-                backlog=config.server_backlog,
-                channel_timeout=config.server_channel_timeout,
-            )
-        else:
-            serve(app, host=_server_host(), port=port, ident="MyFSIO")
+        serve(app, host=_server_host(), port=port, ident="MyFSIO")
     else:
         debug = _is_debug_enabled()
         if debug:
```
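The removed branch forwarded the `AppConfig` server settings to `waitress.serve`. A standalone sketch of the same call, with host and port chosen for illustration; `threads`, `connection_limit`, `backlog`, and `channel_timeout` are genuine Waitress arguments:

```python
# Minimal sketch of what the removed branch did: pass AppConfig's server
# settings through to waitress.serve. Host and port are illustrative.
from waitress import serve

from app import create_api_app
from app.config import AppConfig

config = AppConfig.from_env()
serve(
    create_api_app(),
    host="127.0.0.1",
    port=8000,
    ident="MyFSIO",
    threads=config.server_threads,
    connection_limit=config.server_connection_limit,
    backlog=config.server_backlog,
    channel_timeout=config.server_channel_timeout,
)
```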
```diff
@@ -97,6 +71,7 @@ if __name__ == "__main__":
     parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
     args = parser.parse_args()
 
+    # Handle config check/show modes
     if args.check_config or args.show_config:
         config = AppConfig.from_env()
         config.print_startup_summary()
```
```diff
@@ -106,50 +81,49 @@ if __name__ == "__main__":
             sys.exit(1 if critical else 0)
         sys.exit(0)
 
+    # Default to production mode when running as compiled binary
+    # unless --dev is explicitly passed
     prod_mode = args.prod or (_is_frozen() and not args.dev)
 
+    # Validate configuration before starting
     config = AppConfig.from_env()
 
+    # Show startup summary only on first run (when marker file doesn't exist)
     first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
     is_first_run = not first_run_marker.exists()
 
     if is_first_run:
         config.print_startup_summary()
 
+        # Check for critical issues that should prevent startup
         issues = config.validate_and_report()
         critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
         if critical_issues:
-            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
+            print("ABORTING: Critical configuration issues detected. Fix them before starting.")
             sys.exit(1)
 
+        # Create the marker file to indicate successful first run
        try:
             first_run_marker.parent.mkdir(parents=True, exist_ok=True)
             first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
         except OSError:
-            pass
+            pass  # Non-critical, just skip marker creation
 
     if prod_mode:
         print("Running in production mode (Waitress)")
-        issues = config.validate_and_report()
-        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
-        if critical_issues:
-            for issue in critical_issues:
-                print(f"  {issue}")
-            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
-            sys.exit(1)
     else:
         print("Running in development mode (Flask dev server)")
 
     if args.mode in {"api", "both"}:
         print(f"Starting API server on port {args.api_port}...")
-        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
+        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode), daemon=True)
         api_proc.start()
     else:
         api_proc = None
 
     if args.mode in {"ui", "both"}:
         print(f"Starting UI server on port {args.ui_port}...")
-        serve_ui(args.ui_port, prod_mode, config)
+        serve_ui(args.ui_port, prod_mode)
     elif api_proc:
         try:
             api_proc.join()
```
```diff
@@ -157,29 +157,6 @@ python run.py --mode ui
       <td><code>200 per minute</code></td>
       <td>Default API rate limit.</td>
     </tr>
-    <tr class="table-secondary">
-      <td colspan="3" class="fw-semibold">Server Settings</td>
-    </tr>
-    <tr>
-      <td><code>SERVER_THREADS</code></td>
-      <td><code>4</code></td>
-      <td>Waitress worker threads (1-64).</td>
-    </tr>
-    <tr>
-      <td><code>SERVER_CONNECTION_LIMIT</code></td>
-      <td><code>100</code></td>
-      <td>Max concurrent connections (10-1000).</td>
-    </tr>
-    <tr>
-      <td><code>SERVER_BACKLOG</code></td>
-      <td><code>1024</code></td>
-      <td>TCP listen backlog (64-4096).</td>
-    </tr>
-    <tr>
-      <td><code>SERVER_CHANNEL_TIMEOUT</code></td>
-      <td><code>120</code></td>
-      <td>Idle connection timeout in seconds (10-300).</td>
-    </tr>
     <tr class="table-secondary">
       <td colspan="3" class="fw-semibold">Encryption Settings</td>
     </tr>
```