Compare commits
205 Commits
v0.1.5
...
217af6d1c6
| Author | SHA1 | Date | |
|---|---|---|---|
| 217af6d1c6 | |||
| 51d54b42ac | |||
| 9ec5797919 | |||
| 8935188c8f | |||
| c77c592832 | |||
| 501d563df2 | |||
| ddcdb4026c | |||
| 3e7c0af019 | |||
| 476b9bd2e4 | |||
| c2ef37b84e | |||
| be8e030940 | |||
| ad7b2a02cb | |||
| 72ddd9822c | |||
| 4c30efd802 | |||
| 926a7e6366 | |||
| 1eadc7b75c | |||
| 4a224a127b | |||
| c498fe7aee | |||
| 3838aed954 | |||
| 6a193dbb1c | |||
| e94b341a5b | |||
| 2ad3736852 | |||
| f05b2668c0 | |||
| f7c1c1f809 | |||
| 0e392e18b4 | |||
| 8996f1ce06 | |||
| f60dbaf9c9 | |||
| 1a5a7aa9e1 | |||
| 326367ae4c | |||
| a7f9b0a22f | |||
| 0e525713b1 | |||
| f43fad02fb | |||
| eff3e378f3 | |||
| 5e32cef792 | |||
| 9898167f8d | |||
| 4a553555d3 | |||
| 7a3202c996 | |||
| bd20ca86ab | |||
| 532cf95d59 | |||
| 366f8ce60d | |||
| 7612cb054a | |||
| 966d524dca | |||
| e84f1f1851 | |||
| a059f0502d | |||
| afd7173ba0 | |||
| c807bb2388 | |||
| aa4f9f5566 | |||
| 14786151e5 | |||
| a496862902 | |||
| df4f27ca2e | |||
| d72e0a347e | |||
| 6ed4b7d8ea | |||
| 31ebbea680 | |||
| d878134ebf | |||
| 55568d6892 | |||
| a4ae81c77c | |||
| 9da7104887 | |||
| de5377e5ac | |||
| 80b77b64eb | |||
| 6c912a3d71 | |||
| c6e368324a | |||
| 7b6c096bb7 | |||
| 03353a0aec | |||
| 72f5d9d70c | |||
| be63e27c15 | |||
| 81ef0fe4c7 | |||
| 5f24bd920d | |||
| 8552f193de | |||
| 5536330aeb | |||
| d4657c389d | |||
| 3827235232 | |||
| dfc0058d0d | |||
| 27aef84311 | |||
| 5003514a3d | |||
| 20a314e030 | |||
| d8232340c3 | |||
| a356bb0c4e | |||
| 1c328ee3af | |||
| 5bf7962c04 | |||
| e06f653606 | |||
| 9c2809c195 | |||
| fb32ca0a7d | |||
| 6ab702a818 | |||
| 550e7d435c | |||
| 776967e80d | |||
| 082a7fbcd1 | |||
| ff287cf67b | |||
| bddf36d52d | |||
| cf6cec9cab | |||
| d425839e57 | |||
| 4c661477d5 | |||
| f3f52f14a5 | |||
| d19ba3e305 | |||
| c627f41f53 | |||
| bcad0cd3da | |||
| 67f057ca1c | |||
| 01e79e6993 | |||
| 1e3c4b545f | |||
| 4ecd32a554 | |||
| aa6d7c4d28 | |||
| 6e6d6d32bf | |||
| 54705ab9c4 | |||
| 77a46d0725 | |||
| 0f750b9d89 | |||
| e0dee9db36 | |||
| 126657c99f | |||
| 07fb1ac773 | |||
| 147962e1dd | |||
| 2643a79121 | |||
| e9a035827b | |||
| 033b8a82be | |||
| e76c311231 | |||
| cbdf1a27c8 | |||
| 4a60cb269a | |||
| ebe7f6222d | |||
| 70b61fd8e6 | |||
| a779b002d7 | |||
| 45d21cce21 | |||
| 9629507acd | |||
| 5d6cb4efa1 | |||
| 56ad83bbaf | |||
| 847933b7c0 | |||
| be55d08c0a | |||
| 8c4bf67974 | |||
| 9385d1fe1c | |||
| 0ea54457e8 | |||
| ae26d22388 | |||
| 6b715851b9 | |||
| 62c36f7a6c | |||
| b32f1f94f7 | |||
| 6e3d280a75 | |||
| 704f79dc44 | |||
| 87c7f1bc7d | |||
| 23ea164215 | |||
| 7a8acfb933 | |||
| 71327bcbf1 | |||
| c0603c592b | |||
| 912a7dc74f | |||
| 4de936cea9 | |||
| adb9017580 | |||
| 4adfcc4131 | |||
| ebc315c1cc | |||
| 5ab62a00ff | |||
| 9c3518de63 | |||
| a52657e684 | |||
| 53297abe1e | |||
| a3b9db544c | |||
| f5d2e1c488 | |||
| f04c6a9cdc | |||
| 7a494abb96 | |||
| 956d17a649 | |||
| 5522f9ac04 | |||
| 3742f0228e | |||
| ba694cb717 | |||
| 433d291b4b | |||
| e3509e997f | |||
| 1c30200db0 | |||
| 7ff422d4dc | |||
| 546d51af9a | |||
| 0d1fe05fd0 | |||
| c5d4b2f1cd | |||
| a5d19e2982 | |||
| 692e7e3a6e | |||
| 78dba93ee0 | |||
| 93a5aa6618 | |||
| 9ab750650c | |||
| 609e9db2f7 | |||
| 94a55cf2b7 | |||
| b9cfc45aa2 | |||
| 2d60e36fbf | |||
| c78f7fa6b0 | |||
| b3dce8d13e | |||
| e792b86485 | |||
| cdb86aeea7 | |||
| cdbc156b5b | |||
| 1df8ff9d25 | |||
| 05f1b00473 | |||
| 5ebc97300e | |||
| d2f9c3bded | |||
| 9f347f2caa | |||
| 4ab58e59c2 | |||
| 32232211a1 | |||
| 1cacb80dd6 | |||
| e89bbb62dc | |||
| c8eb3de629 | |||
| 9165e365e6 | |||
| 01e26754e8 | |||
| b592fa9fdb | |||
| cd9734b398 | |||
| 90893cac27 | |||
| 6e659902bd | |||
| 39a707ecbc | |||
| 4199f8e6c7 | |||
| adc6770273 | |||
| f5451c162b | |||
| aab9ef696a | |||
| be48f59452 | |||
| 86c04f85f6 | |||
| 992d9eccd9 | |||
| 40f3192c5c | |||
| 2498b950f6 | |||
| 97435f15e5 | |||
| 97860669ec | |||
| 4a5dd76286 | |||
| d2dc293722 |
@@ -1,13 +1,9 @@
|
||||
.git
|
||||
.gitignore
|
||||
.venv
|
||||
__pycache__
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.pyd
|
||||
.pytest_cache
|
||||
.coverage
|
||||
htmlcov
|
||||
logs
|
||||
data
|
||||
tmp
|
||||
target
|
||||
crates/*/tests
|
||||
Dockerfile
|
||||
.dockerignore
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -26,6 +26,9 @@ dist/
|
||||
*.egg-info/
|
||||
.eggs/
|
||||
|
||||
# Rust engine build artifacts
|
||||
target/
|
||||
|
||||
# Local runtime artifacts
|
||||
logs/
|
||||
*.log
|
||||
|
||||
5237
Cargo.lock
generated
Normal file
5237
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
61
Cargo.toml
Normal file
61
Cargo.toml
Normal file
@@ -0,0 +1,61 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"crates/myfsio-common",
|
||||
"crates/myfsio-auth",
|
||||
"crates/myfsio-crypto",
|
||||
"crates/myfsio-storage",
|
||||
"crates/myfsio-xml",
|
||||
"crates/myfsio-server",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.4.3"
|
||||
edition = "2021"
|
||||
|
||||
[workspace.dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
axum = { version = "0.8" }
|
||||
tower = { version = "0.5" }
|
||||
tower-http = { version = "0.6", features = ["cors", "trace", "fs", "compression-gzip"] }
|
||||
hyper = { version = "1" }
|
||||
bytes = "1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
md-5 = "0.10"
|
||||
hex = "0.4"
|
||||
aes = "0.8"
|
||||
aes-gcm = "0.10"
|
||||
cbc = { version = "0.1", features = ["alloc"] }
|
||||
hkdf = "0.12"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
parking_lot = "0.12"
|
||||
lru = "0.14"
|
||||
percent-encoding = "2"
|
||||
regex = "1"
|
||||
unicode-normalization = "0.1"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
thiserror = "2"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
base64 = "0.22"
|
||||
tokio-util = { version = "0.7", features = ["io"] }
|
||||
futures = "0.3"
|
||||
dashmap = "6"
|
||||
crc32fast = "1"
|
||||
duckdb = { version = "1", features = ["bundled"] }
|
||||
reqwest = { version = "0.12", default-features = false, features = ["stream", "rustls-tls", "json"] }
|
||||
aws-sdk-s3 = { version = "1", features = ["behavior-version-latest", "rt-tokio"] }
|
||||
aws-config = { version = "1", features = ["behavior-version-latest"] }
|
||||
aws-credential-types = "1"
|
||||
aws-smithy-runtime-api = "1"
|
||||
aws-smithy-types = "1"
|
||||
async-trait = "0.1"
|
||||
tera = "1"
|
||||
cookie = "0.18"
|
||||
subtle = "2"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
dotenvy = "0.15"
|
||||
61
Dockerfile
61
Dockerfile
@@ -1,37 +1,50 @@
|
||||
# syntax=docker/dockerfile:1.7
|
||||
FROM python:3.11-slim
|
||||
FROM rust:1-slim-bookworm AS builder
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
WORKDIR /build
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY crates ./crates
|
||||
|
||||
RUN cargo build --release --bin myfsio-server \
|
||||
&& strip target/release/myfsio-server
|
||||
|
||||
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Install build deps for any wheels that need compilation, then clean up
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends ca-certificates curl \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
COPY requirements.txt ./
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
COPY --from=builder /build/target/release/myfsio-server /usr/local/bin/myfsio-server
|
||||
COPY --from=builder /build/crates/myfsio-server/templates /app/templates
|
||||
COPY --from=builder /build/crates/myfsio-server/static /app/static
|
||||
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||
|
||||
COPY . .
|
||||
|
||||
# Make entrypoint executable
|
||||
RUN chmod +x docker-entrypoint.sh
|
||||
|
||||
# Create data directory and set permissions
|
||||
RUN mkdir -p /app/data \
|
||||
&& useradd -m -u 1000 myfsio \
|
||||
RUN chmod +x /app/docker-entrypoint.sh \
|
||||
&& chown -R myfsio:myfsio /app
|
||||
|
||||
USER myfsio
|
||||
|
||||
EXPOSE 5000 5100
|
||||
ENV APP_HOST=0.0.0.0 \
|
||||
FLASK_ENV=production \
|
||||
FLASK_DEBUG=0
|
||||
EXPOSE 5000
|
||||
EXPOSE 5100
|
||||
ENV HOST=0.0.0.0 \
|
||||
PORT=5000 \
|
||||
UI_PORT=5100 \
|
||||
STORAGE_ROOT=/app/data \
|
||||
TEMPLATES_DIR=/app/templates \
|
||||
STATIC_DIR=/app/static \
|
||||
RUST_LOG=info
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD python -c "import requests; requests.get('http://localhost:5000/healthz', timeout=2)"
|
||||
CMD curl -fsS "http://localhost:${PORT}/myfsio/health" || exit 1
|
||||
|
||||
CMD ["./docker-entrypoint.sh"]
|
||||
CMD ["/app/docker-entrypoint.sh"]
|
||||
|
||||
661
LICENSE
Normal file
661
LICENSE
Normal file
@@ -0,0 +1,661 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
252
README.md
252
README.md
@@ -1,117 +1,205 @@
|
||||
# MyFSIO (Flask S3 + IAM)
|
||||
# MyFSIO
|
||||
|
||||
MyFSIO is a batteries-included, Flask-based recreation of Amazon S3 and IAM workflows built for local development. The design mirrors the [AWS S3 documentation](https://docs.aws.amazon.com/s3/) wherever practical: bucket naming, Signature Version 4 presigning, Version 2012-10-17 bucket policies, IAM-style users, and familiar REST endpoints.
|
||||
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The repository root is the Cargo workspace; the server serves both the S3 API and the built-in web UI from a single process.
|
||||
|
||||
## Why MyFSIO?
|
||||
## Features
|
||||
|
||||
- **Dual servers:** Run both the API (port 5000) and UI (port 5100) with a single command: `python run.py`.
|
||||
- **IAM + access keys:** Users, access keys, key rotation, and bucket-scoped actions (`list/read/write/delete/policy`) now live in `data/.myfsio.sys/config/iam.json` and are editable from the IAM dashboard.
|
||||
- **Bucket policies + hot reload:** `data/.myfsio.sys/config/bucket_policies.json` uses AWS' policy grammar (Version `2012-10-17`) with a built-in watcher, so editing the JSON file applies immediately. The UI also ships Public/Private/Custom presets for faster edits.
|
||||
- **Presigned URLs everywhere:** Signature Version 4 presigned URLs respect IAM + bucket policies and replace the now-removed "share link" feature for public access scenarios.
|
||||
- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly.
|
||||
- **Tests & health:** `/healthz` for smoke checks and `pytest` coverage for IAM, CRUD, presign, and policy flows.
|
||||
- S3-compatible REST API with Signature Version 4 authentication
|
||||
- Browser UI for buckets, objects, IAM users, policies, replication, metrics, and site administration
|
||||
- Filesystem-backed storage rooted at `data/`
|
||||
- Bucket versioning, multipart uploads, presigned URLs, CORS, object and bucket tagging
|
||||
- Server-side encryption and built-in KMS support
|
||||
- Optional background services for lifecycle, garbage collection, integrity scanning, operation metrics, and system metrics history
|
||||
- Replication, site sync, and static website hosting support
|
||||
|
||||
## Architecture at a Glance
|
||||
## Runtime Model
|
||||
|
||||
```
|
||||
+-----------------+ +----------------+
|
||||
| API Server |<----->| Object storage |
|
||||
| (port 5000) | | (filesystem) |
|
||||
| - S3 routes | +----------------+
|
||||
| - Presigned URLs |
|
||||
| - Bucket policy |
|
||||
+-----------------+
|
||||
^
|
||||
|
|
||||
+-----------------+
|
||||
| UI Server |
|
||||
| (port 5100) |
|
||||
| - Auth console |
|
||||
| - IAM dashboard|
|
||||
| - Bucket editor|
|
||||
+-----------------+
|
||||
```
|
||||
MyFSIO now runs as one Rust process:
|
||||
|
||||
Both apps load the same configuration via `AppConfig` so IAM data and bucket policies stay consistent no matter which process you run.
|
||||
Bucket policies are automatically reloaded whenever `bucket_policies.json` changes—no restarts required.
|
||||
- API listener on `HOST` + `PORT` (default `127.0.0.1:5000`)
|
||||
- UI listener on `HOST` + `UI_PORT` (default `127.0.0.1:5100`)
|
||||
- Shared state for storage, IAM, policies, sessions, metrics, and background workers
|
||||
|
||||
## Getting Started
|
||||
If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only" runtime anymore.
|
||||
|
||||
## Quick Start
|
||||
|
||||
From the repository root:
|
||||
|
||||
```bash
|
||||
python -m venv .venv
|
||||
. .venv/Scripts/activate # PowerShell: .\.venv\Scripts\Activate.ps1
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Run both API and UI (default)
|
||||
python run.py
|
||||
|
||||
# Or run individually:
|
||||
# python run.py --mode api
|
||||
# python run.py --mode ui
|
||||
cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
Visit `http://127.0.0.1:5100/ui` for the console and `http://127.0.0.1:5000/` for the raw API. Override ports/hosts with the environment variables listed below.
|
||||
Useful URLs:
|
||||
|
||||
## IAM, Access Keys, and Bucket Policies
|
||||
- UI: `http://127.0.0.1:5100/ui`
|
||||
- API: `http://127.0.0.1:5000/`
|
||||
- Health: `http://127.0.0.1:5000/myfsio/health`
|
||||
|
||||
- First run creates `data/.myfsio.sys/config/iam.json` with `localadmin / localadmin` (full control). Sign in via the UI, then use the **IAM** tab to create users, rotate secrets, or edit inline policies without touching JSON by hand.
|
||||
- Bucket policies live in `data/.myfsio.sys/config/bucket_policies.json` and follow the AWS `arn:aws:s3:::bucket/key` resource syntax with Version `2012-10-17`. Attach/replace/remove policies from the bucket detail page or edit the JSON by hand—changes hot reload automatically.
|
||||
- IAM actions include extended verbs (`iam:list_users`, `iam:create_user`, `iam:update_policy`, etc.) so you can control who is allowed to manage other users and policies.
|
||||
On first boot, MyFSIO creates `data/.myfsio.sys/config/iam.json` and prints the generated admin access key and secret key to the console.
|
||||
|
||||
### Bucket Policy Presets & Hot Reload
|
||||
### Common CLI commands
|
||||
|
||||
- **Presets:** Every bucket detail view includes Public (read-only), Private (detach policy), and Custom presets. Public auto-populates a policy that grants anonymous `s3:ListBucket` + `s3:GetObject` access to the entire bucket.
|
||||
- **Custom drafts:** Switching back to Custom restores your last manual edit so you can toggle between presets without losing work.
|
||||
- **Hot reload:** The server watches `bucket_policies.json` and reloads statements on the fly—ideal for editing policies in your favorite editor while testing via curl or the UI.
|
||||
```bash
|
||||
# Show resolved configuration
|
||||
cargo run -p myfsio-server -- --show-config
|
||||
|
||||
## Presigned URLs
|
||||
# Validate configuration and exit non-zero on critical issues
|
||||
cargo run -p myfsio-server -- --check-config
|
||||
|
||||
Presigned URLs follow the AWS CLI playbook:
|
||||
# Reset admin credentials
|
||||
cargo run -p myfsio-server -- --reset-cred
|
||||
|
||||
- Call `POST /presign/<bucket>/<key>` (or use the "Presign" button in the UI) to request a Signature Version 4 URL valid for 1 second to 7 days.
|
||||
- The generated URL honors IAM permissions and bucket-policy decisions at generation-time and again when somebody fetches it.
|
||||
- Because presigned URLs cover both authenticated and public sharing scenarios, the legacy "share link" feature has been removed.
|
||||
# API only
|
||||
UI_ENABLED=false cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
## Building a Binary
|
||||
|
||||
```bash
|
||||
cargo build --release -p myfsio-server
|
||||
```
|
||||
|
||||
Binary locations:
|
||||
|
||||
- Linux/macOS: `target/release/myfsio-server`
|
||||
- Windows: `target/release/myfsio-server.exe`
|
||||
|
||||
Run the built binary directly:
|
||||
|
||||
```bash
|
||||
./target/release/myfsio-server
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The server reads environment variables from the process environment and also loads, when present:
|
||||
|
||||
- `/opt/myfsio/myfsio.env`
|
||||
- `.env`
|
||||
- `myfsio.env`
|
||||
|
||||
Core settings:
|
||||
|
||||
| Variable | Default | Description |
|
||||
| --- | --- | --- |
|
||||
| `STORAGE_ROOT` | `<project>/data` | Filesystem root for bucket directories |
|
||||
| `MAX_UPLOAD_SIZE` | `1073741824` | Maximum upload size (bytes) |
|
||||
| `UI_PAGE_SIZE` | `100` | `MaxKeys` hint for listings |
|
||||
| `SECRET_KEY` | `dev-secret-key` | Flask session secret for the UI |
|
||||
| `IAM_CONFIG` | `<project>/data/.myfsio.sys/config/iam.json` | IAM user + policy store |
|
||||
| `BUCKET_POLICY_PATH` | `<project>/data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store |
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | Used by the UI when calling API endpoints (presign, bucket policy) |
|
||||
| `AWS_REGION` | `us-east-1` | Region used in Signature V4 scope |
|
||||
| `AWS_SERVICE` | `s3` | Service used in Signature V4 scope |
|
||||
| `HOST` | `127.0.0.1` | Bind address for API and UI listeners |
|
||||
| `PORT` | `5000` | API port |
|
||||
| `UI_PORT` | `5100` | UI port |
|
||||
| `UI_ENABLED` | `true` | Disable to run API-only |
|
||||
| `STORAGE_ROOT` | `./data` | Root directory for buckets and system metadata |
|
||||
| `IAM_CONFIG` | `<STORAGE_ROOT>/.myfsio.sys/config/iam.json` | IAM config path |
|
||||
| `API_BASE_URL` | unset | Public API base used by the UI and presigned URL generation |
|
||||
| `AWS_REGION` | `us-east-1` | Region used in SigV4 scope |
|
||||
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Allowed request time skew |
|
||||
| `PRESIGNED_URL_MIN_EXPIRY_SECONDS` | `1` | Minimum presigned URL expiry |
|
||||
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Maximum presigned URL expiry |
|
||||
| `SECRET_KEY` | loaded from `.myfsio.sys/config/.secret` if present | Session signing key and IAM-at-rest encryption key |
|
||||
| `ADMIN_ACCESS_KEY` | unset | Optional first-run or reset access key |
|
||||
| `ADMIN_SECRET_KEY` | unset | Optional first-run or reset secret key |
|
||||
|
||||
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`.
|
||||
Feature toggles:
|
||||
|
||||
## API Cheatsheet (IAM headers required)
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `ENCRYPTION_ENABLED` | `false` |
|
||||
| `KMS_ENABLED` | `false` |
|
||||
| `GC_ENABLED` | `false` |
|
||||
| `INTEGRITY_ENABLED` | `false` |
|
||||
| `LIFECYCLE_ENABLED` | `false` |
|
||||
| `METRICS_HISTORY_ENABLED` | `false` |
|
||||
| `OPERATION_METRICS_ENABLED` | `false` |
|
||||
| `WEBSITE_HOSTING_ENABLED` | `false` |
|
||||
| `SITE_SYNC_ENABLED` | `false` |
|
||||
|
||||
Metrics and replication tuning:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` |
|
||||
| `OPERATION_METRICS_RETENTION_HOURS` | `24` |
|
||||
| `METRICS_HISTORY_INTERVAL_MINUTES` | `5` |
|
||||
| `METRICS_HISTORY_RETENTION_HOURS` | `24` |
|
||||
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` |
|
||||
| `REPLICATION_READ_TIMEOUT_SECONDS` | `30` |
|
||||
| `REPLICATION_MAX_RETRIES` | `2` |
|
||||
| `REPLICATION_STREAMING_THRESHOLD_BYTES` | `10485760` |
|
||||
| `REPLICATION_MAX_FAILURES_PER_BUCKET` | `50` |
|
||||
| `SITE_SYNC_INTERVAL_SECONDS` | `60` |
|
||||
| `SITE_SYNC_BATCH_SIZE` | `100` |
|
||||
| `SITE_SYNC_CONNECT_TIMEOUT_SECONDS` | `10` |
|
||||
| `SITE_SYNC_READ_TIMEOUT_SECONDS` | `120` |
|
||||
| `SITE_SYNC_MAX_RETRIES` | `2` |
|
||||
| `SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS` | `1.0` |
|
||||
|
||||
UI asset overrides:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `TEMPLATES_DIR` | built-in crate templates directory |
|
||||
| `STATIC_DIR` | built-in crate static directory |
|
||||
|
||||
See [docs.md](./docs.md) for the full Rust-side operations guide.
|
||||
|
||||
## Data Layout
|
||||
|
||||
```text
|
||||
data/
|
||||
<bucket>/
|
||||
.myfsio.sys/
|
||||
config/
|
||||
iam.json
|
||||
bucket_policies.json
|
||||
connections.json
|
||||
operation_metrics.json
|
||||
metrics_history.json
|
||||
buckets/<bucket>/
|
||||
meta/
|
||||
versions/
|
||||
multipart/
|
||||
keys/
|
||||
```
|
||||
GET / -> List buckets (XML)
|
||||
PUT /<bucket> -> Create bucket
|
||||
DELETE /<bucket> -> Delete bucket (must be empty)
|
||||
GET /<bucket> -> List objects (XML)
|
||||
PUT /<bucket>/<key> -> Upload object (binary stream)
|
||||
GET /<bucket>/<key> -> Download object
|
||||
DELETE /<bucket>/<key> -> Delete object
|
||||
POST /presign/<bucket>/<key> -> Generate AWS SigV4 presigned URL (JSON)
|
||||
GET /bucket-policy/<bucket> -> Fetch bucket policy (JSON)
|
||||
PUT /bucket-policy/<bucket> -> Attach/replace bucket policy (JSON)
|
||||
DELETE /bucket-policy/<bucket> -> Remove bucket policy
|
||||
|
||||
## Docker
|
||||
|
||||
Build the Rust image from the repository root:
|
||||
|
||||
```bash
|
||||
docker build -t myfsio .
|
||||
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
|
||||
```
|
||||
|
||||
If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3 endpoint.
|
||||
|
||||
## Linux Installation
|
||||
|
||||
The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:
|
||||
|
||||
```bash
|
||||
cargo build --release -p myfsio-server
|
||||
|
||||
sudo ./scripts/install.sh --binary ./target/release/myfsio-server
|
||||
```
|
||||
|
||||
The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the Python and Rust test suites from the workspace:
|
||||
|
||||
```bash
|
||||
pytest -q
|
||||
cargo test
|
||||
```
|
||||
|
||||
## References
|
||||
## Health Check
|
||||
|
||||
- [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/)
|
||||
- [Signature Version 4 Signing Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
|
||||
- [Amazon S3 Bucket Policy Examples](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
|
||||
`GET /myfsio/health` returns:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "ok",
|
||||
"version": "0.5.0"
|
||||
}
|
||||
```
|
||||
|
||||
The `version` field comes from the Rust crate version in `crates/myfsio-server/Cargo.toml`.
|
||||
|
||||
270
app/__init__.py
270
app/__init__.py
@@ -1,270 +0,0 @@
|
||||
"""Application factory for the mini S3-compatible object store."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from logging.handlers import RotatingFileHandler
|
||||
from pathlib import Path
|
||||
from datetime import timedelta
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from flask import Flask, g, has_request_context, redirect, render_template, request, url_for
|
||||
from flask_cors import CORS
|
||||
from flask_wtf.csrf import CSRFError
|
||||
from werkzeug.middleware.proxy_fix import ProxyFix
|
||||
|
||||
from .bucket_policies import BucketPolicyStore
|
||||
from .config import AppConfig
|
||||
from .connections import ConnectionStore
|
||||
from .encryption import EncryptionManager
|
||||
from .extensions import limiter, csrf
|
||||
from .iam import IamService
|
||||
from .kms import KMSManager
|
||||
from .replication import ReplicationManager
|
||||
from .secret_store import EphemeralSecretStore
|
||||
from .storage import ObjectStorage
|
||||
from .version import get_version
|
||||
|
||||
|
||||
def create_app(
    test_config: Optional[Dict[str, Any]] = None,
    *,
    include_api: bool = True,
    include_ui: bool = True,
) -> Flask:
    """Create and configure the Flask application.

    Args:
        test_config: Optional config overrides applied on top of the
            environment-derived ``AppConfig`` (used by the test suite).
        include_api: Register the S3 and KMS API blueprints.
        include_ui: Register the management UI blueprint.

    Returns:
        A fully wired Flask app with storage, IAM, policies, replication,
        and (optionally) encryption available under ``app.extensions``.
    """
    config = AppConfig.from_env(test_config)

    # Resolve the project root: PyInstaller bundles unpack to sys._MEIPASS,
    # otherwise fall back to the repository checkout (two levels up).
    if getattr(sys, "frozen", False):
        project_root = Path(sys._MEIPASS)
    else:
        project_root = Path(__file__).resolve().parent.parent

    app = Flask(
        __name__,
        static_folder=str(project_root / "static"),
        template_folder=str(project_root / "templates"),
    )
    app.config.update(config.to_flask_config())
    # test_config is applied twice on purpose: from_env() uses it for derived
    # values, and this update lets raw Flask keys (e.g. TESTING) win outright.
    if test_config:
        app.config.update(test_config)
    app.config.setdefault("APP_VERSION", get_version())
    app.permanent_session_lifetime = timedelta(days=int(app.config.get("SESSION_LIFETIME_DAYS", 30)))
    if app.config.get("TESTING"):
        # CSRF tokens get in the way of test clients; tests may still re-enable.
        app.config.setdefault("WTF_CSRF_ENABLED", False)

    # Trust X-Forwarded-* headers from proxies (one hop).
    app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_prefix=1)

    _configure_cors(app)
    _configure_logging(app)

    limiter.init_app(app)
    csrf.init_app(app)

    # Core services: object storage, IAM, bucket policies, ephemeral secrets.
    storage = ObjectStorage(Path(app.config["STORAGE_ROOT"]))
    iam = IamService(
        Path(app.config["IAM_CONFIG"]),
        auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
        auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
    )
    bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
    secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))

    # Initialize Replication components
    # NOTE(review): these live directly under STORAGE_ROOT, not under
    # .myfsio.sys/config like iam.json — confirm this is intentional.
    connections_path = Path(app.config["STORAGE_ROOT"]) / ".connections.json"
    replication_rules_path = Path(app.config["STORAGE_ROOT"]) / ".replication_rules.json"

    connections = ConnectionStore(connections_path)
    replication = ReplicationManager(storage, connections, replication_rules_path)

    # Initialize encryption and KMS
    encryption_config = {
        "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
        "encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
        "default_encryption_algorithm": app.config.get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"),
    }
    encryption_manager = EncryptionManager(encryption_config)

    kms_manager = None
    if app.config.get("KMS_ENABLED", False):
        kms_keys_path = Path(app.config.get("KMS_KEYS_PATH", ""))
        kms_master_key_path = Path(app.config.get("ENCRYPTION_MASTER_KEY_PATH", ""))
        kms_manager = KMSManager(kms_keys_path, kms_master_key_path)
        encryption_manager.set_kms_provider(kms_manager)

    # Wrap storage with encryption layer if encryption is enabled.
    # Imported lazily so the module is only required when the feature is on.
    if app.config.get("ENCRYPTION_ENABLED", False):
        from .encrypted_storage import EncryptedObjectStorage
        storage = EncryptedObjectStorage(storage, encryption_manager)

    # Expose all services to blueprints via app.extensions.
    app.extensions["object_storage"] = storage
    app.extensions["iam"] = iam
    app.extensions["bucket_policies"] = bucket_policies
    app.extensions["secret_store"] = secret_store
    app.extensions["limiter"] = limiter
    app.extensions["connections"] = connections
    app.extensions["replication"] = replication
    app.extensions["encryption"] = encryption_manager
    app.extensions["kms"] = kms_manager

    @app.errorhandler(500)
    def internal_error(error):
        # Generic error page; details go to the log, not the response.
        return render_template('500.html'), 500

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        return render_template('csrf_error.html', reason=e.description), 400

    @app.template_filter("filesizeformat")
    def filesizeformat(value: int) -> str:
        """Format bytes as human-readable file size."""
        for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
            if abs(value) < 1024.0 or unit == "PB":
                if unit == "B":
                    # Bytes are shown without a decimal point.
                    return f"{int(value)} {unit}"
                return f"{value:.1f} {unit}"
            value /= 1024.0
        return f"{value:.1f} PB"

    @app.template_filter("timestamp_to_datetime")
    def timestamp_to_datetime(value: float) -> str:
        """Format Unix timestamp as human-readable datetime (local time)."""
        from datetime import datetime
        if not value:
            return "Never"
        try:
            dt = datetime.fromtimestamp(value)
            return dt.strftime("%Y-%m-%d %H:%M:%S")
        except (ValueError, OSError):
            # Out-of-range or platform-rejected timestamps.
            return "Unknown"

    if include_api:
        from .s3_api import s3_api_bp
        from .kms_api import kms_api_bp

        app.register_blueprint(s3_api_bp)
        app.register_blueprint(kms_api_bp)
        # S3 clients sign requests with SigV4, not CSRF tokens.
        csrf.exempt(s3_api_bp)
        csrf.exempt(kms_api_bp)

    if include_ui:
        from .ui import ui_bp

        app.register_blueprint(ui_bp)
        if not include_api:
            # UI-only deployment: make "/" land on the buckets overview
            # (the API would otherwise own the root route).
            @app.get("/")
            def ui_root_redirect():
                return redirect(url_for("ui.buckets_overview"))

    @app.errorhandler(404)
    def handle_not_found(error):
        # Serve an HTML 404 only for browser-facing paths; API callers get
        # the default (XML/JSON-friendly) error response.
        wants_html = request.accept_mimetypes.accept_html
        path = request.path or ""
        if include_ui and wants_html:
            if not include_api or path.startswith("/ui") or path == "/":
                return render_template("404.html"), 404
        return error

    @app.get("/healthz")
    def healthcheck() -> Dict[str, str]:
        return {"status": "ok", "version": app.config.get("APP_VERSION", "unknown")}

    return app
|
||||
|
||||
|
||||
def create_api_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    """Build an API-only application (S3/KMS blueprints, no management UI)."""
    app = create_app(test_config, include_api=True, include_ui=False)
    return app
|
||||
|
||||
|
||||
def create_ui_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    """Build a UI-only application (management UI, no S3/KMS blueprints)."""
    app = create_app(test_config, include_api=False, include_ui=True)
    return app
|
||||
|
||||
|
||||
def _configure_cors(app: Flask) -> None:
    """Attach flask-cors to every route using the CORS_* keys in app.config.

    Defaults are permissive (wildcard origins/headers) and credentials are
    always allowed; tighten via CORS_ORIGINS et al. in production.
    """
    settings = {
        "origins": app.config.get("CORS_ORIGINS", ["*"]),
        "methods": app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"]),
        "allow_headers": app.config.get("CORS_ALLOW_HEADERS", ["*"]),
        "expose_headers": app.config.get("CORS_EXPOSE_HEADERS", ["*"]),
    }
    CORS(app, resources={r"/*": settings}, supports_credentials=True)
|
||||
|
||||
|
||||
class _RequestContextFilter(logging.Filter):
    """Inject request-specific attributes into log records.

    Guarantees that request_id, path, method and remote_addr always exist on
    the record so the log formatter never raises on a missing field.
    """

    def filter(self, record: logging.LogRecord) -> bool:  # pragma: no cover - simple boilerplate
        if has_request_context():
            # Inside a request: pull live values from flask.g / flask.request.
            record.request_id = getattr(g, "request_id", "-")
            record.path = request.path
            record.method = request.method
            record.remote_addr = request.remote_addr or "-"
        else:
            # Outside a request (startup, background work): keep any values a
            # caller supplied via `extra=`, defaulting missing ones to "-".
            for attr in ("request_id", "path", "method", "remote_addr"):
                setattr(record, attr, getattr(record, attr, "-"))
        return True
|
||||
|
||||
|
||||
def _configure_logging(app: Flask) -> None:
    """Wire up app logging: stdout handler, optional rotating file handler,
    and before/after-request hooks that tag every request with an ID and
    record its duration.

    The format string references %(request_id)s etc., which only exist
    because _RequestContextFilter is attached to every handler.
    """
    formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(request_id)s | %(method)s %(path)s | %(message)s"
    )

    # Stream Handler (stdout) - Primary for Docker
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    stream_handler.addFilter(_RequestContextFilter())

    logger = app.logger
    # Drop Flask's default handlers so records aren't emitted twice.
    logger.handlers.clear()
    logger.addHandler(stream_handler)

    # File Handler (optional, if configured)
    if app.config.get("LOG_TO_FILE"):
        log_file = Path(app.config["LOG_FILE"])
        log_file.parent.mkdir(parents=True, exist_ok=True)
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=int(app.config.get("LOG_MAX_BYTES", 5 * 1024 * 1024)),
            backupCount=int(app.config.get("LOG_BACKUP_COUNT", 3)),
            encoding="utf-8",
        )
        file_handler.setFormatter(formatter)
        file_handler.addFilter(_RequestContextFilter())
        logger.addHandler(file_handler)

    # Unknown LOG_LEVEL strings silently fall back to INFO.
    logger.setLevel(getattr(logging, app.config.get("LOG_LEVEL", "INFO"), logging.INFO))

    @app.before_request
    def _log_request_start() -> None:
        # Fresh correlation ID + start time for this request, stored on g.
        g.request_id = uuid.uuid4().hex
        g.request_started_at = time.perf_counter()
        app.logger.info(
            "Request started",
            extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
        )

    @app.after_request
    def _log_request_end(response):
        # Duration is 0.0 if before_request never ran (e.g. early abort).
        duration_ms = 0.0
        if hasattr(g, "request_started_at"):
            duration_ms = (time.perf_counter() - g.request_started_at) * 1000
        request_id = getattr(g, "request_id", uuid.uuid4().hex)
        # setdefault: don't clobber an X-Request-ID a handler already set.
        response.headers.setdefault("X-Request-ID", request_id)
        app.logger.info(
            "Request completed",
            extra={
                "path": request.path,
                "method": request.method,
                "remote_addr": request.remote_addr,
            },
        )
        response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
        return response
|
||||
@@ -1,283 +0,0 @@
|
||||
"""Bucket policy loader/enforcer with a subset of AWS semantics."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
from fnmatch import fnmatch
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterable, List, Optional, Sequence
|
||||
|
||||
|
||||
# ARN prefix shared by every S3 resource string this module parses.
RESOURCE_PREFIX = "arn:aws:s3:::"

# Maps lowercase AWS S3 action names to this project's coarse permission
# categories (list/read/write/delete/share/policy/replication). Actions not
# present here pass through unchanged in _normalize_action.
ACTION_ALIASES = {
    # List actions
    "s3:listbucket": "list",
    "s3:listallmybuckets": "list",
    "s3:listbucketversions": "list",
    "s3:listmultipartuploads": "list",
    "s3:listparts": "list",
    # Read actions
    "s3:getobject": "read",
    "s3:getobjectversion": "read",
    "s3:getobjecttagging": "read",
    "s3:getobjectversiontagging": "read",
    "s3:getobjectacl": "read",
    "s3:getbucketversioning": "read",
    "s3:headobject": "read",
    "s3:headbucket": "read",
    # Write actions
    "s3:putobject": "write",
    "s3:createbucket": "write",
    "s3:putobjecttagging": "write",
    "s3:putbucketversioning": "write",
    "s3:createmultipartupload": "write",
    "s3:uploadpart": "write",
    "s3:completemultipartupload": "write",
    "s3:abortmultipartupload": "write",
    "s3:copyobject": "write",
    # Delete actions
    "s3:deleteobject": "delete",
    "s3:deleteobjectversion": "delete",
    "s3:deletebucket": "delete",
    "s3:deleteobjecttagging": "delete",
    # Share actions (ACL)
    "s3:putobjectacl": "share",
    "s3:putbucketacl": "share",
    "s3:getbucketacl": "share",
    # Policy actions
    "s3:putbucketpolicy": "policy",
    "s3:getbucketpolicy": "policy",
    "s3:deletebucketpolicy": "policy",
    # Replication actions
    "s3:getreplicationconfiguration": "replication",
    "s3:putreplicationconfiguration": "replication",
    "s3:deletereplicationconfiguration": "replication",
    "s3:replicateobject": "replication",
    "s3:replicatetags": "replication",
    "s3:replicatedelete": "replication",
}
|
||||
|
||||
|
||||
def _normalize_action(action: str) -> str:
    """Canonicalize an action string: lowercase it and map known S3 action
    names to their coarse category; unknown names pass through unchanged."""
    canonical = action.strip().lower()
    if canonical == "*":
        return "*"
    return ACTION_ALIASES.get(canonical, canonical)
|
||||
|
||||
|
||||
def _normalize_actions(actions: Iterable[str]) -> List[str]:
    """Canonicalize and de-duplicate a list of actions, preserving order.

    A wildcard entry short-circuits: the whole list collapses to ["*"].
    Empty strings are dropped.
    """
    seen: List[str] = []
    for raw in actions:
        canonical = _normalize_action(raw)
        if canonical == "*" and "*" not in seen:
            return ["*"]
        if canonical and canonical not in seen:
            seen.append(canonical)
    return seen
|
||||
|
||||
|
||||
def _normalize_principals(principal_field: Any) -> List[str] | str:
|
||||
if principal_field == "*":
|
||||
return "*"
|
||||
|
||||
def _collect(values: Any) -> List[str]:
|
||||
if values is None:
|
||||
return []
|
||||
if values == "*":
|
||||
return ["*"]
|
||||
if isinstance(values, str):
|
||||
return [values]
|
||||
if isinstance(values, dict):
|
||||
aggregated: List[str] = []
|
||||
for nested in values.values():
|
||||
chunk = _collect(nested)
|
||||
if "*" in chunk:
|
||||
return ["*"]
|
||||
aggregated.extend(chunk)
|
||||
return aggregated
|
||||
if isinstance(values, Iterable):
|
||||
aggregated = []
|
||||
for nested in values:
|
||||
chunk = _collect(nested)
|
||||
if "*" in chunk:
|
||||
return ["*"]
|
||||
aggregated.extend(chunk)
|
||||
return aggregated
|
||||
return [str(values)]
|
||||
|
||||
normalized: List[str] = []
|
||||
for entry in _collect(principal_field):
|
||||
token = str(entry).strip()
|
||||
if token == "*":
|
||||
return "*"
|
||||
if token and token not in normalized:
|
||||
normalized.append(token)
|
||||
return normalized or "*"
|
||||
|
||||
|
||||
def _parse_resource(resource: str) -> tuple[str | None, str | None]:
    """Split an S3 ARN into (bucket, key_pattern).

    Returns (None, None) for non-S3 ARNs. A bucket-only ARN yields a None
    key pattern; empty bucket or key segments fall back to "*".
    """
    if not resource.startswith(RESOURCE_PREFIX):
        return None, None
    remainder = resource[len(RESOURCE_PREFIX) :]
    bucket, slash, key_pattern = remainder.partition("/")
    if not slash:
        # No key part at all: bucket-level resource.
        return (bucket or "*"), None
    return (bucket or "*"), (key_pattern or "*")
|
||||
|
||||
|
||||
@dataclass
|
||||
class BucketPolicyStatement:
|
||||
sid: Optional[str]
|
||||
effect: str
|
||||
principals: List[str] | str
|
||||
actions: List[str]
|
||||
resources: List[tuple[str | None, str | None]]
|
||||
|
||||
def matches_principal(self, access_key: Optional[str]) -> bool:
|
||||
if self.principals == "*":
|
||||
return True
|
||||
if access_key is None:
|
||||
return False
|
||||
return access_key in self.principals
|
||||
|
||||
def matches_action(self, action: str) -> bool:
|
||||
action = _normalize_action(action)
|
||||
return "*" in self.actions or action in self.actions
|
||||
|
||||
def matches_resource(self, bucket: Optional[str], object_key: Optional[str]) -> bool:
|
||||
bucket = (bucket or "*").lower()
|
||||
key = object_key or ""
|
||||
for resource_bucket, key_pattern in self.resources:
|
||||
resource_bucket = (resource_bucket or "*").lower()
|
||||
if resource_bucket not in {"*", bucket}:
|
||||
continue
|
||||
if key_pattern is None:
|
||||
if not key:
|
||||
return True
|
||||
continue
|
||||
if fnmatch(key, key_pattern):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class BucketPolicyStore:
    """Loads bucket policies from disk and evaluates statements.

    Keeps two parallel views keyed by lowercase bucket name:
      _raw      — the original policy JSON payloads (what callers GET back)
      _policies — normalized BucketPolicyStatement lists used for evaluation

    The backing file is reloaded lazily via maybe_reload() when its mtime
    changes, so external edits are picked up without a restart.
    """

    def __init__(self, policy_path: Path) -> None:
        self.policy_path = Path(policy_path)
        self.policy_path.parent.mkdir(parents=True, exist_ok=True)
        # Seed an empty store on first run so later reads never miss the file.
        if not self.policy_path.exists():
            self.policy_path.write_text(json.dumps({"policies": {}}, indent=2))
        self._raw: Dict[str, Any] = {}
        self._policies: Dict[str, List[BucketPolicyStatement]] = {}
        self._load()
        self._last_mtime = self._current_mtime()

    def maybe_reload(self) -> None:
        """Re-read the policy file if it changed on disk since the last load."""
        current = self._current_mtime()
        if current is None or current == self._last_mtime:
            return
        self._load()
        self._last_mtime = current

    def _current_mtime(self) -> float | None:
        # None signals "file missing"; maybe_reload treats that as no-op.
        try:
            return self.policy_path.stat().st_mtime
        except FileNotFoundError:
            return None

    # ------------------------------------------------------------------
    def evaluate(
        self,
        access_key: Optional[str],
        bucket: Optional[str],
        object_key: Optional[str],
        action: str,
    ) -> str | None:
        """Evaluate the bucket's statements for this request.

        Returns "deny" on the first matching Deny (explicit deny wins),
        "allow" if any Allow matched, or None when no statement applies
        (caller decides the default).
        """
        bucket = (bucket or "").lower()
        statements = self._policies.get(bucket) or []
        decision: Optional[str] = None
        for statement in statements:
            if not statement.matches_principal(access_key):
                continue
            if not statement.matches_action(action):
                continue
            if not statement.matches_resource(bucket, object_key):
                continue
            if statement.effect == "deny":
                return "deny"
            decision = "allow"
        return decision

    def get_policy(self, bucket: str) -> Dict[str, Any] | None:
        """Return the raw policy document for `bucket`, or None if unset."""
        return self._raw.get(bucket.lower())

    def set_policy(self, bucket: str, policy_payload: Dict[str, Any]) -> None:
        """Validate, store, and persist a policy document for `bucket`.

        Raises:
            ValueError: if no statement in the payload survives normalization.
        """
        bucket = bucket.lower()
        statements = self._normalize_policy(policy_payload)
        if not statements:
            raise ValueError("Policy must include at least one valid statement")
        self._raw[bucket] = policy_payload
        self._policies[bucket] = statements
        self._persist()

    def delete_policy(self, bucket: str) -> None:
        """Remove the policy for `bucket` (no-op if absent) and persist."""
        bucket = bucket.lower()
        self._raw.pop(bucket, None)
        self._policies.pop(bucket, None)
        self._persist()

    # ------------------------------------------------------------------
    def _load(self) -> None:
        """(Re)load and normalize all policies from disk.

        Raises:
            ValueError: on unreadable or structurally invalid policy files;
                a missing file is treated as an empty store instead.
        """
        try:
            content = self.policy_path.read_text(encoding='utf-8')
            raw_payload = json.loads(content)
        except FileNotFoundError:
            raw_payload = {"policies": {}}
        except json.JSONDecodeError as e:
            raise ValueError(f"Corrupted bucket policy file (invalid JSON): {e}")
        except PermissionError as e:
            raise ValueError(f"Cannot read bucket policy file (permission denied): {e}")
        except (OSError, ValueError) as e:
            raise ValueError(f"Failed to load bucket policies: {e}")

        policies: Dict[str, Any] = raw_payload.get("policies", {})
        parsed: Dict[str, List[BucketPolicyStatement]] = {}
        for bucket, policy in policies.items():
            parsed[bucket.lower()] = self._normalize_policy(policy)
        # Bucket names are lowercased on load so lookups are case-insensitive.
        self._raw = {bucket.lower(): policy for bucket, policy in policies.items()}
        self._policies = parsed

    def _persist(self) -> None:
        # NOTE(review): plain write_text, not an atomic replace — a crash
        # mid-write could truncate the file; confirm whether that's acceptable.
        payload = {"policies": self._raw}
        self.policy_path.write_text(json.dumps(payload, indent=2))

    def _normalize_policy(self, policy: Dict[str, Any]) -> List[BucketPolicyStatement]:
        """Convert one raw policy document into evaluable statements.

        Statements whose Resource list parses to nothing are dropped; a
        missing Action list defaults to ["*"], missing Effect to allow.
        """
        statements_raw: Sequence[Dict[str, Any]] = policy.get("Statement", [])
        statements: List[BucketPolicyStatement] = []
        for statement in statements_raw:
            actions = _normalize_actions(statement.get("Action", []))
            principals = _normalize_principals(statement.get("Principal", "*"))
            resources_field = statement.get("Resource", [])
            if isinstance(resources_field, str):
                resources_field = [resources_field]
            resources: List[tuple[str | None, str | None]] = []
            for resource in resources_field:
                bucket, pattern = _parse_resource(str(resource))
                if bucket:
                    resources.append((bucket, pattern))
            if not resources:
                continue
            effect = statement.get("Effect", "Allow").lower()
            statements.append(
                BucketPolicyStatement(
                    sid=statement.get("Sid"),
                    effect=effect,
                    principals=principals,
                    actions=actions or ["*"],
                    resources=resources,
                )
            )
        return statements
|
||||
333
app/config.py
333
app/config.py
@@ -1,333 +0,0 @@
|
||||
"""Configuration helpers for the S3 clone application."""
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import secrets
|
||||
import shutil
|
||||
import sys
|
||||
import warnings
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
# Locate the project root once at import time; used for default paths below.
if getattr(sys, "frozen", False):
    # Running in a PyInstaller bundle: resources are unpacked to _MEIPASS.
    PROJECT_ROOT = Path(sys._MEIPASS)
else:
    # Running in a normal Python environment: repo root is two levels up
    # from this file (app/config.py -> app -> project root).
    PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
||||
|
||||
|
||||
def _prepare_config_file(active_path: Path, legacy_path: Optional[Path] = None) -> Path:
|
||||
"""Ensure config directories exist and migrate legacy files when possible."""
|
||||
active_path = Path(active_path)
|
||||
active_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if legacy_path:
|
||||
legacy_path = Path(legacy_path)
|
||||
if not active_path.exists() and legacy_path.exists():
|
||||
legacy_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
try:
|
||||
shutil.move(str(legacy_path), str(active_path))
|
||||
except OSError:
|
||||
shutil.copy2(legacy_path, active_path)
|
||||
try:
|
||||
legacy_path.unlink(missing_ok=True)
|
||||
except OSError:
|
||||
pass
|
||||
return active_path
|
||||
|
||||
|
||||
@dataclass
|
||||
class AppConfig:
|
||||
storage_root: Path
|
||||
max_upload_size: int
|
||||
ui_page_size: int
|
||||
secret_key: str
|
||||
iam_config_path: Path
|
||||
bucket_policy_path: Path
|
||||
api_base_url: Optional[str]
|
||||
aws_region: str
|
||||
aws_service: str
|
||||
ui_enforce_bucket_policies: bool
|
||||
log_level: str
|
||||
log_to_file: bool
|
||||
log_path: Path
|
||||
log_max_bytes: int
|
||||
log_backup_count: int
|
||||
ratelimit_default: str
|
||||
ratelimit_storage_uri: str
|
||||
cors_origins: list[str]
|
||||
cors_methods: list[str]
|
||||
cors_allow_headers: list[str]
|
||||
cors_expose_headers: list[str]
|
||||
session_lifetime_days: int
|
||||
auth_max_attempts: int
|
||||
auth_lockout_minutes: int
|
||||
bulk_delete_max_keys: int
|
||||
secret_ttl_seconds: int
|
||||
stream_chunk_size: int
|
||||
multipart_min_part_size: int
|
||||
bucket_stats_cache_ttl: int
|
||||
encryption_enabled: bool
|
||||
encryption_master_key_path: Path
|
||||
kms_enabled: bool
|
||||
kms_keys_path: Path
|
||||
default_encryption_algorithm: str
|
||||
|
||||
@classmethod
|
||||
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
|
||||
overrides = overrides or {}
|
||||
|
||||
def _get(name: str, default: Any) -> Any:
|
||||
return overrides.get(name, os.getenv(name, default))
|
||||
|
||||
storage_root = Path(_get("STORAGE_ROOT", PROJECT_ROOT / "data")).resolve()
|
||||
max_upload_size = int(_get("MAX_UPLOAD_SIZE", 1024 * 1024 * 1024)) # 1 GiB default
|
||||
ui_page_size = int(_get("UI_PAGE_SIZE", 100))
|
||||
auth_max_attempts = int(_get("AUTH_MAX_ATTEMPTS", 5))
|
||||
auth_lockout_minutes = int(_get("AUTH_LOCKOUT_MINUTES", 15))
|
||||
bulk_delete_max_keys = int(_get("BULK_DELETE_MAX_KEYS", 500))
|
||||
secret_ttl_seconds = int(_get("SECRET_TTL_SECONDS", 300))
|
||||
stream_chunk_size = int(_get("STREAM_CHUNK_SIZE", 64 * 1024))
|
||||
multipart_min_part_size = int(_get("MULTIPART_MIN_PART_SIZE", 5 * 1024 * 1024))
|
||||
default_secret = "dev-secret-key"
|
||||
secret_key = str(_get("SECRET_KEY", default_secret))
|
||||
|
||||
if not secret_key or secret_key == default_secret:
|
||||
secret_file = storage_root / ".myfsio.sys" / "config" / ".secret"
|
||||
if secret_file.exists():
|
||||
secret_key = secret_file.read_text().strip()
|
||||
else:
|
||||
generated = secrets.token_urlsafe(32)
|
||||
if secret_key == default_secret:
|
||||
warnings.warn("Using insecure default SECRET_KEY. A random value has been generated and persisted; set SECRET_KEY for production", RuntimeWarning)
|
||||
try:
|
||||
secret_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
secret_file.write_text(generated)
|
||||
secret_key = generated
|
||||
except OSError:
|
||||
secret_key = generated
|
||||
|
||||
iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
|
||||
bucket_policy_override = "BUCKET_POLICY_PATH" in overrides or "BUCKET_POLICY_PATH" in os.environ
|
||||
|
||||
default_iam_path = storage_root / ".myfsio.sys" / "config" / "iam.json"
|
||||
default_bucket_policy_path = storage_root / ".myfsio.sys" / "config" / "bucket_policies.json"
|
||||
|
||||
iam_config_path = Path(_get("IAM_CONFIG", default_iam_path)).resolve()
|
||||
bucket_policy_path = Path(_get("BUCKET_POLICY_PATH", default_bucket_policy_path)).resolve()
|
||||
|
||||
iam_config_path = _prepare_config_file(
|
||||
iam_config_path,
|
||||
legacy_path=None if iam_env_override else storage_root / "iam.json",
|
||||
)
|
||||
bucket_policy_path = _prepare_config_file(
|
||||
bucket_policy_path,
|
||||
legacy_path=None if bucket_policy_override else storage_root / "bucket_policies.json",
|
||||
)
|
||||
api_base_url = _get("API_BASE_URL", None)
|
||||
if api_base_url:
|
||||
api_base_url = str(api_base_url)
|
||||
|
||||
aws_region = str(_get("AWS_REGION", "us-east-1"))
|
||||
aws_service = str(_get("AWS_SERVICE", "s3"))
|
||||
enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
log_level = str(_get("LOG_LEVEL", "INFO")).upper()
|
||||
log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
|
||||
log_dir = Path(_get("LOG_DIR", storage_root.parent / "logs")).resolve()
|
||||
log_dir.mkdir(parents=True, exist_ok=True)
|
||||
log_path = log_dir / str(_get("LOG_FILE", "app.log"))
|
||||
log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
|
||||
log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
|
||||
ratelimit_default = str(_get("RATE_LIMIT_DEFAULT", "200 per minute"))
|
||||
ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))
|
||||
|
||||
def _csv(value: str, default: list[str]) -> list[str]:
|
||||
if not value:
|
||||
return default
|
||||
parts = [segment.strip() for segment in value.split(",") if segment.strip()]
|
||||
return parts or default
|
||||
|
||||
cors_origins = _csv(str(_get("CORS_ORIGINS", "*")), ["*"])
|
||||
cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD")), ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
|
||||
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
|
||||
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
|
||||
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
|
||||
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60)) # Default 60 seconds
|
||||
|
||||
# Encryption settings
|
||||
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
|
||||
encryption_master_key_path = Path(_get("ENCRYPTION_MASTER_KEY_PATH", encryption_keys_dir / "master.key")).resolve()
|
||||
kms_enabled = str(_get("KMS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
|
||||
kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
|
||||
default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
|
||||
|
||||
return cls(storage_root=storage_root,
|
||||
max_upload_size=max_upload_size,
|
||||
ui_page_size=ui_page_size,
|
||||
secret_key=secret_key,
|
||||
iam_config_path=iam_config_path,
|
||||
bucket_policy_path=bucket_policy_path,
|
||||
api_base_url=api_base_url,
|
||||
aws_region=aws_region,
|
||||
aws_service=aws_service,
|
||||
ui_enforce_bucket_policies=enforce_ui_policies,
|
||||
log_level=log_level,
|
||||
log_to_file=log_to_file,
|
||||
log_path=log_path,
|
||||
log_max_bytes=log_max_bytes,
|
||||
log_backup_count=log_backup_count,
|
||||
ratelimit_default=ratelimit_default,
|
||||
ratelimit_storage_uri=ratelimit_storage_uri,
|
||||
cors_origins=cors_origins,
|
||||
cors_methods=cors_methods,
|
||||
cors_allow_headers=cors_allow_headers,
|
||||
cors_expose_headers=cors_expose_headers,
|
||||
session_lifetime_days=session_lifetime_days,
|
||||
auth_max_attempts=auth_max_attempts,
|
||||
auth_lockout_minutes=auth_lockout_minutes,
|
||||
bulk_delete_max_keys=bulk_delete_max_keys,
|
||||
secret_ttl_seconds=secret_ttl_seconds,
|
||||
stream_chunk_size=stream_chunk_size,
|
||||
multipart_min_part_size=multipart_min_part_size,
|
||||
bucket_stats_cache_ttl=bucket_stats_cache_ttl,
|
||||
encryption_enabled=encryption_enabled,
|
||||
encryption_master_key_path=encryption_master_key_path,
|
||||
kms_enabled=kms_enabled,
|
||||
kms_keys_path=kms_keys_path,
|
||||
default_encryption_algorithm=default_encryption_algorithm)
|
||||
|
||||
def validate_and_report(self) -> list[str]:
|
||||
"""Validate configuration and return a list of warnings/issues.
|
||||
|
||||
Call this at startup to detect potential misconfigurations before
|
||||
the application fully commits to running.
|
||||
"""
|
||||
issues = []
|
||||
|
||||
# Check if storage_root is writable
|
||||
try:
|
||||
test_file = self.storage_root / ".write_test"
|
||||
test_file.touch()
|
||||
test_file.unlink()
|
||||
except (OSError, PermissionError) as e:
|
||||
issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")
|
||||
|
||||
# Check if storage_root looks like a temp directory
|
||||
storage_str = str(self.storage_root).lower()
|
||||
if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
|
||||
issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")
|
||||
|
||||
# Check if IAM config path is under storage_root
|
||||
try:
|
||||
self.iam_config_path.relative_to(self.storage_root)
|
||||
except ValueError:
|
||||
issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")
|
||||
|
||||
# Check if bucket policy path is under storage_root
|
||||
try:
|
||||
self.bucket_policy_path.relative_to(self.storage_root)
|
||||
except ValueError:
|
||||
issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")
|
||||
|
||||
# Check if log path is writable
|
||||
try:
|
||||
self.log_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
test_log = self.log_path.parent / ".write_test"
|
||||
test_log.touch()
|
||||
test_log.unlink()
|
||||
except (OSError, PermissionError) as e:
|
||||
issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")
|
||||
|
||||
# Check log path location
|
||||
log_str = str(self.log_path).lower()
|
||||
if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
|
||||
issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")
|
||||
|
||||
# Check if encryption keys path is under storage_root (when encryption is enabled)
|
||||
if self.encryption_enabled:
|
||||
try:
|
||||
self.encryption_master_key_path.relative_to(self.storage_root)
|
||||
except ValueError:
|
||||
issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
|
||||
|
||||
# Check if KMS keys path is under storage_root (when KMS is enabled)
|
||||
if self.kms_enabled:
|
||||
try:
|
||||
self.kms_keys_path.relative_to(self.storage_root)
|
||||
except ValueError:
|
||||
issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
|
||||
|
||||
# Warn about production settings
|
||||
if self.secret_key == "dev-secret-key":
|
||||
issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")
|
||||
|
||||
if "*" in self.cors_origins:
|
||||
issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")
|
||||
|
||||
return issues
|
||||
|
||||
def print_startup_summary(self) -> None:
    """Print a summary of the configuration at startup."""
    divider = "=" * 60
    print("\n" + divider)
    print("MyFSIO Configuration Summary")
    print(divider)
    # Always-shown core paths.
    for line in (
        f" STORAGE_ROOT: {self.storage_root}",
        f" IAM_CONFIG: {self.iam_config_path}",
        f" BUCKET_POLICY: {self.bucket_policy_path}",
        f" LOG_PATH: {self.log_path}",
    ):
        print(line)
    # Optional features, printed only when configured/enabled.
    if self.api_base_url:
        print(f" API_BASE_URL: {self.api_base_url}")
    if self.encryption_enabled:
        print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
    if self.kms_enabled:
        print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
    print(divider)

    # Run validation and surface any findings beneath the summary.
    issues = self.validate_and_report()
    if not issues:
        print(" ✓ Configuration validated successfully\n")
        return
    print("\nConfiguration Issues Detected:")
    for issue in issues:
        print(f" • {issue}")
    print()
|
||||
|
||||
def to_flask_config(self) -> Dict[str, Any]:
    """Flatten the configuration into a Flask app.config-style mapping."""
    # Core identity, paths and AWS-compat settings.
    settings: Dict[str, Any] = {
        "STORAGE_ROOT": str(self.storage_root),
        "MAX_CONTENT_LENGTH": self.max_upload_size,
        "UI_PAGE_SIZE": self.ui_page_size,
        "SECRET_KEY": self.secret_key,
        "IAM_CONFIG": str(self.iam_config_path),
        "BUCKET_POLICY_PATH": str(self.bucket_policy_path),
        "API_BASE_URL": self.api_base_url,
        "AWS_REGION": self.aws_region,
        "AWS_SERVICE": self.aws_service,
        "UI_ENFORCE_BUCKET_POLICIES": self.ui_enforce_bucket_policies,
    }
    # Auth/request limits and tuning knobs.
    settings.update({
        "AUTH_MAX_ATTEMPTS": self.auth_max_attempts,
        "AUTH_LOCKOUT_MINUTES": self.auth_lockout_minutes,
        "BULK_DELETE_MAX_KEYS": self.bulk_delete_max_keys,
        "SECRET_TTL_SECONDS": self.secret_ttl_seconds,
        "STREAM_CHUNK_SIZE": self.stream_chunk_size,
        "MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
        "BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
    })
    # Logging and rate limiting.
    settings.update({
        "LOG_LEVEL": self.log_level,
        "LOG_TO_FILE": self.log_to_file,
        "LOG_FILE": str(self.log_path),
        "LOG_MAX_BYTES": self.log_max_bytes,
        "LOG_BACKUP_COUNT": self.log_backup_count,
        "RATELIMIT_DEFAULT": self.ratelimit_default,
        "RATELIMIT_STORAGE_URI": self.ratelimit_storage_uri,
    })
    # CORS and session lifetime.
    settings.update({
        "CORS_ORIGINS": self.cors_origins,
        "CORS_METHODS": self.cors_methods,
        "CORS_ALLOW_HEADERS": self.cors_allow_headers,
        "CORS_EXPOSE_HEADERS": self.cors_expose_headers,
        "SESSION_LIFETIME_DAYS": self.session_lifetime_days,
    })
    # Encryption / KMS.
    settings.update({
        "ENCRYPTION_ENABLED": self.encryption_enabled,
        "ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),
        "KMS_ENABLED": self.kms_enabled,
        "KMS_KEYS_PATH": str(self.kms_keys_path),
        "DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
    })
    return settings
|
||||
@@ -1,61 +0,0 @@
|
||||
"""Manage remote S3 connections."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from .config import AppConfig
|
||||
|
||||
|
||||
@dataclass
class RemoteConnection:
    """Credentials and endpoint for one remote S3-compatible service.

    Serialised to/from JSON by ConnectionStore via dataclasses.asdict, so
    field names must match the keys stored on disk.
    """

    # Unique identifier used as the dictionary key in ConnectionStore.
    id: str
    # Human-readable display name.
    name: str
    # Base URL of the remote S3-compatible endpoint.
    endpoint_url: str
    # Access key for the remote service.
    access_key: str
    # Secret key for the remote service.
    # NOTE(review): persisted in plain text by ConnectionStore.save() — confirm
    # that is acceptable for this deployment.
    secret_key: str
    # Signing region; defaults to the common S3 default.
    region: str = "us-east-1"
|
||||
|
||||
|
||||
class ConnectionStore:
    """JSON-file-backed registry of RemoteConnection records, keyed by id.

    The backing file holds a JSON array of connection objects. Mutating
    operations (add/delete) persist immediately via save().
    """

    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._connections: Dict[str, RemoteConnection] = {}
        self.reload()

    def reload(self) -> None:
        """Re-read connections from disk, replacing the in-memory set.

        A fresh dict is built and swapped in atomically so entries removed
        from the file do not linger in memory (the previous implementation
        updated the existing dict in place and never dropped stale entries).
        A corrupt or unreadable file yields an empty store.
        """
        if not self.config_path.exists():
            self._connections = {}
            return

        loaded: Dict[str, RemoteConnection] = {}
        try:
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            for item in data:
                conn = RemoteConnection(**item)
                loaded[conn.id] = conn
        except (OSError, json.JSONDecodeError, TypeError):
            # TypeError covers rows whose keys do not match the dataclass
            # fields; treat them the same as any other corrupt file.
            loaded = {}
        self._connections = loaded

    def save(self) -> None:
        """Persist all connections to the backing JSON file."""
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        data = [asdict(conn) for conn in self._connections.values()]
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)

    def list(self) -> List[RemoteConnection]:
        """Return all known connections."""
        return list(self._connections.values())

    def get(self, connection_id: str) -> Optional[RemoteConnection]:
        """Return the connection with *connection_id*, or None if unknown."""
        return self._connections.get(connection_id)

    def add(self, connection: RemoteConnection) -> None:
        """Insert or replace a connection and persist immediately."""
        self._connections[connection.id] = connection
        self.save()

    def delete(self, connection_id: str) -> None:
        """Remove a connection if present and persist the change."""
        if connection_id in self._connections:
            del self._connections[connection_id]
            self.save()
|
||||
@@ -1,276 +0,0 @@
|
||||
"""Encrypted storage layer that wraps ObjectStorage with encryption support."""
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
from pathlib import Path
|
||||
from typing import Any, BinaryIO, Dict, Optional
|
||||
|
||||
from .encryption import EncryptionManager, EncryptionMetadata, EncryptionError
|
||||
from .storage import ObjectStorage, ObjectMeta, StorageError
|
||||
|
||||
|
||||
class EncryptedObjectStorage:
    """Object storage with transparent server-side encryption.

    This class wraps ObjectStorage and provides transparent encryption/decryption
    of objects based on bucket encryption configuration.

    Encryption is applied when:
    1. Bucket has default encryption configured (SSE-S3 or SSE-KMS)
    2. Client explicitly requests encryption via headers

    The encryption metadata is stored alongside object metadata.
    """

    # NOTE(review): not referenced by any method visible in this class —
    # presumably a size cutoff for a streaming code path; confirm call sites.
    STREAMING_THRESHOLD = 64 * 1024

    def __init__(self, storage: ObjectStorage, encryption_manager: EncryptionManager):
        # Wrapped plain storage backend and the manager that owns key material.
        self.storage = storage
        self.encryption = encryption_manager

    @property
    def root(self) -> Path:
        # Expose the wrapped storage root so this class is a drop-in replacement.
        return self.storage.root

    def _should_encrypt(self, bucket_name: str,
                        server_side_encryption: str | None = None) -> tuple[bool, str, str | None]:
        """Determine if object should be encrypted.

        Precedence: globally disabled -> no; explicit request header -> yes;
        otherwise fall back to the bucket's default encryption config.

        Returns:
            Tuple of (should_encrypt, algorithm, kms_key_id)
        """
        if not self.encryption.enabled:
            return False, "", None

        # Explicit per-request encryption wins over bucket defaults.
        if server_side_encryption:
            if server_side_encryption == "AES256":
                return True, "AES256", None
            elif server_side_encryption.startswith("aws:kms"):
                # Accept "aws:kms" or "aws:kms:<key-id>".
                parts = server_side_encryption.split(":")
                kms_key_id = parts[2] if len(parts) > 2 else None
                return True, "aws:kms", kms_key_id

        # Bucket-level default encryption configuration.
        try:
            encryption_config = self.storage.get_bucket_encryption(bucket_name)
            if encryption_config and encryption_config.get("Rules"):
                rule = encryption_config["Rules"][0]
                # AWS format: Rules[].ApplyServerSideEncryptionByDefault.SSEAlgorithm
                sse_default = rule.get("ApplyServerSideEncryptionByDefault", {})
                algorithm = sse_default.get("SSEAlgorithm", "AES256")
                kms_key_id = sse_default.get("KMSMasterKeyID")
                return True, algorithm, kms_key_id
        except StorageError:
            # No/unreadable bucket encryption config -> treat as unencrypted.
            pass

        return False, "", None

    def _is_encrypted(self, metadata: Dict[str, str]) -> bool:
        """Check if object is encrypted based on its metadata."""
        return "x-amz-server-side-encryption" in metadata

    def put_object(
        self,
        bucket_name: str,
        object_key: str,
        stream: BinaryIO,
        *,
        metadata: Optional[Dict[str, str]] = None,
        server_side_encryption: Optional[str] = None,
        kms_key_id: Optional[str] = None,
    ) -> ObjectMeta:
        """Store an object, optionally with encryption.

        Args:
            bucket_name: Name of the bucket
            object_key: Key for the object
            stream: Binary stream of object data
            metadata: Optional user metadata
            server_side_encryption: Encryption algorithm ("AES256" or "aws:kms")
            kms_key_id: KMS key ID (for aws:kms encryption)

        Returns:
            ObjectMeta with object information

        Raises:
            StorageError: wraps any EncryptionError from the encrypt step.
        """
        should_encrypt, algorithm, detected_kms_key = self._should_encrypt(
            bucket_name, server_side_encryption
        )

        # An explicitly passed key id takes precedence over the bucket default.
        if kms_key_id is None:
            kms_key_id = detected_kms_key

        if should_encrypt:
            # Whole object is buffered in memory before encryption — not a
            # streaming path; large uploads pay the full-size allocation here.
            data = stream.read()

            try:
                ciphertext, enc_metadata = self.encryption.encrypt_object(
                    data,
                    algorithm=algorithm,
                    kms_key_id=kms_key_id,
                    context={"bucket": bucket_name, "key": object_key},
                )

                # Merge user metadata with the encryption headers (copy so the
                # caller's dict is not mutated).
                combined_metadata = metadata.copy() if metadata else {}
                combined_metadata.update(enc_metadata.to_dict())

                encrypted_stream = io.BytesIO(ciphertext)
                result = self.storage.put_object(
                    bucket_name,
                    object_key,
                    encrypted_stream,
                    metadata=combined_metadata,
                )

                result.metadata = combined_metadata
                return result

            except EncryptionError as exc:
                raise StorageError(f"Encryption failed: {exc}") from exc
        else:
            # Plain passthrough when encryption is not requested/configured.
            return self.storage.put_object(
                bucket_name,
                object_key,
                stream,
                metadata=metadata,
            )

    def get_object_data(self, bucket_name: str, object_key: str) -> tuple[bytes, Dict[str, str]]:
        """Get object data, decrypting if necessary.

        Returns:
            Tuple of (data, metadata)

        Raises:
            StorageError: wraps any EncryptionError from the decrypt step.
        """
        path = self.storage.get_object_path(bucket_name, object_key)
        metadata = self.storage.get_object_metadata(bucket_name, object_key)

        # Entire object read into memory; see the note on put_object.
        with path.open("rb") as f:
            data = f.read()

        # Presence of encryption metadata decides whether to decrypt.
        enc_metadata = EncryptionMetadata.from_dict(metadata)
        if enc_metadata:
            try:
                data = self.encryption.decrypt_object(
                    data,
                    enc_metadata,
                    context={"bucket": bucket_name, "key": object_key},
                )
            except EncryptionError as exc:
                raise StorageError(f"Decryption failed: {exc}") from exc

        # Strip internal key-material headers before returning to callers.
        # NOTE(review): "x-amz-server-side-encryption" does NOT match the
        # "x-amz-encryption" prefix, so it is intentionally (?) kept — confirm.
        clean_metadata = {
            k: v for k, v in metadata.items()
            if not k.startswith("x-amz-encryption")
            and k != "x-amz-encrypted-data-key"
        }

        return data, clean_metadata

    def get_object_stream(self, bucket_name: str, object_key: str) -> tuple[BinaryIO, Dict[str, str], int]:
        """Get object as a stream, decrypting if necessary.

        Returns:
            Tuple of (stream, metadata, original_size)
        """
        # Built on the in-memory read above; "stream" is a BytesIO wrapper.
        data, metadata = self.get_object_data(bucket_name, object_key)
        return io.BytesIO(data), metadata, len(data)

    # ------------------------------------------------------------------
    # Passthrough delegates: everything below forwards unchanged to the
    # wrapped ObjectStorage so this class can replace it transparently.
    # ------------------------------------------------------------------

    def list_buckets(self):
        return self.storage.list_buckets()

    def bucket_exists(self, bucket_name: str) -> bool:
        return self.storage.bucket_exists(bucket_name)

    def create_bucket(self, bucket_name: str) -> None:
        return self.storage.create_bucket(bucket_name)

    def delete_bucket(self, bucket_name: str) -> None:
        return self.storage.delete_bucket(bucket_name)

    def bucket_stats(self, bucket_name: str, cache_ttl: int = 60):
        return self.storage.bucket_stats(bucket_name, cache_ttl)

    def list_objects(self, bucket_name: str):
        return self.storage.list_objects(bucket_name)

    def get_object_path(self, bucket_name: str, object_key: str):
        return self.storage.get_object_path(bucket_name, object_key)

    def get_object_metadata(self, bucket_name: str, object_key: str):
        return self.storage.get_object_metadata(bucket_name, object_key)

    def delete_object(self, bucket_name: str, object_key: str) -> None:
        return self.storage.delete_object(bucket_name, object_key)

    def purge_object(self, bucket_name: str, object_key: str) -> None:
        return self.storage.purge_object(bucket_name, object_key)

    def is_versioning_enabled(self, bucket_name: str) -> bool:
        return self.storage.is_versioning_enabled(bucket_name)

    def set_bucket_versioning(self, bucket_name: str, enabled: bool) -> None:
        return self.storage.set_bucket_versioning(bucket_name, enabled)

    def get_bucket_tags(self, bucket_name: str):
        return self.storage.get_bucket_tags(bucket_name)

    def set_bucket_tags(self, bucket_name: str, tags):
        return self.storage.set_bucket_tags(bucket_name, tags)

    def get_bucket_cors(self, bucket_name: str):
        return self.storage.get_bucket_cors(bucket_name)

    def set_bucket_cors(self, bucket_name: str, rules):
        return self.storage.set_bucket_cors(bucket_name, rules)

    def get_bucket_encryption(self, bucket_name: str):
        return self.storage.get_bucket_encryption(bucket_name)

    def set_bucket_encryption(self, bucket_name: str, config_payload):
        return self.storage.set_bucket_encryption(bucket_name, config_payload)

    def get_bucket_lifecycle(self, bucket_name: str):
        return self.storage.get_bucket_lifecycle(bucket_name)

    def set_bucket_lifecycle(self, bucket_name: str, rules):
        return self.storage.set_bucket_lifecycle(bucket_name, rules)

    def get_object_tags(self, bucket_name: str, object_key: str):
        return self.storage.get_object_tags(bucket_name, object_key)

    def set_object_tags(self, bucket_name: str, object_key: str, tags):
        return self.storage.set_object_tags(bucket_name, object_key, tags)

    def delete_object_tags(self, bucket_name: str, object_key: str):
        return self.storage.delete_object_tags(bucket_name, object_key)

    def list_object_versions(self, bucket_name: str, object_key: str):
        return self.storage.list_object_versions(bucket_name, object_key)

    def restore_object_version(self, bucket_name: str, object_key: str, version_id: str):
        return self.storage.restore_object_version(bucket_name, object_key, version_id)

    def list_orphaned_objects(self, bucket_name: str):
        return self.storage.list_orphaned_objects(bucket_name)

    # NOTE(review): multipart parts are forwarded unencrypted — parts uploaded
    # through this path bypass the put_object encryption logic; confirm intended.
    def initiate_multipart_upload(self, bucket_name: str, object_key: str, *, metadata=None) -> str:
        return self.storage.initiate_multipart_upload(bucket_name, object_key, metadata=metadata)

    def upload_multipart_part(self, bucket_name: str, upload_id: str, part_number: int, stream: BinaryIO) -> str:
        return self.storage.upload_multipart_part(bucket_name, upload_id, part_number, stream)

    def complete_multipart_upload(self, bucket_name: str, upload_id: str, ordered_parts):
        return self.storage.complete_multipart_upload(bucket_name, upload_id, ordered_parts)

    def abort_multipart_upload(self, bucket_name: str, upload_id: str) -> None:
        return self.storage.abort_multipart_upload(bucket_name, upload_id)

    def list_multipart_parts(self, bucket_name: str, upload_id: str):
        return self.storage.list_multipart_parts(bucket_name, upload_id)

    def get_bucket_quota(self, bucket_name: str):
        return self.storage.get_bucket_quota(bucket_name)

    def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
        return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)

    def _compute_etag(self, path: Path) -> str:
        # Reaches into a private helper of the wrapped storage; kept for
        # compatibility with callers that expect it on this class too.
        return self.storage._compute_etag(path)
|
||||
@@ -1,395 +0,0 @@
|
||||
"""Encryption providers for server-side and client-side encryption."""
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import secrets
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, BinaryIO, Dict, Generator, Optional
|
||||
|
||||
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
|
||||
|
||||
|
||||
class EncryptionError(Exception):
    """Raised when encryption/decryption fails.

    Single exception type for failures in this module: unreadable or
    unsaveable key files, invalid encrypted data keys, failed AEAD
    decryption, and truncated/invalid encrypted streams.
    """
|
||||
|
||||
|
||||
@dataclass
class EncryptionResult:
    """Result of encrypting data."""

    # Encrypted payload produced by the provider.
    ciphertext: bytes
    # Nonce used when encrypting the payload.
    nonce: bytes
    # Identifier of the key that wrapped the data key (e.g. "local").
    key_id: str
    # Per-object data key, encrypted under the master key (envelope encryption).
    encrypted_data_key: bytes
|
||||
|
||||
|
||||
@dataclass
class EncryptionMetadata:
    """Metadata stored with encrypted objects."""

    # Encryption algorithm label (e.g. "AES256" or "aws:kms").
    algorithm: str
    # Identifier of the wrapping key.
    key_id: str
    # Nonce used for the payload encryption.
    nonce: bytes
    # Envelope-encrypted per-object data key.
    encrypted_data_key: bytes

    def to_dict(self) -> Dict[str, str]:
        """Serialise to the x-amz-* headers stored alongside the object."""
        encoded_nonce = base64.b64encode(self.nonce).decode()
        encoded_data_key = base64.b64encode(self.encrypted_data_key).decode()
        return {
            "x-amz-server-side-encryption": self.algorithm,
            "x-amz-encryption-key-id": self.key_id,
            "x-amz-encryption-nonce": encoded_nonce,
            "x-amz-encrypted-data-key": encoded_data_key,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, str]) -> Optional["EncryptionMetadata"]:
        """Rebuild metadata from stored headers; None when absent or invalid."""
        algorithm = data.get("x-amz-server-side-encryption")
        if not algorithm:
            # No encryption header at all -> object is not encrypted.
            return None
        try:
            decoded_nonce = base64.b64decode(data.get("x-amz-encryption-nonce", ""))
            decoded_data_key = base64.b64decode(data.get("x-amz-encrypted-data-key", ""))
        except Exception:
            # Malformed base64 -> treat as missing metadata.
            return None
        return cls(
            algorithm=algorithm,
            key_id=data.get("x-amz-encryption-key-id", "local"),
            nonce=decoded_nonce,
            encrypted_data_key=decoded_data_key,
        )
|
||||
|
||||
|
||||
class EncryptionProvider:
    """Base class for encryption providers."""

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt *plaintext*; *context* carries optional request metadata."""
        raise NotImplementedError

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt *ciphertext* produced by a matching encrypt() call."""
        raise NotImplementedError

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form.

        Returns:
            Tuple of (plaintext_key, encrypted_key)
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class LocalKeyEncryption(EncryptionProvider):
    """SSE-S3 style encryption using a local master key.

    Uses envelope encryption:
    1. Generate a unique data key for each object
    2. Encrypt the data with the data key (AES-256-GCM)
    3. Encrypt the data key with the master key
    4. Store the encrypted data key alongside the ciphertext
    """

    KEY_ID = "local"

    # Key sizes accepted by AES-GCM (128/192/256-bit keys).
    _VALID_KEY_SIZES = (16, 24, 32)

    def __init__(self, master_key_path: Path):
        self.master_key_path = master_key_path
        # Loaded/created lazily on first use via the master_key property.
        self._master_key: bytes | None = None

    @property
    def master_key(self) -> bytes:
        """Master key bytes, loading or creating the key file on first access."""
        if self._master_key is None:
            self._master_key = self._load_or_create_master_key()
        return self._master_key

    def _load_or_create_master_key(self) -> bytes:
        """Load master key from file or generate a new one.

        Raises:
            EncryptionError: if the key file is unreadable, holds a key of an
                invalid size, or a fresh key cannot be persisted.
        """
        if self.master_key_path.exists():
            try:
                key = base64.b64decode(self.master_key_path.read_text().strip())
            except Exception as exc:
                raise EncryptionError(f"Failed to load master key: {exc}") from exc
            # Fail fast on a corrupt/truncated key file instead of surfacing an
            # opaque AES-GCM error on the first encrypt/decrypt.
            if len(key) not in self._VALID_KEY_SIZES:
                raise EncryptionError(
                    f"Failed to load master key: invalid key length {len(key)} "
                    f"(expected one of {self._VALID_KEY_SIZES} bytes)"
                )
            return key

        key = secrets.token_bytes(32)
        try:
            self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
            # Create the file with owner-only permissions (subject to umask)
            # BEFORE writing key material, so the key is never world-readable.
            self.master_key_path.touch(mode=0o600, exist_ok=True)
            self.master_key_path.write_text(base64.b64encode(key).decode())
        except OSError as exc:
            raise EncryptionError(f"Failed to save master key: {exc}") from exc
        return key

    def _encrypt_data_key(self, data_key: bytes) -> bytes:
        """Encrypt the data key with the master key."""
        aesgcm = AESGCM(self.master_key)
        nonce = secrets.token_bytes(12)
        encrypted = aesgcm.encrypt(nonce, data_key, None)
        # Stored layout: 12-byte nonce followed by the sealed key.
        return nonce + encrypted

    def _decrypt_data_key(self, encrypted_data_key: bytes) -> bytes:
        """Decrypt the data key using the master key."""
        if len(encrypted_data_key) < 12 + 32 + 16:  # nonce + key + tag
            raise EncryptionError("Invalid encrypted data key")
        aesgcm = AESGCM(self.master_key)
        nonce = encrypted_data_key[:12]
        ciphertext = encrypted_data_key[12:]
        try:
            return aesgcm.decrypt(nonce, ciphertext, None)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data key: {exc}") from exc

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form."""
        plaintext_key = secrets.token_bytes(32)
        encrypted_key = self._encrypt_data_key(plaintext_key)
        return plaintext_key, encrypted_key

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt data using envelope encryption."""
        data_key, encrypted_data_key = self.generate_data_key()

        aesgcm = AESGCM(data_key)
        nonce = secrets.token_bytes(12)
        ciphertext = aesgcm.encrypt(nonce, plaintext, None)

        return EncryptionResult(
            ciphertext=ciphertext,
            nonce=nonce,
            key_id=self.KEY_ID,
            encrypted_data_key=encrypted_data_key,
        )

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data using envelope encryption."""
        # Unseal the per-object data key first.
        data_key = self._decrypt_data_key(encrypted_data_key)

        # Then decrypt the payload with it.
        aesgcm = AESGCM(data_key)
        try:
            return aesgcm.decrypt(nonce, ciphertext, None)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
|
||||
|
||||
|
||||
class StreamingEncryptor:
    """Encrypts/decrypts data in streaming fashion for large files.

    For large files, we encrypt in chunks. Each chunk is encrypted with the
    same data key but a unique nonce derived from the base nonce + chunk index.

    On-disk framing: a 4-byte big-endian chunk count, then for each chunk a
    4-byte big-endian length prefix followed by the encrypted chunk bytes.
    """

    # Plaintext bytes consumed per chunk.
    CHUNK_SIZE = 64 * 1024
    # Byte width of each per-chunk length prefix.
    HEADER_SIZE = 4

    def __init__(self, provider: EncryptionProvider, chunk_size: int = CHUNK_SIZE):
        self.provider = provider
        self.chunk_size = chunk_size

    def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
        """Derive a unique nonce for each chunk."""
        # XOR the base nonce with the chunk index
        nonce_int = int.from_bytes(base_nonce, "big")
        derived = nonce_int ^ chunk_index
        return derived.to_bytes(12, "big")

    def encrypt_stream(self, stream: BinaryIO,
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream and return encrypted stream + metadata."""

        data_key, encrypted_data_key = self.provider.generate_data_key()
        base_nonce = secrets.token_bytes(12)

        aesgcm = AESGCM(data_key)
        # NOTE(review): all encrypted chunks are accumulated in memory before
        # being returned, so this is chunked rather than truly streaming.
        encrypted_chunks = []
        chunk_index = 0

        while True:
            chunk = stream.read(self.chunk_size)
            if not chunk:
                break

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)

            # Length-prefix each chunk so the decryptor can re-frame it.
            size_prefix = len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big")
            encrypted_chunks.append(size_prefix + encrypted_chunk)
            chunk_index += 1

        # Leading header = total chunk count (fixed 4 bytes; decrypt_stream
        # reads the same fixed width).
        header = chunk_index.to_bytes(4, "big")
        encrypted_data = header + b"".join(encrypted_chunks)

        metadata = EncryptionMetadata(
            algorithm="AES256",
            key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
            nonce=base_nonce,
            encrypted_data_key=encrypted_data_key,
        )

        return io.BytesIO(encrypted_data), metadata

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream using the provided metadata.

        Raises:
            EncryptionError: on unsupported provider, truncated input, or
                failed chunk authentication.
        """
        # Only the local provider exposes the data-key unwrap used here.
        if isinstance(self.provider, LocalKeyEncryption):
            data_key = self.provider._decrypt_data_key(metadata.encrypted_data_key)
        else:
            raise EncryptionError("Unsupported provider for streaming decryption")

        aesgcm = AESGCM(data_key)
        base_nonce = metadata.nonce

        # Mirror of the framing written by encrypt_stream.
        chunk_count_bytes = stream.read(4)
        if len(chunk_count_bytes) < 4:
            raise EncryptionError("Invalid encrypted stream: missing header")
        chunk_count = int.from_bytes(chunk_count_bytes, "big")

        decrypted_chunks = []
        for chunk_index in range(chunk_count):
            size_bytes = stream.read(self.HEADER_SIZE)
            if len(size_bytes) < self.HEADER_SIZE:
                raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
            chunk_size = int.from_bytes(size_bytes, "big")

            encrypted_chunk = stream.read(chunk_size)
            if len(encrypted_chunk) < chunk_size:
                raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            try:
                decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
                decrypted_chunks.append(decrypted_chunk)
            except Exception as exc:
                raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc

        return io.BytesIO(b"".join(decrypted_chunks))
|
||||
|
||||
|
||||
class EncryptionManager:
    """Manages encryption providers and operations."""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self._local_provider: LocalKeyEncryption | None = None
        self._kms_provider: Any = None  # Set by KMS module
        self._streaming_encryptor: StreamingEncryptor | None = None

    @property
    def enabled(self) -> bool:
        """Whether encryption is switched on in the configuration."""
        return self.config.get("encryption_enabled", False)

    @property
    def default_algorithm(self) -> str:
        """Configured default algorithm, falling back to AES256."""
        return self.config.get("default_encryption_algorithm", "AES256")

    def get_local_provider(self) -> LocalKeyEncryption:
        """Lazily build and cache the local-key provider."""
        if self._local_provider is None:
            configured = self.config.get("encryption_master_key_path", "data/.myfsio.sys/keys/master.key")
            self._local_provider = LocalKeyEncryption(Path(configured))
        return self._local_provider

    def set_kms_provider(self, kms_provider: Any) -> None:
        """Set the KMS provider (injected from kms module)."""
        self._kms_provider = kms_provider

    def get_provider(self, algorithm: str, kms_key_id: str | None = None) -> EncryptionProvider:
        """Get the appropriate encryption provider for the algorithm."""
        if algorithm == "AES256":
            return self.get_local_provider()
        if algorithm == "aws:kms":
            if self._kms_provider is None:
                raise EncryptionError("KMS is not configured")
            return self._kms_provider.get_provider(kms_key_id)
        raise EncryptionError(f"Unsupported encryption algorithm: {algorithm}")

    def get_streaming_encryptor(self) -> StreamingEncryptor:
        """Lazily build and cache the streaming encryptor (local keys)."""
        if self._streaming_encryptor is None:
            self._streaming_encryptor = StreamingEncryptor(self.get_local_provider())
        return self._streaming_encryptor

    def encrypt_object(self, data: bytes, algorithm: str = "AES256",
                       kms_key_id: str | None = None,
                       context: Dict[str, str] | None = None) -> tuple[bytes, EncryptionMetadata]:
        """Encrypt object data, returning ciphertext plus its metadata."""
        result = self.get_provider(algorithm, kms_key_id).encrypt(data, context)
        metadata = EncryptionMetadata(
            algorithm=algorithm,
            key_id=result.key_id,
            nonce=result.nonce,
            encrypted_data_key=result.encrypted_data_key,
        )
        return result.ciphertext, metadata

    def decrypt_object(self, ciphertext: bytes, metadata: EncryptionMetadata,
                       context: Dict[str, str] | None = None) -> bytes:
        """Decrypt object data using the metadata recorded at encrypt time."""
        provider = self.get_provider(metadata.algorithm, metadata.key_id)
        return provider.decrypt(
            ciphertext,
            metadata.nonce,
            metadata.encrypted_data_key,
            metadata.key_id,
            context,
        )

    def encrypt_stream(self, stream: BinaryIO, algorithm: str = "AES256",
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream for large files.

        NOTE(review): ``algorithm`` is currently ignored — streaming always
        goes through the local-key encryptor. Confirm whether KMS streaming
        is intentionally unsupported.
        """
        return self.get_streaming_encryptor().encrypt_stream(stream, context)

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream produced by :meth:`encrypt_stream`."""
        return self.get_streaming_encryptor().decrypt_stream(stream, metadata)
|
||||
|
||||
|
||||
class ClientEncryptionHelper:
    """Helpers for client-side encryption.

    Client-side encryption is performed by the client, but this helper
    provides key generation and materials for clients that need them.
    """

    @staticmethod
    def generate_client_key() -> Dict[str, str]:
        """Generate a new client encryption key."""
        from datetime import datetime, timezone
        raw = secrets.token_bytes(32)
        return {
            "key": base64.b64encode(raw).decode(),
            "algorithm": "AES-256-GCM",
            "created_at": datetime.now(timezone.utc).isoformat(),
        }

    @staticmethod
    def encrypt_with_key(plaintext: bytes, key_b64: str) -> Dict[str, str]:
        """Encrypt data with a client-provided key."""
        raw_key = base64.b64decode(key_b64)
        if len(raw_key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        nonce = secrets.token_bytes(12)
        sealed = AESGCM(raw_key).encrypt(nonce, plaintext, None)

        return {
            "ciphertext": base64.b64encode(sealed).decode(),
            "nonce": base64.b64encode(nonce).decode(),
            "algorithm": "AES-256-GCM",
        }

    @staticmethod
    def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str) -> bytes:
        """Decrypt data with a client-provided key."""
        raw_key = base64.b64decode(key_b64)
        raw_nonce = base64.b64decode(nonce_b64)
        sealed = base64.b64decode(ciphertext_b64)

        if len(raw_key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        try:
            return AESGCM(raw_key).decrypt(raw_nonce, sealed, None)
        except Exception as exc:
            raise EncryptionError(f"Decryption failed: {exc}") from exc
|
||||
187
app/errors.py
187
app/errors.py
@@ -1,187 +0,0 @@
|
||||
"""Standardized error handling for API and UI responses."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Dict, Any
|
||||
from xml.etree.ElementTree import Element, SubElement, tostring
|
||||
|
||||
from flask import Response, jsonify, request, flash, redirect, url_for, g
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class AppError(Exception):
    """Base application error with multi-format response support."""
    code: str
    message: str
    status_code: int = 500
    details: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Initialize the Exception base so str(err) yields the message.
        super().__init__(self.message)

    def to_xml_response(self) -> Response:
        """Convert to S3 API XML error response."""
        root = Element("Error")
        SubElement(root, "Code").text = self.code
        SubElement(root, "Message").text = self.message
        rid = getattr(g, 'request_id', None) if g else None
        SubElement(root, "RequestId").text = rid or "unknown"
        body = tostring(root, encoding="utf-8")
        return Response(body, status=self.status_code, mimetype="application/xml")

    def to_json_response(self) -> tuple[Response, int]:
        """Convert to JSON error response for UI AJAX calls."""
        detail: Dict[str, Any] = {"code": self.code, "message": self.message}
        if self.details:
            detail["details"] = self.details
        payload: Dict[str, Any] = {"success": False, "error": detail}
        return jsonify(payload), self.status_code

    def to_flash_message(self) -> str:
        """Convert to user-friendly flash message."""
        return self.message
|
||||
|
||||
|
||||
@dataclass
class BucketNotFoundError(AppError):
    """Bucket does not exist (S3 ``NoSuchBucket``, HTTP 404)."""
    code: str = "NoSuchBucket"
    message: str = "The specified bucket does not exist"
    status_code: int = 404
|
||||
|
||||
|
||||
@dataclass
class BucketAlreadyExistsError(AppError):
    """Bucket already exists (S3 ``BucketAlreadyExists``, HTTP 409)."""
    code: str = "BucketAlreadyExists"
    message: str = "The requested bucket name is not available"
    status_code: int = 409
|
||||
|
||||
|
||||
@dataclass
class BucketNotEmptyError(AppError):
    """Bucket is not empty (S3 ``BucketNotEmpty``, HTTP 409)."""
    code: str = "BucketNotEmpty"
    message: str = "The bucket you tried to delete is not empty"
    status_code: int = 409
|
||||
|
||||
|
||||
@dataclass
class ObjectNotFoundError(AppError):
    """Object does not exist (S3 ``NoSuchKey``, HTTP 404)."""
    code: str = "NoSuchKey"
    message: str = "The specified key does not exist"
    status_code: int = 404
|
||||
|
||||
|
||||
@dataclass
class InvalidObjectKeyError(AppError):
    """Invalid object key (``InvalidKey``, HTTP 400)."""
    code: str = "InvalidKey"
    message: str = "The specified key is not valid"
    status_code: int = 400
|
||||
|
||||
|
||||
@dataclass
class AccessDeniedError(AppError):
    """Access denied (S3 ``AccessDenied``, HTTP 403)."""
    code: str = "AccessDenied"
    message: str = "Access Denied"
    status_code: int = 403
|
||||
|
||||
|
||||
@dataclass
class InvalidCredentialsError(AppError):
    """Invalid credentials (S3 ``InvalidAccessKeyId``, HTTP 403)."""
    code: str = "InvalidAccessKeyId"
    message: str = "The access key ID you provided does not exist"
    status_code: int = 403
|
||||
|
||||
@dataclass
class MalformedRequestError(AppError):
    """Malformed request body (S3 ``MalformedXML``, HTTP 400)."""
    code: str = "MalformedXML"
    message: str = "The XML you provided was not well-formed"
    status_code: int = 400
|
||||
|
||||
|
||||
@dataclass
class InvalidArgumentError(AppError):
    """Invalid argument (S3 ``InvalidArgument``, HTTP 400)."""
    code: str = "InvalidArgument"
    message: str = "Invalid argument"
    status_code: int = 400
|
||||
|
||||
|
||||
@dataclass
class EntityTooLargeError(AppError):
    """Upload exceeds size limit (S3 ``EntityTooLarge``, HTTP 413)."""
    code: str = "EntityTooLarge"
    message: str = "Your proposed upload exceeds the maximum allowed size"
    status_code: int = 413
|
||||
|
||||
|
||||
@dataclass
class QuotaExceededAppError(AppError):
    """Bucket quota exceeded (``QuotaExceeded``, HTTP 403)."""
    code: str = "QuotaExceeded"
    message: str = "The bucket quota has been exceeded"
    status_code: int = 403
    # Optional quota definition and current usage; when supplied they are
    # folded into the inherited ``details`` dict (see __post_init__) so the
    # JSON error response can surface them.
    quota: Optional[Dict[str, Any]] = None
    usage: Optional[Dict[str, int]] = None

    def __post_init__(self):
        # Populate ``details`` from quota/usage before the base class
        # initializes the Exception machinery.
        if self.quota or self.usage:
            self.details = {}
            if self.quota:
                self.details["quota"] = self.quota
            if self.usage:
                self.details["usage"] = self.usage
        super().__post_init__()
|
||||
|
||||
|
||||
def handle_app_error(error: AppError) -> Response:
    """Handle application errors with appropriate response format.

    API requests (paths outside ``/ui``) receive an S3-style XML error.
    UI requests receive JSON when the client asked for it, otherwise a
    flash message followed by a redirect back to a same-host page.
    """
    from urllib.parse import urlsplit

    log_extra = {"error_code": error.code}
    if error.details:
        log_extra["details"] = error.details

    logger.error(f"{error.code}: {error.message}", extra=log_extra)

    if request.path.startswith('/ui'):
        wants_json = (
            request.is_json or
            request.headers.get('X-Requested-With') == 'XMLHttpRequest' or
            'application/json' in request.accept_mimetypes.values()
        )
        if wants_json:
            return error.to_json_response()
        flash(error.to_flash_message(), 'danger')
        referrer = request.referrer
        # Only redirect back when the referrer's host component matches ours
        # exactly. The previous substring test (``request.host in referrer``)
        # was an open-redirect vector: an attacker could embed our host name
        # anywhere in a foreign URL (path, query string, subdomain).
        if referrer and urlsplit(referrer).netloc == request.host:
            return redirect(referrer)
        return redirect(url_for('ui.buckets_overview'))
    else:
        return error.to_xml_response()
|
||||
|
||||
|
||||
def register_error_handlers(app):
    """Register error handlers with a Flask app."""
    # The base class is registered first, then each concrete subclass, all
    # pointing at the same handler.
    handled = (
        AppError,
        BucketNotFoundError, BucketAlreadyExistsError, BucketNotEmptyError,
        ObjectNotFoundError, InvalidObjectKeyError,
        AccessDeniedError, InvalidCredentialsError,
        MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
        QuotaExceededAppError,
    )
    for exc_type in handled:
        app.register_error_handler(exc_type, handle_app_error)
|
||||
@@ -1,17 +0,0 @@
|
||||
"""Application-wide extension instances."""
|
||||
from flask import g
|
||||
from flask_limiter import Limiter
|
||||
from flask_limiter.util import get_remote_address
|
||||
from flask_wtf import CSRFProtect
|
||||
|
||||
def get_rate_limit_key():
    """Generate rate limit key based on authenticated user."""
    # Use the authenticated principal's access key when one is attached to
    # the request context; otherwise fall back to the client address.
    principal = getattr(g, 'principal', None)
    if principal:
        return principal.access_key
    return get_remote_address()
|
||||
|
||||
# Shared rate limiter instance; configured in app factory.
# Keys requests per authenticated access key, falling back to remote IP.
limiter = Limiter(key_func=get_rate_limit_key)

# Global CSRF protection for UI routes.
csrf = CSRFProtect()
|
||||
456
app/iam.py
456
app/iam.py
@@ -1,456 +0,0 @@
|
||||
"""Lightweight IAM-style user and policy management."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import math
|
||||
import secrets
|
||||
from collections import deque
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set
|
||||
|
||||
|
||||
class IamError(RuntimeError):
    """Raised when authentication or authorization fails.

    Also used for IAM configuration problems: missing or corrupt config
    files, invalid policy payloads, and unknown users.
    """
|
||||
|
||||
|
||||
# Canonical coarse-grained S3 action categories used in policies.
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"}
# Canonical IAM management actions.
IAM_ACTIONS = {
    "iam:list_users",
    "iam:create_user",
    "iam:delete_user",
    "iam:rotate_key",
    "iam:update_policy",
}
# Full set of action names a policy may grant ("iam:*" is a wildcard for
# all IAM actions).
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}

# Maps lower-cased AWS-style action names (e.g. "s3:getobject") onto the
# canonical coarse-grained actions above.
ACTION_ALIASES = {
    # List actions
    "list": "list",
    "s3:listbucket": "list",
    "s3:listallmybuckets": "list",
    "s3:listbucketversions": "list",
    "s3:listmultipartuploads": "list",
    "s3:listparts": "list",
    # Read actions
    "read": "read",
    "s3:getobject": "read",
    "s3:getobjectversion": "read",
    "s3:getobjecttagging": "read",
    "s3:getobjectversiontagging": "read",
    "s3:getobjectacl": "read",
    "s3:getbucketversioning": "read",
    "s3:headobject": "read",
    "s3:headbucket": "read",
    # Write actions
    "write": "write",
    "s3:putobject": "write",
    "s3:createbucket": "write",
    "s3:putobjecttagging": "write",
    "s3:putbucketversioning": "write",
    "s3:createmultipartupload": "write",
    "s3:uploadpart": "write",
    "s3:completemultipartupload": "write",
    "s3:abortmultipartupload": "write",
    "s3:copyobject": "write",
    # Delete actions
    "delete": "delete",
    "s3:deleteobject": "delete",
    "s3:deleteobjectversion": "delete",
    "s3:deletebucket": "delete",
    "s3:deleteobjecttagging": "delete",
    # Share actions (ACL)
    "share": "share",
    "s3:putobjectacl": "share",
    "s3:putbucketacl": "share",
    "s3:getbucketacl": "share",
    # Policy actions
    "policy": "policy",
    "s3:putbucketpolicy": "policy",
    "s3:getbucketpolicy": "policy",
    "s3:deletebucketpolicy": "policy",
    # Replication actions
    "replication": "replication",
    "s3:getreplicationconfiguration": "replication",
    "s3:putreplicationconfiguration": "replication",
    "s3:deletereplicationconfiguration": "replication",
    "s3:replicateobject": "replication",
    "s3:replicatetags": "replication",
    "s3:replicatedelete": "replication",
    # IAM actions
    "iam:listusers": "iam:list_users",
    "iam:createuser": "iam:create_user",
    "iam:deleteuser": "iam:delete_user",
    "iam:rotateaccesskey": "iam:rotate_key",
    "iam:putuserpolicy": "iam:update_policy",
    "iam:*": "iam:*",
}
|
||||
|
||||
|
||||
@dataclass
class Policy:
    """A grant of actions on a single bucket ("*" matches any bucket)."""
    # Lower-cased bucket name, or "*" for all buckets.
    bucket: str
    # Canonical action names (subset of ALLOWED_ACTIONS).
    actions: Set[str]
|
||||
|
||||
|
||||
@dataclass
class Principal:
    """An authenticated identity together with its attached policies."""
    access_key: str
    display_name: str
    policies: List[Policy]
|
||||
|
||||
|
||||
class IamService:
    """Loads IAM configuration, manages users, and evaluates policies.

    Users and their policies live in a single JSON file. The file is
    re-read whenever its mtime changes, so external edits are picked up.
    Failed logins are throttled per access key with a sliding window.
    """

    def __init__(self, config_path: Path, auth_max_attempts: int = 5, auth_lockout_minutes: int = 15) -> None:
        self.config_path = Path(config_path)
        self.auth_max_attempts = auth_max_attempts
        self.auth_lockout_window = timedelta(minutes=auth_lockout_minutes)
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        if not self.config_path.exists():
            self._write_default()
        self._users: Dict[str, Dict[str, Any]] = {}
        self._raw_config: Dict[str, Any] = {}
        # Sliding window of failed auth timestamps, keyed by access key.
        self._failed_attempts: Dict[str, Deque[datetime]] = {}
        self._last_load_time = 0.0
        self._load()

    def _maybe_reload(self) -> None:
        """Reload configuration if the file has changed on disk."""
        try:
            if self.config_path.stat().st_mtime > self._last_load_time:
                self._load()
        except OSError:
            # Best-effort: an unreadable stat keeps the in-memory config.
            pass

    # ---------------------- authz helpers ----------------------
    def authenticate(self, access_key: str, secret_key: str) -> Principal:
        """Validate credentials, enforcing the per-key lockout window."""
        self._maybe_reload()
        access_key = (access_key or "").strip()
        secret_key = (secret_key or "").strip()
        if not access_key or not secret_key:
            raise IamError("Missing access credentials")
        if self._is_locked_out(access_key):
            seconds = self._seconds_until_unlock(access_key)
            raise IamError(
                f"Access temporarily locked. Try again in {seconds} seconds."
            )
        record = self._users.get(access_key)
        # secrets.compare_digest provides a constant-time comparison so the
        # secret cannot be probed byte-by-byte via response timing (a plain
        # != short-circuits at the first differing byte).
        if not record or not secrets.compare_digest(
            record["secret_key"].encode("utf-8"), secret_key.encode("utf-8")
        ):
            self._record_failed_attempt(access_key)
            raise IamError("Invalid credentials")
        self._clear_failed_attempts(access_key)
        return self._build_principal(access_key, record)

    def _record_failed_attempt(self, access_key: str) -> None:
        """Append a failure timestamp for this key (pruning stale ones)."""
        if not access_key:
            return
        attempts = self._failed_attempts.setdefault(access_key, deque())
        self._prune_attempts(attempts)
        attempts.append(datetime.now())

    def _clear_failed_attempts(self, access_key: str) -> None:
        """Reset the failure window after a successful login."""
        if not access_key:
            return
        self._failed_attempts.pop(access_key, None)

    def _prune_attempts(self, attempts: Deque[datetime]) -> None:
        """Drop failure timestamps older than the lockout window."""
        cutoff = datetime.now() - self.auth_lockout_window
        while attempts and attempts[0] < cutoff:
            attempts.popleft()

    def _is_locked_out(self, access_key: str) -> bool:
        """True when recent failures for this key reach the threshold."""
        if not access_key:
            return False
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return False
        self._prune_attempts(attempts)
        return len(attempts) >= self.auth_max_attempts

    def _seconds_until_unlock(self, access_key: str) -> int:
        """Seconds remaining until the oldest failure ages out."""
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return 0
        self._prune_attempts(attempts)
        if len(attempts) < self.auth_max_attempts:
            return 0
        oldest = attempts[0]
        elapsed = (datetime.now() - oldest).total_seconds()
        return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))

    def principal_for_key(self, access_key: str) -> Principal:
        """Look up a principal by access key, raising if unknown."""
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        return self._build_principal(access_key, record)

    def secret_for_key(self, access_key: str) -> str:
        """Return the secret for an access key, raising if unknown."""
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        return record["secret_key"]

    def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
        """Raise IamError unless the principal may perform action on bucket."""
        action = self._normalize_action(action)
        if action not in ALLOWED_ACTIONS:
            raise IamError(f"Unknown action '{action}'")
        bucket_name = bucket_name or "*"
        normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
        if not self._is_allowed(principal, normalized, action):
            raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")

    def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
        """Filter a bucket listing down to those the principal may list."""
        return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]

    def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
        """Evaluate the principal's policies for one (bucket, action) pair."""
        bucket_name = bucket_name.lower()
        for policy in principal.policies:
            if policy.bucket not in {"*", bucket_name}:
                continue
            if "*" in policy.actions or action in policy.actions:
                return True
            # "iam:*" grants every IAM-namespaced action.
            if "iam:*" in policy.actions and action.startswith("iam:"):
                return True
        return False

    # ---------------------- management helpers ----------------------
    def list_users(self) -> List[Dict[str, Any]]:
        """Return all users with their policies (secrets excluded)."""
        listing: List[Dict[str, Any]] = []
        for access_key, record in self._users.items():
            listing.append(
                {
                    "access_key": access_key,
                    "display_name": record["display_name"],
                    "policies": [
                        {"bucket": policy.bucket, "actions": sorted(policy.actions)}
                        for policy in record["policies"]
                    ],
                }
            )
        return listing

    def create_user(
        self,
        *,
        display_name: str,
        policies: Optional[Sequence[Dict[str, Any]]] = None,
        access_key: str | None = None,
        secret_key: str | None = None,
    ) -> Dict[str, str]:
        """Create a user, generating credentials when not supplied."""
        access_key = (access_key or self._generate_access_key()).strip()
        if not access_key:
            raise IamError("Access key cannot be empty")
        if access_key in self._users:
            raise IamError("Access key already exists")
        secret_key = secret_key or self._generate_secret_key()
        sanitized_policies = self._prepare_policy_payload(policies)
        record = {
            "access_key": access_key,
            "secret_key": secret_key,
            "display_name": display_name or access_key,
            "policies": sanitized_policies,
        }
        self._raw_config.setdefault("users", []).append(record)
        self._save()
        self._load()
        return {"access_key": access_key, "secret_key": secret_key}

    def rotate_secret(self, access_key: str) -> str:
        """Replace a user's secret key with a freshly generated one."""
        user = self._get_raw_user(access_key)
        new_secret = self._generate_secret_key()
        user["secret_key"] = new_secret
        self._save()
        self._load()
        return new_secret

    def update_user(self, access_key: str, display_name: str) -> None:
        """Update a user's display name."""
        user = self._get_raw_user(access_key)
        user["display_name"] = display_name
        self._save()
        self._load()

    def delete_user(self, access_key: str) -> None:
        """Delete a user; the last remaining user cannot be removed."""
        users = self._raw_config.get("users", [])
        if len(users) <= 1:
            raise IamError("Cannot delete the only user")
        remaining = [user for user in users if user["access_key"] != access_key]
        if len(remaining) == len(users):
            raise IamError("User not found")
        self._raw_config["users"] = remaining
        self._save()
        self._load()

    def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
        """Replace a user's policies with a sanitized payload."""
        user = self._get_raw_user(access_key)
        user["policies"] = self._prepare_policy_payload(policies)
        self._save()
        self._load()

    # ---------------------- config helpers ----------------------
    def _load(self) -> None:
        """Read the JSON config from disk into the in-memory structures."""
        try:
            self._last_load_time = self.config_path.stat().st_mtime
            content = self.config_path.read_text(encoding='utf-8')
            raw = json.loads(content)
        except FileNotFoundError:
            raise IamError(f"IAM config not found: {self.config_path}")
        except json.JSONDecodeError as e:
            raise IamError(f"Corrupted IAM config (invalid JSON): {e}")
        except PermissionError as e:
            raise IamError(f"Cannot read IAM config (permission denied): {e}")
        except (OSError, ValueError) as e:
            raise IamError(f"Failed to load IAM config: {e}")

        users: Dict[str, Dict[str, Any]] = {}
        for user in raw.get("users", []):
            policies = self._build_policy_objects(user.get("policies", []))
            users[user["access_key"]] = {
                "secret_key": user["secret_key"],
                "display_name": user.get("display_name", user["access_key"]),
                "policies": policies,
            }
        if not users:
            raise IamError("IAM configuration contains no users")
        self._users = users
        # Keep a normalized raw copy for round-tripping edits back to disk.
        self._raw_config = {
            "users": [
                {
                    "access_key": entry["access_key"],
                    "secret_key": entry["secret_key"],
                    "display_name": entry.get("display_name", entry["access_key"]),
                    "policies": entry.get("policies", []),
                }
                for entry in raw.get("users", [])
            ]
        }

    def _save(self) -> None:
        """Atomically persist the raw config (write temp file, then rename)."""
        try:
            temp_path = self.config_path.with_suffix('.json.tmp')
            temp_path.write_text(json.dumps(self._raw_config, indent=2), encoding='utf-8')
            temp_path.replace(self.config_path)
        except (OSError, PermissionError) as e:
            raise IamError(f"Cannot save IAM config: {e}")

    # ---------------------- insight helpers ----------------------
    def config_summary(self) -> Dict[str, Any]:
        """Small summary of the IAM configuration for diagnostics."""
        return {
            "path": str(self.config_path),
            "user_count": len(self._users),
            "allowed_actions": sorted(ALLOWED_ACTIONS),
        }

    def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
        """Export the raw config, masking secret keys by default."""
        payload: Dict[str, Any] = {"users": []}
        for user in self._raw_config.get("users", []):
            record = dict(user)
            if mask_secrets and "secret_key" in record:
                record["secret_key"] = "••••••••••"
            payload["users"].append(record)
        return payload

    def _build_policy_objects(self, policies: Sequence[Dict[str, Any]]) -> List[Policy]:
        """Convert raw policy dicts into Policy objects with canonical actions."""
        entries: List[Policy] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    # A bare "*" grants everything; no need to look further.
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if action_set:
                entries.append(Policy(bucket=bucket, actions=action_set))
        return entries

    def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        """Sanitize policies for storage; defaults to a broad S3 grant."""
        if not policies:
            policies = (
                {
                    "bucket": "*",
                    "actions": ["list", "read", "write", "delete", "share", "policy"],
                },
            )
        sanitized: List[Dict[str, Any]] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if not action_set:
                # Skip policies that resolve to no recognizable action.
                continue
            sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
        if not sanitized:
            raise IamError("At least one policy with valid actions is required")
        return sanitized

    def _build_principal(self, access_key: str, record: Dict[str, Any]) -> Principal:
        """Assemble a Principal from an in-memory user record."""
        return Principal(
            access_key=access_key,
            display_name=record["display_name"],
            policies=record["policies"],
        )

    def _normalize_action(self, action: str) -> str:
        """Map an action name to its canonical form; "" when unrecognized."""
        if not action:
            return ""
        lowered = action.strip().lower()
        if lowered == "*":
            return "*"
        candidate = ACTION_ALIASES.get(lowered, lowered)
        return candidate if candidate in ALLOWED_ACTIONS else ""

    def _write_default(self) -> None:
        """Seed the config with a default local admin account."""
        default = {
            "users": [
                {
                    "access_key": "localadmin",
                    "secret_key": "localadmin",
                    "display_name": "Local Admin",
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }
        # Explicit encoding keeps the file readable by _load (utf-8) on
        # systems with a non-UTF-8 locale.
        self.config_path.write_text(json.dumps(default, indent=2), encoding="utf-8")

    def _generate_access_key(self) -> str:
        """Random 16-hex-character access key."""
        return secrets.token_hex(8)

    def _generate_secret_key(self) -> str:
        """Random URL-safe secret key."""
        return secrets.token_urlsafe(24)

    def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
        """Find the mutable raw-config record for a user, raising if absent."""
        for user in self._raw_config.get("users", []):
            if user["access_key"] == access_key:
                return user
        raise IamError("User not found")

    def get_secret_key(self, access_key: str) -> str | None:
        """Secret for an access key, or None when unknown."""
        self._maybe_reload()
        record = self._users.get(access_key)
        return record["secret_key"] if record else None

    def get_principal(self, access_key: str) -> Principal | None:
        """Principal for an access key, or None when unknown."""
        self._maybe_reload()
        record = self._users.get(access_key)
        return self._build_principal(access_key, record) if record else None
|
||||
344
app/kms.py
344
app/kms.py
@@ -1,344 +0,0 @@
|
||||
"""Key Management Service (KMS) for encryption key management."""
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import secrets
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
|
||||
|
||||
from .encryption import EncryptionError, EncryptionProvider, EncryptionResult
|
||||
|
||||
|
||||
@dataclass
class KMSKey:
    """Represents a KMS encryption key."""
    key_id: str
    description: str
    created_at: str
    enabled: bool = True
    # Secret key bytes; excluded from repr so it never leaks into logs.
    key_material: bytes = field(default=b"", repr=False)

    @property
    def arn(self) -> str:
        """AWS-style ARN for this locally managed key."""
        return f"arn:aws:kms:local:000000000000:key/{self.key_id}"

    def to_dict(self, include_key: bool = False) -> Dict[str, Any]:
        """Serialize to an AWS-KMS-style metadata dict.

        The raw key material is only included when ``include_key`` is True.
        """
        state = "Enabled" if self.enabled else "Disabled"
        payload: Dict[str, Any] = {
            "KeyId": self.key_id,
            "Arn": self.arn,
            "Description": self.description,
            "CreationDate": self.created_at,
            "Enabled": self.enabled,
            "KeyState": state,
            "KeyUsage": "ENCRYPT_DECRYPT",
            "KeySpec": "SYMMETRIC_DEFAULT",
        }
        if include_key:
            payload["KeyMaterial"] = base64.b64encode(self.key_material).decode()
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "KMSKey":
        """Rebuild a key from ``to_dict`` output (key material optional)."""
        material = base64.b64decode(data["KeyMaterial"]) if "KeyMaterial" in data else b""
        return cls(
            key_id=data["KeyId"],
            description=data.get("Description", ""),
            created_at=data.get("CreationDate", datetime.now(timezone.utc).isoformat()),
            enabled=data.get("Enabled", True),
            key_material=material,
        )
|
||||
|
||||
|
||||
class KMSEncryptionProvider(EncryptionProvider):
    """Encryption provider using a specific KMS key."""

    def __init__(self, kms: "KMSManager", key_id: str):
        self.kms = kms
        self.key_id = key_id

    @property
    def KEY_ID(self) -> str:
        return self.key_id

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key encrypted with the KMS key."""
        return self.kms.generate_data_key(self.key_id)

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt data using envelope encryption with KMS."""
        data_key, wrapped_key = self.generate_data_key()

        # The encryption context, when present, is bound as GCM associated
        # data. NOTE(review): json.dumps keeps dict insertion order, so the
        # same key ordering must be supplied on decrypt — confirm callers do.
        aad = json.dumps(context).encode() if context else None
        nonce = secrets.token_bytes(12)
        sealed = AESGCM(data_key).encrypt(nonce, plaintext, aad)

        return EncryptionResult(
            ciphertext=sealed,
            nonce=nonce,
            key_id=self.key_id,
            encrypted_data_key=wrapped_key,
        )

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data using envelope encryption with KMS."""
        # Note: Data key is encrypted without context (AAD), so we decrypt without context
        data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)

        aad = json.dumps(context).encode() if context else None
        try:
            return AESGCM(data_key).decrypt(nonce, ciphertext, aad)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
|
||||
|
||||
|
||||
class KMSManager:
    """Manages KMS keys and operations.

    This is a local implementation that mimics AWS KMS functionality.
    Keys are stored encrypted on disk: each key's material is wrapped with
    a master key (AES-256-GCM) before being written to ``keys_path``.
    """

    def __init__(self, keys_path: Path, master_key_path: Path):
        self.keys_path = keys_path
        self.master_key_path = master_key_path
        self._keys: Dict[str, KMSKey] = {}
        self._master_key: bytes | None = None
        # Keys are loaded lazily on first use (see _load_keys).
        self._loaded = False

    @property
    def master_key(self) -> bytes:
        """Load or create the master key for encrypting KMS keys."""
        if self._master_key is None:
            if self.master_key_path.exists():
                self._master_key = base64.b64decode(
                    self.master_key_path.read_text().strip()
                )
            else:
                self._master_key = secrets.token_bytes(32)
                self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
                self.master_key_path.write_text(
                    base64.b64encode(self._master_key).decode()
                )
                # This file is the root of trust for all stored key material:
                # restrict it to the owner. Best-effort — chmod may be a
                # no-op or fail on non-POSIX filesystems.
                try:
                    self.master_key_path.chmod(0o600)
                except OSError:
                    pass
        return self._master_key

    def _load_keys(self) -> None:
        """Load keys from disk (idempotent; best-effort on corrupt data)."""
        if self._loaded:
            return

        if self.keys_path.exists():
            try:
                data = json.loads(self.keys_path.read_text(encoding="utf-8"))
            except (OSError, ValueError):
                data = {}
            if not isinstance(data, dict):
                data = {}
            for key_data in data.get("keys", []):
                # Skip individual corrupt records instead of discarding the
                # whole key store on the first bad entry (the previous code
                # aborted the entire load on any exception).
                try:
                    key = KMSKey.from_dict(key_data)
                    if key_data.get("EncryptedKeyMaterial"):
                        encrypted = base64.b64decode(key_data["EncryptedKeyMaterial"])
                        key.key_material = self._decrypt_key_material(encrypted)
                    self._keys[key.key_id] = key
                except Exception:
                    continue

        self._loaded = True

    def _save_keys(self) -> None:
        """Save keys to disk (with encrypted key material)."""
        keys_data = []
        for key in self._keys.values():
            data = key.to_dict(include_key=False)
            encrypted = self._encrypt_key_material(key.key_material)
            data["EncryptedKeyMaterial"] = base64.b64encode(encrypted).decode()
            keys_data.append(data)

        self.keys_path.parent.mkdir(parents=True, exist_ok=True)
        self.keys_path.write_text(
            json.dumps({"keys": keys_data}, indent=2),
            encoding="utf-8"
        )

    def _encrypt_key_material(self, key_material: bytes) -> bytes:
        """Encrypt key material with the master key; returns nonce || ciphertext."""
        aesgcm = AESGCM(self.master_key)
        nonce = secrets.token_bytes(12)  # standard 96-bit GCM nonce
        ciphertext = aesgcm.encrypt(nonce, key_material, None)
        return nonce + ciphertext

    def _decrypt_key_material(self, encrypted: bytes) -> bytes:
        """Decrypt key material (nonce || ciphertext) with the master key."""
        aesgcm = AESGCM(self.master_key)
        nonce = encrypted[:12]
        ciphertext = encrypted[12:]
        return aesgcm.decrypt(nonce, ciphertext, None)

    def _require_key(self, key_id: str, check_enabled: bool = False) -> KMSKey:
        """Return the key or raise EncryptionError (optionally also if disabled)."""
        key = self._keys.get(key_id)
        if not key:
            raise EncryptionError(f"Key not found: {key_id}")
        if check_enabled and not key.enabled:
            raise EncryptionError(f"Key is disabled: {key_id}")
        return key

    def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
        """Create a new KMS key.

        Raises:
            EncryptionError: if ``key_id`` is already in use.
        """
        self._load_keys()

        if key_id is None:
            key_id = str(uuid.uuid4())

        if key_id in self._keys:
            raise EncryptionError(f"Key already exists: {key_id}")

        key = KMSKey(
            key_id=key_id,
            description=description,
            created_at=datetime.now(timezone.utc).isoformat(),
            enabled=True,
            key_material=secrets.token_bytes(32),
        )

        self._keys[key_id] = key
        self._save_keys()
        return key

    def get_key(self, key_id: str) -> KMSKey | None:
        """Get a key by ID, or None if it does not exist."""
        self._load_keys()
        return self._keys.get(key_id)

    def list_keys(self) -> List[KMSKey]:
        """List all keys."""
        self._load_keys()
        return list(self._keys.values())

    def enable_key(self, key_id: str) -> None:
        """Enable a key."""
        self._load_keys()
        key = self._require_key(key_id)
        key.enabled = True
        self._save_keys()

    def disable_key(self, key_id: str) -> None:
        """Disable a key."""
        self._load_keys()
        key = self._require_key(key_id)
        key.enabled = False
        self._save_keys()

    def delete_key(self, key_id: str) -> None:
        """Delete a key (schedule for deletion in real KMS)."""
        self._load_keys()
        if key_id not in self._keys:
            raise EncryptionError(f"Key not found: {key_id}")
        del self._keys[key_id]
        self._save_keys()

    def encrypt(self, key_id: str, plaintext: bytes,
                context: Dict[str, str] | None = None) -> bytes:
        """Encrypt data directly with a KMS key.

        The returned blob is self-describing:
        ``len(key_id) (2 bytes BE) || key_id || nonce (12 bytes) || ciphertext``.
        """
        self._load_keys()
        key = self._require_key(key_id, check_enabled=True)

        aesgcm = AESGCM(key.key_material)
        nonce = secrets.token_bytes(12)
        # NOTE(review): context is serialized without sort_keys, so the AAD
        # depends on dict ordering; decrypt must receive the same ordering.
        aad = json.dumps(context).encode() if context else None
        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

        key_id_bytes = key_id.encode("utf-8")
        return len(key_id_bytes).to_bytes(2, "big") + key_id_bytes + nonce + ciphertext

    def decrypt(self, ciphertext: bytes,
                context: Dict[str, str] | None = None) -> tuple[bytes, str]:
        """Decrypt data directly with a KMS key.

        Returns:
            Tuple of (plaintext, key_id)

        Raises:
            EncryptionError: on malformed input, unknown/disabled key, or
                authentication failure.
        """
        self._load_keys()

        # Validate the self-describing header before slicing so malformed
        # blobs raise a clear EncryptionError instead of a confusing
        # low-level failure further down.
        if len(ciphertext) < 2:
            raise EncryptionError("Invalid ciphertext: truncated header")
        key_id_len = int.from_bytes(ciphertext[:2], "big")
        if len(ciphertext) < 2 + key_id_len + 12:
            raise EncryptionError("Invalid ciphertext: truncated payload")
        key_id = ciphertext[2:2 + key_id_len].decode("utf-8")
        rest = ciphertext[2 + key_id_len:]

        key = self._require_key(key_id, check_enabled=True)

        nonce = rest[:12]
        encrypted = rest[12:]

        aesgcm = AESGCM(key.key_material)
        aad = json.dumps(context).encode() if context else None
        try:
            plaintext = aesgcm.decrypt(nonce, encrypted, aad)
            return plaintext, key_id
        except Exception as exc:
            raise EncryptionError(f"Decryption failed: {exc}") from exc

    def generate_data_key(self, key_id: str,
                          context: Dict[str, str] | None = None) -> tuple[bytes, bytes]:
        """Generate a data key and return both plaintext and encrypted versions.

        Returns:
            Tuple of (plaintext_key, encrypted_key)
        """
        self._load_keys()
        self._require_key(key_id, check_enabled=True)

        plaintext_key = secrets.token_bytes(32)
        # The wrapped copy embeds key_id, nonce, and ciphertext (see encrypt).
        encrypted_key = self.encrypt(key_id, plaintext_key, context)

        return plaintext_key, encrypted_key

    def decrypt_data_key(self, key_id: str, encrypted_key: bytes,
                         context: Dict[str, str] | None = None) -> bytes:
        """Decrypt a data key (the wrapping blob embeds the real key id)."""
        plaintext, _ = self.decrypt(encrypted_key, context)
        return plaintext

    def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
        """Get an encryption provider for a specific key.

        When ``key_id`` is None, reuses the first existing key, or creates a
        default key if the store is empty.
        """
        self._load_keys()

        if key_id is None:
            if not self._keys:
                key = self.create_key("Default KMS Key")
                key_id = key.key_id
            else:
                key_id = next(iter(self._keys.keys()))

        if key_id not in self._keys:
            raise EncryptionError(f"Key not found: {key_id}")

        return KMSEncryptionProvider(self, key_id)

    def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
                   source_context: Dict[str, str] | None = None,
                   destination_context: Dict[str, str] | None = None) -> bytes:
        """Re-encrypt data with a different key."""
        plaintext, _ = self.decrypt(ciphertext, source_context)
        return self.encrypt(destination_key_id, plaintext, destination_context)

    def generate_random(self, num_bytes: int = 32) -> bytes:
        """Generate cryptographically secure random bytes (1..1024)."""
        if num_bytes < 1 or num_bytes > 1024:
            raise EncryptionError("Number of bytes must be between 1 and 1024")
        return secrets.token_bytes(num_bytes)
|
||||
463
app/kms_api.py
463
app/kms_api.py
@@ -1,463 +0,0 @@
|
||||
"""KMS and encryption API endpoints."""
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import uuid
|
||||
from typing import Any, Dict
|
||||
|
||||
from flask import Blueprint, Response, current_app, jsonify, request
|
||||
|
||||
from .encryption import ClientEncryptionHelper, EncryptionError
|
||||
from .extensions import limiter
|
||||
from .iam import IamError
|
||||
|
||||
# Blueprint exposing the local KMS-style REST API under the /kms prefix.
kms_api_bp = Blueprint("kms_api", __name__, url_prefix="/kms")
|
||||
|
||||
|
||||
def _require_principal():
    """Require authentication for KMS operations.

    Delegates to the S3 API's authentication helper so both blueprints
    share one credential-checking path.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .s3_api import _require_principal as _s3_auth
    return _s3_auth()
|
||||
|
||||
|
||||
def _kms():
    """Return the KMS manager registered in app extensions (or None)."""
    extensions = current_app.extensions
    return extensions.get("kms")
|
||||
|
||||
|
||||
def _encryption():
    """Return the encryption manager registered in app extensions (or None)."""
    extensions = current_app.extensions
    return extensions.get("encryption")
|
||||
|
||||
|
||||
def _error_response(code: str, message: str, status: int) -> tuple[Dict[str, Any], int]:
|
||||
return {"__type": code, "message": message}, status
|
||||
|
||||
|
||||
# ---------------------- Key Management ----------------------
|
||||
|
||||
@kms_api_bp.route("/keys", methods=["GET", "POST"])
|
||||
@limiter.limit("30 per minute")
|
||||
def list_or_create_keys():
|
||||
"""List all KMS keys or create a new key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
if request.method == "POST":
|
||||
payload = request.get_json(silent=True) or {}
|
||||
key_id = payload.get("KeyId") or payload.get("key_id")
|
||||
description = payload.get("Description") or payload.get("description", "")
|
||||
|
||||
try:
|
||||
key = kms.create_key(description=description, key_id=key_id)
|
||||
current_app.logger.info(
|
||||
"KMS key created",
|
||||
extra={"key_id": key.key_id, "principal": principal.access_key},
|
||||
)
|
||||
return jsonify({
|
||||
"KeyMetadata": key.to_dict(),
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
|
||||
# GET - List keys
|
||||
keys = kms.list_keys()
|
||||
return jsonify({
|
||||
"Keys": [{"KeyId": k.key_id, "KeyArn": k.arn} for k in keys],
|
||||
"Truncated": False,
|
||||
})
|
||||
|
||||
|
||||
@kms_api_bp.route("/keys/<key_id>", methods=["GET", "DELETE"])
|
||||
@limiter.limit("30 per minute")
|
||||
def get_or_delete_key(key_id: str):
|
||||
"""Get or delete a specific KMS key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
if request.method == "DELETE":
|
||||
try:
|
||||
kms.delete_key(key_id)
|
||||
current_app.logger.info(
|
||||
"KMS key deleted",
|
||||
extra={"key_id": key_id, "principal": principal.access_key},
|
||||
)
|
||||
return Response(status=204)
|
||||
except EncryptionError as exc:
|
||||
return _error_response("NotFoundException", str(exc), 404)
|
||||
|
||||
# GET
|
||||
key = kms.get_key(key_id)
|
||||
if not key:
|
||||
return _error_response("NotFoundException", f"Key not found: {key_id}", 404)
|
||||
|
||||
return jsonify({"KeyMetadata": key.to_dict()})
|
||||
|
||||
|
||||
@kms_api_bp.route("/keys/<key_id>/enable", methods=["POST"])
|
||||
@limiter.limit("30 per minute")
|
||||
def enable_key(key_id: str):
|
||||
"""Enable a KMS key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
try:
|
||||
kms.enable_key(key_id)
|
||||
current_app.logger.info(
|
||||
"KMS key enabled",
|
||||
extra={"key_id": key_id, "principal": principal.access_key},
|
||||
)
|
||||
return Response(status=200)
|
||||
except EncryptionError as exc:
|
||||
return _error_response("NotFoundException", str(exc), 404)
|
||||
|
||||
|
||||
@kms_api_bp.route("/keys/<key_id>/disable", methods=["POST"])
|
||||
@limiter.limit("30 per minute")
|
||||
def disable_key(key_id: str):
|
||||
"""Disable a KMS key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
try:
|
||||
kms.disable_key(key_id)
|
||||
current_app.logger.info(
|
||||
"KMS key disabled",
|
||||
extra={"key_id": key_id, "principal": principal.access_key},
|
||||
)
|
||||
return Response(status=200)
|
||||
except EncryptionError as exc:
|
||||
return _error_response("NotFoundException", str(exc), 404)
|
||||
|
||||
|
||||
# ---------------------- Encryption Operations ----------------------
|
||||
|
||||
@kms_api_bp.route("/encrypt", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def encrypt_data():
|
||||
"""Encrypt data using a KMS key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
key_id = payload.get("KeyId")
|
||||
plaintext_b64 = payload.get("Plaintext")
|
||||
context = payload.get("EncryptionContext")
|
||||
|
||||
if not key_id:
|
||||
return _error_response("ValidationException", "KeyId is required", 400)
|
||||
if not plaintext_b64:
|
||||
return _error_response("ValidationException", "Plaintext is required", 400)
|
||||
|
||||
try:
|
||||
plaintext = base64.b64decode(plaintext_b64)
|
||||
except Exception:
|
||||
return _error_response("ValidationException", "Plaintext must be base64 encoded", 400)
|
||||
|
||||
try:
|
||||
ciphertext = kms.encrypt(key_id, plaintext, context)
|
||||
return jsonify({
|
||||
"CiphertextBlob": base64.b64encode(ciphertext).decode(),
|
||||
"KeyId": key_id,
|
||||
"EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/decrypt", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def decrypt_data():
|
||||
"""Decrypt data using a KMS key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
ciphertext_b64 = payload.get("CiphertextBlob")
|
||||
context = payload.get("EncryptionContext")
|
||||
|
||||
if not ciphertext_b64:
|
||||
return _error_response("ValidationException", "CiphertextBlob is required", 400)
|
||||
|
||||
try:
|
||||
ciphertext = base64.b64decode(ciphertext_b64)
|
||||
except Exception:
|
||||
return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)
|
||||
|
||||
try:
|
||||
plaintext, key_id = kms.decrypt(ciphertext, context)
|
||||
return jsonify({
|
||||
"Plaintext": base64.b64encode(plaintext).decode(),
|
||||
"KeyId": key_id,
|
||||
"EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("InvalidCiphertextException", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/generate-data-key", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def generate_data_key():
|
||||
"""Generate a data encryption key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
key_id = payload.get("KeyId")
|
||||
context = payload.get("EncryptionContext")
|
||||
key_spec = payload.get("KeySpec", "AES_256")
|
||||
|
||||
if not key_id:
|
||||
return _error_response("ValidationException", "KeyId is required", 400)
|
||||
|
||||
if key_spec not in {"AES_256", "AES_128"}:
|
||||
return _error_response("ValidationException", "KeySpec must be AES_256 or AES_128", 400)
|
||||
|
||||
try:
|
||||
plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)
|
||||
|
||||
# Trim key if AES_128 requested
|
||||
if key_spec == "AES_128":
|
||||
plaintext_key = plaintext_key[:16]
|
||||
|
||||
return jsonify({
|
||||
"Plaintext": base64.b64encode(plaintext_key).decode(),
|
||||
"CiphertextBlob": base64.b64encode(encrypted_key).decode(),
|
||||
"KeyId": key_id,
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/generate-data-key-without-plaintext", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def generate_data_key_without_plaintext():
|
||||
"""Generate a data encryption key without returning the plaintext."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
key_id = payload.get("KeyId")
|
||||
context = payload.get("EncryptionContext")
|
||||
|
||||
if not key_id:
|
||||
return _error_response("ValidationException", "KeyId is required", 400)
|
||||
|
||||
try:
|
||||
_, encrypted_key = kms.generate_data_key(key_id, context)
|
||||
return jsonify({
|
||||
"CiphertextBlob": base64.b64encode(encrypted_key).decode(),
|
||||
"KeyId": key_id,
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/re-encrypt", methods=["POST"])
|
||||
@limiter.limit("30 per minute")
|
||||
def re_encrypt():
|
||||
"""Re-encrypt data with a different key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
ciphertext_b64 = payload.get("CiphertextBlob")
|
||||
destination_key_id = payload.get("DestinationKeyId")
|
||||
source_context = payload.get("SourceEncryptionContext")
|
||||
destination_context = payload.get("DestinationEncryptionContext")
|
||||
|
||||
if not ciphertext_b64:
|
||||
return _error_response("ValidationException", "CiphertextBlob is required", 400)
|
||||
if not destination_key_id:
|
||||
return _error_response("ValidationException", "DestinationKeyId is required", 400)
|
||||
|
||||
try:
|
||||
ciphertext = base64.b64decode(ciphertext_b64)
|
||||
except Exception:
|
||||
return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)
|
||||
|
||||
try:
|
||||
# First decrypt, get source key id
|
||||
plaintext, source_key_id = kms.decrypt(ciphertext, source_context)
|
||||
|
||||
# Re-encrypt with destination key
|
||||
new_ciphertext = kms.encrypt(destination_key_id, plaintext, destination_context)
|
||||
|
||||
return jsonify({
|
||||
"CiphertextBlob": base64.b64encode(new_ciphertext).decode(),
|
||||
"SourceKeyId": source_key_id,
|
||||
"KeyId": destination_key_id,
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/generate-random", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def generate_random():
|
||||
"""Generate random bytes."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
num_bytes = payload.get("NumberOfBytes", 32)
|
||||
|
||||
try:
|
||||
num_bytes = int(num_bytes)
|
||||
except (TypeError, ValueError):
|
||||
return _error_response("ValidationException", "NumberOfBytes must be an integer", 400)
|
||||
|
||||
try:
|
||||
random_bytes = kms.generate_random(num_bytes)
|
||||
return jsonify({
|
||||
"Plaintext": base64.b64encode(random_bytes).decode(),
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("ValidationException", str(exc), 400)
|
||||
|
||||
|
||||
# ---------------------- Client-Side Encryption Helpers ----------------------
|
||||
|
||||
@kms_api_bp.route("/client/generate-key", methods=["POST"])
|
||||
@limiter.limit("30 per minute")
|
||||
def generate_client_key():
|
||||
"""Generate a client-side encryption key."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
key_info = ClientEncryptionHelper.generate_client_key()
|
||||
return jsonify(key_info)
|
||||
|
||||
|
||||
@kms_api_bp.route("/client/encrypt", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def client_encrypt():
|
||||
"""Encrypt data using client-side encryption."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
plaintext_b64 = payload.get("Plaintext")
|
||||
key_b64 = payload.get("Key")
|
||||
|
||||
if not plaintext_b64 or not key_b64:
|
||||
return _error_response("ValidationException", "Plaintext and Key are required", 400)
|
||||
|
||||
try:
|
||||
plaintext = base64.b64decode(plaintext_b64)
|
||||
result = ClientEncryptionHelper.encrypt_with_key(plaintext, key_b64)
|
||||
return jsonify(result)
|
||||
except Exception as exc:
|
||||
return _error_response("EncryptionError", str(exc), 400)
|
||||
|
||||
|
||||
@kms_api_bp.route("/client/decrypt", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def client_decrypt():
|
||||
"""Decrypt data using client-side encryption."""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
ciphertext_b64 = payload.get("Ciphertext") or payload.get("ciphertext")
|
||||
nonce_b64 = payload.get("Nonce") or payload.get("nonce")
|
||||
key_b64 = payload.get("Key") or payload.get("key")
|
||||
|
||||
if not ciphertext_b64 or not nonce_b64 or not key_b64:
|
||||
return _error_response("ValidationException", "Ciphertext, Nonce, and Key are required", 400)
|
||||
|
||||
try:
|
||||
plaintext = ClientEncryptionHelper.decrypt_with_key(ciphertext_b64, nonce_b64, key_b64)
|
||||
return jsonify({
|
||||
"Plaintext": base64.b64encode(plaintext).decode(),
|
||||
})
|
||||
except Exception as exc:
|
||||
return _error_response("DecryptionError", str(exc), 400)
|
||||
|
||||
|
||||
# ---------------------- Encryption Materials for S3 Client-Side Encryption ----------------------
|
||||
|
||||
@kms_api_bp.route("/materials/<key_id>", methods=["POST"])
|
||||
@limiter.limit("60 per minute")
|
||||
def get_encryption_materials(key_id: str):
|
||||
"""Get encryption materials for client-side S3 encryption.
|
||||
|
||||
This is used by S3 encryption clients that want to use KMS for
|
||||
key management but perform encryption client-side.
|
||||
"""
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
|
||||
kms = _kms()
|
||||
if not kms:
|
||||
return _error_response("KMSNotEnabled", "KMS is not configured", 400)
|
||||
|
||||
payload = request.get_json(silent=True) or {}
|
||||
context = payload.get("EncryptionContext")
|
||||
|
||||
try:
|
||||
plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)
|
||||
|
||||
return jsonify({
|
||||
"PlaintextKey": base64.b64encode(plaintext_key).decode(),
|
||||
"EncryptedKey": base64.b64encode(encrypted_key).decode(),
|
||||
"KeyId": key_id,
|
||||
"Algorithm": "AES-256-GCM",
|
||||
"KeyWrapAlgorithm": "kms",
|
||||
})
|
||||
except EncryptionError as exc:
|
||||
return _error_response("KMSInternalException", str(exc), 400)
|
||||
@@ -1,356 +0,0 @@
|
||||
"""Background replication worker."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import mimetypes
|
||||
import threading
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError
|
||||
from boto3.exceptions import S3UploadFailedError
|
||||
|
||||
from .connections import ConnectionStore, RemoteConnection
|
||||
from .storage import ObjectStorage, StorageError
|
||||
|
||||
logger = logging.getLogger(__name__)

# User-Agent sent on outbound replication requests so they can be identified
# (and filtered) by the destination endpoint.
REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"

# Replication modes: "new_only" replicates objects created after the rule
# was added; "all" also backfills objects that already existed.
REPLICATION_MODE_NEW_ONLY = "new_only"
REPLICATION_MODE_ALL = "all"
|
||||
|
||||
@dataclass
class ReplicationStats:
    """Statistics for replication operations - computed dynamically."""

    # Objects present in both source and destination.
    objects_synced: int = 0
    # Objects present in the source but missing from the destination.
    objects_pending: int = 0
    # Objects present only in the destination (will be deleted).
    objects_orphaned: int = 0
    # Total bytes confirmed synced to the destination.
    bytes_synced: int = 0
    last_sync_at: Optional[float] = None
    last_sync_key: Optional[str] = None

    def to_dict(self) -> dict:
        """Serialize every field into a plain dict."""
        return {
            name: getattr(self, name)
            for name in (
                "objects_synced",
                "objects_pending",
                "objects_orphaned",
                "bytes_synced",
                "last_sync_at",
                "last_sync_key",
            )
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationStats":
        """Rebuild stats from a dict, tolerating missing fields."""
        counters = {
            name: data.get(name, 0)
            for name in (
                "objects_synced",
                "objects_pending",
                "objects_orphaned",
                "bytes_synced",
            )
        }
        return cls(
            last_sync_at=data.get("last_sync_at"),
            last_sync_key=data.get("last_sync_key"),
            **counters,
        )
|
||||
|
||||
|
||||
@dataclass
class ReplicationRule:
    """A per-bucket replication rule pointing at one remote target."""

    bucket_name: str
    target_connection_id: str
    target_bucket: str
    enabled: bool = True
    mode: str = REPLICATION_MODE_NEW_ONLY
    created_at: Optional[float] = None
    stats: ReplicationStats = field(default_factory=ReplicationStats)

    def to_dict(self) -> dict:
        """Serialize the rule (including nested stats) for persistence."""
        return {
            "bucket_name": self.bucket_name,
            "target_connection_id": self.target_connection_id,
            "target_bucket": self.target_bucket,
            "enabled": self.enabled,
            "mode": self.mode,
            "created_at": self.created_at,
            "stats": self.stats.to_dict(),
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationRule":
        """Rebuild a rule from its serialized form.

        Tolerates records written before `mode`/`created_at` existed.
        """
        # Work on a shallow copy: the previous implementation popped "stats"
        # out of the caller's dict, mutating the input argument.
        data = dict(data)
        stats_data = data.pop("stats", {})
        # Handle old rules without mode/created_at
        data.setdefault("mode", REPLICATION_MODE_NEW_ONLY)
        data.setdefault("created_at", None)
        rule = cls(**data)
        rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
        return rule
|
||||
|
||||
|
||||
class ReplicationManager:
|
||||
    def __init__(self, storage: ObjectStorage, connections: ConnectionStore, rules_path: Path) -> None:
        # Local object store acting as the replication source.
        self.storage = storage
        # Registry of remote S3-compatible endpoints (credentials + URLs).
        self.connections = connections
        # JSON file that persists the per-bucket replication rules.
        self.rules_path = rules_path
        self._rules: Dict[str, ReplicationRule] = {}
        # Guards rule/stats mutation from concurrent worker threads.
        self._stats_lock = threading.Lock()
        self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
        # Load any previously persisted rules immediately.
        self.reload_rules()
|
||||
|
||||
def reload_rules(self) -> None:
|
||||
if not self.rules_path.exists():
|
||||
self._rules = {}
|
||||
return
|
||||
try:
|
||||
with open(self.rules_path, "r") as f:
|
||||
data = json.load(f)
|
||||
for bucket, rule_data in data.items():
|
||||
self._rules[bucket] = ReplicationRule.from_dict(rule_data)
|
||||
except (OSError, ValueError) as e:
|
||||
logger.error(f"Failed to load replication rules: {e}")
|
||||
|
||||
def save_rules(self) -> None:
|
||||
data = {b: rule.to_dict() for b, rule in self._rules.items()}
|
||||
self.rules_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(self.rules_path, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
def get_rule(self, bucket_name: str) -> Optional[ReplicationRule]:
|
||||
return self._rules.get(bucket_name)
|
||||
|
||||
def set_rule(self, rule: ReplicationRule) -> None:
|
||||
self._rules[rule.bucket_name] = rule
|
||||
self.save_rules()
|
||||
|
||||
def delete_rule(self, bucket_name: str) -> None:
|
||||
if bucket_name in self._rules:
|
||||
del self._rules[bucket_name]
|
||||
self.save_rules()
|
||||
|
||||
def _update_last_sync(self, bucket_name: str, object_key: str = "") -> None:
|
||||
"""Update last sync timestamp after a successful operation."""
|
||||
with self._stats_lock:
|
||||
rule = self._rules.get(bucket_name)
|
||||
if not rule:
|
||||
return
|
||||
rule.stats.last_sync_at = time.time()
|
||||
rule.stats.last_sync_key = object_key
|
||||
self.save_rules()
|
||||
|
||||
def get_sync_status(self, bucket_name: str) -> Optional[ReplicationStats]:
|
||||
"""Dynamically compute replication status by comparing source and destination buckets."""
|
||||
rule = self.get_rule(bucket_name)
|
||||
if not rule:
|
||||
return None
|
||||
|
||||
connection = self.connections.get(rule.target_connection_id)
|
||||
if not connection:
|
||||
return rule.stats # Return cached stats if connection unavailable
|
||||
|
||||
try:
|
||||
# Get source objects
|
||||
source_objects = self.storage.list_objects(bucket_name)
|
||||
source_keys = {obj.key: obj.size for obj in source_objects}
|
||||
|
||||
# Get destination objects
|
||||
s3 = boto3.client(
|
||||
"s3",
|
||||
endpoint_url=connection.endpoint_url,
|
||||
aws_access_key_id=connection.access_key,
|
||||
aws_secret_access_key=connection.secret_key,
|
||||
region_name=connection.region,
|
||||
)
|
||||
|
||||
dest_keys = set()
|
||||
bytes_synced = 0
|
||||
paginator = s3.get_paginator('list_objects_v2')
|
||||
try:
|
||||
for page in paginator.paginate(Bucket=rule.target_bucket):
|
||||
for obj in page.get('Contents', []):
|
||||
dest_keys.add(obj['Key'])
|
||||
if obj['Key'] in source_keys:
|
||||
bytes_synced += obj.get('Size', 0)
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'NoSuchBucket':
|
||||
# Destination bucket doesn't exist yet
|
||||
dest_keys = set()
|
||||
else:
|
||||
raise
|
||||
|
||||
# Compute stats
|
||||
synced = source_keys.keys() & dest_keys # Objects in both
|
||||
orphaned = dest_keys - source_keys.keys() # In dest but not source
|
||||
|
||||
# For "new_only" mode, we can't determine pending since we don't know
|
||||
# which objects existed before replication was enabled. Only "all" mode
|
||||
# should show pending (objects that should be replicated but aren't yet).
|
||||
if rule.mode == REPLICATION_MODE_ALL:
|
||||
pending = source_keys.keys() - dest_keys # In source but not dest
|
||||
else:
|
||||
pending = set() # New-only mode: don't show pre-existing as pending
|
||||
|
||||
# Update cached stats with computed values
|
||||
rule.stats.objects_synced = len(synced)
|
||||
rule.stats.objects_pending = len(pending)
|
||||
rule.stats.objects_orphaned = len(orphaned)
|
||||
rule.stats.bytes_synced = bytes_synced
|
||||
|
||||
return rule.stats
|
||||
|
||||
except (ClientError, StorageError) as e:
|
||||
logger.error(f"Failed to compute sync status for {bucket_name}: {e}")
|
||||
return rule.stats # Return cached stats on error
|
||||
|
||||
def replicate_existing_objects(self, bucket_name: str) -> None:
|
||||
"""Trigger replication for all existing objects in a bucket."""
|
||||
rule = self.get_rule(bucket_name)
|
||||
if not rule or not rule.enabled:
|
||||
return
|
||||
|
||||
connection = self.connections.get(rule.target_connection_id)
|
||||
if not connection:
|
||||
logger.warning(f"Cannot replicate existing objects: Connection {rule.target_connection_id} not found")
|
||||
return
|
||||
|
||||
try:
|
||||
objects = self.storage.list_objects(bucket_name)
|
||||
logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
|
||||
for obj in objects:
|
||||
self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")
|
||||
except StorageError as e:
|
||||
logger.error(f"Failed to list objects for replication: {e}")
|
||||
|
||||
def create_remote_bucket(self, connection_id: str, bucket_name: str) -> None:
|
||||
"""Create a bucket on the remote connection."""
|
||||
connection = self.connections.get(connection_id)
|
||||
if not connection:
|
||||
raise ValueError(f"Connection {connection_id} not found")
|
||||
|
||||
try:
|
||||
s3 = boto3.client(
|
||||
"s3",
|
||||
endpoint_url=connection.endpoint_url,
|
||||
aws_access_key_id=connection.access_key,
|
||||
aws_secret_access_key=connection.secret_key,
|
||||
region_name=connection.region,
|
||||
)
|
||||
s3.create_bucket(Bucket=bucket_name)
|
||||
except ClientError as e:
|
||||
logger.error(f"Failed to create remote bucket {bucket_name}: {e}")
|
||||
raise
|
||||
|
||||
def trigger_replication(self, bucket_name: str, object_key: str, action: str = "write") -> None:
|
||||
rule = self.get_rule(bucket_name)
|
||||
if not rule or not rule.enabled:
|
||||
return
|
||||
|
||||
connection = self.connections.get(rule.target_connection_id)
|
||||
if not connection:
|
||||
logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Connection {rule.target_connection_id} not found")
|
||||
return
|
||||
|
||||
self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)
|
||||
|
||||
def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
|
||||
if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
|
||||
logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
|
||||
return
|
||||
|
||||
try:
|
||||
from .storage import ObjectStorage
|
||||
ObjectStorage._sanitize_object_key(object_key)
|
||||
except StorageError as e:
|
||||
logger.error(f"Object key validation failed in replication: {e}")
|
||||
return
|
||||
|
||||
file_size = 0
|
||||
try:
|
||||
config = Config(user_agent_extra=REPLICATION_USER_AGENT)
|
||||
s3 = boto3.client(
|
||||
"s3",
|
||||
endpoint_url=conn.endpoint_url,
|
||||
aws_access_key_id=conn.access_key,
|
||||
aws_secret_access_key=conn.secret_key,
|
||||
region_name=conn.region,
|
||||
config=config,
|
||||
)
|
||||
|
||||
if action == "delete":
|
||||
try:
|
||||
s3.delete_object(Bucket=rule.target_bucket, Key=object_key)
|
||||
logger.info(f"Replicated DELETE {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
|
||||
self._update_last_sync(bucket_name, object_key)
|
||||
except ClientError as e:
|
||||
logger.error(f"Replication DELETE failed for {bucket_name}/{object_key}: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
path = self.storage.get_object_path(bucket_name, object_key)
|
||||
except StorageError:
|
||||
logger.error(f"Source object not found: {bucket_name}/{object_key}")
|
||||
return
|
||||
|
||||
metadata = self.storage.get_object_metadata(bucket_name, object_key)
|
||||
|
||||
extra_args = {}
|
||||
if metadata:
|
||||
extra_args["Metadata"] = metadata
|
||||
|
||||
# Guess content type to prevent corruption/wrong handling
|
||||
content_type, _ = mimetypes.guess_type(path)
|
||||
file_size = path.stat().st_size
|
||||
|
||||
logger.info(f"Replicating {bucket_name}/{object_key}: Size={file_size}, ContentType={content_type}")
|
||||
|
||||
try:
|
||||
with path.open("rb") as f:
|
||||
s3.put_object(
|
||||
Bucket=rule.target_bucket,
|
||||
Key=object_key,
|
||||
Body=f,
|
||||
ContentLength=file_size,
|
||||
ContentType=content_type or "application/octet-stream",
|
||||
Metadata=metadata or {}
|
||||
)
|
||||
except (ClientError, S3UploadFailedError) as e:
|
||||
is_no_bucket = False
|
||||
if isinstance(e, ClientError):
|
||||
if e.response['Error']['Code'] == 'NoSuchBucket':
|
||||
is_no_bucket = True
|
||||
elif isinstance(e, S3UploadFailedError):
|
||||
if "NoSuchBucket" in str(e):
|
||||
is_no_bucket = True
|
||||
|
||||
if is_no_bucket:
|
||||
logger.info(f"Target bucket {rule.target_bucket} not found. Attempting to create it.")
|
||||
try:
|
||||
s3.create_bucket(Bucket=rule.target_bucket)
|
||||
# Retry upload
|
||||
with path.open("rb") as f:
|
||||
s3.put_object(
|
||||
Bucket=rule.target_bucket,
|
||||
Key=object_key,
|
||||
Body=f,
|
||||
ContentLength=file_size,
|
||||
ContentType=content_type or "application/octet-stream",
|
||||
Metadata=metadata or {}
|
||||
)
|
||||
except Exception as create_err:
|
||||
logger.error(f"Failed to create target bucket {rule.target_bucket}: {create_err}")
|
||||
raise e # Raise original error
|
||||
else:
|
||||
raise e
|
||||
|
||||
logger.info(f"Replicated {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
|
||||
self._update_last_sync(bucket_name, object_key)
|
||||
|
||||
except (ClientError, OSError, ValueError) as e:
|
||||
logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
|
||||
except Exception:
|
||||
logger.exception(f"Unexpected error during replication for {bucket_name}/{object_key}")
|
||||
2404
app/s3_api.py
2404
app/s3_api.py
File diff suppressed because it is too large
Load Diff
@@ -1,37 +0,0 @@
|
||||
"""Ephemeral store for one-time secrets communicated to the UI."""
|
||||
from __future__ import annotations
|
||||
|
||||
import secrets
|
||||
import time
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
|
||||
class EphemeralSecretStore:
|
||||
"""Keeps values in-memory for a short period and returns them once."""
|
||||
|
||||
def __init__(self, default_ttl: int = 300) -> None:
|
||||
self._default_ttl = max(default_ttl, 1)
|
||||
self._store: Dict[str, tuple[Any, float]] = {}
|
||||
|
||||
def remember(self, payload: Any, *, ttl: Optional[int] = None) -> str:
|
||||
token = secrets.token_urlsafe(16)
|
||||
expires_at = time.time() + (ttl or self._default_ttl)
|
||||
self._store[token] = (payload, expires_at)
|
||||
return token
|
||||
|
||||
def pop(self, token: str | None) -> Any | None:
|
||||
if not token:
|
||||
return None
|
||||
entry = self._store.pop(token, None)
|
||||
if not entry:
|
||||
return None
|
||||
payload, expires_at = entry
|
||||
if expires_at < time.time():
|
||||
return None
|
||||
return payload
|
||||
|
||||
def purge_expired(self) -> None:
|
||||
now = time.time()
|
||||
stale = [token for token, (_, expires_at) in self._store.items() if expires_at < now]
|
||||
for token in stale:
|
||||
self._store.pop(token, None)
|
||||
1370
app/storage.py
1370
app/storage.py
File diff suppressed because it is too large
Load Diff
@@ -1,9 +0,0 @@
|
||||
"""Central location for the application version string."""
|
||||
from __future__ import annotations
|
||||
|
||||
# Single source of truth for the release version; bump on every release.
APP_VERSION = "0.1.5"


def get_version() -> str:
    """Return the current application version."""
    return APP_VERSION
|
||||
27
crates/myfsio-auth/Cargo.toml
Normal file
27
crates/myfsio-auth/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
|
||||
# Authentication crate: SigV4 signature verification, IAM policy handling,
# and Python-compatible Fernet token encryption/decryption.
[package]
name = "myfsio-auth"
version.workspace = true
edition.workspace = true

[dependencies]
myfsio-common = { path = "../myfsio-common" }
# Crypto primitives for SigV4 and Fernet.
hmac = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
aes = { workspace = true }
cbc = { workspace = true }
base64 = { workspace = true }
# Pinned here rather than in the workspace: key derivation + IV generation.
pbkdf2 = "0.12"
rand = "0.8"
# Signing-key cache.
lru = { workspace = true }
parking_lot = { workspace = true }
percent-encoding = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
chrono = { workspace = true }
tracing = { workspace = true }
uuid = { workspace = true }

[dev-dependencies]
tempfile = "3"
|
||||
118
crates/myfsio-auth/src/fernet.rs
Normal file
118
crates/myfsio-auth/src/fernet.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use aes::cipher::{block_padding::Pkcs7, BlockDecryptMut, BlockEncryptMut, KeyIvInit};
|
||||
use base64::{engine::general_purpose::URL_SAFE, Engine};
|
||||
use hmac::{Hmac, Mac};
|
||||
use rand::RngCore;
|
||||
use sha2::Sha256;
|
||||
|
||||
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
|
||||
type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
pub fn derive_fernet_key(secret: &str) -> String {
|
||||
let mut derived = [0u8; 32];
|
||||
pbkdf2::pbkdf2_hmac::<Sha256>(
|
||||
secret.as_bytes(),
|
||||
b"myfsio-iam-encryption",
|
||||
100_000,
|
||||
&mut derived,
|
||||
);
|
||||
URL_SAFE.encode(derived)
|
||||
}
|
||||
|
||||
/// Decrypt and authenticate a Fernet token (spec version `0x80`).
///
/// Token layout: 1-byte version | 8-byte big-endian timestamp | 16-byte IV
/// | ciphertext | 32-byte HMAC-SHA256 tag over everything before it.
/// The 32-byte key splits into a 16-byte HMAC signing key followed by a
/// 16-byte AES-128-CBC encryption key (Fernet key convention).
///
/// NOTE(review): the embedded timestamp is not checked here, so tokens do
/// not expire at this layer — confirm TTL enforcement happens elsewhere.
pub fn decrypt(key_b64: &str, token: &str) -> Result<Vec<u8>, &'static str> {
    let key_bytes = URL_SAFE
        .decode(key_b64)
        .map_err(|_| "invalid fernet key base64")?;
    if key_bytes.len() != 32 {
        return Err("fernet key must be 32 bytes");
    }

    // First half signs, second half encrypts.
    let signing_key = &key_bytes[..16];
    let encryption_key = &key_bytes[16..];

    let token_bytes = URL_SAFE
        .decode(token)
        .map_err(|_| "invalid fernet token base64")?;

    // Minimum size: 1 (version) + 8 (timestamp) + 16 (IV) + 32 (HMAC) = 57.
    if token_bytes.len() < 57 {
        return Err("fernet token too short");
    }

    if token_bytes[0] != 0x80 {
        return Err("invalid fernet version");
    }

    // Trailing 32 bytes are the tag; everything before it is what was signed.
    let hmac_offset = token_bytes.len() - 32;
    let payload = &token_bytes[..hmac_offset];
    let expected_hmac = &token_bytes[hmac_offset..];

    // Verify the MAC before touching the ciphertext (encrypt-then-MAC).
    let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
    mac.update(payload);
    mac.verify_slice(expected_hmac)
        .map_err(|_| "HMAC verification failed")?;

    // IV sits after version (1) + timestamp (8); ciphertext follows it.
    let iv = &token_bytes[9..25];
    let ciphertext = &token_bytes[25..hmac_offset];

    let plaintext = Aes128CbcDec::new(encryption_key.into(), iv.into())
        .decrypt_padded_vec_mut::<Pkcs7>(ciphertext)
        .map_err(|_| "AES-CBC decryption failed")?;

    Ok(plaintext)
}
|
||||
|
||||
pub fn encrypt(key_b64: &str, plaintext: &[u8]) -> Result<String, &'static str> {
|
||||
let key_bytes = URL_SAFE
|
||||
.decode(key_b64)
|
||||
.map_err(|_| "invalid fernet key base64")?;
|
||||
if key_bytes.len() != 32 {
|
||||
return Err("fernet key must be 32 bytes");
|
||||
}
|
||||
|
||||
let signing_key = &key_bytes[..16];
|
||||
let encryption_key = &key_bytes[16..];
|
||||
|
||||
let mut iv = [0u8; 16];
|
||||
rand::thread_rng().fill_bytes(&mut iv);
|
||||
|
||||
let timestamp = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.map_err(|_| "system time error")?
|
||||
.as_secs();
|
||||
|
||||
let ciphertext = Aes128CbcEnc::new(encryption_key.into(), (&iv).into())
|
||||
.encrypt_padded_vec_mut::<Pkcs7>(plaintext);
|
||||
|
||||
let mut payload = Vec::with_capacity(1 + 8 + 16 + ciphertext.len());
|
||||
payload.push(0x80);
|
||||
payload.extend_from_slice(×tamp.to_be_bytes());
|
||||
payload.extend_from_slice(&iv);
|
||||
payload.extend_from_slice(&ciphertext);
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(signing_key).map_err(|_| "hmac key error")?;
|
||||
mac.update(&payload);
|
||||
let tag = mac.finalize().into_bytes();
|
||||
|
||||
let mut token_bytes = payload;
|
||||
token_bytes.extend_from_slice(&tag);
|
||||
Ok(URL_SAFE.encode(&token_bytes))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_derive_fernet_key_format() {
        // Derived key must be 32 bytes, URL-safe base64 encoded.
        let key = derive_fernet_key("test-secret");
        let decoded = URL_SAFE.decode(&key).unwrap();
        assert_eq!(decoded.len(), 32);
    }

    // The old "roundtrip" test only re-checked key length; actually
    // exercise encrypt -> decrypt here.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let key = derive_fernet_key("dev-secret-key");
        let token = encrypt(&key, b"hello fernet").unwrap();
        let plain = decrypt(&key, &token).unwrap();
        assert_eq!(plain, b"hello fernet");
    }

    #[test]
    fn test_decrypt_rejects_tampered_token() {
        let key = derive_fernet_key("dev-secret-key");
        let token = encrypt(&key, b"payload").unwrap();
        // Flip one bit in the middle of the token; HMAC must fail.
        let mut bytes = URL_SAFE.decode(&token).unwrap();
        let mid = bytes.len() / 2;
        bytes[mid] ^= 0x01;
        let tampered = URL_SAFE.encode(&bytes);
        assert!(decrypt(&key, &tampered).is_err());
    }
}
|
||||
1015
crates/myfsio-auth/src/iam.rs
Normal file
1015
crates/myfsio-auth/src/iam.rs
Normal file
File diff suppressed because it is too large
Load Diff
4
crates/myfsio-auth/src/lib.rs
Normal file
4
crates/myfsio-auth/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
mod fernet;
|
||||
pub mod iam;
|
||||
pub mod principal;
|
||||
pub mod sigv4;
|
||||
1
crates/myfsio-auth/src/principal.rs
Normal file
1
crates/myfsio-auth/src/principal.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub use myfsio_common::types::Principal;
|
||||
287
crates/myfsio-auth/src/sigv4.rs
Normal file
287
crates/myfsio-auth/src/sigv4.rs
Normal file
@@ -0,0 +1,287 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// One cached SigV4 signing key plus its creation instant (for TTL checks).
struct CacheEntry {
    key: Vec<u8>,
    created: Instant,
}

// Global LRU keyed by (secret_key, date_stamp, region, service). Bounded at
// 256 entries; entries older than CACHE_TTL_SECS are treated as stale.
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));

const CACHE_TTL_SECS: u64 = 60;

// AWS canonical URI-encoding set: escape everything except the unreserved
// characters (alphanumerics and `-_.~`); note `/` IS escaped.
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
    .remove(b'-')
    .remove(b'_')
    .remove(b'.')
    .remove(b'~');
|
||||
|
||||
/// HMAC-SHA256 of `msg` under `key`; building block of SigV4 key derivation.
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
    // HMAC-SHA256 accepts keys of any length, so construction cannot fail.
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
    mac.update(msg);
    mac.finalize().into_bytes().to_vec()
}

/// Lowercase hex SHA-256 digest of `data`.
fn sha256_hex(data: &[u8]) -> String {
    let mut hasher = Sha256::new();
    hasher.update(data);
    hex::encode(hasher.finalize())
}

/// Percent-encode a string per the AWS canonical-request rules (see
/// AWS_ENCODE_SET above).
fn aws_uri_encode(input: &str) -> String {
    percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
}
|
||||
|
||||
/// Derive the SigV4 signing key for (secret, date, region, service),
/// memoised in a global LRU for `CACHE_TTL_SECS`.
///
/// The four-step HMAC chain is cheap but runs on every request; the TTL
/// also bounds how long derived keys linger in memory.
pub fn derive_signing_key_cached(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    let cache_key = (
        secret_key.to_owned(),
        date_stamp.to_owned(),
        region.to_owned(),
        service.to_owned(),
    );

    // Fast path: fresh cache hit. Scoped so the lock is not held while
    // deriving below.
    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        if let Some(entry) = cache.get(&cache_key) {
            if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
                return entry.key.clone();
            }
            // Stale: evict so it doesn't occupy an LRU slot.
            cache.pop(&cache_key);
        }
    }

    // SigV4 derivation chain:
    // HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
    let k_date = hmac_sha256(
        format!("AWS4{}", secret_key).as_bytes(),
        date_stamp.as_bytes(),
    );
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, service.as_bytes());
    let k_signing = hmac_sha256(&k_service, b"aws4_request");

    {
        let mut cache = SIGNING_KEY_CACHE.lock();
        cache.put(
            cache_key,
            CacheEntry {
                key: k_signing.clone(),
                created: Instant::now(),
            },
        );
    }

    k_signing
}
|
||||
|
||||
/// Compare two byte slices in time independent of where they first differ.
///
/// A length mismatch returns early — slice length is treated as public
/// information (signatures have a fixed, known length).
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    // OR together the XOR of every byte pair: the accumulator is zero only
    // when every pair matched, and every pair is always inspected.
    a.iter().zip(b.iter()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}
|
||||
|
||||
/// Verify an AWS Signature Version 4 request signature.
///
/// Rebuilds the canonical request from the caller-parsed pieces, derives
/// the signing key (cached), computes the expected signature, and compares
/// it to `provided_signature` in constant time.
///
/// Preconditions this function relies on the caller for:
/// - `header_values` must contain exactly the signed headers, already in
///   the order listed in `signed_headers_str`;
/// - `canonical_uri` must already be normalized/encoded;
/// - `payload_hash` is the x-amz-content-sha256 value (may be
///   "UNSIGNED-PAYLOAD").
pub fn verify_sigv4_signature(
    method: &str,
    canonical_uri: &str,
    query_params: &[(String, String)],
    signed_headers_str: &str,
    header_values: &[(String, String)],
    payload_hash: &str,
    amz_date: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
    secret_key: &str,
    provided_signature: &str,
) -> bool {
    // Canonical query string: sort by name, then by value (AWS rule for
    // repeated parameters), then AWS-encode both sides.
    let mut sorted_params = query_params.to_vec();
    sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));

    let canonical_query_string = sorted_params
        .iter()
        .map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
        .collect::<Vec<_>>()
        .join("&");

    // Canonical headers: lowercase name, collapse internal whitespace in the
    // value, one `name:value\n` entry per signed header.
    let mut canonical_headers = String::new();
    for (name, value) in header_values {
        let lower_name = name.to_lowercase();
        let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
        // Some clients sign an empty Expect header that was actually sent as
        // "100-continue"; substitute it so the canonical form matches.
        let final_value = if lower_name == "expect" && normalized.is_empty() {
            "100-continue"
        } else {
            &normalized
        };
        canonical_headers.push_str(&lower_name);
        canonical_headers.push(':');
        canonical_headers.push_str(final_value);
        canonical_headers.push('\n');
    }

    // Canonical request: method, URI, query, headers, signed-header list,
    // payload hash — newline separated, in exactly this order.
    let canonical_request = format!(
        "{}\n{}\n{}\n{}\n{}\n{}",
        method,
        canonical_uri,
        canonical_query_string,
        canonical_headers,
        signed_headers_str,
        payload_hash
    );

    let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    );

    let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
    let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
    let calculated_hex = hex::encode(&calculated);

    // Constant-time comparison so attackers can't probe the signature
    // byte-by-byte via timing.
    constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
}
|
||||
|
||||
/// Public entry point for signing-key derivation; always goes through the
/// cache (kept as a separate name for API stability).
pub fn derive_signing_key(
    secret_key: &str,
    date_stamp: &str,
    region: &str,
    service: &str,
) -> Vec<u8> {
    derive_signing_key_cached(secret_key, date_stamp, region, service)
}

/// Hex-encoded HMAC-SHA256 of a SigV4 string-to-sign.
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
    let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
    hex::encode(sig)
}

/// Signature for a browser POST upload: signs the base64 policy document
/// directly (no string-to-sign construction).
pub fn compute_post_policy_signature(signing_key: &[u8], policy_b64: &str) -> String {
    let sig = hmac_sha256(signing_key, policy_b64.as_bytes());
    hex::encode(sig)
}

/// Build the SigV4 string-to-sign from its three inputs; the canonical
/// request is hashed, not embedded verbatim.
pub fn build_string_to_sign(
    amz_date: &str,
    credential_scope: &str,
    canonical_request: &str,
) -> String {
    let cr_hash = sha256_hex(canonical_request.as_bytes());
    format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        amz_date, credential_scope, cr_hash
    )
}

/// Constant-time string equality (see constant_time_compare_inner).
pub fn constant_time_compare(a: &str, b: &str) -> bool {
    constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}

/// Drop every cached signing key — e.g. after credential rotation.
pub fn clear_signing_key_cache() {
    SIGNING_KEY_CACHE.lock().clear();
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_derive_signing_key() {
        // Example credentials from the AWS SigV4 documentation.
        let key = derive_signing_key(
            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "20130524",
            "us-east-1",
            "s3",
        );
        assert_eq!(key.len(), 32);
    }

    #[test]
    fn test_derive_signing_key_cached() {
        // Two identical derivations must agree (second one is a cache hit).
        let key1 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
        let key2 = derive_signing_key("secret", "20240101", "us-east-1", "s3");
        assert_eq!(key1, key2);
    }

    #[test]
    fn test_constant_time_compare() {
        assert!(constant_time_compare("abc", "abc"));
        assert!(!constant_time_compare("abc", "abd"));
        assert!(!constant_time_compare("abc", "abcd"));
    }

    #[test]
    fn test_build_string_to_sign() {
        let result = build_string_to_sign(
            "20130524T000000Z",
            "20130524/us-east-1/s3/aws4_request",
            "GET\n/\n\nhost:example.com\n\nhost\nUNSIGNED-PAYLOAD",
        );
        assert!(result.starts_with("AWS4-HMAC-SHA256\n"));
        assert!(result.contains("20130524T000000Z"));
    }

    #[test]
    fn test_aws_uri_encode() {
        // Unreserved characters pass through; space and slash are escaped.
        assert_eq!(aws_uri_encode("hello world"), "hello%20world");
        assert_eq!(aws_uri_encode("test-file_name.txt"), "test-file_name.txt");
        assert_eq!(aws_uri_encode("a/b"), "a%2Fb");
    }

    #[test]
    fn test_verify_sigv4_roundtrip() {
        // Sign a canonical request ourselves, then verify it with the same
        // inputs — exercises the whole derive/sign/verify pipeline.
        let secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let date_stamp = "20130524";
        let region = "us-east-1";
        let service = "s3";
        let amz_date = "20130524T000000Z";

        let signing_key = derive_signing_key(secret, date_stamp, region, service);

        let canonical_request =
            "GET\n/\n\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD";
        let string_to_sign = build_string_to_sign(
            amz_date,
            &format!("{}/{}/{}/aws4_request", date_stamp, region, service),
            canonical_request,
        );

        let signature = compute_signature(&signing_key, &string_to_sign);

        let result = verify_sigv4_signature(
            "GET",
            "/",
            &[],
            "host",
            &[(
                "host".to_string(),
                "examplebucket.s3.amazonaws.com".to_string(),
            )],
            "UNSIGNED-PAYLOAD",
            amz_date,
            date_stamp,
            region,
            service,
            secret,
            &signature,
        );
        assert!(result);
    }
}
|
||||
11
crates/myfsio-common/Cargo.toml
Normal file
11
crates/myfsio-common/Cargo.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
# Foundation crate shared by all workspace members: error types, constants,
# and common data types.
[package]
name = "myfsio-common"
version.workspace = true
edition.workspace = true

[dependencies]
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
||||
20
crates/myfsio-common/src/constants.rs
Normal file
20
crates/myfsio-common/src/constants.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
// On-disk layout: all server-managed state lives under a hidden system root.
pub const SYSTEM_ROOT: &str = ".myfsio.sys";
pub const SYSTEM_BUCKETS_DIR: &str = "buckets";
pub const SYSTEM_MULTIPART_DIR: &str = "multipart";
pub const BUCKET_META_DIR: &str = "meta";
pub const BUCKET_VERSIONS_DIR: &str = "versions";
pub const BUCKET_CONFIG_FILE: &str = ".bucket.json";
pub const STATS_FILE: &str = "stats.json";
pub const ETAG_INDEX_FILE: &str = "etag_index.json";
pub const INDEX_FILE: &str = "_index.json";
pub const MANIFEST_FILE: &str = "manifest.json";

// Directory names that must never surface as user-visible objects.
pub const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

// S3 protocol defaults.
pub const DEFAULT_REGION: &str = "us-east-1";
pub const AWS_SERVICE: &str = "s3";

// Listing and streaming tuning knobs.
pub const DEFAULT_MAX_KEYS: usize = 1000;
// S3 caps object keys at 1024 bytes.
pub const DEFAULT_OBJECT_KEY_MAX_BYTES: usize = 1024;
pub const DEFAULT_CHUNK_SIZE: usize = 65536;
pub const STREAM_CHUNK_SIZE: usize = 1_048_576;
|
||||
233
crates/myfsio-common/src/error.rs
Normal file
233
crates/myfsio-common/src/error.rs
Normal file
@@ -0,0 +1,233 @@
|
||||
use std::fmt;
|
||||
|
||||
/// The S3 error codes this server can return.
///
/// Each variant maps to a wire-format code string, an HTTP status, and a
/// default human-readable message — see the `impl` block below.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S3ErrorCode {
    AccessDenied,
    BadDigest,
    BucketAlreadyExists,
    BucketNotEmpty,
    EntityTooLarge,
    InternalError,
    InvalidAccessKeyId,
    InvalidArgument,
    InvalidBucketName,
    InvalidKey,
    InvalidPolicyDocument,
    InvalidRange,
    InvalidRequest,
    InvalidTag,
    MalformedXML,
    MethodNotAllowed,
    NoSuchBucket,
    NoSuchKey,
    NoSuchUpload,
    NoSuchVersion,
    NoSuchTagSet,
    PreconditionFailed,
    // Not an S3 error per se: used to signal a 304 conditional-GET result.
    NotModified,
    QuotaExceeded,
    SignatureDoesNotMatch,
    SlowDown,
}
|
||||
|
||||
impl S3ErrorCode {
    /// HTTP status code for this error, matching AWS S3's mappings.
    pub fn http_status(&self) -> u16 {
        match self {
            Self::AccessDenied => 403,
            Self::BadDigest => 400,
            Self::BucketAlreadyExists => 409,
            Self::BucketNotEmpty => 409,
            Self::EntityTooLarge => 413,
            Self::InternalError => 500,
            Self::InvalidAccessKeyId => 403,
            Self::InvalidArgument => 400,
            Self::InvalidBucketName => 400,
            Self::InvalidKey => 400,
            Self::InvalidPolicyDocument => 400,
            Self::InvalidRange => 416,
            Self::InvalidRequest => 400,
            Self::InvalidTag => 400,
            Self::MalformedXML => 400,
            Self::MethodNotAllowed => 405,
            Self::NoSuchBucket => 404,
            Self::NoSuchKey => 404,
            Self::NoSuchUpload => 404,
            Self::NoSuchVersion => 404,
            Self::NoSuchTagSet => 404,
            Self::PreconditionFailed => 412,
            Self::NotModified => 304,
            Self::QuotaExceeded => 403,
            Self::SignatureDoesNotMatch => 403,
            Self::SlowDown => 429,
        }
    }

    /// Wire-format code string placed in the XML `<Code>` element.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::AccessDenied => "AccessDenied",
            Self::BadDigest => "BadDigest",
            Self::BucketAlreadyExists => "BucketAlreadyExists",
            Self::BucketNotEmpty => "BucketNotEmpty",
            Self::EntityTooLarge => "EntityTooLarge",
            Self::InternalError => "InternalError",
            Self::InvalidAccessKeyId => "InvalidAccessKeyId",
            Self::InvalidArgument => "InvalidArgument",
            Self::InvalidBucketName => "InvalidBucketName",
            Self::InvalidKey => "InvalidKey",
            Self::InvalidPolicyDocument => "InvalidPolicyDocument",
            Self::InvalidRange => "InvalidRange",
            Self::InvalidRequest => "InvalidRequest",
            Self::InvalidTag => "InvalidTag",
            Self::MalformedXML => "MalformedXML",
            Self::MethodNotAllowed => "MethodNotAllowed",
            Self::NoSuchBucket => "NoSuchBucket",
            Self::NoSuchKey => "NoSuchKey",
            Self::NoSuchUpload => "NoSuchUpload",
            Self::NoSuchVersion => "NoSuchVersion",
            Self::NoSuchTagSet => "NoSuchTagSet",
            Self::PreconditionFailed => "PreconditionFailed",
            Self::NotModified => "NotModified",
            Self::QuotaExceeded => "QuotaExceeded",
            Self::SignatureDoesNotMatch => "SignatureDoesNotMatch",
            Self::SlowDown => "SlowDown",
        }
    }

    /// Human-readable default `<Message>` text, mirroring AWS's wording.
    pub fn default_message(&self) -> &'static str {
        match self {
            Self::AccessDenied => "Access Denied",
            Self::BadDigest => "The Content-MD5 or checksum value you specified did not match what we received",
            Self::BucketAlreadyExists => "The requested bucket name is not available",
            Self::BucketNotEmpty => "The bucket you tried to delete is not empty",
            Self::EntityTooLarge => "Your proposed upload exceeds the maximum allowed size",
            Self::InternalError => "We encountered an internal error. Please try again.",
            Self::InvalidAccessKeyId => "The access key ID you provided does not exist",
            Self::InvalidArgument => "Invalid argument",
            Self::InvalidBucketName => "The specified bucket is not valid",
            Self::InvalidKey => "The specified key is not valid",
            Self::InvalidPolicyDocument => "The content of the form does not meet the conditions specified in the policy document",
            Self::InvalidRange => "The requested range is not satisfiable",
            Self::InvalidRequest => "Invalid request",
            Self::InvalidTag => "The Tagging header is invalid",
            Self::MalformedXML => "The XML you provided was not well-formed",
            Self::MethodNotAllowed => "The specified method is not allowed against this resource",
            Self::NoSuchBucket => "The specified bucket does not exist",
            Self::NoSuchKey => "The specified key does not exist",
            Self::NoSuchUpload => "The specified multipart upload does not exist",
            Self::NoSuchVersion => "The specified version does not exist",
            Self::NoSuchTagSet => "The TagSet does not exist",
            Self::PreconditionFailed => "At least one of the preconditions you specified did not hold",
            Self::NotModified => "Not Modified",
            Self::QuotaExceeded => "The bucket quota has been exceeded",
            Self::SignatureDoesNotMatch => "The request signature we calculated does not match the signature you provided",
            Self::SlowDown => "Please reduce your request rate",
        }
    }
}
|
||||
|
||||
// Display renders the wire-format code string (e.g. "NoSuchKey").
impl fmt::Display for S3ErrorCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
|
||||
|
||||
/// A fully-formed S3 error response: code plus the message, resource and
/// request-id fields that appear in the XML error body.
#[derive(Debug, Clone)]
pub struct S3Error {
    pub code: S3ErrorCode,
    pub message: String,
    // Path of the resource the request targeted; may be empty.
    pub resource: String,
    // Server-assigned id for correlating logs; may be empty.
    pub request_id: String,
}
|
||||
|
||||
impl S3Error {
|
||||
pub fn new(code: S3ErrorCode, message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
code,
|
||||
message: message.into(),
|
||||
resource: String::new(),
|
||||
request_id: String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_code(code: S3ErrorCode) -> Self {
|
||||
Self::new(code, code.default_message())
|
||||
}
|
||||
|
||||
pub fn with_resource(mut self, resource: impl Into<String>) -> Self {
|
||||
self.resource = resource.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_request_id(mut self, request_id: impl Into<String>) -> Self {
|
||||
self.request_id = request_id.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn http_status(&self) -> u16 {
|
||||
self.code.http_status()
|
||||
}
|
||||
|
||||
pub fn to_xml(&self) -> String {
|
||||
format!(
|
||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
|
||||
<Error>\
|
||||
<Code>{}</Code>\
|
||||
<Message>{}</Message>\
|
||||
<Resource>{}</Resource>\
|
||||
<RequestId>{}</RequestId>\
|
||||
</Error>",
|
||||
self.code.as_str(),
|
||||
xml_escape(&self.message),
|
||||
xml_escape(&self.resource),
|
||||
xml_escape(&self.request_id),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for S3Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}: {}", self.code, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: S3Error has no underlying `source()` error to expose.
impl std::error::Error for S3Error {}
|
||||
|
||||
/// Escapes the five XML special characters so user-supplied text can be
/// embedded safely in an XML document.
///
/// The previous version replaced each special character with itself (the
/// escape entities had been lost), making it a no-op and allowing malformed /
/// injectable XML output. This version performs real escaping in one pass
/// instead of five chained `replace` allocations.
fn xml_escape(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            '"' => out.push_str("&quot;"),
            '\'' => out.push_str("&apos;"),
            _ => out.push(c),
        }
    }
    out
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_error_codes() {
|
||||
assert_eq!(S3ErrorCode::NoSuchKey.http_status(), 404);
|
||||
assert_eq!(S3ErrorCode::AccessDenied.http_status(), 403);
|
||||
assert_eq!(S3ErrorCode::NoSuchBucket.as_str(), "NoSuchBucket");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_to_xml() {
|
||||
let err = S3Error::from_code(S3ErrorCode::NoSuchKey)
|
||||
.with_resource("/test-bucket/test-key")
|
||||
.with_request_id("abc123");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<Code>NoSuchKey</Code>"));
|
||||
assert!(xml.contains("<Resource>/test-bucket/test-key</Resource>"));
|
||||
assert!(xml.contains("<RequestId>abc123</RequestId>"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_xml_escape() {
|
||||
let err = S3Error::new(S3ErrorCode::InvalidArgument, "key <test> & \"value\"")
|
||||
.with_resource("/bucket/key&");
|
||||
let xml = err.to_xml();
|
||||
assert!(xml.contains("<test>"));
|
||||
assert!(xml.contains("&"));
|
||||
}
|
||||
}
|
||||
3
crates/myfsio-common/src/lib.rs
Normal file
3
crates/myfsio-common/src/lib.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
// Crate root for myfsio-common: exposes the crate's public modules.
pub mod constants;
pub mod error;
pub mod types;
|
||||
178
crates/myfsio-common/src/types.rs
Normal file
178
crates/myfsio-common/src/types.rs
Normal file
@@ -0,0 +1,178 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Metadata describing a stored object.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMeta {
    // Full object key within its bucket.
    pub key: String,
    // Object size in bytes.
    pub size: u64,
    pub last_modified: DateTime<Utc>,
    // Entity tag; `None` when not yet computed.
    pub etag: Option<String>,
    pub content_type: Option<String>,
    // Storage class label; defaults to "STANDARD" (see `ObjectMeta::new`).
    pub storage_class: Option<String>,
    // User-defined metadata key/value pairs.
    pub metadata: HashMap<String, String>,
}
|
||||
|
||||
impl ObjectMeta {
|
||||
pub fn new(key: String, size: u64, last_modified: DateTime<Utc>) -> Self {
|
||||
Self {
|
||||
key,
|
||||
size,
|
||||
last_modified,
|
||||
etag: None,
|
||||
content_type: None,
|
||||
storage_class: Some("STANDARD".to_string()),
|
||||
metadata: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Metadata describing a bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BucketMeta {
    pub name: String,
    pub creation_date: DateTime<Utc>,
}

/// Usage counters for a bucket, split between current objects and
/// non-current (archived) versions.
#[derive(Debug, Clone, Default)]
pub struct BucketStats {
    // Current object count.
    pub objects: u64,
    // Bytes held by current objects.
    pub bytes: u64,
    // Non-current version count.
    pub version_count: u64,
    // Bytes held by non-current versions.
    pub version_bytes: u64,
}
|
||||
|
||||
impl BucketStats {
|
||||
pub fn total_objects(&self) -> u64 {
|
||||
self.objects + self.version_count
|
||||
}
|
||||
|
||||
pub fn total_bytes(&self) -> u64 {
|
||||
self.bytes + self.version_bytes
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of a flat (non-delimited) object listing.
#[derive(Debug, Clone)]
pub struct ListObjectsResult {
    pub objects: Vec<ObjectMeta>,
    // True when more results exist beyond `max_keys`.
    pub is_truncated: bool,
    // Token to resume a truncated listing.
    pub next_continuation_token: Option<String>,
}

/// Result of a delimited ("shallow") listing: direct objects plus the
/// common prefixes that act as virtual directories.
#[derive(Debug, Clone)]
pub struct ShallowListResult {
    pub objects: Vec<ObjectMeta>,
    pub common_prefixes: Vec<String>,
    pub is_truncated: bool,
    pub next_continuation_token: Option<String>,
}

/// Parameters for a flat object listing.
#[derive(Debug, Clone, Default)]
pub struct ListParams {
    pub max_keys: usize,
    pub continuation_token: Option<String>,
    pub prefix: Option<String>,
    pub start_after: Option<String>,
}

/// Parameters for a delimited ("shallow") listing.
#[derive(Debug, Clone, Default)]
pub struct ShallowListParams {
    pub prefix: String,
    pub delimiter: String,
    pub max_keys: usize,
    pub continuation_token: Option<String>,
}
|
||||
|
||||
/// Stored metadata for one part of a multipart upload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PartMeta {
    pub part_number: u32,
    pub etag: String,
    pub size: u64,
    pub last_modified: Option<DateTime<Utc>>,
}

/// Part reference supplied by a client when completing a multipart upload.
#[derive(Debug, Clone)]
pub struct PartInfo {
    pub part_number: u32,
    pub etag: String,
}

/// An in-progress multipart upload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultipartUploadInfo {
    pub upload_id: String,
    pub key: String,
    pub initiated: DateTime<Utc>,
}

/// A single version of an object in a versioned bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionInfo {
    pub version_id: String,
    pub key: String,
    pub size: u64,
    pub last_modified: DateTime<Utc>,
    pub etag: Option<String>,
    pub is_latest: bool,
    // Defaults to false for records persisted before this field existed.
    #[serde(default)]
    pub is_delete_marker: bool,
}

/// A single key/value tag.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
    pub key: String,
    pub value: String,
}
|
||||
|
||||
/// Per-bucket configuration. Every field is `#[serde(default)]` so configs
/// persisted by older versions deserialize cleanly; the feature-specific
/// sub-configurations are kept as raw JSON values and interpreted by their
/// respective subsystems.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketConfig {
    #[serde(default)]
    pub versioning_enabled: bool,
    #[serde(default)]
    pub tags: Vec<Tag>,
    #[serde(default)]
    pub cors: Option<serde_json::Value>,
    #[serde(default)]
    pub encryption: Option<serde_json::Value>,
    #[serde(default)]
    pub lifecycle: Option<serde_json::Value>,
    #[serde(default)]
    pub website: Option<serde_json::Value>,
    #[serde(default)]
    pub quota: Option<QuotaConfig>,
    #[serde(default)]
    pub acl: Option<serde_json::Value>,
    #[serde(default)]
    pub notification: Option<serde_json::Value>,
    #[serde(default)]
    pub logging: Option<serde_json::Value>,
    #[serde(default)]
    pub object_lock: Option<serde_json::Value>,
    #[serde(default)]
    pub policy: Option<serde_json::Value>,
    #[serde(default)]
    pub replication: Option<serde_json::Value>,
}

/// Bucket quota limits; `None` means unlimited for that dimension.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuotaConfig {
    pub max_bytes: Option<u64>,
    pub max_objects: Option<u64>,
}
|
||||
|
||||
/// An authenticated caller identity resolved from request credentials.
#[derive(Debug, Clone)]
pub struct Principal {
    pub access_key: String,
    pub user_id: String,
    pub display_name: String,
    // Grants administrative privileges when true.
    pub is_admin: bool,
}
|
||||
|
||||
impl Principal {
|
||||
pub fn new(access_key: String, user_id: String, display_name: String, is_admin: bool) -> Self {
|
||||
Self {
|
||||
access_key,
|
||||
user_id,
|
||||
display_name,
|
||||
is_admin,
|
||||
}
|
||||
}
|
||||
}
|
||||
24
crates/myfsio-crypto/Cargo.toml
Normal file
24
crates/myfsio-crypto/Cargo.toml
Normal file
@@ -0,0 +1,24 @@
|
||||
[package]
name = "myfsio-crypto"
# Version and edition are inherited from the workspace manifest.
version.workspace = true
edition.workspace = true

[dependencies]
myfsio-common = { path = "../myfsio-common" }
# Digest algorithms used by the hashing module.
md-5 = { workspace = true }
sha2 = { workspace = true }
hex = { workspace = true }
# AEAD cipher and per-chunk nonce derivation used by the encryption modules.
aes-gcm = { workspace = true }
hkdf = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
base64 = { workspace = true }
rand = "0.8"

[dev-dependencies]
# Test-only: async test macros and temp dirs for file round-trips.
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
tempfile = "3"
||||
253
crates/myfsio-crypto/src/aes_gcm.rs
Normal file
253
crates/myfsio-crypto/src/aes_gcm.rs
Normal file
@@ -0,0 +1,253 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use hkdf::Hkdf;
|
||||
use sha2::Sha256;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Seek, SeekFrom, Write};
|
||||
use std::path::Path;
|
||||
use thiserror::Error;
|
||||
|
||||
// Plaintext bytes encrypted per chunk when the caller does not override it.
const DEFAULT_CHUNK_SIZE: usize = 65536;
// Width of the big-endian u32 fields in the container format
// (the chunk-count header and each per-chunk ciphertext length).
const HEADER_SIZE: usize = 4;
|
||||
|
||||
/// Errors produced by the chunked AES-GCM encryption routines.
#[derive(Debug, Error)]
pub enum CryptoError {
    /// Underlying file I/O failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Key was not exactly 32 bytes (AES-256).
    #[error("Invalid key size: expected 32 bytes, got {0}")]
    InvalidKeySize(usize),
    /// Base nonce was not exactly 12 bytes (GCM standard nonce length).
    #[error("Invalid nonce size: expected 12 bytes, got {0}")]
    InvalidNonceSize(usize),
    /// AEAD seal operation failed.
    #[error("Encryption failed: {0}")]
    EncryptionFailed(String),
    /// Authentication/decryption failed; carries the zero-based chunk index.
    #[error("Decryption failed at chunk {0}")]
    DecryptionFailed(u32),
    /// HKDF output expansion failed while deriving a per-chunk nonce.
    #[error("HKDF expand failed: {0}")]
    HkdfFailed(String),
}
|
||||
|
||||
/// Fills `buf` as far as possible from `reader`, retrying on `Interrupted`.
/// Returns the number of bytes actually read, which is less than `buf.len()`
/// only when end-of-stream was reached.
fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        let n = match reader.read(&mut buf[total..]) {
            // EOF: return the partial fill.
            Ok(0) => break,
            Ok(n) => n,
            // Transient signal interruption: retry the read.
            Err(e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        };
        total += n;
    }
    Ok(total)
}
|
||||
|
||||
/// Derives a unique 12-byte GCM nonce for one chunk.
///
/// HKDF-SHA256 is instantiated with the base nonce as salt and the fixed
/// label `"chunk_nonce"` as input keying material; the big-endian chunk index
/// is fed to `expand` as the info parameter. Distinct indices therefore yield
/// distinct nonces, so the (key, nonce) pair is never reused across chunks.
/// NOTE: any change here breaks decryption of previously written files.
fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], CryptoError> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| CryptoError::HkdfFailed(e.to_string()))?;
    Ok(okm)
}
|
||||
|
||||
pub fn encrypt_stream_chunked(
|
||||
input_path: &Path,
|
||||
output_path: &Path,
|
||||
key: &[u8],
|
||||
base_nonce: &[u8],
|
||||
chunk_size: Option<usize>,
|
||||
) -> Result<u32, CryptoError> {
|
||||
if key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(key.len()));
|
||||
}
|
||||
if base_nonce.len() != 12 {
|
||||
return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
|
||||
}
|
||||
|
||||
let chunk_size = chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
|
||||
let key_arr: [u8; 32] = key.try_into().unwrap();
|
||||
let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
|
||||
let mut infile = File::open(input_path)?;
|
||||
let mut outfile = File::create(output_path)?;
|
||||
|
||||
outfile.write_all(&[0u8; 4])?;
|
||||
|
||||
let mut buf = vec![0u8; chunk_size];
|
||||
let mut chunk_index: u32 = 0;
|
||||
|
||||
loop {
|
||||
let n = read_exact_chunk(&mut infile, &mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let encrypted = cipher
|
||||
.encrypt(nonce, &buf[..n])
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let size = encrypted.len() as u32;
|
||||
outfile.write_all(&size.to_be_bytes())?;
|
||||
outfile.write_all(&encrypted)?;
|
||||
|
||||
chunk_index += 1;
|
||||
}
|
||||
|
||||
outfile.seek(SeekFrom::Start(0))?;
|
||||
outfile.write_all(&chunk_index.to_be_bytes())?;
|
||||
|
||||
Ok(chunk_index)
|
||||
}
|
||||
|
||||
/// Decrypts a file produced by `encrypt_stream_chunked` back to plaintext,
/// returning the number of chunks processed.
///
/// Reads the 4-byte big-endian chunk-count header, then each length-prefixed
/// ciphertext chunk, authenticating it under its derived per-chunk nonce.
///
/// # Errors
/// `InvalidKeySize` / `InvalidNonceSize` for malformed parameters, `Io` for
/// file or truncation failures, `DecryptionFailed(i)` when chunk `i` fails
/// authentication (wrong key/nonce or tampered data).
pub fn decrypt_stream_chunked(
    input_path: &Path,
    output_path: &Path,
    key: &[u8],
    base_nonce: &[u8],
) -> Result<u32, CryptoError> {
    if key.len() != 32 {
        return Err(CryptoError::InvalidKeySize(key.len()));
    }
    if base_nonce.len() != 12 {
        return Err(CryptoError::InvalidNonceSize(base_nonce.len()));
    }

    // Lengths were validated above, so these conversions cannot fail.
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();
    let cipher = Aes256Gcm::new(&key_arr.into());

    let mut infile = File::open(input_path)?;
    let mut outfile = File::create(output_path)?;

    let mut header = [0u8; HEADER_SIZE];
    infile.read_exact(&mut header)?;
    let chunk_count = u32::from_be_bytes(header);

    let mut size_buf = [0u8; HEADER_SIZE];
    for chunk_index in 0..chunk_count {
        infile.read_exact(&mut size_buf)?;
        let chunk_size = u32::from_be_bytes(size_buf) as usize;

        // NOTE(review): chunk_size comes straight from the file; a corrupted
        // or hostile header can request an allocation of up to ~4 GiB here —
        // consider bounding it against a sane maximum.
        let mut encrypted = vec![0u8; chunk_size];
        infile.read_exact(&mut encrypted)?;

        let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)?;
        let nonce = Nonce::from_slice(&nonce_bytes);

        let decrypted = cipher
            .decrypt(nonce, encrypted.as_ref())
            .map_err(|_| CryptoError::DecryptionFailed(chunk_index))?;

        outfile.write_all(&decrypted)?;
    }

    Ok(chunk_count)
}
|
||||
|
||||
pub async fn encrypt_stream_chunked_async(
|
||||
input_path: &Path,
|
||||
output_path: &Path,
|
||||
key: &[u8],
|
||||
base_nonce: &[u8],
|
||||
chunk_size: Option<usize>,
|
||||
) -> Result<u32, CryptoError> {
|
||||
let input_path = input_path.to_owned();
|
||||
let output_path = output_path.to_owned();
|
||||
let key = key.to_vec();
|
||||
let base_nonce = base_nonce.to_vec();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
encrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce, chunk_size)
|
||||
})
|
||||
.await
|
||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
|
||||
}
|
||||
|
||||
pub async fn decrypt_stream_chunked_async(
|
||||
input_path: &Path,
|
||||
output_path: &Path,
|
||||
key: &[u8],
|
||||
base_nonce: &[u8],
|
||||
) -> Result<u32, CryptoError> {
|
||||
let input_path = input_path.to_owned();
|
||||
let output_path = output_path.to_owned();
|
||||
let key = key.to_vec();
|
||||
let base_nonce = base_nonce.to_vec();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
decrypt_stream_chunked(&input_path, &output_path, &key, &base_nonce)
|
||||
})
|
||||
.await
|
||||
.map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as IoWrite;

    /// Round-trips a short file with a tiny chunk size (16 B) to force
    /// multiple chunks, and checks plaintext and chunk counts match.
    #[test]
    fn test_encrypt_decrypt_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        let data = b"Hello, this is a test of AES-256-GCM chunked encryption!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];

        let chunks = encrypt_stream_chunked(&input, &encrypted, &key, &nonce, Some(16)).unwrap();
        assert!(chunks > 0);

        let chunks2 = decrypt_stream_chunked(&encrypted, &decrypted, &key, &nonce).unwrap();
        assert_eq!(chunks, chunks2);

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// A 16-byte key must be rejected with `InvalidKeySize(16)`.
    #[test]
    fn test_invalid_key_size() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"test")
            .unwrap();

        let result = encrypt_stream_chunked(
            &input,
            &dir.path().join("out"),
            &[0u8; 16],
            &[0u8; 12],
            None,
        );
        assert!(matches!(result, Err(CryptoError::InvalidKeySize(16))));
    }

    /// Decrypting with a different key must fail GCM authentication.
    #[test]
    fn test_wrong_key_fails_decrypt() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("input.bin");
        let encrypted = dir.path().join("encrypted.bin");
        let decrypted = dir.path().join("decrypted.bin");

        std::fs::File::create(&input)
            .unwrap()
            .write_all(b"secret data")
            .unwrap();

        let key = [0x42u8; 32];
        let nonce = [0x01u8; 12];
        encrypt_stream_chunked(&input, &encrypted, &key, &nonce, None).unwrap();

        let wrong_key = [0x43u8; 32];
        let result = decrypt_stream_chunked(&encrypted, &decrypted, &wrong_key, &nonce);
        assert!(matches!(result, Err(CryptoError::DecryptionFailed(_))));
    }
}
|
||||
404
crates/myfsio-crypto/src/encryption.rs
Normal file
404
crates/myfsio-crypto/src/encryption.rs
Normal file
@@ -0,0 +1,404 @@
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use rand::RngCore;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use crate::aes_gcm::{decrypt_stream_chunked, encrypt_stream_chunked, CryptoError};
|
||||
use crate::kms::KmsService;
|
||||
|
||||
/// Server-side encryption flavor requested for an object.
#[derive(Debug, Clone, PartialEq)]
pub enum SseAlgorithm {
    /// SSE-S3: data key wrapped with the server's master key.
    Aes256,
    /// SSE-KMS: data key wrapped by the KMS service.
    AwsKms,
    /// SSE-C: key supplied by the customer per request.
    CustomerProvided,
}
|
||||
|
||||
impl SseAlgorithm {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
SseAlgorithm::Aes256 => "AES256",
|
||||
SseAlgorithm::AwsKms => "aws:kms",
|
||||
SseAlgorithm::CustomerProvided => "AES256",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-request encryption parameters resolved from the client's headers.
#[derive(Debug, Clone)]
pub struct EncryptionContext {
    pub algorithm: SseAlgorithm,
    // Required for SSE-KMS.
    pub kms_key_id: Option<String>,
    // Required (32 bytes) for SSE-C.
    pub customer_key: Option<Vec<u8>>,
}

/// Encryption bookkeeping persisted alongside an object's metadata,
/// sufficient (plus the master key / KMS / customer key) to decrypt it later.
#[derive(Debug, Clone)]
pub struct EncryptionMetadata {
    // Wire-format algorithm name ("AES256" or "aws:kms").
    pub algorithm: String,
    // Base64-encoded 12-byte base nonce.
    pub nonce: String,
    // Base64-encoded wrapped data key; absent for SSE-C.
    pub encrypted_data_key: Option<String>,
    // KMS key id; present only for SSE-KMS.
    pub kms_key_id: Option<String>,
}
|
||||
|
||||
impl EncryptionMetadata {
|
||||
pub fn to_metadata_map(&self) -> HashMap<String, String> {
|
||||
let mut map = HashMap::new();
|
||||
map.insert(
|
||||
"x-amz-server-side-encryption".to_string(),
|
||||
self.algorithm.clone(),
|
||||
);
|
||||
map.insert("x-amz-encryption-nonce".to_string(), self.nonce.clone());
|
||||
if let Some(ref dk) = self.encrypted_data_key {
|
||||
map.insert("x-amz-encrypted-data-key".to_string(), dk.clone());
|
||||
}
|
||||
if let Some(ref kid) = self.kms_key_id {
|
||||
map.insert("x-amz-encryption-key-id".to_string(), kid.clone());
|
||||
}
|
||||
map
|
||||
}
|
||||
|
||||
pub fn from_metadata(meta: &HashMap<String, String>) -> Option<Self> {
|
||||
let algorithm = meta.get("x-amz-server-side-encryption")?;
|
||||
let nonce = meta.get("x-amz-encryption-nonce")?;
|
||||
Some(Self {
|
||||
algorithm: algorithm.clone(),
|
||||
nonce: nonce.clone(),
|
||||
encrypted_data_key: meta.get("x-amz-encrypted-data-key").cloned(),
|
||||
kms_key_id: meta.get("x-amz-encryption-key-id").cloned(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_encrypted(meta: &HashMap<String, String>) -> bool {
|
||||
meta.contains_key("x-amz-server-side-encryption")
|
||||
}
|
||||
|
||||
pub fn clean_metadata(meta: &mut HashMap<String, String>) {
|
||||
meta.remove("x-amz-server-side-encryption");
|
||||
meta.remove("x-amz-encryption-nonce");
|
||||
meta.remove("x-amz-encrypted-data-key");
|
||||
meta.remove("x-amz-encryption-key-id");
|
||||
}
|
||||
}
|
||||
|
||||
/// Server-side encryption engine: generates per-object data keys and wraps
/// them with the master key (SSE-S3), via KMS (SSE-KMS), or uses a
/// caller-supplied key (SSE-C).
pub struct EncryptionService {
    // 256-bit key used to wrap/unwrap per-object data keys for SSE-S3.
    master_key: [u8; 32],
    // Optional KMS backend, required for the "aws:kms" algorithm.
    kms: Option<std::sync::Arc<KmsService>>,
    // Streaming-encryption tunables.
    config: EncryptionConfig,
}

/// Tunables for streaming encryption.
#[derive(Debug, Clone, Copy)]
pub struct EncryptionConfig {
    // Plaintext bytes per AEAD chunk.
    pub chunk_size: usize,
}
|
||||
|
||||
impl Default for EncryptionConfig {
|
||||
fn default() -> Self {
|
||||
Self { chunk_size: 65_536 }
|
||||
}
|
||||
}
|
||||
|
||||
impl EncryptionService {
    /// Creates a service with default configuration (64 KiB chunks).
    pub fn new(master_key: [u8; 32], kms: Option<std::sync::Arc<KmsService>>) -> Self {
        Self::with_config(master_key, kms, EncryptionConfig::default())
    }

    /// Creates a service with an explicit configuration.
    pub fn with_config(
        master_key: [u8; 32],
        kms: Option<std::sync::Arc<KmsService>>,
        config: EncryptionConfig,
    ) -> Self {
        Self {
            master_key,
            kms,
            config,
        }
    }

    /// Generates a fresh random (data key, base nonce) pair for one object.
    pub fn generate_data_key(&self) -> ([u8; 32], [u8; 12]) {
        let mut data_key = [0u8; 32];
        let mut nonce = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut data_key);
        rand::thread_rng().fill_bytes(&mut nonce);
        (data_key, nonce)
    }

    /// Wraps a data key under the master key (SSE-S3 path).
    ///
    /// Output is base64 of `wrap_nonce (12 bytes) || AES-256-GCM ciphertext`;
    /// a fresh random wrap nonce is used each call.
    pub fn wrap_data_key(&self, data_key: &[u8; 32]) -> Result<String, CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let cipher = Aes256Gcm::new((&self.master_key).into());
        let mut nonce_bytes = [0u8; 12];
        rand::thread_rng().fill_bytes(&mut nonce_bytes);
        let nonce = Nonce::from_slice(&nonce_bytes);

        let encrypted = cipher
            .encrypt(nonce, data_key.as_slice())
            .map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;

        // Prepend the wrap nonce so unwrap can recover it.
        let mut combined = Vec::with_capacity(12 + encrypted.len());
        combined.extend_from_slice(&nonce_bytes);
        combined.extend_from_slice(&encrypted);
        Ok(B64.encode(&combined))
    }

    /// Unwraps a data key previously produced by [`Self::wrap_data_key`].
    pub fn unwrap_data_key(&self, wrapped_b64: &str) -> Result<[u8; 32], CryptoError> {
        use aes_gcm::aead::Aead;
        use aes_gcm::{Aes256Gcm, KeyInit, Nonce};

        let combined = B64.decode(wrapped_b64).map_err(|e| {
            CryptoError::EncryptionFailed(format!("Bad wrapped key encoding: {}", e))
        })?;
        // Need at least the 12-byte wrap nonce.
        if combined.len() < 12 {
            return Err(CryptoError::EncryptionFailed(
                "Wrapped key too short".to_string(),
            ));
        }

        let (nonce_bytes, ciphertext) = combined.split_at(12);
        let cipher = Aes256Gcm::new((&self.master_key).into());
        let nonce = Nonce::from_slice(nonce_bytes);

        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_| CryptoError::DecryptionFailed(0))?;

        if plaintext.len() != 32 {
            return Err(CryptoError::InvalidKeySize(plaintext.len()));
        }
        let mut key = [0u8; 32];
        key.copy_from_slice(&plaintext);
        Ok(key)
    }

    /// Encrypts the file at `input_path` into `output_path` according to
    /// `ctx`, returning the metadata needed to decrypt it later.
    ///
    /// SSE-S3 wraps the generated data key with the master key; SSE-KMS wraps
    /// it via the KMS service; SSE-C discards the generated key and encrypts
    /// with the customer's 32-byte key instead (no wrapped key is stored).
    pub async fn encrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        ctx: &EncryptionContext,
    ) -> Result<EncryptionMetadata, CryptoError> {
        let (data_key, nonce) = self.generate_data_key();

        let (encrypted_data_key, kms_key_id) = match ctx.algorithm {
            SseAlgorithm::Aes256 => {
                let wrapped = self.wrap_data_key(&data_key)?;
                (Some(wrapped), None)
            }
            SseAlgorithm::AwsKms => {
                let kms = self
                    .kms
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
                let kid = ctx
                    .kms_key_id
                    .as_ref()
                    .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID".into()))?;
                let ciphertext = kms.encrypt_data(kid, &data_key).await?;
                (Some(B64.encode(&ciphertext)), Some(kid.clone()))
            }
            // SSE-C stores no wrapped key: the customer must re-supply it.
            SseAlgorithm::CustomerProvided => (None, None),
        };

        // For SSE-C the customer key replaces the generated data key (the
        // random base nonce is still used).
        let actual_key = if ctx.algorithm == SseAlgorithm::CustomerProvided {
            let ck = ctx
                .customer_key
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("No customer key provided".into()))?;
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else {
            data_key
        };

        // Owned copies for the 'static blocking closure.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        let ak = actual_key;
        let n = nonce;
        let chunk_size = self.config.chunk_size;
        tokio::task::spawn_blocking(move || {
            encrypt_stream_chunked(&ip, &op, &ak, &n, Some(chunk_size))
        })
        .await
        .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(EncryptionMetadata {
            algorithm: ctx.algorithm.as_str().to_string(),
            nonce: B64.encode(nonce),
            encrypted_data_key,
            kms_key_id,
        })
    }

    /// Decrypts the file at `input_path` into `output_path` using the stored
    /// `enc_meta`.
    ///
    /// Key resolution order: an explicitly supplied `customer_key` (SSE-C)
    /// wins; otherwise "aws:kms" metadata routes through KMS; otherwise the
    /// wrapped key is unwrapped with the master key (SSE-S3).
    pub async fn decrypt_object(
        &self,
        input_path: &Path,
        output_path: &Path,
        enc_meta: &EncryptionMetadata,
        customer_key: Option<&[u8]>,
    ) -> Result<(), CryptoError> {
        let nonce_bytes = B64
            .decode(&enc_meta.nonce)
            .map_err(|e| CryptoError::EncryptionFailed(format!("Bad nonce encoding: {}", e)))?;
        if nonce_bytes.len() != 12 {
            return Err(CryptoError::InvalidNonceSize(nonce_bytes.len()));
        }

        let data_key: [u8; 32] = if let Some(ck) = customer_key {
            if ck.len() != 32 {
                return Err(CryptoError::InvalidKeySize(ck.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(ck);
            k
        } else if enc_meta.algorithm == "aws:kms" {
            let kms = self
                .kms
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("KMS not available".into()))?;
            let kid = enc_meta
                .kms_key_id
                .as_ref()
                .ok_or_else(|| CryptoError::EncryptionFailed("No KMS key ID in metadata".into()))?;
            let encrypted_dk = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            let ct = B64.decode(encrypted_dk).map_err(|e| {
                CryptoError::EncryptionFailed(format!("Bad data key encoding: {}", e))
            })?;
            let dk = kms.decrypt_data(kid, &ct).await?;
            if dk.len() != 32 {
                return Err(CryptoError::InvalidKeySize(dk.len()));
            }
            let mut k = [0u8; 32];
            k.copy_from_slice(&dk);
            k
        } else {
            let wrapped = enc_meta.encrypted_data_key.as_ref().ok_or_else(|| {
                CryptoError::EncryptionFailed("No encrypted data key in metadata".into())
            })?;
            self.unwrap_data_key(wrapped)?
        };

        // Owned copies for the 'static blocking closure; length was checked
        // above, so the nonce conversion cannot fail.
        let ip = input_path.to_owned();
        let op = output_path.to_owned();
        let nb: [u8; 12] = nonce_bytes.try_into().unwrap();
        tokio::task::spawn_blocking(move || decrypt_stream_chunked(&ip, &op, &data_key, &nb))
            .await
            .map_err(|e| CryptoError::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))??;

        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Fixed master key for deterministic tests.
    fn test_master_key() -> [u8; 32] {
        [0x42u8; 32]
    }

    /// wrap + unwrap must return the original data key.
    #[test]
    fn test_wrap_unwrap_data_key() {
        let svc = EncryptionService::new(test_master_key(), None);
        let dk = [0xAAu8; 32];
        let wrapped = svc.wrap_data_key(&dk).unwrap();
        let unwrapped = svc.unwrap_data_key(&wrapped).unwrap();
        assert_eq!(dk, unwrapped);
    }

    /// SSE-S3 round-trip: metadata carries a wrapped key and the plaintext
    /// survives encrypt + decrypt.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_s3() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-S3 encrypted content for testing!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::Aes256,
            kms_key_id: None,
            customer_key: None,
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert_eq!(meta.algorithm, "AES256");
        assert!(meta.encrypted_data_key.is_some());

        svc.decrypt_object(&encrypted, &decrypted, &meta, None)
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// SSE-C round-trip: no wrapped key is stored; the customer key must be
    /// re-supplied on decrypt.
    #[tokio::test]
    async fn test_encrypt_decrypt_object_sse_c() {
        let dir = tempfile::tempdir().unwrap();
        let input = dir.path().join("plain.bin");
        let encrypted = dir.path().join("enc.bin");
        let decrypted = dir.path().join("dec.bin");

        let data = b"SSE-C encrypted content!";
        std::fs::File::create(&input)
            .unwrap()
            .write_all(data)
            .unwrap();

        let customer_key = [0xBBu8; 32];
        let svc = EncryptionService::new(test_master_key(), None);

        let ctx = EncryptionContext {
            algorithm: SseAlgorithm::CustomerProvided,
            kms_key_id: None,
            customer_key: Some(customer_key.to_vec()),
        };

        let meta = svc.encrypt_object(&input, &encrypted, &ctx).await.unwrap();
        assert!(meta.encrypted_data_key.is_none());

        svc.decrypt_object(&encrypted, &decrypted, &meta, Some(&customer_key))
            .await
            .unwrap();

        let result = std::fs::read(&decrypted).unwrap();
        assert_eq!(result, data);
    }

    /// Metadata survives a to_metadata_map / from_metadata round-trip.
    #[test]
    fn test_encryption_metadata_roundtrip() {
        let meta = EncryptionMetadata {
            algorithm: "AES256".to_string(),
            nonce: "dGVzdG5vbmNlMTI=".to_string(),
            encrypted_data_key: Some("c29tZWtleQ==".to_string()),
            kms_key_id: None,
        };
        let map = meta.to_metadata_map();
        let restored = EncryptionMetadata::from_metadata(&map).unwrap();
        assert_eq!(restored.algorithm, "AES256");
        assert_eq!(restored.nonce, meta.nonce);
        assert_eq!(restored.encrypted_data_key, meta.encrypted_data_key);
    }

    /// is_encrypted keys off the presence of the SSE marker.
    #[test]
    fn test_is_encrypted() {
        let mut meta = HashMap::new();
        assert!(!EncryptionMetadata::is_encrypted(&meta));
        meta.insert(
            "x-amz-server-side-encryption".to_string(),
            "AES256".to_string(),
        );
        assert!(EncryptionMetadata::is_encrypted(&meta));
    }
}
|
||||
138
crates/myfsio-crypto/src/hashing.rs
Normal file
138
crates/myfsio-crypto/src/hashing.rs
Normal file
@@ -0,0 +1,138 @@
|
||||
use md5::{Digest, Md5};
|
||||
use sha2::Sha256;
|
||||
use std::io::Read;
|
||||
use std::path::Path;
|
||||
|
||||
// Read-buffer size (bytes) for streaming file hashing.
const CHUNK_SIZE: usize = 65536;
|
||||
|
||||
pub fn md5_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Md5::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn md5_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn sha256_file(path: &Path) -> std::io::Result<String> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok(format!("{:x}", hasher.finalize()))
|
||||
}
|
||||
|
||||
pub fn sha256_bytes(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
pub fn md5_sha256_file(path: &Path) -> std::io::Result<(String, String)> {
|
||||
let mut file = std::fs::File::open(path)?;
|
||||
let mut md5_hasher = Md5::new();
|
||||
let mut sha_hasher = Sha256::new();
|
||||
let mut buf = vec![0u8; CHUNK_SIZE];
|
||||
loop {
|
||||
let n = file.read(&mut buf)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
md5_hasher.update(&buf[..n]);
|
||||
sha_hasher.update(&buf[..n]);
|
||||
}
|
||||
Ok((
|
||||
format!("{:x}", md5_hasher.finalize()),
|
||||
format!("{:x}", sha_hasher.finalize()),
|
||||
))
|
||||
}
|
||||
|
||||
pub async fn md5_file_async(path: &Path) -> std::io::Result<String> {
|
||||
let path = path.to_owned();
|
||||
tokio::task::spawn_blocking(move || md5_file(&path))
|
||||
.await
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
||||
}
|
||||
|
||||
pub async fn sha256_file_async(path: &Path) -> std::io::Result<String> {
|
||||
let path = path.to_owned();
|
||||
tokio::task::spawn_blocking(move || sha256_file(&path))
|
||||
.await
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
||||
}
|
||||
|
||||
pub async fn md5_sha256_file_async(path: &Path) -> std::io::Result<(String, String)> {
|
||||
let path = path.to_owned();
|
||||
tokio::task::spawn_blocking(move || md5_sha256_file(&path))
|
||||
.await
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    // Digests below are the well-known MD5/SHA-256 values for "" and "hello".

    #[test]
    fn test_md5_bytes() {
        assert_eq!(md5_bytes(b""), "d41d8cd98f00b204e9800998ecf8427e");
        assert_eq!(md5_bytes(b"hello"), "5d41402abc4b2a76b9719d911017c592");
    }

    #[test]
    fn test_sha256_bytes() {
        assert_eq!(
            sha256_bytes(b"hello"),
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    #[test]
    fn test_md5_file() {
        let mut file = tempfile::NamedTempFile::new().unwrap();
        file.write_all(b"hello").unwrap();
        file.flush().unwrap();
        assert_eq!(
            md5_file(file.path()).unwrap(),
            "5d41402abc4b2a76b9719d911017c592"
        );
    }

    #[test]
    fn test_md5_sha256_file() {
        let mut file = tempfile::NamedTempFile::new().unwrap();
        file.write_all(b"hello").unwrap();
        file.flush().unwrap();
        let (md5_hex, sha_hex) = md5_sha256_file(file.path()).unwrap();
        assert_eq!(md5_hex, "5d41402abc4b2a76b9719d911017c592");
        assert_eq!(
            sha_hex,
            "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
        );
    }

    #[tokio::test]
    async fn test_md5_file_async() {
        let mut file = tempfile::NamedTempFile::new().unwrap();
        file.write_all(b"hello").unwrap();
        file.flush().unwrap();
        assert_eq!(
            md5_file_async(file.path()).await.unwrap(),
            "5d41402abc4b2a76b9719d911017c592"
        );
    }
}
|
||||
451
crates/myfsio-crypto/src/kms.rs
Normal file
451
crates/myfsio-crypto/src/kms.rs
Normal file
@@ -0,0 +1,451 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use chrono::{DateTime, Utc};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::aes_gcm::CryptoError;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct KmsKey {
|
||||
#[serde(rename = "KeyId")]
|
||||
pub key_id: String,
|
||||
#[serde(rename = "Arn")]
|
||||
pub arn: String,
|
||||
#[serde(rename = "Description")]
|
||||
pub description: String,
|
||||
#[serde(rename = "CreationDate")]
|
||||
pub creation_date: DateTime<Utc>,
|
||||
#[serde(rename = "Enabled")]
|
||||
pub enabled: bool,
|
||||
#[serde(rename = "KeyState")]
|
||||
pub key_state: String,
|
||||
#[serde(rename = "KeyUsage")]
|
||||
pub key_usage: String,
|
||||
#[serde(rename = "KeySpec")]
|
||||
pub key_spec: String,
|
||||
#[serde(rename = "EncryptedKeyMaterial")]
|
||||
pub encrypted_key_material: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct KmsStore {
|
||||
keys: Vec<KmsKey>,
|
||||
}
|
||||
|
||||
pub struct KmsService {
|
||||
keys_path: PathBuf,
|
||||
master_key: Arc<RwLock<[u8; 32]>>,
|
||||
keys: Arc<RwLock<Vec<KmsKey>>>,
|
||||
}
|
||||
|
||||
impl KmsService {
|
||||
pub async fn new(keys_dir: &Path) -> Result<Self, CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
|
||||
let keys_path = keys_dir.join("kms_keys.json");
|
||||
|
||||
let master_key = Self::load_or_create_master_key(&keys_dir.join("kms_master.key"))?;
|
||||
|
||||
let keys = if keys_path.exists() {
|
||||
let data = std::fs::read_to_string(&keys_path).map_err(CryptoError::Io)?;
|
||||
let store: KmsStore = serde_json::from_str(&data)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(format!("Bad KMS store: {}", e)))?;
|
||||
store.keys
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
keys_path,
|
||||
master_key: Arc::new(RwLock::new(master_key)),
|
||||
keys: Arc::new(RwLock::new(keys)),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_or_create_master_key(path: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
plaintext_key: &[u8],
|
||||
) -> Result<String, CryptoError> {
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext_key)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut combined = Vec::with_capacity(12 + ciphertext.len());
|
||||
combined.extend_from_slice(&nonce_bytes);
|
||||
combined.extend_from_slice(&ciphertext);
|
||||
Ok(B64.encode(&combined))
|
||||
}
|
||||
|
||||
fn decrypt_key_material(
|
||||
master_key: &[u8; 32],
|
||||
encrypted_b64: &str,
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let combined = B64.decode(encrypted_b64).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad key material encoding: {}", e))
|
||||
})?;
|
||||
if combined.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Encrypted key material too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let (nonce_bytes, ciphertext) = combined.split_at(12);
|
||||
let cipher = Aes256Gcm::new(master_key.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
async fn save(&self) -> Result<(), CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let store = KmsStore { keys: keys.clone() };
|
||||
let json = serde_json::to_string_pretty(&store)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
std::fs::write(&self.keys_path, json).map_err(CryptoError::Io)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_key(&self, description: &str) -> Result<KmsKey, CryptoError> {
|
||||
let key_id = uuid::Uuid::new_v4().to_string();
|
||||
let arn = format!("arn:aws:kms:local:000000000000:key/{}", key_id);
|
||||
|
||||
let mut plaintext_key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
let encrypted = Self::encrypt_key_material(&master, &plaintext_key)?;
|
||||
|
||||
let kms_key = KmsKey {
|
||||
key_id: key_id.clone(),
|
||||
arn,
|
||||
description: description.to_string(),
|
||||
creation_date: Utc::now(),
|
||||
enabled: true,
|
||||
key_state: "Enabled".to_string(),
|
||||
key_usage: "ENCRYPT_DECRYPT".to_string(),
|
||||
key_spec: "SYMMETRIC_DEFAULT".to_string(),
|
||||
encrypted_key_material: encrypted,
|
||||
};
|
||||
|
||||
self.keys.write().await.push(kms_key.clone());
|
||||
self.save().await?;
|
||||
Ok(kms_key)
|
||||
}
|
||||
|
||||
pub async fn list_keys(&self) -> Vec<KmsKey> {
|
||||
self.keys.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_key(&self, key_id: &str) -> Option<KmsKey> {
|
||||
let keys = self.keys.read().await;
|
||||
keys.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
pub async fn delete_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
let len_before = keys.len();
|
||||
keys.retain(|k| k.key_id != key_id && k.arn != key_id);
|
||||
let removed = keys.len() < len_before;
|
||||
drop(keys);
|
||||
if removed {
|
||||
self.save().await?;
|
||||
}
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
pub async fn enable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = true;
|
||||
key.key_state = "Enabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disable_key(&self, key_id: &str) -> Result<bool, CryptoError> {
|
||||
let mut keys = self.keys.write().await;
|
||||
if let Some(key) = keys.iter_mut().find(|k| k.key_id == key_id) {
|
||||
key.enabled = false;
|
||||
key.key_state = "Disabled".to_string();
|
||||
drop(keys);
|
||||
self.save().await?;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decrypt_data_key(&self, key_id: &str) -> Result<Vec<u8>, CryptoError> {
|
||||
let keys = self.keys.read().await;
|
||||
let key = keys
|
||||
.iter()
|
||||
.find(|k| k.key_id == key_id || k.arn == key_id)
|
||||
.ok_or_else(|| CryptoError::EncryptionFailed("KMS key not found".to_string()))?;
|
||||
|
||||
if !key.enabled {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"KMS key is disabled".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let master = self.master_key.read().await;
|
||||
Self::decrypt_key_material(&master, &key.encrypted_key_material)
|
||||
}
|
||||
|
||||
pub async fn encrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
plaintext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut result = Vec::with_capacity(12 + ciphertext.len());
|
||||
result.extend_from_slice(&nonce_bytes);
|
||||
result.extend_from_slice(&ciphertext);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub async fn decrypt_data(
|
||||
&self,
|
||||
key_id: &str,
|
||||
ciphertext: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
if ciphertext.len() < 12 {
|
||||
return Err(CryptoError::EncryptionFailed(
|
||||
"Ciphertext too short".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let data_key = self.decrypt_data_key(key_id).await?;
|
||||
if data_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(data_key.len()));
|
||||
}
|
||||
|
||||
let key_arr: [u8; 32] = data_key.try_into().unwrap();
|
||||
let (nonce_bytes, ct) = ciphertext.split_at(12);
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce, ct)
|
||||
.map_err(|_| CryptoError::DecryptionFailed(0))
|
||||
}
|
||||
|
||||
pub async fn generate_data_key(
|
||||
&self,
|
||||
key_id: &str,
|
||||
num_bytes: usize,
|
||||
) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
|
||||
let kms_key = self.decrypt_data_key(key_id).await?;
|
||||
if kms_key.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(kms_key.len()));
|
||||
}
|
||||
|
||||
let mut plaintext_key = vec![0u8; num_bytes];
|
||||
rand::thread_rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let key_arr: [u8; 32] = kms_key.try_into().unwrap();
|
||||
let cipher = Aes256Gcm::new(&key_arr.into());
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::thread_rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let encrypted = cipher
|
||||
.encrypt(nonce, plaintext_key.as_slice())
|
||||
.map_err(|e| CryptoError::EncryptionFailed(e.to_string()))?;
|
||||
|
||||
let mut wrapped = Vec::with_capacity(12 + encrypted.len());
|
||||
wrapped.extend_from_slice(&nonce_bytes);
|
||||
wrapped.extend_from_slice(&encrypted);
|
||||
|
||||
Ok((plaintext_key, wrapped))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn load_or_create_master_key(keys_dir: &Path) -> Result<[u8; 32], CryptoError> {
|
||||
std::fs::create_dir_all(keys_dir).map_err(CryptoError::Io)?;
|
||||
let path = keys_dir.join("master.key");
|
||||
|
||||
if path.exists() {
|
||||
let encoded = std::fs::read_to_string(&path).map_err(CryptoError::Io)?;
|
||||
let decoded = B64.decode(encoded.trim()).map_err(|e| {
|
||||
CryptoError::EncryptionFailed(format!("Bad master key encoding: {}", e))
|
||||
})?;
|
||||
if decoded.len() != 32 {
|
||||
return Err(CryptoError::InvalidKeySize(decoded.len()));
|
||||
}
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&decoded);
|
||||
Ok(key)
|
||||
} else {
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
let encoded = B64.encode(key);
|
||||
std::fs::write(&path, &encoded).map_err(CryptoError::Io)?;
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_create_and_list_keys() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let created = svc.create_key("test key").await.unwrap();
        assert!(created.enabled);
        assert_eq!(created.description, "test key");
        assert!(!created.key_id.is_empty());

        let all = svc.list_keys().await;
        assert_eq!(all.len(), 1);
        assert_eq!(all[0].key_id, created.key_id);
    }

    #[tokio::test]
    async fn test_enable_disable_key() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let created = svc.create_key("toggle").await.unwrap();
        assert!(created.enabled);

        svc.disable_key(&created.key_id).await.unwrap();
        assert!(!svc.get_key(&created.key_id).await.unwrap().enabled);

        svc.enable_key(&created.key_id).await.unwrap();
        assert!(svc.get_key(&created.key_id).await.unwrap().enabled);
    }

    #[tokio::test]
    async fn test_delete_key() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let doomed = svc.create_key("doomed").await.unwrap();
        assert!(svc.delete_key(&doomed.key_id).await.unwrap());
        assert!(svc.get_key(&doomed.key_id).await.is_none());
        assert_eq!(svc.list_keys().await.len(), 0);
    }

    #[tokio::test]
    async fn test_encrypt_decrypt_data() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let key = svc.create_key("enc-key").await.unwrap();
        let message = b"Hello, KMS!";

        let sealed = svc.encrypt_data(&key.key_id, message).await.unwrap();
        assert_ne!(&sealed, message);

        let opened = svc.decrypt_data(&key.key_id, &sealed).await.unwrap();
        assert_eq!(opened, message);
    }

    #[tokio::test]
    async fn test_generate_data_key() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let key = svc.create_key("data-key-gen").await.unwrap();
        let (raw, wrapped) = svc.generate_data_key(&key.key_id, 32).await.unwrap();

        assert_eq!(raw.len(), 32);
        // Wrapped form carries a nonce and GCM tag, so it must be longer.
        assert!(wrapped.len() > 32);
    }

    #[tokio::test]
    async fn test_disabled_key_cannot_encrypt() {
        let dir = tempfile::tempdir().unwrap();
        let svc = KmsService::new(dir.path()).await.unwrap();

        let key = svc.create_key("disabled").await.unwrap();
        svc.disable_key(&key.key_id).await.unwrap();

        assert!(svc.encrypt_data(&key.key_id, b"test").await.is_err());
    }

    #[tokio::test]
    async fn test_persistence_across_reload() {
        let dir = tempfile::tempdir().unwrap();

        // First instance creates a key and goes out of scope.
        let saved_id = {
            let svc = KmsService::new(dir.path()).await.unwrap();
            svc.create_key("persistent").await.unwrap().key_id
        };

        // A fresh instance over the same dir must see the key.
        let reopened = KmsService::new(dir.path()).await.unwrap();
        let found = reopened.get_key(&saved_id).await;
        assert!(found.is_some());
        assert_eq!(found.unwrap().description, "persistent");
    }

    #[tokio::test]
    async fn test_master_key_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let first = load_or_create_master_key(dir.path()).await.unwrap();
        let second = load_or_create_master_key(dir.path()).await.unwrap();
        assert_eq!(first, second);
    }
}
|
||||
4
crates/myfsio-crypto/src/lib.rs
Normal file
4
crates/myfsio-crypto/src/lib.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub mod aes_gcm;
|
||||
pub mod encryption;
|
||||
pub mod hashing;
|
||||
pub mod kms;
|
||||
57
crates/myfsio-server/Cargo.toml
Normal file
57
crates/myfsio-server/Cargo.toml
Normal file
@@ -0,0 +1,57 @@
|
||||
[package]
|
||||
name = "myfsio-server"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
myfsio-common = { path = "../myfsio-common" }
|
||||
myfsio-auth = { path = "../myfsio-auth" }
|
||||
myfsio-crypto = { path = "../myfsio-crypto" }
|
||||
myfsio-storage = { path = "../myfsio-storage" }
|
||||
myfsio-xml = { path = "../myfsio-xml" }
|
||||
base64 = { workspace = true }
|
||||
md-5 = { workspace = true }
|
||||
axum = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
tower = { workspace = true }
|
||||
tower-http = { workspace = true }
|
||||
hyper = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_urlencoded = "0.7"
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
tokio-util = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
http-body-util = "0.1"
|
||||
percent-encoding = { workspace = true }
|
||||
quick-xml = { workspace = true }
|
||||
mime_guess = "2"
|
||||
crc32fast = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
duckdb = { workspace = true }
|
||||
roxmltree = "0.20"
|
||||
parking_lot = { workspace = true }
|
||||
regex = "1"
|
||||
multer = "3"
|
||||
reqwest = { workspace = true }
|
||||
aws-sdk-s3 = { workspace = true }
|
||||
aws-config = { workspace = true }
|
||||
aws-credential-types = { workspace = true }
|
||||
aws-smithy-types = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
rand = "0.8"
|
||||
tera = { workspace = true }
|
||||
cookie = { workspace = true }
|
||||
subtle = { workspace = true }
|
||||
clap = { workspace = true }
|
||||
dotenvy = { workspace = true }
|
||||
sysinfo = "0.32"
|
||||
aes-gcm = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
tower = { workspace = true, features = ["util"] }
|
||||
564
crates/myfsio-server/src/config.rs
Normal file
564
crates/myfsio-server/src/config.rs
Normal file
@@ -0,0 +1,564 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// A request-count-per-time-window throttle setting.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RateLimitSetting {
    /// Maximum number of requests allowed within one window.
    pub max_requests: u32,
    /// Window length in seconds.
    pub window_seconds: u64,
}

impl RateLimitSetting {
    /// Builds a setting of `max_requests` per `window_seconds`.
    /// `const` so defaults can be declared at compile time.
    pub const fn new(max_requests: u32, window_seconds: u64) -> Self {
        Self {
            max_requests,
            window_seconds,
        }
    }
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerConfig {
|
||||
pub bind_addr: SocketAddr,
|
||||
pub ui_bind_addr: SocketAddr,
|
||||
pub storage_root: PathBuf,
|
||||
pub region: String,
|
||||
pub iam_config_path: PathBuf,
|
||||
pub sigv4_timestamp_tolerance_secs: u64,
|
||||
pub presigned_url_min_expiry: u64,
|
||||
pub presigned_url_max_expiry: u64,
|
||||
pub secret_key: Option<String>,
|
||||
pub encryption_enabled: bool,
|
||||
pub encryption_chunk_size_bytes: usize,
|
||||
pub kms_enabled: bool,
|
||||
pub kms_generate_data_key_min_bytes: usize,
|
||||
pub kms_generate_data_key_max_bytes: usize,
|
||||
pub gc_enabled: bool,
|
||||
pub gc_interval_hours: f64,
|
||||
pub gc_temp_file_max_age_hours: f64,
|
||||
pub gc_multipart_max_age_days: u64,
|
||||
pub gc_lock_file_max_age_hours: f64,
|
||||
pub gc_dry_run: bool,
|
||||
pub integrity_enabled: bool,
|
||||
pub metrics_enabled: bool,
|
||||
pub metrics_history_enabled: bool,
|
||||
pub metrics_interval_minutes: u64,
|
||||
pub metrics_retention_hours: u64,
|
||||
pub metrics_history_interval_minutes: u64,
|
||||
pub metrics_history_retention_hours: u64,
|
||||
pub lifecycle_enabled: bool,
|
||||
pub lifecycle_max_history_per_bucket: usize,
|
||||
pub website_hosting_enabled: bool,
|
||||
pub object_key_max_length_bytes: usize,
|
||||
pub object_tag_limit: usize,
|
||||
pub object_cache_max_size: usize,
|
||||
pub bucket_config_cache_ttl_seconds: f64,
|
||||
pub replication_connect_timeout_secs: u64,
|
||||
pub replication_read_timeout_secs: u64,
|
||||
pub replication_max_retries: u32,
|
||||
pub replication_streaming_threshold_bytes: u64,
|
||||
pub replication_max_failures_per_bucket: usize,
|
||||
pub site_sync_enabled: bool,
|
||||
pub site_sync_interval_secs: u64,
|
||||
pub site_sync_batch_size: usize,
|
||||
pub site_sync_connect_timeout_secs: u64,
|
||||
pub site_sync_read_timeout_secs: u64,
|
||||
pub site_sync_max_retries: u32,
|
||||
pub site_sync_clock_skew_tolerance: f64,
|
||||
pub site_id: Option<String>,
|
||||
pub site_endpoint: Option<String>,
|
||||
pub site_region: String,
|
||||
pub site_priority: i32,
|
||||
pub api_base_url: String,
|
||||
pub num_trusted_proxies: usize,
|
||||
pub allowed_redirect_hosts: Vec<String>,
|
||||
pub allow_internal_endpoints: bool,
|
||||
pub cors_origins: Vec<String>,
|
||||
pub cors_methods: Vec<String>,
|
||||
pub cors_allow_headers: Vec<String>,
|
||||
pub cors_expose_headers: Vec<String>,
|
||||
pub session_lifetime_days: u64,
|
||||
pub log_level: String,
|
||||
pub multipart_min_part_size: u64,
|
||||
pub bulk_delete_max_keys: usize,
|
||||
pub stream_chunk_size: usize,
|
||||
pub ratelimit_default: RateLimitSetting,
|
||||
pub ratelimit_admin: RateLimitSetting,
|
||||
pub ratelimit_storage_uri: String,
|
||||
pub ui_enabled: bool,
|
||||
pub templates_dir: PathBuf,
|
||||
pub static_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn from_env() -> Self {
|
||||
let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
|
||||
let port: u16 = std::env::var("PORT")
|
||||
.unwrap_or_else(|_| "5000".to_string())
|
||||
.parse()
|
||||
.unwrap_or(5000);
|
||||
let host_ip: std::net::IpAddr = host.parse().unwrap();
|
||||
let bind_addr = SocketAddr::new(host_ip, port);
|
||||
let ui_port: u16 = std::env::var("UI_PORT")
|
||||
.unwrap_or_else(|_| "5100".to_string())
|
||||
.parse()
|
||||
.unwrap_or(5100);
|
||||
let storage_root = std::env::var("STORAGE_ROOT").unwrap_or_else(|_| "./data".to_string());
|
||||
let region = std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string());
|
||||
|
||||
let storage_path = PathBuf::from(&storage_root);
|
||||
let iam_config_path = std::env::var("IAM_CONFIG")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| {
|
||||
storage_path
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("iam.json")
|
||||
});
|
||||
|
||||
let sigv4_timestamp_tolerance_secs: u64 =
|
||||
std::env::var("SIGV4_TIMESTAMP_TOLERANCE_SECONDS")
|
||||
.unwrap_or_else(|_| "900".to_string())
|
||||
.parse()
|
||||
.unwrap_or(900);
|
||||
|
||||
let presigned_url_min_expiry: u64 = std::env::var("PRESIGNED_URL_MIN_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "1".to_string())
|
||||
.parse()
|
||||
.unwrap_or(1);
|
||||
|
||||
let presigned_url_max_expiry: u64 = std::env::var("PRESIGNED_URL_MAX_EXPIRY_SECONDS")
|
||||
.unwrap_or_else(|_| "604800".to_string())
|
||||
.parse()
|
||||
.unwrap_or(604800);
|
||||
|
||||
let secret_key = {
|
||||
let env_key = std::env::var("SECRET_KEY").ok();
|
||||
match env_key {
|
||||
Some(k) if !k.is_empty() && k != "dev-secret-key" => Some(k),
|
||||
_ => {
|
||||
let secret_file = storage_path
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join(".secret");
|
||||
std::fs::read_to_string(&secret_file)
|
||||
.ok()
|
||||
.map(|s| s.trim().to_string())
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let encryption_enabled = parse_bool_env("ENCRYPTION_ENABLED", false);
|
||||
let encryption_chunk_size_bytes = parse_usize_env("ENCRYPTION_CHUNK_SIZE_BYTES", 65_536);
|
||||
|
||||
let kms_enabled = parse_bool_env("KMS_ENABLED", false);
|
||||
let kms_generate_data_key_min_bytes = parse_usize_env("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1);
|
||||
let kms_generate_data_key_max_bytes =
|
||||
parse_usize_env("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024);
|
||||
|
||||
let gc_enabled = parse_bool_env("GC_ENABLED", false);
|
||||
let gc_interval_hours = parse_f64_env("GC_INTERVAL_HOURS", 6.0);
|
||||
let gc_temp_file_max_age_hours = parse_f64_env("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0);
|
||||
let gc_multipart_max_age_days = parse_u64_env("GC_MULTIPART_MAX_AGE_DAYS", 7);
|
||||
let gc_lock_file_max_age_hours = parse_f64_env("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0);
|
||||
let gc_dry_run = parse_bool_env("GC_DRY_RUN", false);
|
||||
|
||||
let integrity_enabled = parse_bool_env("INTEGRITY_ENABLED", false);
|
||||
|
||||
let metrics_enabled = parse_bool_env("OPERATION_METRICS_ENABLED", false);
|
||||
|
||||
let metrics_history_enabled = parse_bool_env("METRICS_HISTORY_ENABLED", false);
|
||||
|
||||
let metrics_interval_minutes = parse_u64_env("OPERATION_METRICS_INTERVAL_MINUTES", 5);
|
||||
let metrics_retention_hours = parse_u64_env("OPERATION_METRICS_RETENTION_HOURS", 24);
|
||||
let metrics_history_interval_minutes = parse_u64_env("METRICS_HISTORY_INTERVAL_MINUTES", 5);
|
||||
let metrics_history_retention_hours = parse_u64_env("METRICS_HISTORY_RETENTION_HOURS", 24);
|
||||
|
||||
let lifecycle_enabled = parse_bool_env("LIFECYCLE_ENABLED", false);
|
||||
let lifecycle_max_history_per_bucket =
|
||||
parse_usize_env("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50);
|
||||
|
||||
let website_hosting_enabled = parse_bool_env("WEBSITE_HOSTING_ENABLED", false);
|
||||
let object_key_max_length_bytes = parse_usize_env("OBJECT_KEY_MAX_LENGTH_BYTES", 1024);
|
||||
let object_tag_limit = parse_usize_env("OBJECT_TAG_LIMIT", 50);
|
||||
let object_cache_max_size = parse_usize_env("OBJECT_CACHE_MAX_SIZE", 100);
|
||||
let bucket_config_cache_ttl_seconds =
|
||||
parse_f64_env("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0);
|
||||
|
||||
let replication_connect_timeout_secs =
|
||||
parse_u64_env("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5);
|
||||
let replication_read_timeout_secs = parse_u64_env("REPLICATION_READ_TIMEOUT_SECONDS", 30);
|
||||
let replication_max_retries = parse_u64_env("REPLICATION_MAX_RETRIES", 2) as u32;
|
||||
let replication_streaming_threshold_bytes =
|
||||
parse_u64_env("REPLICATION_STREAMING_THRESHOLD_BYTES", 10_485_760);
|
||||
let replication_max_failures_per_bucket =
|
||||
parse_u64_env("REPLICATION_MAX_FAILURES_PER_BUCKET", 50) as usize;
|
||||
|
||||
let site_sync_enabled = parse_bool_env("SITE_SYNC_ENABLED", false);
|
||||
let site_sync_interval_secs = parse_u64_env("SITE_SYNC_INTERVAL_SECONDS", 60);
|
||||
let site_sync_batch_size = parse_u64_env("SITE_SYNC_BATCH_SIZE", 100) as usize;
|
||||
let site_sync_connect_timeout_secs = parse_u64_env("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10);
|
||||
let site_sync_read_timeout_secs = parse_u64_env("SITE_SYNC_READ_TIMEOUT_SECONDS", 120);
|
||||
let site_sync_max_retries = parse_u64_env("SITE_SYNC_MAX_RETRIES", 2) as u32;
|
||||
let site_sync_clock_skew_tolerance: f64 =
|
||||
std::env::var("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(1.0);
|
||||
|
||||
let site_id = parse_optional_string_env("SITE_ID");
|
||||
let site_endpoint = parse_optional_string_env("SITE_ENDPOINT");
|
||||
let site_region = std::env::var("SITE_REGION").unwrap_or_else(|_| region.clone());
|
||||
let site_priority = parse_i32_env("SITE_PRIORITY", 100);
|
||||
let api_base_url = std::env::var("API_BASE_URL")
|
||||
.unwrap_or_else(|_| format!("http://{}", bind_addr))
|
||||
.trim_end_matches('/')
|
||||
.to_string();
|
||||
let num_trusted_proxies = parse_usize_env("NUM_TRUSTED_PROXIES", 0);
|
||||
let allowed_redirect_hosts = parse_list_env("ALLOWED_REDIRECT_HOSTS", "");
|
||||
let allow_internal_endpoints = parse_bool_env("ALLOW_INTERNAL_ENDPOINTS", false);
|
||||
let cors_origins = parse_list_env("CORS_ORIGINS", "*");
|
||||
let cors_methods = parse_list_env("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD");
|
||||
let cors_allow_headers = parse_list_env("CORS_ALLOW_HEADERS", "*");
|
||||
let cors_expose_headers = parse_list_env("CORS_EXPOSE_HEADERS", "*");
|
||||
let session_lifetime_days = parse_u64_env("SESSION_LIFETIME_DAYS", 1);
|
||||
let log_level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string());
|
||||
let multipart_min_part_size = parse_u64_env("MULTIPART_MIN_PART_SIZE", 5_242_880);
|
||||
let bulk_delete_max_keys = parse_usize_env("BULK_DELETE_MAX_KEYS", 1000);
|
||||
let stream_chunk_size = parse_usize_env("STREAM_CHUNK_SIZE", 1_048_576);
|
||||
let ratelimit_default =
|
||||
parse_rate_limit_env("RATE_LIMIT_DEFAULT", RateLimitSetting::new(200, 60));
|
||||
let ratelimit_admin =
|
||||
parse_rate_limit_env("RATE_LIMIT_ADMIN", RateLimitSetting::new(60, 60));
|
||||
let ratelimit_storage_uri =
|
||||
std::env::var("RATE_LIMIT_STORAGE_URI").unwrap_or_else(|_| "memory://".to_string());
|
||||
|
||||
let ui_enabled = parse_bool_env("UI_ENABLED", true);
|
||||
let templates_dir = std::env::var("TEMPLATES_DIR")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| default_templates_dir());
|
||||
let static_dir = std::env::var("STATIC_DIR")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| default_static_dir());
|
||||
|
||||
Self {
|
||||
bind_addr,
|
||||
ui_bind_addr: SocketAddr::new(host_ip, ui_port),
|
||||
storage_root: storage_path,
|
||||
region,
|
||||
iam_config_path,
|
||||
sigv4_timestamp_tolerance_secs,
|
||||
presigned_url_min_expiry,
|
||||
presigned_url_max_expiry,
|
||||
secret_key,
|
||||
encryption_enabled,
|
||||
encryption_chunk_size_bytes,
|
||||
kms_enabled,
|
||||
kms_generate_data_key_min_bytes,
|
||||
kms_generate_data_key_max_bytes,
|
||||
gc_enabled,
|
||||
gc_interval_hours,
|
||||
gc_temp_file_max_age_hours,
|
||||
gc_multipart_max_age_days,
|
||||
gc_lock_file_max_age_hours,
|
||||
gc_dry_run,
|
||||
integrity_enabled,
|
||||
metrics_enabled,
|
||||
metrics_history_enabled,
|
||||
metrics_interval_minutes,
|
||||
metrics_retention_hours,
|
||||
metrics_history_interval_minutes,
|
||||
metrics_history_retention_hours,
|
||||
lifecycle_enabled,
|
||||
lifecycle_max_history_per_bucket,
|
||||
website_hosting_enabled,
|
||||
object_key_max_length_bytes,
|
||||
object_tag_limit,
|
||||
object_cache_max_size,
|
||||
bucket_config_cache_ttl_seconds,
|
||||
replication_connect_timeout_secs,
|
||||
replication_read_timeout_secs,
|
||||
replication_max_retries,
|
||||
replication_streaming_threshold_bytes,
|
||||
replication_max_failures_per_bucket,
|
||||
site_sync_enabled,
|
||||
site_sync_interval_secs,
|
||||
site_sync_batch_size,
|
||||
site_sync_connect_timeout_secs,
|
||||
site_sync_read_timeout_secs,
|
||||
site_sync_max_retries,
|
||||
site_sync_clock_skew_tolerance,
|
||||
site_id,
|
||||
site_endpoint,
|
||||
site_region,
|
||||
site_priority,
|
||||
api_base_url,
|
||||
num_trusted_proxies,
|
||||
allowed_redirect_hosts,
|
||||
allow_internal_endpoints,
|
||||
cors_origins,
|
||||
cors_methods,
|
||||
cors_allow_headers,
|
||||
cors_expose_headers,
|
||||
session_lifetime_days,
|
||||
log_level,
|
||||
multipart_min_part_size,
|
||||
bulk_delete_max_keys,
|
||||
stream_chunk_size,
|
||||
ratelimit_default,
|
||||
ratelimit_admin,
|
||||
ratelimit_storage_uri,
|
||||
ui_enabled,
|
||||
templates_dir,
|
||||
static_dir,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
    /// Compile-time defaults. `from_env` falls back to these same values
    /// when the corresponding environment variables are unset or invalid —
    /// keep the two in sync when changing a default.
    fn default() -> Self {
        Self {
            bind_addr: "127.0.0.1:5000".parse().unwrap(), // literal is known-valid; parse cannot fail
            ui_bind_addr: "127.0.0.1:5100".parse().unwrap(),
            storage_root: PathBuf::from("./data"),
            region: "us-east-1".to_string(),
            iam_config_path: PathBuf::from("./data/.myfsio.sys/config/iam.json"),
            sigv4_timestamp_tolerance_secs: 900, // 15 minutes, the usual SigV4 clock-skew window
            presigned_url_min_expiry: 1,
            presigned_url_max_expiry: 604_800, // 7 days in seconds
            secret_key: None,
            encryption_enabled: false,
            encryption_chunk_size_bytes: 65_536, // 64 KiB
            kms_enabled: false,
            kms_generate_data_key_min_bytes: 1,
            kms_generate_data_key_max_bytes: 1024,
            gc_enabled: false,
            gc_interval_hours: 6.0,
            gc_temp_file_max_age_hours: 24.0,
            gc_multipart_max_age_days: 7,
            gc_lock_file_max_age_hours: 1.0,
            gc_dry_run: false,
            integrity_enabled: false,
            metrics_enabled: false,
            metrics_history_enabled: false,
            metrics_interval_minutes: 5,
            metrics_retention_hours: 24,
            metrics_history_interval_minutes: 5,
            metrics_history_retention_hours: 24,
            lifecycle_enabled: false,
            lifecycle_max_history_per_bucket: 50,
            website_hosting_enabled: false,
            object_key_max_length_bytes: 1024,
            object_tag_limit: 50,
            object_cache_max_size: 100,
            bucket_config_cache_ttl_seconds: 30.0,
            replication_connect_timeout_secs: 5,
            replication_read_timeout_secs: 30,
            replication_max_retries: 2,
            replication_streaming_threshold_bytes: 10_485_760, // 10 MiB
            replication_max_failures_per_bucket: 50,
            site_sync_enabled: false,
            site_sync_interval_secs: 60,
            site_sync_batch_size: 100,
            site_sync_connect_timeout_secs: 10,
            site_sync_read_timeout_secs: 120,
            site_sync_max_retries: 2,
            site_sync_clock_skew_tolerance: 1.0,
            site_id: None,
            site_endpoint: None,
            site_region: "us-east-1".to_string(), // mirrors the default `region`
            site_priority: 100,
            api_base_url: "http://127.0.0.1:5000".to_string(), // mirrors the default `bind_addr`
            num_trusted_proxies: 0,
            allowed_redirect_hosts: Vec::new(),
            allow_internal_endpoints: false,
            cors_origins: vec!["*".to_string()],
            cors_methods: vec![
                "GET".to_string(),
                "PUT".to_string(),
                "POST".to_string(),
                "DELETE".to_string(),
                "OPTIONS".to_string(),
                "HEAD".to_string(),
            ],
            cors_allow_headers: vec!["*".to_string()],
            cors_expose_headers: vec!["*".to_string()],
            session_lifetime_days: 1,
            log_level: "INFO".to_string(),
            multipart_min_part_size: 5_242_880, // 5 MiB
            bulk_delete_max_keys: 1000,
            stream_chunk_size: 1_048_576, // 1 MiB
            ratelimit_default: RateLimitSetting::new(200, 60), // 200 requests per 60s
            ratelimit_admin: RateLimitSetting::new(60, 60),    // 60 requests per 60s
            ratelimit_storage_uri: "memory://".to_string(),
            ui_enabled: true,
            templates_dir: default_templates_dir(),
            static_dir: default_static_dir(),
        }
    }
}
|
||||
|
||||
fn default_templates_dir() -> PathBuf {
|
||||
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
manifest_dir.join("templates")
|
||||
}
|
||||
|
||||
fn default_static_dir() -> PathBuf {
|
||||
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
for candidate in [
|
||||
manifest_dir.join("static"),
|
||||
manifest_dir.join("..").join("..").join("..").join("static"),
|
||||
] {
|
||||
if candidate.exists() {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
manifest_dir.join("static")
|
||||
}
|
||||
|
||||
/// Read `key` from the environment and parse it as a `u64`.
///
/// Returns `default` when the variable is unset or does not parse.
fn parse_u64_env(key: &str, default: u64) -> u64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||
|
||||
/// Read `key` from the environment and parse it as a `usize`.
///
/// Returns `default` when the variable is unset or does not parse.
fn parse_usize_env(key: &str, default: usize) -> usize {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||
|
||||
/// Read `key` from the environment and parse it as an `i32`.
///
/// Returns `default` when the variable is unset or does not parse.
fn parse_i32_env(key: &str, default: i32) -> i32 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||
|
||||
/// Read `key` from the environment and parse it as an `f64`.
///
/// Returns `default` when the variable is unset or does not parse.
fn parse_f64_env(key: &str, default: f64) -> f64 {
    match std::env::var(key) {
        Ok(raw) => raw.parse().unwrap_or(default),
        Err(_) => default,
    }
}
|
||||
|
||||
/// Read `key` from the environment and interpret it as a boolean flag.
///
/// Truthy values: `1`, `true`, `yes`, `on`; falsy values: `0`, `false`,
/// `no`, `off` (case-insensitive, surrounding whitespace ignored).
/// Unset *or unrecognized* values fall back to `default`.
///
/// Previously any unrecognized value (e.g. "garbage") silently meant
/// `false` even when `default` was `true`; falling back to `default`
/// matches the behavior of the sibling `parse_*_env` helpers, which all
/// treat unparsable input the same as unset.
fn parse_bool_env(key: &str, default: bool) -> bool {
    std::env::var(key)
        .ok()
        .and_then(|value| match value.trim().to_ascii_lowercase().as_str() {
            "1" | "true" | "yes" | "on" => Some(true),
            "0" | "false" | "no" | "off" => Some(false),
            // Unrecognized text: defer to the caller-supplied default.
            _ => None,
        })
        .unwrap_or(default)
}
|
||||
|
||||
/// Read `key` from the environment, trimming whitespace.
///
/// Returns `None` when the variable is unset or blank after trimming.
fn parse_optional_string_env(key: &str) -> Option<String> {
    let raw = std::env::var(key).ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
|
||||
|
||||
/// Read `key` from the environment as a comma-separated list.
///
/// Falls back to parsing `default` when the variable is unset. Items are
/// trimmed, and empty items (e.g. from "a,,b" or a blank default) are
/// dropped.
fn parse_list_env(key: &str, default: &str) -> Vec<String> {
    let raw = std::env::var(key).unwrap_or_else(|_| default.to_string());
    let mut items = Vec::new();
    for piece in raw.split(',') {
        let trimmed = piece.trim();
        if !trimmed.is_empty() {
            items.push(trimmed.to_string());
        }
    }
    items
}
|
||||
|
||||
pub fn parse_rate_limit(value: &str) -> Option<RateLimitSetting> {
|
||||
let parts = value.split_whitespace().collect::<Vec<_>>();
|
||||
if parts.len() != 3 || !parts[1].eq_ignore_ascii_case("per") {
|
||||
return None;
|
||||
}
|
||||
let max_requests = parts[0].parse::<u32>().ok()?;
|
||||
if max_requests == 0 {
|
||||
return None;
|
||||
}
|
||||
let window_seconds = match parts[2].to_ascii_lowercase().as_str() {
|
||||
"second" | "seconds" => 1,
|
||||
"minute" | "minutes" => 60,
|
||||
"hour" | "hours" => 3600,
|
||||
"day" | "days" => 86_400,
|
||||
_ => return None,
|
||||
};
|
||||
Some(RateLimitSetting::new(max_requests, window_seconds))
|
||||
}
|
||||
|
||||
fn parse_rate_limit_env(key: &str, default: RateLimitSetting) -> RateLimitSetting {
|
||||
std::env::var(key)
|
||||
.ok()
|
||||
.and_then(|value| parse_rate_limit(&value))
|
||||
.unwrap_or(default)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Mutex, OnceLock};

    // Process-wide lock serializing the tests that mutate environment
    // variables: cargo's harness runs tests on multiple threads, and
    // `std::env::set_var` affects the whole process.
    fn env_lock() -> &'static Mutex<()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(()))
    }

    #[test]
    fn parses_rate_limit_text() {
        assert_eq!(
            parse_rate_limit("200 per minute"),
            Some(RateLimitSetting::new(200, 60))
        );
        // Plural unit names are accepted regardless of the count.
        assert_eq!(
            parse_rate_limit("3 per hours"),
            Some(RateLimitSetting::new(3, 3600))
        );
        // Zero requests and malformed text are both rejected.
        assert_eq!(parse_rate_limit("0 per minute"), None);
        assert_eq!(parse_rate_limit("bad"), None);
    }

    #[test]
    fn env_defaults_and_invalid_values_fall_back() {
        let _guard = env_lock().lock().unwrap();
        // Unset and unparsable variables should both yield the defaults.
        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::set_var("OBJECT_TAG_LIMIT", "not-a-number");
        std::env::set_var("RATE_LIMIT_DEFAULT", "invalid");

        let config = ServerConfig::from_env();

        assert_eq!(config.object_key_max_length_bytes, 1024);
        assert_eq!(config.object_tag_limit, 50);
        assert_eq!(config.ratelimit_default, RateLimitSetting::new(200, 60));

        // Restore a clean environment for the other env-mutating test.
        std::env::remove_var("OBJECT_TAG_LIMIT");
        std::env::remove_var("RATE_LIMIT_DEFAULT");
    }

    #[test]
    fn env_overrides_new_values() {
        let _guard = env_lock().lock().unwrap();
        std::env::set_var("OBJECT_KEY_MAX_LENGTH_BYTES", "2048");
        std::env::set_var("GC_DRY_RUN", "true");
        std::env::set_var("RATE_LIMIT_ADMIN", "7 per second");
        std::env::set_var("HOST", "127.0.0.1");
        std::env::set_var("PORT", "5501");
        // With API_BASE_URL unset, the URL is derived from HOST/PORT.
        std::env::remove_var("API_BASE_URL");

        let config = ServerConfig::from_env();

        assert_eq!(config.object_key_max_length_bytes, 2048);
        assert!(config.gc_dry_run);
        assert_eq!(config.ratelimit_admin, RateLimitSetting::new(7, 1));
        assert_eq!(config.api_base_url, "http://127.0.0.1:5501");

        std::env::remove_var("OBJECT_KEY_MAX_LENGTH_BYTES");
        std::env::remove_var("GC_DRY_RUN");
        std::env::remove_var("RATE_LIMIT_ADMIN");
        std::env::remove_var("HOST");
        std::env::remove_var("PORT");
    }
}
|
||||
1414
crates/myfsio-server/src/handlers/admin.rs
Normal file
1414
crates/myfsio-server/src/handlers/admin.rs
Normal file
File diff suppressed because it is too large
Load Diff
184
crates/myfsio-server/src/handlers/chunked.rs
Normal file
184
crates/myfsio-server/src/handlers/chunked.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use bytes::{Buf, BytesMut};
|
||||
use tokio::io::{AsyncRead, ReadBuf};
|
||||
|
||||
// Decoder state for aws-chunked framing:
//   <hex-size>[;extensions]\r\n <data> \r\n ... 0\r\n [trailers] \r\n
enum State {
    /// Expecting a chunk-size line terminated by CRLF.
    ReadSize,
    /// Copying chunk payload; the value is how many payload bytes the
    /// current chunk still owes (0 is reused as "waiting for the chunk's
    /// trailing CRLF").
    ReadData(u64),
    /// Final (size-0) chunk seen; consuming trailer lines until a blank
    /// line.
    ReadTrailer,
    /// Terminal state: the stream is fully decoded.
    Finished,
}
|
||||
|
||||
/// `AsyncRead` adapter that strips AWS `aws-chunked` transfer framing
/// from an underlying byte stream, yielding only the chunk payload bytes.
pub struct AwsChunkedStream<S> {
    /// Raw, still-encoded source stream.
    inner: S,
    /// Encoded bytes read from `inner` but not yet decoded.
    buffer: BytesMut,
    /// Current position in the chunk-framing state machine.
    state: State,
    /// Decoded bytes awaiting delivery to the caller.
    /// NOTE(review): this field is drained in `try_advance` but never
    /// filled anywhere in the visible code — confirm whether it is
    /// vestigial before removing.
    pending: BytesMut,
    /// Set once `inner` reports EOF (a zero-length read).
    eof: bool,
}
|
||||
|
||||
impl<S> AwsChunkedStream<S> {
    /// Wrap `inner` so that reads yield the decoded payload of an
    /// `aws-chunked` encoded stream.
    pub fn new(inner: S) -> Self {
        Self {
            inner,
            buffer: BytesMut::with_capacity(8192),
            state: State::ReadSize,
            pending: BytesMut::new(),
            eof: false,
        }
    }

    /// Index of the first CRLF pair in `buffer` (position of the `\r`),
    /// or `None` if no complete CRLF has been buffered yet.
    fn find_crlf(&self) -> Option<usize> {
        for i in 0..self.buffer.len().saturating_sub(1) {
            if self.buffer[i] == b'\r' && self.buffer[i + 1] == b'\n' {
                return Some(i);
            }
        }
        None
    }

    /// Parse a chunk-size line: hexadecimal digits, optionally followed
    /// by `;`-separated chunk extensions (e.g. `chunk-signature=...`),
    /// which are ignored.
    ///
    /// Returns `InvalidData` for non-UTF-8 lines or non-hex sizes.
    fn parse_chunk_size(line: &[u8]) -> std::io::Result<u64> {
        let text = std::str::from_utf8(line).map_err(|_| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "invalid chunk size encoding",
            )
        })?;
        let head = text.split(';').next().unwrap_or("").trim();
        u64::from_str_radix(head, 16).map_err(|_| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("invalid chunk size: {}", head),
            )
        })
    }

    /// Decode as much buffered input into `out` as possible.
    ///
    /// Returns `Ok(true)` when no more input is needed right now (stream
    /// finished, or `out` is full) and `Ok(false)` when more bytes from
    /// `inner` are required to make progress. `Err` indicates malformed
    /// framing.
    fn try_advance(&mut self, out: &mut ReadBuf<'_>) -> std::io::Result<bool> {
        loop {
            if out.remaining() == 0 {
                return Ok(true);
            }

            // Flush any previously decoded bytes first.
            if !self.pending.is_empty() {
                let take = std::cmp::min(self.pending.len(), out.remaining());
                out.put_slice(&self.pending[..take]);
                self.pending.advance(take);
                continue;
            }

            match self.state {
                State::Finished => return Ok(true),
                State::ReadSize => {
                    // Need a full size line before we can do anything.
                    let idx = match self.find_crlf() {
                        Some(i) => i,
                        None => return Ok(false),
                    };
                    let line = self.buffer.split_to(idx);
                    self.buffer.advance(2); // skip the CRLF itself
                    let size = Self::parse_chunk_size(&line)?;
                    if size == 0 {
                        // Size-0 chunk marks the end of payload data.
                        self.state = State::ReadTrailer;
                    } else {
                        self.state = State::ReadData(size);
                    }
                }
                State::ReadData(remaining) => {
                    if self.buffer.is_empty() {
                        return Ok(false);
                    }
                    // Copy at most: what's buffered, what the chunk still
                    // owes, and what the caller can accept.
                    let avail = std::cmp::min(self.buffer.len() as u64, remaining) as usize;
                    let take = std::cmp::min(avail, out.remaining());
                    out.put_slice(&self.buffer[..take]);
                    self.buffer.advance(take);
                    let new_remaining = remaining - take as u64;
                    if new_remaining == 0 {
                        if self.buffer.len() < 2 {
                            // Payload fully copied but the trailing CRLF
                            // hasn't arrived; ReadData(0) re-enters here
                            // once more input is buffered.
                            self.state = State::ReadData(0);
                            return Ok(false);
                        }
                        if &self.buffer[..2] != b"\r\n" {
                            return Err(std::io::Error::new(
                                std::io::ErrorKind::InvalidData,
                                "malformed chunk terminator",
                            ));
                        }
                        self.buffer.advance(2);
                        self.state = State::ReadSize;
                    } else {
                        self.state = State::ReadData(new_remaining);
                    }
                }
                State::ReadTrailer => {
                    let idx = match self.find_crlf() {
                        Some(i) => i,
                        None => return Ok(false),
                    };
                    if idx == 0 {
                        // Blank line terminates the trailer section.
                        self.buffer.advance(2);
                        self.state = State::Finished;
                    } else {
                        // Skip (discard) one trailer line, CRLF included.
                        self.buffer.advance(idx + 2);
                    }
                }
            }
        }
    }
}
|
||||
|
||||
impl<S> AsyncRead for AwsChunkedStream<S>
where
    S: AsyncRead + Unpin,
{
    /// Drive the decoder: drain what the internal buffer can yield, then
    /// pull more encoded bytes from `inner` as needed.
    ///
    /// A truncated stream (EOF from `inner` before the final size-0 chunk
    /// and blank trailer line) is reported as `UnexpectedEof` rather than
    /// a silent short read.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        loop {
            let before = buf.filled().len();
            let done = match self.try_advance(buf) {
                Ok(v) => v,
                Err(e) => return Poll::Ready(Err(e)),
            };
            // Progress was made: hand the decoded bytes to the caller.
            if buf.filled().len() > before {
                return Poll::Ready(Ok(()));
            }
            // Stream finished (or caller's buffer was already full):
            // returning with nothing filled signals EOF.
            if done {
                return Poll::Ready(Ok(()));
            }
            // Decoder needs more input but the source is exhausted.
            if self.eof {
                return Poll::Ready(Err(std::io::Error::new(
                    std::io::ErrorKind::UnexpectedEof,
                    "unexpected EOF in aws-chunked stream",
                )));
            }

            // Pull another slab of encoded bytes from the inner stream.
            let mut tmp = [0u8; 8192];
            let mut rb = ReadBuf::new(&mut tmp);
            match Pin::new(&mut self.inner).poll_read(cx, &mut rb) {
                Poll::Ready(Ok(())) => {
                    let n = rb.filled().len();
                    if n == 0 {
                        // Zero-byte read = EOF; loop once more so
                        // try_advance can finish or the eof branch fires.
                        self.eof = true;
                        continue;
                    }
                    self.buffer.extend_from_slice(rb.filled());
                }
                Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}
|
||||
|
||||
/// Adapt an HTTP request body carrying `aws-chunked` content into a plain
/// `AsyncRead` over the decoded payload bytes.
pub fn decode_body(body: axum::body::Body) -> impl AsyncRead + Send + Unpin {
    use futures::TryStreamExt;
    let stream = tokio_util::io::StreamReader::new(
        http_body_util::BodyStream::new(body)
            // Non-data frames (e.g. HTTP trailers) become empty byte
            // chunks here and are effectively skipped by the reader.
            .map_ok(|frame| frame.into_data().unwrap_or_default())
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
    );
    AwsChunkedStream::new(stream)
}
|
||||
1567
crates/myfsio-server/src/handlers/config.rs
Normal file
1567
crates/myfsio-server/src/handlers/config.rs
Normal file
File diff suppressed because it is too large
Load Diff
559
crates/myfsio-server/src/handlers/kms.rs
Normal file
559
crates/myfsio-server/src/handlers/kms.rs
Normal file
@@ -0,0 +1,559 @@
|
||||
use aes_gcm::aead::Aead;
|
||||
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
|
||||
use axum::body::Body;
|
||||
use axum::extract::State;
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::engine::general_purpose::STANDARD as B64;
|
||||
use base64::Engine;
|
||||
use rand::RngCore;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
fn json_ok(value: Value) -> Response {
|
||||
(
|
||||
StatusCode::OK,
|
||||
[("content-type", "application/json")],
|
||||
value.to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
fn json_err(status: StatusCode, msg: &str) -> Response {
|
||||
(
|
||||
status,
|
||||
[("content-type", "application/json")],
|
||||
json!({"error": msg}).to_string(),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
async fn read_json(body: Body) -> Result<Value, Response> {
|
||||
let body_bytes = http_body_util::BodyExt::collect(body)
|
||||
.await
|
||||
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid request body"))?
|
||||
.to_bytes();
|
||||
if body_bytes.is_empty() {
|
||||
Ok(json!({}))
|
||||
} else {
|
||||
serde_json::from_slice(&body_bytes)
|
||||
.map_err(|_| json_err(StatusCode::BAD_REQUEST, "Invalid JSON"))
|
||||
}
|
||||
}
|
||||
|
||||
fn require_kms(
|
||||
state: &AppState,
|
||||
) -> Result<&std::sync::Arc<myfsio_crypto::kms::KmsService>, Response> {
|
||||
state
|
||||
.kms
|
||||
.as_ref()
|
||||
.ok_or_else(|| json_err(StatusCode::SERVICE_UNAVAILABLE, "KMS not enabled"))
|
||||
}
|
||||
|
||||
fn decode_b64(value: &str, field: &str) -> Result<Vec<u8>, Response> {
|
||||
B64.decode(value).map_err(|_| {
|
||||
json_err(
|
||||
StatusCode::BAD_REQUEST,
|
||||
&format!("Invalid base64 {}", field),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn require_str<'a>(value: &'a Value, names: &[&str], message: &str) -> Result<&'a str, Response> {
|
||||
for name in names {
|
||||
if let Some(found) = value.get(*name).and_then(|v| v.as_str()) {
|
||||
return Ok(found);
|
||||
}
|
||||
}
|
||||
Err(json_err(StatusCode::BAD_REQUEST, message))
|
||||
}
|
||||
|
||||
pub async fn list_keys(State(state): State<AppState>) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
let keys = kms.list_keys().await;
|
||||
let keys_json: Vec<Value> = keys
|
||||
.iter()
|
||||
.map(|k| {
|
||||
json!({
|
||||
"KeyId": k.key_id,
|
||||
"Arn": k.arn,
|
||||
"Description": k.description,
|
||||
"CreationDate": k.creation_date.to_rfc3339(),
|
||||
"Enabled": k.enabled,
|
||||
"KeyState": k.key_state,
|
||||
"KeyUsage": k.key_usage,
|
||||
"KeySpec": k.key_spec,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
json_ok(json!({"keys": keys_json}))
|
||||
}
|
||||
|
||||
pub async fn create_key(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let req = match read_json(body).await {
|
||||
Ok(req) => req,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
let description = req
|
||||
.get("Description")
|
||||
.or_else(|| req.get("description"))
|
||||
.and_then(|d| d.as_str())
|
||||
.unwrap_or("");
|
||||
|
||||
match kms.create_key(description).await {
|
||||
Ok(key) => json_ok(json!({
|
||||
"KeyId": key.key_id,
|
||||
"Arn": key.arn,
|
||||
"Description": key.description,
|
||||
"CreationDate": key.creation_date.to_rfc3339(),
|
||||
"Enabled": key.enabled,
|
||||
"KeyState": key.key_state,
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.get_key(&key_id).await {
|
||||
Some(key) => json_ok(json!({
|
||||
"KeyId": key.key_id,
|
||||
"Arn": key.arn,
|
||||
"Description": key.description,
|
||||
"CreationDate": key.creation_date.to_rfc3339(),
|
||||
"Enabled": key.enabled,
|
||||
"KeyState": key.key_state,
|
||||
"KeyUsage": key.key_usage,
|
||||
"KeySpec": key.key_spec,
|
||||
})),
|
||||
None => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.delete_key(&key_id).await {
|
||||
Ok(true) => StatusCode::NO_CONTENT.into_response(),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn enable_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.enable_key(&key_id).await {
|
||||
Ok(true) => json_ok(json!({"status": "enabled"})),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn disable_key(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.disable_key(&key_id).await {
|
||||
Ok(true) => json_ok(json!({"status": "disabled"})),
|
||||
Ok(false) => json_err(StatusCode::NOT_FOUND, "Key not found"),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn encrypt(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let req = match read_json(body).await {
|
||||
Ok(req) => req,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let plaintext_b64 = match require_str(&req, &["Plaintext", "plaintext"], "Missing Plaintext") {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.encrypt_data(key_id, &plaintext).await {
|
||||
Ok(ct) => json_ok(json!({
|
||||
"KeyId": key_id,
|
||||
"CiphertextBlob": B64.encode(&ct),
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decrypt(State(state): State<AppState>, body: Body) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let req = match read_json(body).await {
|
||||
Ok(req) => req,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let ciphertext_b64 = match require_str(
|
||||
&req,
|
||||
&["CiphertextBlob", "ciphertext_blob"],
|
||||
"Missing CiphertextBlob",
|
||||
) {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
|
||||
Ok(value) => value,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.decrypt_data(key_id, &ciphertext).await {
|
||||
Ok(pt) => json_ok(json!({
|
||||
"KeyId": key_id,
|
||||
"Plaintext": B64.encode(&pt),
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// POST handler: GenerateDataKey — returns both the wrapped data key and
/// its plaintext form.
pub async fn generate_data_key(State(state): State<AppState>, body: Body) -> Response {
    generate_data_key_inner(state, body, true).await
}
|
||||
|
||||
/// POST handler: GenerateDataKeyWithoutPlaintext — returns only the
/// wrapped data key, never the plaintext material.
pub async fn generate_data_key_without_plaintext(
    State(state): State<AppState>,
    body: Body,
) -> Response {
    generate_data_key_inner(state, body, false).await
}
|
||||
|
||||
/// Shared implementation for GenerateDataKey and
/// GenerateDataKeyWithoutPlaintext; `include_plaintext` controls whether
/// the unwrapped key material is echoed back in the response.
async fn generate_data_key_inner(state: AppState, body: Body, include_plaintext: bool) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let key_id = match require_str(&req, &["KeyId", "key_id"], "Missing KeyId") {
        Ok(value) => value,
        Err(response) => return response,
    };
    // Default of 32 bytes corresponds to a 256-bit data key.
    let num_bytes = req
        .get("NumberOfBytes")
        .and_then(|v| v.as_u64())
        .unwrap_or(32) as usize;

    // Enforce the configured size window before touching the KMS backend.
    if num_bytes < state.config.kms_generate_data_key_min_bytes
        || num_bytes > state.config.kms_generate_data_key_max_bytes
    {
        return json_err(
            StatusCode::BAD_REQUEST,
            &format!(
                "NumberOfBytes must be {}-{}",
                state.config.kms_generate_data_key_min_bytes,
                state.config.kms_generate_data_key_max_bytes
            ),
        );
    }

    match kms.generate_data_key(key_id, num_bytes).await {
        Ok((plaintext, wrapped)) => {
            let mut value = json!({
                "KeyId": key_id,
                "CiphertextBlob": B64.encode(&wrapped),
            });
            // Only GenerateDataKey exposes the raw key material.
            if include_plaintext {
                value["Plaintext"] = json!(B64.encode(&plaintext));
            }
            json_ok(value)
        }
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// POST handler: re-encrypt a ciphertext under a different KMS key.
///
/// The source key is not part of the request: the handler attempts to
/// decrypt the blob under every *enabled* key until one succeeds, which
/// costs O(number of keys) decrypt attempts per call.
pub async fn re_encrypt(State(state): State<AppState>, body: Body) -> Response {
    let kms = match require_kms(&state) {
        Ok(kms) => kms,
        Err(response) => return response,
    };
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };

    let ciphertext_b64 = match require_str(
        &req,
        &["CiphertextBlob", "ciphertext_blob"],
        "CiphertextBlob is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let destination_key_id = match require_str(
        &req,
        &["DestinationKeyId", "destination_key_id"],
        "DestinationKeyId is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let ciphertext = match decode_b64(ciphertext_b64, "CiphertextBlob") {
        Ok(value) => value,
        Err(response) => return response,
    };

    // Probe each enabled key; the first successful decryption identifies
    // the source key and recovers the plaintext in one pass.
    let keys = kms.list_keys().await;
    let mut source_key_id: Option<String> = None;
    let mut plaintext: Option<Vec<u8>> = None;
    for key in keys {
        if !key.enabled {
            continue;
        }
        if let Ok(value) = kms.decrypt_data(&key.key_id, &ciphertext).await {
            source_key_id = Some(key.key_id);
            plaintext = Some(value);
            break;
        }
    }

    let Some(source_key_id) = source_key_id else {
        return json_err(
            StatusCode::BAD_REQUEST,
            "Could not determine source key for CiphertextBlob",
        );
    };
    // `plaintext` is always Some when `source_key_id` is Some (both are
    // set together above); unwrap_or_default is just belt-and-braces.
    let plaintext = plaintext.unwrap_or_default();

    match kms.encrypt_data(destination_key_id, &plaintext).await {
        Ok(new_ciphertext) => json_ok(json!({
            "CiphertextBlob": B64.encode(&new_ciphertext),
            "SourceKeyId": source_key_id,
            "KeyId": destination_key_id,
        })),
        Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
    }
}
|
||||
|
||||
/// POST handler: return `NumberOfBytes` of cryptographically random data
/// as base64 (default 32 bytes).
///
/// KMS must be enabled even though no key is used; the data-key byte
/// bounds from the config are reused to cap the request size.
pub async fn generate_random(State(state): State<AppState>, body: Body) -> Response {
    if let Err(response) = require_kms(&state) {
        return response;
    }
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let num_bytes = req
        .get("NumberOfBytes")
        .and_then(|v| v.as_u64())
        .unwrap_or(32) as usize;

    if num_bytes < state.config.kms_generate_data_key_min_bytes
        || num_bytes > state.config.kms_generate_data_key_max_bytes
    {
        return json_err(
            StatusCode::BAD_REQUEST,
            &format!(
                "NumberOfBytes must be {}-{}",
                state.config.kms_generate_data_key_min_bytes,
                state.config.kms_generate_data_key_max_bytes
            ),
        );
    }

    let mut bytes = vec![0u8; num_bytes];
    rand::thread_rng().fill_bytes(&mut bytes);
    json_ok(json!({
        "Plaintext": B64.encode(bytes),
    }))
}
|
||||
|
||||
pub async fn client_generate_key(State(state): State<AppState>) -> Response {
|
||||
let _ = state;
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut key);
|
||||
json_ok(json!({
|
||||
"Key": B64.encode(key),
|
||||
"Algorithm": "AES-256-GCM",
|
||||
"KeySize": 32,
|
||||
}))
|
||||
}
|
||||
|
||||
/// POST handler: AES-256-GCM encrypt for clients that hold their own key.
///
/// Request fields: `Plaintext` (base64) and `Key` (base64, must decode to
/// exactly 32 bytes). A fresh random 12-byte nonce is generated per call
/// and returned alongside the ciphertext (which includes the GCM tag, as
/// produced by `Aes256Gcm::encrypt`).
pub async fn client_encrypt(State(state): State<AppState>, body: Body) -> Response {
    // State is unused; kept for a uniform handler signature.
    let _ = state;
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let plaintext_b64 =
        match require_str(&req, &["Plaintext", "plaintext"], "Plaintext is required") {
            Ok(value) => value,
            Err(response) => return response,
        };
    let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
        Ok(value) => value,
        Err(response) => return response,
    };

    let plaintext = match decode_b64(plaintext_b64, "Plaintext") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_bytes = match decode_b64(key_b64, "Key") {
        Ok(value) => value,
        Err(response) => return response,
    };
    if key_bytes.len() != 32 {
        return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
    }

    let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
        Ok(cipher) => cipher,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
    };
    // 96-bit random nonce, the standard size for GCM.
    let mut nonce_bytes = [0u8; 12];
    rand::thread_rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);

    match cipher.encrypt(nonce, plaintext.as_ref()) {
        Ok(ciphertext) => json_ok(json!({
            "Ciphertext": B64.encode(ciphertext),
            "Nonce": B64.encode(nonce_bytes),
            "Algorithm": "AES-256-GCM",
        })),
        Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
    }
}
|
||||
|
||||
/// POST handler: AES-256-GCM decrypt for clients that hold their own key.
///
/// Request fields: `Ciphertext` (base64, tag included), `Nonce` (base64,
/// must decode to 12 bytes), and `Key` (base64, must decode to 32 bytes).
/// Authentication failure (wrong key/nonce/tampered data) surfaces as a
/// 400 with the cipher's error text.
pub async fn client_decrypt(State(state): State<AppState>, body: Body) -> Response {
    // State is unused; kept for a uniform handler signature.
    let _ = state;
    let req = match read_json(body).await {
        Ok(req) => req,
        Err(response) => return response,
    };
    let ciphertext_b64 = match require_str(
        &req,
        &["Ciphertext", "ciphertext"],
        "Ciphertext is required",
    ) {
        Ok(value) => value,
        Err(response) => return response,
    };
    let nonce_b64 = match require_str(&req, &["Nonce", "nonce"], "Nonce is required") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_b64 = match require_str(&req, &["Key", "key"], "Key is required") {
        Ok(value) => value,
        Err(response) => return response,
    };

    let ciphertext = match decode_b64(ciphertext_b64, "Ciphertext") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let nonce_bytes = match decode_b64(nonce_b64, "Nonce") {
        Ok(value) => value,
        Err(response) => return response,
    };
    let key_bytes = match decode_b64(key_b64, "Key") {
        Ok(value) => value,
        Err(response) => return response,
    };
    // Validate sizes before handing anything to the cipher.
    if key_bytes.len() != 32 {
        return json_err(StatusCode::BAD_REQUEST, "Key must decode to 32 bytes");
    }
    if nonce_bytes.len() != 12 {
        return json_err(StatusCode::BAD_REQUEST, "Nonce must decode to 12 bytes");
    }

    let cipher = match Aes256Gcm::new_from_slice(&key_bytes) {
        Ok(cipher) => cipher,
        Err(_) => return json_err(StatusCode::BAD_REQUEST, "Invalid encryption key"),
    };
    let nonce = Nonce::from_slice(&nonce_bytes);

    match cipher.decrypt(nonce, ciphertext.as_ref()) {
        Ok(plaintext) => json_ok(json!({
            "Plaintext": B64.encode(plaintext),
        })),
        Err(e) => json_err(StatusCode::BAD_REQUEST, &e.to_string()),
    }
}
|
||||
|
||||
pub async fn materials(
|
||||
State(state): State<AppState>,
|
||||
axum::extract::Path(key_id): axum::extract::Path<String>,
|
||||
body: Body,
|
||||
) -> Response {
|
||||
let kms = match require_kms(&state) {
|
||||
Ok(kms) => kms,
|
||||
Err(response) => return response,
|
||||
};
|
||||
let _ = match read_json(body).await {
|
||||
Ok(req) => req,
|
||||
Err(response) => return response,
|
||||
};
|
||||
|
||||
match kms.generate_data_key(&key_id, 32).await {
|
||||
Ok((plaintext, wrapped)) => json_ok(json!({
|
||||
"PlaintextKey": B64.encode(plaintext),
|
||||
"EncryptedKey": B64.encode(wrapped),
|
||||
"KeyId": key_id,
|
||||
"Algorithm": "AES-256-GCM",
|
||||
"KeyWrapAlgorithm": "kms",
|
||||
})),
|
||||
Err(e) => json_err(StatusCode::INTERNAL_SERVER_ERROR, &e.to_string()),
|
||||
}
|
||||
}
|
||||
3057
crates/myfsio-server/src/handlers/mod.rs
Normal file
3057
crates/myfsio-server/src/handlers/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
578
crates/myfsio-server/src/handlers/select.rs
Normal file
578
crates/myfsio-server/src/handlers/select.rs
Normal file
@@ -0,0 +1,578 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use axum::body::Body;
|
||||
use axum::http::{HeaderMap, HeaderName, StatusCode};
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use base64::Engine;
|
||||
use bytes::Bytes;
|
||||
use crc32fast::Hasher;
|
||||
use duckdb::types::ValueRef;
|
||||
use duckdb::Connection;
|
||||
use futures::stream;
|
||||
use http_body_util::BodyExt;
|
||||
use myfsio_common::error::{S3Error, S3ErrorCode};
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
// NOTE(review): forces the Windows Restart Manager import library
// (Rstrtmgr.lib) to be linked even though no symbols are declared here —
// presumably a link-time workaround for a dependency. TODO confirm this is
// still needed; an empty extern block is otherwise dead weight.
#[cfg(target_os = "windows")]
#[link(name = "Rstrtmgr")]
extern "system" {}
|
||||
|
||||
/// Maximum payload size in bytes for a single streamed `Records` event;
/// query output is sliced into chunks of at most this size before framing.
const CHUNK_SIZE: usize = 65_536;
|
||||
|
||||
/// Handles `POST ?select&select-type=2` (SelectObjectContent).
///
/// Parses the XML request, loads the object, runs the SQL query on a blocking
/// thread, and streams the result back as AWS event-stream frames
/// (`Records` events, then a `Stats` event, then an `End` event).
pub async fn post_select_object_content(
    state: &AppState,
    bucket: &str,
    key: &str,
    headers: &HeaderMap,
    body: Body,
) -> Response {
    // Reject non-XML content types up front (empty Content-Type is tolerated).
    if let Some(resp) = require_xml_content_type(headers) {
        return resp;
    }

    let body_bytes = match body.collect().await {
        Ok(collected) => collected.to_bytes(),
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::MalformedXML,
                "Unable to parse XML document",
            ));
        }
    };

    let request = match parse_select_request(&body_bytes) {
        Ok(r) => r,
        Err(err) => return s3_error_response(err),
    };

    // Any storage lookup failure is reported as NoSuchKey.
    let object_path = match state.storage.get_object_path(bucket, key).await {
        Ok(path) => path,
        Err(_) => {
            return s3_error_response(S3Error::new(S3ErrorCode::NoSuchKey, "Object not found"));
        }
    };

    // DuckDB execution is synchronous; run it off the async runtime.
    let join_res =
        tokio::task::spawn_blocking(move || execute_select_query(object_path, request)).await;
    let chunks = match join_res {
        Ok(Ok(chunks)) => chunks,
        // Query-level failures (bad SQL, unreadable data) are client errors.
        Ok(Err(message)) => {
            return s3_error_response(S3Error::new(S3ErrorCode::InvalidRequest, message));
        }
        // The blocking task itself panicked or was cancelled.
        Err(_) => {
            return s3_error_response(S3Error::new(
                S3ErrorCode::InternalError,
                "SelectObjectContent execution failed",
            ));
        }
    };

    let bytes_returned: usize = chunks.iter().map(|c| c.len()).sum();
    // One Records event per chunk, plus the trailing Stats and End events.
    let mut events: Vec<Bytes> = Vec::with_capacity(chunks.len() + 2);
    for chunk in chunks {
        events.push(Bytes::from(encode_select_event("Records", &chunk)));
    }

    // BytesScanned/BytesProcessed are reported as 0 — not tracked here.
    let stats_payload = build_stats_xml(0, bytes_returned);
    events.push(Bytes::from(encode_select_event(
        "Stats",
        stats_payload.as_bytes(),
    )));
    events.push(Bytes::from(encode_select_event("End", b"")));

    let stream = stream::iter(events.into_iter().map(Ok::<Bytes, std::io::Error>));
    let body = Body::from_stream(stream);

    let mut response = (StatusCode::OK, body).into_response();
    response.headers_mut().insert(
        HeaderName::from_static("content-type"),
        "application/octet-stream".parse().unwrap(),
    );
    response.headers_mut().insert(
        HeaderName::from_static("x-amz-request-charged"),
        "requester".parse().unwrap(),
    );
    response
}
|
||||
|
||||
/// Parsed SelectObjectContent request: the SQL expression plus how to read
/// the stored object and how to serialize result rows.
#[derive(Clone)]
struct SelectRequest {
    expression: String,
    input_format: InputFormat,
    output_format: OutputFormat,
}

/// How the stored object is interpreted when loading it into DuckDB.
#[derive(Clone)]
enum InputFormat {
    Csv(CsvInputConfig),
    Json(JsonInputConfig),
    Parquet,
}

/// CSV input options from `InputSerialization/CSV`.
#[derive(Clone)]
struct CsvInputConfig {
    // Upper-cased; defaults to "NONE" when absent.
    file_header_info: String,
    field_delimiter: String,
    quote_character: String,
}

/// JSON input options from `InputSerialization/JSON`.
#[derive(Clone)]
struct JsonInputConfig {
    // Upper-cased "DOCUMENT" or "LINES"; defaults to "DOCUMENT".
    json_type: String,
}

/// How result rows are serialized back to the client.
#[derive(Clone)]
enum OutputFormat {
    Csv(CsvOutputConfig),
    Json(JsonOutputConfig),
}

/// CSV output options from `OutputSerialization/CSV`.
#[derive(Clone)]
struct CsvOutputConfig {
    field_delimiter: String,
    record_delimiter: String,
    quote_character: String,
}

/// JSON output options from `OutputSerialization/JSON`.
#[derive(Clone)]
struct JsonOutputConfig {
    record_delimiter: String,
}
|
||||
|
||||
fn parse_select_request(payload: &[u8]) -> Result<SelectRequest, S3Error> {
|
||||
let xml = String::from_utf8_lossy(payload);
|
||||
let doc = roxmltree::Document::parse(&xml)
|
||||
.map_err(|_| S3Error::new(S3ErrorCode::MalformedXML, "Unable to parse XML document"))?;
|
||||
|
||||
let root = doc.root_element();
|
||||
if root.tag_name().name() != "SelectObjectContentRequest" {
|
||||
return Err(S3Error::new(
|
||||
S3ErrorCode::MalformedXML,
|
||||
"Root element must be SelectObjectContentRequest",
|
||||
));
|
||||
}
|
||||
|
||||
let expression = child_text(&root, "Expression")
|
||||
.filter(|v| !v.is_empty())
|
||||
.ok_or_else(|| S3Error::new(S3ErrorCode::InvalidRequest, "Expression is required"))?;
|
||||
|
||||
let expression_type = child_text(&root, "ExpressionType").unwrap_or_else(|| "SQL".to_string());
|
||||
if !expression_type.eq_ignore_ascii_case("SQL") {
|
||||
return Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"Only SQL expression type is supported",
|
||||
));
|
||||
}
|
||||
|
||||
let input_node = child(&root, "InputSerialization").ok_or_else(|| {
|
||||
S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"InputSerialization is required",
|
||||
)
|
||||
})?;
|
||||
let output_node = child(&root, "OutputSerialization").ok_or_else(|| {
|
||||
S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"OutputSerialization is required",
|
||||
)
|
||||
})?;
|
||||
|
||||
let input_format = parse_input_format(&input_node)?;
|
||||
let output_format = parse_output_format(&output_node)?;
|
||||
|
||||
Ok(SelectRequest {
|
||||
expression,
|
||||
input_format,
|
||||
output_format,
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_input_format(node: &roxmltree::Node<'_, '_>) -> Result<InputFormat, S3Error> {
|
||||
if let Some(csv_node) = child(node, "CSV") {
|
||||
return Ok(InputFormat::Csv(CsvInputConfig {
|
||||
file_header_info: child_text(&csv_node, "FileHeaderInfo")
|
||||
.unwrap_or_else(|| "NONE".to_string())
|
||||
.to_ascii_uppercase(),
|
||||
field_delimiter: child_text(&csv_node, "FieldDelimiter")
|
||||
.unwrap_or_else(|| ",".to_string()),
|
||||
quote_character: child_text(&csv_node, "QuoteCharacter")
|
||||
.unwrap_or_else(|| "\"".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(json_node) = child(node, "JSON") {
|
||||
return Ok(InputFormat::Json(JsonInputConfig {
|
||||
json_type: child_text(&json_node, "Type")
|
||||
.unwrap_or_else(|| "DOCUMENT".to_string())
|
||||
.to_ascii_uppercase(),
|
||||
}));
|
||||
}
|
||||
|
||||
if child(node, "Parquet").is_some() {
|
||||
return Ok(InputFormat::Parquet);
|
||||
}
|
||||
|
||||
Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"InputSerialization must specify CSV, JSON, or Parquet",
|
||||
))
|
||||
}
|
||||
|
||||
fn parse_output_format(node: &roxmltree::Node<'_, '_>) -> Result<OutputFormat, S3Error> {
|
||||
if let Some(csv_node) = child(node, "CSV") {
|
||||
return Ok(OutputFormat::Csv(CsvOutputConfig {
|
||||
field_delimiter: child_text(&csv_node, "FieldDelimiter")
|
||||
.unwrap_or_else(|| ",".to_string()),
|
||||
record_delimiter: child_text(&csv_node, "RecordDelimiter")
|
||||
.unwrap_or_else(|| "\n".to_string()),
|
||||
quote_character: child_text(&csv_node, "QuoteCharacter")
|
||||
.unwrap_or_else(|| "\"".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(json_node) = child(node, "JSON") {
|
||||
return Ok(OutputFormat::Json(JsonOutputConfig {
|
||||
record_delimiter: child_text(&json_node, "RecordDelimiter")
|
||||
.unwrap_or_else(|| "\n".to_string()),
|
||||
}));
|
||||
}
|
||||
|
||||
Err(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"OutputSerialization must specify CSV or JSON",
|
||||
))
|
||||
}
|
||||
|
||||
/// Returns the first child element of `node` whose tag name equals `name`.
fn child<'a, 'input>(
    node: &'a roxmltree::Node<'a, 'input>,
    name: &str,
) -> Option<roxmltree::Node<'a, 'input>> {
    for candidate in node.children() {
        if candidate.is_element() && candidate.tag_name().name() == name {
            return Some(candidate);
        }
    }
    None
}
|
||||
|
||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||
child(node, name)
|
||||
.and_then(|n| n.text())
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
/// Runs the Select SQL against the object by loading it into an in-memory
/// DuckDB table named `data`, then serializing the result rows.
///
/// Returns the serialized output pre-split into chunks, or a human-readable
/// error message.
fn execute_select_query(path: PathBuf, request: SelectRequest) -> Result<Vec<Vec<u8>>, String> {
    let conn =
        Connection::open_in_memory().map_err(|e| format!("DuckDB connection error: {}", e))?;

    load_input_table(&conn, &path, &request.input_format)?;

    // S3 Select queries reference `s3object`; the loaded table is `data`.
    // NOTE(review): plain substring replacement — only the exact spellings
    // "s3object" and "S3Object" are rewritten; other casings (e.g. S3OBJECT)
    // pass through unchanged. Confirm whether that is acceptable.
    let expression = request
        .expression
        .replace("s3object", "data")
        .replace("S3Object", "data");

    let mut stmt = conn
        .prepare(&expression)
        .map_err(|e| format!("SQL execution error: {}", e))?;
    let mut rows = stmt
        .query([])
        .map_err(|e| format!("SQL execution error: {}", e))?;
    // Column names come from the statement backing the row iterator; they are
    // needed for JSON output keys.
    let stmt_ref = rows
        .as_ref()
        .ok_or_else(|| "SQL execution error: statement metadata unavailable".to_string())?;
    let col_count = stmt_ref.column_count();
    let mut columns: Vec<String> = Vec::with_capacity(col_count);
    for i in 0..col_count {
        // Fall back to a positional name ("_0", "_1", …) if the name lookup fails.
        let name = stmt_ref
            .column_name(i)
            .map(|s| s.to_string())
            .unwrap_or_else(|_| format!("_{}", i));
        columns.push(name);
    }

    match request.output_format {
        OutputFormat::Csv(cfg) => collect_csv_chunks(&mut rows, col_count, cfg),
        OutputFormat::Json(cfg) => collect_json_chunks(&mut rows, col_count, &columns, cfg),
    }
}
|
||||
|
||||
fn load_input_table(conn: &Connection, path: &Path, input: &InputFormat) -> Result<(), String> {
|
||||
let path_str = path.to_string_lossy().replace('\\', "/");
|
||||
match input {
|
||||
InputFormat::Csv(cfg) => {
|
||||
let header = cfg.file_header_info == "USE" || cfg.file_header_info == "IGNORE";
|
||||
let delimiter = normalize_single_char(&cfg.field_delimiter, ',');
|
||||
let quote = normalize_single_char(&cfg.quote_character, '"');
|
||||
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_csv('{}', header={}, delim='{}', quote='{}')",
|
||||
sql_escape(&path_str),
|
||||
if header { "true" } else { "false" },
|
||||
sql_escape(&delimiter),
|
||||
sql_escape("e)
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading CSV data: {}", e))?;
|
||||
}
|
||||
InputFormat::Json(cfg) => {
|
||||
let format = if cfg.json_type == "LINES" {
|
||||
"newline_delimited"
|
||||
} else {
|
||||
"array"
|
||||
};
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_json_auto('{}', format='{}')",
|
||||
sql_escape(&path_str),
|
||||
format
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading JSON data: {}", e))?;
|
||||
}
|
||||
InputFormat::Parquet => {
|
||||
let sql = format!(
|
||||
"CREATE TABLE data AS SELECT * FROM read_parquet('{}')",
|
||||
sql_escape(&path_str)
|
||||
);
|
||||
conn.execute_batch(&sql)
|
||||
.map_err(|e| format!("Failed loading Parquet data: {}", e))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Escapes a value for embedding in a single-quoted SQL string literal by
/// doubling each embedded single quote.
fn sql_escape(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch == '\'' {
            escaped.push('\'');
        }
        escaped.push(ch);
    }
    escaped
}
|
||||
|
||||
/// Reduces a delimiter/quote option to its first character, falling back to
/// `default_char` when the value is empty.
fn normalize_single_char(value: &str, default_char: char) -> String {
    match value.chars().next() {
        Some(first) => first.to_string(),
        None => default_char.to_string(),
    }
}
|
||||
|
||||
fn collect_csv_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
cfg: CsvOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let delimiter = cfg.field_delimiter;
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let quote = cfg.quote_character;
|
||||
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows
|
||||
.next()
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?
|
||||
{
|
||||
let mut fields: Vec<String> = Vec::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
if matches!(value, ValueRef::Null) {
|
||||
fields.push(String::new());
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut text = value_ref_to_string(value);
|
||||
if text.contains(&delimiter)
|
||||
|| text.contains("e)
|
||||
|| text.contains(&record_delimiter)
|
||||
{
|
||||
text = text.replace("e, &(quote.clone() + "e));
|
||||
text = format!("{}{}{}", quote, text, quote);
|
||||
}
|
||||
fields.push(text);
|
||||
}
|
||||
buffer.push_str(&fields.join(&delimiter));
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
fn collect_json_chunks(
|
||||
rows: &mut duckdb::Rows<'_>,
|
||||
col_count: usize,
|
||||
columns: &[String],
|
||||
cfg: JsonOutputConfig,
|
||||
) -> Result<Vec<Vec<u8>>, String> {
|
||||
let record_delimiter = cfg.record_delimiter;
|
||||
let mut chunks: Vec<Vec<u8>> = Vec::new();
|
||||
let mut buffer = String::new();
|
||||
|
||||
while let Some(row) = rows
|
||||
.next()
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?
|
||||
{
|
||||
let mut record: HashMap<String, serde_json::Value> = HashMap::with_capacity(col_count);
|
||||
for i in 0..col_count {
|
||||
let value = row
|
||||
.get_ref(i)
|
||||
.map_err(|e| format!("SQL execution error: {}", e))?;
|
||||
let key = columns.get(i).cloned().unwrap_or_else(|| format!("_{}", i));
|
||||
record.insert(key, value_ref_to_json(value));
|
||||
}
|
||||
let line = serde_json::to_string(&record)
|
||||
.map_err(|e| format!("JSON output encoding failed: {}", e))?;
|
||||
buffer.push_str(&line);
|
||||
buffer.push_str(&record_delimiter);
|
||||
|
||||
while buffer.len() >= CHUNK_SIZE {
|
||||
let rest = buffer.split_off(CHUNK_SIZE);
|
||||
chunks.push(buffer.into_bytes());
|
||||
buffer = rest;
|
||||
}
|
||||
}
|
||||
|
||||
if !buffer.is_empty() {
|
||||
chunks.push(buffer.into_bytes());
|
||||
}
|
||||
Ok(chunks)
|
||||
}
|
||||
|
||||
/// Converts a DuckDB value to its CSV text form.
///
/// NULL becomes the empty string, blobs are base64-encoded, and any variant
/// not explicitly handled falls back to its `Debug` representation.
fn value_ref_to_string(value: ValueRef<'_>) -> String {
    match value {
        ValueRef::Null => String::new(),
        ValueRef::Boolean(v) => v.to_string(),
        ValueRef::TinyInt(v) => v.to_string(),
        ValueRef::SmallInt(v) => v.to_string(),
        ValueRef::Int(v) => v.to_string(),
        ValueRef::BigInt(v) => v.to_string(),
        ValueRef::UTinyInt(v) => v.to_string(),
        ValueRef::USmallInt(v) => v.to_string(),
        ValueRef::UInt(v) => v.to_string(),
        ValueRef::UBigInt(v) => v.to_string(),
        ValueRef::Float(v) => v.to_string(),
        ValueRef::Double(v) => v.to_string(),
        ValueRef::Decimal(v) => v.to_string(),
        // Text is raw bytes from DuckDB; lossy-decode to tolerate invalid UTF-8.
        ValueRef::Text(v) => String::from_utf8_lossy(v).into_owned(),
        ValueRef::Blob(v) => base64::engine::general_purpose::STANDARD.encode(v),
        // Catch-all: timestamps, lists, etc. render via Debug — not pretty,
        // but lossless enough for diagnostics.
        _ => format!("{:?}", value),
    }
}
|
||||
|
||||
/// Converts a DuckDB value to a `serde_json::Value` for JSON output.
///
/// Decimals are rendered as strings to avoid precision loss; blobs are
/// base64-encoded strings; unhandled variants fall back to their `Debug`
/// representation wrapped in a string.
fn value_ref_to_json(value: ValueRef<'_>) -> serde_json::Value {
    match value {
        ValueRef::Null => serde_json::Value::Null,
        ValueRef::Boolean(v) => serde_json::Value::Bool(v),
        ValueRef::TinyInt(v) => serde_json::json!(v),
        ValueRef::SmallInt(v) => serde_json::json!(v),
        ValueRef::Int(v) => serde_json::json!(v),
        ValueRef::BigInt(v) => serde_json::json!(v),
        ValueRef::UTinyInt(v) => serde_json::json!(v),
        ValueRef::USmallInt(v) => serde_json::json!(v),
        ValueRef::UInt(v) => serde_json::json!(v),
        ValueRef::UBigInt(v) => serde_json::json!(v),
        ValueRef::Float(v) => serde_json::json!(v),
        ValueRef::Double(v) => serde_json::json!(v),
        ValueRef::Decimal(v) => serde_json::Value::String(v.to_string()),
        ValueRef::Text(v) => serde_json::Value::String(String::from_utf8_lossy(v).into_owned()),
        ValueRef::Blob(v) => {
            serde_json::Value::String(base64::engine::general_purpose::STANDARD.encode(v))
        }
        _ => serde_json::Value::String(format!("{:?}", value)),
    }
}
|
||||
|
||||
fn require_xml_content_type(headers: &HeaderMap) -> Option<Response> {
|
||||
let value = headers
|
||||
.get("content-type")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap_or("")
|
||||
.trim();
|
||||
if value.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let lowered = value.to_ascii_lowercase();
|
||||
if lowered.starts_with("application/xml") || lowered.starts_with("text/xml") {
|
||||
return None;
|
||||
}
|
||||
Some(s3_error_response(S3Error::new(
|
||||
S3ErrorCode::InvalidRequest,
|
||||
"Content-Type must be application/xml or text/xml",
|
||||
)))
|
||||
}
|
||||
|
||||
/// Renders an `S3Error` as an XML error response with the matching HTTP
/// status and a freshly generated request id.
fn s3_error_response(err: S3Error) -> Response {
    let status =
        StatusCode::from_u16(err.http_status()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
    // Default the resource to "/" when the error doesn't carry one.
    let resource = if err.resource.is_empty() {
        "/".to_string()
    } else {
        err.resource.clone()
    };
    let body = err
        .with_resource(resource)
        .with_request_id(uuid::Uuid::new_v4().simple().to_string())
        .to_xml();
    (status, [("content-type", "application/xml")], body).into_response()
}
|
||||
|
||||
/// Builds the `<Stats>` payload for the event stream.
///
/// `BytesProcessed` is reported with the same value as `BytesScanned`, since
/// the two are not tracked separately here.
fn build_stats_xml(bytes_scanned: usize, bytes_returned: usize) -> String {
    let scanned = bytes_scanned.to_string();
    let returned = bytes_returned.to_string();

    let mut xml = String::with_capacity(110 + scanned.len() * 2 + returned.len());
    xml.push_str("<Stats><BytesScanned>");
    xml.push_str(&scanned);
    xml.push_str("</BytesScanned><BytesProcessed>");
    xml.push_str(&scanned);
    xml.push_str("</BytesProcessed><BytesReturned>");
    xml.push_str(&returned);
    xml.push_str("</BytesReturned></Stats>");
    xml
}
|
||||
|
||||
fn encode_select_event(event_type: &str, payload: &[u8]) -> Vec<u8> {
|
||||
let mut headers = Vec::new();
|
||||
headers.extend(encode_select_header(":event-type", event_type));
|
||||
if event_type == "Records" {
|
||||
headers.extend(encode_select_header(
|
||||
":content-type",
|
||||
"application/octet-stream",
|
||||
));
|
||||
} else if event_type == "Stats" {
|
||||
headers.extend(encode_select_header(":content-type", "text/xml"));
|
||||
}
|
||||
headers.extend(encode_select_header(":message-type", "event"));
|
||||
|
||||
let headers_len = headers.len() as u32;
|
||||
let total_len = 4 + 4 + 4 + headers.len() + payload.len() + 4;
|
||||
|
||||
let mut message = Vec::with_capacity(total_len);
|
||||
let mut prelude = Vec::with_capacity(8);
|
||||
prelude.extend((total_len as u32).to_be_bytes());
|
||||
prelude.extend(headers_len.to_be_bytes());
|
||||
|
||||
let prelude_crc = crc32(&prelude);
|
||||
message.extend(prelude);
|
||||
message.extend(prelude_crc.to_be_bytes());
|
||||
message.extend(headers);
|
||||
message.extend(payload);
|
||||
|
||||
let msg_crc = crc32(&message);
|
||||
message.extend(msg_crc.to_be_bytes());
|
||||
message
|
||||
}
|
||||
|
||||
/// Encodes one event-stream header: 1-byte name length, name bytes, value
/// type 7 (string), 2-byte big-endian value length, value bytes.
fn encode_select_header(name: &str, value: &str) -> Vec<u8> {
    let mut encoded = Vec::with_capacity(1 + name.len() + 1 + 2 + value.len());
    encoded.push(name.len() as u8);
    encoded.extend_from_slice(name.as_bytes());
    // Header value type 7 = string in the event-stream encoding.
    encoded.push(7);
    encoded.extend_from_slice(&(value.len() as u16).to_be_bytes());
    encoded.extend_from_slice(value.as_bytes());
    encoded
}
|
||||
|
||||
/// CRC-32 checksum of `data` (via `crc32fast`), used for the event-stream
/// prelude and message checksums.
fn crc32(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}
|
||||
236
crates/myfsio-server/src/handlers/ui.rs
Normal file
236
crates/myfsio-server/src/handlers/ui.rs
Normal file
@@ -0,0 +1,236 @@
|
||||
use std::collections::HashMap;
|
||||
use std::error::Error as StdError;
|
||||
|
||||
use axum::extract::{Extension, Form, State};
|
||||
use axum::http::{header, HeaderMap, StatusCode};
|
||||
use axum::response::{IntoResponse, Redirect, Response};
|
||||
use tera::Context;
|
||||
|
||||
use crate::middleware::session::SessionHandle;
|
||||
use crate::session::FlashMessage;
|
||||
use crate::state::AppState;
|
||||
|
||||
pub async fn login_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
if session.read(|s| s.is_authenticated()) {
|
||||
return Redirect::to("/ui/buckets").into_response();
|
||||
}
|
||||
|
||||
let mut ctx = base_context(&session, None);
|
||||
let flashed = session.write(|s| s.take_flash());
|
||||
inject_flash(&mut ctx, flashed);
|
||||
|
||||
render(&state, "login.html", &ctx)
|
||||
}
|
||||
|
||||
/// Form fields posted by the login page.
#[derive(serde::Deserialize)]
pub struct LoginForm {
    pub access_key: String,
    pub secret_key: String,
    // CSRF token; defaults to empty when the field is absent.
    #[serde(default)]
    pub csrf_token: String,
    // Optional post-login redirect target (validated before use).
    #[serde(default)]
    pub next: Option<String>,
}
|
||||
|
||||
/// `POST /login` — verifies the submitted credentials against IAM and, on
/// success, establishes the session and redirects to a validated target.
pub async fn login_submit(
    State(state): State<AppState>,
    Extension(session): Extension<SessionHandle>,
    Form(form): Form<LoginForm>,
) -> Response {
    let access_key = form.access_key.trim();
    let secret_key = form.secret_key.trim();

    match state.iam.get_secret_key(access_key) {
        // Secret comparison is constant-time to avoid a timing oracle.
        Some(expected) if constant_time_eq_str(&expected, secret_key) => {
            // Prefer the IAM display_name; fall back to the access key itself.
            let display = state
                .iam
                .get_user(access_key)
                .await
                .and_then(|v| {
                    v.get("display_name")
                        .and_then(|d| d.as_str())
                        .map(|s| s.to_string())
                })
                .unwrap_or_else(|| access_key.to_string());

            session.write(|s| {
                s.user_id = Some(access_key.to_string());
                s.display_name = Some(display);
                // Rotate the CSRF token on privilege change (session fixation
                // hygiene) before queueing the success message.
                s.rotate_csrf();
                s.push_flash("success", "Signed in successfully.");
            });

            // Only follow `next` if it is a local UI path or an allow-listed
            // host; otherwise fall back to the bucket overview.
            let next = form
                .next
                .as_deref()
                .filter(|n| is_allowed_redirect(n, &state.config.allowed_redirect_hosts))
                .unwrap_or("/ui/buckets")
                .to_string();
            Redirect::to(&next).into_response()
        }
        // Unknown access key and wrong secret produce the same message so the
        // response does not reveal which part was wrong.
        _ => {
            session.write(|s| {
                s.push_flash("danger", "Invalid access key or secret key.");
            });
            Redirect::to("/login").into_response()
        }
    }
}
|
||||
|
||||
/// Decides whether a post-login redirect target is safe to follow.
///
/// Local UI paths (`/ui`, `/ui/...`) are always allowed. Absolute URLs must
/// use http(s) and their host (after dropping any userinfo and port, compared
/// case-insensitively) must appear in `allowed_hosts`. Everything else —
/// including protocol-relative `//host` targets — is rejected.
fn is_allowed_redirect(target: &str, allowed_hosts: &[String]) -> bool {
    if target == "/ui" || target.starts_with("/ui/") {
        return true;
    }

    let rest = match target
        .strip_prefix("https://")
        .or_else(|| target.strip_prefix("http://"))
    {
        Some(r) => r,
        None => return false,
    };

    // authority = everything up to the first '/', then strip userinfo@ and :port.
    let authority = rest.split('/').next().unwrap_or_default();
    let host_port = authority.split('@').last().unwrap_or_default();
    let host = host_port
        .split(':')
        .next()
        .unwrap_or_default()
        .to_ascii_lowercase();

    allowed_hosts
        .iter()
        .any(|allowed| allowed.eq_ignore_ascii_case(&host))
}
|
||||
|
||||
/// `POST /logout` (or GET) — clears the session identity and redirects to
/// the login page.
pub async fn logout(Extension(session): Extension<SessionHandle>) -> Response {
    session.write(|s| {
        // Drop identity and any queued messages, then rotate the CSRF token
        // so the pre-logout token cannot be replayed; finally queue the
        // sign-out notice for the login page.
        s.user_id = None;
        s.display_name = None;
        s.flash.clear();
        s.rotate_csrf();
        s.push_flash("info", "Signed out.");
    });
    Redirect::to("/login").into_response()
}
|
||||
|
||||
pub async fn csrf_error_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
let ctx = base_context(&session, None);
|
||||
let mut resp = render(&state, "csrf_error.html", &ctx);
|
||||
*resp.status_mut() = StatusCode::FORBIDDEN;
|
||||
resp
|
||||
}
|
||||
|
||||
/// `GET /` and `/ui` — unconditionally redirect to the bucket overview.
pub async fn root_redirect() -> Response {
    Redirect::to("/ui/buckets").into_response()
}
|
||||
|
||||
pub async fn not_found_page(
|
||||
State(state): State<AppState>,
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
) -> Response {
|
||||
let ctx = base_context(&session, None);
|
||||
let mut resp = render(&state, "404.html", &ctx);
|
||||
*resp.status_mut() = StatusCode::NOT_FOUND;
|
||||
resp
|
||||
}
|
||||
|
||||
pub async fn require_login(
|
||||
Extension(session): Extension<SessionHandle>,
|
||||
req: axum::extract::Request,
|
||||
next: axum::middleware::Next,
|
||||
) -> Response {
|
||||
if session.read(|s| s.is_authenticated()) {
|
||||
return next.run(req).await;
|
||||
}
|
||||
let path = req.uri().path().to_string();
|
||||
let query = req
|
||||
.uri()
|
||||
.query()
|
||||
.map(|q| format!("?{}", q))
|
||||
.unwrap_or_default();
|
||||
let next_url = format!("{}{}", path, query);
|
||||
let encoded =
|
||||
percent_encoding::utf8_percent_encode(&next_url, percent_encoding::NON_ALPHANUMERIC)
|
||||
.to_string();
|
||||
let target = format!("/login?next={}", encoded);
|
||||
Redirect::to(&target).into_response()
|
||||
}
|
||||
|
||||
/// Renders `template` with the Tera engine from `state`.
///
/// On render failure the full error chain is logged and the 500 template is
/// served instead (with a plain-text fallback when the 500 template itself
/// fails or is the one being rendered).
pub fn render(state: &AppState, template: &str, ctx: &Context) -> Response {
    let engine = match &state.templates {
        Some(e) => e,
        None => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                "Templates not configured",
            )
                .into_response();
        }
    };
    match engine.render(template, ctx) {
        Ok(html) => {
            let mut headers = HeaderMap::new();
            headers.insert(
                header::CONTENT_TYPE,
                "text/html; charset=utf-8".parse().unwrap(),
            );
            (StatusCode::OK, headers, html).into_response()
        }
        Err(e) => {
            // Tera errors often bury the real cause in the source chain;
            // flatten it so the log line is actionable.
            let mut detail = format!("{}", e);
            let mut src = StdError::source(&e);
            while let Some(s) = src {
                detail.push_str(" | ");
                detail.push_str(&s.to_string());
                src = s.source();
            }
            tracing::error!("Template render failed ({}): {}", template, detail);
            // Render the 500 page with a fresh (empty) context — the original
            // context may be what broke the template.
            let fallback_ctx = Context::new();
            // Guard against infinite recursion when 500.html itself fails.
            let body = if template != "500.html" {
                engine
                    .render("500.html", &fallback_ctx)
                    .unwrap_or_else(|_| "Internal Server Error".to_string())
            } else {
                "Internal Server Error".to_string()
            };
            let mut headers = HeaderMap::new();
            headers.insert(
                header::CONTENT_TYPE,
                "text/html; charset=utf-8".parse().unwrap(),
            );
            (StatusCode::INTERNAL_SERVER_ERROR, headers, body).into_response()
        }
    }
}
|
||||
|
||||
pub fn base_context(session: &SessionHandle, endpoint: Option<&str>) -> Context {
|
||||
let mut ctx = Context::new();
|
||||
let snapshot = session.snapshot();
|
||||
ctx.insert("csrf_token_value", &snapshot.csrf_token);
|
||||
ctx.insert("is_authenticated", &snapshot.user_id.is_some());
|
||||
ctx.insert("current_user", &snapshot.user_id);
|
||||
ctx.insert("current_user_display_name", &snapshot.display_name);
|
||||
ctx.insert("current_endpoint", &endpoint.unwrap_or(""));
|
||||
ctx.insert("request_args", &HashMap::<String, String>::new());
|
||||
ctx.insert("null", &serde_json::Value::Null);
|
||||
ctx.insert("none", &serde_json::Value::Null);
|
||||
ctx
|
||||
}
|
||||
|
||||
/// Exposes drained flash messages to templates under `flashed_messages`.
pub fn inject_flash(ctx: &mut Context, flashed: Vec<FlashMessage>) {
    ctx.insert("flashed_messages", &flashed);
}
|
||||
|
||||
/// Compares two strings in constant time using `subtle`.
///
/// The early length check returns in variable time, so the *length* of the
/// expected secret can leak; the byte contents are still compared in
/// constant time, which is the usual trade-off for this helper.
fn constant_time_eq_str(a: &str, b: &str) -> bool {
    if a.len() != b.len() {
        return false;
    }
    subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}
|
||||
3595
crates/myfsio-server/src/handlers/ui_api.rs
Normal file
3595
crates/myfsio-server/src/handlers/ui_api.rs
Normal file
File diff suppressed because it is too large
Load Diff
3086
crates/myfsio-server/src/handlers/ui_pages.rs
Normal file
3086
crates/myfsio-server/src/handlers/ui_pages.rs
Normal file
File diff suppressed because it is too large
Load Diff
645
crates/myfsio-server/src/lib.rs
Normal file
645
crates/myfsio-server/src/lib.rs
Normal file
@@ -0,0 +1,645 @@
|
||||
pub mod config;
|
||||
pub mod handlers;
|
||||
pub mod middleware;
|
||||
pub mod services;
|
||||
pub mod session;
|
||||
pub mod state;
|
||||
pub mod stores;
|
||||
pub mod templates;
|
||||
|
||||
use axum::Router;
|
||||
|
||||
pub const SERVER_HEADER: &str = concat!("MyFSIO-Rust/", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
/// Assemble the web-UI router: login-protected pages and JSON endpoints,
/// public auth pages, static assets, and the session/CSRF/metrics middleware
/// stack.
///
/// Several paths are registered twice under different spellings (e.g.
/// `/copy-targets` and `/list-for-copy`) — presumably aliases kept for
/// backward compatibility; confirm before removing either.
pub fn create_ui_router(state: state::AppState) -> Router {
    use axum::routing::{delete, get, post, put};
    use handlers::ui;
    use handlers::ui_api;
    use handlers::ui_pages;

    // Everything below requires a logged-in session (see `require_login`
    // layer at the end of the chain).
    let protected = Router::new()
        // Root redirects into the UI.
        .route("/", get(ui::root_redirect))
        .route("/ui", get(ui::root_redirect))
        .route("/ui/", get(ui::root_redirect))
        // --- Bucket pages and per-bucket settings ---
        .route(
            "/ui/buckets",
            get(ui_pages::buckets_overview).post(ui_pages::create_bucket),
        )
        .route("/ui/buckets/create", post(ui_pages::create_bucket))
        .route("/ui/buckets/{bucket_name}", get(ui_pages::bucket_detail))
        .route(
            "/ui/buckets/{bucket_name}/delete",
            post(ui_pages::delete_bucket),
        )
        .route(
            "/ui/buckets/{bucket_name}/versioning",
            post(ui_pages::update_bucket_versioning),
        )
        .route(
            "/ui/buckets/{bucket_name}/quota",
            post(ui_pages::update_bucket_quota),
        )
        .route(
            "/ui/buckets/{bucket_name}/encryption",
            post(ui_pages::update_bucket_encryption),
        )
        .route(
            "/ui/buckets/{bucket_name}/policy",
            post(ui_pages::update_bucket_policy),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication",
            post(ui_pages::update_bucket_replication),
        )
        .route(
            "/ui/buckets/{bucket_name}/website",
            post(ui_pages::update_bucket_website),
        )
        // --- Object upload / multipart flows ---
        .route(
            "/ui/buckets/{bucket_name}/upload",
            post(ui_api::upload_object),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/initiate",
            post(ui_api::initiate_multipart_upload),
        )
        // `/part` and `/parts` are spelled-out aliases for the same handler.
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/part",
            put(ui_api::upload_multipart_part),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/parts",
            put(ui_api::upload_multipart_part),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/complete",
            post(ui_api::complete_multipart_upload),
        )
        // Abort is reachable via an explicit `/abort` path or a bare DELETE.
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
            delete(ui_api::abort_multipart_upload),
        )
        .route(
            "/ui/buckets/{bucket_name}/multipart/{upload_id}",
            delete(ui_api::abort_multipart_upload),
        )
        // --- Object listing / browsing ---
        .route(
            "/ui/buckets/{bucket_name}/objects",
            get(ui_api::list_bucket_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/objects/stream",
            get(ui_api::stream_bucket_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/folders",
            get(ui_api::list_bucket_folders),
        )
        // Alias pair for the copy-destination picker.
        .route(
            "/ui/buckets/{bucket_name}/copy-targets",
            get(ui_api::list_copy_targets),
        )
        .route(
            "/ui/buckets/{bucket_name}/list-for-copy",
            get(ui_api::list_copy_targets),
        )
        // --- Bulk object operations ---
        .route(
            "/ui/buckets/{bucket_name}/objects/bulk-delete",
            post(ui_api::bulk_delete_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/objects/bulk-download",
            post(ui_api::bulk_download_objects),
        )
        // Catch-all for a single object key (may contain slashes).
        .route(
            "/ui/buckets/{bucket_name}/objects/{*rest}",
            get(ui_api::object_get_dispatch).post(ui_api::object_post_dispatch),
        )
        // --- Bucket ACL / CORS / lifecycle ---
        .route(
            "/ui/buckets/{bucket_name}/acl",
            get(ui_api::bucket_acl).post(ui_api::update_bucket_acl),
        )
        .route(
            "/ui/buckets/{bucket_name}/cors",
            get(ui_api::bucket_cors).post(ui_api::update_bucket_cors),
        )
        .route(
            "/ui/buckets/{bucket_name}/lifecycle",
            get(ui_api::bucket_lifecycle).post(ui_api::update_bucket_lifecycle),
        )
        .route(
            "/ui/buckets/{bucket_name}/lifecycle/history",
            get(ui_api::lifecycle_history),
        )
        // --- Replication status / failure management ---
        .route(
            "/ui/buckets/{bucket_name}/replication/status",
            get(ui_api::replication_status),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures",
            get(ui_api::replication_failures).delete(ui_api::clear_replication_failures),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/retry",
            post(ui_api::retry_replication_failure),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/retry-all",
            post(ui_api::retry_all_replication_failures),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/dismiss",
            delete(ui_api::dismiss_replication_failure),
        )
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/clear",
            delete(ui_api::clear_replication_failures),
        )
        // Path-addressed variants (failure identified by the `{*rest}` tail).
        .route(
            "/ui/buckets/{bucket_name}/replication/failures/{*rest}",
            post(ui_api::retry_replication_failure_path)
                .delete(ui_api::dismiss_replication_failure_path),
        )
        // Short-path aliases for the bulk operations registered above.
        .route(
            "/ui/buckets/{bucket_name}/bulk-delete",
            post(ui_api::bulk_delete_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/bulk-download",
            post(ui_api::bulk_download_objects),
        )
        // --- Archived (non-current version) objects ---
        .route(
            "/ui/buckets/{bucket_name}/archived",
            get(ui_api::archived_objects),
        )
        .route(
            "/ui/buckets/{bucket_name}/archived/{*rest}",
            post(ui_api::archived_post_dispatch),
        )
        // --- IAM management ---
        .route("/ui/iam", get(ui_pages::iam_dashboard))
        .route("/ui/iam/users", post(ui_pages::create_iam_user))
        .route("/ui/iam/users/{user_id}", post(ui_pages::update_iam_user))
        .route(
            "/ui/iam/users/{user_id}/delete",
            post(ui_pages::delete_iam_user),
        )
        .route(
            "/ui/iam/users/{user_id}/update",
            post(ui_pages::update_iam_user),
        )
        .route(
            "/ui/iam/users/{user_id}/policies",
            post(ui_pages::update_iam_policies),
        )
        .route(
            "/ui/iam/users/{user_id}/expiry",
            post(ui_pages::update_iam_expiry),
        )
        // `/rotate-secret` and `/rotate` are aliases.
        .route(
            "/ui/iam/users/{user_id}/rotate-secret",
            post(ui_pages::rotate_iam_secret),
        )
        .route(
            "/ui/iam/users/{user_id}/rotate",
            post(ui_pages::rotate_iam_secret),
        )
        // --- Remote connections ---
        .route("/ui/connections/create", post(ui_pages::create_connection))
        .route("/ui/connections/test", post(ui_api::test_connection))
        .route(
            "/ui/connections/{connection_id}",
            post(ui_pages::update_connection),
        )
        .route(
            "/ui/connections/{connection_id}/update",
            post(ui_pages::update_connection),
        )
        .route(
            "/ui/connections/{connection_id}/delete",
            post(ui_pages::delete_connection),
        )
        .route(
            "/ui/connections/{connection_id}/health",
            get(ui_api::connection_health),
        )
        // --- Multi-site (peer) management ---
        .route("/ui/sites", get(ui_pages::sites_dashboard))
        .route("/ui/sites/local", post(ui_pages::update_local_site))
        .route("/ui/sites/peers", post(ui_pages::add_peer_site))
        .route(
            "/ui/sites/peers/{site_id}/update",
            post(ui_pages::update_peer_site),
        )
        .route(
            "/ui/sites/peers/{site_id}/delete",
            post(ui_pages::delete_peer_site),
        )
        .route("/ui/sites/peers/{site_id}/health", get(ui_api::peer_health))
        .route(
            "/ui/sites/peers/{site_id}/sync-stats",
            get(ui_api::peer_sync_stats),
        )
        .route(
            "/ui/sites/peers/{site_id}/bidirectional-status",
            get(ui_api::peer_bidirectional_status),
        )
        .route(
            "/ui/connections",
            get(ui_pages::connections_dashboard).post(ui_pages::create_connection),
        )
        // --- Metrics ---
        .route("/ui/metrics", get(ui_pages::metrics_dashboard))
        .route(
            "/ui/metrics/settings",
            get(ui_api::metrics_settings).put(ui_api::update_metrics_settings),
        )
        .route("/ui/metrics/api", get(ui_api::metrics_api))
        .route("/ui/metrics/history", get(ui_api::metrics_history))
        .route("/ui/metrics/operations", get(ui_api::metrics_operations))
        .route(
            "/ui/metrics/operations/history",
            get(ui_api::metrics_operations_history),
        )
        // --- System maintenance (GC / integrity) ---
        .route("/ui/system", get(ui_pages::system_dashboard))
        .route("/ui/system/gc/status", get(ui_api::gc_status_ui))
        .route("/ui/system/gc/run", post(ui_api::gc_run_ui))
        .route("/ui/system/gc/history", get(ui_api::gc_history_ui))
        .route(
            "/ui/system/integrity/status",
            get(ui_api::integrity_status_ui),
        )
        .route("/ui/system/integrity/run", post(ui_api::integrity_run_ui))
        .route(
            "/ui/system/integrity/history",
            get(ui_api::integrity_history_ui),
        )
        // --- Website domain management ---
        .route(
            "/ui/website-domains",
            get(ui_pages::website_domains_dashboard),
        )
        .route(
            "/ui/website-domains/create",
            post(ui_pages::create_website_domain),
        )
        .route(
            "/ui/website-domains/{domain}",
            post(ui_pages::update_website_domain),
        )
        .route(
            "/ui/website-domains/{domain}/update",
            post(ui_pages::update_website_domain),
        )
        .route(
            "/ui/website-domains/{domain}/delete",
            post(ui_pages::delete_website_domain),
        )
        // --- Replication rule wizard ---
        .route("/ui/replication/new", get(ui_pages::replication_wizard))
        .route(
            "/ui/replication/create",
            post(ui_pages::create_peer_replication_rules_from_query),
        )
        .route(
            "/ui/sites/peers/{site_id}/replication-rules",
            post(ui_pages::create_peer_replication_rules),
        )
        .route("/ui/docs", get(ui_pages::docs_page))
        // Gate everything above behind an authenticated session.
        .layer(axum::middleware::from_fn(ui::require_login));

    // Auth pages reachable without a session.
    let public = Router::new()
        .route("/login", get(ui::login_page).post(ui::login_submit))
        .route("/logout", post(ui::logout).get(ui::logout))
        .route("/csrf-error", get(ui::csrf_error_page));

    // NOTE(review): `secure: false` means session cookies are not marked
    // Secure — confirm this is intentional for non-TLS deployments.
    let session_state = middleware::SessionLayerState {
        store: state.sessions.clone(),
        secure: false,
    };

    let static_service = tower_http::services::ServeDir::new(&state.config.static_dir);

    // Layer order matters: layers added later run earlier on the way in, so
    // requests pass metrics -> session -> CSRF before reaching handlers.
    protected
        .merge(public)
        .fallback(ui::not_found_page)
        .layer(axum::middleware::from_fn(middleware::csrf_layer))
        .layer(axum::middleware::from_fn_with_state(
            session_state,
            middleware::session_layer,
        ))
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::ui_metrics_layer,
        ))
        .with_state(state)
        // Static assets bypass the session/CSRF stack (nested after
        // `with_state`), but still get the server header and compression.
        .nest_service("/static", static_service)
        .layer(axum::middleware::from_fn(middleware::server_header))
        .layer(tower_http::compression::CompressionLayer::new())
}
|
||||
|
||||
/// Assemble the S3-compatible API router plus the `/admin` router, each with
/// its own rate limit, sharing the auth layer, CORS, compression, and server
/// header.
///
/// Several admin paths exist in two spellings (e.g. `/admin/sites/…` and
/// `/admin/site/peers/…`) — presumably legacy aliases; confirm before
/// consolidating.
pub fn create_router(state: state::AppState) -> Router {
    // Separate limiter instances so admin traffic is throttled independently
    // of regular S3 traffic.
    let default_rate_limit = middleware::RateLimitLayerState::new(
        state.config.ratelimit_default,
        state.config.num_trusted_proxies,
    );
    let admin_rate_limit = middleware::RateLimitLayerState::new(
        state.config.ratelimit_admin,
        state.config.num_trusted_proxies,
    );

    // Core S3-style surface: service, bucket, and object level operations.
    let mut api_router = Router::new()
        .route("/myfsio/health", axum::routing::get(handlers::health_check))
        .route("/", axum::routing::get(handlers::list_buckets))
        .route(
            "/{bucket}",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        // Same handlers for the trailing-slash form of the bucket URL.
        .route(
            "/{bucket}/",
            axum::routing::put(handlers::create_bucket)
                .get(handlers::get_bucket)
                .delete(handlers::delete_bucket)
                .head(handlers::head_bucket)
                .post(handlers::post_bucket),
        )
        // Catch-all object key (may contain slashes).
        .route(
            "/{bucket}/{*key}",
            axum::routing::put(handlers::put_object)
                .get(handlers::get_object)
                .delete(handlers::delete_object)
                .head(handlers::head_object)
                .post(handlers::post_object),
        );

    // KMS endpoints are only mounted when the feature is enabled in config.
    if state.config.kms_enabled {
        api_router = api_router
            .route(
                "/kms/keys",
                axum::routing::get(handlers::kms::list_keys).post(handlers::kms::create_key),
            )
            .route(
                "/kms/keys/{key_id}",
                axum::routing::get(handlers::kms::get_key).delete(handlers::kms::delete_key),
            )
            .route(
                "/kms/keys/{key_id}/enable",
                axum::routing::post(handlers::kms::enable_key),
            )
            .route(
                "/kms/keys/{key_id}/disable",
                axum::routing::post(handlers::kms::disable_key),
            )
            .route("/kms/encrypt", axum::routing::post(handlers::kms::encrypt))
            .route("/kms/decrypt", axum::routing::post(handlers::kms::decrypt))
            .route(
                "/kms/generate-data-key",
                axum::routing::post(handlers::kms::generate_data_key),
            )
            .route(
                "/kms/generate-data-key-without-plaintext",
                axum::routing::post(handlers::kms::generate_data_key_without_plaintext),
            )
            .route(
                "/kms/re-encrypt",
                axum::routing::post(handlers::kms::re_encrypt),
            )
            .route(
                "/kms/generate-random",
                axum::routing::post(handlers::kms::generate_random),
            )
            // Client-side encryption helpers.
            .route(
                "/kms/client/generate-key",
                axum::routing::post(handlers::kms::client_generate_key),
            )
            .route(
                "/kms/client/encrypt",
                axum::routing::post(handlers::kms::client_encrypt),
            )
            .route(
                "/kms/client/decrypt",
                axum::routing::post(handlers::kms::client_decrypt),
            )
            .route(
                "/kms/materials/{key_id}",
                axum::routing::post(handlers::kms::materials),
            );
    }

    // Auth runs before rate limiting on the way in (outer layer first).
    api_router = api_router
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn_with_state(
            default_rate_limit,
            middleware::rate_limit_layer,
        ));

    // Administrative surface: site topology, IAM, website domains, GC and
    // integrity maintenance.
    let admin_router = Router::new()
        .route(
            "/admin/site",
            axum::routing::get(handlers::admin::get_local_site)
                .put(handlers::admin::update_local_site),
        )
        .route(
            "/admin/sites",
            axum::routing::get(handlers::admin::list_all_sites)
                .post(handlers::admin::register_peer_site),
        )
        .route(
            "/admin/sites/{site_id}",
            axum::routing::get(handlers::admin::get_peer_site)
                .put(handlers::admin::update_peer_site)
                .delete(handlers::admin::delete_peer_site),
        )
        .route(
            "/admin/sites/{site_id}/health",
            axum::routing::get(handlers::admin::check_peer_health)
                .post(handlers::admin::check_peer_health),
        )
        .route(
            "/admin/sites/{site_id}/bidirectional-status",
            axum::routing::get(handlers::admin::check_bidirectional_status),
        )
        .route(
            "/admin/topology",
            axum::routing::get(handlers::admin::get_topology),
        )
        // Legacy `/admin/site/…` aliases for the routes above.
        .route(
            "/admin/site/local",
            axum::routing::get(handlers::admin::get_local_site)
                .put(handlers::admin::update_local_site),
        )
        .route(
            "/admin/site/all",
            axum::routing::get(handlers::admin::list_all_sites),
        )
        .route(
            "/admin/site/peers",
            axum::routing::post(handlers::admin::register_peer_site),
        )
        .route(
            "/admin/site/peers/{site_id}",
            axum::routing::get(handlers::admin::get_peer_site)
                .put(handlers::admin::update_peer_site)
                .delete(handlers::admin::delete_peer_site),
        )
        .route(
            "/admin/site/peers/{site_id}/health",
            axum::routing::post(handlers::admin::check_peer_health),
        )
        .route(
            "/admin/site/topology",
            axum::routing::get(handlers::admin::get_topology),
        )
        .route(
            "/admin/site/peers/{site_id}/bidirectional-status",
            axum::routing::get(handlers::admin::check_bidirectional_status),
        )
        // IAM administration.
        .route(
            "/admin/iam/users",
            axum::routing::get(handlers::admin::iam_list_users),
        )
        .route(
            "/admin/iam/users/{identifier}",
            axum::routing::get(handlers::admin::iam_get_user),
        )
        .route(
            "/admin/iam/users/{identifier}/policies",
            axum::routing::get(handlers::admin::iam_get_user_policies),
        )
        // `/access-keys` and `/keys` are alias pairs.
        .route(
            "/admin/iam/users/{identifier}/access-keys",
            axum::routing::post(handlers::admin::iam_create_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/keys",
            axum::routing::post(handlers::admin::iam_create_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/access-keys/{access_key}",
            axum::routing::delete(handlers::admin::iam_delete_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/keys/{access_key}",
            axum::routing::delete(handlers::admin::iam_delete_access_key),
        )
        .route(
            "/admin/iam/users/{identifier}/disable",
            axum::routing::post(handlers::admin::iam_disable_user),
        )
        .route(
            "/admin/iam/users/{identifier}/enable",
            axum::routing::post(handlers::admin::iam_enable_user),
        )
        // Website domain administration.
        .route(
            "/admin/website-domains",
            axum::routing::get(handlers::admin::list_website_domains)
                .post(handlers::admin::create_website_domain),
        )
        .route(
            "/admin/website-domains/{domain}",
            axum::routing::get(handlers::admin::get_website_domain)
                .put(handlers::admin::update_website_domain)
                .delete(handlers::admin::delete_website_domain),
        )
        // GC and integrity maintenance endpoints.
        .route(
            "/admin/gc/status",
            axum::routing::get(handlers::admin::gc_status),
        )
        .route(
            "/admin/gc/run",
            axum::routing::post(handlers::admin::gc_run),
        )
        .route(
            "/admin/gc/history",
            axum::routing::get(handlers::admin::gc_history),
        )
        .route(
            "/admin/integrity/status",
            axum::routing::get(handlers::admin::integrity_status),
        )
        .route(
            "/admin/integrity/run",
            axum::routing::post(handlers::admin::integrity_run),
        )
        .route(
            "/admin/integrity/history",
            axum::routing::get(handlers::admin::integrity_history),
        )
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_layer,
        ))
        .layer(axum::middleware::from_fn_with_state(
            admin_rate_limit,
            middleware::rate_limit_layer,
        ));

    // Shared outer layers: server header, CORS, compression.
    api_router
        .merge(admin_router)
        .layer(axum::middleware::from_fn(middleware::server_header))
        .layer(cors_layer(&state.config))
        .layer(tower_http::compression::CompressionLayer::new())
        .with_state(state)
}
|
||||
|
||||
fn cors_layer(config: &config::ServerConfig) -> tower_http::cors::CorsLayer {
|
||||
use axum::http::{HeaderName, HeaderValue, Method};
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
|
||||
let mut layer = CorsLayer::new();
|
||||
|
||||
if config.cors_origins.iter().any(|origin| origin == "*") {
|
||||
layer = layer.allow_origin(Any);
|
||||
} else {
|
||||
let origins = config
|
||||
.cors_origins
|
||||
.iter()
|
||||
.filter_map(|origin| HeaderValue::from_str(origin).ok())
|
||||
.collect::<Vec<_>>();
|
||||
if !origins.is_empty() {
|
||||
layer = layer.allow_origin(origins);
|
||||
}
|
||||
}
|
||||
|
||||
let methods = config
|
||||
.cors_methods
|
||||
.iter()
|
||||
.filter_map(|method| method.parse::<Method>().ok())
|
||||
.collect::<Vec<_>>();
|
||||
if !methods.is_empty() {
|
||||
layer = layer.allow_methods(methods);
|
||||
}
|
||||
|
||||
if config.cors_allow_headers.iter().any(|header| header == "*") {
|
||||
layer = layer.allow_headers(Any);
|
||||
} else {
|
||||
let headers = config
|
||||
.cors_allow_headers
|
||||
.iter()
|
||||
.filter_map(|header| header.parse::<HeaderName>().ok())
|
||||
.collect::<Vec<_>>();
|
||||
if !headers.is_empty() {
|
||||
layer = layer.allow_headers(headers);
|
||||
}
|
||||
}
|
||||
|
||||
if config
|
||||
.cors_expose_headers
|
||||
.iter()
|
||||
.any(|header| header == "*")
|
||||
{
|
||||
layer = layer.expose_headers(Any);
|
||||
} else {
|
||||
let headers = config
|
||||
.cors_expose_headers
|
||||
.iter()
|
||||
.filter_map(|header| header.parse::<HeaderName>().ok())
|
||||
.collect::<Vec<_>>();
|
||||
if !headers.is_empty() {
|
||||
layer = layer.expose_headers(headers);
|
||||
}
|
||||
}
|
||||
|
||||
layer
|
||||
}
|
||||
547
crates/myfsio-server/src/main.rs
Normal file
547
crates/myfsio-server/src/main.rs
Normal file
@@ -0,0 +1,547 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use myfsio_server::config::ServerConfig;
|
||||
use myfsio_server::state::AppState;
|
||||
|
||||
/// Command-line interface for the MyFSIO server binary.
///
/// The three boolean flags are one-shot maintenance modes handled before the
/// server starts; `command` defaults to `Serve` when omitted.
#[derive(Parser)]
#[command(
    name = "myfsio",
    version,
    about = "MyFSIO S3-compatible storage engine"
)]
struct Cli {
    // Validate configuration (exits non-zero on CRITICAL issues).
    #[arg(long, help = "Validate configuration and exit")]
    check_config: bool,
    // Print the effective configuration summary and exit.
    #[arg(long, help = "Show configuration summary and exit")]
    show_config: bool,
    // Regenerate admin credentials and exit.
    #[arg(long, help = "Reset admin credentials and exit")]
    reset_cred: bool,
    #[command(subcommand)]
    command: Option<Command>,
}
|
||||
|
||||
/// Subcommands: run the server (default) or print the version.
#[derive(Subcommand)]
enum Command {
    Serve,
    Version,
}
|
||||
|
||||
#[tokio::main]
async fn main() {
    // Env files must be loaded before config and logging read the environment.
    load_env_files();
    init_tracing();

    let cli = Cli::parse();
    let config = ServerConfig::from_env();
    // Only in-memory rate limiting is implemented; warn on any other backend.
    if !config
        .ratelimit_storage_uri
        .eq_ignore_ascii_case("memory://")
    {
        tracing::warn!(
            "RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory rate limits",
            config.ratelimit_storage_uri
        );
    }

    // One-shot maintenance modes: handle and exit before starting anything.
    if cli.reset_cred {
        reset_admin_credentials(&config);
        return;
    }
    if cli.check_config || cli.show_config {
        print_config_summary(&config);
        if cli.check_config {
            let issues = validate_config(&config);
            for issue in &issues {
                println!("{issue}");
            }
            // Only CRITICAL findings fail the check; warnings/info are advisory.
            if issues.iter().any(|issue| issue.starts_with("CRITICAL:")) {
                std::process::exit(1);
            }
        }
        return;
    }

    match cli.command.unwrap_or(Command::Serve) {
        Command::Version => {
            println!("myfsio {}", env!("CARGO_PKG_VERSION"));
            return;
        }
        Command::Serve => {}
    }

    // Create the initial IAM config (admin credentials) if none exists yet.
    ensure_iam_bootstrap(&config);
    let bind_addr = config.bind_addr;
    let ui_bind_addr = config.ui_bind_addr;

    tracing::info!("MyFSIO Rust Engine starting — API on {}", bind_addr);
    if config.ui_enabled {
        tracing::info!("UI will bind on {}", ui_bind_addr);
    }
    tracing::info!("Storage root: {}", config.storage_root.display());
    tracing::info!("Region: {}", config.region);
    tracing::info!(
        "Encryption: {}, KMS: {}, GC: {}, Lifecycle: {}, Integrity: {}, Metrics History: {}, Operation Metrics: {}, UI: {}",
        config.encryption_enabled,
        config.kms_enabled,
        config.gc_enabled,
        config.lifecycle_enabled,
        config.integrity_enabled,
        config.metrics_history_enabled,
        config.metrics_enabled,
        config.ui_enabled
    );

    // Encryption/KMS requires the async constructor; plain state otherwise.
    let state = if config.encryption_enabled || config.kms_enabled {
        AppState::new_with_encryption(config.clone()).await
    } else {
        AppState::new(config.clone())
    };

    // Handles for background services; aborted on shutdown or bind failure.
    let mut bg_handles: Vec<tokio::task::JoinHandle<()>> = Vec::new();

    if let Some(ref gc) = state.gc {
        bg_handles.push(gc.clone().start_background());
        tracing::info!("GC background service started");
    }

    if let Some(ref integrity) = state.integrity {
        bg_handles.push(integrity.clone().start_background());
        tracing::info!("Integrity checker background service started");
    }

    if let Some(ref metrics) = state.metrics {
        bg_handles.push(metrics.clone().start_background());
        tracing::info!("Metrics collector background service started");
    }

    if let Some(ref system_metrics) = state.system_metrics {
        bg_handles.push(system_metrics.clone().start_background());
        tracing::info!("System metrics history collector started");
    }

    if config.lifecycle_enabled {
        // Lifecycle runs hourly; history retention comes from config.
        let lifecycle =
            std::sync::Arc::new(myfsio_server::services::lifecycle::LifecycleService::new(
                state.storage.clone(),
                config.storage_root.clone(),
                myfsio_server::services::lifecycle::LifecycleConfig {
                    interval_seconds: 3600,
                    max_history_per_bucket: config.lifecycle_max_history_per_bucket,
                },
            ));
        bg_handles.push(lifecycle.start_background());
        tracing::info!("Lifecycle manager background service started");
    }

    if let Some(ref site_sync) = state.site_sync {
        let worker = site_sync.clone();
        bg_handles.push(tokio::spawn(async move {
            worker.run().await;
        }));
        tracing::info!("Site sync worker started");
    }

    let ui_enabled = config.ui_enabled;
    let api_app = myfsio_server::create_router(state.clone());
    let ui_app = if ui_enabled {
        Some(myfsio_server::create_ui_router(state.clone()))
    } else {
        None
    };

    // Bind the API listener; on failure, stop background tasks and exit.
    let api_listener = match tokio::net::TcpListener::bind(bind_addr).await {
        Ok(listener) => listener,
        Err(err) => {
            if err.kind() == std::io::ErrorKind::AddrInUse {
                tracing::error!("API port already in use: {}", bind_addr);
            } else {
                tracing::error!("Failed to bind API {}: {}", bind_addr, err);
            }
            for handle in bg_handles {
                handle.abort();
            }
            std::process::exit(1);
        }
    };
    tracing::info!("API listening on {}", bind_addr);

    // Bind the UI listener only when the UI router was built.
    let ui_listener = if let Some(ref app) = ui_app {
        let _ = app;
        match tokio::net::TcpListener::bind(ui_bind_addr).await {
            Ok(listener) => {
                tracing::info!("UI listening on {}", ui_bind_addr);
                Some(listener)
            }
            Err(err) => {
                if err.kind() == std::io::ErrorKind::AddrInUse {
                    tracing::error!("UI port already in use: {}", ui_bind_addr);
                } else {
                    tracing::error!("Failed to bind UI {}: {}", ui_bind_addr, err);
                }
                for handle in bg_handles {
                    handle.abort();
                }
                std::process::exit(1);
            }
        }
    } else {
        None
    };

    // Shared notifier drives graceful shutdown of both servers.
    let shutdown = shutdown_signal_shared();
    let api_shutdown = shutdown.clone();
    let api_task = tokio::spawn(async move {
        // connect-info service so handlers can see the peer SocketAddr.
        axum::serve(
            api_listener,
            api_app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
        )
        .with_graceful_shutdown(async move {
            api_shutdown.notified().await;
        })
        .await
    });

    let ui_task = if let (Some(listener), Some(app)) = (ui_listener, ui_app) {
        let ui_shutdown = shutdown.clone();
        Some(tokio::spawn(async move {
            axum::serve(listener, app)
                .with_graceful_shutdown(async move {
                    ui_shutdown.notified().await;
                })
                .await
        }))
    } else {
        None
    };

    // Block until Ctrl+C, then notify both server tasks to drain and stop.
    tokio::signal::ctrl_c()
        .await
        .expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutdown signal received");
    shutdown.notify_waiters();

    // A panicked/cancelled task yields Err from `await`; treat that as a clean
    // exit here and only log server-level errors.
    if let Err(err) = api_task.await.unwrap_or(Ok(())) {
        tracing::error!("API server exited with error: {}", err);
    }
    if let Some(task) = ui_task {
        if let Err(err) = task.await.unwrap_or(Ok(())) {
            tracing::error!("UI server exited with error: {}", err);
        }
    }

    // Background services do not shut down gracefully; abort them last.
    for handle in bg_handles {
        handle.abort();
    }
}
|
||||
|
||||
/// Print a human-readable summary of the effective configuration to stdout.
/// Used by `--show-config` and `--check-config`.
fn print_config_summary(config: &ServerConfig) {
    println!("MyFSIO Rust Configuration");
    println!("Version: {}", env!("CARGO_PKG_VERSION"));
    println!("API bind: {}", config.bind_addr);
    println!("UI bind: {}", config.ui_bind_addr);
    println!("UI enabled: {}", config.ui_enabled);
    println!("Storage root: {}", config.storage_root.display());
    println!("IAM config: {}", config.iam_config_path.display());
    println!("Region: {}", config.region);
    println!("Encryption enabled: {}", config.encryption_enabled);
    println!(
        "Encryption chunk size: {} bytes",
        config.encryption_chunk_size_bytes
    );
    println!("KMS enabled: {}", config.kms_enabled);
    println!(
        "KMS data key bounds: {}-{} bytes",
        config.kms_generate_data_key_min_bytes, config.kms_generate_data_key_max_bytes
    );
    println!("GC enabled: {}", config.gc_enabled);
    println!(
        "GC interval: {} hours, dry run: {}",
        config.gc_interval_hours, config.gc_dry_run
    );
    println!("Integrity enabled: {}", config.integrity_enabled);
    println!("Lifecycle enabled: {}", config.lifecycle_enabled);
    println!(
        "Lifecycle history limit: {}",
        config.lifecycle_max_history_per_bucket
    );
    println!(
        "Website hosting enabled: {}",
        config.website_hosting_enabled
    );
    println!("Site sync enabled: {}", config.site_sync_enabled);
    println!("API base URL: {}", config.api_base_url);
    println!(
        "Object key max: {} bytes, tag limit: {}",
        config.object_key_max_length_bytes, config.object_tag_limit
    );
    println!(
        "Rate limits: default {} per {}s, admin {} per {}s",
        config.ratelimit_default.max_requests,
        config.ratelimit_default.window_seconds,
        config.ratelimit_admin.max_requests,
        config.ratelimit_admin.window_seconds
    );
    println!(
        "Metrics history enabled: {}",
        config.metrics_history_enabled
    );
    println!("Operation metrics enabled: {}", config.metrics_enabled);
}
|
||||
|
||||
/// Validate the configuration and return a list of findings.
///
/// Each finding is prefixed with a severity tag ("CRITICAL:", "WARNING:",
/// "INFO:"); the caller treats only CRITICAL findings as fatal.
///
/// NOTE(review): despite the name, this function has side effects — it
/// attempts to create the storage root and the IAM config directory as part
/// of checking that they are writable.
fn validate_config(config: &ServerConfig) -> Vec<String> {
    let mut issues = Vec::new();

    // API and UI cannot share a socket address when both servers run.
    if config.ui_enabled && config.bind_addr == config.ui_bind_addr {
        issues.push(
            "CRITICAL: API and UI bind addresses cannot be identical when UI is enabled."
                .to_string(),
        );
    }
    // Sanity checks on numeric bounds.
    if config.presigned_url_min_expiry > config.presigned_url_max_expiry {
        issues.push("CRITICAL: PRESIGNED_URL_MIN_EXPIRY_SECONDS cannot exceed PRESIGNED_URL_MAX_EXPIRY_SECONDS.".to_string());
    }
    if config.encryption_chunk_size_bytes == 0 {
        issues.push("CRITICAL: ENCRYPTION_CHUNK_SIZE_BYTES must be greater than zero.".to_string());
    }
    if config.kms_generate_data_key_min_bytes == 0 {
        issues.push(
            "CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES must be greater than zero.".to_string(),
        );
    }
    if config.kms_generate_data_key_min_bytes > config.kms_generate_data_key_max_bytes {
        issues.push("CRITICAL: KMS_GENERATE_DATA_KEY_MIN_BYTES cannot exceed KMS_GENERATE_DATA_KEY_MAX_BYTES.".to_string());
    }
    if config.gc_interval_hours <= 0.0 {
        issues.push("CRITICAL: GC_INTERVAL_HOURS must be greater than zero.".to_string());
    }
    if config.bucket_config_cache_ttl_seconds < 0.0 {
        issues.push("CRITICAL: BUCKET_CONFIG_CACHE_TTL_SECONDS cannot be negative.".to_string());
    }
    // Mirror of the startup warning: only memory:// rate-limit storage exists.
    if !config
        .ratelimit_storage_uri
        .eq_ignore_ascii_case("memory://")
    {
        issues.push(format!(
            "WARNING: RATE_LIMIT_STORAGE_URI={} is not supported yet; using in-memory limits.",
            config.ratelimit_storage_uri
        ));
    }
    // Writability probes (these create the directories if missing).
    if let Err(err) = std::fs::create_dir_all(&config.storage_root) {
        issues.push(format!(
            "CRITICAL: Cannot create storage root {}: {}",
            config.storage_root.display(),
            err
        ));
    }
    if let Some(parent) = config.iam_config_path.parent() {
        if let Err(err) = std::fs::create_dir_all(parent) {
            issues.push(format!(
                "CRITICAL: Cannot create IAM config directory {}: {}",
                parent.display(),
                err
            ));
        }
    }
    // Feature-combination advisories.
    if config.encryption_enabled && config.secret_key.is_none() {
        issues.push(
            "WARNING: ENCRYPTION_ENABLED=true but SECRET_KEY is not configured; secure-at-rest config encryption is unavailable.".to_string(),
        );
    }
    if config.site_sync_enabled && !config.website_hosting_enabled {
        issues.push(
            "INFO: SITE_SYNC_ENABLED=true without WEBSITE_HOSTING_ENABLED; this is valid but unrelated.".to_string(),
        );
    }

    issues
}
|
||||
|
||||
fn init_tracing() {
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
let filter = EnvFilter::try_from_env("RUST_LOG")
|
||||
.or_else(|_| {
|
||||
EnvFilter::try_new(std::env::var("LOG_LEVEL").unwrap_or_else(|_| "INFO".to_string()))
|
||||
})
|
||||
.unwrap_or_else(|_| EnvFilter::new("INFO"));
|
||||
tracing_subscriber::fmt().with_env_filter(filter).init();
|
||||
}
|
||||
|
||||
/// Creates the shared shutdown-notification primitive.
///
/// Callers clone the returned `Arc` into background tasks; a single
/// `notify_waiters`/`notify_one` on it signals shutdown to everyone
/// awaiting `notified()`.
fn shutdown_signal_shared() -> std::sync::Arc<tokio::sync::Notify> {
    std::sync::Arc::new(tokio::sync::Notify::new())
}
|
||||
|
||||
fn load_env_files() {
|
||||
let cwd = std::env::current_dir().ok();
|
||||
let mut candidates: Vec<std::path::PathBuf> = Vec::new();
|
||||
candidates.push(std::path::PathBuf::from("/opt/myfsio/myfsio.env"));
|
||||
if let Some(ref dir) = cwd {
|
||||
candidates.push(dir.join(".env"));
|
||||
candidates.push(dir.join("myfsio.env"));
|
||||
for ancestor in dir.ancestors().skip(1).take(4) {
|
||||
candidates.push(ancestor.join(".env"));
|
||||
candidates.push(ancestor.join("myfsio.env"));
|
||||
}
|
||||
}
|
||||
|
||||
let mut seen = std::collections::HashSet::new();
|
||||
for path in candidates {
|
||||
if !seen.insert(path.clone()) {
|
||||
continue;
|
||||
}
|
||||
if path.is_file() {
|
||||
match dotenvy::from_path_override(&path) {
|
||||
Ok(()) => eprintln!("Loaded env file: {}", path.display()),
|
||||
Err(e) => eprintln!("Failed to load env file {}: {}", path.display(), e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a first-run IAM config containing a single all-powerful admin user.
///
/// No-op when the config file already exists. Access/secret keys come from
/// `ADMIN_ACCESS_KEY` / `ADMIN_SECRET_KEY` when set (trimmed, non-empty);
/// otherwise random `AK…` / `SK…` values are generated. The credentials are
/// intentionally echoed to the log so the operator can capture them after
/// the first start. All failures are logged and abort the bootstrap without
/// touching an existing config.
fn ensure_iam_bootstrap(config: &ServerConfig) {
    let iam_path = &config.iam_config_path;
    if iam_path.exists() {
        return;
    }

    // Env override first; empty or whitespace-only values count as unset.
    let access_key = std::env::var("ADMIN_ACCESS_KEY")
        .ok()
        .map(|s| s.trim().to_string())
        .filter(|s| !s.is_empty())
        .unwrap_or_else(|| format!("AK{}", uuid::Uuid::new_v4().simple()));
    let secret_key = std::env::var("ADMIN_SECRET_KEY")
        .ok()
        .map(|s| s.trim().to_string())
        .filter(|s| !s.is_empty())
        .unwrap_or_else(|| format!("SK{}", uuid::Uuid::new_v4().simple()));

    // Short random user id: first 16 hex chars of a v4 UUID.
    let user_id = format!("u-{}", &uuid::Uuid::new_v4().simple().to_string()[..16]);
    let created_at = chrono::Utc::now().to_rfc3339();

    // Version-2 IAM schema: one enabled admin user with one active key pair
    // and a wildcard policy (all actions on all buckets/prefixes).
    let body = serde_json::json!({
        "version": 2,
        "users": [{
            "user_id": user_id,
            "display_name": "Local Admin",
            "enabled": true,
            "access_keys": [{
                "access_key": access_key,
                "secret_key": secret_key,
                "status": "active",
                "created_at": created_at,
            }],
            "policies": [{
                "bucket": "*",
                "actions": ["*"],
                "prefix": "*",
            }]
        }]
    });

    let json = match serde_json::to_string_pretty(&body) {
        Ok(s) => s,
        Err(e) => {
            tracing::error!("Failed to serialize IAM bootstrap config: {}", e);
            return;
        }
    };

    if let Some(parent) = iam_path.parent() {
        if let Err(e) = std::fs::create_dir_all(parent) {
            tracing::error!(
                "Failed to create IAM config dir {}: {}",
                parent.display(),
                e
            );
            return;
        }
    }

    // NOTE(review): written with default permissions even though it contains
    // the admin secret — consider restricting to 0600 on Unix; confirm.
    if let Err(e) = std::fs::write(iam_path, json) {
        tracing::error!(
            "Failed to write IAM bootstrap config {}: {}",
            iam_path.display(),
            e
        );
        return;
    }

    // Deliberate credential echo: this is the operator's only chance to see
    // the generated secret in cleartext.
    tracing::info!("============================================================");
    tracing::info!("MYFSIO - ADMIN CREDENTIALS INITIALIZED");
    tracing::info!("============================================================");
    tracing::info!("Access Key: {}", access_key);
    tracing::info!("Secret Key: {}", secret_key);
    tracing::info!("Saved to: {}", iam_path.display());
    tracing::info!("============================================================");
}
|
||||
|
||||
fn reset_admin_credentials(config: &ServerConfig) {
|
||||
if let Some(parent) = config.iam_config_path.parent() {
|
||||
if let Err(err) = std::fs::create_dir_all(parent) {
|
||||
eprintln!(
|
||||
"Failed to create IAM config directory {}: {}",
|
||||
parent.display(),
|
||||
err
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if config.iam_config_path.exists() {
|
||||
let backup = config
|
||||
.iam_config_path
|
||||
.with_extension(format!("bak-{}", chrono::Utc::now().timestamp()));
|
||||
if let Err(err) = std::fs::rename(&config.iam_config_path, &backup) {
|
||||
eprintln!(
|
||||
"Failed to back up existing IAM config {}: {}",
|
||||
config.iam_config_path.display(),
|
||||
err
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
println!("Backed up existing IAM config to {}", backup.display());
|
||||
prune_iam_backups(&config.iam_config_path, 5);
|
||||
}
|
||||
|
||||
ensure_iam_bootstrap(config);
|
||||
println!("Admin credentials reset.");
|
||||
}
|
||||
|
||||
/// Deletes all but the newest `keep` timestamped backups of `iam_path`.
///
/// Backups are siblings named `<stem>.bak-<unix_ts>` (as produced by
/// `reset_admin_credentials`); files that do not match the pattern are left
/// untouched. Failures are reported on stderr but never abort.
fn prune_iam_backups(iam_path: &std::path::Path, keep: usize) {
    let (parent, stem) = match (
        iam_path.parent(),
        iam_path.file_stem().and_then(|s| s.to_str()),
    ) {
        (Some(p), Some(s)) => (p, s),
        // Nothing to prune without a readable directory and stem.
        _ => return,
    };
    let prefix = format!("{}.bak-", stem);

    let dir = match std::fs::read_dir(parent) {
        Ok(d) => d,
        Err(_) => return,
    };

    // Collect (timestamp, path) for every well-formed backup file.
    let mut backups: Vec<(i64, std::path::PathBuf)> = dir
        .flatten()
        .filter_map(|entry| {
            let path = entry.path();
            let ts = path
                .file_name()?
                .to_str()?
                .strip_prefix(&prefix)?
                .parse::<i64>()
                .ok()?;
            Some((ts, path))
        })
        .collect();

    // Newest first, so skip(keep) leaves the `keep` most recent backups.
    backups.sort_by(|a, b| b.0.cmp(&a.0));

    for (_, path) in backups.into_iter().skip(keep) {
        match std::fs::remove_file(&path) {
            Ok(()) => println!("Pruned old IAM backup {}", path.display()),
            Err(err) => eprintln!(
                "Failed to remove old IAM backup {}: {}",
                path.display(),
                err
            ),
        }
    }
}
|
||||
1409
crates/myfsio-server/src/middleware/auth.rs
Normal file
1409
crates/myfsio-server/src/middleware/auth.rs
Normal file
File diff suppressed because it is too large
Load Diff
87
crates/myfsio-server/src/middleware/mod.rs
Normal file
87
crates/myfsio-server/src/middleware/mod.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
mod auth;
|
||||
pub mod ratelimit;
|
||||
pub mod session;
|
||||
|
||||
pub use auth::auth_layer;
|
||||
pub use ratelimit::{rate_limit_layer, RateLimitLayerState};
|
||||
pub use session::{csrf_layer, session_layer, SessionHandle, SessionLayerState};
|
||||
|
||||
use axum::extract::{Request, State};
|
||||
use axum::middleware::Next;
|
||||
use axum::response::Response;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
pub async fn server_header(req: Request, next: Next) -> Response {
|
||||
let mut resp = next.run(req).await;
|
||||
resp.headers_mut()
|
||||
.insert("server", crate::SERVER_HEADER.parse().unwrap());
|
||||
resp
|
||||
}
|
||||
|
||||
pub async fn ui_metrics_layer(State(state): State<AppState>, req: Request, next: Next) -> Response {
|
||||
let metrics = match state.metrics.clone() {
|
||||
Some(m) => m,
|
||||
None => return next.run(req).await,
|
||||
};
|
||||
let start = Instant::now();
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let endpoint_type = classify_ui_endpoint(&path);
|
||||
let bytes_in = req
|
||||
.headers()
|
||||
.get(axum::http::header::CONTENT_LENGTH)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
|
||||
let response = next.run(req).await;
|
||||
|
||||
let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
|
||||
let status = response.status().as_u16();
|
||||
let bytes_out = response
|
||||
.headers()
|
||||
.get(axum::http::header::CONTENT_LENGTH)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
let error_code = if status >= 400 { Some("UIError") } else { None };
|
||||
metrics.record_request(
|
||||
method.as_str(),
|
||||
endpoint_type,
|
||||
status,
|
||||
latency_ms,
|
||||
bytes_in,
|
||||
bytes_out,
|
||||
error_code,
|
||||
);
|
||||
|
||||
response
|
||||
}
|
||||
|
||||
/// Maps a UI request path to a coarse metrics label.
///
/// Upload paths are matched first (anywhere in the path), then known `/ui/…`
/// prefixes in declaration order, then the auth pages; everything else is
/// bucketed as "ui_other".
fn classify_ui_endpoint(path: &str) -> &'static str {
    if path.contains("/upload") {
        return "ui_upload";
    }

    // Prefix table checked in order; first match wins.
    const PREFIX_LABELS: [(&str, &str); 8] = [
        ("/ui/buckets/", "ui_bucket"),
        ("/ui/iam", "ui_iam"),
        ("/ui/sites", "ui_sites"),
        ("/ui/connections", "ui_connections"),
        ("/ui/metrics", "ui_metrics"),
        ("/ui/system", "ui_system"),
        ("/ui/website-domains", "ui_website_domains"),
        ("/ui/replication", "ui_replication"),
    ];
    for (prefix, label) in PREFIX_LABELS {
        if path.starts_with(prefix) {
            return label;
        }
    }

    if path.starts_with("/login") || path.starts_with("/logout") {
        "ui_auth"
    } else {
        "ui_other"
    }
}
|
||||
241
crates/myfsio-server/src/middleware/ratelimit.rs
Normal file
241
crates/myfsio-server/src/middleware/ratelimit.rs
Normal file
@@ -0,0 +1,241 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use axum::extract::{ConnectInfo, Request, State};
|
||||
use axum::http::{header, StatusCode};
|
||||
use axum::middleware::Next;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use crate::config::RateLimitSetting;
|
||||
|
||||
/// Shared state for the rate-limiting middleware layer.
#[derive(Clone)]
pub struct RateLimitLayerState {
    // One process-wide limiter shared (via Arc) across cloned layer states.
    limiter: Arc<FixedWindowLimiter>,
    // Number of trailing X-Forwarded-For hops operated by trusted proxies;
    // controls which address is treated as the real client IP.
    num_trusted_proxies: usize,
}

impl RateLimitLayerState {
    /// Builds the layer state with a fresh fixed-window limiter.
    pub fn new(setting: RateLimitSetting, num_trusted_proxies: usize) -> Self {
        Self {
            limiter: Arc::new(FixedWindowLimiter::new(setting)),
            num_trusted_proxies,
        }
    }
}

/// In-memory, per-key fixed-window rate limiter behind a single mutex.
#[derive(Debug)]
struct FixedWindowLimiter {
    // Quota: max requests per window plus the window length in seconds.
    setting: RateLimitSetting,
    state: Mutex<LimiterState>,
}

#[derive(Debug)]
struct LimiterState {
    // One entry per rate-limit key (currently "ip:<addr>").
    entries: HashMap<String, LimitEntry>,
    // When expired entries were last swept; throttles sweep frequency.
    last_sweep: Instant,
}

#[derive(Debug, Clone, Copy)]
struct LimitEntry {
    // Start of the current fixed window for this key.
    window_started: Instant,
    // Requests admitted in the current window.
    count: u32,
}

// Sweep expired entries at most once a minute, and only once the map has
// grown large enough for the O(n) scan to be worthwhile.
const SWEEP_MIN_INTERVAL: Duration = Duration::from_secs(60);
const SWEEP_ENTRY_THRESHOLD: usize = 1024;
|
||||
|
||||
impl FixedWindowLimiter {
    /// Creates a limiter with no tracked keys.
    fn new(setting: RateLimitSetting) -> Self {
        Self {
            setting,
            state: Mutex::new(LimiterState {
                entries: HashMap::new(),
                last_sweep: Instant::now(),
            }),
        }
    }

    /// Fixed-window admission check for `key`.
    ///
    /// Returns `Ok(())` and consumes one unit of quota, or
    /// `Err(retry_after_secs)` when the key has already used `max_requests`
    /// within the current window.
    fn check(&self, key: &str) -> Result<(), u64> {
        let now = Instant::now();
        // Guard against a zero-length window from misconfiguration.
        let window = Duration::from_secs(self.setting.window_seconds.max(1));
        let mut state = self.state.lock();

        // Opportunistic cleanup: drop fully expired entries, but only once
        // the map is large enough to matter and at most once per
        // SWEEP_MIN_INTERVAL so the O(n) scan doesn't run on every request.
        if state.entries.len() >= SWEEP_ENTRY_THRESHOLD
            && now.duration_since(state.last_sweep) >= SWEEP_MIN_INTERVAL
        {
            state
                .entries
                .retain(|_, entry| now.duration_since(entry.window_started) < window);
            state.last_sweep = now;
        }

        // First sighting of this key starts a fresh window with zero usage.
        let entry = state.entries.entry(key.to_string()).or_insert(LimitEntry {
            window_started: now,
            count: 0,
        });

        // Current window has elapsed: reset the counter for a new window.
        if now.duration_since(entry.window_started) >= window {
            entry.window_started = now;
            entry.count = 0;
        }

        // Over quota: report seconds until the window rolls over, clamped to
        // at least 1 so clients never see "Retry-After: 0".
        if entry.count >= self.setting.max_requests {
            let elapsed = now.duration_since(entry.window_started);
            let retry_after = window.saturating_sub(elapsed).as_secs().max(1);
            return Err(retry_after);
        }

        entry.count += 1;
        Ok(())
    }
}
|
||||
|
||||
pub async fn rate_limit_layer(
|
||||
State(state): State<RateLimitLayerState>,
|
||||
req: Request,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
let key = rate_limit_key(&req, state.num_trusted_proxies);
|
||||
match state.limiter.check(&key) {
|
||||
Ok(()) => next.run(req).await,
|
||||
Err(retry_after) => too_many_requests(retry_after),
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the 429 response: S3-style XML error body plus `Retry-After`
/// (seconds) and an XML content type.
fn too_many_requests(retry_after: u64) -> Response {
    (
        StatusCode::TOO_MANY_REQUESTS,
        [
            (header::CONTENT_TYPE, "application/xml".to_string()),
            (header::RETRY_AFTER, retry_after.to_string()),
        ],
        myfsio_xml::response::rate_limit_exceeded_xml(),
    )
        .into_response()
}
|
||||
|
||||
/// Limiter bucket key for a request; currently keyed purely by client IP
/// (prefixed so other key classes could be added later without collisions).
fn rate_limit_key(req: &Request, num_trusted_proxies: usize) -> String {
    format!("ip:{}", client_ip(req, num_trusted_proxies))
}
|
||||
|
||||
/// Resolves the client IP for rate-limiting purposes.
///
/// When at least one proxy is trusted, `X-Forwarded-For` is consulted: with
/// `n` trusted hops, the `(n+1)`-th address from the right is the client.
/// If the header has too few entries, `X-Real-IP` is tried next. With zero
/// trusted proxies both headers are ignored entirely — they are trivially
/// client-spoofable — and the TCP peer address from `ConnectInfo` is used,
/// falling back to the literal string "unknown".
fn client_ip(req: &Request, num_trusted_proxies: usize) -> String {
    if num_trusted_proxies > 0 {
        if let Some(value) = req
            .headers()
            .get("x-forwarded-for")
            .and_then(|v| v.to_str().ok())
        {
            let parts = value
                .split(',')
                .map(|part| part.trim())
                .filter(|part| !part.is_empty())
                .collect::<Vec<_>>();
            // Only trust the header when it contains more hops than trusted
            // proxies; otherwise the "client" slot was proxy-controlled.
            if parts.len() > num_trusted_proxies {
                let index = parts.len() - num_trusted_proxies - 1;
                return parts[index].to_string();
            }
        }

        if let Some(value) = req.headers().get("x-real-ip").and_then(|v| v.to_str().ok()) {
            if !value.trim().is_empty() {
                return value.trim().to_string();
            }
        }
    }

    // Direct connection (or untrusted headers): use the socket peer address.
    req.extensions()
        .get::<ConnectInfo<SocketAddr>>()
        .map(|ConnectInfo(addr)| addr.ip().to_string())
        .unwrap_or_else(|| "unknown".to_string())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;

    // Trusting N proxies selects the (N+1)-th address from the right of
    // X-Forwarded-For.
    #[test]
    fn honors_trusted_proxy_count_for_forwarded_for() {
        let req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1, 10.0.0.1, 10.0.0.2")
            .body(Body::empty())
            .unwrap();
        assert_eq!(rate_limit_key(&req, 2), "ip:198.51.100.1");
        assert_eq!(rate_limit_key(&req, 1), "ip:10.0.0.1");
    }

    // A header with fewer hops than trusted proxies is ignored in favor of
    // the socket peer address.
    #[test]
    fn falls_back_to_connect_info_when_forwarded_for_has_too_few_hops() {
        let mut req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1")
            .body(Body::empty())
            .unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));

        assert_eq!(rate_limit_key(&req, 2), "ip:203.0.113.9");
    }

    // With zero trusted proxies, spoofable forwarding headers must not be
    // honored at all.
    #[test]
    fn ignores_forwarded_headers_when_no_proxies_are_trusted() {
        let mut req = Request::builder()
            .header("x-forwarded-for", "198.51.100.1")
            .header("x-real-ip", "198.51.100.2")
            .body(Body::empty())
            .unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 9], 443))));

        assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.9");
    }

    // No forwarding headers at all: the peer address is the client.
    #[test]
    fn uses_connect_info_for_direct_clients() {
        let mut req = Request::builder().body(Body::empty()).unwrap();
        req.extensions_mut()
            .insert(ConnectInfo(SocketAddr::from(([203, 0, 113, 10], 443))));

        assert_eq!(rate_limit_key(&req, 0), "ip:203.0.113.10");
    }

    // Quota of 2 admits exactly two requests, then rejects.
    #[test]
    fn fixed_window_rejects_after_quota() {
        let limiter = FixedWindowLimiter::new(RateLimitSetting::new(2, 60));
        assert!(limiter.check("k").is_ok());
        assert!(limiter.check("k").is_ok());
        assert!(limiter.check("k").is_err());
    }

    // Seed the map past SWEEP_ENTRY_THRESHOLD with expired entries and a
    // stale last_sweep, then verify a single check() sweeps them all out.
    #[test]
    fn sweep_removes_expired_entries() {
        let limiter = FixedWindowLimiter::new(RateLimitSetting::new(10, 1));
        let far_past = Instant::now() - (SWEEP_MIN_INTERVAL + Duration::from_secs(5));
        {
            let mut state = limiter.state.lock();
            for i in 0..(SWEEP_ENTRY_THRESHOLD + 1024) {
                state.entries.insert(
                    format!("stale-{}", i),
                    LimitEntry {
                        window_started: far_past,
                        count: 5,
                    },
                );
            }
            state.last_sweep = far_past;
        }
        let seeded = limiter.state.lock().entries.len();
        assert_eq!(seeded, SWEEP_ENTRY_THRESHOLD + 1024);

        assert!(limiter.check("fresh").is_ok());

        let remaining = limiter.state.lock().entries.len();
        assert_eq!(
            remaining, 1,
            "expected sweep to leave only the fresh entry, got {}",
            remaining
        );
    }
}
|
||||
228
crates/myfsio-server/src/middleware/session.rs
Normal file
228
crates/myfsio-server/src/middleware/session.rs
Normal file
@@ -0,0 +1,228 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::extract::{Request, State};
|
||||
use axum::http::{header, HeaderValue, StatusCode};
|
||||
use axum::middleware::Next;
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use cookie::{Cookie, SameSite};
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use crate::session::{
|
||||
csrf_tokens_match, SessionData, SessionStore, CSRF_FIELD_NAME, CSRF_HEADER_NAME,
|
||||
SESSION_COOKIE_NAME,
|
||||
};
|
||||
|
||||
/// State shared by the session middleware layer.
#[derive(Clone)]
pub struct SessionLayerState {
    // Backing store holding all active sessions.
    pub store: Arc<SessionStore>,
    // When true the session cookie is marked Secure (HTTPS-only).
    pub secure: bool,
}

/// Cheaply-cloneable per-request handle to one session.
///
/// Clones share the same underlying data and dirty flag, so a write through
/// any clone is visible to — and persisted by — the session layer.
#[derive(Clone)]
pub struct SessionHandle {
    // Session id; also the value carried in the session cookie.
    pub id: String,
    inner: Arc<Mutex<SessionData>>,
    // Set once `write` is called; tells the layer to save after the handler.
    dirty: Arc<Mutex<bool>>,
}
|
||||
|
||||
impl SessionHandle {
    /// Wraps freshly loaded session data; the handle starts out clean.
    pub fn new(id: String, data: SessionData) -> Self {
        Self {
            id,
            inner: Arc::new(Mutex::new(data)),
            dirty: Arc::new(Mutex::new(false)),
        }
    }

    /// Runs `f` with shared (read-only) access to the session data.
    pub fn read<R>(&self, f: impl FnOnce(&SessionData) -> R) -> R {
        let guard = self.inner.lock();
        f(&guard)
    }

    /// Runs `f` with exclusive access and marks the session dirty so the
    /// session layer persists it once the handler finishes.
    pub fn write<R>(&self, f: impl FnOnce(&mut SessionData) -> R) -> R {
        let mut guard = self.inner.lock();
        let out = f(&mut guard);
        *self.dirty.lock() = true;
        out
    }

    /// Returns an owned copy of the current session data.
    pub fn snapshot(&self) -> SessionData {
        self.inner.lock().clone()
    }

    /// Whether `write` has been called on any clone of this handle.
    pub fn is_dirty(&self) -> bool {
        *self.dirty.lock()
    }
}
|
||||
|
||||
/// Attaches a session to every request and persists changes afterwards.
///
/// Looks up the session referenced by the request cookie; when the cookie is
/// absent or the id is unknown to the store, a fresh session is created and
/// a `Set-Cookie` header is appended to the response. Handlers access the
/// session through the `SessionHandle` request extension; only sessions
/// marked dirty are written back to the store.
pub async fn session_layer(
    State(state): State<SessionLayerState>,
    mut req: Request,
    next: Next,
) -> Response {
    let cookie_id = extract_session_cookie(&req);

    // Resolve (id, data, freshly-created?) — an unknown cookie id falls
    // through to creation just like a missing cookie.
    let (session_id, session_data, is_new) =
        match cookie_id.and_then(|id| state.store.get(&id).map(|data| (id.clone(), data))) {
            Some((id, data)) => (id, data, false),
            None => {
                let (id, data) = state.store.create();
                (id, data, true)
            }
        };

    let handle = SessionHandle::new(session_id.clone(), session_data);
    req.extensions_mut().insert(handle.clone());

    let mut resp = next.run(req).await;

    // Persist only if a handler actually wrote to the session.
    if handle.is_dirty() {
        state.store.save(&handle.id, handle.snapshot());
    }

    // Only newly-created sessions need the cookie set; existing ones
    // already carry it.
    if is_new {
        let cookie = build_session_cookie(&session_id, state.secure);
        if let Ok(value) = HeaderValue::from_str(&cookie.to_string()) {
            resp.headers_mut().append(header::SET_COOKIE, value);
        }
    }

    resp
}
|
||||
|
||||
/// CSRF protection for browser-facing UI routes.
///
/// Only state-changing methods (POST/PUT/PATCH/DELETE) on UI paths are
/// checked; everything else passes through untouched. The token may arrive
/// in the CSRF header (or its `x-csrftoken` alias), a urlencoded form
/// field, or a multipart form field. Form checks buffer the entire body and
/// then rebuild the request so the downstream handler can still read it.
pub async fn csrf_layer(req: Request, next: Next) -> Response {
    // Alternate header name used by some JS frameworks.
    const CSRF_HEADER_ALIAS: &str = "x-csrftoken";

    let method = req.method().clone();
    // Safe methods (GET/HEAD/OPTIONS...) are exempt per standard CSRF rules.
    let needs_check = matches!(
        method,
        axum::http::Method::POST
            | axum::http::Method::PUT
            | axum::http::Method::PATCH
            | axum::http::Method::DELETE
    );

    if !needs_check {
        return next.run(req).await;
    }

    // Only the cookie-authenticated UI surface needs CSRF protection.
    let is_ui = req.uri().path().starts_with("/ui/")
        || req.uri().path() == "/ui"
        || req.uri().path() == "/login"
        || req.uri().path() == "/logout";
    if !is_ui {
        return next.run(req).await;
    }

    // Session must already be attached by session_layer further out.
    let handle = match req.extensions().get::<SessionHandle>() {
        Some(h) => h.clone(),
        None => return (StatusCode::FORBIDDEN, "Missing session").into_response(),
    };

    let expected = handle.read(|s| s.csrf_token.clone());

    let header_token = req
        .headers()
        .get(CSRF_HEADER_NAME)
        .or_else(|| req.headers().get(CSRF_HEADER_ALIAS))
        .and_then(|v| v.to_str().ok())
        .map(|s| s.to_string());

    // Fast path: valid header token, body left untouched.
    if let Some(token) = header_token.as_deref() {
        if csrf_tokens_match(&expected, token) {
            return next.run(req).await;
        }
    }

    let content_type = req
        .headers()
        .get(header::CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("")
        .to_string();

    // Slow path: buffer the body to look for a form-carried token.
    // NOTE(review): `usize::MAX` places no cap on the buffered body size, so
    // a large UI upload is held fully in memory here — confirm an upstream
    // body-size limit exists, or cap this read.
    let (parts, body) = req.into_parts();
    let bytes = match axum::body::to_bytes(body, usize::MAX).await {
        Ok(b) => b,
        Err(_) => return (StatusCode::BAD_REQUEST, "Body read failed").into_response(),
    };

    let form_token = if content_type.starts_with("application/x-www-form-urlencoded") {
        extract_form_token(&bytes)
    } else if content_type.starts_with("multipart/form-data") {
        extract_multipart_token(&content_type, &bytes)
    } else {
        None
    };

    if let Some(token) = form_token {
        if csrf_tokens_match(&expected, &token) {
            // Rebuild the request with the buffered body so the handler can
            // still consume it.
            let req = Request::from_parts(parts, axum::body::Body::from(bytes));
            return next.run(req).await;
        }
    }

    // Log enough to debug mismatches without ever logging token values.
    tracing::warn!(
        path = %parts.uri.path(),
        content_type = %content_type,
        expected_len = expected.len(),
        header_present = header_token.is_some(),
        "CSRF token mismatch"
    );
    (StatusCode::FORBIDDEN, "Invalid CSRF token").into_response()
}
|
||||
|
||||
fn extract_multipart_token(content_type: &str, body: &[u8]) -> Option<String> {
|
||||
let boundary = multer::parse_boundary(content_type).ok()?;
|
||||
let prefix = format!("--{}", boundary);
|
||||
let text = std::str::from_utf8(body).ok()?;
|
||||
let needle = "name=\"csrf_token\"";
|
||||
let idx = text.find(needle)?;
|
||||
let after = &text[idx + needle.len()..];
|
||||
let body_start = after.find("\r\n\r\n")? + 4;
|
||||
let tail = &after[body_start..];
|
||||
let end = tail
|
||||
.find(&format!("\r\n--{}", prefix.trim_start_matches("--")))
|
||||
.or_else(|| tail.find("\r\n--"))
|
||||
.unwrap_or(tail.len());
|
||||
Some(tail[..end].trim().to_string())
|
||||
}
|
||||
|
||||
fn extract_session_cookie(req: &Request) -> Option<String> {
|
||||
let raw = req.headers().get(header::COOKIE)?.to_str().ok()?;
|
||||
for pair in raw.split(';') {
|
||||
if let Ok(cookie) = Cookie::parse(pair.trim().to_string()) {
|
||||
if cookie.name() == SESSION_COOKIE_NAME {
|
||||
return Some(cookie.value().to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn build_session_cookie(id: &str, secure: bool) -> Cookie<'static> {
|
||||
let mut cookie = Cookie::new(SESSION_COOKIE_NAME, id.to_string());
|
||||
cookie.set_http_only(true);
|
||||
cookie.set_same_site(SameSite::Lax);
|
||||
cookie.set_secure(secure);
|
||||
cookie.set_path("/");
|
||||
cookie
|
||||
}
|
||||
|
||||
fn extract_form_token(body: &[u8]) -> Option<String> {
|
||||
let text = std::str::from_utf8(body).ok()?;
|
||||
let prefix = format!("{}=", CSRF_FIELD_NAME);
|
||||
for pair in text.split('&') {
|
||||
if let Some(rest) = pair.strip_prefix(&prefix) {
|
||||
return urldecode(rest);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn urldecode(s: &str) -> Option<String> {
|
||||
percent_encoding::percent_decode_str(&s.replace('+', " "))
|
||||
.decode_utf8()
|
||||
.ok()
|
||||
.map(|c| c.into_owned())
|
||||
}
|
||||
105
crates/myfsio-server/src/services/access_logging.rs
Normal file
105
crates/myfsio-server/src/services/access_logging.rs
Normal file
@@ -0,0 +1,105 @@
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
/// In-process view of a bucket's access-logging settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfiguration {
    // Bucket that receives the delivered access-log objects.
    pub target_bucket: String,
    // Key prefix prepended to each delivered log object.
    #[serde(default)]
    pub target_prefix: String,
    // Defaults to true for payloads that omit the flag.
    #[serde(default = "default_enabled")]
    pub enabled: bool,
}

// serde default helper: logging is enabled unless explicitly disabled.
fn default_enabled() -> bool {
    true
}

// On-disk representation mirroring the S3 BucketLoggingStatus shape
// (PascalCase names; an absent `LoggingEnabled` means logging is off).
#[derive(Serialize, Deserialize)]
struct StoredLoggingFile {
    #[serde(rename = "LoggingEnabled")]
    logging_enabled: Option<StoredLoggingEnabled>,
}

#[derive(Serialize, Deserialize)]
struct StoredLoggingEnabled {
    #[serde(rename = "TargetBucket")]
    target_bucket: String,
    #[serde(rename = "TargetPrefix", default)]
    target_prefix: String,
}

/// Per-bucket access-logging configuration service backed by JSON files
/// under the storage root, with a read-through in-memory cache.
pub struct AccessLoggingService {
    storage_root: PathBuf,
    // Caches both present configs and known-absent buckets (None), so a
    // repeated miss doesn't re-hit the filesystem.
    cache: RwLock<HashMap<String, Option<LoggingConfiguration>>>,
}
|
||||
|
||||
impl AccessLoggingService {
|
||||
pub fn new(storage_root: &Path) -> Self {
|
||||
Self {
|
||||
storage_root: storage_root.to_path_buf(),
|
||||
cache: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
fn config_path(&self, bucket: &str) -> PathBuf {
|
||||
self.storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("buckets")
|
||||
.join(bucket)
|
||||
.join("logging.json")
|
||||
}
|
||||
|
||||
pub fn get(&self, bucket: &str) -> Option<LoggingConfiguration> {
|
||||
if let Some(cached) = self.cache.read().get(bucket).cloned() {
|
||||
return cached;
|
||||
}
|
||||
|
||||
let path = self.config_path(bucket);
|
||||
let config = if path.exists() {
|
||||
std::fs::read_to_string(&path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str::<StoredLoggingFile>(&s).ok())
|
||||
.and_then(|f| f.logging_enabled)
|
||||
.map(|e| LoggingConfiguration {
|
||||
target_bucket: e.target_bucket,
|
||||
target_prefix: e.target_prefix,
|
||||
enabled: true,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.cache
|
||||
.write()
|
||||
.insert(bucket.to_string(), config.clone());
|
||||
config
|
||||
}
|
||||
|
||||
pub fn set(&self, bucket: &str, config: LoggingConfiguration) -> std::io::Result<()> {
|
||||
let path = self.config_path(bucket);
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)?;
|
||||
}
|
||||
let stored = StoredLoggingFile {
|
||||
logging_enabled: Some(StoredLoggingEnabled {
|
||||
target_bucket: config.target_bucket.clone(),
|
||||
target_prefix: config.target_prefix.clone(),
|
||||
}),
|
||||
};
|
||||
let json = serde_json::to_string_pretty(&stored)
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
|
||||
std::fs::write(&path, json)?;
|
||||
self.cache.write().insert(bucket.to_string(), Some(config));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn delete(&self, bucket: &str) {
|
||||
let path = self.config_path(bucket);
|
||||
if path.exists() {
|
||||
let _ = std::fs::remove_file(&path);
|
||||
}
|
||||
self.cache.write().insert(bucket.to_string(), None);
|
||||
}
|
||||
}
|
||||
276
crates/myfsio-server/src/services/acl.rs
Normal file
276
crates/myfsio-server/src/services/acl.rs
Normal file
@@ -0,0 +1,276 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
pub const ACL_METADATA_KEY: &str = "__acl__";
|
||||
pub const GRANTEE_ALL_USERS: &str = "*";
|
||||
pub const GRANTEE_AUTHENTICATED_USERS: &str = "authenticated";
|
||||
|
||||
const ACL_PERMISSION_FULL_CONTROL: &str = "FULL_CONTROL";
|
||||
const ACL_PERMISSION_WRITE: &str = "WRITE";
|
||||
const ACL_PERMISSION_WRITE_ACP: &str = "WRITE_ACP";
|
||||
const ACL_PERMISSION_READ: &str = "READ";
|
||||
const ACL_PERMISSION_READ_ACP: &str = "READ_ACP";
|
||||
|
||||
/// A single (grantee, permission) pair within an ACL.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AclGrant {
    // "*" (all users), "authenticated", or a specific principal id.
    pub grantee: String,
    // S3 permission name: FULL_CONTROL, READ, WRITE, READ_ACP, WRITE_ACP.
    pub permission: String,
}

/// S3-style access control list: an owner plus a list of grants.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Acl {
    pub owner: String,
    // Empty when deserializing payloads that omit grants entirely.
    #[serde(default)]
    pub grants: Vec<AclGrant>,
}
|
||||
|
||||
impl Acl {
    /// Computes the set of action names the given principal may perform.
    ///
    /// The owner always receives the FULL_CONTROL expansion. Grants are then
    /// matched by grantee: `*` applies to everyone, `authenticated` to any
    /// signed-in principal, and any other grantee is compared against the
    /// principal id. Permission names are expanded into concrete actions by
    /// `permission_to_actions` (defined elsewhere in this module).
    pub fn allowed_actions(
        &self,
        principal_id: Option<&str>,
        is_authenticated: bool,
    ) -> HashSet<&'static str> {
        let mut actions = HashSet::new();
        if let Some(principal_id) = principal_id {
            if principal_id == self.owner {
                actions.extend(permission_to_actions(ACL_PERMISSION_FULL_CONTROL));
            }
        }
        for grant in &self.grants {
            if grant.grantee == GRANTEE_ALL_USERS {
                actions.extend(permission_to_actions(&grant.permission));
            } else if grant.grantee == GRANTEE_AUTHENTICATED_USERS && is_authenticated {
                actions.extend(permission_to_actions(&grant.permission));
            } else if let Some(principal_id) = principal_id {
                if grant.grantee == principal_id {
                    actions.extend(permission_to_actions(&grant.permission));
                }
            }
        }
        actions
    }
}
|
||||
|
||||
pub fn create_canned_acl(canned_acl: &str, owner: &str) -> Acl {
|
||||
let owner_grant = AclGrant {
|
||||
grantee: owner.to_string(),
|
||||
permission: ACL_PERMISSION_FULL_CONTROL.to_string(),
|
||||
};
|
||||
match canned_acl {
|
||||
"public-read" => Acl {
|
||||
owner: owner.to_string(),
|
||||
grants: vec![
|
||||
owner_grant,
|
||||
AclGrant {
|
||||
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||
permission: ACL_PERMISSION_READ.to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
"public-read-write" => Acl {
|
||||
owner: owner.to_string(),
|
||||
grants: vec![
|
||||
owner_grant,
|
||||
AclGrant {
|
||||
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||
permission: ACL_PERMISSION_READ.to_string(),
|
||||
},
|
||||
AclGrant {
|
||||
grantee: GRANTEE_ALL_USERS.to_string(),
|
||||
permission: ACL_PERMISSION_WRITE.to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
"authenticated-read" => Acl {
|
||||
owner: owner.to_string(),
|
||||
grants: vec![
|
||||
owner_grant,
|
||||
AclGrant {
|
||||
grantee: GRANTEE_AUTHENTICATED_USERS.to_string(),
|
||||
permission: ACL_PERMISSION_READ.to_string(),
|
||||
},
|
||||
],
|
||||
},
|
||||
"bucket-owner-read" | "bucket-owner-full-control" | "private" | _ => Acl {
|
||||
owner: owner.to_string(),
|
||||
grants: vec![owner_grant],
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializes an ACL to S3 `AccessControlPolicy` XML.
///
/// The `*` and `authenticated` grantees map to the standard AllUsers /
/// AuthenticatedUsers group URIs; everything else is emitted as a
/// CanonicalUser with id and display name both set to the grantee string.
/// All interpolated values pass through `xml_escape`.
pub fn acl_to_xml(acl: &Acl) -> String {
    let mut xml = format!(
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
         <AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
         <Owner><ID>{}</ID><DisplayName>{}</DisplayName></Owner>\
         <AccessControlList>",
        xml_escape(&acl.owner),
        xml_escape(&acl.owner),
    );
    for grant in &acl.grants {
        xml.push_str("<Grant>");
        match grant.grantee.as_str() {
            // Public grant → AllUsers group URI.
            GRANTEE_ALL_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                     <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>\
                     </Grantee>",
                );
            }
            // Any-signed-in-user grant → AuthenticatedUsers group URI.
            GRANTEE_AUTHENTICATED_USERS => {
                xml.push_str(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\">\
                     <URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>\
                     </Grantee>",
                );
            }
            // Specific principal → canonical user entry.
            other => {
                xml.push_str(&format!(
                    "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
                     <ID>{}</ID><DisplayName>{}</DisplayName>\
                     </Grantee>",
                    xml_escape(other),
                    xml_escape(other),
                ));
            }
        }
        xml.push_str(&format!(
            "<Permission>{}</Permission></Grant>",
            xml_escape(&grant.permission)
        ));
    }
    xml.push_str("</AccessControlList></AccessControlPolicy>");
    xml
}
|
||||
|
||||
pub fn acl_from_bucket_config(value: &Value) -> Option<Acl> {
|
||||
match value {
|
||||
Value::String(raw) => acl_from_xml(raw).or_else(|| serde_json::from_str(raw).ok()),
|
||||
Value::Object(_) => serde_json::from_value(value.clone()).ok(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn acl_from_object_metadata(metadata: &HashMap<String, String>) -> Option<Acl> {
|
||||
metadata
|
||||
.get(ACL_METADATA_KEY)
|
||||
.and_then(|raw| serde_json::from_str::<Acl>(raw).ok())
|
||||
}
|
||||
|
||||
pub fn store_object_acl(metadata: &mut HashMap<String, String>, acl: &Acl) {
|
||||
if let Ok(serialized) = serde_json::to_string(acl) {
|
||||
metadata.insert(ACL_METADATA_KEY.to_string(), serialized);
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse an S3 `AccessControlPolicy` XML document into an [`Acl`].
///
/// Returns `None` when the input is not well-formed XML. A missing
/// `<Owner><ID>` falls back to the literal owner `"myfsio"`. Grants with an
/// empty permission or an unresolvable grantee are skipped rather than
/// failing the whole parse.
fn acl_from_xml(xml: &str) -> Option<Acl> {
    let doc = roxmltree::Document::parse(xml).ok()?;
    // Owner = text of the <ID> child of the first <Owner> element anywhere
    // in the document, trimmed; "myfsio" when absent.
    let owner = doc
        .descendants()
        .find(|node| node.is_element() && node.tag_name().name() == "Owner")
        .and_then(|node| {
            node.children()
                .find(|child| child.is_element() && child.tag_name().name() == "ID")
                .and_then(|child| child.text())
        })
        .unwrap_or("myfsio")
        .trim()
        .to_string();

    let mut grants = Vec::new();
    // Each <Grant> element contributes at most one AclGrant.
    for grant in doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Grant")
    {
        let permission = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Permission")
            .and_then(|child| child.text())
            .unwrap_or_default()
            .trim()
            .to_string();
        // A grant without a permission carries no meaning — skip it.
        if permission.is_empty() {
            continue;
        }
        let grantee_node = grant
            .children()
            .find(|child| child.is_element() && child.tag_name().name() == "Grantee");
        // Grantee resolution: the two AWS group URIs map to the internal
        // group constants; anything else falls back to the <ID> child
        // (canonical user).
        let grantee = grantee_node
            .and_then(|node| {
                let uri = node
                    .children()
                    .find(|child| child.is_element() && child.tag_name().name() == "URI")
                    .and_then(|child| child.text())
                    .map(|text| text.trim().to_string());
                match uri.as_deref() {
                    Some("http://acs.amazonaws.com/groups/global/AllUsers") => {
                        Some(GRANTEE_ALL_USERS.to_string())
                    }
                    Some("http://acs.amazonaws.com/groups/global/AuthenticatedUsers") => {
                        Some(GRANTEE_AUTHENTICATED_USERS.to_string())
                    }
                    _ => node
                        .children()
                        .find(|child| child.is_element() && child.tag_name().name() == "ID")
                        .and_then(|child| child.text())
                        .map(|text| text.trim().to_string()),
                }
            })
            .unwrap_or_default();
        // No resolvable grantee — skip the grant.
        if grantee.is_empty() {
            continue;
        }
        grants.push(AclGrant {
            grantee,
            permission,
        });
    }

    Some(Acl { owner, grants })
}
|
||||
|
||||
fn permission_to_actions(permission: &str) -> &'static [&'static str] {
|
||||
match permission {
|
||||
ACL_PERMISSION_FULL_CONTROL => &["read", "write", "delete", "list", "share"],
|
||||
ACL_PERMISSION_WRITE => &["write", "delete"],
|
||||
ACL_PERMISSION_WRITE_ACP => &["share"],
|
||||
ACL_PERMISSION_READ => &["read", "list"],
|
||||
ACL_PERMISSION_READ_ACP => &["share"],
|
||||
_ => &[],
|
||||
}
|
||||
}
|
||||
|
||||
/// Escape the five XML special characters in `s` so it can be embedded as
/// element text or attribute content.
///
/// `&` must be replaced first — otherwise the `&` introduced by the other
/// entities would be escaped a second time.
fn xml_escape(s: &str) -> String {
    // BUG FIX: the previous version replaced each character with itself
    // (e.g. `replace('&', "&")`), so nothing was actually escaped and
    // attacker-controlled owner/grantee strings could break the XML.
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&apos;")
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// "public-read" must allow anonymous read/list but never write.
    #[test]
    fn canned_acl_grants_public_read() {
        let acl = create_canned_acl("public-read", "owner");
        let actions = acl.allowed_actions(None, false);
        assert!(!actions.contains("write"));
        assert!(actions.contains("read"));
        assert!(actions.contains("list"));
    }

    /// Serializing to XML and re-parsing through the bucket-config path
    /// must preserve owner and both grants.
    #[test]
    fn xml_round_trip_preserves_grants() {
        let original = create_canned_acl("authenticated-read", "owner");
        let xml = acl_to_xml(&original);
        let parsed = acl_from_bucket_config(&Value::String(xml)).unwrap();
        assert_eq!(parsed.owner, "owner");
        assert_eq!(parsed.grants.len(), 2);
        let has_auth_group = parsed
            .grants
            .iter()
            .any(|grant| grant.grantee == GRANTEE_AUTHENTICATED_USERS);
        assert!(has_auth_group);
    }
}
|
||||
315
crates/myfsio-server/src/services/gc.rs
Normal file
315
crates/myfsio-server/src/services/gc.rs
Normal file
@@ -0,0 +1,315 @@
|
||||
use serde_json::{json, Value};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tuning knobs for the background garbage collector.
pub struct GcConfig {
    /// How often the background GC loop fires, in hours.
    pub interval_hours: f64,
    /// Temp files older than this (hours) are eligible for deletion.
    pub temp_file_max_age_hours: f64,
    /// Multipart upload directories older than this (days) are eligible.
    pub multipart_max_age_days: u64,
    /// Lock files older than this (hours) are eligible.
    pub lock_file_max_age_hours: f64,
    /// When true, GC only reports what it would delete without touching disk.
    pub dry_run: bool,
}
|
||||
|
||||
impl Default for GcConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interval_hours: 6.0,
|
||||
temp_file_max_age_hours: 24.0,
|
||||
multipart_max_age_days: 7,
|
||||
lock_file_max_age_hours: 1.0,
|
||||
dry_run: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// A dry-run GC pass must count a stale temp file as "deleted" in its
    /// report while leaving the file on disk.
    #[tokio::test]
    async fn dry_run_reports_but_does_not_delete_temp_files() {
        let tmp = tempfile::tempdir().unwrap();
        let tmp_dir = tmp.path().join(".myfsio.sys").join("tmp");
        std::fs::create_dir_all(&tmp_dir).unwrap();
        let file_path = tmp_dir.join("stale.tmp");
        std::fs::write(&file_path, b"temporary").unwrap();
        // Give the file a nonzero age so `age > max_age` (0.0h) holds.
        tokio::time::sleep(std::time::Duration::from_millis(5)).await;

        let service = GcService::new(
            tmp.path().to_path_buf(),
            GcConfig {
                // Zero max age makes the freshly written file "stale".
                temp_file_max_age_hours: 0.0,
                dry_run: true,
                ..GcConfig::default()
            },
        );

        let result = service.run_now(false).await.unwrap();

        // Counted in the report…
        assert_eq!(result["temp_files_deleted"], 1);
        // …but not actually removed.
        assert!(file_path.exists());
    }
}
|
||||
|
||||
/// Background garbage collector for temp files, abandoned multipart
/// uploads, stale lock files and empty directories under the storage root.
pub struct GcService {
    /// Root directory of the object store; GC only touches `.myfsio.sys`
    /// subtrees beneath it.
    storage_root: PathBuf,
    config: GcConfig,
    /// Guards against concurrent GC runs (run_now rejects when already set).
    running: Arc<RwLock<bool>>,
    /// Start time of the in-flight run, if any; used by `status()`.
    started_at: Arc<RwLock<Option<Instant>>>,
    /// Last executions (capped at 50), persisted to `history_path`.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
|
||||
|
||||
impl GcService {
    /// Create a GC service rooted at `storage_root`, loading any previously
    /// persisted execution history from `.myfsio.sys/config/gc_history.json`.
    /// Unreadable or malformed history files silently start with an empty
    /// history.
    pub fn new(storage_root: PathBuf, config: GcConfig) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("gc_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage_root,
            config,
            running: Arc::new(RwLock::new(false)),
            started_at: Arc::new(RwLock::new(None)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// JSON snapshot of the service state and its configuration.
    /// `scan_elapsed_seconds` is `null` when no run is in flight.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        let scan_elapsed_seconds = self
            .started_at
            .read()
            .await
            .as_ref()
            .map(|started| started.elapsed().as_secs_f64());
        json!({
            "enabled": true,
            "running": running,
            "scanning": running,
            "scan_elapsed_seconds": scan_elapsed_seconds,
            "interval_hours": self.config.interval_hours,
            "temp_file_max_age_hours": self.config.temp_file_max_age_hours,
            "multipart_max_age_days": self.config.multipart_max_age_days,
            "lock_file_max_age_hours": self.config.lock_file_max_age_hours,
            "dry_run": self.config.dry_run,
        })
    }

    /// Past executions, most recent first.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        let mut executions: Vec<Value> = history.iter().cloned().collect();
        executions.reverse();
        json!({ "executions": executions })
    }

    /// Run one GC cycle immediately. `dry_run=true` forces a dry run even if
    /// the config says otherwise (the two flags are OR-ed). Errors with a
    /// message when a cycle is already in progress.
    pub async fn run_now(&self, dry_run: bool) -> Result<Value, String> {
        // Claim the "running" flag atomically within one write-lock scope.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("GC already running".to_string());
            }
            *running = true;
        }
        *self.started_at.write().await = Some(Instant::now());

        let start = Instant::now();
        let result = self.execute_gc(dry_run || self.config.dry_run).await;
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;
        *self.started_at.write().await = None;

        // The history record additionally carries the wall-clock duration;
        // the value returned to the caller does not.
        let mut result_json = result.clone();
        if let Some(obj) = result_json.as_object_mut() {
            obj.insert("execution_time_seconds".to_string(), json!(elapsed));
        }

        let record = json!({
            // Unix timestamp with millisecond precision, as fractional seconds.
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run || self.config.dry_run,
            "result": result_json,
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Keep only the 50 most recent executions.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result)
    }

    /// One sweep over the three GC targets: stale temp files, abandoned
    /// multipart uploads, and stale lock files; finally prunes empty
    /// directories. In dry-run mode nothing is removed but counters are
    /// still incremented, so the report shows what a real run would do.
    async fn execute_gc(&self, dry_run: bool) -> Value {
        let mut temp_files_deleted = 0u64;
        let mut temp_bytes_freed = 0u64;
        let mut multipart_uploads_deleted = 0u64;
        let mut lock_files_deleted = 0u64;
        let mut empty_dirs_removed = 0u64;
        let mut errors: Vec<String> = Vec::new();

        let now = std::time::SystemTime::now();
        let temp_max_age =
            std::time::Duration::from_secs_f64(self.config.temp_file_max_age_hours * 3600.0);
        let multipart_max_age =
            std::time::Duration::from_secs(self.config.multipart_max_age_days * 86400);
        let lock_max_age =
            std::time::Duration::from_secs_f64(self.config.lock_file_max_age_hours * 3600.0);

        // --- Temp files: .myfsio.sys/tmp/* older than temp_max_age ---
        let tmp_dir = self.storage_root.join(".myfsio.sys").join("tmp");
        if tmp_dir.exists() {
            match std::fs::read_dir(&tmp_dir) {
                Ok(entries) => {
                    for entry in entries.flatten() {
                        if let Ok(metadata) = entry.metadata() {
                            if let Ok(modified) = metadata.modified() {
                                // duration_since fails when mtime is in the
                                // future; such files are simply skipped.
                                if let Ok(age) = now.duration_since(modified) {
                                    if age > temp_max_age {
                                        let size = metadata.len();
                                        if !dry_run {
                                            if let Err(e) = std::fs::remove_file(entry.path()) {
                                                errors.push(format!(
                                                    "Failed to remove temp file: {}",
                                                    e
                                                ));
                                                // Failed removals are not counted.
                                                continue;
                                            }
                                        }
                                        temp_files_deleted += 1;
                                        temp_bytes_freed += size;
                                    }
                                }
                            }
                        }
                    }
                }
                Err(e) => errors.push(format!("Failed to read tmp dir: {}", e)),
            }
        }

        // --- Multipart uploads: .myfsio.sys/multipart/<bucket>/<upload> ---
        // Whole upload directories older than multipart_max_age are removed;
        // removal errors here are deliberately ignored (best-effort).
        let multipart_dir = self.storage_root.join(".myfsio.sys").join("multipart");
        if multipart_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&multipart_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    if let Ok(uploads) = std::fs::read_dir(bucket_entry.path()) {
                        for upload in uploads.flatten() {
                            if let Ok(metadata) = upload.metadata() {
                                if let Ok(modified) = metadata.modified() {
                                    if let Ok(age) = now.duration_since(modified) {
                                        if age > multipart_max_age {
                                            if !dry_run {
                                                let _ = std::fs::remove_dir_all(upload.path());
                                            }
                                            multipart_uploads_deleted += 1;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // --- Lock files: .myfsio.sys/buckets/<bucket>/locks/* ---
        let buckets_dir = self.storage_root.join(".myfsio.sys").join("buckets");
        if buckets_dir.exists() {
            if let Ok(bucket_dirs) = std::fs::read_dir(&buckets_dir) {
                for bucket_entry in bucket_dirs.flatten() {
                    let locks_dir = bucket_entry.path().join("locks");
                    if locks_dir.exists() {
                        if let Ok(locks) = std::fs::read_dir(&locks_dir) {
                            for lock in locks.flatten() {
                                if let Ok(metadata) = lock.metadata() {
                                    if let Ok(modified) = metadata.modified() {
                                        if let Ok(age) = now.duration_since(modified) {
                                            if age > lock_max_age {
                                                if !dry_run {
                                                    let _ = std::fs::remove_file(lock.path());
                                                }
                                                lock_files_deleted += 1;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // --- Empty directory pruning (skipped entirely in dry-run) ---
        // Only first-level subdirectories of tmp/ and multipart/ are checked.
        if !dry_run {
            for dir in [&tmp_dir, &multipart_dir] {
                if dir.exists() {
                    if let Ok(entries) = std::fs::read_dir(dir) {
                        for entry in entries.flatten() {
                            if entry.path().is_dir() {
                                if let Ok(mut contents) = std::fs::read_dir(entry.path()) {
                                    if contents.next().is_none() {
                                        let _ = std::fs::remove_dir(entry.path());
                                        // NOTE(review): counted even if
                                        // remove_dir failed — confirm intended.
                                        empty_dirs_removed += 1;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        json!({
            "temp_files_deleted": temp_files_deleted,
            "temp_bytes_freed": temp_bytes_freed,
            "multipart_uploads_deleted": multipart_uploads_deleted,
            "lock_files_deleted": lock_files_deleted,
            "empty_dirs_removed": empty_dirs_removed,
            "errors": errors,
        })
    }

    /// Persist the in-memory history to disk, best-effort: directory
    /// creation and write errors are ignored. Note the blocking `std::fs`
    /// calls run on the async executor thread.
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }

    /// Spawn the periodic GC loop. The first `tick()` of a tokio interval
    /// fires immediately, so it is consumed once before the loop to avoid
    /// running GC right at startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("GC cycle starting");
                match self.run_now(false).await {
                    Ok(result) => tracing::info!("GC cycle complete: {:?}", result),
                    Err(e) => tracing::warn!("GC cycle failed: {}", e),
                }
            }
        })
    }
}
|
||||
732
crates/myfsio-server/src/services/integrity.rs
Normal file
732
crates/myfsio-server/src/services/integrity.rs
Normal file
@@ -0,0 +1,732 @@
|
||||
use myfsio_common::constants::{
|
||||
BUCKET_META_DIR, BUCKET_VERSIONS_DIR, INDEX_FILE, SYSTEM_BUCKETS_DIR, SYSTEM_ROOT,
|
||||
};
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use serde_json::{json, Map, Value};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Cap on the number of per-issue records kept in a single scan report
/// (counters are unaffected by this cap).
const MAX_ISSUES: usize = 500;
/// Bucket-local folder names that belong to MyFSIO itself and are skipped
/// when scanning a bucket's user objects.
const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
|
||||
|
||||
/// Tuning knobs for the background integrity scanner.
pub struct IntegrityConfig {
    /// How often the background scan loop fires, in hours.
    pub interval_hours: f64,
    /// Maximum number of objects examined per scan before stopping early.
    pub batch_size: usize,
    /// Whether scans may attempt repairs.
    // NOTE(review): not consumed by the scan code visible in this file.
    pub auto_heal: bool,
    /// Report-only mode flag.
    // NOTE(review): recorded in history but not consumed by the scan here.
    pub dry_run: bool,
}
|
||||
|
||||
impl Default for IntegrityConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interval_hours: 24.0,
|
||||
batch_size: 10_000,
|
||||
auto_heal: false,
|
||||
dry_run: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Background scanner that cross-checks on-disk objects against their index
/// metadata (corruption, orphans, phantoms, stale versions, etag cache).
pub struct IntegrityService {
    // Held but not used by the checks in this file.
    #[allow(dead_code)]
    storage: Arc<FsStorageBackend>,
    /// Root directory of the object store.
    storage_root: PathBuf,
    config: IntegrityConfig,
    /// Guards against concurrent scans (run_now rejects when already set).
    running: Arc<RwLock<bool>>,
    /// Start time of the in-flight scan, if any; used by `status()`.
    started_at: Arc<RwLock<Option<Instant>>>,
    /// Last executions (capped at 50), persisted to `history_path`.
    history: Arc<RwLock<Vec<Value>>>,
    history_path: PathBuf,
}
|
||||
|
||||
/// Mutable accumulator threaded through all integrity checks of one scan.
#[derive(Default)]
struct ScanState {
    // Counts both files and metadata entries visited; also drives the
    // batch_size early-exit.
    objects_scanned: u64,
    buckets_scanned: u64,
    // Per-category issue counters (not capped, unlike `issues`).
    corrupted_objects: u64,
    orphaned_objects: u64,
    phantom_metadata: u64,
    stale_versions: u64,
    etag_cache_inconsistencies: u64,
    // Detailed issue records, capped at MAX_ISSUES by push_issue.
    issues: Vec<Value>,
    // Non-fatal errors encountered while scanning.
    errors: Vec<String>,
}
|
||||
|
||||
impl ScanState {
|
||||
fn batch_exhausted(&self, batch_size: usize) -> bool {
|
||||
self.objects_scanned >= batch_size as u64
|
||||
}
|
||||
|
||||
fn push_issue(&mut self, issue_type: &str, bucket: &str, key: &str, detail: String) {
|
||||
if self.issues.len() < MAX_ISSUES {
|
||||
self.issues.push(json!({
|
||||
"issue_type": issue_type,
|
||||
"bucket": bucket,
|
||||
"key": key,
|
||||
"detail": detail,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
fn into_json(self, elapsed: f64) -> Value {
|
||||
json!({
|
||||
"objects_scanned": self.objects_scanned,
|
||||
"buckets_scanned": self.buckets_scanned,
|
||||
"corrupted_objects": self.corrupted_objects,
|
||||
"orphaned_objects": self.orphaned_objects,
|
||||
"phantom_metadata": self.phantom_metadata,
|
||||
"stale_versions": self.stale_versions,
|
||||
"etag_cache_inconsistencies": self.etag_cache_inconsistencies,
|
||||
"issues_healed": 0,
|
||||
"issues": self.issues,
|
||||
"errors": self.errors,
|
||||
"execution_time_seconds": elapsed,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl IntegrityService {
    /// Create an integrity service rooted at `storage_root`, loading any
    /// previously persisted execution history. Unreadable or malformed
    /// history files silently start with an empty history.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: &Path,
        config: IntegrityConfig,
    ) -> Self {
        let history_path = storage_root
            .join(SYSTEM_ROOT)
            .join("config")
            .join("integrity_history.json");

        let history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<Value>(&s).ok())
                .and_then(|v| v.get("executions").and_then(|e| e.as_array().cloned()))
                .unwrap_or_default()
        } else {
            Vec::new()
        };

        Self {
            storage,
            storage_root: storage_root.to_path_buf(),
            config,
            running: Arc::new(RwLock::new(false)),
            started_at: Arc::new(RwLock::new(None)),
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// JSON snapshot of the service state and its configuration.
    /// `scan_elapsed_seconds` is `null` when no scan is in flight.
    pub async fn status(&self) -> Value {
        let running = *self.running.read().await;
        let scan_elapsed_seconds = self
            .started_at
            .read()
            .await
            .as_ref()
            .map(|started| started.elapsed().as_secs_f64());
        json!({
            "enabled": true,
            "running": running,
            "scanning": running,
            "scan_elapsed_seconds": scan_elapsed_seconds,
            "interval_hours": self.config.interval_hours,
            "batch_size": self.config.batch_size,
            "auto_heal": self.config.auto_heal,
            "dry_run": self.config.dry_run,
        })
    }

    /// Past executions, most recent first.
    pub async fn history(&self) -> Value {
        let history = self.history.read().await;
        let mut executions: Vec<Value> = history.iter().cloned().collect();
        executions.reverse();
        json!({ "executions": executions })
    }

    /// Run one integrity scan immediately. Errors with a message when a
    /// scan is already in progress.
    ///
    /// NOTE(review): `dry_run` and `auto_heal` are recorded in the history
    /// record but `scan_all_buckets` receives neither — the scan itself is
    /// read-only here. Confirm whether healing is implemented elsewhere.
    pub async fn run_now(&self, dry_run: bool, auto_heal: bool) -> Result<Value, String> {
        // Claim the "running" flag atomically within one write-lock scope.
        {
            let mut running = self.running.write().await;
            if *running {
                return Err("Integrity check already running".to_string());
            }
            *running = true;
        }
        *self.started_at.write().await = Some(Instant::now());

        let start = Instant::now();
        let storage_root = self.storage_root.clone();
        let batch_size = self.config.batch_size;
        // The scan is pure blocking filesystem work, so it is moved off the
        // async executor; a panicked/cancelled task degrades to an error
        // entry instead of poisoning the service.
        let result =
            tokio::task::spawn_blocking(move || scan_all_buckets(&storage_root, batch_size))
                .await
                .unwrap_or_else(|e| {
                    let mut st = ScanState::default();
                    st.errors.push(format!("scan task failed: {}", e));
                    st
                });
        let elapsed = start.elapsed().as_secs_f64();

        *self.running.write().await = false;
        *self.started_at.write().await = None;

        let result_json = result.into_json(elapsed);

        let record = json!({
            // Unix timestamp with millisecond precision, as fractional seconds.
            "timestamp": chrono::Utc::now().timestamp_millis() as f64 / 1000.0,
            "dry_run": dry_run,
            "auto_heal": auto_heal,
            "result": result_json.clone(),
        });

        {
            let mut history = self.history.write().await;
            history.push(record);
            // Keep only the 50 most recent executions.
            if history.len() > 50 {
                let excess = history.len() - 50;
                history.drain(..excess);
            }
        }
        self.save_history().await;

        Ok(result_json)
    }

    /// Persist the in-memory history to disk, best-effort: directory
    /// creation and write errors are ignored. Note the blocking `std::fs`
    /// calls run on the async executor thread.
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "executions": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }

    /// Spawn the periodic scan loop. The first `tick()` of a tokio interval
    /// fires immediately, so it is consumed once before the loop to avoid
    /// scanning right at startup.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs_f64(self.config.interval_hours * 3600.0);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Integrity check starting");
                match self.run_now(false, false).await {
                    Ok(result) => tracing::info!("Integrity check complete: {:?}", result),
                    Err(e) => tracing::warn!("Integrity check failed: {}", e),
                }
            }
        })
    }
}
|
||||
|
||||
fn scan_all_buckets(storage_root: &Path, batch_size: usize) -> ScanState {
|
||||
let mut state = ScanState::default();
|
||||
let buckets = match list_bucket_names(storage_root) {
|
||||
Ok(b) => b,
|
||||
Err(e) => {
|
||||
state.errors.push(format!("list buckets: {}", e));
|
||||
return state;
|
||||
}
|
||||
};
|
||||
|
||||
for bucket in &buckets {
|
||||
if state.batch_exhausted(batch_size) {
|
||||
break;
|
||||
}
|
||||
state.buckets_scanned += 1;
|
||||
|
||||
let bucket_path = storage_root.join(bucket);
|
||||
let meta_root = storage_root
|
||||
.join(SYSTEM_ROOT)
|
||||
.join(SYSTEM_BUCKETS_DIR)
|
||||
.join(bucket)
|
||||
.join(BUCKET_META_DIR);
|
||||
|
||||
let index_entries = collect_index_entries(&meta_root);
|
||||
|
||||
check_corrupted(&mut state, bucket, &bucket_path, &index_entries, batch_size);
|
||||
check_phantom(&mut state, bucket, &bucket_path, &index_entries, batch_size);
|
||||
check_orphaned(&mut state, bucket, &bucket_path, &index_entries, batch_size);
|
||||
check_stale_versions(&mut state, storage_root, bucket, batch_size);
|
||||
check_etag_cache(&mut state, storage_root, bucket, &index_entries, batch_size);
|
||||
}
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
fn list_bucket_names(storage_root: &Path) -> std::io::Result<Vec<String>> {
|
||||
let mut names = Vec::new();
|
||||
if !storage_root.exists() {
|
||||
return Ok(names);
|
||||
}
|
||||
for entry in std::fs::read_dir(storage_root)? {
|
||||
let entry = entry?;
|
||||
let name = entry.file_name().to_string_lossy().to_string();
|
||||
if name == SYSTEM_ROOT {
|
||||
continue;
|
||||
}
|
||||
if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
|
||||
names.push(name);
|
||||
}
|
||||
}
|
||||
Ok(names)
|
||||
}
|
||||
|
||||
/// One object's entry as found in a bucket's index metadata.
#[allow(dead_code)]
struct IndexEntryInfo {
    /// Raw JSON value for the object from the index file.
    entry: Value,
    /// Path of the index file this entry came from.
    index_file: PathBuf,
    /// Key exactly as stored inside that index file (no directory prefix).
    key_name: String,
}
|
||||
|
||||
/// Recursively read every `INDEX_FILE` under `meta_root` and flatten the
/// entries into a map of full object key (directory prefix joined with '/')
/// to [`IndexEntryInfo`]. Unreadable directories/files and non-object JSON
/// are silently skipped.
fn collect_index_entries(meta_root: &Path) -> HashMap<String, IndexEntryInfo> {
    let mut out: HashMap<String, IndexEntryInfo> = HashMap::new();
    if !meta_root.exists() {
        return out;
    }

    // Iterative depth-first walk; avoids recursion on deep trees.
    let mut stack: Vec<PathBuf> = vec![meta_root.to_path_buf()];
    while let Some(dir) = stack.pop() {
        let rd = match std::fs::read_dir(&dir) {
            Ok(r) => r,
            Err(_) => continue,
        };
        for entry in rd.flatten() {
            let path = entry.path();
            let ft = match entry.file_type() {
                Ok(t) => t,
                Err(_) => continue,
            };
            if ft.is_dir() {
                stack.push(path);
                continue;
            }
            // Only the index file itself is of interest at each level.
            if entry.file_name().to_string_lossy() != INDEX_FILE {
                continue;
            }
            // Directory of the index file, relative to meta_root — this is
            // the key prefix for every entry inside it.
            let rel_dir = match path.parent().and_then(|p| p.strip_prefix(meta_root).ok()) {
                Some(p) => p.to_path_buf(),
                None => continue,
            };
            // Join path components with '/' so keys are platform-independent.
            let dir_prefix = if rel_dir.as_os_str().is_empty() {
                String::new()
            } else {
                rel_dir
                    .components()
                    .map(|c| c.as_os_str().to_string_lossy().to_string())
                    .collect::<Vec<_>>()
                    .join("/")
            };

            let content = match std::fs::read_to_string(&path) {
                Ok(c) => c,
                Err(_) => continue,
            };
            // The index file must be a JSON object of key -> entry.
            let index_data: Map<String, Value> = match serde_json::from_str(&content) {
                Ok(Value::Object(m)) => m,
                _ => continue,
            };

            for (key_name, entry_val) in index_data {
                let full_key = if dir_prefix.is_empty() {
                    key_name.clone()
                } else {
                    format!("{}/{}", dir_prefix, key_name)
                };
                out.insert(
                    full_key,
                    IndexEntryInfo {
                        entry: entry_val,
                        index_file: path.clone(),
                        key_name,
                    },
                );
            }
        }
    }
    out
}
|
||||
|
||||
fn stored_etag(entry: &Value) -> Option<String> {
|
||||
entry
|
||||
.get("metadata")
|
||||
.and_then(|m| m.get("__etag__"))
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
/// Re-hash every indexed object that exists on disk and flag those whose
/// MD5 no longer matches the etag stored in the index ("corrupted_object").
/// Entries without a stored etag, or whose file is missing (handled by
/// check_phantom), are skipped. Keys are visited in sorted order so batch
/// cut-offs are deterministic.
fn check_corrupted(
    state: &mut ScanState,
    bucket: &str,
    bucket_path: &Path,
    entries: &HashMap<String, IndexEntryInfo>,
    batch_size: usize,
) {
    let mut keys: Vec<&String> = entries.keys().collect();
    keys.sort();

    for full_key in keys {
        if state.batch_exhausted(batch_size) {
            return;
        }
        let info = &entries[full_key];
        let object_path = bucket_path.join(full_key);
        // Missing files are check_phantom's concern, not corruption.
        if !object_path.exists() {
            continue;
        }
        state.objects_scanned += 1;

        let Some(stored) = stored_etag(&info.entry) else {
            continue;
        };

        match myfsio_crypto::hashing::md5_file(&object_path) {
            Ok(actual) => {
                if actual != stored {
                    state.corrupted_objects += 1;
                    state.push_issue(
                        "corrupted_object",
                        bucket,
                        full_key,
                        format!("stored_etag={} actual_etag={}", stored, actual),
                    );
                }
            }
            // Hashing failures (I/O) are recorded but do not abort the scan.
            Err(e) => state
                .errors
                .push(format!("hash {}/{}: {}", bucket, full_key, e)),
        }
    }
}
|
||||
|
||||
fn check_phantom(
|
||||
state: &mut ScanState,
|
||||
bucket: &str,
|
||||
bucket_path: &Path,
|
||||
entries: &HashMap<String, IndexEntryInfo>,
|
||||
batch_size: usize,
|
||||
) {
|
||||
let mut keys: Vec<&String> = entries.keys().collect();
|
||||
keys.sort();
|
||||
|
||||
for full_key in keys {
|
||||
if state.batch_exhausted(batch_size) {
|
||||
return;
|
||||
}
|
||||
state.objects_scanned += 1;
|
||||
let object_path = bucket_path.join(full_key);
|
||||
if !object_path.exists() {
|
||||
state.phantom_metadata += 1;
|
||||
state.push_issue(
|
||||
"phantom_metadata",
|
||||
bucket,
|
||||
full_key,
|
||||
"metadata entry without file on disk".to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Walk the bucket's data directory and flag files that have no index entry
/// ("orphaned_object"). MyFSIO's own top-level folders (INTERNAL_FOLDERS)
/// are skipped.
fn check_orphaned(
    state: &mut ScanState,
    bucket: &str,
    bucket_path: &Path,
    entries: &HashMap<String, IndexEntryInfo>,
    batch_size: usize,
) {
    let indexed: HashSet<&String> = entries.keys().collect();
    // Iterative walk; each frame carries the directory and its key prefix.
    let mut stack: Vec<(PathBuf, String)> = vec![(bucket_path.to_path_buf(), String::new())];

    while let Some((dir, prefix)) = stack.pop() {
        if state.batch_exhausted(batch_size) {
            return;
        }
        let rd = match std::fs::read_dir(&dir) {
            Ok(r) => r,
            Err(_) => continue,
        };
        for entry in rd.flatten() {
            if state.batch_exhausted(batch_size) {
                return;
            }
            let name = entry.file_name().to_string_lossy().to_string();
            let ft = match entry.file_type() {
                Ok(t) => t,
                Err(_) => continue,
            };
            if ft.is_dir() {
                // Internal folders only exist at the bucket root
                // (empty prefix) — deeper same-named dirs are user data.
                if prefix.is_empty() && INTERNAL_FOLDERS.contains(&name.as_str()) {
                    continue;
                }
                let new_prefix = if prefix.is_empty() {
                    name
                } else {
                    format!("{}/{}", prefix, name)
                };
                stack.push((entry.path(), new_prefix));
            } else if ft.is_file() {
                let full_key = if prefix.is_empty() {
                    name
                } else {
                    format!("{}/{}", prefix, name)
                };
                state.objects_scanned += 1;
                if !indexed.contains(&full_key) {
                    state.orphaned_objects += 1;
                    state.push_issue(
                        "orphaned_object",
                        bucket,
                        &full_key,
                        "file exists without metadata entry".to_string(),
                    );
                }
            }
        }
    }
}
|
||||
|
||||
/// Scan the bucket's versions tree for mismatched pairs: a `.bin` data file
/// without its `.json` manifest, or a manifest without its data file —
/// either is a "stale_version". Pairing is done per directory by file stem.
fn check_stale_versions(
    state: &mut ScanState,
    storage_root: &Path,
    bucket: &str,
    batch_size: usize,
) {
    let versions_root = storage_root
        .join(SYSTEM_ROOT)
        .join(SYSTEM_BUCKETS_DIR)
        .join(bucket)
        .join(BUCKET_VERSIONS_DIR);
    if !versions_root.exists() {
        return;
    }

    let mut stack: Vec<PathBuf> = vec![versions_root.clone()];
    while let Some(dir) = stack.pop() {
        if state.batch_exhausted(batch_size) {
            return;
        }
        let rd = match std::fs::read_dir(&dir) {
            Ok(r) => r,
            Err(_) => continue,
        };

        // Collect this directory's .bin and .json files keyed by stem so
        // the two sets can be diffed in both directions below.
        let mut bin_stems: HashMap<String, PathBuf> = HashMap::new();
        let mut json_stems: HashMap<String, PathBuf> = HashMap::new();
        let mut subdirs: Vec<PathBuf> = Vec::new();

        for entry in rd.flatten() {
            let ft = match entry.file_type() {
                Ok(t) => t,
                Err(_) => continue,
            };
            let path = entry.path();
            if ft.is_dir() {
                subdirs.push(path);
                continue;
            }
            let name = entry.file_name().to_string_lossy().to_string();
            if let Some(stem) = name.strip_suffix(".bin") {
                bin_stems.insert(stem.to_string(), path);
            } else if let Some(stem) = name.strip_suffix(".json") {
                json_stems.insert(stem.to_string(), path);
            }
        }

        // Data without manifest.
        for (stem, path) in &bin_stems {
            if state.batch_exhausted(batch_size) {
                return;
            }
            state.objects_scanned += 1;
            if !json_stems.contains_key(stem) {
                state.stale_versions += 1;
                // Report the path relative to the versions root, with
                // forward slashes regardless of platform.
                let key = path
                    .strip_prefix(&versions_root)
                    .map(|p| p.to_string_lossy().replace('\\', "/"))
                    .unwrap_or_else(|_| path.display().to_string());
                state.push_issue(
                    "stale_version",
                    bucket,
                    &key,
                    "version data without manifest".to_string(),
                );
            }
        }

        // Manifest without data.
        for (stem, path) in &json_stems {
            if state.batch_exhausted(batch_size) {
                return;
            }
            state.objects_scanned += 1;
            if !bin_stems.contains_key(stem) {
                state.stale_versions += 1;
                let key = path
                    .strip_prefix(&versions_root)
                    .map(|p| p.to_string_lossy().replace('\\', "/"))
                    .unwrap_or_else(|_| path.display().to_string());
                state.push_issue(
                    "stale_version",
                    bucket,
                    &key,
                    "version manifest without data".to_string(),
                );
            }
        }

        stack.extend(subdirs);
    }
}
|
||||
|
||||
fn check_etag_cache(
|
||||
state: &mut ScanState,
|
||||
storage_root: &Path,
|
||||
bucket: &str,
|
||||
entries: &HashMap<String, IndexEntryInfo>,
|
||||
batch_size: usize,
|
||||
) {
|
||||
let etag_index_path = storage_root
|
||||
.join(SYSTEM_ROOT)
|
||||
.join(SYSTEM_BUCKETS_DIR)
|
||||
.join(bucket)
|
||||
.join("etag_index.json");
|
||||
if !etag_index_path.exists() {
|
||||
return;
|
||||
}
|
||||
|
||||
let cache: HashMap<String, Value> = match std::fs::read_to_string(&etag_index_path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str(&s).ok())
|
||||
{
|
||||
Some(Value::Object(m)) => m.into_iter().collect(),
|
||||
_ => return,
|
||||
};
|
||||
|
||||
for (full_key, cached_val) in cache {
|
||||
if state.batch_exhausted(batch_size) {
|
||||
return;
|
||||
}
|
||||
state.objects_scanned += 1;
|
||||
let Some(cached_etag) = cached_val.as_str() else {
|
||||
continue;
|
||||
};
|
||||
let Some(info) = entries.get(&full_key) else {
|
||||
continue;
|
||||
};
|
||||
let Some(stored) = stored_etag(&info.entry) else {
|
||||
continue;
|
||||
};
|
||||
if cached_etag != stored {
|
||||
state.etag_cache_inconsistencies += 1;
|
||||
state.push_issue(
|
||||
"etag_cache_inconsistency",
|
||||
bucket,
|
||||
&full_key,
|
||||
format!("cached_etag={} index_etag={}", cached_etag, stored),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;

    // Hex MD5 of `bytes`, matching the ETag scheme the scanner compares against.
    fn md5_hex(bytes: &[u8]) -> String {
        myfsio_crypto::hashing::md5_bytes(bytes)
    }

    // Writes a minimal bucket metadata index mapping each object name to an
    // entry carrying only its `__etag__`.
    fn write_index(meta_dir: &Path, entries: &[(&str, &str)]) {
        fs::create_dir_all(meta_dir).unwrap();
        let mut map = Map::new();
        for (name, etag) in entries {
            map.insert(
                name.to_string(),
                json!({ "metadata": { "__etag__": etag } }),
            );
        }
        fs::write(
            meta_dir.join(INDEX_FILE),
            serde_json::to_string(&Value::Object(map)).unwrap(),
        )
        .unwrap();
    }

    // End-to-end scan over a bucket seeded with exactly one instance of each
    // issue class: corruption, phantom metadata, orphan file, stale version
    // artifacts (both directions), and an inconsistent ETag cache entry.
    #[test]
    fn scan_detects_each_issue_type() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let bucket = "testbucket";
        let bucket_path = root.join(bucket);
        let meta_root = root
            .join(SYSTEM_ROOT)
            .join(SYSTEM_BUCKETS_DIR)
            .join(bucket)
            .join(BUCKET_META_DIR);
        fs::create_dir_all(&bucket_path).unwrap();

        // Healthy object: content matches its indexed ETag.
        let clean_bytes = b"clean file contents";
        let clean_etag = md5_hex(clean_bytes);
        fs::write(bucket_path.join("clean.txt"), clean_bytes).unwrap();

        // Corrupted object: content will not match the zeroed ETag below.
        let corrupted_bytes = b"actual content";
        fs::write(bucket_path.join("corrupted.txt"), corrupted_bytes).unwrap();

        // Orphan: file on disk with no index entry.
        fs::write(bucket_path.join("orphan.txt"), b"no metadata").unwrap();

        // "phantom.txt" is indexed but never written to disk.
        write_index(
            &meta_root,
            &[
                ("clean.txt", &clean_etag),
                ("corrupted.txt", "00000000000000000000000000000000"),
                ("phantom.txt", "deadbeefdeadbeefdeadbeefdeadbeef"),
            ],
        );

        // Two stale version artifacts: a .bin without .json and a .json
        // without .bin.
        let versions_root = root
            .join(SYSTEM_ROOT)
            .join(SYSTEM_BUCKETS_DIR)
            .join(bucket)
            .join(BUCKET_VERSIONS_DIR)
            .join("someobject");
        fs::create_dir_all(&versions_root).unwrap();
        fs::write(versions_root.join("v1.bin"), b"orphan bin").unwrap();
        fs::write(versions_root.join("v2.json"), b"{}").unwrap();

        // ETag cache that disagrees with the index for clean.txt.
        let etag_index = root
            .join(SYSTEM_ROOT)
            .join(SYSTEM_BUCKETS_DIR)
            .join(bucket)
            .join("etag_index.json");
        fs::write(
            &etag_index,
            serde_json::to_string(&json!({ "clean.txt": "stale-cached-etag" })).unwrap(),
        )
        .unwrap();

        // Batch size large enough that nothing is cut off mid-scan.
        let state = scan_all_buckets(root, 10_000);

        assert_eq!(state.corrupted_objects, 1, "corrupted");
        assert_eq!(state.phantom_metadata, 1, "phantom");
        assert_eq!(state.orphaned_objects, 1, "orphaned");
        assert_eq!(state.stale_versions, 2, "stale versions");
        assert_eq!(state.etag_cache_inconsistencies, 1, "etag cache");
        assert_eq!(state.buckets_scanned, 1);
        assert!(
            state.errors.is_empty(),
            "unexpected errors: {:?}",
            state.errors
        );
    }

    // The hidden system directory must never be treated as a user bucket.
    #[test]
    fn skips_system_root_as_bucket() {
        let tmp = tempfile::tempdir().unwrap();
        fs::create_dir_all(tmp.path().join(SYSTEM_ROOT).join("config")).unwrap();
        let state = scan_all_buckets(tmp.path(), 100);
        assert_eq!(state.buckets_scanned, 0);
    }
}
|
||||
637
crates/myfsio-server/src/services/lifecycle.rs
Normal file
637
crates/myfsio-server/src/services/lifecycle.rs
Normal file
@@ -0,0 +1,637 @@
|
||||
use chrono::{DateTime, Duration, Utc};
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{json, Value};
|
||||
use std::collections::VecDeque;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Tunables for the background lifecycle service.
pub struct LifecycleConfig {
    /// Seconds between background lifecycle evaluation cycles.
    pub interval_seconds: u64,
    /// Maximum number of execution records kept per bucket history file;
    /// older records are truncated.
    pub max_history_per_bucket: usize,
}
|
||||
|
||||
impl Default for LifecycleConfig {
    /// Defaults: evaluate hourly, keep the 50 most recent records per bucket.
    fn default() -> Self {
        Self {
            interval_seconds: 3600,
            max_history_per_bucket: 50,
        }
    }
}
|
||||
|
||||
/// One persisted record of a lifecycle run for a single bucket, serialized
/// into `lifecycle_history.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleExecutionRecord {
    /// Unix time of the run, in seconds (with millisecond precision).
    pub timestamp: f64,
    pub bucket_name: String,
    pub objects_deleted: u64,
    pub versions_deleted: u64,
    pub uploads_aborted: u64,
    // `default` keeps older history files (without this field) deserializable.
    #[serde(default)]
    pub errors: Vec<String>,
    pub execution_time_seconds: f64,
}
|
||||
|
||||
/// In-memory accumulator for the outcome of applying all lifecycle rules to
/// one bucket during a single cycle.
#[derive(Debug, Clone, Default)]
struct BucketLifecycleResult {
    bucket_name: String,
    objects_deleted: u64,
    versions_deleted: u64,
    uploads_aborted: u64,
    errors: Vec<String>,
    execution_time_seconds: f64,
}
|
||||
|
||||
/// Normalized form of one S3-style lifecycle rule, extracted from either the
/// JSON or XML representation. Absent actions stay `None`/empty.
#[derive(Debug, Clone, Default)]
struct ParsedLifecycleRule {
    /// Rule status; only "Enabled" rules are applied.
    status: String,
    /// Key prefix filter; empty string matches every key.
    prefix: String,
    /// Expire current objects older than this many days.
    expiration_days: Option<u64>,
    /// Expire current objects last modified before this instant
    /// (used only when `expiration_days` is absent).
    expiration_date: Option<DateTime<Utc>>,
    /// Delete noncurrent (archived) versions older than this many days.
    noncurrent_days: Option<u64>,
    /// Abort multipart uploads initiated more than this many days ago.
    abort_incomplete_multipart_days: Option<u64>,
}
|
||||
|
||||
/// Periodically applies per-bucket lifecycle rules (object expiration,
/// noncurrent-version cleanup, multipart-upload abortion) against the
/// filesystem storage backend.
pub struct LifecycleService {
    storage: Arc<FsStorageBackend>,
    /// Root of the on-disk storage tree; used for direct access to the
    /// version store and history files.
    storage_root: PathBuf,
    config: LifecycleConfig,
    /// Guard preventing overlapping cycles (see `run_cycle`).
    running: Arc<RwLock<bool>>,
}
|
||||
|
||||
impl LifecycleService {
    /// Creates a service over `storage`, rooted at `storage_root`.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        storage_root: impl Into<PathBuf>,
        config: LifecycleConfig,
    ) -> Self {
        Self {
            storage,
            storage_root: storage_root.into(),
            config,
            running: Arc::new(RwLock::new(false)),
        }
    }

    /// Runs one full lifecycle evaluation. Returns an error if a cycle is
    /// already in flight; otherwise returns the summary JSON produced by
    /// `evaluate_rules`.
    ///
    /// NOTE(review): if `evaluate_rules` were to panic, the `running` flag
    /// would stay `true` and block all future cycles — confirm whether a
    /// drop-guard is wanted here.
    pub async fn run_cycle(&self) -> Result<Value, String> {
        {
            // Scope the write guard so it is released before the long-running
            // evaluation starts.
            let mut running = self.running.write().await;
            if *running {
                return Err("Lifecycle already running".to_string());
            }
            *running = true;
        }

        let result = self.evaluate_rules().await;
        *self.running.write().await = false;
        Ok(result)
    }

    /// Applies every enabled rule to every bucket and aggregates the totals.
    ///
    /// Buckets with no lifecycle configuration (or no parseable rules) are
    /// skipped entirely. A bucket's result is persisted to history and
    /// included in the output only when it did some work or produced errors;
    /// buckets whose config could not be loaded are recorded too (error path).
    async fn evaluate_rules(&self) -> Value {
        let buckets = match self.storage.list_buckets().await {
            Ok(buckets) => buckets,
            Err(err) => return json!({ "error": err.to_string() }),
        };

        let mut bucket_results = Vec::new();
        let mut total_objects_deleted = 0u64;
        let mut total_versions_deleted = 0u64;
        let mut total_uploads_aborted = 0u64;
        let mut errors = Vec::new();

        for bucket in &buckets {
            let started_at = std::time::Instant::now();
            let mut result = BucketLifecycleResult {
                bucket_name: bucket.name.clone(),
                ..Default::default()
            };

            let config = match self.storage.get_bucket_config(&bucket.name).await {
                Ok(config) => config,
                Err(err) => {
                    // Config failure: record the error and move on.
                    result.errors.push(err.to_string());
                    result.execution_time_seconds = started_at.elapsed().as_secs_f64();
                    self.append_history(&result);
                    errors.extend(result.errors.clone());
                    bucket_results.push(result);
                    continue;
                }
            };
            let Some(lifecycle) = config.lifecycle.as_ref() else {
                continue;
            };
            let rules = parse_lifecycle_rules(lifecycle);
            if rules.is_empty() {
                continue;
            }

            for rule in &rules {
                if rule.status != "Enabled" {
                    continue;
                }
                // Each rule application returns Some(err) only on a
                // rule-level failure; per-object failures are already
                // collected inside `result.errors`.
                if let Some(err) = self
                    .apply_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_noncurrent_expiration_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
                if let Some(err) = self
                    .apply_abort_incomplete_multipart_rule(&bucket.name, rule, &mut result)
                    .await
                {
                    result.errors.push(err);
                }
            }

            result.execution_time_seconds = started_at.elapsed().as_secs_f64();
            // Persist and report only buckets where something happened.
            if result.objects_deleted > 0
                || result.versions_deleted > 0
                || result.uploads_aborted > 0
                || !result.errors.is_empty()
            {
                total_objects_deleted += result.objects_deleted;
                total_versions_deleted += result.versions_deleted;
                total_uploads_aborted += result.uploads_aborted;
                errors.extend(result.errors.clone());
                self.append_history(&result);
                bucket_results.push(result);
            }
        }

        json!({
            "objects_deleted": total_objects_deleted,
            "versions_deleted": total_versions_deleted,
            "multipart_aborted": total_uploads_aborted,
            "buckets_evaluated": buckets.len(),
            "results": bucket_results.iter().map(result_to_json).collect::<Vec<_>>(),
            "errors": errors,
        })
    }

    /// Deletes current objects older than the rule's cutoff (Days relative to
    /// now, or a fixed Date). Lists at most 10_000 keys under the rule's
    /// prefix. Returns Some(message) only when the listing itself fails;
    /// per-object delete failures are appended to `result.errors`.
    async fn apply_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        // Days takes precedence over Date when both are present.
        let cutoff = if let Some(days) = rule.expiration_days {
            Some(Utc::now() - Duration::days(days as i64))
        } else {
            rule.expiration_date
        };
        let Some(cutoff) = cutoff else {
            return None;
        };

        let params = myfsio_common::types::ListParams {
            max_keys: 10_000,
            prefix: if rule.prefix.is_empty() {
                None
            } else {
                Some(rule.prefix.clone())
            },
            ..Default::default()
        };
        match self.storage.list_objects(bucket, &params).await {
            Ok(objects) => {
                for object in &objects.objects {
                    if object.last_modified < cutoff {
                        if let Err(err) = self.storage.delete_object(bucket, &object.key).await {
                            result
                                .errors
                                .push(format!("{}:{}: {}", bucket, object.key, err));
                        } else {
                            result.objects_deleted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!("Failed to list objects for {}: {}", bucket, err)),
        }
    }

    /// Deletes archived (noncurrent) versions whose manifest `archived_at` is
    /// older than `NoncurrentDays`, walking the bucket's version store on
    /// disk. Removes the version's `.bin` data and `.json` manifest.
    /// Returns Some(message) when a directory read fails (aborting the walk);
    /// unreadable or unparseable manifests are skipped.
    async fn apply_noncurrent_expiration_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.noncurrent_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        let versions_root = version_root_for_bucket(&self.storage_root, bucket);
        if !versions_root.exists() {
            return None;
        }

        // Breadth-first walk of the version tree.
        let mut stack = VecDeque::from([versions_root]);
        while let Some(current) = stack.pop_front() {
            let entries = match std::fs::read_dir(&current) {
                Ok(entries) => entries,
                Err(err) => return Some(err.to_string()),
            };
            for entry in entries.flatten() {
                let file_type = match entry.file_type() {
                    Ok(file_type) => file_type,
                    Err(_) => continue,
                };
                if file_type.is_dir() {
                    stack.push_back(entry.path());
                    continue;
                }
                // Only manifests drive deletion; data files are removed by name.
                if entry.path().extension().and_then(|ext| ext.to_str()) != Some("json") {
                    continue;
                }
                let contents = match std::fs::read_to_string(entry.path()) {
                    Ok(contents) => contents,
                    Err(_) => continue,
                };
                let Ok(manifest) = serde_json::from_str::<Value>(&contents) else {
                    continue;
                };
                let key = manifest
                    .get("key")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default()
                    .to_string();
                if !rule.prefix.is_empty() && !key.starts_with(&rule.prefix) {
                    continue;
                }
                let archived_at = manifest
                    .get("archived_at")
                    .and_then(|value| value.as_str())
                    .and_then(|value| DateTime::parse_from_rfc3339(value).ok())
                    .map(|value| value.with_timezone(&Utc));
                // Keep versions without a timestamp or newer than the cutoff.
                if archived_at.is_none() || archived_at.unwrap() >= cutoff {
                    continue;
                }
                let version_id = manifest
                    .get("version_id")
                    .and_then(|value| value.as_str())
                    .unwrap_or_default();
                // Best-effort removal: missing files are ignored.
                let data_path = entry.path().with_file_name(format!("{}.bin", version_id));
                let _ = std::fs::remove_file(&data_path);
                let _ = std::fs::remove_file(entry.path());
                result.versions_deleted += 1;
            }
        }
        None
    }

    /// Aborts multipart uploads initiated more than `DaysAfterInitiation`
    /// days ago. Returns Some(message) only when the upload listing fails;
    /// per-upload abort failures go into `result.errors`.
    async fn apply_abort_incomplete_multipart_rule(
        &self,
        bucket: &str,
        rule: &ParsedLifecycleRule,
        result: &mut BucketLifecycleResult,
    ) -> Option<String> {
        let Some(days) = rule.abort_incomplete_multipart_days else {
            return None;
        };
        let cutoff = Utc::now() - Duration::days(days as i64);
        match self.storage.list_multipart_uploads(bucket).await {
            Ok(uploads) => {
                for upload in &uploads {
                    if upload.initiated < cutoff {
                        if let Err(err) = self
                            .storage
                            .abort_multipart(bucket, &upload.upload_id)
                            .await
                        {
                            result
                                .errors
                                .push(format!("abort {}: {}", upload.upload_id, err));
                        } else {
                            result.uploads_aborted += 1;
                        }
                    }
                }
                None
            }
            Err(err) => Some(format!(
                "Failed to list multipart uploads for {}: {}",
                bucket, err
            )),
        }
    }

    /// Prepends a new execution record to the bucket's history file,
    /// truncating to `max_history_per_bucket`. Write failures are ignored
    /// (history is best-effort).
    fn append_history(&self, result: &BucketLifecycleResult) {
        let path = lifecycle_history_path(&self.storage_root, &result.bucket_name);
        let mut history = load_history(&path);
        // Newest record first.
        history.insert(
            0,
            LifecycleExecutionRecord {
                timestamp: Utc::now().timestamp_millis() as f64 / 1000.0,
                bucket_name: result.bucket_name.clone(),
                objects_deleted: result.objects_deleted,
                versions_deleted: result.versions_deleted,
                uploads_aborted: result.uploads_aborted,
                errors: result.errors.clone(),
                execution_time_seconds: result.execution_time_seconds,
            },
        );
        history.truncate(self.config.max_history_per_bucket);
        let payload = json!({
            "executions": history,
        });
        if let Some(parent) = path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &path,
            serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string()),
        );
    }

    /// Spawns the periodic background loop. The first `tick` completes
    /// immediately, so the first actual cycle happens one interval after
    /// startup; subsequent cycles run every `interval_seconds`.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(self.config.interval_seconds);
        tokio::spawn(async move {
            let mut timer = tokio::time::interval(interval);
            // Consume the immediate first tick.
            timer.tick().await;
            loop {
                timer.tick().await;
                tracing::info!("Lifecycle evaluation starting");
                match self.run_cycle().await {
                    Ok(result) => tracing::info!("Lifecycle cycle complete: {:?}", result),
                    Err(err) => tracing::warn!("Lifecycle cycle failed: {}", err),
                }
            }
        })
    }
}
|
||||
|
||||
pub fn read_history(storage_root: &Path, bucket_name: &str, limit: usize, offset: usize) -> Value {
|
||||
let path = lifecycle_history_path(storage_root, bucket_name);
|
||||
let mut history = load_history(&path);
|
||||
let total = history.len();
|
||||
let executions = history
|
||||
.drain(offset.min(total)..)
|
||||
.take(limit)
|
||||
.collect::<Vec<_>>();
|
||||
json!({
|
||||
"executions": executions,
|
||||
"total": total,
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"enabled": true,
|
||||
})
|
||||
}
|
||||
|
||||
fn load_history(path: &Path) -> Vec<LifecycleExecutionRecord> {
|
||||
if !path.exists() {
|
||||
return Vec::new();
|
||||
}
|
||||
std::fs::read_to_string(path)
|
||||
.ok()
|
||||
.and_then(|contents| serde_json::from_str::<Value>(&contents).ok())
|
||||
.and_then(|value| value.get("executions").cloned())
|
||||
.and_then(|value| serde_json::from_value::<Vec<LifecycleExecutionRecord>>(value).ok())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Path of the per-bucket lifecycle history file:
/// `<storage_root>/.myfsio.sys/buckets/<bucket_name>/lifecycle_history.json`.
fn lifecycle_history_path(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    for component in [".myfsio.sys", "buckets", bucket_name, "lifecycle_history.json"] {
        path.push(component);
    }
    path
}
|
||||
|
||||
/// Root directory of a bucket's version store:
/// `<storage_root>/.myfsio.sys/buckets/<bucket_name>/versions`.
fn version_root_for_bucket(storage_root: &Path, bucket_name: &str) -> PathBuf {
    let mut path = storage_root.to_path_buf();
    path.push(".myfsio.sys");
    path.push("buckets");
    path.push(bucket_name);
    path.push("versions");
    path
}
|
||||
|
||||
fn parse_lifecycle_rules(value: &Value) -> Vec<ParsedLifecycleRule> {
|
||||
match value {
|
||||
Value::String(raw) => parse_lifecycle_rules_from_string(raw),
|
||||
Value::Array(items) => items.iter().filter_map(parse_lifecycle_rule).collect(),
|
||||
Value::Object(map) => map
|
||||
.get("Rules")
|
||||
.and_then(|rules| rules.as_array())
|
||||
.map(|rules| rules.iter().filter_map(parse_lifecycle_rule).collect())
|
||||
.unwrap_or_default(),
|
||||
_ => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses lifecycle rules from a raw string. JSON is tried first; if the
/// string is not valid JSON, it is parsed as an S3-style XML
/// `LifecycleConfiguration`. An unparseable string yields no rules.
fn parse_lifecycle_rules_from_string(raw: &str) -> Vec<ParsedLifecycleRule> {
    if let Ok(json) = serde_json::from_str::<Value>(raw) {
        return parse_lifecycle_rules(&json);
    }
    let Ok(doc) = roxmltree::Document::parse(raw) else {
        return Vec::new();
    };
    // One ParsedLifecycleRule per <Rule> element, anywhere in the document.
    doc.descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "Rule")
        .map(|rule| ParsedLifecycleRule {
            // Missing <Status> is treated as Enabled.
            status: child_text(&rule, "Status").unwrap_or_else(|| "Enabled".to_string()),
            // Direct <Prefix> child wins; otherwise look inside <Filter>.
            prefix: child_text(&rule, "Prefix")
                .or_else(|| {
                    rule.descendants()
                        .find(|node| {
                            node.is_element()
                                && node.tag_name().name() == "Filter"
                                && node.children().any(|child| {
                                    child.is_element() && child.tag_name().name() == "Prefix"
                                })
                        })
                        .and_then(|filter| child_text(&filter, "Prefix"))
                })
                .unwrap_or_default(),
            // <Expiration><Days> — non-numeric values are dropped.
            expiration_days: rule
                .descendants()
                .find(|node| node.is_element() && node.tag_name().name() == "Expiration")
                .and_then(|expiration| child_text(&expiration, "Days"))
                .and_then(|value| value.parse::<u64>().ok()),
            // <Expiration><Date> as RFC 3339.
            expiration_date: rule
                .descendants()
                .find(|node| node.is_element() && node.tag_name().name() == "Expiration")
                .and_then(|expiration| child_text(&expiration, "Date"))
                .as_deref()
                .and_then(parse_datetime),
            // <NoncurrentVersionExpiration><NoncurrentDays>.
            noncurrent_days: rule
                .descendants()
                .find(|node| {
                    node.is_element() && node.tag_name().name() == "NoncurrentVersionExpiration"
                })
                .and_then(|node| child_text(&node, "NoncurrentDays"))
                .and_then(|value| value.parse::<u64>().ok()),
            // <AbortIncompleteMultipartUpload><DaysAfterInitiation>.
            abort_incomplete_multipart_days: rule
                .descendants()
                .find(|node| {
                    node.is_element() && node.tag_name().name() == "AbortIncompleteMultipartUpload"
                })
                .and_then(|node| child_text(&node, "DaysAfterInitiation"))
                .and_then(|value| value.parse::<u64>().ok()),
        })
        .collect()
}
|
||||
|
||||
/// Converts one JSON rule object into a `ParsedLifecycleRule`.
/// Returns `None` only when `value` is not a JSON object; missing fields
/// within an object fall back to defaults (`Status` → "Enabled",
/// `Prefix` → "", actions → `None`).
fn parse_lifecycle_rule(value: &Value) -> Option<ParsedLifecycleRule> {
    let map = value.as_object()?;
    Some(ParsedLifecycleRule {
        status: map
            .get("Status")
            .and_then(|value| value.as_str())
            .unwrap_or("Enabled")
            .to_string(),
        // Top-level "Prefix" wins; otherwise "Filter"."Prefix".
        prefix: map
            .get("Prefix")
            .and_then(|value| value.as_str())
            .or_else(|| {
                map.get("Filter")
                    .and_then(|value| value.get("Prefix"))
                    .and_then(|value| value.as_str())
            })
            .unwrap_or_default()
            .to_string(),
        expiration_days: map
            .get("Expiration")
            .and_then(|value| value.get("Days"))
            .and_then(|value| value.as_u64()),
        // "Expiration"."Date" as RFC 3339.
        expiration_date: map
            .get("Expiration")
            .and_then(|value| value.get("Date"))
            .and_then(|value| value.as_str())
            .and_then(parse_datetime),
        noncurrent_days: map
            .get("NoncurrentVersionExpiration")
            .and_then(|value| value.get("NoncurrentDays"))
            .and_then(|value| value.as_u64()),
        abort_incomplete_multipart_days: map
            .get("AbortIncompleteMultipartUpload")
            .and_then(|value| value.get("DaysAfterInitiation"))
            .and_then(|value| value.as_u64()),
    })
}
|
||||
|
||||
fn parse_datetime(value: &str) -> Option<DateTime<Utc>> {
|
||||
DateTime::parse_from_rfc3339(value)
|
||||
.ok()
|
||||
.map(|value| value.with_timezone(&Utc))
|
||||
}
|
||||
|
||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||
node.children()
|
||||
.find(|child| child.is_element() && child.tag_name().name() == name)
|
||||
.and_then(|child| child.text())
|
||||
.map(|text| text.trim().to_string())
|
||||
.filter(|text| !text.is_empty())
|
||||
}
|
||||
|
||||
/// Serializes one bucket's lifecycle outcome for the cycle summary payload.
fn result_to_json(result: &BucketLifecycleResult) -> Value {
    json!({
        "bucket_name": result.bucket_name,
        "objects_deleted": result.objects_deleted,
        "versions_deleted": result.versions_deleted,
        "uploads_aborted": result.uploads_aborted,
        "errors": result.errors,
        "execution_time_seconds": result.execution_time_seconds,
    })
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;

    // XML parsing: every supported action on one rule round-trips into
    // ParsedLifecycleRule.
    #[test]
    fn parses_rules_from_xml() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <Status>Enabled</Status>
        <Filter><Prefix>logs/</Prefix></Filter>
        <Expiration><Days>10</Days></Expiration>
        <NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
        <AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
    </Rule>
</LifecycleConfiguration>"#;
        let rules = parse_lifecycle_rules(&Value::String(xml.to_string()));
        assert_eq!(rules.len(), 1);
        assert_eq!(rules[0].prefix, "logs/");
        assert_eq!(rules[0].expiration_days, Some(10));
        assert_eq!(rules[0].noncurrent_days, Some(30));
        assert_eq!(rules[0].abort_incomplete_multipart_days, Some(7));
    }

    // End-to-end: put two object versions, back-date the archived manifest,
    // run a NoncurrentDays=30 rule, and expect the old version deleted and
    // a history record written.
    #[tokio::test]
    async fn run_cycle_writes_history_and_deletes_noncurrent_versions() {
        let tmp = tempfile::tempdir().unwrap();
        let storage = Arc::new(FsStorageBackend::new(tmp.path().to_path_buf()));
        storage.create_bucket("docs").await.unwrap();
        storage.set_versioning("docs", true).await.unwrap();

        // Two puts to the same key: the first becomes a noncurrent version.
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"old".to_vec())),
                None,
            )
            .await
            .unwrap();
        storage
            .put_object(
                "docs",
                "logs/file.txt",
                Box::pin(std::io::Cursor::new(b"new".to_vec())),
                None,
            )
            .await
            .unwrap();

        // Locate the archived version's manifest on disk.
        let versions_root = version_root_for_bucket(tmp.path(), "docs")
            .join("logs")
            .join("file.txt");
        let manifest = std::fs::read_dir(&versions_root)
            .unwrap()
            .flatten()
            .find(|entry| entry.path().extension().and_then(|ext| ext.to_str()) == Some("json"))
            .unwrap()
            .path();
        // Rewrite it with an archived_at 45 days in the past so the
        // 30-day rule below expires it.
        let old_manifest = json!({
            "version_id": "ver-1",
            "key": "logs/file.txt",
            "size": 3,
            "archived_at": (Utc::now() - Duration::days(45)).to_rfc3339(),
            "etag": "etag",
        });
        std::fs::write(&manifest, serde_json::to_string(&old_manifest).unwrap()).unwrap();
        std::fs::write(manifest.with_file_name("ver-1.bin"), b"old").unwrap();

        let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <Status>Enabled</Status>
        <Filter><Prefix>logs/</Prefix></Filter>
        <NoncurrentVersionExpiration><NoncurrentDays>30</NoncurrentDays></NoncurrentVersionExpiration>
    </Rule>
</LifecycleConfiguration>"#;
        let mut config = storage.get_bucket_config("docs").await.unwrap();
        config.lifecycle = Some(Value::String(lifecycle_xml.to_string()));
        storage.set_bucket_config("docs", &config).await.unwrap();

        let service =
            LifecycleService::new(storage.clone(), tmp.path(), LifecycleConfig::default());
        let result = service.run_cycle().await.unwrap();
        assert_eq!(result["versions_deleted"], 1);

        // The cycle must also have persisted one history record.
        let history = read_history(tmp.path(), "docs", 50, 0);
        assert_eq!(history["total"], 1);
        assert_eq!(history["executions"][0]["versions_deleted"], 1);
    }
}
|
||||
368
crates/myfsio-server/src/services/metrics.rs
Normal file
368
crates/myfsio-server/src/services/metrics.rs
Normal file
@@ -0,0 +1,368 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use parking_lot::Mutex;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{json, Value};
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
/// Cap on retained latency samples per stats bucket; once full, new samples
/// replace random existing ones (see `OperationStats::record`).
const MAX_LATENCY_SAMPLES: usize = 5000;
|
||||
|
||||
/// Tunables for the request-metrics service.
pub struct MetricsConfig {
    /// Minutes between snapshot rollovers.
    pub interval_minutes: u64,
    /// Hours of snapshots kept; older ones are pruned on load.
    pub retention_hours: u64,
}
|
||||
|
||||
impl Default for MetricsConfig {
    /// Defaults: snapshot every 5 minutes, retain 24 hours of history.
    fn default() -> Self {
        Self {
            interval_minutes: 5,
            retention_hours: 24,
        }
    }
}
|
||||
|
||||
/// Running aggregates for one class of operations (per HTTP method or per
/// endpoint type), plus a bounded latency sample set for percentiles.
#[derive(Debug, Clone)]
struct OperationStats {
    count: u64,
    success_count: u64,
    error_count: u64,
    latency_sum_ms: f64,
    // Starts at +infinity so the first recorded latency becomes the minimum.
    latency_min_ms: f64,
    latency_max_ms: f64,
    bytes_in: u64,
    bytes_out: u64,
    // At most MAX_LATENCY_SAMPLES entries; sorted on demand in `to_json`.
    latency_samples: Vec<f64>,
}
|
||||
|
||||
impl Default for OperationStats {
    /// Zeroed counters; `latency_min_ms` starts at +infinity so any first
    /// sample replaces it (and `to_json` reports 0.0 when still infinite).
    fn default() -> Self {
        Self {
            count: 0,
            success_count: 0,
            error_count: 0,
            latency_sum_ms: 0.0,
            latency_min_ms: f64::INFINITY,
            latency_max_ms: 0.0,
            bytes_in: 0,
            bytes_out: 0,
            latency_samples: Vec::new(),
        }
    }
}
|
||||
|
||||
impl OperationStats {
    /// Folds one request into the aggregates and the latency sample set.
    fn record(&mut self, latency_ms: f64, success: bool, bytes_in: u64, bytes_out: u64) {
        self.count += 1;
        if success {
            self.success_count += 1;
        } else {
            self.error_count += 1;
        }
        self.latency_sum_ms += latency_ms;
        if latency_ms < self.latency_min_ms {
            self.latency_min_ms = latency_ms;
        }
        if latency_ms > self.latency_max_ms {
            self.latency_max_ms = latency_ms;
        }
        self.bytes_in += bytes_in;
        self.bytes_out += bytes_out;

        if self.latency_samples.len() < MAX_LATENCY_SAMPLES {
            self.latency_samples.push(latency_ms);
        } else {
            // Reservoir sampling (Algorithm R): `count` was incremented
            // above, so j is uniform over [0, items seen so far); the new
            // sample replaces slot j only when j falls inside the reservoir,
            // keeping every observation equally likely to be retained.
            let mut rng = rand::thread_rng();
            let j = rng.gen_range(0..self.count as usize);
            if j < MAX_LATENCY_SAMPLES {
                self.latency_samples[j] = latency_ms;
            }
        }
    }

    /// Percentile `p` (0..=100) of an already-sorted slice, using linear
    /// interpolation between the two nearest ranks. Empty input yields 0.0.
    fn compute_percentile(sorted: &[f64], p: f64) -> f64 {
        if sorted.is_empty() {
            return 0.0;
        }
        // Fractional rank k, split into floor index f, ceiling index c
        // (clamped to the last element), and interpolation weight d.
        let k = (sorted.len() - 1) as f64 * (p / 100.0);
        let f = k.floor() as usize;
        let c = (f + 1).min(sorted.len() - 1);
        let d = k - f as f64;
        sorted[f] + d * (sorted[c] - sorted[f])
    }

    /// Renders the aggregates as a JSON object, with latencies rounded to
    /// two decimals and percentiles computed from the sample reservoir.
    fn to_json(&self) -> Value {
        let avg = if self.count > 0 {
            self.latency_sum_ms / self.count as f64
        } else {
            0.0
        };
        // An untouched min is +infinity — report 0.0 instead.
        let min = if self.latency_min_ms.is_infinite() {
            0.0
        } else {
            self.latency_min_ms
        };
        let mut sorted = self.latency_samples.clone();
        // total_cmp is unavailable here only stylistically; partial_cmp with
        // an Equal fallback tolerates NaN without panicking.
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        json!({
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "latency_avg_ms": round2(avg),
            "latency_min_ms": round2(min),
            "latency_max_ms": round2(self.latency_max_ms),
            "latency_p50_ms": round2(Self::compute_percentile(&sorted, 50.0)),
            "latency_p95_ms": round2(Self::compute_percentile(&sorted, 95.0)),
            "latency_p99_ms": round2(Self::compute_percentile(&sorted, 99.0)),
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
        })
    }
}
|
||||
|
||||
/// Rounds `v` to two decimal places using `f64::round` semantics
/// (ties away from zero).
fn round2(v: f64) -> f64 {
    let scaled = (v * 100.0).round();
    scaled / 100.0
}
|
||||
|
||||
/// One persisted metrics window, serialized into `operation_metrics.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsSnapshot {
    /// End of the window (when the snapshot was taken).
    pub timestamp: DateTime<Utc>,
    /// Length of the window the aggregates cover, in seconds.
    pub window_seconds: u64,
    /// Per-HTTP-method aggregates (already JSON-rendered OperationStats).
    pub by_method: HashMap<String, Value>,
    /// Per-endpoint-type aggregates.
    pub by_endpoint: HashMap<String, Value>,
    /// Request counts keyed by status class ("2xx", "4xx", ...).
    pub by_status_class: HashMap<String, u64>,
    /// Counts keyed by application error code.
    pub error_codes: HashMap<String, u64>,
    /// Window-wide aggregates across all methods/endpoints.
    pub totals: Value,
}
|
||||
|
||||
/// Mutex-protected mutable state of the metrics service: the live
/// accumulation window plus previously persisted snapshots.
struct Inner {
    by_method: HashMap<String, OperationStats>,
    by_endpoint: HashMap<String, OperationStats>,
    by_status_class: HashMap<String, u64>,
    error_codes: HashMap<String, u64>,
    totals: OperationStats,
    /// Unix time (seconds) when the current window began.
    window_start: f64,
    /// Historical snapshots, pruned to the configured retention on load.
    snapshots: Vec<MetricsSnapshot>,
}
|
||||
|
||||
/// Collects per-request operation metrics and persists periodic snapshots
/// to `<storage_root>/.myfsio.sys/config/operation_metrics.json`.
pub struct MetricsService {
    config: MetricsConfig,
    // parking_lot::Mutex: short critical sections, no poisoning.
    inner: Arc<Mutex<Inner>>,
    /// On-disk location of the persisted snapshot history.
    snapshots_path: PathBuf,
}
|
||||
|
||||
impl MetricsService {
|
||||
pub fn new(storage_root: &Path, config: MetricsConfig) -> Self {
|
||||
let snapshots_path = storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("operation_metrics.json");
|
||||
|
||||
let mut snapshots: Vec<MetricsSnapshot> = if snapshots_path.exists() {
|
||||
std::fs::read_to_string(&snapshots_path)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str::<Value>(&s).ok())
|
||||
.and_then(|v| {
|
||||
v.get("snapshots").and_then(|s| {
|
||||
serde_json::from_value::<Vec<MetricsSnapshot>>(s.clone()).ok()
|
||||
})
|
||||
})
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let cutoff = now_secs() - (config.retention_hours * 3600) as f64;
|
||||
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
|
||||
|
||||
Self {
|
||||
config,
|
||||
inner: Arc::new(Mutex::new(Inner {
|
||||
by_method: HashMap::new(),
|
||||
by_endpoint: HashMap::new(),
|
||||
by_status_class: HashMap::new(),
|
||||
error_codes: HashMap::new(),
|
||||
totals: OperationStats::default(),
|
||||
window_start: now_secs(),
|
||||
snapshots,
|
||||
})),
|
||||
snapshots_path,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record_request(
|
||||
&self,
|
||||
method: &str,
|
||||
endpoint_type: &str,
|
||||
status_code: u16,
|
||||
latency_ms: f64,
|
||||
bytes_in: u64,
|
||||
bytes_out: u64,
|
||||
error_code: Option<&str>,
|
||||
) {
|
||||
let success = (200..400).contains(&status_code);
|
||||
let status_class = format!("{}xx", status_code / 100);
|
||||
|
||||
let mut inner = self.inner.lock();
|
||||
inner
|
||||
.by_method
|
||||
.entry(method.to_string())
|
||||
.or_default()
|
||||
.record(latency_ms, success, bytes_in, bytes_out);
|
||||
inner
|
||||
.by_endpoint
|
||||
.entry(endpoint_type.to_string())
|
||||
.or_default()
|
||||
.record(latency_ms, success, bytes_in, bytes_out);
|
||||
*inner.by_status_class.entry(status_class).or_insert(0) += 1;
|
||||
if let Some(code) = error_code {
|
||||
*inner.error_codes.entry(code.to_string()).or_insert(0) += 1;
|
||||
}
|
||||
inner
|
||||
.totals
|
||||
.record(latency_ms, success, bytes_in, bytes_out);
|
||||
}
|
||||
|
||||
pub fn get_current_stats(&self) -> Value {
|
||||
let inner = self.inner.lock();
|
||||
let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
|
||||
let by_method: HashMap<String, Value> = inner
|
||||
.by_method
|
||||
.iter()
|
||||
.map(|(k, v)| (k.clone(), v.to_json()))
|
||||
.collect();
|
||||
let by_endpoint: HashMap<String, Value> = inner
|
||||
.by_endpoint
|
||||
.iter()
|
||||
.map(|(k, v)| (k.clone(), v.to_json()))
|
||||
.collect();
|
||||
json!({
|
||||
"timestamp": Utc::now().to_rfc3339(),
|
||||
"window_seconds": window_seconds,
|
||||
"by_method": by_method,
|
||||
"by_endpoint": by_endpoint,
|
||||
"by_status_class": inner.by_status_class,
|
||||
"error_codes": inner.error_codes,
|
||||
"totals": inner.totals.to_json(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_history(&self, hours: Option<u64>) -> Vec<MetricsSnapshot> {
|
||||
let inner = self.inner.lock();
|
||||
let mut snapshots = inner.snapshots.clone();
|
||||
if let Some(h) = hours {
|
||||
let cutoff = now_secs() - (h * 3600) as f64;
|
||||
snapshots.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
|
||||
}
|
||||
snapshots
|
||||
}
|
||||
|
||||
pub fn snapshot(&self) -> Value {
|
||||
let current = self.get_current_stats();
|
||||
let history = self.get_history(None);
|
||||
json!({
|
||||
"enabled": true,
|
||||
"current": current,
|
||||
"snapshots": history,
|
||||
})
|
||||
}
|
||||
|
||||
fn take_snapshot(&self) {
|
||||
let snapshot = {
|
||||
let mut inner = self.inner.lock();
|
||||
let window_seconds = (now_secs() - inner.window_start).max(0.0) as u64;
|
||||
|
||||
let by_method: HashMap<String, Value> = inner
|
||||
.by_method
|
||||
.iter()
|
||||
.map(|(k, v)| (k.clone(), v.to_json()))
|
||||
.collect();
|
||||
let by_endpoint: HashMap<String, Value> = inner
|
||||
.by_endpoint
|
||||
.iter()
|
||||
.map(|(k, v)| (k.clone(), v.to_json()))
|
||||
.collect();
|
||||
|
||||
let snap = MetricsSnapshot {
|
||||
timestamp: Utc::now(),
|
||||
window_seconds,
|
||||
by_method,
|
||||
by_endpoint,
|
||||
by_status_class: inner.by_status_class.clone(),
|
||||
error_codes: inner.error_codes.clone(),
|
||||
totals: inner.totals.to_json(),
|
||||
};
|
||||
|
||||
inner.snapshots.push(snap.clone());
|
||||
let cutoff = now_secs() - (self.config.retention_hours * 3600) as f64;
|
||||
inner
|
||||
.snapshots
|
||||
.retain(|s| s.timestamp.timestamp() as f64 > cutoff);
|
||||
|
||||
inner.by_method.clear();
|
||||
inner.by_endpoint.clear();
|
||||
inner.by_status_class.clear();
|
||||
inner.error_codes.clear();
|
||||
inner.totals = OperationStats::default();
|
||||
inner.window_start = now_secs();
|
||||
|
||||
snap
|
||||
};
|
||||
let _ = snapshot;
|
||||
self.save_snapshots();
|
||||
}
|
||||
|
||||
fn save_snapshots(&self) {
|
||||
let snapshots = { self.inner.lock().snapshots.clone() };
|
||||
if let Some(parent) = self.snapshots_path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
let data = json!({ "snapshots": snapshots });
|
||||
let _ = std::fs::write(
|
||||
&self.snapshots_path,
|
||||
serde_json::to_string_pretty(&data).unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
|
||||
let interval = std::time::Duration::from_secs(self.config.interval_minutes * 60);
|
||||
tokio::spawn(async move {
|
||||
let mut timer = tokio::time::interval(interval);
|
||||
timer.tick().await;
|
||||
loop {
|
||||
timer.tick().await;
|
||||
self.take_snapshot();
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Classify a request path into a coarse endpoint type for metrics:
/// "ui", "kms", "service", "bucket", or "object".
///
/// NOTE(review): the `/ui` / `/kms` / `/myfsio` checks are prefix matches,
/// so e.g. "/uixyz" also classifies as "ui" — assumed intentional for
/// mounted route trees; confirm against the router.
pub fn classify_endpoint(path: &str) -> &'static str {
    if path.is_empty() || path == "/" {
        return "service";
    }
    let trimmed = path.trim_end_matches('/');
    if trimmed.starts_with("/ui") {
        return "ui";
    }
    if trimmed.starts_with("/kms") {
        return "kms";
    }
    if trimmed.starts_with("/myfsio") {
        return "service";
    }
    let rest = trimmed.trim_start_matches('/');
    // Fix: a path made up solely of slashes (e.g. "//") names no bucket and
    // previously fell through to "bucket" via the [""] split artifact; it is
    // a service-level request. (The old `0 =>` arm was unreachable because
    // `split` always yields at least one item.)
    if rest.is_empty() {
        return "service";
    }
    // One path segment addresses a bucket; more address an object within it.
    if rest.split('/').count() == 1 {
        "bucket"
    } else {
        "object"
    }
}
|
||||
|
||||
/// Current Unix time as fractional seconds; 0.0 if the system clock is
/// before the epoch.
fn now_secs() -> f64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs_f64(),
        Err(_) => 0.0,
    }
}
|
||||
14
crates/myfsio-server/src/services/mod.rs
Normal file
14
crates/myfsio-server/src/services/mod.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
// Server-side service modules; each submodule implements one subsystem
// (metrics collection, event notifications, object lock, replication, etc.).
pub mod access_logging;
pub mod acl;
pub mod gc;
pub mod integrity;
pub mod lifecycle;
pub mod metrics;
pub mod notifications;
pub mod object_lock;
pub mod replication;
pub mod s3_client;
pub mod site_registry;
pub mod site_sync;
pub mod system_metrics;
pub mod website_domains;
||||
296
crates/myfsio-server/src/services/notifications.rs
Normal file
296
crates/myfsio-server/src/services/notifications.rs
Normal file
@@ -0,0 +1,296 @@
|
||||
use crate::state::AppState;
|
||||
use chrono::{DateTime, Utc};
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
use serde::Serialize;
|
||||
use serde_json::json;
|
||||
|
||||
/// Target of a webhook notification delivery.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WebhookDestination {
    /// Endpoint URL that receives the JSON event payload via HTTP POST.
    pub url: String,
}
|
||||
|
||||
/// One parsed `<WebhookConfiguration>` entry from a bucket's notification XML.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NotificationConfiguration {
    /// Configuration id (`<Id>`); a random UUID when absent from the XML.
    pub id: String,
    /// Event name patterns (e.g. `s3:ObjectCreated:*`); a trailing `*`
    /// makes the pattern a prefix wildcard.
    pub events: Vec<String>,
    /// Webhook endpoint matching events are delivered to.
    pub destination: WebhookDestination,
    /// Object-key prefix filter; empty string matches every key.
    pub prefix_filter: String,
    /// Object-key suffix filter; empty string matches every key.
    pub suffix_filter: String,
}
|
||||
|
||||
/// S3-compatible event record — one entry of the `Records` array POSTed to a
/// webhook. Field names are serde-renamed to match the S3 event JSON schema.
#[derive(Debug, Clone, Serialize)]
pub struct NotificationEvent {
    #[serde(rename = "eventVersion")]
    event_version: &'static str,
    #[serde(rename = "eventSource")]
    event_source: &'static str,
    #[serde(rename = "awsRegion")]
    aws_region: &'static str,
    #[serde(rename = "eventTime")]
    event_time: String,
    #[serde(rename = "eventName")]
    event_name: String,
    #[serde(rename = "userIdentity")]
    user_identity: serde_json::Value,
    #[serde(rename = "requestParameters")]
    request_parameters: serde_json::Value,
    #[serde(rename = "responseElements")]
    response_elements: serde_json::Value,
    // Nested `s3` payload: bucket and object details (built in
    // emit_notifications).
    s3: serde_json::Value,
}
|
||||
|
||||
impl NotificationConfiguration {
|
||||
pub fn matches_event(&self, event_name: &str, object_key: &str) -> bool {
|
||||
let event_match = self.events.iter().any(|pattern| {
|
||||
if let Some(prefix) = pattern.strip_suffix('*') {
|
||||
event_name.starts_with(prefix)
|
||||
} else {
|
||||
pattern == event_name
|
||||
}
|
||||
});
|
||||
if !event_match {
|
||||
return false;
|
||||
}
|
||||
if !self.prefix_filter.is_empty() && !object_key.starts_with(&self.prefix_filter) {
|
||||
return false;
|
||||
}
|
||||
if !self.suffix_filter.is_empty() && !object_key.ends_with(&self.suffix_filter) {
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a bucket notification XML document into webhook configurations.
///
/// Only `<WebhookConfiguration>` elements are recognized. Each must carry a
/// non-empty `<Destination><Url>`; a missing `<Id>` is replaced with a fresh
/// UUID. `<Filter><S3Key><FilterRule>` entries named `prefix`/`suffix`
/// populate the key filters. Returns an error string on malformed XML or a
/// missing destination URL.
pub fn parse_notification_configurations(
    xml: &str,
) -> Result<Vec<NotificationConfiguration>, String> {
    let doc = roxmltree::Document::parse(xml).map_err(|err| err.to_string())?;
    let mut configs = Vec::new();

    for webhook in doc
        .descendants()
        .filter(|node| node.is_element() && node.tag_name().name() == "WebhookConfiguration")
    {
        // A missing <Id> gets a generated UUID so every config is addressable.
        let id = child_text(&webhook, "Id").unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
        // Collect all non-empty <Event> texts, trimmed.
        let events = webhook
            .children()
            .filter(|node| node.is_element() && node.tag_name().name() == "Event")
            .filter_map(|node| node.text())
            .map(|text| text.trim().to_string())
            .filter(|text| !text.is_empty())
            .collect::<Vec<_>>();

        let destination = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Destination");
        let url = destination
            .as_ref()
            .and_then(|node| child_text(node, "Url"))
            .unwrap_or_default();
        // A webhook without a URL is unusable; reject the whole document.
        if url.trim().is_empty() {
            return Err("Destination URL is required".to_string());
        }

        // Optional <Filter><S3Key> prefix/suffix rules; later rules of the
        // same name overwrite earlier ones.
        let mut prefix_filter = String::new();
        let mut suffix_filter = String::new();
        if let Some(filter) = webhook
            .children()
            .find(|node| node.is_element() && node.tag_name().name() == "Filter")
        {
            if let Some(key) = filter
                .children()
                .find(|node| node.is_element() && node.tag_name().name() == "S3Key")
            {
                for rule in key
                    .children()
                    .filter(|node| node.is_element() && node.tag_name().name() == "FilterRule")
                {
                    let name = child_text(&rule, "Name").unwrap_or_default();
                    let value = child_text(&rule, "Value").unwrap_or_default();
                    if name == "prefix" {
                        prefix_filter = value;
                    } else if name == "suffix" {
                        suffix_filter = value;
                    }
                }
            }
        }

        configs.push(NotificationConfiguration {
            id,
            events,
            destination: WebhookDestination { url },
            prefix_filter,
            suffix_filter,
        });
    }

    Ok(configs)
}
|
||||
|
||||
pub fn emit_object_created(
|
||||
state: &AppState,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
size: u64,
|
||||
etag: Option<&str>,
|
||||
request_id: &str,
|
||||
source_ip: &str,
|
||||
user_identity: &str,
|
||||
operation: &str,
|
||||
) {
|
||||
emit_notifications(
|
||||
state.clone(),
|
||||
bucket.to_string(),
|
||||
key.to_string(),
|
||||
format!("s3:ObjectCreated:{}", operation),
|
||||
size,
|
||||
etag.unwrap_or_default().to_string(),
|
||||
request_id.to_string(),
|
||||
source_ip.to_string(),
|
||||
user_identity.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn emit_object_removed(
|
||||
state: &AppState,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
request_id: &str,
|
||||
source_ip: &str,
|
||||
user_identity: &str,
|
||||
operation: &str,
|
||||
) {
|
||||
emit_notifications(
|
||||
state.clone(),
|
||||
bucket.to_string(),
|
||||
key.to_string(),
|
||||
format!("s3:ObjectRemoved:{}", operation),
|
||||
0,
|
||||
String::new(),
|
||||
request_id.to_string(),
|
||||
source_ip.to_string(),
|
||||
user_identity.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Deliver one notification event for `bucket`/`key` to every matching
/// webhook configured on the bucket.
///
/// Runs entirely inside a spawned task: the caller is never blocked, and
/// delivery failures are logged but never surfaced. Buckets without a
/// string-valued notification config are silently skipped.
fn emit_notifications(
    state: AppState,
    bucket: String,
    key: String,
    event_name: String,
    size: u64,
    etag: String,
    request_id: String,
    source_ip: String,
    user_identity: String,
) {
    tokio::spawn(async move {
        let config = match state.storage.get_bucket_config(&bucket).await {
            Ok(config) => config,
            Err(_) => return,
        };
        // The notification config is stored as raw XML inside a JSON string;
        // any other shape means "no notifications configured".
        let raw = match config.notification {
            Some(serde_json::Value::String(raw)) => raw,
            _ => return,
        };
        let configs = match parse_notification_configurations(&raw) {
            Ok(configs) => configs,
            Err(err) => {
                tracing::warn!("Invalid notification config for bucket {}: {}", bucket, err);
                return;
            }
        };

        // One shared S3-style record (schema 1.0) reused for every webhook.
        let record = NotificationEvent {
            event_version: "2.1",
            event_source: "myfsio:s3",
            aws_region: "local",
            event_time: format_event_time(Utc::now()),
            event_name: event_name.clone(),
            // Empty identity/IP fall back to placeholder values.
            user_identity: json!({ "principalId": if user_identity.is_empty() { "ANONYMOUS" } else { &user_identity } }),
            request_parameters: json!({ "sourceIPAddress": if source_ip.is_empty() { "127.0.0.1" } else { &source_ip } }),
            response_elements: json!({
                "x-amz-request-id": request_id,
                "x-amz-id-2": request_id,
            }),
            s3: json!({
                "s3SchemaVersion": "1.0",
                "configurationId": "notification",
                "bucket": {
                    "name": bucket,
                    "ownerIdentity": { "principalId": "local" },
                    "arn": format!("arn:aws:s3:::{}", bucket),
                },
                "object": {
                    "key": key,
                    "size": size,
                    "eTag": etag,
                    "versionId": "null",
                    // Millisecond timestamp rendered as a 16-hex-digit sequencer.
                    "sequencer": format!("{:016X}", Utc::now().timestamp_millis()),
                }
            }),
        };
        let payload = json!({ "Records": [record] });
        let client = reqwest::Client::new();

        // Deliver sequentially to each matching destination; a failure for
        // one destination does not stop delivery to the others.
        for config in configs {
            if !config.matches_event(&event_name, &key) {
                continue;
            }
            let result = client
                .post(&config.destination.url)
                .header("content-type", "application/json")
                .json(&payload)
                .send()
                .await;
            if let Err(err) = result {
                tracing::warn!(
                    "Failed to deliver notification for {} to {}: {}",
                    event_name,
                    config.destination.url,
                    err
                );
            }
        }
    });
}
|
||||
|
||||
/// Render a timestamp in the S3 event format: second precision with a fixed
/// `.000Z` fractional part.
fn format_event_time(value: DateTime<Utc>) -> String {
    let formatted = value.format("%Y-%m-%dT%H:%M:%S.000Z");
    formatted.to_string()
}
|
||||
|
||||
fn child_text(node: &roxmltree::Node<'_, '_>, name: &str) -> Option<String> {
|
||||
node.children()
|
||||
.find(|child| child.is_element() && child.tag_name().name() == name)
|
||||
.and_then(|child| child.text())
|
||||
.map(|text| text.trim().to_string())
|
||||
.filter(|text| !text.is_empty())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // End-to-end parse of a full WebhookConfiguration — id, wildcard event,
    // destination URL, and prefix/suffix filter rules — then verifies
    // wildcard event matching against the filters.
    #[test]
    fn parse_webhook_configuration() {
        let xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<WebhookConfiguration>
<Id>upload</Id>
<Event>s3:ObjectCreated:*</Event>
<Destination><Url>https://example.com/hook</Url></Destination>
<Filter>
<S3Key>
<FilterRule><Name>prefix</Name><Value>logs/</Value></FilterRule>
<FilterRule><Name>suffix</Name><Value>.txt</Value></FilterRule>
</S3Key>
</Filter>
</WebhookConfiguration>
</NotificationConfiguration>"#;
        let configs = parse_notification_configurations(xml).unwrap();
        assert_eq!(configs.len(), 1);
        assert!(configs[0].matches_event("s3:ObjectCreated:Put", "logs/test.txt"));
        assert!(!configs[0].matches_event("s3:ObjectRemoved:Delete", "logs/test.txt"));
    }
}
|
||||
128
crates/myfsio-server/src/services/object_lock.rs
Normal file
128
crates/myfsio-server/src/services/object_lock.rs
Normal file
@@ -0,0 +1,128 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Object-metadata key under which the legal-hold flag ("ON"/"OFF") is stored.
pub const LEGAL_HOLD_METADATA_KEY: &str = "__legal_hold__";
/// Object-metadata key under which JSON-encoded retention settings are stored.
pub const RETENTION_METADATA_KEY: &str = "__object_retention__";
|
||||
|
||||
/// S3-style object-lock retention mode.
///
/// Variant names are intentionally SCREAMING_CASE: serde serializes them
/// verbatim, and they must round-trip as the S3 `Mode` values.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RetentionMode {
    /// Lock may be overridden by a caller with bypass-governance permission.
    GOVERNANCE,
    /// Lock cannot be modified or bypassed until it expires.
    COMPLIANCE,
}
|
||||
|
||||
/// Retention settings for a locked object (stored as JSON in its metadata).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ObjectLockRetention {
    pub mode: RetentionMode,
    /// Moment after which the lock no longer applies.
    pub retain_until_date: DateTime<Utc>,
}
|
||||
|
||||
impl ObjectLockRetention {
|
||||
pub fn is_expired(&self) -> bool {
|
||||
Utc::now() > self.retain_until_date
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_object_retention(metadata: &HashMap<String, String>) -> Option<ObjectLockRetention> {
|
||||
metadata
|
||||
.get(RETENTION_METADATA_KEY)
|
||||
.and_then(|raw| serde_json::from_str::<ObjectLockRetention>(raw).ok())
|
||||
}
|
||||
|
||||
pub fn set_object_retention(
|
||||
metadata: &mut HashMap<String, String>,
|
||||
retention: &ObjectLockRetention,
|
||||
) -> Result<(), String> {
|
||||
let encoded = serde_json::to_string(retention).map_err(|err| err.to_string())?;
|
||||
metadata.insert(RETENTION_METADATA_KEY.to_string(), encoded);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_legal_hold(metadata: &HashMap<String, String>) -> bool {
|
||||
metadata
|
||||
.get(LEGAL_HOLD_METADATA_KEY)
|
||||
.map(|value| value.eq_ignore_ascii_case("ON") || value.eq_ignore_ascii_case("true"))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
pub fn set_legal_hold(metadata: &mut HashMap<String, String>, enabled: bool) {
|
||||
metadata.insert(
|
||||
LEGAL_HOLD_METADATA_KEY.to_string(),
|
||||
if enabled { "ON" } else { "OFF" }.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn ensure_retention_mutable(
|
||||
metadata: &HashMap<String, String>,
|
||||
bypass_governance: bool,
|
||||
) -> Result<(), String> {
|
||||
let Some(existing) = get_object_retention(metadata) else {
|
||||
return Ok(());
|
||||
};
|
||||
if existing.is_expired() {
|
||||
return Ok(());
|
||||
}
|
||||
match existing.mode {
|
||||
RetentionMode::COMPLIANCE => Err(format!(
|
||||
"Cannot modify retention on object with COMPLIANCE mode until retention expires"
|
||||
)),
|
||||
RetentionMode::GOVERNANCE if !bypass_governance => Err(
|
||||
"Cannot modify GOVERNANCE retention without bypass-governance permission".to_string(),
|
||||
),
|
||||
RetentionMode::GOVERNANCE => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn can_delete_object(
|
||||
metadata: &HashMap<String, String>,
|
||||
bypass_governance: bool,
|
||||
) -> Result<(), String> {
|
||||
if get_legal_hold(metadata) {
|
||||
return Err("Object is under legal hold".to_string());
|
||||
}
|
||||
if let Some(retention) = get_object_retention(metadata) {
|
||||
if !retention.is_expired() {
|
||||
return match retention.mode {
|
||||
RetentionMode::COMPLIANCE => Err(format!(
|
||||
"Object is locked in COMPLIANCE mode until {}",
|
||||
retention.retain_until_date.to_rfc3339()
|
||||
)),
|
||||
RetentionMode::GOVERNANCE if !bypass_governance => Err(format!(
|
||||
"Object is locked in GOVERNANCE mode until {}",
|
||||
retention.retain_until_date.to_rfc3339()
|
||||
)),
|
||||
RetentionMode::GOVERNANCE => Ok(()),
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Duration;

    // A legal hold must block deletion regardless of governance bypass.
    #[test]
    fn legal_hold_blocks_delete() {
        let mut metadata = HashMap::new();
        set_legal_hold(&mut metadata, true);
        let err = can_delete_object(&metadata, false).unwrap_err();
        assert!(err.contains("legal hold"));
    }

    // Active GOVERNANCE retention blocks deletes unless bypass is granted.
    #[test]
    fn governance_requires_bypass() {
        let mut metadata = HashMap::new();
        set_object_retention(
            &mut metadata,
            &ObjectLockRetention {
                mode: RetentionMode::GOVERNANCE,
                retain_until_date: Utc::now() + Duration::hours(1),
            },
        )
        .unwrap();
        assert!(can_delete_object(&metadata, false).is_err());
        assert!(can_delete_object(&metadata, true).is_ok());
    }
}
|
||||
713
crates/myfsio-server/src/services/replication.rs
Normal file
713
crates/myfsio-server/src/services/replication.rs
Normal file
@@ -0,0 +1,713 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use parking_lot::Mutex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use myfsio_common::types::ListParams;
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::services::s3_client::{build_client, check_endpoint_health, ClientOptions};
|
||||
use crate::stores::connections::{ConnectionStore, RemoteConnection};
|
||||
|
||||
// Replication mode identifiers stored in `ReplicationRule::mode`.
pub const MODE_NEW_ONLY: &str = "new_only";
pub const MODE_ALL: &str = "all";
pub const MODE_BIDIRECTIONAL: &str = "bidirectional";
|
||||
|
||||
/// Running counters for one bucket's replication rule. Every field has a
/// serde default so older persisted rules deserialize cleanly.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReplicationStats {
    #[serde(default)]
    pub objects_synced: u64,
    #[serde(default)]
    pub objects_pending: u64,
    #[serde(default)]
    pub objects_orphaned: u64,
    #[serde(default)]
    pub bytes_synced: u64,
    /// Unix timestamp (seconds) of the most recent successful sync.
    #[serde(default)]
    pub last_sync_at: Option<f64>,
    /// Key of the most recently synced object.
    #[serde(default)]
    pub last_sync_key: Option<String>,
}
|
||||
|
||||
/// Per-bucket replication configuration, persisted as JSON keyed by bucket
/// name in `replication_rules.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicationRule {
    /// Source bucket this rule applies to.
    pub bucket_name: String,
    /// Id of the stored remote connection used as the replication target.
    pub target_connection_id: String,
    /// Bucket name on the remote endpoint.
    pub target_bucket: String,
    /// Disabled rules are ignored by the trigger path.
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// One of the MODE_* constants (`new_only`, `all`, `bidirectional`).
    #[serde(default = "default_mode")]
    pub mode: String,
    #[serde(default)]
    pub created_at: Option<f64>,
    #[serde(default)]
    pub stats: ReplicationStats,
    /// Propagate deletions to the target (assumed; enforcement is outside
    /// this view — confirm against the trigger path).
    #[serde(default = "default_true")]
    pub sync_deletions: bool,
    #[serde(default)]
    pub last_pull_at: Option<f64>,
    /// When set, only keys with this prefix are replicated.
    #[serde(default)]
    pub filter_prefix: Option<String>,
}
|
||||
|
||||
// Serde default helpers: boolean flags default to `true`, the replication
// mode to `new_only`.
fn default_true() -> bool {
    true
}
fn default_mode() -> String {
    MODE_NEW_ONLY.to_string()
}
|
||||
|
||||
/// One failed replication attempt for an object, persisted per bucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicationFailure {
    pub object_key: String,
    /// Human-readable error from the most recent attempt.
    pub error_message: String,
    /// Unix timestamp (seconds) of the most recent attempt.
    pub timestamp: f64,
    /// Number of failures recorded for this key (incremented on repeats).
    pub failure_count: u32,
    pub bucket_name: String,
    /// Operation that failed (e.g. "write" or "delete").
    pub action: String,
    #[serde(default)]
    pub last_error_code: Option<String>,
}
|
||||
|
||||
/// Disk-backed, in-memory-cached store of replication failures, one JSON
/// file per bucket under `.myfsio.sys/buckets/<bucket>/`.
pub struct ReplicationFailureStore {
    storage_root: PathBuf,
    /// Hard cap on persisted failures per bucket (newest-first order kept).
    max_failures_per_bucket: usize,
    /// Per-bucket cache, lazily populated from disk on first access.
    cache: Mutex<HashMap<String, Vec<ReplicationFailure>>>,
}
|
||||
|
||||
impl ReplicationFailureStore {
|
||||
pub fn new(storage_root: PathBuf, max_failures_per_bucket: usize) -> Self {
|
||||
Self {
|
||||
storage_root,
|
||||
max_failures_per_bucket,
|
||||
cache: Mutex::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
fn path(&self, bucket: &str) -> PathBuf {
|
||||
self.storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("buckets")
|
||||
.join(bucket)
|
||||
.join("replication_failures.json")
|
||||
}
|
||||
|
||||
fn load_from_disk(&self, bucket: &str) -> Vec<ReplicationFailure> {
|
||||
let path = self.path(bucket);
|
||||
if !path.exists() {
|
||||
return Vec::new();
|
||||
}
|
||||
match std::fs::read_to_string(&path) {
|
||||
Ok(text) => {
|
||||
let parsed: serde_json::Value = match serde_json::from_str(&text) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return Vec::new(),
|
||||
};
|
||||
parsed
|
||||
.get("failures")
|
||||
.and_then(|v| serde_json::from_value(v.clone()).ok())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
Err(_) => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn save_to_disk(&self, bucket: &str, failures: &[ReplicationFailure]) {
|
||||
let path = self.path(bucket);
|
||||
if let Some(parent) = path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
let trimmed = &failures[..failures.len().min(self.max_failures_per_bucket)];
|
||||
let data = serde_json::json!({ "failures": trimmed });
|
||||
let _ = std::fs::write(
|
||||
&path,
|
||||
serde_json::to_string_pretty(&data).unwrap_or_default(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn load(&self, bucket: &str) -> Vec<ReplicationFailure> {
|
||||
let mut cache = self.cache.lock();
|
||||
if let Some(existing) = cache.get(bucket) {
|
||||
return existing.clone();
|
||||
}
|
||||
let loaded = self.load_from_disk(bucket);
|
||||
cache.insert(bucket.to_string(), loaded.clone());
|
||||
loaded
|
||||
}
|
||||
|
||||
pub fn save(&self, bucket: &str, failures: Vec<ReplicationFailure>) {
|
||||
let trimmed: Vec<ReplicationFailure> = failures
|
||||
.into_iter()
|
||||
.take(self.max_failures_per_bucket)
|
||||
.collect();
|
||||
self.save_to_disk(bucket, &trimmed);
|
||||
self.cache.lock().insert(bucket.to_string(), trimmed);
|
||||
}
|
||||
|
||||
pub fn add(&self, bucket: &str, failure: ReplicationFailure) {
|
||||
let mut failures = self.load(bucket);
|
||||
if let Some(existing) = failures
|
||||
.iter_mut()
|
||||
.find(|f| f.object_key == failure.object_key)
|
||||
{
|
||||
existing.failure_count += 1;
|
||||
existing.timestamp = failure.timestamp;
|
||||
existing.error_message = failure.error_message.clone();
|
||||
existing.last_error_code = failure.last_error_code.clone();
|
||||
} else {
|
||||
failures.insert(0, failure);
|
||||
}
|
||||
self.save(bucket, failures);
|
||||
}
|
||||
|
||||
pub fn remove(&self, bucket: &str, object_key: &str) -> bool {
|
||||
let failures = self.load(bucket);
|
||||
let before = failures.len();
|
||||
let after: Vec<_> = failures
|
||||
.into_iter()
|
||||
.filter(|f| f.object_key != object_key)
|
||||
.collect();
|
||||
if after.len() != before {
|
||||
self.save(bucket, after);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clear(&self, bucket: &str) {
|
||||
self.cache.lock().remove(bucket);
|
||||
let path = self.path(bucket);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
pub fn get(&self, bucket: &str, object_key: &str) -> Option<ReplicationFailure> {
|
||||
self.load(bucket)
|
||||
.into_iter()
|
||||
.find(|f| f.object_key == object_key)
|
||||
}
|
||||
|
||||
pub fn count(&self, bucket: &str) -> usize {
|
||||
self.load(bucket).len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Coordinates asynchronous replication of bucket objects to remote
/// S3-compatible endpoints.
pub struct ReplicationManager {
    storage: Arc<FsStorageBackend>,
    connections: Arc<ConnectionStore>,
    /// `.myfsio.sys/config/replication_rules.json` under the storage root.
    rules_path: PathBuf,
    /// Rules keyed by source bucket name.
    rules: Mutex<HashMap<String, ReplicationRule>>,
    client_options: ClientOptions,
    // Size threshold for switching to streaming upload — assumed from the
    // name; usage is outside this view, confirm in replicate_task.
    streaming_threshold_bytes: u64,
    pub failures: Arc<ReplicationFailureStore>,
    /// Caps the number of concurrent replication tasks (4 permits).
    semaphore: Arc<Semaphore>,
}
|
||||
|
||||
impl ReplicationManager {
|
||||
pub fn new(
|
||||
storage: Arc<FsStorageBackend>,
|
||||
connections: Arc<ConnectionStore>,
|
||||
storage_root: &Path,
|
||||
connect_timeout: Duration,
|
||||
read_timeout: Duration,
|
||||
max_retries: u32,
|
||||
streaming_threshold_bytes: u64,
|
||||
max_failures_per_bucket: usize,
|
||||
) -> Self {
|
||||
let rules_path = storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("config")
|
||||
.join("replication_rules.json");
|
||||
let rules = load_rules(&rules_path);
|
||||
let failures = Arc::new(ReplicationFailureStore::new(
|
||||
storage_root.to_path_buf(),
|
||||
max_failures_per_bucket,
|
||||
));
|
||||
let client_options = ClientOptions {
|
||||
connect_timeout,
|
||||
read_timeout,
|
||||
max_attempts: max_retries,
|
||||
};
|
||||
Self {
|
||||
storage,
|
||||
connections,
|
||||
rules_path,
|
||||
rules: Mutex::new(rules),
|
||||
client_options,
|
||||
streaming_threshold_bytes,
|
||||
failures,
|
||||
semaphore: Arc::new(Semaphore::new(4)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reload_rules(&self) {
|
||||
*self.rules.lock() = load_rules(&self.rules_path);
|
||||
}
|
||||
|
||||
pub fn list_rules(&self) -> Vec<ReplicationRule> {
|
||||
self.rules.lock().values().cloned().collect()
|
||||
}
|
||||
|
||||
pub fn get_rule(&self, bucket: &str) -> Option<ReplicationRule> {
|
||||
self.rules.lock().get(bucket).cloned()
|
||||
}
|
||||
|
||||
pub fn set_rule(&self, rule: ReplicationRule) {
|
||||
{
|
||||
let mut guard = self.rules.lock();
|
||||
guard.insert(rule.bucket_name.clone(), rule);
|
||||
}
|
||||
self.save_rules();
|
||||
}
|
||||
|
||||
pub fn delete_rule(&self, bucket: &str) {
|
||||
{
|
||||
let mut guard = self.rules.lock();
|
||||
guard.remove(bucket);
|
||||
}
|
||||
self.save_rules();
|
||||
}
|
||||
|
||||
pub fn save_rules(&self) {
|
||||
let snapshot: HashMap<String, ReplicationRule> = self.rules.lock().clone();
|
||||
if let Some(parent) = self.rules_path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
if let Ok(text) = serde_json::to_string_pretty(&snapshot) {
|
||||
let _ = std::fs::write(&self.rules_path, text);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_last_sync(&self, bucket: &str, key: &str) {
|
||||
{
|
||||
let mut guard = self.rules.lock();
|
||||
if let Some(rule) = guard.get_mut(bucket) {
|
||||
rule.stats.last_sync_at = Some(now_secs());
|
||||
rule.stats.last_sync_key = Some(key.to_string());
|
||||
}
|
||||
}
|
||||
self.save_rules();
|
||||
}
|
||||
|
||||
pub async fn trigger(self: Arc<Self>, bucket: String, key: String, action: String) {
|
||||
let rule = match self.get_rule(&bucket) {
|
||||
Some(r) if r.enabled => r,
|
||||
_ => return,
|
||||
};
|
||||
let connection = match self.connections.get(&rule.target_connection_id) {
|
||||
Some(c) => c,
|
||||
None => {
|
||||
tracing::warn!(
|
||||
"Replication skipped for {}/{}: connection {} not found",
|
||||
bucket,
|
||||
key,
|
||||
rule.target_connection_id
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let permit = match self.semaphore.clone().try_acquire_owned() {
|
||||
Ok(p) => p,
|
||||
Err(_) => {
|
||||
let sem = self.semaphore.clone();
|
||||
match sem.acquire_owned().await {
|
||||
Ok(p) => p,
|
||||
Err(_) => return,
|
||||
}
|
||||
}
|
||||
};
|
||||
let manager = self.clone();
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
manager
|
||||
.replicate_task(&bucket, &key, &rule, &connection, &action)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn replicate_existing_objects(self: Arc<Self>, bucket: String) -> usize {
|
||||
let rule = match self.get_rule(&bucket) {
|
||||
Some(r) if r.enabled => r,
|
||||
_ => return 0,
|
||||
};
|
||||
let connection = match self.connections.get(&rule.target_connection_id) {
|
||||
Some(c) => c,
|
||||
None => {
|
||||
tracing::warn!(
|
||||
"Cannot replicate existing objects for {}: connection {} not found",
|
||||
bucket,
|
||||
rule.target_connection_id
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
if !self.check_endpoint(&connection).await {
|
||||
tracing::warn!(
|
||||
"Cannot replicate existing objects for {}: endpoint {} is unreachable",
|
||||
bucket,
|
||||
connection.endpoint_url
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
let mut continuation_token: Option<String> = None;
|
||||
let mut submitted = 0usize;
|
||||
|
||||
loop {
|
||||
let page = match self
|
||||
.storage
|
||||
.list_objects(
|
||||
&bucket,
|
||||
&ListParams {
|
||||
max_keys: 1000,
|
||||
continuation_token: continuation_token.clone(),
|
||||
prefix: rule.filter_prefix.clone(),
|
||||
start_after: None,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(page) => page,
|
||||
Err(err) => {
|
||||
tracing::error!(
|
||||
"Failed to list existing objects for replication in {}: {}",
|
||||
bucket,
|
||||
err
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
let next_token = page.next_continuation_token.clone();
|
||||
let is_truncated = page.is_truncated;
|
||||
|
||||
for object in page.objects {
|
||||
submitted += 1;
|
||||
self.clone()
|
||||
.trigger(bucket.clone(), object.key, "write".to_string())
|
||||
.await;
|
||||
}
|
||||
|
||||
if !is_truncated {
|
||||
break;
|
||||
}
|
||||
|
||||
continuation_token = next_token;
|
||||
if continuation_token.is_none() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
submitted
|
||||
}
|
||||
|
||||
pub fn schedule_existing_objects_sync(self: Arc<Self>, bucket: String) {
|
||||
tokio::spawn(async move {
|
||||
let submitted = self
|
||||
.clone()
|
||||
.replicate_existing_objects(bucket.clone())
|
||||
.await;
|
||||
if submitted > 0 {
|
||||
tracing::info!(
|
||||
"Scheduled {} existing object(s) for replication in {}",
|
||||
submitted,
|
||||
bucket
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
    /// Execute one replication action (`"write"` or `"delete"`) for a single
    /// object against the remote target described by `rule`/`conn`.
    ///
    /// Success updates the last-sync marker and clears any recorded failure
    /// for the key; failure records a `ReplicationFailure` entry.
    async fn replicate_task(
        &self,
        bucket: &str,
        object_key: &str,
        rule: &ReplicationRule,
        conn: &RemoteConnection,
        action: &str,
    ) {
        // Reject keys that could escape the bucket namespace on either side.
        if object_key.contains("..") || object_key.starts_with('/') || object_key.starts_with('\\')
        {
            tracing::error!("Invalid object key (path traversal): {}", object_key);
            return;
        }

        let client = build_client(conn, &self.client_options);

        if action == "delete" {
            match client
                .delete_object()
                .bucket(&rule.target_bucket)
                .key(object_key)
                .send()
                .await
            {
                Ok(_) => {
                    tracing::info!(
                        "Replicated DELETE {}/{} to {} ({})",
                        bucket,
                        object_key,
                        conn.name,
                        rule.target_bucket
                    );
                    self.update_last_sync(bucket, object_key);
                    self.failures.remove(bucket, object_key);
                }
                Err(err) => {
                    let msg = format!("{:?}", err);
                    tracing::error!(
                        "Replication DELETE failed {}/{}: {}",
                        bucket,
                        object_key,
                        msg
                    );
                    // NOTE(review): failure_count is always 1 here — presumably
                    // `failures.add` merges/increments an existing entry for the
                    // same key; confirm against the failure store.
                    self.failures.add(
                        bucket,
                        ReplicationFailure {
                            object_key: object_key.to_string(),
                            error_message: msg,
                            timestamp: now_secs(),
                            failure_count: 1,
                            bucket_name: bucket.to_string(),
                            action: "delete".to_string(),
                            last_error_code: None,
                        },
                    );
                }
            }
            return;
        }

        // "write" path: resolve the local file backing the object.
        let src_path = match self.storage.get_object_path(bucket, object_key).await {
            Ok(p) => p,
            Err(_) => {
                tracing::error!("Source object not found: {}/{}", bucket, object_key);
                return;
            }
        };
        // Size picks streaming vs buffered upload in `upload_object`; a
        // metadata failure falls back to 0 (buffered).
        let file_size = match tokio::fs::metadata(&src_path).await {
            Ok(m) => m.len(),
            Err(_) => 0,
        };
        let content_type = mime_guess::from_path(&src_path)
            .first_raw()
            .map(|s| s.to_string());

        let upload_result = upload_object(
            &client,
            &rule.target_bucket,
            object_key,
            &src_path,
            file_size,
            self.streaming_threshold_bytes,
            content_type.as_deref(),
        )
        .await;

        // If the target bucket is missing, best-effort create it and retry the
        // upload once. The create result is deliberately ignored so a racing
        // "bucket already exists" error does not abort the retry.
        let final_result = match upload_result {
            Err(err) if is_no_such_bucket(&err) => {
                tracing::info!(
                    "Target bucket {} not found, creating it",
                    rule.target_bucket
                );
                match client
                    .create_bucket()
                    .bucket(&rule.target_bucket)
                    .send()
                    .await
                {
                    Ok(_) | Err(_) => {
                        upload_object(
                            &client,
                            &rule.target_bucket,
                            object_key,
                            &src_path,
                            file_size,
                            self.streaming_threshold_bytes,
                            content_type.as_deref(),
                        )
                        .await
                    }
                }
            }
            other => other,
        };

        match final_result {
            Ok(()) => {
                tracing::info!(
                    "Replicated {}/{} to {} ({})",
                    bucket,
                    object_key,
                    conn.name,
                    rule.target_bucket
                );
                self.update_last_sync(bucket, object_key);
                self.failures.remove(bucket, object_key);
            }
            Err(err) => {
                let msg = err.to_string();
                tracing::error!("Replication failed {}/{}: {}", bucket, object_key, msg);
                self.failures.add(
                    bucket,
                    ReplicationFailure {
                        object_key: object_key.to_string(),
                        error_message: msg,
                        timestamp: now_secs(),
                        failure_count: 1,
                        bucket_name: bucket.to_string(),
                        action: action.to_string(),
                        last_error_code: None,
                    },
                );
            }
        }
    }
|
||||
|
||||
pub async fn check_endpoint(&self, conn: &RemoteConnection) -> bool {
|
||||
let client = build_client(conn, &self.client_options);
|
||||
check_endpoint_health(&client).await
|
||||
}
|
||||
|
||||
pub async fn retry_failed(&self, bucket: &str, object_key: &str) -> bool {
|
||||
let failure = match self.failures.get(bucket, object_key) {
|
||||
Some(f) => f,
|
||||
None => return false,
|
||||
};
|
||||
let rule = match self.get_rule(bucket) {
|
||||
Some(r) if r.enabled => r,
|
||||
_ => return false,
|
||||
};
|
||||
let conn = match self.connections.get(&rule.target_connection_id) {
|
||||
Some(c) => c,
|
||||
None => return false,
|
||||
};
|
||||
self.replicate_task(bucket, object_key, &rule, &conn, &failure.action)
|
||||
.await;
|
||||
true
|
||||
}
|
||||
|
||||
pub async fn retry_all(&self, bucket: &str) -> (usize, usize) {
|
||||
let failures = self.failures.load(bucket);
|
||||
if failures.is_empty() {
|
||||
return (0, 0);
|
||||
}
|
||||
let rule = match self.get_rule(bucket) {
|
||||
Some(r) if r.enabled => r,
|
||||
_ => return (0, failures.len()),
|
||||
};
|
||||
let conn = match self.connections.get(&rule.target_connection_id) {
|
||||
Some(c) => c,
|
||||
None => return (0, failures.len()),
|
||||
};
|
||||
let mut submitted = 0;
|
||||
for failure in failures {
|
||||
self.replicate_task(bucket, &failure.object_key, &rule, &conn, &failure.action)
|
||||
.await;
|
||||
submitted += 1;
|
||||
}
|
||||
(submitted, 0)
|
||||
}
|
||||
|
||||
    /// Number of replication failures currently recorded for `bucket`.
    pub fn get_failure_count(&self, bucket: &str) -> usize {
        self.failures.count(bucket)
    }
|
||||
|
||||
pub fn get_failed_items(
|
||||
&self,
|
||||
bucket: &str,
|
||||
limit: usize,
|
||||
offset: usize,
|
||||
) -> Vec<ReplicationFailure> {
|
||||
self.failures
|
||||
.load(bucket)
|
||||
.into_iter()
|
||||
.skip(offset)
|
||||
.take(limit)
|
||||
.collect()
|
||||
}
|
||||
|
||||
    /// Drop a single recorded failure; returns whether an entry existed.
    pub fn dismiss_failure(&self, bucket: &str, key: &str) -> bool {
        self.failures.remove(bucket, key)
    }
|
||||
|
||||
    /// Drop all recorded failures for `bucket`.
    pub fn clear_failures(&self, bucket: &str) {
        self.failures.clear(bucket);
    }
|
||||
|
||||
    /// Clone of the current replication rule set, keyed by bucket name.
    pub fn rules_snapshot(&self) -> HashMap<String, ReplicationRule> {
        self.rules.lock().clone()
    }
|
||||
|
||||
pub fn update_last_pull(&self, bucket: &str, at: f64) {
|
||||
{
|
||||
let mut guard = self.rules.lock();
|
||||
if let Some(rule) = guard.get_mut(bucket) {
|
||||
rule.last_pull_at = Some(at);
|
||||
}
|
||||
}
|
||||
self.save_rules();
|
||||
}
|
||||
|
||||
    /// Shared timeout/retry options used when building remote S3 clients.
    pub fn client_options(&self) -> &ClientOptions {
        &self.client_options
    }
|
||||
}
|
||||
|
||||
/// Heuristic check: does the error's debug representation mention the S3
/// `NoSuchBucket` error code?
fn is_no_such_bucket<E: std::fmt::Debug>(err: &E) -> bool {
    format!("{:?}", err).contains("NoSuchBucket")
}
|
||||
|
||||
/// Upload a local file to `bucket`/`key` via the given client.
///
/// Files at or above `streaming_threshold` bytes are streamed from disk;
/// smaller files are read fully into memory first. `content_type`, when
/// present, is forwarded as the object's Content-Type.
///
/// Local I/O errors are wrapped as SDK construction failures so the caller
/// deals with a single error type.
async fn upload_object(
    client: &aws_sdk_s3::Client,
    bucket: &str,
    key: &str,
    path: &Path,
    file_size: u64,
    streaming_threshold: u64,
    content_type: Option<&str>,
) -> Result<(), aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::put_object::PutObjectError>> {
    let mut req = client.put_object().bucket(bucket).key(key);
    if let Some(ct) = content_type {
        req = req.content_type(ct);
    }

    let body = if file_size >= streaming_threshold {
        // Stream straight from disk without buffering the whole file.
        ByteStream::from_path(path).await.map_err(|e| {
            aws_sdk_s3::error::SdkError::construction_failure(Box::new(std::io::Error::new(
                std::io::ErrorKind::Other,
                e,
            )))
        })?
    } else {
        // Small file: read into memory for a simple single-shot body.
        let bytes = tokio::fs::read(path)
            .await
            .map_err(|e| aws_sdk_s3::error::SdkError::construction_failure(Box::new(e)))?;
        ByteStream::from(bytes)
    };

    req.body(body).send().await.map(|_| ())
}
|
||||
|
||||
fn load_rules(path: &Path) -> HashMap<String, ReplicationRule> {
|
||||
if !path.exists() {
|
||||
return HashMap::new();
|
||||
}
|
||||
match std::fs::read_to_string(path) {
|
||||
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
|
||||
Err(_) => HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Current UNIX time as fractional seconds; 0.0 if the clock is before epoch.
fn now_secs() -> f64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs_f64(),
        Err(_) => 0.0,
    }
}
|
||||
64
crates/myfsio-server/src/services/s3_client.rs
Normal file
64
crates/myfsio-server/src/services/s3_client.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use aws_config::BehaviorVersion;
|
||||
use aws_credential_types::Credentials;
|
||||
use aws_sdk_s3::config::{Region, SharedCredentialsProvider};
|
||||
use aws_sdk_s3::Client;
|
||||
|
||||
use crate::stores::connections::RemoteConnection;
|
||||
|
||||
/// Tunables applied to every remote S3 client built by `build_client`.
pub struct ClientOptions {
    /// TCP connect timeout.
    pub connect_timeout: Duration,
    /// Per-request read timeout.
    pub read_timeout: Duration,
    /// Total attempts per operation (1 = no retries).
    pub max_attempts: u32,
}
|
||||
|
||||
impl Default for ClientOptions {
    /// Conservative defaults: 5 s connect, 30 s read, one retry (2 attempts).
    fn default() -> Self {
        Self {
            connect_timeout: Duration::from_secs(5),
            read_timeout: Duration::from_secs(30),
            max_attempts: 2,
        }
    }
}
|
||||
|
||||
/// Build an `aws_sdk_s3::Client` for `connection`, applying the shared
/// timeout and retry `options`.
pub fn build_client(connection: &RemoteConnection, options: &ClientOptions) -> Client {
    let credentials = Credentials::new(
        connection.access_key.clone(),
        connection.secret_key.clone(),
        None, // no session token
        None, // no expiry
        "myfsio-replication", // provider name (shows up in SDK diagnostics)
    );

    let timeout_config = aws_smithy_types::timeout::TimeoutConfig::builder()
        .connect_timeout(options.connect_timeout)
        .read_timeout(options.read_timeout)
        .build();

    let retry_config =
        aws_smithy_types::retry::RetryConfig::standard().with_max_attempts(options.max_attempts);

    let config = aws_sdk_s3::config::Builder::new()
        .behavior_version(BehaviorVersion::latest())
        .credentials_provider(SharedCredentialsProvider::new(credentials))
        .region(Region::new(connection.region.clone()))
        .endpoint_url(connection.endpoint_url.clone())
        // Path-style addressing: required by most self-hosted S3-compatible
        // endpoints, where bucket-name subdomains do not resolve.
        .force_path_style(true)
        .timeout_config(timeout_config)
        .retry_config(retry_config)
        .build();

    Client::from_conf(config)
}
|
||||
|
||||
pub async fn check_endpoint_health(client: &Client) -> bool {
|
||||
match client.list_buckets().send().await {
|
||||
Ok(_) => true,
|
||||
Err(err) => {
|
||||
tracing::warn!("Endpoint health check failed: {:?}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
148
crates/myfsio-server/src/services/site_registry.rs
Normal file
148
crates/myfsio-server/src/services/site_registry.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
use chrono::Utc;
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Identity of the local site as persisted in the site registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SiteInfo {
    /// Stable unique identifier for this site.
    pub site_id: String,
    /// Base URL peers use to reach this site.
    pub endpoint: String,
    /// S3 region name; defaults to "us-east-1" when absent in the JSON.
    #[serde(default = "default_region")]
    pub region: String,
    /// Ordering hint; defaults to 100 when absent in the JSON.
    #[serde(default = "default_priority")]
    pub priority: i32,
    /// Human-readable label; defaults to empty.
    #[serde(default)]
    pub display_name: String,
    /// Creation timestamp as a string, when known.
    #[serde(default)]
    pub created_at: Option<String>,
}
|
||||
|
||||
/// Serde default for region fields.
fn default_region() -> String {
    String::from("us-east-1")
}
|
||||
/// Serde default for priority fields.
fn default_priority() -> i32 {
    100
}
|
||||
|
||||
/// A remote peer site tracked in the registry, including its last known
/// health status.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerSite {
    /// Stable unique identifier of the peer.
    pub site_id: String,
    /// Base URL used to reach the peer.
    pub endpoint: String,
    /// S3 region name; defaults to "us-east-1" when absent.
    #[serde(default = "default_region")]
    pub region: String,
    /// Ordering hint; defaults to 100 when absent.
    #[serde(default = "default_priority")]
    pub priority: i32,
    /// Human-readable label; defaults to empty.
    #[serde(default)]
    pub display_name: String,
    /// Id of the remote-connection record backing this peer, if linked.
    #[serde(default)]
    pub connection_id: Option<String>,
    /// Creation timestamp as a string, when known.
    #[serde(default)]
    pub created_at: Option<String>,
    /// Result of the most recent health check (false when never checked).
    #[serde(default)]
    pub is_healthy: bool,
    /// RFC 3339 timestamp of the most recent health check.
    #[serde(default)]
    pub last_health_check: Option<String>,
}
|
||||
|
||||
/// On-disk shape of the site registry file.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct RegistryData {
    /// The local site's identity, once configured.
    #[serde(default)]
    local: Option<SiteInfo>,
    /// Known peer sites.
    #[serde(default)]
    peers: Vec<PeerSite>,
}
|
||||
|
||||
/// JSON-file-backed registry of the local site identity and its peers.
pub struct SiteRegistry {
    /// Path of the backing JSON file.
    path: PathBuf,
    /// In-memory copy, guarded by a read/write lock.
    data: Arc<RwLock<RegistryData>>,
}
|
||||
|
||||
impl SiteRegistry {
    /// Load the registry from
    /// `<storage_root>/.myfsio.sys/config/site_registry.json`; a missing or
    /// unparseable file yields an empty registry.
    pub fn new(storage_root: &std::path::Path) -> Self {
        let path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("site_registry.json");
        let data = if path.exists() {
            std::fs::read_to_string(&path)
                .ok()
                .and_then(|s| serde_json::from_str(&s).ok())
                .unwrap_or_default()
        } else {
            RegistryData::default()
        };
        Self {
            path,
            data: Arc::new(RwLock::new(data)),
        }
    }

    /// Best-effort persistence to disk (I/O errors are ignored).
    ///
    /// Acquires the READ lock: callers that hold the write lock must drop it
    /// before calling (see `update_peer`/`delete_peer`/`update_health`) —
    /// parking_lot locks are not reentrant, so calling this while holding the
    /// write lock would deadlock.
    fn save(&self) {
        let data = self.data.read();
        if let Some(parent) = self.path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        if let Ok(json) = serde_json::to_string_pretty(&*data) {
            let _ = std::fs::write(&self.path, json);
        }
    }

    /// Identity of this site, if configured.
    pub fn get_local_site(&self) -> Option<SiteInfo> {
        self.data.read().local.clone()
    }

    /// Set (or replace) this site's identity and persist.
    pub fn set_local_site(&self, site: SiteInfo) {
        self.data.write().local = Some(site);
        self.save();
    }

    /// Snapshot of all known peers.
    pub fn list_peers(&self) -> Vec<PeerSite> {
        self.data.read().peers.clone()
    }

    /// First peer with the given id, if any.
    pub fn get_peer(&self, site_id: &str) -> Option<PeerSite> {
        self.data
            .read()
            .peers
            .iter()
            .find(|p| p.site_id == site_id)
            .cloned()
    }

    /// Append a peer and persist.
    ///
    /// NOTE(review): no duplicate check here — adding an existing site_id
    /// creates a second entry that `get_peer`/`update_peer` will never reach.
    /// Presumably callers check `get_peer` first; confirm.
    pub fn add_peer(&self, peer: PeerSite) {
        self.data.write().peers.push(peer);
        self.save();
    }

    /// Replace the peer with a matching site_id (no-op when absent), then
    /// persist.
    pub fn update_peer(&self, peer: PeerSite) {
        let mut data = self.data.write();
        if let Some(existing) = data.peers.iter_mut().find(|p| p.site_id == peer.site_id) {
            *existing = peer;
        }
        // Must release the write lock before save() takes the read lock.
        drop(data);
        self.save();
    }

    /// Remove a peer by id; persists only when something was removed, and
    /// returns whether it was.
    pub fn delete_peer(&self, site_id: &str) -> bool {
        let mut data = self.data.write();
        let len_before = data.peers.len();
        data.peers.retain(|p| p.site_id != site_id);
        let removed = data.peers.len() < len_before;
        // Must release the write lock before save() takes the read lock.
        drop(data);
        if removed {
            self.save();
        }
        removed
    }

    /// Record a health-check result and timestamp for a peer. Persists even
    /// when the peer is unknown (the write is then a no-op content-wise).
    pub fn update_health(&self, site_id: &str, is_healthy: bool) {
        let mut data = self.data.write();
        if let Some(peer) = data.peers.iter_mut().find(|p| p.site_id == site_id) {
            peer.is_healthy = is_healthy;
            peer.last_health_check = Some(Utc::now().to_rfc3339());
        }
        // Must release the write lock before save() takes the read lock.
        drop(data);
        self.save();
    }
}
|
||||
498
crates/myfsio-server/src/services/site_sync.rs
Normal file
498
crates/myfsio-server/src/services/site_sync.rs
Normal file
@@ -0,0 +1,498 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use aws_sdk_s3::Client;
|
||||
use parking_lot::Mutex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::sync::Notify;
|
||||
|
||||
use myfsio_common::types::{ListParams, ObjectMeta};
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
|
||||
use crate::services::replication::{ReplicationManager, ReplicationRule, MODE_BIDIRECTIONAL};
|
||||
use crate::services::s3_client::{build_client, ClientOptions};
|
||||
use crate::stores::connections::ConnectionStore;
|
||||
|
||||
/// Book-keeping for one object the sync worker has pulled from a peer.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SyncedObjectInfo {
    /// UNIX seconds when the object was last pulled.
    pub last_synced_at: f64,
    /// ETag the remote reported at sync time (surrounding quotes stripped).
    pub remote_etag: String,
    /// Origin marker; "remote" marks objects eligible for deletion
    /// propagation (see `sync_bucket`).
    pub source: String,
}
|
||||
|
||||
/// Per-bucket persisted state of the bidirectional sync worker.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SyncState {
    /// Sync book-keeping keyed by object key.
    #[serde(default)]
    pub synced_objects: HashMap<String, SyncedObjectInfo>,
    /// UNIX seconds of the last completed full sync, if any.
    #[serde(default)]
    pub last_full_sync: Option<f64>,
}
|
||||
|
||||
/// Counters produced by one sync pass over a bucket.
#[derive(Debug, Clone, Default, Serialize)]
pub struct SiteSyncStats {
    /// UNIX seconds when this pass finished.
    pub last_sync_at: Option<f64>,
    /// Objects downloaded from the remote this pass.
    pub objects_pulled: u64,
    /// Objects present on both sides that required no action.
    pub objects_skipped: u64,
    /// Objects where a timestamp/etag conflict was resolved in favour of the
    /// remote copy.
    pub conflicts_resolved: u64,
    /// Local deletions applied because the remote copy disappeared.
    pub deletions_applied: u64,
    /// Failed pulls during this pass.
    pub errors: u64,
}
|
||||
|
||||
/// Minimal listing metadata for one remote object.
#[derive(Debug, Clone)]
struct RemoteObjectMeta {
    /// Fractional UNIX seconds of the remote LastModified time (0.0 when the
    /// listing omitted it).
    last_modified: f64,
    /// Remote ETag with surrounding quotes stripped.
    etag: String,
}
|
||||
|
||||
/// Background worker that periodically pulls changes from peer sites for
/// buckets whose replication rule is bidirectional.
pub struct SiteSyncWorker {
    storage: Arc<FsStorageBackend>,
    connections: Arc<ConnectionStore>,
    replication: Arc<ReplicationManager>,
    /// Root under which per-bucket sync state files are kept.
    storage_root: PathBuf,
    /// Pause between sync cycles.
    interval: Duration,
    /// Maximum objects pulled per bucket per cycle.
    batch_size: usize,
    /// Timestamp difference (seconds) below which two clocks are treated as
    /// equal and etags break the tie.
    clock_skew_tolerance: f64,
    client_options: ClientOptions,
    /// Latest stats per bucket, written after each sync pass.
    bucket_stats: Mutex<HashMap<String, SiteSyncStats>>,
    /// Signalled by `shutdown()` to stop the run loop.
    shutdown: Arc<Notify>,
}
|
||||
|
||||
impl SiteSyncWorker {
|
||||
    /// Build a worker from its dependencies and tuning knobs. Does not start
    /// anything — call `run()` on an `Arc` of the result.
    pub fn new(
        storage: Arc<FsStorageBackend>,
        connections: Arc<ConnectionStore>,
        replication: Arc<ReplicationManager>,
        storage_root: PathBuf,
        interval_seconds: u64,
        batch_size: usize,
        connect_timeout: Duration,
        read_timeout: Duration,
        max_retries: u32,
        clock_skew_tolerance: f64,
    ) -> Self {
        Self {
            storage,
            connections,
            replication,
            storage_root,
            interval: Duration::from_secs(interval_seconds),
            batch_size,
            clock_skew_tolerance,
            client_options: ClientOptions {
                connect_timeout,
                read_timeout,
                max_attempts: max_retries,
            },
            bucket_stats: Mutex::new(HashMap::new()),
            shutdown: Arc::new(Notify::new()),
        }
    }
|
||||
|
||||
    /// Ask the run loop to exit.
    ///
    /// NOTE(review): `notify_waiters` only wakes tasks currently parked in
    /// `notified()`; a shutdown issued while `run_cycle` is mid-flight is not
    /// stored and the loop goes back to sleeping for a full interval —
    /// confirm this latency is acceptable.
    pub fn shutdown(&self) {
        self.shutdown.notify_waiters();
    }
|
||||
|
||||
    /// Stats from the most recent sync pass over `bucket`, if one has run.
    pub fn get_stats(&self, bucket: &str) -> Option<SiteSyncStats> {
        self.bucket_stats.lock().get(bucket).cloned()
    }
|
||||
|
||||
    /// Worker main loop: sleep one interval, run a sync cycle, repeat until
    /// `shutdown()` is signalled. Because the sleep comes first, the initial
    /// cycle runs one full interval after startup.
    pub async fn run(self: Arc<Self>) {
        tracing::info!(
            "Site sync worker started (interval={}s)",
            self.interval.as_secs()
        );
        loop {
            tokio::select! {
                _ = tokio::time::sleep(self.interval) => {}
                _ = self.shutdown.notified() => {
                    tracing::info!("Site sync worker shutting down");
                    return;
                }
            }
            self.run_cycle().await;
        }
    }
|
||||
|
||||
async fn run_cycle(&self) {
|
||||
let rules = self.replication.rules_snapshot();
|
||||
for (bucket, rule) in rules {
|
||||
if rule.mode != MODE_BIDIRECTIONAL || !rule.enabled {
|
||||
continue;
|
||||
}
|
||||
match self.sync_bucket(&rule).await {
|
||||
Ok(stats) => {
|
||||
self.bucket_stats.lock().insert(bucket, stats);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Site sync failed for bucket {}: {}", bucket, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn trigger_sync(&self, bucket: &str) -> Option<SiteSyncStats> {
|
||||
let rule = self.replication.get_rule(bucket)?;
|
||||
if rule.mode != MODE_BIDIRECTIONAL || !rule.enabled {
|
||||
return None;
|
||||
}
|
||||
match self.sync_bucket(&rule).await {
|
||||
Ok(stats) => {
|
||||
self.bucket_stats
|
||||
.lock()
|
||||
.insert(bucket.to_string(), stats.clone());
|
||||
Some(stats)
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Site sync trigger failed for {}: {}", bucket, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /// One full pull-side sync pass for a single bucket.
    ///
    /// Lists both sides, pulls new/changed remote objects (capped at
    /// `batch_size` per pass; the remainder waits for the next cycle), and —
    /// when the rule enables it — propagates remote deletions for objects
    /// this worker itself pulled earlier. Persists updated sync state and
    /// returns the pass's counters.
    async fn sync_bucket(&self, rule: &ReplicationRule) -> Result<SiteSyncStats, String> {
        let mut stats = SiteSyncStats::default();
        let connection = self
            .connections
            .get(&rule.target_connection_id)
            .ok_or_else(|| format!("connection {} not found", rule.target_connection_id))?;

        let local_objects = self
            .list_local_objects(&rule.bucket_name)
            .await
            .map_err(|e| format!("list local failed: {}", e))?;

        let client = build_client(&connection, &self.client_options);
        let remote_objects = self
            .list_remote_objects(&client, &rule.target_bucket)
            .await
            .map_err(|e| format!("list remote failed: {}", e))?;

        let mut sync_state = self.load_sync_state(&rule.bucket_name);

        // Phase 1: decide what to pull. Remote-only keys are pulled
        // unconditionally; keys present on both sides go through conflict
        // resolution.
        let mut to_pull: Vec<String> = Vec::new();
        for (key, remote_meta) in &remote_objects {
            if let Some(local_meta) = local_objects.get(key) {
                match self.resolve_conflict(local_meta, remote_meta) {
                    "pull" => {
                        to_pull.push(key.clone());
                        stats.conflicts_resolved += 1;
                    }
                    _ => {
                        // "skip" and "keep" both leave the local copy alone.
                        stats.objects_skipped += 1;
                    }
                }
            } else {
                to_pull.push(key.clone());
            }
        }

        // Phase 2: pull up to batch_size objects, tracking each success so
        // deletion propagation can later recognise remote-sourced objects.
        let mut pulled = 0usize;
        for key in &to_pull {
            if pulled >= self.batch_size {
                break;
            }
            let remote_meta = match remote_objects.get(key) {
                Some(m) => m,
                None => continue,
            };
            if self
                .pull_object(&client, &rule.target_bucket, &rule.bucket_name, key)
                .await
            {
                stats.objects_pulled += 1;
                pulled += 1;
                sync_state.synced_objects.insert(
                    key.clone(),
                    SyncedObjectInfo {
                        last_synced_at: now_secs(),
                        remote_etag: remote_meta.etag.clone(),
                        source: "remote".to_string(),
                    },
                );
            } else {
                stats.errors += 1;
            }
        }

        // Phase 3 (optional): delete local objects whose remote counterpart
        // vanished — but only objects this worker originally pulled
        // ("remote"-sourced) and that were not modified locally after the
        // last sync, so local edits are never destroyed.
        if rule.sync_deletions {
            let tracked_keys: Vec<String> = sync_state.synced_objects.keys().cloned().collect();
            for key in tracked_keys {
                if remote_objects.contains_key(&key) {
                    continue;
                }
                let local_meta = match local_objects.get(&key) {
                    Some(m) => m,
                    None => continue,
                };
                let tracked = match sync_state.synced_objects.get(&key) {
                    Some(t) => t.clone(),
                    None => continue,
                };
                if tracked.source != "remote" {
                    continue;
                }
                let local_ts = local_meta.last_modified.timestamp() as f64;
                if local_ts <= tracked.last_synced_at
                    && self.apply_remote_deletion(&rule.bucket_name, &key).await
                {
                    stats.deletions_applied += 1;
                    sync_state.synced_objects.remove(&key);
                }
            }
        }

        sync_state.last_full_sync = Some(now_secs());
        self.save_sync_state(&rule.bucket_name, &sync_state);

        self.replication
            .update_last_pull(&rule.bucket_name, now_secs());

        stats.last_sync_at = Some(now_secs());
        tracing::info!(
            "Site sync completed for {}: pulled={}, skipped={}, conflicts={}, deletions={}, errors={}",
            rule.bucket_name,
            stats.objects_pulled,
            stats.objects_skipped,
            stats.conflicts_resolved,
            stats.deletions_applied,
            stats.errors,
        );
        Ok(stats)
    }
|
||||
|
||||
async fn list_local_objects(
|
||||
&self,
|
||||
bucket: &str,
|
||||
) -> Result<HashMap<String, ObjectMeta>, String> {
|
||||
let mut result = HashMap::new();
|
||||
let mut token: Option<String> = None;
|
||||
loop {
|
||||
let params = ListParams {
|
||||
max_keys: 1000,
|
||||
continuation_token: token.clone(),
|
||||
prefix: None,
|
||||
start_after: None,
|
||||
};
|
||||
let page = self
|
||||
.storage
|
||||
.list_objects(bucket, ¶ms)
|
||||
.await
|
||||
.map_err(|e| e.to_string())?;
|
||||
for obj in page.objects {
|
||||
result.insert(obj.key.clone(), obj);
|
||||
}
|
||||
if !page.is_truncated {
|
||||
break;
|
||||
}
|
||||
token = page.next_continuation_token;
|
||||
if token.is_none() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn list_remote_objects(
|
||||
&self,
|
||||
client: &Client,
|
||||
bucket: &str,
|
||||
) -> Result<HashMap<String, RemoteObjectMeta>, String> {
|
||||
let mut result = HashMap::new();
|
||||
let mut continuation: Option<String> = None;
|
||||
loop {
|
||||
let mut req = client.list_objects_v2().bucket(bucket);
|
||||
if let Some(ref t) = continuation {
|
||||
req = req.continuation_token(t);
|
||||
}
|
||||
let resp = match req.send().await {
|
||||
Ok(r) => r,
|
||||
Err(err) => {
|
||||
if is_not_found_error(&err) {
|
||||
return Ok(result);
|
||||
}
|
||||
return Err(format!("{:?}", err));
|
||||
}
|
||||
};
|
||||
for obj in resp.contents() {
|
||||
let key = match obj.key() {
|
||||
Some(k) => k.to_string(),
|
||||
None => continue,
|
||||
};
|
||||
let last_modified = obj
|
||||
.last_modified()
|
||||
.and_then(|t| {
|
||||
let secs = t.secs();
|
||||
let nanos = t.subsec_nanos();
|
||||
Some(secs as f64 + nanos as f64 / 1_000_000_000.0)
|
||||
})
|
||||
.unwrap_or(0.0);
|
||||
let etag = obj.e_tag().unwrap_or("").trim_matches('"').to_string();
|
||||
result.insert(
|
||||
key,
|
||||
RemoteObjectMeta {
|
||||
last_modified,
|
||||
etag,
|
||||
},
|
||||
);
|
||||
}
|
||||
if resp.is_truncated().unwrap_or(false) {
|
||||
continuation = resp.next_continuation_token().map(|s| s.to_string());
|
||||
if continuation.is_none() {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn resolve_conflict(&self, local: &ObjectMeta, remote: &RemoteObjectMeta) -> &'static str {
|
||||
let local_ts = local.last_modified.timestamp() as f64
|
||||
+ local.last_modified.timestamp_subsec_nanos() as f64 / 1_000_000_000.0;
|
||||
let remote_ts = remote.last_modified;
|
||||
|
||||
if (remote_ts - local_ts).abs() < self.clock_skew_tolerance {
|
||||
let local_etag = local.etag.clone().unwrap_or_default();
|
||||
let local_etag_trim = local_etag.trim_matches('"');
|
||||
if remote.etag == local_etag_trim {
|
||||
return "skip";
|
||||
}
|
||||
if remote.etag.as_str() > local_etag_trim {
|
||||
return "pull";
|
||||
}
|
||||
return "keep";
|
||||
}
|
||||
|
||||
if remote_ts > local_ts {
|
||||
"pull"
|
||||
} else {
|
||||
"keep"
|
||||
}
|
||||
}
|
||||
|
||||
async fn pull_object(
|
||||
&self,
|
||||
client: &Client,
|
||||
remote_bucket: &str,
|
||||
local_bucket: &str,
|
||||
key: &str,
|
||||
) -> bool {
|
||||
let resp = match client
|
||||
.get_object()
|
||||
.bucket(remote_bucket)
|
||||
.key(key)
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
Ok(r) => r,
|
||||
Err(err) => {
|
||||
tracing::error!("Pull GetObject failed {}/{}: {:?}", local_bucket, key, err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
let head = match client
|
||||
.head_object()
|
||||
.bucket(remote_bucket)
|
||||
.key(key)
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
Ok(r) => r,
|
||||
Err(err) => {
|
||||
tracing::error!("Pull HeadObject failed {}/{}: {:?}", local_bucket, key, err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
let metadata: Option<HashMap<String, String>> = head
|
||||
.metadata()
|
||||
.map(|m| m.iter().map(|(k, v)| (k.clone(), v.clone())).collect());
|
||||
|
||||
let stream = resp.body.into_async_read();
|
||||
let boxed: Pin<Box<dyn AsyncRead + Send>> = Box::pin(stream);
|
||||
|
||||
match self
|
||||
.storage
|
||||
.put_object(local_bucket, key, boxed, metadata)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
tracing::debug!("Pulled object {}/{} from remote", local_bucket, key);
|
||||
true
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!(
|
||||
"Store pulled object failed {}/{}: {}",
|
||||
local_bucket,
|
||||
key,
|
||||
err
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn apply_remote_deletion(&self, bucket: &str, key: &str) -> bool {
|
||||
match self.storage.delete_object(bucket, key).await {
|
||||
Ok(_) => {
|
||||
tracing::debug!("Applied remote deletion for {}/{}", bucket, key);
|
||||
true
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!("Remote deletion failed {}/{}: {}", bucket, key, err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn sync_state_path(&self, bucket: &str) -> PathBuf {
|
||||
self.storage_root
|
||||
.join(".myfsio.sys")
|
||||
.join("buckets")
|
||||
.join(bucket)
|
||||
.join("site_sync_state.json")
|
||||
}
|
||||
|
||||
fn load_sync_state(&self, bucket: &str) -> SyncState {
|
||||
let path = self.sync_state_path(bucket);
|
||||
if !path.exists() {
|
||||
return SyncState::default();
|
||||
}
|
||||
match std::fs::read_to_string(&path) {
|
||||
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
|
||||
Err(_) => SyncState::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn save_sync_state(&self, bucket: &str, state: &SyncState) {
|
||||
let path = self.sync_state_path(bucket);
|
||||
if let Some(parent) = path.parent() {
|
||||
let _ = std::fs::create_dir_all(parent);
|
||||
}
|
||||
if let Ok(text) = serde_json::to_string_pretty(state) {
|
||||
let _ = std::fs::write(&path, text);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Current UNIX time as fractional seconds; 0.0 if the clock predates epoch.
fn now_secs() -> f64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0.0, |d| d.as_secs_f64())
}
|
||||
|
||||
fn is_not_found_error<E: std::fmt::Debug>(err: &aws_sdk_s3::error::SdkError<E>) -> bool {
|
||||
let msg = format!("{:?}", err);
|
||||
msg.contains("NoSuchBucket")
|
||||
|| msg.contains("code: Some(\"NotFound\")")
|
||||
|| msg.contains("code: Some(\"NoSuchBucket\")")
|
||||
|| msg.contains("status: 404")
|
||||
}
|
||||
203
crates/myfsio-server/src/services/system_metrics.rs
Normal file
203
crates/myfsio-server/src/services/system_metrics.rs
Normal file
@@ -0,0 +1,203 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use myfsio_storage::fs_backend::FsStorageBackend;
|
||||
use myfsio_storage::traits::StorageEngine;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use sysinfo::{Disks, System};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SystemMetricsConfig {
|
||||
pub interval_minutes: u64,
|
||||
pub retention_hours: u64,
|
||||
}
|
||||
|
||||
impl Default for SystemMetricsConfig {
    /// Defaults: sample every 5 minutes, keep 24 hours of history.
    fn default() -> Self {
        Self {
            interval_minutes: 5,
            retention_hours: 24,
        }
    }
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SystemMetricsSnapshot {
|
||||
pub timestamp: DateTime<Utc>,
|
||||
pub cpu_percent: f64,
|
||||
pub memory_percent: f64,
|
||||
pub disk_percent: f64,
|
||||
pub storage_bytes: u64,
|
||||
}
|
||||
|
||||
/// Periodically samples host metrics and keeps a bounded, JSON-persisted
/// history.
pub struct SystemMetricsService {
    /// Root path sampled for disk/storage figures.
    storage_root: PathBuf,
    storage: Arc<FsStorageBackend>,
    config: SystemMetricsConfig,
    /// In-memory snapshot history, pruned to the retention window.
    history: Arc<RwLock<Vec<SystemMetricsSnapshot>>>,
    /// Path of the persisted history file.
    history_path: PathBuf,
}
|
||||
|
||||
impl SystemMetricsService {
    /// Build the service, loading any persisted history from
    /// `<storage_root>/.myfsio.sys/config/metrics_history.json` and pruning
    /// entries older than the configured retention window.
    pub fn new(
        storage_root: &Path,
        storage: Arc<FsStorageBackend>,
        config: SystemMetricsConfig,
    ) -> Self {
        let history_path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("metrics_history.json");

        // Best-effort load: any read or parse failure silently starts with an
        // empty history. The on-disk format is {"history": [...]}.
        let mut history = if history_path.exists() {
            std::fs::read_to_string(&history_path)
                .ok()
                .and_then(|s| serde_json::from_str::<serde_json::Value>(&s).ok())
                .and_then(|v| {
                    v.get("history").and_then(|h| {
                        serde_json::from_value::<Vec<SystemMetricsSnapshot>>(h.clone()).ok()
                    })
                })
                .unwrap_or_default()
        } else {
            Vec::new()
        };
        prune_history(&mut history, config.retention_hours);

        Self {
            storage_root: storage_root.to_path_buf(),
            storage,
            config,
            history: Arc::new(RwLock::new(history)),
            history_path,
        }
    }

    /// Return a copy of the history, optionally narrowed to the last `hours`
    /// (falls back to the configured retention window).
    pub async fn get_history(&self, hours: Option<u64>) -> Vec<SystemMetricsSnapshot> {
        let mut history = self.history.read().await.clone();
        prune_history(&mut history, hours.unwrap_or(self.config.retention_hours));
        history
    }

    /// Collect one snapshot, append it, prune old entries, persist to disk.
    async fn take_snapshot(&self) {
        let snapshot = collect_snapshot(&self.storage_root, &self.storage).await;
        let mut history = self.history.write().await;
        history.push(snapshot);
        prune_history(&mut history, self.config.retention_hours);
        // Release the write lock before save_history re-acquires a read lock;
        // otherwise save_history would deadlock on the RwLock.
        drop(history);
        self.save_history().await;
    }

    /// Best-effort persistence of the in-memory history; all I/O errors are
    /// deliberately ignored (metrics history is non-critical).
    async fn save_history(&self) {
        let history = self.history.read().await;
        let data = json!({ "history": *history });
        if let Some(parent) = self.history_path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        let _ = std::fs::write(
            &self.history_path,
            serde_json::to_string_pretty(&data).unwrap_or_default(),
        );
    }

    /// Spawn the periodic sampling loop; returns its task handle.
    ///
    /// NOTE(review): one snapshot is taken up front and tokio's interval also
    /// fires its first tick immediately, so startup records two samples
    /// back-to-back — confirm this is intended.
    pub fn start_background(self: Arc<Self>) -> tokio::task::JoinHandle<()> {
        let interval =
            std::time::Duration::from_secs(self.config.interval_minutes.saturating_mul(60));
        tokio::spawn(async move {
            self.take_snapshot().await;
            let mut timer = tokio::time::interval(interval);
            loop {
                timer.tick().await;
                self.take_snapshot().await;
            }
        })
    }
}
|
||||
|
||||
fn prune_history(history: &mut Vec<SystemMetricsSnapshot>, retention_hours: u64) {
|
||||
let cutoff = Utc::now() - chrono::Duration::hours(retention_hours as i64);
|
||||
history.retain(|item| item.timestamp > cutoff);
|
||||
}
|
||||
|
||||
/// Take a one-shot (cpu %, memory %) reading.
///
/// CPU usage needs two refreshes separated by sysinfo's minimum update
/// interval, so this function blocks the calling thread for
/// `MINIMUM_CPU_UPDATE_INTERVAL`; callers on an async runtime should offload
/// it to a blocking thread.
fn sample_system_now() -> (f64, f64) {
    let mut system = System::new();
    system.refresh_cpu_usage();
    std::thread::sleep(sysinfo::MINIMUM_CPU_UPDATE_INTERVAL);
    system.refresh_cpu_usage();
    system.refresh_memory();

    let cpu_percent = system.global_cpu_usage() as f64;
    // Guard against a zero total (e.g. unsupported platform) to avoid NaN.
    let memory_percent = if system.total_memory() > 0 {
        (system.used_memory() as f64 / system.total_memory() as f64) * 100.0
    } else {
        0.0
    };
    (cpu_percent, memory_percent)
}
|
||||
|
||||
/// Canonicalize `path` for comparison against mount points: resolve it when
/// possible, strip the Windows verbatim (`\\?\`) prefix, and lowercase.
/// A path that cannot be canonicalized (e.g. does not exist) is used as-is.
fn normalize_path_for_mount(path: &Path) -> String {
    let resolved = match path.canonicalize() {
        Ok(p) => p,
        Err(_) => path.to_path_buf(),
    };
    let text = resolved.to_string_lossy().into_owned();
    match text.strip_prefix(r"\\?\") {
        Some(rest) => rest.to_lowercase(),
        None => text.to_lowercase(),
    }
}
|
||||
|
||||
/// Return (total_bytes, available_bytes) of the disk backing `path`.
///
/// Chooses the mounted disk whose normalized mount point is the longest
/// string prefix of the normalized path; returns (0, 0) when none matches.
fn sample_disk(path: &Path) -> (u64, u64) {
    let disks = Disks::new_with_refreshed_list();
    let path_str = normalize_path_for_mount(path);
    // (mount-point length, total, free) of the best match so far.
    let mut best: Option<(usize, u64, u64)> = None;

    for disk in disks.list() {
        let mount_raw = disk.mount_point().to_string_lossy().to_string();
        // Normalize the mount point the same way the path was normalized:
        // drop the Windows verbatim prefix and lowercase.
        let mount = mount_raw
            .strip_prefix(r"\\?\")
            .unwrap_or(&mount_raw)
            .to_lowercase();
        let total = disk.total_space();
        let free = disk.available_space();
        // NOTE(review): plain string-prefix matching can pair "/data23" with a
        // "/data2" mount; component-wise comparison would be stricter —
        // confirm whether that matters for supported layouts.
        if path_str.starts_with(&mount) {
            let len = mount.len();
            match best {
                // Keep the current best when its mount prefix is longer.
                Some((best_len, _, _)) if len <= best_len => {}
                _ => best = Some((len, total, free)),
            }
        }
    }

    best.map(|(_, total, free)| (total, free)).unwrap_or((0, 0))
}
|
||||
|
||||
async fn collect_snapshot(
|
||||
storage_root: &Path,
|
||||
storage: &Arc<FsStorageBackend>,
|
||||
) -> SystemMetricsSnapshot {
|
||||
let (cpu_percent, memory_percent) = sample_system_now();
|
||||
let (disk_total, disk_free) = sample_disk(storage_root);
|
||||
let disk_percent = if disk_total > 0 {
|
||||
((disk_total - disk_free) as f64 / disk_total as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let mut storage_bytes = 0u64;
|
||||
let buckets = storage.list_buckets().await.unwrap_or_default();
|
||||
for bucket in buckets {
|
||||
if let Ok(stats) = storage.bucket_stats(&bucket.name).await {
|
||||
storage_bytes += stats.total_bytes();
|
||||
}
|
||||
}
|
||||
|
||||
SystemMetricsSnapshot {
|
||||
timestamp: Utc::now(),
|
||||
cpu_percent: round2(cpu_percent),
|
||||
memory_percent: round2(memory_percent),
|
||||
disk_percent: round2(disk_percent),
|
||||
storage_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
/// Round `value` to two decimal places (half-away-from-zero, per `f64::round`).
fn round2(value: f64) -> f64 {
    let scaled = value * 100.0;
    scaled.round() / 100.0
}
|
||||
197
crates/myfsio-server/src/services/website_domains.rs
Normal file
197
crates/myfsio-server/src/services/website_domains.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// In-memory form of the website-domain config: domain -> bucket name.
/// `deny_unknown_fields` is load-bearing: it lets the untagged
/// `DomainDataFile` enum distinguish this wrapped layout from the legacy
/// flat map.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(deny_unknown_fields)]
struct DomainData {
    // Keys are domains (normalized to lowercase on load), values are buckets.
    #[serde(default)]
    mappings: HashMap<String, String>,
}
|
||||
|
||||
/// The two accepted layouts of website_domains.json.
///
/// Untagged deserialization tries `Wrapped` ({"mappings": {...}}) first;
/// because `DomainData` denies unknown fields, any other object falls
/// through to the legacy `Flat` domain -> bucket map.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum DomainDataFile {
    Wrapped(DomainData),
    Flat(HashMap<String, String>),
}
|
||||
|
||||
impl DomainDataFile {
|
||||
fn into_domain_data(self) -> DomainData {
|
||||
match self {
|
||||
Self::Wrapped(data) => data,
|
||||
Self::Flat(mappings) => DomainData {
|
||||
mappings: mappings
|
||||
.into_iter()
|
||||
.map(|(domain, bucket)| (normalize_domain(&domain), bucket))
|
||||
.collect(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Maps custom website domains to bucket names, persisted as JSON under the
/// storage root's config directory.
pub struct WebsiteDomainStore {
    // <storage_root>/.myfsio.sys/config/website_domains.json
    path: PathBuf,
    data: Arc<RwLock<DomainData>>,
}
|
||||
|
||||
impl WebsiteDomainStore {
    /// Load mappings from `<storage_root>/.myfsio.sys/config/website_domains.json`.
    /// Accepts both the wrapped ({"mappings": {...}}) and legacy flat layouts;
    /// a missing, unreadable, or malformed file yields an empty mapping.
    pub fn new(storage_root: &std::path::Path) -> Self {
        let path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("website_domains.json");
        let data = if path.exists() {
            std::fs::read_to_string(&path)
                .ok()
                .and_then(|s| serde_json::from_str::<DomainDataFile>(&s).ok())
                .map(DomainDataFile::into_domain_data)
                .unwrap_or_default()
        } else {
            DomainData::default()
        };
        Self {
            path,
            data: Arc::new(RwLock::new(data)),
        }
    }

    /// Persist the current mappings in the flat format (domain -> bucket).
    /// Best-effort: directory-creation and write errors are ignored.
    fn save(&self) {
        let data = self.data.read();
        if let Some(parent) = self.path.parent() {
            let _ = std::fs::create_dir_all(parent);
        }
        // Note: only `mappings` is written, i.e. the flat legacy layout.
        if let Ok(json) = serde_json::to_string_pretty(&data.mappings) {
            let _ = std::fs::write(&self.path, json);
        }
    }

    /// All mappings as `{"domain": ..., "bucket": ...}` JSON objects
    /// (unspecified order — backed by a HashMap).
    pub fn list_all(&self) -> Vec<serde_json::Value> {
        self.data
            .read()
            .mappings
            .iter()
            .map(|(domain, bucket)| {
                serde_json::json!({
                    "domain": domain,
                    "bucket": bucket,
                })
            })
            .collect()
    }

    /// Bucket mapped to `domain`, if any. The query is normalized
    /// (trimmed + lowercased) before lookup.
    pub fn get_bucket(&self, domain: &str) -> Option<String> {
        let domain = normalize_domain(domain);
        self.data.read().mappings.get(&domain).cloned()
    }

    /// Insert or replace the mapping for `domain` (normalized) and persist.
    pub fn set_mapping(&self, domain: &str, bucket: &str) {
        let domain = normalize_domain(domain);
        self.data
            .write()
            .mappings
            .insert(domain, bucket.to_string());
        self.save();
    }

    /// Remove the mapping for `domain` (normalized). Persists only when an
    /// entry was actually removed; returns whether one was.
    pub fn delete_mapping(&self, domain: &str) -> bool {
        let domain = normalize_domain(domain);
        let removed = self.data.write().mappings.remove(&domain).is_some();
        if removed {
            self.save();
        }
        removed
    }
}
|
||||
|
||||
/// Canonical form of a domain for storage and lookup: surrounding
/// whitespace removed, ASCII letters lowercased.
pub fn normalize_domain(domain: &str) -> String {
    let trimmed = domain.trim();
    trimmed.to_ascii_lowercase()
}
|
||||
|
||||
/// Syntactic validity check for a DNS hostname:
/// - total length 1..=253 bytes,
/// - at least two dot-separated labels,
/// - each label 1..=63 bytes of ASCII alphanumerics or '-',
///   not starting or ending with '-'.
pub fn is_valid_domain(domain: &str) -> bool {
    if domain.is_empty() || domain.len() > 253 {
        return false;
    }
    let labels: Vec<&str> = domain.split('.').collect();
    if labels.len() < 2 {
        return false;
    }
    labels.iter().all(|label| {
        let length_ok = !label.is_empty() && label.len() <= 63;
        let chars_ok = label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-');
        let edges_ok = !label.starts_with('-') && !label.ends_with('-');
        length_ok && chars_ok && edges_ok
    })
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::WebsiteDomainStore;
    use serde_json::json;
    use tempfile::tempdir;

    // A legacy flat file ({"Domain": "bucket"}) loads, and its key is
    // normalized to lowercase so a lowercase query still finds it.
    #[test]
    fn loads_legacy_flat_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        let config_dir = tmp.path().join(".myfsio.sys").join("config");
        std::fs::create_dir_all(&config_dir).expect("create config dir");
        std::fs::write(
            config_dir.join("website_domains.json"),
            r#"{"Example.COM":"site-bucket"}"#,
        )
        .expect("write config");

        let store = WebsiteDomainStore::new(tmp.path());

        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    // The wrapped layout ({"mappings": {...}}) also loads.
    #[test]
    fn loads_wrapped_mapping_file() {
        let tmp = tempdir().expect("tempdir");
        let config_dir = tmp.path().join(".myfsio.sys").join("config");
        std::fs::create_dir_all(&config_dir).expect("create config dir");
        std::fs::write(
            config_dir.join("website_domains.json"),
            r#"{"mappings":{"example.com":"site-bucket"}}"#,
        )
        .expect("write config");

        let store = WebsiteDomainStore::new(tmp.path());

        assert_eq!(
            store.get_bucket("example.com"),
            Some("site-bucket".to_string())
        );
    }

    // Saving always writes the flat layout, with keys normalized.
    #[test]
    fn saves_in_shared_plain_mapping_format() {
        let tmp = tempdir().expect("tempdir");
        let store = WebsiteDomainStore::new(tmp.path());

        store.set_mapping("Example.COM", "site-bucket");

        let saved = std::fs::read_to_string(
            tmp.path()
                .join(".myfsio.sys")
                .join("config")
                .join("website_domains.json"),
        )
        .expect("read config");
        let json: serde_json::Value = serde_json::from_str(&saved).expect("parse config");

        assert_eq!(json, json!({"example.com": "site-bucket"}));
    }
}
|
||||
133
crates/myfsio-server/src/session.rs
Normal file
133
crates/myfsio-server/src/session.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
|
||||
use parking_lot::RwLock;
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Name of the cookie that carries the session id.
pub const SESSION_COOKIE_NAME: &str = "myfsio_session";
/// Form field expected to hold the CSRF token.
pub const CSRF_FIELD_NAME: &str = "csrf_token";
/// HTTP header that may carry the CSRF token instead of the form field.
pub const CSRF_HEADER_NAME: &str = "x-csrf-token";

// Entropy, in raw bytes, for generated tokens (base64url-encoded on output).
const SESSION_ID_BYTES: usize = 32;
const CSRF_TOKEN_BYTES: usize = 32;
|
||||
|
||||
/// One-shot UI notification queued on the session and consumed at the next
/// page render (see `SessionData::take_flash`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FlashMessage {
    /// Severity/style tag (e.g. used by templates to pick a CSS class —
    /// exact values are defined by callers).
    pub category: String,
    pub message: String,
}
|
||||
|
||||
/// Per-session state held in memory by `SessionStore`.
#[derive(Clone, Debug)]
pub struct SessionData {
    /// `Some` once the user has logged in; `None` for anonymous sessions.
    pub user_id: Option<String>,
    pub display_name: Option<String>,
    /// Token compared against form/header values to reject cross-site posts.
    pub csrf_token: String,
    /// Pending one-shot UI messages.
    pub flash: Vec<FlashMessage>,
    /// Free-form key/value scratch space for handlers.
    pub extra: HashMap<String, String>,
    // Monotonic timestamp used for TTL expiry; not serializable, so sessions
    // do not survive process restarts.
    last_accessed: Instant,
}
|
||||
|
||||
impl SessionData {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
user_id: None,
|
||||
display_name: None,
|
||||
csrf_token: generate_token(CSRF_TOKEN_BYTES),
|
||||
flash: Vec::new(),
|
||||
extra: HashMap::new(),
|
||||
last_accessed: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_authenticated(&self) -> bool {
|
||||
self.user_id.is_some()
|
||||
}
|
||||
|
||||
pub fn push_flash(&mut self, category: impl Into<String>, message: impl Into<String>) {
|
||||
self.flash.push(FlashMessage {
|
||||
category: category.into(),
|
||||
message: message.into(),
|
||||
});
|
||||
}
|
||||
|
||||
pub fn take_flash(&mut self) -> Vec<FlashMessage> {
|
||||
std::mem::take(&mut self.flash)
|
||||
}
|
||||
|
||||
pub fn rotate_csrf(&mut self) {
|
||||
self.csrf_token = generate_token(CSRF_TOKEN_BYTES);
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SessionData {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// In-memory session table with sliding-window expiry: a session lives for
/// `ttl` after its most recent access.
pub struct SessionStore {
    // session id -> session data
    sessions: RwLock<HashMap<String, SessionData>>,
    ttl: Duration,
}
|
||||
|
||||
impl SessionStore {
|
||||
pub fn new(ttl: Duration) -> Self {
|
||||
Self {
|
||||
sessions: RwLock::new(HashMap::new()),
|
||||
ttl,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create(&self) -> (String, SessionData) {
|
||||
let id = generate_token(SESSION_ID_BYTES);
|
||||
let data = SessionData::new();
|
||||
self.sessions.write().insert(id.clone(), data.clone());
|
||||
(id, data)
|
||||
}
|
||||
|
||||
pub fn get(&self, id: &str) -> Option<SessionData> {
|
||||
let mut guard = self.sessions.write();
|
||||
let entry = guard.get_mut(id)?;
|
||||
if entry.last_accessed.elapsed() > self.ttl {
|
||||
guard.remove(id);
|
||||
return None;
|
||||
}
|
||||
entry.last_accessed = Instant::now();
|
||||
Some(entry.clone())
|
||||
}
|
||||
|
||||
pub fn save(&self, id: &str, data: SessionData) {
|
||||
let mut guard = self.sessions.write();
|
||||
let mut updated = data;
|
||||
updated.last_accessed = Instant::now();
|
||||
guard.insert(id.to_string(), updated);
|
||||
}
|
||||
|
||||
pub fn destroy(&self, id: &str) {
|
||||
self.sessions.write().remove(id);
|
||||
}
|
||||
|
||||
pub fn sweep(&self) {
|
||||
let ttl = self.ttl;
|
||||
let mut guard = self.sessions.write();
|
||||
guard.retain(|_, data| data.last_accessed.elapsed() <= ttl);
|
||||
}
|
||||
}
|
||||
|
||||
/// Shared, reference-counted handle to the process-wide session store.
pub type SharedSessionStore = Arc<SessionStore>;
|
||||
|
||||
/// Generate `bytes` random bytes and return them base64url-encoded without
/// padding (output is ceil(bytes * 4 / 3) characters).
///
/// NOTE(review): token security rests on `rand::thread_rng()` being a
/// CSPRNG — true for the `rand` crate's default, but worth pinning in a
/// comment if the dependency is ever swapped.
pub fn generate_token(bytes: usize) -> String {
    let mut buf = vec![0u8; bytes];
    rand::thread_rng().fill_bytes(&mut buf);
    URL_SAFE_NO_PAD.encode(&buf)
}
|
||||
|
||||
/// Compare two CSRF tokens in constant time (for equal-length inputs).
///
/// The early length check short-circuits on mismatched lengths; that leaks
/// only the length, which for fixed-size generated tokens is public anyway.
pub fn csrf_tokens_match(a: &str, b: &str) -> bool {
    if a.len() != b.len() {
        return false;
    }
    subtle::ConstantTimeEq::ct_eq(a.as_bytes(), b.as_bytes()).into()
}
|
||||
240
crates/myfsio-server/src/state.rs
Normal file
240
crates/myfsio-server/src/state.rs
Normal file
@@ -0,0 +1,240 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::config::ServerConfig;
|
||||
use crate::services::access_logging::AccessLoggingService;
|
||||
use crate::services::gc::GcService;
|
||||
use crate::services::integrity::IntegrityService;
|
||||
use crate::services::metrics::MetricsService;
|
||||
use crate::services::replication::ReplicationManager;
|
||||
use crate::services::site_registry::SiteRegistry;
|
||||
use crate::services::site_sync::SiteSyncWorker;
|
||||
use crate::services::system_metrics::SystemMetricsService;
|
||||
use crate::services::website_domains::WebsiteDomainStore;
|
||||
use crate::session::SessionStore;
|
||||
use crate::stores::connections::ConnectionStore;
|
||||
use crate::templates::TemplateEngine;
|
||||
use myfsio_auth::iam::IamService;
|
||||
use myfsio_crypto::encryption::{EncryptionConfig, EncryptionService};
|
||||
use myfsio_crypto::kms::KmsService;
|
||||
use myfsio_storage::fs_backend::{FsStorageBackend, FsStorageBackendConfig};
|
||||
|
||||
/// Shared application state handed to request handlers.
///
/// Every heavyweight member is behind `Arc`, so cloning `AppState` is cheap.
/// `Option` fields are `None` when the corresponding feature is disabled in
/// `ServerConfig` (or, for encryption/KMS/templates, failed to initialize).
#[derive(Clone)]
pub struct AppState {
    pub config: ServerConfig,
    pub storage: Arc<FsStorageBackend>,
    pub iam: Arc<IamService>,
    // Populated only by `new_with_encryption`; `new` leaves these None.
    pub encryption: Option<Arc<EncryptionService>>,
    pub kms: Option<Arc<KmsService>>,
    pub gc: Option<Arc<GcService>>,
    pub integrity: Option<Arc<IntegrityService>>,
    pub metrics: Option<Arc<MetricsService>>,
    pub system_metrics: Option<Arc<SystemMetricsService>>,
    pub site_registry: Option<Arc<SiteRegistry>>,
    pub website_domains: Option<Arc<WebsiteDomainStore>>,
    pub connections: Arc<ConnectionStore>,
    pub replication: Arc<ReplicationManager>,
    pub site_sync: Option<Arc<SiteSyncWorker>>,
    pub templates: Option<Arc<TemplateEngine>>,
    pub sessions: Arc<SessionStore>,
    pub access_logging: Arc<AccessLoggingService>,
}
|
||||
|
||||
impl AppState {
    /// Construct all services synchronously from `config`.
    ///
    /// Encryption and KMS are left as `None` here because their setup is
    /// async — use [`AppState::new_with_encryption`] to populate them.
    pub fn new(config: ServerConfig) -> Self {
        let storage = Arc::new(FsStorageBackend::new_with_config(
            config.storage_root.clone(),
            FsStorageBackendConfig {
                object_key_max_length_bytes: config.object_key_max_length_bytes,
                object_cache_max_size: config.object_cache_max_size,
                bucket_config_cache_ttl: Duration::from_secs_f64(
                    config.bucket_config_cache_ttl_seconds,
                ),
            },
        ));
        let iam = Arc::new(IamService::new_with_secret(
            config.iam_config_path.clone(),
            config.secret_key.clone(),
        ));

        // Optional background garbage collector.
        let gc = if config.gc_enabled {
            Some(Arc::new(GcService::new(
                config.storage_root.clone(),
                crate::services::gc::GcConfig {
                    interval_hours: config.gc_interval_hours,
                    temp_file_max_age_hours: config.gc_temp_file_max_age_hours,
                    multipart_max_age_days: config.gc_multipart_max_age_days,
                    lock_file_max_age_hours: config.gc_lock_file_max_age_hours,
                    dry_run: config.gc_dry_run,
                },
            )))
        } else {
            None
        };

        // Optional data-integrity scanner (uses default scan settings).
        let integrity = if config.integrity_enabled {
            Some(Arc::new(IntegrityService::new(
                storage.clone(),
                &config.storage_root,
                crate::services::integrity::IntegrityConfig::default(),
            )))
        } else {
            None
        };

        // Optional request/usage metrics service.
        let metrics = if config.metrics_enabled {
            Some(Arc::new(MetricsService::new(
                &config.storage_root,
                crate::services::metrics::MetricsConfig {
                    interval_minutes: config.metrics_interval_minutes,
                    retention_hours: config.metrics_retention_hours,
                },
            )))
        } else {
            None
        };

        // Optional CPU/memory/disk history sampler (separate flag and
        // cadence from the request-metrics service above).
        let system_metrics = if config.metrics_history_enabled {
            Some(Arc::new(SystemMetricsService::new(
                &config.storage_root,
                storage.clone(),
                crate::services::system_metrics::SystemMetricsConfig {
                    interval_minutes: config.metrics_history_interval_minutes,
                    retention_hours: config.metrics_history_retention_hours,
                },
            )))
        } else {
            None
        };

        // Site registry is always constructed; the local site entry is only
        // registered when both a site id and endpoint are configured.
        let site_registry = {
            let registry = SiteRegistry::new(&config.storage_root);
            if let (Some(site_id), Some(endpoint)) =
                (config.site_id.as_deref(), config.site_endpoint.as_deref())
            {
                registry.set_local_site(crate::services::site_registry::SiteInfo {
                    site_id: site_id.to_string(),
                    endpoint: endpoint.to_string(),
                    region: config.site_region.clone(),
                    priority: config.site_priority,
                    display_name: site_id.to_string(),
                    created_at: Some(chrono::Utc::now().to_rfc3339()),
                });
            }
            Some(Arc::new(registry))
        };

        // Custom-domain -> bucket mapping, only for website hosting.
        let website_domains = if config.website_hosting_enabled {
            Some(Arc::new(WebsiteDomainStore::new(&config.storage_root)))
        } else {
            None
        };

        let connections = Arc::new(ConnectionStore::new(&config.storage_root));

        let replication = Arc::new(ReplicationManager::new(
            storage.clone(),
            connections.clone(),
            &config.storage_root,
            Duration::from_secs(config.replication_connect_timeout_secs),
            Duration::from_secs(config.replication_read_timeout_secs),
            config.replication_max_retries,
            config.replication_streaming_threshold_bytes,
            config.replication_max_failures_per_bucket,
        ));

        // Optional cross-site sync worker; shares storage, connections, and
        // the replication manager built above.
        let site_sync = if config.site_sync_enabled {
            Some(Arc::new(SiteSyncWorker::new(
                storage.clone(),
                connections.clone(),
                replication.clone(),
                config.storage_root.clone(),
                config.site_sync_interval_secs,
                config.site_sync_batch_size,
                Duration::from_secs(config.site_sync_connect_timeout_secs),
                Duration::from_secs(config.site_sync_read_timeout_secs),
                config.site_sync_max_retries,
                config.site_sync_clock_skew_tolerance,
            )))
        } else {
            None
        };

        let templates = init_templates(&config.templates_dir);
        let access_logging = Arc::new(AccessLoggingService::new(&config.storage_root));
        // Session lifetime config is in days; convert to seconds.
        let session_ttl = Duration::from_secs(config.session_lifetime_days.saturating_mul(86_400));
        Self {
            config,
            storage,
            iam,
            encryption: None,
            kms: None,
            gc,
            integrity,
            metrics,
            system_metrics,
            site_registry,
            website_domains,
            connections,
            replication,
            site_sync,
            templates,
            sessions: Arc::new(SessionStore::new(session_ttl)),
            access_logging,
        }
    }

    /// Like [`AppState::new`] but additionally initializes KMS and the
    /// encryption service. Either failing is logged and degrades to `None`
    /// (the server starts without that capability) rather than aborting.
    pub async fn new_with_encryption(config: ServerConfig) -> Self {
        let mut state = Self::new(config.clone());

        let keys_dir = config.storage_root.join(".myfsio.sys").join("keys");

        let kms = if config.kms_enabled {
            match KmsService::new(&keys_dir).await {
                Ok(k) => Some(Arc::new(k)),
                Err(e) => {
                    tracing::error!("Failed to initialize KMS: {}", e);
                    None
                }
            }
        } else {
            None
        };

        // Encryption can run with or without KMS (it receives kms.clone()).
        let encryption = if config.encryption_enabled {
            match myfsio_crypto::kms::load_or_create_master_key(&keys_dir).await {
                Ok(master_key) => Some(Arc::new(EncryptionService::with_config(
                    master_key,
                    kms.clone(),
                    EncryptionConfig {
                        chunk_size: config.encryption_chunk_size_bytes,
                    },
                ))),
                Err(e) => {
                    tracing::error!("Failed to initialize encryption: {}", e);
                    None
                }
            }
        } else {
            None
        };

        state.encryption = encryption;
        state.kms = kms;
        state
    }
}
|
||||
|
||||
/// Build the UI template engine from `<templates_dir>/*.html`.
///
/// Backslashes are normalized to forward slashes so the glob also works with
/// Windows-style paths. On failure the error is logged and `None` is
/// returned (the server runs without the HTML UI) instead of aborting.
fn init_templates(templates_dir: &std::path::Path) -> Option<Arc<TemplateEngine>> {
    let glob = format!("{}/*.html", templates_dir.display()).replace('\\', "/");
    match TemplateEngine::new(&glob) {
        Ok(engine) => {
            // Register the UI route names so `url_for` works in templates.
            crate::handlers::ui_pages::register_ui_endpoints(&engine);
            Some(Arc::new(engine))
        }
        Err(e) => {
            tracing::error!("Template engine init failed: {}", e);
            None
        }
    }
}
|
||||
94
crates/myfsio-server/src/stores/connections.rs
Normal file
94
crates/myfsio-server/src/stores/connections.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Endpoint and credentials for a remote peer (S3-style: access/secret key
/// plus region), as stored in connections.json.
///
/// NOTE(review): `secret_key` is serialized in plain text — confirm the
/// config file's permissions are restricted appropriately.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RemoteConnection {
    /// Unique identifier used for lookup, replace, and delete.
    pub id: String,
    /// Human-readable label.
    pub name: String,
    pub endpoint_url: String,
    pub access_key: String,
    pub secret_key: String,
    /// Defaults to "us-east-1" when absent from the stored JSON.
    #[serde(default = "default_region")]
    pub region: String,
}
|
||||
|
||||
/// Serde default for `RemoteConnection::region`.
fn default_region() -> String {
    String::from("us-east-1")
}
|
||||
|
||||
/// JSON-file-backed list of remote connections with an in-memory cache.
pub struct ConnectionStore {
    // <storage_root>/.myfsio.sys/config/connections.json
    path: PathBuf,
    inner: Arc<RwLock<Vec<RemoteConnection>>>,
}
|
||||
|
||||
impl ConnectionStore {
    /// Load connections from `<storage_root>/.myfsio.sys/config/connections.json`
    /// (missing or malformed file yields an empty list).
    pub fn new(storage_root: &Path) -> Self {
        let path = storage_root
            .join(".myfsio.sys")
            .join("config")
            .join("connections.json");
        let inner = Arc::new(RwLock::new(load_from_disk(&path)));
        Self { path, inner }
    }

    /// Replace the in-memory list with whatever is currently on disk.
    pub fn reload(&self) {
        let loaded = load_from_disk(&self.path);
        *self.inner.write() = loaded;
    }

    /// Snapshot (clone) of all connections.
    pub fn list(&self) -> Vec<RemoteConnection> {
        self.inner.read().clone()
    }

    /// Connection with the given id, if any.
    pub fn get(&self, id: &str) -> Option<RemoteConnection> {
        self.inner.read().iter().find(|c| c.id == id).cloned()
    }

    /// Insert a connection, or replace the existing one with the same id,
    /// then persist. The inner scope releases the write lock before `save`
    /// takes its read lock.
    pub fn add(&self, connection: RemoteConnection) -> std::io::Result<()> {
        {
            let mut guard = self.inner.write();
            if let Some(existing) = guard.iter_mut().find(|c| c.id == connection.id) {
                *existing = connection;
            } else {
                guard.push(connection);
            }
        }
        self.save()
    }

    /// Remove the connection with `id`; persists only when something was
    /// actually removed. Returns whether a removal happened.
    pub fn delete(&self, id: &str) -> std::io::Result<bool> {
        let removed = {
            let mut guard = self.inner.write();
            let before = guard.len();
            guard.retain(|c| c.id != id);
            guard.len() != before
        };
        if removed {
            self.save()?;
        }
        Ok(removed)
    }

    /// Serialize the current list to disk as pretty-printed JSON, creating
    /// the config directory if needed.
    fn save(&self) -> std::io::Result<()> {
        if let Some(parent) = self.path.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let snapshot = self.inner.read().clone();
        let bytes = serde_json::to_vec_pretty(&snapshot)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
        std::fs::write(&self.path, bytes)
    }
}
|
||||
|
||||
fn load_from_disk(path: &Path) -> Vec<RemoteConnection> {
|
||||
if !path.exists() {
|
||||
return Vec::new();
|
||||
}
|
||||
match std::fs::read_to_string(path) {
|
||||
Ok(text) => serde_json::from_str(&text).unwrap_or_default(),
|
||||
Err(_) => Vec::new(),
|
||||
}
|
||||
}
|
||||
1
crates/myfsio-server/src/stores/mod.rs
Normal file
1
crates/myfsio-server/src/stores/mod.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod connections;
|
||||
355
crates/myfsio-server/src/templates.rs
Normal file
355
crates/myfsio-server/src/templates.rs
Normal file
@@ -0,0 +1,355 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use parking_lot::RwLock;
|
||||
use serde_json::Value;
|
||||
use tera::{Context, Error as TeraError, Tera};
|
||||
|
||||
/// Callback mapping an endpoint name plus template arguments to a URL path.
/// NOTE(review): no usage is visible in this file — confirm it is consumed
/// elsewhere or remove it.
pub type EndpointResolver =
    Arc<dyn Fn(&str, &HashMap<String, Value>) -> Option<String> + Send + Sync>;
|
||||
|
||||
/// Thin wrapper around Tera plus a runtime-registered endpoint table used by
/// the `url_for` template function. Clones share both via `Arc`.
#[derive(Clone)]
pub struct TemplateEngine {
    tera: Arc<RwLock<Tera>>,
    // endpoint name -> path template containing {param} placeholders
    endpoints: Arc<RwLock<HashMap<String, String>>>,
}
|
||||
|
||||
impl TemplateEngine {
    /// Compile every template matching `template_glob` and install the HTML
    /// escape function, custom filters, and template functions.
    pub fn new(template_glob: &str) -> Result<Self, TeraError> {
        let mut tera = Tera::new(template_glob)?;
        tera.set_escape_fn(html_escape);
        register_filters(&mut tera);

        // Shared endpoint table: `url_for` reads it at render time, so
        // endpoints may be registered after construction.
        let endpoints: Arc<RwLock<HashMap<String, String>>> = Arc::new(RwLock::new(HashMap::new()));

        register_functions(&mut tera, endpoints.clone());

        Ok(Self {
            tera: Arc::new(RwLock::new(tera)),
            endpoints,
        })
    }

    /// Map an endpoint name to a path template (e.g. "/buckets/{name}").
    pub fn register_endpoint(&self, name: &str, path_template: &str) {
        self.endpoints
            .write()
            .insert(name.to_string(), path_template.to_string());
    }

    /// Bulk form of [`TemplateEngine::register_endpoint`].
    pub fn register_endpoints(&self, pairs: &[(&str, &str)]) {
        let mut guard = self.endpoints.write();
        for (n, p) in pairs {
            guard.insert((*n).to_string(), (*p).to_string());
        }
    }

    /// Render template `name` with the given context.
    pub fn render(&self, name: &str, context: &Context) -> Result<String, TeraError> {
        self.tera.read().render(name, context)
    }

    /// Re-read all templates from disk (development convenience).
    pub fn reload(&self) -> Result<(), TeraError> {
        self.tera.write().full_reload()
    }
}
|
||||
|
||||
/// Escape the five HTML-significant characters so `input` can be embedded in
/// HTML text or attribute values without injection.
///
/// The replacement strings had been corrupted to the already-decoded
/// characters (e.g. the '&' arm pushed "&", and the '"' arm was the invalid
/// token sequence `"""`), which made the function an identity/non-compiling;
/// restored the proper HTML entities.
fn html_escape(input: &str) -> String {
    let mut out = String::with_capacity(input.len());
    for c in input.chars() {
        match c {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            '"' => out.push_str("&quot;"),
            '\'' => out.push_str("&#x27;"),
            _ => out.push(c),
        }
    }
    out
}
|
||||
|
||||
/// Install the custom Tera filters used by the UI templates.
fn register_filters(tera: &mut Tera) {
    tera.register_filter("format_datetime", format_datetime_filter);
    tera.register_filter("filesizeformat", filesizeformat_filter);
    // NOTE(review): this shadows Tera's built-in `slice` filter with our
    // variant (which also accepts strings) — confirm that is intentional.
    tera.register_filter("slice", slice_filter);
}
|
||||
|
||||
/// Install the `url_for` and `csrf_token` template functions.
///
/// `url_for` resolves an endpoint name against the shared `endpoints` table
/// (read at call time, so late registrations are visible); `csrf_token`
/// simply echoes the token passed in via its `token` argument.
fn register_functions(tera: &mut Tera, endpoints: Arc<RwLock<HashMap<String, String>>>) {
    let endpoints_for_url = endpoints.clone();
    tera.register_function(
        "url_for",
        move |args: &HashMap<String, Value>| -> tera::Result<Value> {
            let endpoint = args
                .get("endpoint")
                .and_then(|v| v.as_str())
                .ok_or_else(|| tera::Error::msg("url_for requires endpoint"))?;
            // Static assets are served from a fixed path, not the table.
            if endpoint == "static" {
                let filename = args.get("filename").and_then(|v| v.as_str()).unwrap_or("");
                return Ok(Value::String(format!("/static/{}", filename)));
            }
            // Unknown endpoints yield a sentinel path instead of erroring so
            // a missing registration doesn't break the whole page render.
            let path = match endpoints_for_url.read().get(endpoint) {
                Some(p) => p.clone(),
                None => {
                    return Ok(Value::String(format!("/__missing__/{}", endpoint)));
                }
            };
            Ok(Value::String(substitute_path_params(&path, args)))
        },
    );

    tera.register_function(
        "csrf_token",
        |args: &HashMap<String, Value>| -> tera::Result<Value> {
            if let Some(token) = args.get("token").and_then(|v| v.as_str()) {
                return Ok(Value::String(token.to_string()));
            }
            // No token supplied: render empty rather than failing.
            Ok(Value::String(String::new()))
        },
    );
}
|
||||
|
||||
/// Fill `{name}` placeholders in `template` with matching args
/// (percent-encoded); any args without a placeholder become query-string
/// parameters. The "endpoint" and "filename" keys are reserved by `url_for`
/// and skipped.
///
/// NOTE(review): `args` is a HashMap, so query-parameter order is
/// nondeterministic between calls — confirm no caller/test depends on a
/// stable order.
fn substitute_path_params(template: &str, args: &HashMap<String, Value>) -> String {
    let mut path = template.to_string();
    let mut query: Vec<(String, String)> = Vec::new();
    for (k, v) in args {
        if k == "endpoint" || k == "filename" {
            continue;
        }
        let value_str = value_to_string(v);
        // "{{{}}}" renders as "{k}", the placeholder syntax.
        let placeholder = format!("{{{}}}", k);
        if path.contains(&placeholder) {
            let encoded = urlencode_path(&value_str);
            path = path.replace(&placeholder, &encoded);
        } else {
            query.push((k.clone(), value_str));
        }
    }
    if !query.is_empty() {
        let qs: Vec<String> = query
            .into_iter()
            .map(|(k, v)| format!("{}={}", urlencode_query(&k), urlencode_query(&v)))
            .collect();
        path.push('?');
        path.push_str(&qs.join("&"));
    }
    path
}
|
||||
|
||||
fn value_to_string(v: &Value) -> String {
|
||||
match v {
|
||||
Value::String(s) => s.clone(),
|
||||
Value::Number(n) => n.to_string(),
|
||||
Value::Bool(b) => b.to_string(),
|
||||
Value::Null => String::new(),
|
||||
other => other.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Characters left unescaped when percent-encoding: ASCII alphanumerics plus
/// the RFC 3986 "unreserved" marks `-`, `_`, `.`, `~`.
const UNRESERVED: &percent_encoding::AsciiSet = &percent_encoding::NON_ALPHANUMERIC
    .remove(b'-')
    .remove(b'_')
    .remove(b'.')
    .remove(b'~');
|
||||
|
||||
/// Percent-encode a value for use inside a URL path segment.
fn urlencode_path(s: &str) -> String {
    percent_encoding::utf8_percent_encode(s, UNRESERVED).to_string()
}
|
||||
|
||||
/// Percent-encode a value for use in a query string.
/// Currently identical to `urlencode_path`; query strings could legally
/// keep more characters, but the stricter unreserved-only set is safe.
fn urlencode_query(s: &str) -> String {
    percent_encoding::utf8_percent_encode(s, UNRESERVED).to_string()
}
|
||||
|
||||
/// Tera filter: format a datetime value with a strftime-style `format`
/// argument (default "%Y-%m-%d %H:%M:%S UTC").
///
/// Accepts RFC 3339 strings, RFC 2822 strings (as a fallback), or a numeric
/// Unix timestamp in seconds (fractional part becomes nanoseconds).
/// Unparseable input is returned unchanged so templates degrade gracefully.
fn format_datetime_filter(value: &Value, args: &HashMap<String, Value>) -> tera::Result<Value> {
    let format = args
        .get("format")
        .and_then(|v| v.as_str())
        .unwrap_or("%Y-%m-%d %H:%M:%S UTC");

    let dt: Option<DateTime<Utc>> = match value {
        Value::String(s) => DateTime::parse_from_rfc3339(s)
            .ok()
            .map(|d| d.with_timezone(&Utc))
            .or_else(|| {
                DateTime::parse_from_rfc2822(s)
                    .ok()
                    .map(|d| d.with_timezone(&Utc))
            }),
        Value::Number(n) => n.as_f64().and_then(|f| {
            let secs = f as i64;
            let nanos = ((f - secs as f64) * 1_000_000_000.0) as u32;
            DateTime::<Utc>::from_timestamp(secs, nanos)
        }),
        _ => None,
    };

    match dt {
        Some(d) => Ok(Value::String(d.format(format).to_string())),
        // Pass unparseable values through unchanged rather than erroring.
        None => Ok(value.clone()),
    }
}
|
||||
|
||||
fn slice_filter(value: &Value, args: &HashMap<String, Value>) -> tera::Result<Value> {
|
||||
let start = args.get("start").and_then(|v| v.as_i64()).unwrap_or(0);
|
||||
let end = args.get("end").and_then(|v| v.as_i64());
|
||||
|
||||
match value {
|
||||
Value::String(s) => {
|
||||
let chars: Vec<char> = s.chars().collect();
|
||||
let len = chars.len() as i64;
|
||||
let norm = |i: i64| -> usize {
|
||||
if i < 0 {
|
||||
(len + i).max(0) as usize
|
||||
} else {
|
||||
i.min(len) as usize
|
||||
}
|
||||
};
|
||||
let s_idx = norm(start);
|
||||
let e_idx = match end {
|
||||
Some(e) => norm(e),
|
||||
None => len as usize,
|
||||
};
|
||||
let e_idx = e_idx.max(s_idx);
|
||||
Ok(Value::String(chars[s_idx..e_idx].iter().collect()))
|
||||
}
|
||||
Value::Array(arr) => {
|
||||
let len = arr.len() as i64;
|
||||
let norm = |i: i64| -> usize {
|
||||
if i < 0 {
|
||||
(len + i).max(0) as usize
|
||||
} else {
|
||||
i.min(len) as usize
|
||||
}
|
||||
};
|
||||
let s_idx = norm(start);
|
||||
let e_idx = match end {
|
||||
Some(e) => norm(e),
|
||||
None => len as usize,
|
||||
};
|
||||
let e_idx = e_idx.max(s_idx);
|
||||
Ok(Value::Array(arr[s_idx..e_idx].to_vec()))
|
||||
}
|
||||
Value::Null => Ok(Value::String(String::new())),
|
||||
_ => Err(tera::Error::msg("slice: unsupported value type")),
|
||||
}
|
||||
}
|
||||
|
||||
fn filesizeformat_filter(value: &Value, _args: &HashMap<String, Value>) -> tera::Result<Value> {
|
||||
let bytes = match value {
|
||||
Value::Number(n) => n.as_f64().unwrap_or(0.0),
|
||||
Value::String(s) => s.parse::<f64>().unwrap_or(0.0),
|
||||
_ => 0.0,
|
||||
};
|
||||
|
||||
const UNITS: [&str; 6] = ["B", "KB", "MB", "GB", "TB", "PB"];
|
||||
let mut size = bytes;
|
||||
let mut unit = 0;
|
||||
while size >= 1024.0 && unit < UNITS.len() - 1 {
|
||||
size /= 1024.0;
|
||||
unit += 1;
|
||||
}
|
||||
let formatted = if unit == 0 {
|
||||
format!("{} {}", size as u64, UNITS[unit])
|
||||
} else {
|
||||
format!("{:.1} {}", size, UNITS[unit])
|
||||
};
|
||||
Ok(Value::String(formatted))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a `TemplateEngine` over a throwaway temp directory containing a
    /// single empty template, then register a few UI endpoints so the
    /// `url_for` tests have routes to resolve against.
    fn test_engine() -> TemplateEngine {
        let tmp = tempfile::TempDir::new().unwrap();
        let tpl = tmp.path().join("t.html");
        std::fs::write(&tpl, "").unwrap();
        let glob = format!("{}/*.html", tmp.path().display());
        let engine = TemplateEngine::new(&glob).unwrap();
        engine.register_endpoints(&[
            ("ui.buckets_overview", "/ui/buckets"),
            ("ui.bucket_detail", "/ui/buckets/{bucket_name}"),
            (
                "ui.abort_multipart_upload",
                "/ui/buckets/{bucket_name}/multipart/{upload_id}/abort",
            ),
        ]);
        engine
    }

    /// Register `tpl` under a fixed inline name and render it with an empty
    /// context, returning the rendered output.
    fn render_inline(engine: &TemplateEngine, tpl: &str) -> String {
        let mut tera = engine.tera.write();
        tera.add_raw_template("__inline__", tpl).unwrap();
        // Release the write lock before rendering — presumably `render`
        // acquires a lock of its own; NOTE(review): confirm lock semantics.
        drop(tera);
        engine.render("__inline__", &Context::new()).unwrap()
    }

    /// `url_for(endpoint='static', ...)` maps straight to `/static/<file>`.
    #[test]
    fn static_url() {
        let e = test_engine();
        let out = render_inline(
            &e,
            "{{ url_for(endpoint='static', filename='css/main.css') }}",
        );
        assert_eq!(out, "/static/css/main.css");
    }

    /// `{bucket_name}` placeholders in registered routes are substituted
    /// with the matching keyword argument.
    #[test]
    fn path_param_substitution() {
        let e = test_engine();
        let out = render_inline(
            &e,
            "{{ url_for(endpoint='ui.bucket_detail', bucket_name='my-bucket') }}",
        );
        assert_eq!(out, "/ui/buckets/my-bucket");
    }

    /// Keyword args that are not path parameters become query-string pairs.
    #[test]
    fn extra_args_become_query() {
        let e = test_engine();
        let out = render_inline(
            &e,
            "{{ url_for(endpoint='ui.bucket_detail', bucket_name='b', tab='replication') }}",
        );
        assert_eq!(out, "/ui/buckets/b?tab=replication");
    }

    /// Spot-check the filesize filter at a unit boundary (KB), a higher
    /// unit (MB), and a sub-KB value that should stay in whole bytes.
    #[test]
    fn filesizeformat_basic() {
        let v = filesizeformat_filter(&Value::Number(1024.into()), &HashMap::new()).unwrap();
        assert_eq!(v, Value::String("1.0 KB".into()));
        let v = filesizeformat_filter(&Value::Number(1_048_576.into()), &HashMap::new()).unwrap();
        assert_eq!(v, Value::String("1.0 MB".into()));
        let v = filesizeformat_filter(&Value::Number(500.into()), &HashMap::new()).unwrap();
        assert_eq!(v, Value::String("500 B".into()));
    }

    /// Sanity check that the crate's real templates under `templates/*.html`
    /// all parse, and that the expected minimum number is present.
    #[test]
    fn project_templates_parse() {
        let mut path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        path.push("templates");
        path.push("*.html");
        // Normalize Windows path separators so the glob works cross-platform.
        let glob = path.to_string_lossy().replace('\\', "/");
        let engine = TemplateEngine::new(&glob).expect("Tera parse failed");
        let names: Vec<String> = engine
            .tera
            .read()
            .get_template_names()
            .map(|s| s.to_string())
            .collect();
        assert!(
            names.len() >= 10,
            "expected 10+ templates, got {}",
            names.len()
        );
    }

    /// An RFC 3339 string renders with the filter's default format.
    #[test]
    fn format_datetime_rfc3339() {
        let v = format_datetime_filter(
            &Value::String("2024-06-15T12:34:56Z".into()),
            &HashMap::new(),
        )
        .unwrap();
        assert_eq!(v, Value::String("2024-06-15 12:34:56 UTC".into()));
    }
}
|
||||
3157
crates/myfsio-server/static/css/main.css
Normal file
3157
crates/myfsio-server/static/css/main.css
Normal file
File diff suppressed because it is too large
Load Diff
BIN
crates/myfsio-server/static/images/MyFSIO.ico
Normal file
BIN
crates/myfsio-server/static/images/MyFSIO.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 200 KiB |
BIN
crates/myfsio-server/static/images/MyFSIO.png
Normal file
BIN
crates/myfsio-server/static/images/MyFSIO.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 872 KiB |
4827
crates/myfsio-server/static/js/bucket-detail-main.js
Normal file
4827
crates/myfsio-server/static/js/bucket-detail-main.js
Normal file
File diff suppressed because it is too large
Load Diff
192
crates/myfsio-server/static/js/bucket-detail-operations.js
Normal file
192
crates/myfsio-server/static/js/bucket-detail-operations.js
Normal file
@@ -0,0 +1,192 @@
|
||||
/**
 * BucketDetailOperations — loads and manages a bucket's lifecycle rules,
 * CORS rules, and ACL grants on the bucket-detail page.
 *
 * Exposed as a window-level module (IIFE); the host page supplies the
 * toast/escaping helpers via init().
 */
window.BucketDetailOperations = (function() {
    'use strict';

    // Injected via init(); defaults are safe no-ops so calls made before
    // initialization do not throw (but also do not escape — see init()).
    let showMessage = function() {};
    let escapeHtml = function(s) { return s; };

    /**
     * Wire in the host page's helpers.
     * @param {{showMessage?: Function, escapeHtml?: Function}} config
     */
    function init(config) {
        showMessage = config.showMessage || showMessage;
        escapeHtml = config.escapeHtml || escapeHtml;
    }

    /**
     * Fetch lifecycle rules from `endpoint` and render them into the
     * card's `[data-lifecycle-body]` table body. Errors and the empty
     * case render as single full-width table rows.
     */
    async function loadLifecycleRules(card, endpoint) {
        if (!card || !endpoint) return;
        const body = card.querySelector('[data-lifecycle-body]');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const rules = data.rules || [];
            if (rules.length === 0) {
                body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No lifecycle rules configured</td></tr>';
                return;
            }

            body.innerHTML = rules.map(rule => {
                // Summarize each rule's actions as short human-readable lines.
                const actions = [];
                if (rule.expiration_days) actions.push(`Delete after ${rule.expiration_days} days`);
                if (rule.noncurrent_days) actions.push(`Delete old versions after ${rule.noncurrent_days} days`);
                if (rule.abort_mpu_days) actions.push(`Abort incomplete MPU after ${rule.abort_mpu_days} days`);

                return `
                    <tr>
                        <td class="fw-medium">${escapeHtml(rule.id)}</td>
                        <td><code>${escapeHtml(rule.prefix || '(all)')}</code></td>
                        <td>${actions.map(a => `<div class="small">${escapeHtml(a)}</div>`).join('')}</td>
                        <td>
                            <span class="badge ${rule.status === 'Enabled' ? 'text-bg-success' : 'text-bg-secondary'}">${escapeHtml(rule.status)}</span>
                        </td>
                        <td class="text-end">
                            <button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteLifecycleRule('${escapeHtml(rule.id)}')">
                                <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
                                    <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                                    <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                                </svg>
                            </button>
                        </td>
                    </tr>
                `;
            }).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    /**
     * Fetch CORS rules from `endpoint` and render them into the
     * `#cors-rules-body` table. Delete buttons are keyed by rule index,
     * not id (CORS rules are positional).
     */
    async function loadCorsRules(card, endpoint) {
        if (!card || !endpoint) return;
        const body = document.getElementById('cors-rules-body');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const rules = data.rules || [];
            if (rules.length === 0) {
                body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No CORS rules configured</td></tr>';
                return;
            }

            body.innerHTML = rules.map((rule, idx) => `
                <tr>
                    <td>${(rule.allowed_origins || []).map(o => `<code class="d-block">${escapeHtml(o)}</code>`).join('')}</td>
                    <td>${(rule.allowed_methods || []).map(m => `<span class="badge text-bg-secondary me-1">${escapeHtml(m)}</span>`).join('')}</td>
                    <td class="small text-muted">${(rule.allowed_headers || []).slice(0, 3).join(', ')}${(rule.allowed_headers || []).length > 3 ? '...' : ''}</td>
                    <td class="text-muted">${rule.max_age_seconds || 0}s</td>
                    <td class="text-end">
                        <button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteCorsRule(${idx})">
                            <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
                                <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                                <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                            </svg>
                        </button>
                    </td>
                </tr>
            `).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    /**
     * Fetch ACL grants from `endpoint` and render them into the card's
     * `[data-acl-body]` table body (read-only; no delete action).
     */
    async function loadAcl(card, endpoint) {
        if (!card || !endpoint) return;
        const body = card.querySelector('[data-acl-body]');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const grants = data.grants || [];
            if (grants.length === 0) {
                body.innerHTML = '<tr><td colspan="3" class="text-center text-muted py-3">No ACL grants configured</td></tr>';
                return;
            }

            body.innerHTML = grants.map(grant => {
                // Prefer a display name for users; fall back to URI/type for groups.
                const grantee = grant.grantee_type === 'CanonicalUser'
                    ? grant.display_name || grant.grantee_id
                    : grant.grantee_uri || grant.grantee_type;
                return `
                    <tr>
                        <td class="fw-medium">${escapeHtml(grantee)}</td>
                        <td><span class="badge text-bg-info">${escapeHtml(grant.permission)}</span></td>
                        <td class="text-muted small">${escapeHtml(grant.grantee_type)}</td>
                    </tr>
                `;
            }).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    /**
     * Delete a lifecycle rule by id (after a confirm prompt), then reload
     * the table on success. Invoked from inline onclick handlers.
     */
    async function deleteLifecycleRule(ruleId) {
        if (!confirm(`Delete lifecycle rule "${ruleId}"?`)) return;
        const card = document.getElementById('lifecycle-rules-card');
        if (!card) return;
        const endpoint = card.dataset.lifecycleUrl;
        const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';

        try {
            const resp = await fetch(endpoint, {
                method: 'DELETE',
                headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
                body: JSON.stringify({ rule_id: ruleId })
            });
            const data = await resp.json();
            if (!resp.ok) throw new Error(data.error || 'Failed to delete');
            showMessage({ title: 'Rule deleted', body: `Lifecycle rule "${ruleId}" has been deleted.`, variant: 'success' });
            loadLifecycleRules(card, endpoint);
        } catch (err) {
            showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
        }
    }

    /**
     * Delete a CORS rule by positional index (after a confirm prompt),
     * then reload the table on success.
     */
    async function deleteCorsRule(index) {
        if (!confirm('Delete this CORS rule?')) return;
        const card = document.getElementById('cors-rules-card');
        if (!card) return;
        const endpoint = card.dataset.corsUrl;
        const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';

        try {
            const resp = await fetch(endpoint, {
                method: 'DELETE',
                headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
                body: JSON.stringify({ rule_index: index })
            });
            const data = await resp.json();
            if (!resp.ok) throw new Error(data.error || 'Failed to delete');
            showMessage({ title: 'Rule deleted', body: 'CORS rule has been deleted.', variant: 'success' });
            loadCorsRules(card, endpoint);
        } catch (err) {
            showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
        }
    }

    // Public API.
    return {
        init: init,
        loadLifecycleRules: loadLifecycleRules,
        loadCorsRules: loadCorsRules,
        loadAcl: loadAcl,
        deleteLifecycleRule: deleteLifecycleRule,
        deleteCorsRule: deleteCorsRule
    };
})();
|
||||
600
crates/myfsio-server/static/js/bucket-detail-upload.js
Normal file
600
crates/myfsio-server/static/js/bucket-detail-upload.js
Normal file
@@ -0,0 +1,600 @@
|
||||
window.BucketDetailUpload = (function() {
|
||||
'use strict';
|
||||
|
||||
// Files at or above this size go through the multipart upload path.
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
// Size of each multipart chunk (8 MiB).
const CHUNK_SIZE = 8 * 1024 * 1024;
// Per-part retry budget and exponential-backoff base delay.
const MAX_PART_RETRIES = 3;
const RETRY_BASE_DELAY_MS = 1000;

// Mutable module state: whether a batch is in flight, plus the coarse
// progress mirrored by the floating indicator.
let state = {
    isUploading: false,
    uploadProgress: { current: 0, total: 0, currentFile: '' }
};

// DOM references and host-page callbacks; populated by init().
let elements = {};
let callbacks = {};
|
||||
|
||||
/**
 * Wire the module to the host page: capture DOM element references and
 * callbacks from `config`, then install event listeners and the
 * navigate-away guard. Missing config entries simply come through as
 * undefined (elements) or safe defaults (callbacks).
 */
function init(config) {
    const elementKeys = [
        'uploadForm', 'uploadFileInput', 'uploadModal', 'uploadModalEl',
        'uploadSubmitBtn', 'uploadCancelBtn', 'uploadBtnText',
        'uploadDropZone', 'uploadDropZoneLabel', 'uploadProgressStack',
        'uploadKeyPrefix', 'singleFileOptions', 'bulkUploadProgress',
        'bulkUploadStatus', 'bulkUploadCounter', 'bulkUploadProgressBar',
        'bulkUploadCurrentFile', 'bulkUploadResults',
        'bulkUploadSuccessAlert', 'bulkUploadErrorAlert',
        'bulkUploadSuccessCount', 'bulkUploadErrorCount',
        'bulkUploadErrorList', 'floatingProgress', 'floatingProgressBar',
        'floatingProgressStatus', 'floatingProgressTitle',
        'floatingProgressExpand'
    ];
    elements = {};
    for (const key of elementKeys) {
        elements[key] = config[key];
    }

    callbacks = {
        showMessage: config.showMessage || function() {},
        formatBytes: config.formatBytes || function(b) { return b + ' bytes'; },
        escapeHtml: config.escapeHtml || function(s) { return s; },
        onUploadComplete: config.onUploadComplete || function() {},
        hasFolders: config.hasFolders || function() { return false; },
        getCurrentPrefix: config.getCurrentPrefix || function() { return ''; }
    };

    setupEventListeners();
    setupBeforeUnload();
}
|
||||
|
||||
/** @returns {boolean} whether an upload batch is currently in flight. */
function isUploading() {
    return state.isUploading;
}

/**
 * Install a beforeunload guard so the browser warns before navigating
 * away while an upload is running.
 */
function setupBeforeUnload() {
    window.addEventListener('beforeunload', (e) => {
        if (!state.isUploading) return;
        e.preventDefault();
        e.returnValue = 'Upload in progress. Are you sure you want to leave?';
        return e.returnValue;
    });
}
|
||||
|
||||
/** Reveal the floating upload-progress widget. */
function showFloatingProgress() {
    const widget = elements.floatingProgress;
    if (widget) widget.classList.remove('d-none');
}

/** Hide the floating upload-progress widget. */
function hideFloatingProgress() {
    const widget = elements.floatingProgress;
    if (widget) widget.classList.add('d-none');
}

/**
 * Refresh the floating indicator (bar width, "current/total" status line,
 * and title) and mirror the values into module state.
 */
function updateFloatingProgress(current, total, currentFile) {
    state.uploadProgress = { current, total, currentFile: currentFile || '' };

    const bar = elements.floatingProgressBar;
    if (bar && total > 0) {
        bar.style.width = `${Math.round((current / total) * 100)}%`;
    }

    const statusEl = elements.floatingProgressStatus;
    if (statusEl) {
        statusEl.textContent = currentFile
            ? `${current}/${total} files - ${currentFile}`
            : `${current}/${total} files completed`;
    }

    const titleEl = elements.floatingProgressTitle;
    if (titleEl) {
        titleEl.textContent = `Uploading ${total} file${total !== 1 ? 's' : ''}...`;
    }
}
|
||||
|
||||
/**
 * Sync the drop-zone label and single-file options panel with the
 * current file selection (none / one / many).
 */
function refreshUploadDropLabel() {
    const label = elements.uploadDropZoneLabel;
    const input = elements.uploadFileInput;
    if (!label || !input) return;

    const files = input.files;
    const count = files ? files.length : 0;

    if (count === 0) {
        label.textContent = 'No file selected';
        if (elements.singleFileOptions) elements.singleFileOptions.classList.remove('d-none');
        return;
    }

    label.textContent = count === 1 ? files[0].name : `${count} files selected`;
    // Per-object options only make sense for a single file.
    if (elements.singleFileOptions) {
        elements.singleFileOptions.classList.toggle('d-none', count > 1);
    }
}

/**
 * Update the submit button caption to reflect how many files will be
 * uploaded ("Upload" vs "Upload N files").
 */
function updateUploadBtnText() {
    const label = elements.uploadBtnText;
    const input = elements.uploadFileInput;
    if (!label || !input) return;

    const count = input.files ? input.files.length : 0;
    label.textContent = count > 1 ? `Upload ${count} files` : 'Upload';
}
|
||||
|
||||
/**
 * Return the upload modal to its idle state: hide progress/result panels,
 * clear per-file progress items, re-enable inputs, unlock the drop zone,
 * clear the in-flight flag, and hide the floating indicator.
 */
function resetUploadUI() {
    const hide = (el) => { if (el) el.classList.add('d-none'); };
    const show = (el) => { if (el) el.classList.remove('d-none'); };

    hide(elements.bulkUploadProgress);
    hide(elements.bulkUploadResults);
    show(elements.bulkUploadSuccessAlert);
    hide(elements.bulkUploadErrorAlert);
    if (elements.bulkUploadErrorList) elements.bulkUploadErrorList.innerHTML = '';
    if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
    if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
    if (elements.uploadProgressStack) elements.uploadProgressStack.innerHTML = '';
    if (elements.uploadDropZone) {
        elements.uploadDropZone.classList.remove('upload-locked');
        elements.uploadDropZone.style.pointerEvents = '';
    }
    state.isUploading = false;
    hideFloatingProgress();
}

/**
 * Lock or unlock the drop zone and file input while an upload runs.
 */
function setUploadLockState(locked) {
    const zone = elements.uploadDropZone;
    if (zone) {
        zone.classList.toggle('upload-locked', locked);
        zone.style.pointerEvents = locked ? 'none' : '';
    }
    const input = elements.uploadFileInput;
    if (input) input.disabled = locked;
}
|
||||
|
||||
/**
 * Build a per-file progress card (name, size, status line, progress bar)
 * for the upload progress stack. Name and size strings are escaped via
 * the injected helpers before being interpolated into HTML.
 */
function createProgressItem(file) {
    const item = document.createElement('div');
    item.className = 'upload-progress-item';
    // Lifecycle state lives on a data attribute for CSS styling.
    item.dataset.state = 'uploading';
    item.innerHTML = `
        <div class="d-flex justify-content-between align-items-start">
            <div class="min-width-0 flex-grow-1">
                <div class="file-name">${callbacks.escapeHtml(file.name)}</div>
                <div class="file-size">${callbacks.formatBytes(file.size)}</div>
            </div>
            <div class="upload-status text-end ms-2">Preparing...</div>
        </div>
        <div class="progress-container">
            <div class="progress">
                <div class="progress-bar bg-primary" role="progressbar" style="width: 0%"></div>
            </div>
            <div class="progress-text">
                <span class="progress-loaded">0 B</span>
                <span class="progress-percent">0%</span>
            </div>
        </div>
    `;
    return item;
}

/**
 * Update a progress card in place. All fields are optional: status text,
 * byte counts (drives the bar/percent), a lifecycle state
 * ('uploading' | 'success' | 'error'), and an error message which, when
 * present, replaces the progress bar entirely.
 */
function updateProgressItem(item, { loaded, total, status, progressState, error }) {
    if (progressState) item.dataset.state = progressState;
    const statusEl = item.querySelector('.upload-status');
    const progressBar = item.querySelector('.progress-bar');
    const progressLoaded = item.querySelector('.progress-loaded');
    const progressPercent = item.querySelector('.progress-percent');

    if (status) {
        statusEl.textContent = status;
        // Reset to the base classes before re-applying a state modifier.
        statusEl.className = 'upload-status text-end ms-2';
        if (progressState === 'success') statusEl.classList.add('success');
        if (progressState === 'error') statusEl.classList.add('error');
    }
    if (typeof loaded === 'number' && typeof total === 'number' && total > 0) {
        const percent = Math.round((loaded / total) * 100);
        progressBar.style.width = `${percent}%`;
        progressLoaded.textContent = `${callbacks.formatBytes(loaded)} / ${callbacks.formatBytes(total)}`;
        progressPercent.textContent = `${percent}%`;
    }
    if (error) {
        // On failure, swap the bar for the (escaped) error message.
        const progressContainer = item.querySelector('.progress-container');
        if (progressContainer) {
            progressContainer.innerHTML = `<div class="text-danger small mt-1">${callbacks.escapeHtml(error)}</div>`;
        }
    }
}
|
||||
|
||||
/**
 * PUT one multipart chunk via XHR (XHR rather than fetch so upload
 * progress events are available). Resolves with the parsed JSON response
 * on 2xx; rejects with a descriptive Error on non-2xx, unparsable body,
 * network failure, or abort.
 *
 * `baseBytes` is the byte count already uploaded in earlier parts, so the
 * progress item can show whole-file progress while this part streams.
 */
function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
    return new Promise((resolve, reject) => {
        const xhr = new XMLHttpRequest();
        xhr.open('PUT', url, true);
        xhr.setRequestHeader('X-CSRFToken', csrfToken || '');

        xhr.upload.addEventListener('progress', (e) => {
            if (e.lengthComputable) {
                updateProgressItem(progressItem, {
                    status: `Part ${partNumber}/${totalParts}`,
                    loaded: baseBytes + e.loaded,
                    total: fileSize
                });
            }
        });

        xhr.addEventListener('load', () => {
            if (xhr.status >= 200 && xhr.status < 300) {
                try {
                    resolve(JSON.parse(xhr.responseText));
                } catch {
                    reject(new Error(`Part ${partNumber}: invalid response`));
                }
            } else {
                // Prefer the server's error message when the body is JSON.
                try {
                    const data = JSON.parse(xhr.responseText);
                    reject(new Error(data.error || `Part ${partNumber} failed (${xhr.status})`));
                } catch {
                    reject(new Error(`Part ${partNumber} failed (${xhr.status})`));
                }
            }
        });

        xhr.addEventListener('error', () => reject(new Error(`Part ${partNumber}: network error`)));
        xhr.addEventListener('abort', () => reject(new Error(`Part ${partNumber}: aborted`)));

        xhr.send(chunk);
    });
}

/**
 * Upload one chunk with up to MAX_PART_RETRIES retries and exponential
 * backoff (RETRY_BASE_DELAY_MS * 2^attempt). Re-throws the last error
 * once the retry budget is exhausted.
 */
async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
    let lastError;
    for (let attempt = 0; attempt <= MAX_PART_RETRIES; attempt++) {
        try {
            return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
        } catch (err) {
            lastError = err;
            if (attempt < MAX_PART_RETRIES) {
                const delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
                updateProgressItem(progressItem, {
                    status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
                    loaded: baseBytes,
                    total: fileSize
                });
                await new Promise(r => setTimeout(r, delay));
            }
        }
    }
    throw lastError;
}
|
||||
|
||||
/**
 * Run the full multipart upload protocol for one file:
 *   1. POST urls.initUrl to create the upload and get an upload_id;
 *   2. PUT each CHUNK_SIZE slice (with per-part retries), collecting etags;
 *   3. POST the part list to the complete endpoint.
 * On any failure after init, the upload is aborted server-side
 * (best-effort DELETE) before the error is re-thrown.
 *
 * URL templates contain the literal 'UPLOAD_ID_PLACEHOLDER' which is
 * substituted with the real upload_id.
 */
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
    const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;

    updateProgressItem(progressItem, { status: 'Initiating...', loaded: 0, total: file.size });
    const initResp = await fetch(urls.initUrl, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
        body: JSON.stringify({ object_key: objectKey, metadata })
    });
    if (!initResp.ok) {
        const err = await initResp.json().catch(() => ({}));
        throw new Error(err.error || 'Failed to initiate upload');
    }
    const { upload_id } = await initResp.json();

    // Substitute the upload id into the per-upload endpoint templates.
    const partUrl = urls.partTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
    const completeUrl = urls.completeTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
    const abortUrl = urls.abortTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);

    const parts = [];
    const totalParts = Math.ceil(file.size / CHUNK_SIZE);
    let uploadedBytes = 0;

    try {
        // Parts are uploaded sequentially; part numbers are 1-based.
        for (let partNumber = 1; partNumber <= totalParts; partNumber++) {
            const start = (partNumber - 1) * CHUNK_SIZE;
            const end = Math.min(start + CHUNK_SIZE, file.size);
            const chunk = file.slice(start, end);

            const partData = await uploadPartWithRetry(
                `${partUrl}?partNumber=${partNumber}`,
                chunk, csrfToken, uploadedBytes, file.size,
                progressItem, partNumber, totalParts
            );

            parts.push({ part_number: partNumber, etag: partData.etag });
            uploadedBytes += (end - start);

            updateProgressItem(progressItem, {
                loaded: uploadedBytes,
                total: file.size
            });
        }

        updateProgressItem(progressItem, { status: 'Completing...', loaded: file.size, total: file.size });
        const completeResp = await fetch(completeUrl, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
            body: JSON.stringify({ parts })
        });

        if (!completeResp.ok) {
            const err = await completeResp.json().catch(() => ({}));
            throw new Error(err.error || 'Failed to complete upload');
        }

        return await completeResp.json();
    } catch (err) {
        // Best-effort server-side cleanup; the original error still wins.
        try {
            await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
        } catch {}
        throw err;
    }
}
|
||||
|
||||
/**
 * Upload one file in a single multipart/form-data POST via XHR (for
 * progress events). Resolves with the parsed JSON response; note that a
 * 2xx response whose body carries `status: 'error'` is still treated as
 * a failure, and an unparsable 2xx body resolves with `{}`.
 */
async function uploadRegular(file, objectKey, metadata, progressItem, formAction) {
    return new Promise((resolve, reject) => {
        const formData = new FormData();
        formData.append('object', file);
        formData.append('object_key', objectKey);
        if (metadata) formData.append('metadata', JSON.stringify(metadata));
        // CSRF token is sent both as a form field and a header.
        const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
        if (csrfToken) formData.append('csrf_token', csrfToken);

        const xhr = new XMLHttpRequest();
        xhr.open('POST', formAction, true);
        xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
        xhr.setRequestHeader('X-CSRFToken', csrfToken || '');

        xhr.upload.addEventListener('progress', (e) => {
            if (e.lengthComputable) {
                updateProgressItem(progressItem, {
                    status: 'Uploading...',
                    loaded: e.loaded,
                    total: e.total
                });
            }
        });

        xhr.addEventListener('load', () => {
            if (xhr.status >= 200 && xhr.status < 300) {
                try {
                    const data = JSON.parse(xhr.responseText);
                    if (data.status === 'error') {
                        reject(new Error(data.message || 'Upload failed'));
                    } else {
                        resolve(data);
                    }
                } catch {
                    resolve({});
                }
            } else {
                // Prefer the server's message when the error body is JSON.
                try {
                    const data = JSON.parse(xhr.responseText);
                    reject(new Error(data.message || `Upload failed (${xhr.status})`));
                } catch {
                    reject(new Error(`Upload failed (${xhr.status})`));
                }
            }
        });

        xhr.addEventListener('error', () => reject(new Error('Network error')));
        xhr.addEventListener('abort', () => reject(new Error('Upload aborted')));

        xhr.send(formData);
    });
}
|
||||
|
||||
/**
 * Upload one file, choosing multipart vs. single-request based on size
 * (and availability of an init URL), reflecting progress and the final
 * success/failure state on a per-file progress card. A progress card is
 * created lazily when one was not supplied.
 */
async function uploadSingleFile(file, keyPrefix, metadata, progressItem, urls) {
    const objectKey = keyPrefix ? `${keyPrefix}${file.name}` : file.name;

    if (!progressItem && elements.uploadProgressStack) {
        progressItem = createProgressItem(file);
        elements.uploadProgressStack.appendChild(progressItem);
    }

    const useMultipart = file.size >= MULTIPART_THRESHOLD && urls.initUrl;
    const initialStatus = useMultipart ? 'Multipart upload...' : 'Uploading...';

    try {
        updateProgressItem(progressItem, { status: initialStatus, loaded: 0, total: file.size });
        const result = useMultipart
            ? await uploadMultipart(file, objectKey, metadata, progressItem, urls)
            : await uploadRegular(file, objectKey, metadata, progressItem, urls.formAction);
        updateProgressItem(progressItem, { progressState: 'success', status: 'Complete', loaded: file.size, total: file.size });
        return result;
    } catch (err) {
        updateProgressItem(progressItem, { progressState: 'error', status: 'Failed', error: err.message });
        throw err;
    }
}
|
||||
|
||||
/**
 * Uploads the given files sequentially, driving both the modal progress UI
 * and the floating progress indicator, then renders a success/error summary.
 *
 * @param {FileList|File[]} files - files chosen or dropped by the user
 * @param {object} urls - upload endpoint URLs, forwarded to uploadSingleFile
 */
async function performBulkUpload(files, urls) {
    // Re-entrancy guard: never start a second batch while one is running.
    if (state.isUploading || !files || files.length === 0) return;

    state.isUploading = true;
    setUploadLockState(true);

    const keyPrefix = (elements.uploadKeyPrefix?.value || '').trim();
    const metadataRaw = elements.uploadForm?.querySelector('textarea[name="metadata"]')?.value?.trim();
    let metadata = null;
    if (metadataRaw) {
        try {
            metadata = JSON.parse(metadataRaw);
        } catch {
            callbacks.showMessage({ title: 'Invalid metadata', body: 'Metadata must be valid JSON.', variant: 'danger' });
            // FIX: release the upload lock before bailing out. The previous
            // early return left state.isUploading = true and the lock engaged,
            // blocking every subsequent upload until a page reload.
            state.isUploading = false;
            setUploadLockState(false);
            resetUploadUI();
            return;
        }
    }

    // Switch the modal into "in progress" mode and freeze the form controls.
    if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.remove('d-none');
    if (elements.bulkUploadResults) elements.bulkUploadResults.classList.add('d-none');
    if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = true;
    if (elements.uploadFileInput) elements.uploadFileInput.disabled = true;

    const successFiles = [];
    const errorFiles = [];
    const total = files.length;

    updateFloatingProgress(0, total, files[0]?.name || '');

    for (let i = 0; i < total; i++) {
        const file = files[i];
        const current = i + 1;

        if (elements.bulkUploadCounter) elements.bulkUploadCounter.textContent = `${current}/${total}`;
        if (elements.bulkUploadCurrentFile) elements.bulkUploadCurrentFile.textContent = `Uploading: ${file.name}`;
        if (elements.bulkUploadProgressBar) {
            const percent = Math.round((current / total) * 100);
            elements.bulkUploadProgressBar.style.width = `${percent}%`;
        }
        updateFloatingProgress(i, total, file.name);

        try {
            // Per-file failures are collected instead of aborting the batch,
            // so one bad file does not cancel the remaining uploads.
            await uploadSingleFile(file, keyPrefix, metadata, null, urls);
            successFiles.push(file.name);
        } catch (error) {
            errorFiles.push({ name: file.name, error: error.message || 'Unknown error' });
        }
    }
    updateFloatingProgress(total, total);

    // Swap the progress panel for the results summary.
    if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.add('d-none');
    if (elements.bulkUploadResults) elements.bulkUploadResults.classList.remove('d-none');

    if (elements.bulkUploadSuccessCount) elements.bulkUploadSuccessCount.textContent = successFiles.length;
    if (successFiles.length === 0 && elements.bulkUploadSuccessAlert) {
        elements.bulkUploadSuccessAlert.classList.add('d-none');
    }

    if (errorFiles.length > 0) {
        if (elements.bulkUploadErrorCount) elements.bulkUploadErrorCount.textContent = errorFiles.length;
        if (elements.bulkUploadErrorAlert) elements.bulkUploadErrorAlert.classList.remove('d-none');
        if (elements.bulkUploadErrorList) {
            // File names and error text are attacker-influenced; escape both.
            elements.bulkUploadErrorList.innerHTML = errorFiles
                .map(f => `<li><strong>${callbacks.escapeHtml(f.name)}</strong>: ${callbacks.escapeHtml(f.error)}</li>`)
                .join('');
        }
    }

    state.isUploading = false;
    setUploadLockState(false);

    if (successFiles.length > 0) {
        // The listing refresh (via onUploadComplete) re-enables the controls.
        if (elements.uploadBtnText) elements.uploadBtnText.textContent = 'Refreshing...';
        callbacks.onUploadComplete(successFiles, errorFiles);
    } else {
        if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
        if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
    }
}
|
||||
|
||||
/**
 * Wires the upload widget's DOM event handlers: file input changes, the
 * click-to-browse drop zone, the floating progress pill, and the Bootstrap
 * modal lifecycle events.
 */
function setupEventListeners() {
    // Keep the drop-zone label and submit button text in sync with the
    // currently selected files.
    elements.uploadFileInput?.addEventListener('change', () => {
        if (state.isUploading) return;
        refreshUploadDropLabel();
        updateUploadBtnText();
        resetUploadUI();
    });

    // Clicking the drop zone proxies to the hidden file input.
    elements.uploadDropZone?.addEventListener('click', () => {
        if (!state.isUploading) {
            elements.uploadFileInput?.click();
        }
    });

    // Expanding the floating progress pill re-opens the upload modal.
    elements.floatingProgressExpand?.addEventListener('click', () => {
        elements.uploadModal?.show();
    });

    const modalEl = elements.uploadModalEl;
    if (!modalEl) return;

    // Closing the modal mid-upload hands progress off to the floating pill.
    modalEl.addEventListener('hide.bs.modal', () => {
        if (state.isUploading) {
            showFloatingProgress();
        }
    });

    // Once the modal is fully hidden (and no upload is running), wipe the
    // form back to its initial state.
    modalEl.addEventListener('hidden.bs.modal', () => {
        if (state.isUploading) return;
        resetUploadUI();
        if (elements.uploadFileInput) elements.uploadFileInput.value = '';
        refreshUploadDropLabel();
        updateUploadBtnText();
    });

    // Opening the modal reclaims the progress display and pre-fills the key
    // prefix with the folder currently being browsed (when folders exist).
    modalEl.addEventListener('show.bs.modal', () => {
        if (state.isUploading) {
            hideFloatingProgress();
        }
        const desiredPrefix = callbacks.hasFolders() ? callbacks.getCurrentPrefix() : '';
        if (elements.uploadKeyPrefix) {
            elements.uploadKeyPrefix.value = desiredPrefix || '';
        }
    });
}
|
||||
|
||||
/**
 * Attaches HTML5 drag-and-drop handling to `target`.
 *
 * @param {Element|null} target - element to receive drops; no-op when null
 * @param {object} [options]
 * @param {string}  [options.highlightClass] - CSS class toggled while a drag hovers
 * @param {boolean} [options.autoOpenModal]  - open the upload modal after a drop
 */
function wireDropTarget(target, options) {
    if (!target) return;
    const opts = options || {};
    const highlightClass = opts.highlightClass !== undefined ? opts.highlightClass : '';
    const autoOpenModal = opts.autoOpenModal !== undefined ? opts.autoOpenModal : false;

    // Browsers navigate away on drop unless default handling is suppressed.
    const swallow = (event) => {
        event.preventDefault();
        event.stopPropagation();
    };

    const setHighlight = (on) => {
        if (highlightClass) {
            target.classList.toggle(highlightClass, on);
        }
    };

    for (const name of ['dragenter', 'dragover']) {
        target.addEventListener(name, (event) => {
            swallow(event);
            if (!state.isUploading) {
                setHighlight(true);
            }
        });
    }

    for (const name of ['dragleave', 'drop']) {
        target.addEventListener(name, (event) => {
            swallow(event);
            setHighlight(false);
        });
    }

    target.addEventListener('drop', (event) => {
        if (state.isUploading) return;
        const dropped = event.dataTransfer?.files;
        if (!dropped?.length || !elements.uploadFileInput) {
            return;
        }
        // Hand the dropped FileList to the regular file input and let the
        // normal 'change' pipeline take over from there.
        elements.uploadFileInput.files = dropped;
        elements.uploadFileInput.dispatchEvent(new Event('change', { bubbles: true }));
        if (autoOpenModal && elements.uploadModal) {
            elements.uploadModal.show();
        }
    });
}
|
||||
|
||||
return {
|
||||
init: init,
|
||||
isUploading: isUploading,
|
||||
performBulkUpload: performBulkUpload,
|
||||
wireDropTarget: wireDropTarget,
|
||||
resetUploadUI: resetUploadUI,
|
||||
refreshUploadDropLabel: refreshUploadDropLabel,
|
||||
updateUploadBtnText: updateUploadBtnText
|
||||
};
|
||||
})();
|
||||
120
crates/myfsio-server/static/js/bucket-detail-utils.js
Normal file
120
crates/myfsio-server/static/js/bucket-detail-utils.js
Normal file
@@ -0,0 +1,120 @@
|
||||
// Shared UI helpers for the bucket-detail page: JSON textarea editing,
// byte formatting, HTML escaping, and a clipboard fallback.
window.BucketDetailUtils = (function() {
    'use strict';

    /**
     * Attaches JSON-friendly editing behaviour to a <textarea>:
     * Enter preserves/extends indentation (expanding {} / [] pairs onto
     * their own lines), Tab indents by two spaces, Shift+Tab outdents.
     */
    function setupJsonAutoIndent(textarea) {
        if (!textarea) return;

        textarea.addEventListener('keydown', function(e) {
            if (e.key === 'Enter') {
                e.preventDefault();

                const start = this.selectionStart;
                const end = this.selectionEnd;
                const value = this.value;

                // Text of the line the caret currently sits on.
                const lineStart = value.lastIndexOf('\n', start - 1) + 1;
                const currentLine = value.substring(lineStart, start);

                const indentMatch = currentLine.match(/^(\s*)/);
                let indent = indentMatch ? indentMatch[1] : '';

                const trimmedLine = currentLine.trim();
                const lastChar = trimmedLine.slice(-1);

                let newIndent = indent;
                let insertAfter = '';

                if (lastChar === '{' || lastChar === '[') {
                    newIndent = indent + '  ';

                    // Caret sitting between a matching open/close pair:
                    // drop the closing bracket onto its own line.
                    const charAfterCursor = value.substring(start, start + 1).trim();
                    if ((lastChar === '{' && charAfterCursor === '}') ||
                        (lastChar === '[' && charAfterCursor === ']')) {
                        insertAfter = '\n' + indent;
                    }
                } else if (lastChar === ',' || lastChar === ':') {
                    newIndent = indent;
                }

                const insertion = '\n' + newIndent + insertAfter;
                const newValue = value.substring(0, start) + insertion + value.substring(end);

                this.value = newValue;

                // Place the caret at the start of the newly indented line
                // (before any auto-inserted closing bracket).
                const newCursorPos = start + 1 + newIndent.length;
                this.selectionStart = this.selectionEnd = newCursorPos;

                // Notify listeners (validation, dirty tracking) of the edit.
                this.dispatchEvent(new Event('input', { bubbles: true }));
            }

            if (e.key === 'Tab') {
                e.preventDefault();
                const start = this.selectionStart;
                const end = this.selectionEnd;

                if (e.shiftKey) {
                    // Outdent: remove one two-space step from the line start,
                    // if present.
                    const lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
                    const lineContent = this.value.substring(lineStart, start);
                    if (lineContent.startsWith('  ')) {
                        this.value = this.value.substring(0, lineStart) +
                            this.value.substring(lineStart + 2);
                        this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
                    }
                } else {
                    // Indent: insert a two-space step at the caret.
                    this.value = this.value.substring(0, start) + '  ' + this.value.substring(end);
                    this.selectionStart = this.selectionEnd = start + 2;
                }

                this.dispatchEvent(new Event('input', { bubbles: true }));
            }
        });
    }

    /**
     * Formats a byte count with binary (1024) steps, e.g. 1536 -> "1.5 KB".
     * Non-finite input is passed through with a "bytes" suffix.
     */
    function formatBytes(bytes) {
        if (!Number.isFinite(bytes)) return `${bytes} bytes`;
        const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
        let i = 0;
        let size = bytes;
        while (size >= 1024 && i < units.length - 1) {
            size /= 1024;
            i++;
        }
        // Whole bytes get no decimal; scaled units keep one decimal place.
        return `${size.toFixed(i === 0 ? 0 : 1)} ${units[i]}`;
    }

    /**
     * Escapes a value for safe interpolation into HTML markup.
     * null/undefined become the empty string.
     *
     * FIX: the replacement strings must be the HTML entities themselves;
     * the previous (scrape-decoded) identity replacements made this a no-op
     * and left every caller open to HTML injection.
     */
    function escapeHtml(value) {
        if (value === null || value === undefined) return '';
        return String(value)
            .replace(/&/g, '&amp;')
            .replace(/</g, '&lt;')
            .replace(/>/g, '&gt;')
            .replace(/"/g, '&quot;')
            .replace(/'/g, '&#39;');
    }

    /**
     * Clipboard fallback for contexts where navigator.clipboard is
     * unavailable (insecure origins, old browsers): selects the text in an
     * off-screen textarea and uses the deprecated execCommand('copy').
     * Returns true on success.
     */
    function fallbackCopy(text) {
        const textArea = document.createElement('textarea');
        textArea.value = text;
        // Keep the element out of view without triggering scroll.
        textArea.style.position = 'fixed';
        textArea.style.left = '-9999px';
        textArea.style.top = '-9999px';
        document.body.appendChild(textArea);
        textArea.focus();
        textArea.select();
        let success = false;
        try {
            // Deprecated but still the only option without the async
            // Clipboard API; failures are reported, not thrown.
            success = document.execCommand('copy');
        } catch {
            success = false;
        }
        document.body.removeChild(textArea);
        return success;
    }

    return {
        setupJsonAutoIndent: setupJsonAutoIndent,
        formatBytes: formatBytes,
        escapeHtml: escapeHtml,
        fallbackCopy: fallbackCopy
    };
})();
|
||||
343
crates/myfsio-server/static/js/connections-management.js
Normal file
343
crates/myfsio-server/static/js/connections-management.js
Normal file
@@ -0,0 +1,343 @@
|
||||
// Admin-page controller for S3 connection management: per-row health checks,
// "test connection" probes, and AJAX create/edit/delete that patch the table
// in place instead of reloading.
window.ConnectionsManagement = (function() {
    'use strict';

    // Page configuration injected via init(): endpoint URL templates and the
    // CSRF token sent with mutating requests.
    var endpoints = {};
    var csrfToken = '';

    // Entry point: stores configuration, wires DOM handlers, and kicks off a
    // staggered health check of every listed connection.
    function init(config) {
        endpoints = config.endpoints || {};
        csrfToken = config.csrfToken || '';

        setupEventListeners();
        checkAllConnectionHealth();
    }

    // Toggles a password-style input between masked and plain text.
    function togglePassword(id) {
        var input = document.getElementById(id);
        if (input) {
            input.type = input.type === 'password' ? 'text' : 'password';
        }
    }

    // Posts the form's fields as JSON to the connection-test endpoint and
    // renders the outcome into the result container. The CSRF field is
    // stripped from the body because it travels in the X-CSRFToken header.
    // The request is aborted after 20 s so an unreachable endpoint cannot
    // hang the UI.
    async function testConnection(formId, resultId) {
        var form = document.getElementById(formId);
        var resultDiv = document.getElementById(resultId);
        if (!form || !resultDiv) return;

        var formData = new FormData(form);
        var data = {};
        formData.forEach(function(value, key) {
            if (key !== 'csrf_token') {
                data[key] = value;
            }
        });

        resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing connection...</div>';

        // Manual timeout: abort the fetch if the endpoint hangs.
        var controller = new AbortController();
        var timeoutId = setTimeout(function() { controller.abort(); }, 20000);

        try {
            var response = await fetch(endpoints.test, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'X-CSRFToken': csrfToken
                },
                body: JSON.stringify(data),
                signal: controller.signal
            });
            clearTimeout(timeoutId);

            // NOTE(review): assumes the endpoint always returns JSON with a
            // `message` field, even on error statuses — confirm server-side.
            var result = await response.json();
            if (response.ok) {
                // Green check icon + server-provided message.
                resultDiv.innerHTML = '<div class="text-success">' +
                    '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>' +
                    '</svg>' + window.UICore.escapeHtml(result.message) + '</div>';
            } else {
                // Red cross icon + server-provided message.
                resultDiv.innerHTML = '<div class="text-danger">' +
                    '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>' +
                    '</svg>' + window.UICore.escapeHtml(result.message) + '</div>';
            }
        } catch (error) {
            clearTimeout(timeoutId);
            // AbortError means our own timeout fired; anything else is a
            // network/parse failure.
            var message = error.name === 'AbortError'
                ? 'Connection test timed out - endpoint may be unreachable'
                : 'Connection failed: Network error';
            resultDiv.innerHTML = '<div class="text-danger">' +
                '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>' +
                '</svg>' + message + '</div>';
        }
    }

    // Probes a single connection's health endpoint (10 s timeout) and swaps
    // the row's status icon: green check (healthy), red cross (unhealthy),
    // yellow warning triangle (check itself failed).
    async function checkConnectionHealth(connectionId, statusEl) {
        if (!statusEl) return;

        try {
            var controller = new AbortController();
            var timeoutId = setTimeout(function() { controller.abort(); }, 10000);

            var response = await fetch(endpoints.healthTemplate.replace('CONNECTION_ID', connectionId), {
                signal: controller.signal
            });
            clearTimeout(timeoutId);

            var data = await response.json();
            if (data.healthy) {
                statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/></svg>';
                statusEl.setAttribute('data-status', 'healthy');
                statusEl.setAttribute('title', 'Connected');
            } else {
                statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>';
                statusEl.setAttribute('data-status', 'unhealthy');
                statusEl.setAttribute('title', data.error || 'Unreachable');
            }
        } catch (error) {
            // Timeout, network failure, or non-JSON response: status unknown.
            statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-warning" viewBox="0 0 16 16">' +
                '<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/></svg>';
            statusEl.setAttribute('data-status', 'unknown');
            statusEl.setAttribute('title', 'Could not check status');
        }
    }

    // Health-checks every connection row, staggered 200 ms apart so the page
    // does not fire all probes simultaneously.
    function checkAllConnectionHealth() {
        var rows = document.querySelectorAll('tr[data-connection-id]');
        rows.forEach(function(row, index) {
            var connectionId = row.getAttribute('data-connection-id');
            var statusEl = row.querySelector('.connection-status');
            if (statusEl) {
                setTimeout(function() {
                    checkConnectionHealth(connectionId, statusEl);
                }, index * 200);
            }
        });
    }

    // Rewrites the "N connections" badge from the current row count.
    function updateConnectionCount() {
        var countBadge = document.querySelector('.badge.bg-primary.bg-opacity-10.text-primary.fs-6');
        if (countBadge) {
            var remaining = document.querySelectorAll('tr[data-connection-id]').length;
            countBadge.textContent = remaining + ' connection' + (remaining !== 1 ? 's' : '');
        }
    }

    // Builds the <tr> markup for a newly created connection, matching the
    // server-rendered rows. All dynamic values go through escapeHtml; the
    // access key is masked to "first 8...last 4" when long enough.
    function createConnectionRowHtml(conn) {
        var ak = conn.access_key || '';
        var maskedKey = ak.length > 12 ? ak.slice(0, 8) + '...' + ak.slice(-4) : ak;

        return '<tr data-connection-id="' + window.UICore.escapeHtml(conn.id) + '">' +
            '<td class="text-center">' +
            '<span class="connection-status" data-status="checking" title="Checking...">' +
            '<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 12px; height: 12px;"></span>' +
            '</span></td>' +
            '<td><div class="d-flex align-items-center gap-2">' +
            '<div class="connection-icon"><svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/></svg></div>' +
            '<span class="fw-medium">' + window.UICore.escapeHtml(conn.name) + '</span>' +
            '</div></td>' +
            '<td><span class="text-muted small text-truncate d-inline-block" style="max-width: 200px;" title="' + window.UICore.escapeHtml(conn.endpoint_url) + '">' + window.UICore.escapeHtml(conn.endpoint_url) + '</span></td>' +
            '<td><span class="badge bg-primary bg-opacity-10 text-primary">' + window.UICore.escapeHtml(conn.region) + '</span></td>' +
            '<td><code class="small">' + window.UICore.escapeHtml(maskedKey) + '</code></td>' +
            '<td class="text-end"><div class="btn-group btn-group-sm" role="group">' +
            '<button type="button" class="btn btn-outline-secondary" data-bs-toggle="modal" data-bs-target="#editConnectionModal" ' +
            'data-id="' + window.UICore.escapeHtml(conn.id) + '" data-name="' + window.UICore.escapeHtml(conn.name) + '" ' +
            'data-endpoint="' + window.UICore.escapeHtml(conn.endpoint_url) + '" data-region="' + window.UICore.escapeHtml(conn.region) + '" ' +
            'data-access="' + window.UICore.escapeHtml(conn.access_key) + '" title="Edit connection">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg></button>' +
            '<button type="button" class="btn btn-outline-danger" data-bs-toggle="modal" data-bs-target="#deleteConnectionModal" ' +
            'data-id="' + window.UICore.escapeHtml(conn.id) + '" data-name="' + window.UICore.escapeHtml(conn.name) + '" title="Delete connection">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>' +
            '<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/></svg></button>' +
            '</div></td></tr>';
    }

    // Wires every page-level handler: test buttons, modal pre-fill on open,
    // and AJAX submission of the create/edit/delete forms.
    function setupEventListeners() {
        var testBtn = document.getElementById('testConnectionBtn');
        if (testBtn) {
            testBtn.addEventListener('click', function() {
                testConnection('createConnectionForm', 'testResult');
            });
        }

        var editTestBtn = document.getElementById('editTestConnectionBtn');
        if (editTestBtn) {
            editTestBtn.addEventListener('click', function() {
                testConnection('editConnectionForm', 'editTestResult');
            });
        }

        // Edit modal: copy the clicked row's data-* attributes into the form
        // and point the form at the per-connection update URL.
        var editModal = document.getElementById('editConnectionModal');
        if (editModal) {
            editModal.addEventListener('show.bs.modal', function(event) {
                var button = event.relatedTarget;
                if (!button) return;

                var id = button.getAttribute('data-id');

                document.getElementById('edit_name').value = button.getAttribute('data-name') || '';
                document.getElementById('edit_endpoint_url').value = button.getAttribute('data-endpoint') || '';
                document.getElementById('edit_region').value = button.getAttribute('data-region') || '';
                document.getElementById('edit_access_key').value = button.getAttribute('data-access') || '';
                // Secret is never echoed back; blank means "keep current".
                document.getElementById('edit_secret_key').value = '';
                document.getElementById('edit_secret_key').placeholder = '(unchanged — leave blank to keep current)';
                document.getElementById('edit_secret_key').required = false;
                document.getElementById('editTestResult').innerHTML = '';

                var form = document.getElementById('editConnectionForm');
                form.action = endpoints.updateTemplate.replace('CONNECTION_ID', id);
            });
        }

        // Delete modal: show the target name and point the form at the
        // per-connection delete URL.
        var deleteModal = document.getElementById('deleteConnectionModal');
        if (deleteModal) {
            deleteModal.addEventListener('show.bs.modal', function(event) {
                var button = event.relatedTarget;
                if (!button) return;

                var id = button.getAttribute('data-id');
                var name = button.getAttribute('data-name');

                document.getElementById('deleteConnectionName').textContent = name;
                var form = document.getElementById('deleteConnectionForm');
                form.action = endpoints.deleteTemplate.replace('CONNECTION_ID', id);
            });
        }

        // Create: on AJAX success, replace the empty-state (if shown) with a
        // fresh table, append the new row, and start its health check.
        var createForm = document.getElementById('createConnectionForm');
        if (createForm) {
            createForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(createForm, {
                    successMessage: 'Connection created',
                    onSuccess: function(data) {
                        createForm.reset();
                        document.getElementById('testResult').innerHTML = '';

                        if (data.connection) {
                            var emptyState = document.querySelector('.empty-state');
                            if (emptyState) {
                                var cardBody = emptyState.closest('.card-body');
                                if (cardBody) {
                                    cardBody.innerHTML = '<div class="table-responsive"><table class="table table-hover align-middle mb-0">' +
                                        '<thead class="table-light"><tr>' +
                                        '<th scope="col" style="width: 50px;">Status</th>' +
                                        '<th scope="col">Name</th><th scope="col">Endpoint</th>' +
                                        '<th scope="col">Region</th><th scope="col">Access Key</th>' +
                                        '<th scope="col" class="text-end">Actions</th></tr></thead>' +
                                        '<tbody></tbody></table></div>';
                                }
                            }

                            var tbody = document.querySelector('table tbody');
                            if (tbody) {
                                tbody.insertAdjacentHTML('beforeend', createConnectionRowHtml(data.connection));
                                var newRow = tbody.lastElementChild;
                                var statusEl = newRow.querySelector('.connection-status');
                                if (statusEl) {
                                    checkConnectionHealth(data.connection.id, statusEl);
                                }
                            }
                            updateConnectionCount();
                        } else {
                            // Server did not return the new record: fall back
                            // to a full reload.
                            location.reload();
                        }
                    }
                });
            });
        }

        // Edit: on AJAX success, patch the existing row's cells and data-*
        // attributes in place, then re-check the connection's health.
        var editForm = document.getElementById('editConnectionForm');
        if (editForm) {
            editForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(editForm, {
                    successMessage: 'Connection updated',
                    onSuccess: function(data) {
                        var modal = bootstrap.Modal.getInstance(document.getElementById('editConnectionModal'));
                        if (modal) modal.hide();

                        // NOTE(review): assumes the update URL's second-to-last
                        // path segment is the connection id — confirm the route
                        // shape matches endpoints.updateTemplate.
                        var connId = editForm.action.split('/').slice(-2)[0];
                        var row = document.querySelector('tr[data-connection-id="' + connId + '"]');
                        if (row && data.connection) {
                            var nameCell = row.querySelector('.fw-medium');
                            if (nameCell) nameCell.textContent = data.connection.name;

                            var endpointCell = row.querySelector('.text-truncate');
                            if (endpointCell) {
                                endpointCell.textContent = data.connection.endpoint_url;
                                endpointCell.title = data.connection.endpoint_url;
                            }

                            var regionBadge = row.querySelector('.badge.bg-primary');
                            if (regionBadge) regionBadge.textContent = data.connection.region;

                            var accessCode = row.querySelector('code.small');
                            if (accessCode && data.connection.access_key) {
                                var ak = data.connection.access_key;
                                accessCode.textContent = ak.slice(0, 8) + '...' + ak.slice(-4);
                            }

                            // Keep the modal-trigger buttons' data-* in sync so
                            // reopening the dialogs shows current values.
                            var editBtn = row.querySelector('[data-bs-target="#editConnectionModal"]');
                            if (editBtn) {
                                editBtn.setAttribute('data-name', data.connection.name);
                                editBtn.setAttribute('data-endpoint', data.connection.endpoint_url);
                                editBtn.setAttribute('data-region', data.connection.region);
                                editBtn.setAttribute('data-access', data.connection.access_key);
                            }

                            var deleteBtn = row.querySelector('[data-bs-target="#deleteConnectionModal"]');
                            if (deleteBtn) {
                                deleteBtn.setAttribute('data-name', data.connection.name);
                            }

                            var statusEl = row.querySelector('.connection-status');
                            if (statusEl) {
                                checkConnectionHealth(connId, statusEl);
                            }
                        }
                    }
                });
            });
        }

        // Delete: on AJAX success, drop the row; reload when the table is
        // empty so the server-rendered empty-state reappears.
        var deleteForm = document.getElementById('deleteConnectionForm');
        if (deleteForm) {
            deleteForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(deleteForm, {
                    successMessage: 'Connection deleted',
                    onSuccess: function(data) {
                        var modal = bootstrap.Modal.getInstance(document.getElementById('deleteConnectionModal'));
                        if (modal) modal.hide();

                        var connId = deleteForm.action.split('/').slice(-2)[0];
                        var row = document.querySelector('tr[data-connection-id="' + connId + '"]');
                        if (row) {
                            row.remove();
                        }

                        updateConnectionCount();

                        if (document.querySelectorAll('tr[data-connection-id]').length === 0) {
                            location.reload();
                        }
                    }
                });
            });
        }
    }

    // Public API consumed by the page template.
    return {
        init: init,
        togglePassword: togglePassword,
        testConnection: testConnection,
        checkConnectionHealth: checkConnectionHealth
    };
})();
|
||||
846
crates/myfsio-server/static/js/iam-management.js
Normal file
846
crates/myfsio-server/static/js/iam-management.js
Normal file
@@ -0,0 +1,846 @@
|
||||
window.IAMManagement = (function() {
|
||||
'use strict';
|
||||
|
||||
var users = [];
|
||||
var currentUserKey = null;
|
||||
var endpoints = {};
|
||||
var csrfToken = '';
|
||||
var iamLocked = false;
|
||||
|
||||
var policyModal = null;
|
||||
var editUserModal = null;
|
||||
var deleteUserModal = null;
|
||||
var rotateSecretModal = null;
|
||||
var expiryModal = null;
|
||||
var currentRotateKey = null;
|
||||
var currentEditKey = null;
|
||||
var currentDeleteKey = null;
|
||||
var currentEditAccessKey = null;
|
||||
var currentDeleteAccessKey = null;
|
||||
var currentExpiryKey = null;
|
||||
var currentExpiryAccessKey = null;
|
||||
|
||||
var ALL_S3_ACTIONS = [
|
||||
'list', 'read', 'write', 'delete', 'share', 'policy',
|
||||
'replication', 'lifecycle', 'cors',
|
||||
'create_bucket', 'delete_bucket',
|
||||
'versioning', 'tagging', 'encryption', 'quota',
|
||||
'object_lock', 'notification', 'logging', 'website'
|
||||
];
|
||||
|
||||
var policyTemplates = {
|
||||
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
|
||||
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
|
||||
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
|
||||
operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
|
||||
bucketadmin: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'versioning', 'tagging', 'encryption', 'cors', 'lifecycle', 'quota', 'object_lock', 'notification', 'logging', 'website', 'replication'] }]
|
||||
};
|
||||
|
||||
// A user counts as an admin when any attached policy grants either the IAM
// wildcard ('iam:*') or the global wildcard ('*') action.
function isAdminUser(policies) {
    if (!policies || !policies.length) return false;
    for (var i = 0; i < policies.length; i++) {
        var actions = policies[i].actions;
        if (actions && (actions.indexOf('iam:*') >= 0 || actions.indexOf('*') >= 0)) {
            return true;
        }
    }
    return false;
}
|
||||
|
||||
// Summarises an action list into a human-readable badge label
// ('Full Access', 'Read Only', ..., or 'Custom (n)').
function getPermissionLevel(actions) {
    if (!actions || !actions.length) return 'Custom (0)';
    var has = function(a) { return actions.indexOf(a) >= 0; };
    if (has('*')) return 'Full Access';
    // Explicit enumeration of every S3 action also counts as full access.
    if (actions.length >= ALL_S3_ACTIONS.length && ALL_S3_ACTIONS.every(has)) {
        return 'Full Access';
    }
    if (has('list') && has('read')) {
        if (has('write') && has('delete')) return 'Read + Write + Delete';
        if (has('write')) return 'Read + Write';
        return 'Read Only';
    }
    return 'Custom (' + actions.length + ')';
}
|
||||
|
||||
// '*' is the wildcard meaning the policy applies to every bucket.
function getBucketLabel(bucket) {
    if (bucket === '*') {
        return 'All Buckets';
    }
    return bucket;
}
|
||||
|
||||
// Substitutes the URL-encoded user id into an endpoint template containing
// the literal placeholder 'USER_ID'.
function buildUserUrl(template, userId) {
    var encoded = encodeURIComponent(userId);
    return template.replace('USER_ID', encoded);
}
|
||||
|
||||
// Accepts either a user id or an access key and resolves it to the matching
// user record from the module-level `users` array, or null when no match.
function getUserByIdentifier(identifier) {
    for (var i = 0; i < users.length; i++) {
        var u = users[i];
        if (u.user_id === identifier || u.access_key === identifier) {
            return u;
        }
    }
    return null;
}
|
||||
|
||||
// Resolves a user id to its record in the module-level `users` array, or
// null when unknown.
function getUserById(userId) {
    for (var i = 0; i < users.length; i++) {
        if (users[i].user_id === userId) {
            return users[i];
        }
    }
    return null;
}
|
||||
|
||||
// Entry point: stores the page configuration (user list, endpoints, CSRF
// token) and wires up every IAM widget. When the IAM subsystem is locked the
// page is display-only, so all interactive setup is skipped.
function init(config) {
    users = config.users || [];
    currentUserKey = config.currentUserKey || null;
    endpoints = config.endpoints || {};
    csrfToken = config.csrfToken || '';
    iamLocked = config.iamLocked || false;

    // Locked IAM: no modals, no handlers — bail before any DOM wiring.
    if (iamLocked) return;

    initModals();
    setupJsonAutoIndent();
    setupCopyButtons();
    setupPolicyEditor();
    setupCreateUserModal();
    setupEditUserModal();
    setupDeleteUserModal();
    setupRotateSecretModal();
    setupExpiryModal();
    setupFormHandlers();
    setupSearch();
    setupCopyAccessKeyButtons();
}
|
||||
|
||||
// Wraps each dialog element that exists in the DOM with a Bootstrap Modal
// instance; a missing element leaves the corresponding handle null.
function initModals() {
    var wrap = function(id) {
        var el = document.getElementById(id);
        return el ? new bootstrap.Modal(el) : null;
    };
    policyModal = wrap('policyEditorModal');
    editUserModal = wrap('editUserModal');
    deleteUserModal = wrap('deleteUserModal');
    rotateSecretModal = wrap('rotateSecretModal');
    expiryModal = wrap('expiryModal');
}
|
||||
|
||||
// Enable JSON auto-indent behavior on both policy textareas.
function setupJsonAutoIndent() {
    ['policyEditorDocument', 'createUserPolicies'].forEach(function(id) {
        window.UICore.setupJsonAutoIndent(document.getElementById(id));
    });
}
|
||||
|
||||
// Wire all copy-to-clipboard buttons on the page.
//
// - `.config-copy` buttons copy the innerText of the element named by
//   their `data-copy-target` attribute (button label resets to "Copy JSON").
// - The access-key / secret disclosure buttons copy the value of a fixed
//   companion <input> (label resets to "Copy").
//
// Fix: the access-key and secret wiring were duplicated verbatim; they
// now share one helper, behavior unchanged.
function setupCopyButtons() {
    document.querySelectorAll('.config-copy').forEach(function(button) {
        button.addEventListener('click', async function() {
            var targetId = button.dataset.copyTarget;
            var target = document.getElementById(targetId);
            if (!target) return;
            await window.UICore.copyToClipboard(target.innerText, button, 'Copy JSON');
        });
    });

    // Attach a click handler that copies the value of `inputId` when the
    // (optional) button matching `buttonSelector` exists.
    function wireInputCopy(buttonSelector, inputId) {
        var copyButton = document.querySelector(buttonSelector);
        if (!copyButton) return;
        copyButton.addEventListener('click', async function() {
            var input = document.getElementById(inputId);
            if (!input) return;
            await window.UICore.copyToClipboard(input.value, copyButton, 'Copy');
        });
    }

    wireInputCopy('[data-access-key-copy]', 'disclosedAccessKeyValue');
    wireInputCopy('[data-secret-copy]', 'disclosedSecretValue');
}
|
||||
|
||||
// Pretty-printed JSON of a user's policy list, or '' when the user is
// unknown. Accepts either a user_id or an access_key.
function getUserPolicies(identifier) {
    var user = getUserByIdentifier(identifier);
    if (!user) return '';
    return JSON.stringify(user.policies, null, 2);
}
|
||||
|
||||
// Fill `textareaEl` with the named policy template as formatted JSON.
// Unknown template names and missing textareas are silently ignored.
// Reads the module-level `policyTemplates` map.
function applyPolicyTemplate(name, textareaEl) {
    var template = policyTemplates[name];
    if (!template || !textareaEl) return;
    textareaEl.value = JSON.stringify(template, null, 2);
}
|
||||
|
||||
// Wire the policy editor modal: template shortcut buttons fill the
// textarea, and per-user "manage policies" buttons open the modal
// pre-filled with that user's current policy document.
function setupPolicyEditor() {
    var labelEl = document.getElementById('policyEditorUserLabel');
    var hiddenIdEl = document.getElementById('policyEditorUserId');
    var documentEl = document.getElementById('policyEditorDocument');

    document.querySelectorAll('[data-policy-template]').forEach(function(button) {
        button.addEventListener('click', function() {
            applyPolicyTemplate(button.dataset.policyTemplate, documentEl);
        });
    });

    document.querySelectorAll('[data-policy-editor]').forEach(function(button) {
        button.addEventListener('click', function() {
            var userId = button.dataset.userId;
            if (!userId) return;

            // Show the access key in the header (fall back to the id).
            labelEl.textContent = button.dataset.accessKey || userId;
            hiddenIdEl.value = userId;
            documentEl.value = getUserPolicies(userId);

            policyModal.show();
        });
    });
}
|
||||
|
||||
function generateSecureHex(byteCount) {
|
||||
var arr = new Uint8Array(byteCount);
|
||||
crypto.getRandomValues(arr);
|
||||
return Array.from(arr).map(function(b) { return b.toString(16).padStart(2, '0'); }).join('');
|
||||
}
|
||||
|
||||
function generateSecureBase64(byteCount) {
|
||||
var arr = new Uint8Array(byteCount);
|
||||
crypto.getRandomValues(arr);
|
||||
var binary = '';
|
||||
for (var i = 0; i < arr.length; i++) {
|
||||
binary += String.fromCharCode(arr[i]);
|
||||
}
|
||||
return btoa(binary).replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
|
||||
}
|
||||
|
||||
// Wire the create-user modal: policy template shortcuts fill the policy
// textarea, and the two generator buttons fill the credential inputs
// with freshly generated random values.
function setupCreateUserModal() {
    var policiesTextarea = document.getElementById('createUserPolicies');

    document.querySelectorAll('[data-create-policy-template]').forEach(function(button) {
        button.addEventListener('click', function() {
            applyPolicyTemplate(button.dataset.createPolicyTemplate, policiesTextarea);
        });
    });

    // Attach a generator button that writes `generate()` into `inputId`.
    function wireGenerator(buttonId, inputId, generate) {
        var button = document.getElementById(buttonId);
        if (!button) return;
        button.addEventListener('click', function() {
            var input = document.getElementById(inputId);
            if (input) input.value = generate();
        });
    }

    wireGenerator('generateAccessKeyBtn', 'createUserAccessKey', function() {
        return generateSecureHex(8);
    });
    wireGenerator('generateSecretKeyBtn', 'createUserSecretKey', function() {
        return generateSecureBase64(24);
    });
}
|
||||
|
||||
// Wire the per-user "Edit Name" buttons: remember which user is being
// edited, prefill the name input, point the form at the user's update
// endpoint, and show the modal.
function setupEditUserModal() {
    var form = document.getElementById('editUserForm');
    var nameInput = document.getElementById('editUserDisplayName');

    document.querySelectorAll('[data-edit-user]').forEach(function(btn) {
        btn.addEventListener('click', function() {
            var userId = btn.dataset.userId;
            currentEditKey = userId;
            currentEditAccessKey = btn.dataset.accessKey || userId;
            nameInput.value = btn.dataset.displayName;
            form.action = buildUserUrl(endpoints.updateUser, userId);
            editUserModal.show();
        });
    });
}
|
||||
|
||||
// Wire the per-user "Delete User" buttons: remember which user is being
// deleted, point the form at the delete endpoint, show an extra warning
// when the admin targets their own account, and open the modal.
function setupDeleteUserModal() {
    var form = document.getElementById('deleteUserForm');
    var label = document.getElementById('deleteUserLabel');
    var selfWarning = document.getElementById('deleteSelfWarning');

    document.querySelectorAll('[data-delete-user]').forEach(function(btn) {
        btn.addEventListener('click', function() {
            var userId = btn.dataset.userId;
            var accessKey = btn.dataset.accessKey || userId;

            currentDeleteKey = userId;
            currentDeleteAccessKey = accessKey;
            label.textContent = accessKey;
            form.action = buildUserUrl(endpoints.deleteUser, userId);

            // Show the self-delete warning only for the logged-in user.
            selfWarning.classList.toggle('d-none', accessKey !== currentUserKey);

            deleteUserModal.show();
        });
    });
}
|
||||
|
||||
// Wire the "rotate secret" flow:
//  - [data-rotate-user] buttons open the confirmation modal,
//  - Confirm POSTs to the rotate endpoint and reveals the new secret,
//  - the new secret can be copied, and "Done" reloads the page.
//
// Fixes: the original declared `var data` twice in the same function
// scope, and called `response.json()` unguarded on the error path — a
// non-JSON error body (e.g. an HTML error page) would surface a cryptic
// SyntaxError toast instead of a useful message.
function setupRotateSecretModal() {
    var rotateUserLabel = document.getElementById('rotateUserLabel');
    var confirmRotateBtn = document.getElementById('confirmRotateBtn');
    var rotateCancelBtn = document.getElementById('rotateCancelBtn');
    var rotateDoneBtn = document.getElementById('rotateDoneBtn');
    var rotateSecretConfirm = document.getElementById('rotateSecretConfirm');
    var rotateSecretResult = document.getElementById('rotateSecretResult');
    var newSecretKeyInput = document.getElementById('newSecretKey');
    var copyNewSecretBtn = document.getElementById('copyNewSecret');

    document.querySelectorAll('[data-rotate-user]').forEach(function(btn) {
        btn.addEventListener('click', function() {
            currentRotateKey = btn.dataset.userId;
            rotateUserLabel.textContent = btn.dataset.accessKey || currentRotateKey;

            // Reset the modal to its "confirm" state in case it still
            // shows a previous rotation's result.
            rotateSecretConfirm.classList.remove('d-none');
            rotateSecretResult.classList.add('d-none');
            confirmRotateBtn.classList.remove('d-none');
            rotateCancelBtn.classList.remove('d-none');
            rotateDoneBtn.classList.add('d-none');

            rotateSecretModal.show();
        });
    });

    if (confirmRotateBtn) {
        confirmRotateBtn.addEventListener('click', async function() {
            if (!currentRotateKey) return;

            window.UICore.setButtonLoading(confirmRotateBtn, true, 'Rotating...');

            try {
                var url = buildUserUrl(endpoints.rotateSecret, currentRotateKey);
                var response = await fetch(url, {
                    method: 'POST',
                    headers: {
                        'Accept': 'application/json',
                        'X-CSRFToken': csrfToken
                    }
                });

                if (!response.ok) {
                    // Prefer the server-supplied error message, but
                    // tolerate a non-JSON error body.
                    var message = 'Failed to rotate secret';
                    try {
                        var errorData = await response.json();
                        if (errorData && errorData.error) message = errorData.error;
                    } catch (parseErr) {
                        // keep the generic message
                    }
                    throw new Error(message);
                }

                var data = await response.json();
                newSecretKeyInput.value = data.secret_key;

                // Swap the modal into its "result" state so the new
                // secret can be copied.
                rotateSecretConfirm.classList.add('d-none');
                rotateSecretResult.classList.remove('d-none');
                confirmRotateBtn.classList.add('d-none');
                rotateCancelBtn.classList.add('d-none');
                rotateDoneBtn.classList.remove('d-none');

            } catch (err) {
                if (window.showToast) {
                    window.showToast(err.message, 'Error', 'danger');
                }
                rotateSecretModal.hide();
            } finally {
                window.UICore.setButtonLoading(confirmRotateBtn, false);
            }
        });
    }

    if (copyNewSecretBtn) {
        copyNewSecretBtn.addEventListener('click', async function() {
            await window.UICore.copyToClipboard(newSecretKeyInput.value, copyNewSecretBtn, 'Copy');
        });
    }

    if (rotateDoneBtn) {
        rotateDoneBtn.addEventListener('click', function() {
            // Full reload so the user list reflects the rotated secret.
            window.location.reload();
        });
    }
}
|
||||
|
||||
// Open the expiry modal for `key`, pre-filling the datetime-local input
// from the user's current expiry timestamp (empty when none or invalid).
function openExpiryModal(key, expiresAt) {
    currentExpiryKey = key;
    var user = getUserByIdentifier(key);
    var label = document.getElementById('expiryUserLabel');
    var input = document.getElementById('expiryDateInput');
    var form = document.getElementById('expiryForm');

    if (label) {
        label.textContent = currentExpiryAccessKey || (user ? user.access_key : key);
    }

    // datetime-local inputs take local wall-clock time, so shift the
    // timestamp by the timezone offset before formatting as ISO.
    var value = '';
    if (expiresAt) {
        try {
            var parsed = new Date(expiresAt);
            var shifted = new Date(parsed.getTime() - parsed.getTimezoneOffset() * 60000);
            value = shifted.toISOString().slice(0, 16);
        } catch (e) {
            // Invalid date: toISOString throws; leave the input empty.
            value = '';
        }
    }
    if (input) input.value = value;

    if (form) form.action = buildUserUrl(endpoints.updateExpiry, key);

    var modalEl = document.getElementById('expiryModal');
    if (modalEl) {
        bootstrap.Modal.getOrCreateInstance(modalEl).show();
    }
}
|
||||
|
||||
// Wire the expiry modal: per-user "Set Expiry" buttons open it, preset
// buttons fill the datetime input with now + offset (or clear it), and
// the form submits via AJAX then reloads the page.
function setupExpiryModal() {
    document.querySelectorAll('[data-expiry-user]').forEach(function(btn) {
        btn.addEventListener('click', function(e) {
            e.preventDefault();
            currentExpiryAccessKey = btn.dataset.accessKey || btn.dataset.userId;
            openExpiryModal(btn.dataset.userId, btn.dataset.expiresAt || '');
        });
    });

    // Offsets in milliseconds for each quick-pick preset.
    var presetOffsets = {
        '1h': 3600000,
        '24h': 86400000,
        '7d': 7 * 86400000,
        '30d': 30 * 86400000,
        '90d': 90 * 86400000
    };

    document.querySelectorAll('[data-expiry-preset]').forEach(function(btn) {
        btn.addEventListener('click', function() {
            var input = document.getElementById('expiryDateInput');
            if (!input) return;

            var preset = btn.dataset.expiryPreset;
            if (preset === 'clear') {
                input.value = '';
                return;
            }

            // Unknown presets fall back to an offset of 0 ("now").
            var ms = presetOffsets[preset] || 0;
            var future = new Date(Date.now() + ms);
            // Shift to local wall-clock time for the datetime-local input.
            var local = new Date(future.getTime() - future.getTimezoneOffset() * 60000);
            input.value = local.toISOString().slice(0, 16);
        });
    });

    var expiryForm = document.getElementById('expiryForm');
    if (expiryForm) {
        expiryForm.addEventListener('submit', function(e) {
            e.preventDefault();
            window.UICore.submitFormAjax(expiryForm, {
                successMessage: 'Expiry updated',
                onSuccess: function() {
                    var modalEl = document.getElementById('expiryModal');
                    if (modalEl) bootstrap.Modal.getOrCreateInstance(modalEl).hide();
                    window.location.reload();
                }
            });
        });
    }
}
|
||||
|
||||
// Build the complete HTML for one user card (grid-column wrapper
// included). Used after an AJAX user-create so the new card can be
// inserted without a page reload; the markup mirrors the
// server-rendered card template, so keep the two in sync.
// NOTE(review): isAdminUser and getPermissionLevel are defined elsewhere
// in this file.
function createUserCardHtml(user) {
    var userId = user.user_id || '';
    // access_key falls back to the id; display name falls back to the key.
    var accessKey = user.access_key || userId;
    var displayName = user.display_name || accessKey;
    var policies = user.policies || [];
    var expiresAt = user.expires_at || '';
    var admin = isAdminUser(policies);
    var cardClass = 'card h-100 iam-user-card' + (admin ? ' iam-admin-card' : '');
    var roleBadge = admin
    ? '<span class="iam-role-badge iam-role-admin" data-role-badge>Admin</span>'
    : '<span class="iam-role-badge iam-role-user" data-role-badge>User</span>';

    // One badge per policy ("<bucket> · <permission level>"), or a
    // single "No policies" placeholder.
    var policyBadges = '';
    if (policies && policies.length > 0) {
        policyBadges = policies.map(function(p) {
            var bucketLabel = getBucketLabel(p.bucket);
            var permLevel = getPermissionLevel(p.actions);
            return '<span class="iam-perm-badge">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
            '<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
            '</svg>' + window.UICore.escapeHtml(bucketLabel) + ' · ' + window.UICore.escapeHtml(permLevel) + '</span>';
        }).join('');
    } else {
        policyBadges = '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
    }

    // All dynamic values are HTML-escaped; the lowercased data attributes
    // feed the client-side search filter in setupSearch().
    var esc = window.UICore.escapeHtml;
    return '<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '" data-display-name="' + esc(displayName.toLowerCase()) + '" data-access-key-filter="' + esc(accessKey.toLowerCase()) + '">' +
    '<div class="' + cardClass + '">' +
    '<div class="card-body">' +
    '<div class="d-flex align-items-start justify-content-between mb-3">' +
    '<div class="d-flex align-items-center gap-3 min-width-0 overflow-hidden">' +
    '<div class="user-avatar user-avatar-lg flex-shrink-0">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">' +
    '<path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>' +
    '</svg></div>' +
    '<div class="min-width-0">' +
    '<div class="d-flex align-items-center gap-2 mb-0">' +
    '<h6 class="fw-semibold mb-0 text-truncate" title="' + esc(displayName) + '">' + esc(displayName) + '</h6>' +
    roleBadge +
    '</div>' +
    '<div class="d-flex align-items-center gap-1">' +
    '<code class="small text-muted text-truncate" title="' + esc(accessKey) + '">' + esc(accessKey) + '</code>' +
    '<button type="button" class="iam-copy-key" title="Copy access key" data-copy-access-key="' + esc(accessKey) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">' +
    '<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>' +
    '<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>' +
    '</svg></button>' +
    '</div>' +
    '</div></div>' +
    '<div class="dropdown flex-shrink-0">' +
    '<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">' +
    '<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>' +
    '</svg></button>' +
    '<ul class="dropdown-menu dropdown-menu-end">' +
    '<li><button class="dropdown-item" type="button" data-edit-user data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '" data-display-name="' + esc(displayName) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg>Edit Name</button></li>' +
    '<li><button class="dropdown-item" type="button" data-expiry-user data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '" data-expires-at="' + esc(expiresAt) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/><path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/></svg>Set Expiry</button></li>' +
    '<li><button class="dropdown-item" type="button" data-rotate-user data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/><path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/></svg>Rotate Secret</button></li>' +
    '<li><hr class="dropdown-divider"></li>' +
    '<li><button class="dropdown-item text-danger" type="button" data-delete-user data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/><path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/></svg>Delete User</button></li>' +
    '</ul></div></div>' +
    '<div class="mb-3">' +
    '<div class="small text-muted mb-2">Bucket Permissions</div>' +
    '<div class="d-flex flex-wrap gap-1" data-policy-badges>' + policyBadges + '</div></div>' +
    '<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-user-id="' + esc(userId) + '" data-access-key="' + esc(accessKey) + '">' +
    '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/><path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/></svg>Manage Policies</button>' +
    '</div></div></div>';
}
|
||||
|
||||
// Attach click handlers to a freshly inserted user card (produced by
// createUserCardHtml). The handlers duplicate the per-button wiring done
// by the setup* functions for server-rendered cards, since those only
// run once at init; keep both in sync.
function attachUserCardHandlers(cardElement, user) {
    var userId = user.user_id;
    var accessKey = user.access_key;
    var displayName = user.display_name;
    var expiresAt = user.expires_at || '';

    // "Edit Name": mirrors setupEditUserModal's click handler.
    var editBtn = cardElement.querySelector('[data-edit-user]');
    if (editBtn) {
        editBtn.addEventListener('click', function() {
            currentEditKey = userId;
            currentEditAccessKey = accessKey;
            document.getElementById('editUserDisplayName').value = displayName;
            document.getElementById('editUserForm').action = buildUserUrl(endpoints.updateUser, userId);
            editUserModal.show();
        });
    }

    // "Delete User": mirrors setupDeleteUserModal's click handler,
    // including the warning when deleting the logged-in user.
    var deleteBtn = cardElement.querySelector('[data-delete-user]');
    if (deleteBtn) {
        deleteBtn.addEventListener('click', function() {
            currentDeleteKey = userId;
            currentDeleteAccessKey = accessKey;
            document.getElementById('deleteUserLabel').textContent = accessKey;
            document.getElementById('deleteUserForm').action = buildUserUrl(endpoints.deleteUser, userId);
            var deleteSelfWarning = document.getElementById('deleteSelfWarning');
            if (accessKey === currentUserKey) {
                deleteSelfWarning.classList.remove('d-none');
            } else {
                deleteSelfWarning.classList.add('d-none');
            }
            deleteUserModal.show();
        });
    }

    // "Rotate Secret": resets the modal to its confirm state before
    // showing it, mirroring setupRotateSecretModal.
    var rotateBtn = cardElement.querySelector('[data-rotate-user]');
    if (rotateBtn) {
        rotateBtn.addEventListener('click', function() {
            currentRotateKey = userId;
            document.getElementById('rotateUserLabel').textContent = accessKey;
            document.getElementById('rotateSecretConfirm').classList.remove('d-none');
            document.getElementById('rotateSecretResult').classList.add('d-none');
            document.getElementById('confirmRotateBtn').classList.remove('d-none');
            document.getElementById('rotateCancelBtn').classList.remove('d-none');
            document.getElementById('rotateDoneBtn').classList.add('d-none');
            rotateSecretModal.show();
        });
    }

    // "Set Expiry": delegates to openExpiryModal.
    var expiryBtn = cardElement.querySelector('[data-expiry-user]');
    if (expiryBtn) {
        expiryBtn.addEventListener('click', function(e) {
            e.preventDefault();
            currentExpiryAccessKey = accessKey;
            openExpiryModal(userId, expiresAt);
        });
    }

    // "Manage Policies": opens the policy editor pre-filled for this user.
    var policyBtn = cardElement.querySelector('[data-policy-editor]');
    if (policyBtn) {
        policyBtn.addEventListener('click', function() {
            document.getElementById('policyEditorUserLabel').textContent = accessKey;
            document.getElementById('policyEditorUserId').value = userId;
            document.getElementById('policyEditorDocument').value = getUserPolicies(userId);
            policyModal.show();
        });
    }

    // Inline access-key copy button next to the key in the card header.
    var copyBtn = cardElement.querySelector('[data-copy-access-key]');
    if (copyBtn) {
        copyBtn.addEventListener('click', function() {
            copyAccessKey(copyBtn);
        });
    }
}
|
||||
|
||||
// Refresh the "N users configured" caption in the card header to match
// the number of user cards currently in the DOM.
function updateUserCount() {
    var countEl = document.querySelector('.card-header .text-muted.small');
    if (!countEl) return;
    var count = document.querySelectorAll('.iam-user-card').length;
    var suffix = count !== 1 ? 's' : '';
    countEl.textContent = count + ' user' + suffix + ' configured';
}
|
||||
|
||||
// Wire AJAX submission for the four IAM forms (create user, policy
// editor, edit name, delete user). Each handler intercepts the native
// submit, posts via window.UICore.submitFormAjax, and then patches the
// page DOM in place so no reload is needed.
function setupFormHandlers() {
    // --- Create user -----------------------------------------------------
    var createUserForm = document.querySelector('#createUserModal form');
    if (createUserForm) {
        createUserForm.addEventListener('submit', function(e) {
            e.preventDefault();
            window.UICore.submitFormAjax(createUserForm, {
                successMessage: 'User created',
                onSuccess: function(data) {
                    var modal = bootstrap.Modal.getInstance(document.getElementById('createUserModal'));
                    if (modal) modal.hide();
                    createUserForm.reset();

                    // Only one credentials alert at a time: drop any
                    // previous one before inserting the new one.
                    var existingAlert = document.querySelector('.alert.alert-info.border-0.shadow-sm');
                    if (existingAlert) existingAlert.remove();

                    // Show the one-time credentials banner when the server
                    // disclosed a secret key in its response.
                    if (data.secret_key) {
                        var alertHtml = '<div class="alert alert-info border-0 shadow-sm mb-4" role="alert" id="newUserSecretAlert">' +
                        '<div class="d-flex align-items-start gap-2 mb-2">' +
                        '<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="bi bi-key flex-shrink-0 mt-1" viewBox="0 0 16 16">' +
                        '<path d="M0 8a4 4 0 0 1 7.465-2H14a.5.5 0 0 1 .354.146l1.5 1.5a.5.5 0 0 1 0 .708l-1.5 1.5a.5.5 0 0 1-.708 0L13 9.207l-.646.647a.5.5 0 0 1-.708 0L11 9.207l-.646.647a.5.5 0 0 1-.708 0L9 9.207l-.646.647A.5.5 0 0 1 8 10h-.535A4 4 0 0 1 0 8zm4-3a3 3 0 1 0 2.712 4.285A.5.5 0 0 1 7.163 9h.63l.853-.854a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.793-.793-1-1h-6.63a.5.5 0 0 1-.451-.285A3 3 0 0 0 4 5z"/><path d="M4 8a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>' +
                        '</svg>' +
                        '<div class="flex-grow-1">' +
                        '<div class="fw-semibold">New user created: <code>' + window.UICore.escapeHtml(data.access_key) + '</code></div>' +
                        '<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>' +
                        '</div>' +
                        '<button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>' +
                        '</div>' +
                        '<div class="input-group mb-2">' +
                        '<span class="input-group-text"><strong>Access key</strong></span>' +
                        '<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.access_key) + '" readonly />' +
                        '<button class="btn btn-outline-primary" type="button" id="copyNewUserAccessKey">Copy</button>' +
                        '</div>' +
                        '<div class="input-group">' +
                        '<span class="input-group-text"><strong>Secret key</strong></span>' +
                        '<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.secret_key) + '" readonly id="newUserSecret" />' +
                        '<button class="btn btn-outline-primary" type="button" id="copyNewUserSecret">Copy</button>' +
                        '</div></div>';
                        var container = document.querySelector('.page-header');
                        if (container) {
                            container.insertAdjacentHTML('afterend', alertHtml);
                            document.getElementById('copyNewUserAccessKey').addEventListener('click', async function() {
                                await window.UICore.copyToClipboard(data.access_key, this, 'Copy');
                            });
                            document.getElementById('copyNewUserSecret').addEventListener('click', async function() {
                                await window.UICore.copyToClipboard(data.secret_key, this, 'Copy');
                            });
                        }
                    }

                    // If this was the first user, replace the empty-state
                    // placeholder with a fresh grid to insert into.
                    var usersGrid = document.querySelector('.row.g-3');
                    var emptyState = document.querySelector('.empty-state');
                    if (emptyState) {
                        var emptyCol = emptyState.closest('.col-12');
                        if (emptyCol) emptyCol.remove();
                        if (!usersGrid) {
                            var cardBody = document.querySelector('.card-body.px-4.pb-4');
                            if (cardBody) {
                                cardBody.innerHTML = '<div class="row g-3"></div>';
                                usersGrid = cardBody.querySelector('.row.g-3');
                            }
                        }
                    }

                    // Append the new card, wire its buttons, and keep the
                    // in-memory `users` list and header count in sync.
                    if (usersGrid) {
                        var newUser = {
                            user_id: data.user_id,
                            access_key: data.access_key,
                            display_name: data.display_name,
                            expires_at: data.expires_at || '',
                            policies: data.policies || []
                        };
                        var cardHtml = createUserCardHtml(newUser);
                        usersGrid.insertAdjacentHTML('beforeend', cardHtml);
                        var newCard = usersGrid.lastElementChild;
                        attachUserCardHandlers(newCard, newUser);
                        users.push(newUser);
                        updateUserCount();
                    }
                }
            });
        });
    }

    // --- Policy editor ---------------------------------------------------
    var policyEditorForm = document.getElementById('policyEditorForm');
    if (policyEditorForm) {
        policyEditorForm.addEventListener('submit', function(e) {
            e.preventDefault();
            var userInputEl = document.getElementById('policyEditorUserId');
            var userId = userInputEl.value;
            if (!userId) return;

            // The form's action is resolved lazily from a template so one
            // form can serve every user.
            var template = policyEditorForm.dataset.actionTemplate;
            policyEditorForm.action = template.replace('USER_ID_PLACEHOLDER', encodeURIComponent(userId));

            window.UICore.submitFormAjax(policyEditorForm, {
                successMessage: 'Policies updated',
                onSuccess: function(data) {
                    policyModal.hide();

                    // Rebuild the card's policy badges from the server's
                    // authoritative policy list.
                    var userCard = document.querySelector('.iam-user-item[data-user-id="' + userId + '"]');
                    if (userCard) {
                        var cardEl = userCard.querySelector('.iam-user-card');
                        var badgeContainer = cardEl ? cardEl.querySelector('[data-policy-badges]') : null;
                        if (badgeContainer && data.policies) {
                            var badges = data.policies.map(function(p) {
                                var bl = getBucketLabel(p.bucket);
                                var pl = getPermissionLevel(p.actions);
                                return '<span class="iam-perm-badge">' +
                                '<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                                '<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
                                '</svg>' + window.UICore.escapeHtml(bl) + ' · ' + window.UICore.escapeHtml(pl) + '</span>';
                            }).join('');
                            badgeContainer.innerHTML = badges || '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
                        }
                        // Policy changes can flip the user's admin status;
                        // update the card styling and role badge to match.
                        if (cardEl) {
                            var nowAdmin = isAdminUser(data.policies);
                            cardEl.classList.toggle('iam-admin-card', nowAdmin);
                            var roleBadgeEl = cardEl.querySelector('[data-role-badge]');
                            if (roleBadgeEl) {
                                if (nowAdmin) {
                                    roleBadgeEl.className = 'iam-role-badge iam-role-admin';
                                    roleBadgeEl.textContent = 'Admin';
                                } else {
                                    roleBadgeEl.className = 'iam-role-badge iam-role-user';
                                    roleBadgeEl.textContent = 'User';
                                }
                            }
                        }
                    }

                    // Keep the in-memory users list in sync with the DOM.
                    var userIndex = users.findIndex(function(u) { return u.user_id === userId; });
                    if (userIndex >= 0 && data.policies) {
                        users[userIndex].policies = data.policies;
                    }
                }
            });
        });
    }

    // --- Edit display name ----------------------------------------------
    var editUserForm = document.getElementById('editUserForm');
    if (editUserForm) {
        editUserForm.addEventListener('submit', function(e) {
            e.preventDefault();
            // Snapshot the target key: the module-level currentEditKey may
            // change before the async success callback runs.
            var key = currentEditKey;
            window.UICore.submitFormAjax(editUserForm, {
                successMessage: 'User updated',
                onSuccess: function(data) {
                    editUserModal.hide();

                    // Propagate the new name into the card: button data
                    // attribute, visible heading, and the lowercased
                    // search-filter attribute on the wrapper.
                    var newName = data.display_name || document.getElementById('editUserDisplayName').value;
                    var editBtn = document.querySelector('[data-edit-user][data-user-id="' + key + '"]');
                    if (editBtn) {
                        editBtn.setAttribute('data-display-name', newName);
                        var card = editBtn.closest('.iam-user-card');
                        if (card) {
                            var nameEl = card.querySelector('h6');
                            if (nameEl) {
                                nameEl.textContent = newName;
                                nameEl.title = newName;
                            }
                            var itemWrapper = card.closest('.iam-user-item');
                            if (itemWrapper) {
                                itemWrapper.setAttribute('data-display-name', newName.toLowerCase());
                            }
                        }
                    }

                    var userIndex = users.findIndex(function(u) { return u.user_id === key; });
                    if (userIndex >= 0) {
                        users[userIndex].display_name = newName;
                    }

                    // When the admin renamed themselves, also refresh the
                    // sidebar user widget (truncated to 16 chars).
                    if (currentEditAccessKey === currentUserKey) {
                        document.querySelectorAll('.sidebar-user .user-name').forEach(function(el) {
                            var truncated = newName.length > 16 ? newName.substring(0, 16) + '...' : newName;
                            el.textContent = truncated;
                            el.title = newName;
                        });
                        document.querySelectorAll('.sidebar-user[data-username]').forEach(function(el) {
                            el.setAttribute('data-username', newName);
                        });
                    }
                }
            });
        });
    }

    // --- Delete user -----------------------------------------------------
    var deleteUserForm = document.getElementById('deleteUserForm');
    if (deleteUserForm) {
        deleteUserForm.addEventListener('submit', function(e) {
            e.preventDefault();
            // Snapshot the target key before the async callback runs.
            var key = currentDeleteKey;
            window.UICore.submitFormAjax(deleteUserForm, {
                successMessage: 'User deleted',
                onSuccess: function(data) {
                    deleteUserModal.hide();

                    // Self-deletion invalidates the session: leave the page.
                    if (currentDeleteAccessKey === currentUserKey) {
                        window.location.href = '/ui/';
                        return;
                    }

                    // Remove the deleted user's card column from the grid.
                    var deleteBtn = document.querySelector('[data-delete-user][data-user-id="' + key + '"]');
                    if (deleteBtn) {
                        var cardCol = deleteBtn.closest('[class*="col-"]');
                        if (cardCol) {
                            cardCol.remove();
                        }
                    }

                    users = users.filter(function(u) { return u.user_id !== key; });
                    updateUserCount();
                }
            });
        });
    }
}
|
||||
|
||||
// Wire live filtering of the IAM user list to the search box: entries whose
// display name or access key does not contain the query are hidden, and the
// "no results" placeholder is shown when nothing matches.
function setupSearch() {
  var searchInput = document.getElementById('iam-user-search');
  if (!searchInput) return;

  searchInput.addEventListener('input', function() {
    var needle = searchInput.value.toLowerCase().trim();
    var emptyState = document.getElementById('iam-no-results');
    var shown = 0;

    document.querySelectorAll('.iam-user-item').forEach(function(entry) {
      var displayName = entry.getAttribute('data-display-name') || '';
      var accessKey = entry.getAttribute('data-access-key-filter') || '';
      var isMatch = !needle || displayName.indexOf(needle) >= 0 || accessKey.indexOf(needle) >= 0;
      entry.classList.toggle('d-none', !isMatch);
      if (isMatch) shown++;
    });

    if (emptyState) {
      emptyState.classList.toggle('d-none', shown > 0);
    }
  });
}
|
||||
|
||||
// Copy the button's access key to the clipboard, then flash a green
// checkmark for 1.2s before restoring the original button content.
// Clipboard failures are silently ignored.
function copyAccessKey(btn) {
  var accessKey = btn.getAttribute('data-copy-access-key');
  if (!accessKey) return;
  var savedHtml = btn.innerHTML;
  navigator.clipboard.writeText(accessKey).then(function() {
    btn.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16"><path d="M13.854 3.646a.5.5 0 0 1 0 .708l-7 7a.5.5 0 0 1-.708 0l-3.5-3.5a.5.5 0 1 1 .708-.708L6.5 10.293l6.646-6.647a.5.5 0 0 1 .708 0z"/></svg>';
    btn.style.color = '#22c55e';
    setTimeout(function() {
      btn.innerHTML = savedHtml;
      btn.style.color = '';
    }, 1200);
  }).catch(function() {});
}
|
||||
|
||||
// Attach the copy-to-clipboard handler to every access-key copy button.
function setupCopyAccessKeyButtons() {
  var buttons = document.querySelectorAll('[data-copy-access-key]');
  buttons.forEach(function(button) {
    button.addEventListener('click', function() {
      copyAccessKey(button);
    });
  });
}
|
||||
|
||||
return {
|
||||
init: init
|
||||
};
|
||||
})();
|
||||
334
crates/myfsio-server/static/js/ui-core.js
Normal file
334
crates/myfsio-server/static/js/ui-core.js
Normal file
@@ -0,0 +1,334 @@
|
||||
window.UICore = (function() {
|
||||
'use strict';
|
||||
|
||||
// Read the CSRF token from the page's <meta name="csrf-token"> tag;
// returns '' when the tag is absent.
function getCsrfToken() {
  const tokenMeta = document.querySelector('meta[name="csrf-token"]');
  if (!tokenMeta) return '';
  return tokenMeta.getAttribute('content');
}
|
||||
|
||||
// Format a byte count for display: whole bytes get no decimals, larger
// units (KB..TB, 1024-based) get one. Non-finite input is passed through
// with a ' bytes' suffix.
function formatBytes(bytes) {
  if (!Number.isFinite(bytes)) return bytes + ' bytes';
  const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
  let unitIdx = 0;
  let value = bytes;
  while (value >= 1024 && unitIdx < units.length - 1) {
    value /= 1024;
    unitIdx += 1;
  }
  const decimals = unitIdx === 0 ? 0 : 1;
  return value.toFixed(decimals) + ' ' + units[unitIdx];
}
|
||||
|
||||
// Escape a value for safe interpolation into HTML. null/undefined become
// '', everything else is stringified and the five HTML-special characters
// are replaced with their entities.
//
// BUG FIX: the replacement strings had been reduced to the literal
// characters themselves (e.g. .replace(/&/g, '&')), making every replace a
// no-op and the function escape nothing. Restored the standard entities.
function escapeHtml(value) {
  if (value === null || value === undefined) return '';
  return String(value)
    .replace(/&/g, '&amp;')   // must run first so later entities are not double-escaped
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;');
}
|
||||
|
||||
// Submit `form` via fetch (AJAX) instead of a full-page POST.
//
// options:
//   onSuccess(data)  - called with the parsed JSON body on a 2xx response.
//   onError(err)     - called with the Error on any failure.
//   successMessage   - toast text used when the server omits `message`.
//
// Side effects: disables the submit button and shows a spinner while the
// request is in flight (restored in `finally`), and reports the outcome
// via window.showToast.
async function submitFormAjax(form, options) {
  options = options || {};
  var onSuccess = options.onSuccess || function() {};
  var onError = options.onError || function() {};
  var successMessage = options.successMessage || 'Operation completed';

  var formData = new FormData(form);
  // File uploads must go as multipart/form-data (raw FormData); all other
  // forms are sent URL-encoded.
  var hasFileInput = !!form.querySelector('input[type="file"]');
  var requestBody = hasFileInput ? formData : new URLSearchParams(formData);
  var csrfToken = getCsrfToken();
  var submitBtn = form.querySelector('[type="submit"]');
  var originalHtml = submitBtn ? submitBtn.innerHTML : '';

  try {
    if (submitBtn) {
      submitBtn.disabled = true;
      submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>Saving...';
    }

    var formAction = form.getAttribute('action') || form.action;
    var headers = {
      'X-CSRF-Token': csrfToken,
      'Accept': 'application/json',
      'X-Requested-With': 'XMLHttpRequest'
    };
    if (!hasFileInput) {
      // For FormData the browser must set the multipart boundary itself,
      // so Content-Type is only set for the URL-encoded case.
      headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=UTF-8';
    }
    var response = await fetch(formAction, {
      method: form.getAttribute('method') || 'POST',
      headers: headers,
      body: requestBody,
      redirect: 'follow'
    });

    // A non-JSON reply (e.g. an HTML error page or a login redirect) is
    // treated as a failure before attempting to parse the body.
    var contentType = response.headers.get('content-type') || '';
    if (!contentType.includes('application/json')) {
      throw new Error('Server returned an unexpected response. Please try again.');
    }

    var data = await response.json();

    if (!response.ok) {
      throw new Error(data.error || 'HTTP ' + response.status);
    }

    window.showToast(data.message || successMessage, 'Success', 'success');
    onSuccess(data);

  } catch (err) {
    window.showToast(err.message, 'Error', 'error');
    onError(err);
  } finally {
    // Always restore the submit button, success or failure.
    if (submitBtn) {
      submitBtn.disabled = false;
      submitBtn.innerHTML = originalHtml;
    }
  }
}
|
||||
|
||||
// PollingManager: central scheduler for the console's periodic refresh
// timers (replication, lifecycle, connection health, bucket stats).
// Interval overrides are persisted under 'myfsio-polling-intervals' in
// localStorage.
function PollingManager() {
  this.intervals = {};  // key -> active interval in ms
  this.callbacks = {};  // key -> refresh function (kept after stop() for resume)
  this.timers = {};     // key -> setInterval handle
  // Built-in default intervals in ms; overridden by saved settings below.
  this.defaults = {
    replication: 30000,
    lifecycle: 60000,
    connectionHealth: 60000,
    bucketStats: 120000
  };
  this._loadSettings();
}

// Merge any intervals previously saved to localStorage into the defaults.
// Failures (storage disabled, corrupt JSON) are logged and ignored.
PollingManager.prototype._loadSettings = function() {
  try {
    var stored = localStorage.getItem('myfsio-polling-intervals');
    if (stored) {
      var settings = JSON.parse(stored);
      for (var key in settings) {
        if (settings.hasOwnProperty(key)) {
          this.defaults[key] = settings[key];
        }
      }
    }
  } catch (e) {
    console.warn('Failed to load polling settings:', e);
  }
};

// Merge `settings` into the defaults and persist the result to localStorage.
PollingManager.prototype.saveSettings = function(settings) {
  try {
    for (var key in settings) {
      if (settings.hasOwnProperty(key)) {
        this.defaults[key] = settings[key];
      }
    }
    localStorage.setItem('myfsio-polling-intervals', JSON.stringify(this.defaults));
  } catch (e) {
    console.warn('Failed to save polling settings:', e);
  }
};

// Begin polling `key`: run `callback` immediately, then every `interval` ms
// (falling back to the stored default, then 30s). An interval <= 0 disables
// polling for the key. Restarts cleanly if the key was already running, and
// skips ticks while the tab is hidden.
PollingManager.prototype.start = function(key, callback, interval) {
  this.stop(key);
  var ms = interval !== undefined ? interval : (this.defaults[key] || 30000);
  if (ms <= 0) return;

  this.callbacks[key] = callback;
  this.intervals[key] = ms;

  // Immediate first tick so the UI doesn't wait a full interval.
  callback();

  var self = this;
  this.timers[key] = setInterval(function() {
    if (!document.hidden) {
      callback();
    }
  }, ms);
};

// Cancel the timer for `key` (no-op if not running). The callback entry is
// intentionally kept so polling can be resumed on visibilitychange.
PollingManager.prototype.stop = function(key) {
  if (this.timers[key]) {
    clearInterval(this.timers[key]);
    delete this.timers[key];
  }
};

// Cancel every active timer (used on tab hide and page unload).
PollingManager.prototype.stopAll = function() {
  for (var key in this.timers) {
    if (this.timers.hasOwnProperty(key)) {
      clearInterval(this.timers[key]);
    }
  }
  this.timers = {};
};

// Change the interval for `key`, persist it, and restart the timer if the
// key has a registered callback.
PollingManager.prototype.updateInterval = function(key, newInterval) {
  var callback = this.callbacks[key];
  this.defaults[key] = newInterval;
  this.saveSettings(this.defaults);
  if (callback) {
    this.start(key, callback, newInterval);
  }
};

// Return a shallow copy of the current interval settings.
PollingManager.prototype.getSettings = function() {
  var result = {};
  for (var key in this.defaults) {
    if (this.defaults.hasOwnProperty(key)) {
      result[key] = this.defaults[key];
    }
  }
  return result;
};
|
||||
|
||||
// Shared singleton used by all console pages.
var pollingManager = new PollingManager();

// Pause all polling while the tab is hidden; on return, restart every key
// that has a registered callback (start() fires an immediate tick, so the
// page refreshes right away).
document.addEventListener('visibilitychange', function() {
  if (document.hidden) {
    pollingManager.stopAll();
  } else {
    for (var key in pollingManager.callbacks) {
      if (pollingManager.callbacks.hasOwnProperty(key)) {
        pollingManager.start(key, pollingManager.callbacks[key], pollingManager.intervals[key]);
      }
    }
  }
});

// Clear all timers before the page unloads.
window.addEventListener('beforeunload', function() {
  pollingManager.stopAll();
});
|
||||
|
||||
return {
|
||||
getCsrfToken: getCsrfToken,
|
||||
formatBytes: formatBytes,
|
||||
escapeHtml: escapeHtml,
|
||||
submitFormAjax: submitFormAjax,
|
||||
PollingManager: PollingManager,
|
||||
pollingManager: pollingManager
|
||||
};
|
||||
})();
|
||||
|
||||
window.pollingManager = window.UICore.pollingManager;
|
||||
|
||||
// Copy `text` to the clipboard. If `button` is given, swap its label to
// 'Copied!' for 1.5s, then restore `originalText` (or the previous label).
// Resolves true on success, false on failure (failure is logged).
window.UICore.copyToClipboard = async function(text, button, originalText) {
  try {
    await navigator.clipboard.writeText(text);
    if (button) {
      var previousLabel = button.textContent;
      button.textContent = 'Copied!';
      setTimeout(function() {
        button.textContent = originalText || previousLabel;
      }, 1500);
    }
    return true;
  } catch (err) {
    console.error('Copy failed:', err);
    return false;
  }
};
|
||||
|
||||
// Toggle a button's loading state: when loading, stash the current HTML and
// disabled flag on the element, disable it, and show a spinner with
// `loadingText`; when not loading, restore the stashed values.
window.UICore.setButtonLoading = function(button, isLoading, loadingText) {
  if (!button) return;
  if (!isLoading) {
    button.disabled = button._originalDisabled || false;
    button.innerHTML = button._originalHtml || button.innerHTML;
    return;
  }
  button._originalHtml = button.innerHTML;
  button._originalDisabled = button.disabled;
  button.disabled = true;
  var label = loadingText || 'Loading...';
  button.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>' + label;
};
|
||||
|
||||
// Update a count badge's text with simple pluralization: `singular` is used
// when count === 1, `plural` otherwise (defaulting to '' / 's').
window.UICore.updateBadgeCount = function(selector, count, singular, plural) {
  var badge = document.querySelector(selector);
  if (!badge) return;
  var suffix = count === 1 ? (singular || '') : (plural || 's');
  badge.textContent = count + ' ' + suffix;
};
|
||||
|
||||
// Attach lightweight JSON-editing helpers to a <textarea>:
//  - Enter preserves the current line's indentation, indents one level
//    after a trailing '{'/'[', and expands an empty bracket pair so the
//    closer lands on its own line.
//  - Tab inserts two spaces; Shift+Tab outdents the current line by two.
// An 'input' event is dispatched after each edit so listeners re-run.
window.UICore.setupJsonAutoIndent = function(textarea) {
  if (!textarea) return;

  textarea.addEventListener('keydown', function(e) {
    if (e.key === 'Enter') {
      e.preventDefault();

      var start = this.selectionStart;
      var end = this.selectionEnd;
      var value = this.value;

      // Start of the line the cursor is on.
      var lineStart = value.lastIndexOf('\n', start - 1) + 1;
      var currentLine = value.substring(lineStart, start);

      // Carry over the current line's leading whitespace.
      var indentMatch = currentLine.match(/^(\s*)/);
      var indent = indentMatch ? indentMatch[1] : '';

      var trimmedLine = currentLine.trim();
      var lastChar = trimmedLine.slice(-1);

      var newIndent = indent;
      var insertAfter = '';

      if (lastChar === '{' || lastChar === '[') {
        // Opening bracket: go one indent level deeper.
        newIndent = indent + '  ';

        // If the matching closer is immediately after the cursor, push it
        // onto its own line at the original indent ("{|}" -> block form).
        var charAfterCursor = value.substring(start, start + 1).trim();
        if ((lastChar === '{' && charAfterCursor === '}') ||
            (lastChar === '[' && charAfterCursor === ']')) {
          insertAfter = '\n' + indent;
        }
      } else if (lastChar === ',' || lastChar === ':') {
        newIndent = indent;
      }

      var insertion = '\n' + newIndent + insertAfter;
      var newValue = value.substring(0, start) + insertion + value.substring(end);

      this.value = newValue;

      // Place the cursor after the inserted newline + indent, before any
      // closing-bracket text added via insertAfter.
      var newCursorPos = start + 1 + newIndent.length;
      this.selectionStart = this.selectionEnd = newCursorPos;

      this.dispatchEvent(new Event('input', { bubbles: true }));
    }

    if (e.key === 'Tab') {
      e.preventDefault();
      var start = this.selectionStart;
      var end = this.selectionEnd;

      if (e.shiftKey) {
        // Shift+Tab: strip two leading spaces from the current line, if present.
        var lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
        var lineContent = this.value.substring(lineStart, start);
        if (lineContent.startsWith('  ')) {
          this.value = this.value.substring(0, lineStart) +
            this.value.substring(lineStart + 2);
          this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
        }
      } else {
        // Tab: insert two spaces, replacing any selection.
        this.value = this.value.substring(0, start) + '  ' + this.value.substring(end);
        this.selectionStart = this.selectionEnd = start + 2;
      }

      this.dispatchEvent(new Event('input', { bubbles: true }));
    }
  });
};
|
||||
|
||||
// On page load, show (and consume) a one-shot flash message stashed in
// sessionStorage by the previous page. Malformed JSON is ignored.
document.addEventListener('DOMContentLoaded', function() {
  var raw = sessionStorage.getItem('flashMessage');
  if (!raw) return;
  sessionStorage.removeItem('flashMessage');
  try {
    var parsed = JSON.parse(raw);
    if (window.showToast) {
      window.showToast(parsed.body || parsed.title, parsed.title, parsed.variant || 'info');
    }
  } catch (e) {}
});
|
||||
@@ -5,7 +5,7 @@
|
||||
<h1 class="display-6 mb-3">We can't find that page</h1>
|
||||
<p class="text-muted mb-4">The requested console route isn't available in MyFSIO. Double-check the URL or head back to your buckets.</p>
|
||||
<div class="d-flex flex-wrap justify-content-center gap-3">
|
||||
<a class="btn btn-primary" href="{{ url_for('ui.buckets_overview') }}">Return to buckets</a>
|
||||
<a class="btn btn-primary" href="{{ url_for(endpoint="ui.buckets_overview") }}">Return to buckets</a>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
@@ -5,7 +5,7 @@
|
||||
<h1 class="display-6 mb-3">Internal Server Error</h1>
|
||||
<p class="text-muted mb-4">Something went wrong on our end. Please try again later or contact support.</p>
|
||||
<div class="d-flex flex-wrap justify-content-center gap-3">
|
||||
<a class="btn btn-primary" href="{{ url_for('ui.buckets_overview') }}">Return to buckets</a>
|
||||
<a class="btn btn-primary" href="{{ url_for(endpoint="ui.buckets_overview") }}">Return to buckets</a>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
292
crates/myfsio-server/templates/_convert.py
Normal file
292
crates/myfsio-server/templates/_convert.py
Normal file
@@ -0,0 +1,292 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
# Templates live next to this script; the converter edits them in place.
TEMPLATE_DIR = os.path.dirname(os.path.abspath(__file__))

# Matches {{ "A" if COND else "B" }} where both branches are string literals
# (single- or double-quoted). Groups: 1=opening '{{', 2/3=branch A (dq/sq),
# 4=condition, 5/6=branch B (dq/sq), 7=closing '}}'.
TERNARY_RE = re.compile(
    r"""(\{\{\s*)
    (?:"([^"]*)"|'([^']*)') # literal A
    \s+if\s+
    ([^{}]+?) # condition
    \s+else\s+
    (?:"([^"]*)"|'([^']*)') # literal B
    (\s*\}\})""",
    re.VERBOSE,
)

# Same shape for {% set var = "A" if COND else "B" %}. Groups: 1=prefix
# through '=', 2=variable name, 3/4=branch A, 5=condition, 6/7=branch B,
# 8=closing '%}'.
TERNARY_SET_RE = re.compile(
    r"""(\{%\s*set\s+([A-Za-z_][A-Za-z_0-9]*)\s*=\s*)
    (?:"([^"]*)"|'([^']*)')
    \s+if\s+
    ([^{}]+?)
    \s+else\s+
    (?:"([^"]*)"|'([^']*)')
    (\s*%\})""",
    re.VERBOSE,
)
|
||||
|
||||
|
||||
def convert_single_quoted_strings_in_expressions(text: str) -> str:
    """Inside {{...}} or {%...%}, swap ' for " around tokens that look like strings."""

    def rewrite_body(match):
        opener, body, closer = match.group(1), match.group(2), match.group(3)
        normalized = re.sub(r"'([^'\\\n]*)'", r'"\1"', body)
        return opener + normalized + closer

    # Only touch text inside template expression/statement delimiters.
    return re.sub(r"(\{[{%])([^{}]*?)([}%]\})", rewrite_body, text, flags=re.DOTALL)
|
||||
|
||||
|
||||
def convert_inline_ternary(text: str) -> str:
    """Expand literal-branch Jinja ternaries into explicit if/else blocks.

    Handles both {{ "A" if COND else "B" }} (via TERNARY_RE) and
    {% set v = "A" if COND else "B" %} (via TERNARY_SET_RE).
    """

    def expand_expr(m):
        branch_a = m.group(2) if m.group(2) is not None else m.group(3)
        branch_b = m.group(5) if m.group(5) is not None else m.group(6)
        condition = m.group(4)
        return '{% if ' + condition + ' %}' + branch_a + '{% else %}' + branch_b + '{% endif %}'

    def expand_set(m):
        name = m.group(2)
        branch_a = m.group(3) if m.group(3) is not None else m.group(4)
        branch_b = m.group(6) if m.group(6) is not None else m.group(7)
        condition = m.group(5)
        return (
            '{% if ' + condition + ' %}{% set ' + name + ' = "' + branch_a + '" %}'
            '{% else %}{% set ' + name + ' = "' + branch_b + '" %}{% endif %}'
        )

    # Re-run both substitutions until a fixed point so every occurrence
    # (including ones exposed by earlier rewrites) is expanded.
    previous = None
    while previous != text:
        previous = text
        text = TERNARY_SET_RE.sub(expand_set, text)
        text = TERNARY_RE.sub(expand_expr, text)
    return text
|
||||
|
||||
|
||||
def convert_request_args(text: str) -> str:
    """Translate Flask request.args lookups into Tera-style variables."""
    replacements = [
        # request.args.get("k", "default") -> request_args.k | default(value="default")
        (r'request\.args\.get\(\s*"([^"]+)"\s*,\s*"([^"]*)"\s*\)',
         r'request_args.\1 | default(value="\2")'),
        # request.args.get("k") -> request_args.k
        (r'request\.args\.get\(\s*"([^"]+)"\s*\)',
         r'request_args.\1'),
    ]
    for pattern, repl in replacements:
        text = re.sub(pattern, repl, text)
    # Flask's request.endpoint maps to the template's current_endpoint var.
    return text.replace('request.endpoint', 'current_endpoint')
|
||||
|
||||
|
||||
def convert_items_keys(text: str) -> str:
    """Drop Python dict-method calls; Tera iterates mappings directly."""
    for method_call in ('.items()', '.keys()', '.values()'):
        text = text.replace(method_call, '')
    return text
|
||||
|
||||
|
||||
def convert_tojson(text: str) -> str:
    """Map Jinja's | tojson filter to Tera's | json_encode | safe."""
    return re.sub(r'\|\s*tojson\b', '| json_encode | safe', text)
|
||||
|
||||
|
||||
def convert_is_none(text: str) -> str:
    """Rewrite Jinja none-tests as Tera null comparisons.

    The longer 'is not none' form is handled first so the shorter pattern
    cannot partially match it.
    """
    text = re.sub(r'\bis\s+not\s+none\b', '!= null', text)
    return re.sub(r'\bis\s+none\b', '== null', text)
|
||||
|
||||
|
||||
def convert_namespace(text: str) -> str:
    """Replace Jinja namespace() state with Tera set_global assignments.

    The {% set ns = namespace(...) %} declaration is reduced to a comment
    shim, ``ns.x = ...`` becomes ``{% set_global x = ...``, and remaining
    ``ns.x`` reads become bare variable reads.

    Note: the original built an `assigns` list inside the replacement
    callback that was never used and always returned the same constant;
    the dead code is removed and the callback replaced with a plain
    replacement string (behavior unchanged).
    """
    # Drop the namespace declaration entirely; set_global makes it unnecessary.
    text = re.sub(
        r'\{%\s*set\s+ns\s*=\s*namespace\(([^)]*)\)\s*%\}',
        '{# namespace shim #}',
        text,
    )
    # ns.x = value  ->  {% set_global x = value   (the closing %} of the
    # original {% set ... %} tag is reused as-is).
    text = re.sub(r'\bns\.([A-Za-z_][A-Za-z_0-9]*)\s*=\s*', r'{% set_global \1 = ', text)
    # Remaining ns.x reads become plain variable reads.
    return re.sub(r'\bns\.([A-Za-z_][A-Za-z_0-9]*)', r'\1', text)
|
||||
|
||||
|
||||
def convert_url_for_positional(text: str) -> str:
    """url_for("x", ...) -> url_for(endpoint="x", ...)"""
    call_re = re.compile(r'(url_for)\(\s*"([^"]+)"\s*((?:,[^()]*)?)\)')

    def rewrite(m):
        func = m.group(1)
        endpoint = m.group(2)
        trailing = (m.group(3) or '').strip()
        # Strip the leading comma from any remaining keyword arguments.
        if trailing.startswith(','):
            trailing = trailing[1:].strip()
        if trailing:
            return f'{func}(endpoint="{endpoint}", {trailing})'
        return f'{func}(endpoint="{endpoint}")'

    # Iterate to a fixed point so every call on the page is rewritten.
    previous = None
    while previous != text:
        previous = text
        text = call_re.sub(rewrite, text)
    return text
|
||||
|
||||
|
||||
def convert_d_filter(text: str) -> str:
    """Jinja's | d(x) shorthand -> Tera's | default(value=x); bare | d() gets 0."""

    def rewrite(m):
        arg = m.group(1) or '0'
        return f'| default(value={arg})'

    return re.sub(r'\|\s*d\(\s*([^)]*?)\s*\)', rewrite, text)
|
||||
|
||||
|
||||
def convert_replace_filter(text: str) -> str:
    """Convert positional replace("a", "b") filter args to Tera keyword form."""
    return re.sub(
        r'\|\s*replace\(\s*"([^"]*)"\s*,\s*"([^"]*)"\s*\)',
        lambda m: f'| replace(from="{m.group(1)}", to="{m.group(2)}")',
        text,
    )
|
||||
|
||||
|
||||
def convert_truncate_filter(text: str) -> str:
    """Convert | truncate(N[, ...]) to Tera's keyword form, dropping extra args."""
    return re.sub(
        r'\|\s*truncate\(\s*(\d+)\s*(?:,[^)]*)?\)',
        lambda m: f'| truncate(length={m.group(1)})',
        text,
    )
|
||||
|
||||
|
||||
def convert_strip_method(text: str) -> str:
    """Rewrite Python-style value.strip() calls as the Tera | trim filter."""
    strip_call = re.compile(r'(\b[A-Za-z_][A-Za-z_0-9.\[\]"]*)\s*\.\s*strip\(\s*\)')
    return strip_call.sub(lambda m: m.group(1) + ' | trim', text)
|
||||
|
||||
|
||||
def convert_split_method(text: str) -> str:
    """value.split("sep") -> value | split(pat="sep")"""
    split_call = re.compile(r'(\b[A-Za-z_][A-Za-z_0-9.]*)\s*\.\s*split\(\s*"([^"]*)"\s*\)')
    return split_call.sub(lambda m: f'{m.group(1)} | split(pat="{m.group(2)}")', text)
|
||||
|
||||
|
||||
def convert_python_slice(text: str) -> str:
    """Rewrite Python slice syntax x[a:b] as Tera's | slice(start=, end=) filter.

    Slices with negative bounds are left untouched by the first pass.
    """
    def repl_colon(m):
        obj = m.group(1)
        start = m.group(2) or '0'  # empty start means "from the beginning"
        end = m.group(3)
        # Negative bounds are not rewritten here; return the match unchanged.
        if start.startswith('-') or (end and end.startswith('-')):
            return m.group(0)
        if end:
            return f'{obj} | slice(start={start}, end={end})'
        return f'{obj} | slice(start={start})'

    def repl_neg_end(m):
        obj = m.group(1)
        n = m.group(2)
        return f'{obj} | slice(start=-{n})'

    # First pass: plain x[a:b] slices with non-negative bounds.
    text = re.sub(
        r'(\b[A-Za-z_][A-Za-z_0-9.]*)\[\s*(-?\d*)\s*:\s*(-?\d*)\s*\]',
        repl_colon,
        text,
    )
    # Second pass: collapse "obj| slice(start=-N, end=)" into
    # "obj | slice(start=-N)".  NOTE(review): repl_colon above never emits
    # this form (negative starts are returned unchanged), so this pass
    # appears to target pre-existing text -- confirm whether it is still needed.
    text = re.sub(
        r'(\b[A-Za-z_][A-Za-z_0-9.]*)\|\s*slice\(start=-(\d+)\s*,\s*end=\s*\)',
        repl_neg_end,
        text,
    )
    return text
|
||||
|
||||
|
||||
def convert_inline_ternary_expr(text: str) -> str:
    """Handle arbitrary ternary inside {{ ... }}: A if COND else B -> {% if COND %}A{% else %}B{% endif %}"""
    # Line-by-line so the per-line regex in the helper stays simple.
    return '\n'.join(_convert_line_ternary(line) for line in text.split('\n'))
|
||||
|
||||
|
||||
def _convert_line_ternary(line: str) -> str:
|
||||
if '{{' not in line or ' if ' not in line or ' else ' not in line:
|
||||
return line
|
||||
prev = None
|
||||
while prev != line:
|
||||
prev = line
|
||||
m = re.search(r'\{\{\s*([^{}]+?)\s+if\s+([^{}]+?)\s+else\s+([^{}]+?)\s*\}\}', line)
|
||||
if not m:
|
||||
break
|
||||
replacement = '{% if ' + m.group(2) + ' %}{{ ' + m.group(1) + ' }}{% else %}{{ ' + m.group(3) + ' }}{% endif %}'
|
||||
line = line[:m.start()] + replacement + line[m.end():]
|
||||
return line
|
||||
|
||||
|
||||
def convert_dict_get(text: str) -> str:
    """Convert X.get("key", default) -> X.key | default(value=default) when simple."""
    get_call = re.compile(
        r'([A-Za-z_][A-Za-z_0-9]*(?:\.[A-Za-z_][A-Za-z_0-9]*)*)'
        r'\.get\(\s*"([A-Za-z_][A-Za-z_0-9]*)"\s*(?:,\s*([^(){}]+?))?\s*\)'
    )

    def rewrite(m):
        attr_path = f'{m.group(1)}.{m.group(2)}'
        fallback = (m.group(3) or '').strip()
        if fallback:
            return f'{attr_path} | default(value={fallback})'
        return attr_path

    # Repeat until stable so chained .get() calls are all rewritten.
    previous = None
    while previous != text:
        previous = text
        text = get_call.sub(rewrite, text)
    return text
|
||||
|
||||
|
||||
def convert_file(path: str) -> bool:
    """Apply every Jinja->Tera conversion pass to one template file.

    Returns True (after rewriting the file in place with LF newlines) if any
    pass changed the text; returns False and leaves the file untouched
    otherwise.
    """
    with open(path, 'r', encoding='utf-8') as f:
        original = f.read()
    text = original
    # Quote normalization runs first: the later passes' regexes match
    # double-quoted strings only.
    text = convert_single_quoted_strings_in_expressions(text)
    text = convert_inline_ternary(text)
    text = convert_request_args(text)
    text = convert_items_keys(text)
    text = convert_tojson(text)
    text = convert_is_none(text)
    text = convert_namespace(text)
    text = convert_dict_get(text)
    text = convert_url_for_positional(text)
    text = convert_d_filter(text)
    text = convert_replace_filter(text)
    text = convert_truncate_filter(text)
    text = convert_strip_method(text)
    text = convert_split_method(text)
    text = convert_python_slice(text)
    text = convert_inline_ternary_expr(text)
    if text != original:
        with open(path, 'w', encoding='utf-8', newline='\n') as f:
            f.write(text)
        return True
    return False
|
||||
|
||||
|
||||
def main():
    """Convert every .html template beside this script and print a summary."""
    changed = []
    for name in sorted(os.listdir(TEMPLATE_DIR)):
        # Only template files; skip this script and any other assets.
        if not name.endswith('.html'):
            continue
        p = os.path.join(TEMPLATE_DIR, name)
        if convert_file(p):
            changed.append(name)
    print('Changed:', len(changed))
    for c in changed:
        print(' -', c)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
443
crates/myfsio-server/templates/base.html
Normal file
443
crates/myfsio-server/templates/base.html
Normal file
@@ -0,0 +1,443 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
{% if principal %}<meta name="csrf-token" content="{{ csrf_token_value }}" />{% endif %}
|
||||
<title>MyFSIO Console</title>
|
||||
<link rel="icon" type="image/png" href="{{ url_for(endpoint="static", filename="images/MyFSIO.png") }}" />
|
||||
<link rel="icon" type="image/x-icon" href="{{ url_for(endpoint="static", filename="images/MyFSIO.ico") }}" />
|
||||
<link
|
||||
href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.2/dist/css/bootstrap.min.css"
|
||||
rel="stylesheet"
|
||||
integrity="sha384-T3c6CoIi6uLrA9TneNEoa7RxnatzjcDSCmG1MXxSR1GAsXEV/Dwwykc2MPK8M2HN"
|
||||
crossorigin="anonymous"
|
||||
/>
|
||||
<script>
  // Runs in <head>, before first paint: applies the persisted theme and the
  // persisted sidebar state early so the page does not flash the wrong look.
  (function () {
    const root = document.documentElement;
    let theme = 'light';
    try {
      if (localStorage.getItem('myfsio-theme') === 'dark') {
        theme = 'dark';
      }
    } catch (err) {
      // localStorage unavailable (private mode / blocked) — keep light default.
    }
    root.dataset.bsTheme = theme;
    root.dataset.theme = theme;
    try {
      if (localStorage.getItem('myfsio-sidebar-collapsed') === 'true') {
        root.classList.add('sidebar-will-collapse');
      }
    } catch (err) {}
  })();
</script>
|
||||
<link rel="stylesheet" href="{{ url_for(endpoint="static", filename="css/main.css") }}" />
|
||||
</head>
|
||||
<body>
|
||||
<header class="mobile-header d-lg-none">
|
||||
<button class="sidebar-toggle-btn" type="button" data-bs-toggle="offcanvas" data-bs-target="#mobileSidebar" aria-controls="mobileSidebar" aria-label="Toggle navigation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M2.5 12a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<a class="mobile-brand" href="{{ url_for(endpoint="ui.buckets_overview") }}">
|
||||
<img src="{{ url_for(endpoint="static", filename="images/MyFSIO.png") }}" alt="MyFSIO logo" width="28" height="28" />
|
||||
<span>MyFSIO</span>
|
||||
</a>
|
||||
<button class="theme-toggle-mobile" type="button" id="themeToggleMobile" aria-label="Toggle dark mode">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleSunMobile" viewBox="0 0 16 16">
|
||||
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleMoonMobile" viewBox="0 0 16 16">
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</header>
|
||||
|
||||
<div class="offcanvas offcanvas-start sidebar-offcanvas" tabindex="-1" id="mobileSidebar" aria-labelledby="mobileSidebarLabel">
|
||||
<div class="offcanvas-header sidebar-header">
|
||||
<a class="sidebar-brand" href="{{ url_for(endpoint="ui.buckets_overview") }}">
|
||||
<img src="{{ url_for(endpoint="static", filename="images/MyFSIO.png") }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</a>
|
||||
<button type="button" class="btn-close btn-close-white" data-bs-dismiss="offcanvas" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="offcanvas-body sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for(endpoint="ui.buckets_overview") }}" class="sidebar-link {% if current_endpoint == "ui.buckets_overview" or current_endpoint == "ui.bucket_detail" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span>Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for(endpoint="ui.iam_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.iam_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<span>IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.connections_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.connections_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
<span>Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span>Metrics</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.sites_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.sites_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
<span>Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for(endpoint="ui.website_domains_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.website_domains_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span>Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for(endpoint="ui.system_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.system_dashboard" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span>System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for(endpoint="ui.docs_page") }}" class="sidebar-link {% if current_endpoint == "ui.docs_page" %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span>Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
|
||||
{% if principal %}
|
||||
<div class="sidebar-footer">
|
||||
<div class="sidebar-user">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
|
||||
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div class="user-info">
|
||||
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(length=16) }}</div>
|
||||
<div class="user-key">{{ principal.access_key | truncate(length=12) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for(endpoint="ui.logout") }}" class="w-100">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<button class="sidebar-logout-btn" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
|
||||
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
|
||||
</svg>
|
||||
<span>Sign out</span>
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<aside class="sidebar d-none d-lg-flex" id="desktopSidebar">
|
||||
<div class="sidebar-header">
|
||||
<div class="sidebar-brand" id="sidebarBrand">
|
||||
<img src="{{ url_for(endpoint="static", filename="images/MyFSIO.png") }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</div>
|
||||
<button class="sidebar-collapse-btn" type="button" id="sidebarCollapseBtn" aria-label="Collapse sidebar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
<div class="sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for(endpoint="ui.buckets_overview") }}" class="sidebar-link {% if current_endpoint == "ui.buckets_overview" or current_endpoint == "ui.bucket_detail" %}active{% endif %}" data-tooltip="Buckets">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for(endpoint="ui.iam_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.iam_dashboard" %}active{% endif %}" data-tooltip="IAM">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.connections_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.connections_dashboard" %}active{% endif %}" data-tooltip="Connections">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.metrics_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.metrics_dashboard" %}active{% endif %}" data-tooltip="Metrics">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Metrics</span>
|
||||
</a>
|
||||
<a href="{{ url_for(endpoint="ui.sites_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.sites_dashboard" %}active{% endif %}" data-tooltip="Sites">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for(endpoint="ui.website_domains_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.website_domains_dashboard" %}active{% endif %}" data-tooltip="Domains">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for(endpoint="ui.system_dashboard") }}" class="sidebar-link {% if current_endpoint == "ui.system_dashboard" %}active{% endif %}" data-tooltip="System">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for(endpoint="ui.docs_page") }}" class="sidebar-link {% if current_endpoint == "ui.docs_page" %}active{% endif %}" data-tooltip="Documentation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
|
||||
</div>
|
||||
<div class="sidebar-footer">
|
||||
<button class="theme-toggle-sidebar" type="button" id="themeToggle" aria-label="Toggle dark mode">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleSun" viewBox="0 0 16 16">
|
||||
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleMoon" viewBox="0 0 16 16">
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
</svg>
|
||||
<span class="theme-toggle-text">Toggle theme</span>
|
||||
</button>
|
||||
{% if principal %}
|
||||
<div class="sidebar-user" data-username="{{ principal.display_name }}">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
|
||||
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div class="user-info">
|
||||
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(length=16) }}</div>
|
||||
<div class="user-key">{{ principal.access_key | truncate(length=12) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for(endpoint="ui.logout") }}" class="w-100">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<button class="sidebar-logout-btn" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
|
||||
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
|
||||
</svg>
|
||||
<span class="logout-text">Sign out</span>
|
||||
</button>
|
||||
</form>
|
||||
{% endif %}
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
<div class="main-wrapper">
|
||||
<main class="main-content">
|
||||
{% block content %}{% endblock %}
|
||||
</main>
|
||||
</div>
|
||||
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
||||
<div id="liveToast" class="toast" role="alert" aria-live="assertive" aria-atomic="true">
|
||||
<div class="toast-header">
|
||||
<strong class="me-auto" id="toastTitle">Notification</strong>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="toast" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="toast-body" id="toastMessage">
|
||||
Hello, world! This is a toast message.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script
|
||||
src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.2/dist/js/bootstrap.bundle.min.js"
|
||||
integrity="sha384-C6RzsynM9kWDrMNeT87bh95OGNyZPhcTNXj1NW7RuBCsyN/o0jlpcV8Qyq46cDfL"
|
||||
crossorigin="anonymous"
|
||||
></script>
|
||||
<script>
  // Exposes the CSRF token (rendered into <meta name="csrf-token"> for
  // authenticated pages) and wraps window.fetch so every state-changing
  // request automatically carries the X-CSRFToken header.
  (function () {
    const meta = document.querySelector('meta[name="csrf-token"]');
    window.myfsioCsrfToken = (meta && meta.getAttribute('content')) || '';
    window.getCsrfToken = () => window.myfsioCsrfToken;

    const SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS'];
    const nativeFetch = window.fetch;
    window.fetch = function (input, init) {
      const options = init || {};
      const method = (options.method || 'GET').toUpperCase();
      if (!SAFE_METHODS.includes(method)) {
        const headers = new Headers(options.headers || {});
        if (window.myfsioCsrfToken) {
          headers.set('X-CSRFToken', window.myfsioCsrfToken);
        }
        options.headers = headers;
      }
      return nativeFetch.call(this, input, options);
    };
  })();
</script>
|
||||
<script>
  // Theme switcher: keeps the <html> data attributes, sun/moon icon
  // visibility, button ARIA state, and the persisted preference in sync
  // across the desktop and mobile toggle buttons.
  (function () {
    const STORAGE_KEY = 'myfsio-theme';
    const buttons = [
      document.getElementById('themeToggle'),
      document.getElementById('themeToggleMobile'),
    ];
    const iconPairs = [
      [document.getElementById('themeToggleSun'), document.getElementById('themeToggleMoon')],
      [document.getElementById('themeToggleSunMobile'), document.getElementById('themeToggleMoonMobile')],
    ];

    const applyTheme = (theme) => {
      const root = document.documentElement;
      root.dataset.bsTheme = theme;
      root.dataset.theme = theme;
      try {
        localStorage.setItem(STORAGE_KEY, theme);
      } catch (err) {
        console.log("Error: local storage not available, cannot save theme preference.");
      }
      const isDark = theme === 'dark';
      for (const [sun, moon] of iconPairs) {
        if (sun && moon) {
          sun.classList.toggle('d-none', !isDark);
          moon.classList.toggle('d-none', isDark);
        }
      }
      const label = isDark ? 'Switch to light mode' : 'Switch to dark mode';
      for (const btn of buttons) {
        if (btn) {
          btn.setAttribute('aria-pressed', isDark ? 'true' : 'false');
          btn.setAttribute('title', label);
          btn.setAttribute('aria-label', label);
        }
      }
    };

    // Sync UI with whatever the head bootstrap script already applied.
    applyTheme(document.documentElement.dataset.bsTheme || 'light');

    const flipTheme = () => {
      applyTheme(document.documentElement.dataset.bsTheme === 'dark' ? 'light' : 'dark');
    };

    for (const btn of buttons) {
      btn?.addEventListener('click', flipTheme);
    }
  })();
</script>
|
||||
<script>
  // Desktop sidebar collapse: restores the persisted state on load, lets the
  // chevron button toggle it, and lets clicking the brand expand a collapsed
  // sidebar instead of navigating.
  (function () {
    const STORAGE_KEY = 'myfsio-sidebar-collapsed';
    const sidebar = document.getElementById('desktopSidebar');
    const collapseBtn = document.getElementById('sidebarCollapseBtn');
    const brand = document.getElementById('sidebarBrand');

    if (!sidebar || !collapseBtn) {
      return;
    }

    const isCollapsed = () => sidebar.classList.contains('sidebar-collapsed');

    const setCollapsed = (collapsed) => {
      sidebar.classList.toggle('sidebar-collapsed', collapsed);
      document.body.classList.toggle('sidebar-is-collapsed', collapsed);
      // The pre-paint marker from <head> is obsolete once real state is applied.
      document.documentElement.classList.remove('sidebar-will-collapse');
      try {
        localStorage.setItem(STORAGE_KEY, collapsed ? 'true' : 'false');
      } catch (err) {
        // Persistence is best-effort only.
      }
    };

    try {
      setCollapsed(localStorage.getItem(STORAGE_KEY) === 'true');
    } catch (err) {
      document.documentElement.classList.remove('sidebar-will-collapse');
    }

    collapseBtn.addEventListener('click', () => {
      setCollapsed(!isCollapsed());
    });

    brand?.addEventListener('click', (event) => {
      if (isCollapsed()) {
        event.preventDefault();
        setCollapsed(false);
      }
    });
  })();
</script>
|
||||
<script>
|
||||
|
||||
window.showToast = function(message, title = 'Notification', type = 'info') {
|
||||
const toastEl = document.getElementById('liveToast');
|
||||
const toastTitle = document.getElementById('toastTitle');
|
||||
const toastMessage = document.getElementById('toastMessage');
|
||||
|
||||
toastTitle.textContent = title;
|
||||
toastMessage.textContent = message;
|
||||
|
||||
toastEl.classList.remove('text-bg-primary', 'text-bg-success', 'text-bg-danger', 'text-bg-warning');
|
||||
|
||||
if (type === 'success') toastEl.classList.add('text-bg-success');
|
||||
if (type === 'error') toastEl.classList.add('text-bg-danger');
|
||||
if (type === 'warning') toastEl.classList.add('text-bg-warning');
|
||||
|
||||
const toast = new bootstrap.Toast(toastEl);
|
||||
toast.show();
|
||||
};
|
||||
</script>
|
||||
<script>
|
||||
(function () {
|
||||
|
||||
{% if flashed_messages %}
|
||||
{% for flash in flashed_messages %}
|
||||
var type = "{{ flash.category }}";
|
||||
if (type === "danger") type = "error";
|
||||
window.showToast({{ flash.message | json_encode | safe }}, "Notification", type);
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
})();
|
||||
</script>
|
||||
<script src="{{ url_for(endpoint="static", filename="js/ui-core.js") }}"></script>
|
||||
{% block extra_scripts %}{% endblock %}
|
||||
|
||||
</body>
|
||||
</html>
|
||||
File diff suppressed because it is too large
Load Diff
@@ -46,13 +46,12 @@
|
||||
<div class="d-flex align-items-center gap-3">
|
||||
<div class="bucket-icon">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
|
||||
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H11a.5.5 0 0 1 0 1h-1v1h1a.5.5 0 0 1 0 1h-1v1a.5.5 0 0 1-1 0v-1H6v1a.5.5 0 0 1-1 0v-1H4a.5.5 0 0 1 0-1h1v-1H4a.5.5 0 0 1 0-1h1.5A1.5 1.5 0 0 1 7 10.5V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1zm5 7.5v1h3v-1a.5.5 0 0 0-.5-.5h-2a.5.5 0 0 0-.5.5z"/>
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
|
||||
<small class="text-muted">Created {{ bucket.meta.created_at.strftime('%b %d, %Y') }}</small>
|
||||
<small class="text-muted">Created {{ bucket.meta.creation_date | format_datetime }}</small>
|
||||
</div>
|
||||
</div>
|
||||
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>
|
||||
@@ -90,6 +89,14 @@
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<div class="col-12 d-none" id="bucket-no-results">
|
||||
<div class="text-center py-5 text-muted">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="mb-3 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<p class="mb-0 fw-medium">No buckets match your filter.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="createBucketModal" tabindex="-1" aria-hidden="true">
|
||||
@@ -105,11 +112,11 @@
|
||||
</h1>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.create_bucket') }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<form method="post" action="{{ url_for(endpoint="ui.create_bucket") }}" id="createBucketForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<div class="modal-body pt-0">
|
||||
<label class="form-label fw-medium">Bucket name</label>
|
||||
<input class="form-control" type="text" name="bucket_name" pattern="[a-z0-9.-]{3,63}" placeholder="my-bucket-name" required autofocus />
|
||||
<input class="form-control" type="text" name="bucket_name" pattern="[a-z0-9.\-]{3,63}" placeholder="my-bucket-name" required autofocus />
|
||||
<div class="form-text">Use 3-63 characters: lowercase letters, numbers, dots, or hyphens.</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
@@ -131,10 +138,10 @@
|
||||
{{ super() }}
|
||||
<script>
|
||||
(function () {
|
||||
// Search functionality
|
||||
|
||||
const searchInput = document.getElementById('bucket-search');
|
||||
const bucketItems = document.querySelectorAll('.bucket-item');
|
||||
const noBucketsMsg = document.querySelector('.text-center.py-5'); // The "No buckets found" empty state
|
||||
const noBucketsMsg = document.querySelector('.text-center.py-5');
|
||||
|
||||
if (searchInput) {
|
||||
searchInput.addEventListener('input', (e) => {
|
||||
@@ -142,7 +149,7 @@
|
||||
let visibleCount = 0;
|
||||
|
||||
bucketItems.forEach(item => {
|
||||
const name = item.querySelector('.card-title').textContent.toLowerCase();
|
||||
const name = item.querySelector('.bucket-name').textContent.toLowerCase();
|
||||
if (name.includes(term)) {
|
||||
item.classList.remove('d-none');
|
||||
visibleCount++;
|
||||
@@ -150,10 +157,18 @@
|
||||
item.classList.add('d-none');
|
||||
}
|
||||
});
|
||||
|
||||
var noResults = document.getElementById('bucket-no-results');
|
||||
if (noResults) {
|
||||
if (term && visibleCount === 0) {
|
||||
noResults.classList.remove('d-none');
|
||||
} else {
|
||||
noResults.classList.add('d-none');
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// View toggle functionality
|
||||
const viewGrid = document.getElementById('view-grid');
|
||||
const viewList = document.getElementById('view-list');
|
||||
const container = document.getElementById('buckets-container');
|
||||
@@ -168,8 +183,7 @@
|
||||
});
|
||||
cards.forEach(card => {
|
||||
card.classList.remove('h-100');
|
||||
// Optional: Add flex-row to card-body content if we want a horizontal layout
|
||||
// For now, full-width stacked cards is a good list view
|
||||
|
||||
});
|
||||
localStorage.setItem('bucket-view-pref', 'list');
|
||||
} else {
|
||||
@@ -188,7 +202,6 @@
|
||||
viewGrid.addEventListener('change', () => setView('grid'));
|
||||
viewList.addEventListener('change', () => setView('list'));
|
||||
|
||||
// Restore preference
|
||||
const pref = localStorage.getItem('bucket-view-pref');
|
||||
if (pref === 'list') {
|
||||
viewList.checked = true;
|
||||
@@ -209,6 +222,25 @@
|
||||
});
|
||||
row.style.cursor = 'pointer';
|
||||
});
|
||||
|
||||
var createForm = document.getElementById('createBucketForm');
|
||||
if (createForm) {
|
||||
createForm.addEventListener('submit', function(e) {
|
||||
e.preventDefault();
|
||||
window.UICore.submitFormAjax(createForm, {
|
||||
successMessage: 'Bucket created',
|
||||
onSuccess: function(data) {
|
||||
var modal = bootstrap.Modal.getInstance(document.getElementById('createBucketModal'));
|
||||
if (modal) modal.hide();
|
||||
if (data.bucket_name) {
|
||||
window.location.href = '{{ url_for(endpoint="ui.bucket_detail", bucket_name="__BUCKET__") }}'.replace('__BUCKET__', data.bucket_name);
|
||||
} else {
|
||||
location.reload();
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
})();
|
||||
</script>
|
||||
{% endblock %}
|
||||
@@ -8,8 +8,8 @@
|
||||
<p class="text-uppercase text-muted small mb-1">Replication</p>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
|
||||
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
|
||||
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
||||
<path d="M10.232 8.768l.546-.353a.25.25 0 0 0 0-.418l-.546-.354a.25.25 0 0 1-.116-.21V6.25a.25.25 0 0 0-.25-.25h-.5a.25.25 0 0 0-.25.25v1.183a.25.25 0 0 1-.116.21l-.546.354a.25.25 0 0 0 0 .418l.546.353a.25.25 0 0 1 .116.21v1.183a.25.25 0 0 0 .25.25h.5a.25.25 0 0 0 .25-.25V8.978a.25.25 0 0 1 .116-.21z"/>
|
||||
</svg>
|
||||
Remote Connections
|
||||
</h1>
|
||||
@@ -17,7 +17,7 @@
|
||||
</div>
|
||||
<div class="d-none d-md-block">
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
|
||||
{{ connections|length }} connection{{ 's' if connections|length != 1 else '' }}
|
||||
{{ connections|length }} connection{% if connections|length != 1 %}s{% else %}{% endif %}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
@@ -35,8 +35,8 @@
|
||||
<p class="text-muted small mb-0">Connect to an S3-compatible endpoint</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<form method="POST" action="{{ url_for('ui.create_connection') }}" id="createConnectionForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<form method="POST" action="{{ url_for(endpoint="ui.create_connection") }}" id="createConnectionForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}"/>
|
||||
<div class="mb-3">
|
||||
<label for="name" class="form-label fw-medium">Name</label>
|
||||
<input type="text" class="form-control" id="name" name="name" required placeholder="Production Backup">
|
||||
@@ -57,7 +57,7 @@
|
||||
<label for="secret_key" class="form-label fw-medium">Secret Key</label>
|
||||
<div class="input-group">
|
||||
<input type="password" class="form-control font-monospace" id="secret_key" name="secret_key" required>
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="togglePassword('secret_key')" title="Toggle visibility">
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="ConnectionsManagement.togglePassword('secret_key')" title="Toggle visibility">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
|
||||
<path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
|
||||
@@ -104,6 +104,7 @@
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col" style="width: 50px;">Status</th>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Endpoint</th>
|
||||
<th scope="col">Region</th>
|
||||
@@ -113,13 +114,17 @@
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for conn in connections %}
|
||||
<tr>
|
||||
<tr data-connection-id="{{ conn.id }}">
|
||||
<td class="text-center">
|
||||
<span class="connection-status" data-status="checking" title="Checking...">
|
||||
<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 12px; height: 12px;"></span>
|
||||
</span>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<div class="connection-icon">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
|
||||
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
|
||||
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<span class="fw-medium">{{ conn.name }}</span>
|
||||
@@ -129,7 +134,7 @@
|
||||
<span class="text-muted small text-truncate d-inline-block" style="max-width: 200px;" title="{{ conn.endpoint_url }}">{{ conn.endpoint_url }}</span>
|
||||
</td>
|
||||
<td><span class="badge bg-primary bg-opacity-10 text-primary">{{ conn.region }}</span></td>
|
||||
<td><code class="small">{{ conn.access_key[:8] }}...{{ conn.access_key[-4:] }}</code></td>
|
||||
<td><code class="small">{{ conn.access_key | slice(start=0, end=8) }}...{{ conn.access_key | slice(start=-4) }}</code></td>
|
||||
<td class="text-end">
|
||||
<div class="btn-group btn-group-sm" role="group">
|
||||
<button type="button" class="btn btn-outline-secondary"
|
||||
@@ -140,7 +145,6 @@
|
||||
data-endpoint="{{ conn.endpoint_url }}"
|
||||
data-region="{{ conn.region }}"
|
||||
data-access="{{ conn.access_key }}"
|
||||
data-secret="{{ conn.secret_key }}"
|
||||
title="Edit connection">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
@@ -168,8 +172,7 @@
|
||||
<div class="empty-state text-center py-5">
|
||||
<div class="empty-state-icon mx-auto mb-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
|
||||
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
|
||||
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<h5 class="fw-semibold mb-2">No connections yet</h5>
|
||||
@@ -181,7 +184,6 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Edit Connection Modal -->
|
||||
<div class="modal fade" id="editConnectionModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -195,7 +197,7 @@
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="POST" id="editConnectionForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}"/>
|
||||
<div class="modal-body">
|
||||
<div class="mb-3">
|
||||
<label for="edit_name" class="form-label fw-medium">Name</label>
|
||||
@@ -217,7 +219,7 @@
|
||||
<label for="edit_secret_key" class="form-label fw-medium">Secret Key</label>
|
||||
<div class="input-group">
|
||||
<input type="password" class="form-control font-monospace" id="edit_secret_key" name="secret_key" required>
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="togglePassword('edit_secret_key')">
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="ConnectionsManagement.togglePassword('edit_secret_key')">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
|
||||
<path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
|
||||
@@ -247,7 +249,6 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Delete Connection Modal -->
|
||||
<div class="modal fade" id="deleteConnectionModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -273,7 +274,7 @@
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<form method="POST" id="deleteConnectionForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}"/>
|
||||
<button type="submit" class="btn btn-danger">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
@@ -287,80 +288,16 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script src="{{ url_for(endpoint="static", filename="js/connections-management.js") }}"></script>
|
||||
<script>
|
||||
function togglePassword(id) {
|
||||
const input = document.getElementById(id);
|
||||
if (input.type === "password") {
|
||||
input.type = "text";
|
||||
} else {
|
||||
input.type = "password";
|
||||
}
|
||||
ConnectionsManagement.init({
|
||||
csrfToken: "{{ csrf_token_value }}",
|
||||
endpoints: {
|
||||
test: "{{ url_for(endpoint="ui.test_connection") }}",
|
||||
updateTemplate: "{{ url_for(endpoint="ui.update_connection", connection_id="CONNECTION_ID") }}",
|
||||
deleteTemplate: "{{ url_for(endpoint="ui.delete_connection", connection_id="CONNECTION_ID") }}",
|
||||
healthTemplate: "/ui/connections/CONNECTION_ID/health"
|
||||
}
|
||||
|
||||
// Test Connection Logic
|
||||
async function testConnection(formId, resultId) {
|
||||
const form = document.getElementById(formId);
|
||||
const resultDiv = document.getElementById(resultId);
|
||||
const formData = new FormData(form);
|
||||
const data = Object.fromEntries(formData.entries());
|
||||
|
||||
resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing...</div>';
|
||||
|
||||
try {
|
||||
const response = await fetch("{{ url_for('ui.test_connection') }}", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
"X-CSRFToken": "{{ csrf_token() }}"
|
||||
},
|
||||
body: JSON.stringify(data)
|
||||
});
|
||||
|
||||
const result = await response.json();
|
||||
if (response.ok) {
|
||||
resultDiv.innerHTML = `<div class="text-success"><i class="bi bi-check-circle"></i> ${result.message}</div>`;
|
||||
} else {
|
||||
resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> ${result.message}</div>`;
|
||||
}
|
||||
} catch (error) {
|
||||
resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> Connection failed</div>`;
|
||||
}
|
||||
}
|
||||
|
||||
document.getElementById('testConnectionBtn').addEventListener('click', () => {
|
||||
testConnection('createConnectionForm', 'testResult');
|
||||
});
|
||||
|
||||
document.getElementById('editTestConnectionBtn').addEventListener('click', () => {
|
||||
testConnection('editConnectionForm', 'editTestResult');
|
||||
});
|
||||
|
||||
// Modal Event Listeners
|
||||
const editModal = document.getElementById('editConnectionModal');
|
||||
editModal.addEventListener('show.bs.modal', event => {
|
||||
const button = event.relatedTarget;
|
||||
const id = button.getAttribute('data-id');
|
||||
|
||||
document.getElementById('edit_name').value = button.getAttribute('data-name');
|
||||
document.getElementById('edit_endpoint_url').value = button.getAttribute('data-endpoint');
|
||||
document.getElementById('edit_region').value = button.getAttribute('data-region');
|
||||
document.getElementById('edit_access_key').value = button.getAttribute('data-access');
|
||||
document.getElementById('edit_secret_key').value = button.getAttribute('data-secret');
|
||||
document.getElementById('editTestResult').innerHTML = '';
|
||||
|
||||
const form = document.getElementById('editConnectionForm');
|
||||
form.action = "{{ url_for('ui.update_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id);
|
||||
});
|
||||
|
||||
const deleteModal = document.getElementById('deleteConnectionModal');
|
||||
deleteModal.addEventListener('show.bs.modal', event => {
|
||||
const button = event.relatedTarget;
|
||||
const id = button.getAttribute('data-id');
|
||||
const name = button.getAttribute('data-name');
|
||||
|
||||
document.getElementById('deleteConnectionName').textContent = name;
|
||||
const form = document.getElementById('deleteConnectionForm');
|
||||
form.action = "{{ url_for('ui.delete_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id);
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
|
||||
@@ -8,7 +8,7 @@
|
||||
<p class="text-danger small">{{ reason }}</p>
|
||||
{% endif %}
|
||||
<div class="d-flex flex-wrap justify-content-center gap-3">
|
||||
<a class="btn btn-primary" href="{{ url_for('ui.buckets_overview') }}">Return to buckets</a>
|
||||
<a class="btn btn-primary" href="{{ url_for(endpoint="ui.buckets_overview") }}">Return to buckets</a>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
2894
crates/myfsio-server/templates/docs.html
Normal file
2894
crates/myfsio-server/templates/docs.html
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
{% extends "base.html" %}
|
||||
{% block content %}
|
||||
{% set iam_disabled = 'disabled' if iam_locked else '' %}
|
||||
{% if iam_locked %}{% set iam_disabled = "disabled" %}{% else %}{% set iam_disabled = "" %}{% endif %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<p class="text-uppercase text-muted small mb-1">Identity & Access Management</p>
|
||||
@@ -10,6 +10,7 @@
|
||||
</svg>
|
||||
IAM Configuration
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Create and manage users with fine-grained bucket permissions.</p>
|
||||
</div>
|
||||
<div class="d-flex gap-2">
|
||||
{% if not iam_locked %}
|
||||
@@ -43,15 +44,26 @@
|
||||
</svg>
|
||||
<div class="flex-grow-1">
|
||||
<div class="fw-semibold">
|
||||
{% if disclosed_secret.operation == 'rotate' %}
|
||||
{% if disclosed_secret.operation == "rotate" %}
|
||||
Secret rotated for <code>{{ disclosed_secret.access_key }}</code>
|
||||
{% else %}
|
||||
New user created: <code>{{ disclosed_secret.access_key }}</code>
|
||||
{% endif %}
|
||||
</div>
|
||||
<p class="mb-2 small">⚠️ This secret is only shown once. Copy it now and store it securely.</p>
|
||||
<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="input-group mb-2">
|
||||
<span class="input-group-text"><strong>Access key</strong></span>
|
||||
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.access_key }}" readonly id="disclosedAccessKeyValue" />
|
||||
<button class="btn btn-outline-primary" type="button" data-access-key-copy>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-clipboard" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<div class="input-group">
|
||||
<span class="input-group-text"><strong>Secret key</strong></span>
|
||||
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.secret_key }}" readonly id="disclosedSecretValue" />
|
||||
@@ -78,7 +90,7 @@
|
||||
<pre class="policy-preview mb-0" id="iamConfigPreview">{{ config_document }}</pre>
|
||||
<button class="btn btn-outline-light btn-sm config-copy" type="button" data-copy-target="iamConfigPreview">Copy JSON</button>
|
||||
</div>
|
||||
<p class="text-muted small mt-2 mb-0">Secrets are masked above. Access <code>{{ config_summary.path }}</code> directly to view full credentials.</p>
|
||||
<p class="text-muted small mt-2 mb-0">Secrets are masked above. IAM config is encrypted at rest.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -93,7 +105,7 @@
|
||||
</svg>
|
||||
Users
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">{{ users|length if not iam_locked else '?' }} user{{ 's' if (users|length if not iam_locked else 0) != 1 else '' }} configured</p>
|
||||
<p class="text-muted small mb-0">{% if not iam_locked %}{{ users|length }}{% else %}{{ "?" }}{% endif %} user{% if not iam_locked and users|length != 1 %}s{% endif %} configured</p>
|
||||
</div>
|
||||
{% if iam_locked %}<span class="badge bg-warning bg-opacity-10 text-warning">View only</span>{% endif %}
|
||||
</div>
|
||||
@@ -109,78 +121,146 @@
|
||||
{% else %}
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if users %}
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col">User</th>
|
||||
<th scope="col">Policies</th>
|
||||
<th scope="col" class="text-end">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for user in users %}
|
||||
<tr>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-3">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
{% if users|length > 1 %}
|
||||
<div class="mb-3">
|
||||
<div class="search-input-wrapper">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="search-icon" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<input type="text" class="form-control" id="iam-user-search" placeholder="Filter users by name or access key..." autocomplete="off" />
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
<div class="row g-3">
|
||||
{% for user in users %}
|
||||
<div class="col-md-6 col-xl-4 iam-user-item" data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}" data-update-url="{{ url_for(endpoint="ui.update_iam_user", user_id=user.user_id) }}">
|
||||
<div class="card h-100 iam-user-card{% if user.is_admin %} iam-admin-card{% else %}{% endif %}">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-start justify-content-between mb-3">
|
||||
<div class="d-flex align-items-center gap-3 min-width-0 overflow-hidden">
|
||||
<div class="user-avatar user-avatar-lg flex-shrink-0">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<div class="fw-medium">{{ user.display_name }}</div>
|
||||
<code class="small text-muted">{{ user.access_key }}</code>
|
||||
<div class="min-width-0">
|
||||
<div class="d-flex align-items-center gap-2 mb-0">
|
||||
<h6 class="fw-semibold mb-0 text-truncate" title="{{ user.display_name }}">{{ user.display_name }}</h6>
|
||||
{% if user.is_admin %}
|
||||
<span class="iam-role-badge iam-role-admin" data-role-badge>Admin</span>
|
||||
{% else %}
|
||||
<span class="iam-role-badge iam-role-user" data-role-badge>User</span>
|
||||
{% endif %}
|
||||
{% if user.is_expired %}
|
||||
<span class="badge text-bg-danger" style="font-size: .65rem">Expired</span>
|
||||
{% elif user.is_expiring_soon %}
|
||||
<span class="badge text-bg-warning" style="font-size: .65rem">Expiring soon</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="d-flex align-items-center gap-1">
|
||||
<code class="small text-muted text-truncate" title="{{ user.access_key }}">{{ user.access_key }}</code>
|
||||
<button type="button" class="iam-copy-key" title="Copy access key" data-copy-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex flex-wrap gap-1">
|
||||
<div class="dropdown flex-shrink-0">
|
||||
<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<ul class="dropdown-menu dropdown-menu-end">
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-edit-user data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-display-name="{{ user.display_name }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
Edit Name
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-expiry-user data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}" data-expires-at="{{ user.expires_at | default(value="") }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
|
||||
</svg>
|
||||
Set Expiry
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-rotate-user data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
Rotate Secret
|
||||
</button>
|
||||
</li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li>
|
||||
<button class="dropdown-item text-danger" type="button" data-delete-user data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete User
|
||||
</button>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<div class="small text-muted mb-2">Bucket Permissions</div>
|
||||
<div class="d-flex flex-wrap gap-1" data-policy-badges>
|
||||
{% for policy in user.policies %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">
|
||||
{{ policy.bucket }}
|
||||
{% if '*' in policy.actions %}
|
||||
<span class="opacity-75">(full)</span>
|
||||
{% else %}
|
||||
<span class="opacity-75">({{ policy.actions|length }})</span>
|
||||
{% endif %}
|
||||
{% if policy.bucket == "*" %}{% set bucket_label = "All Buckets" %}{% else %}{% set bucket_label = policy.bucket %}{% endif %}
|
||||
{% if "*" in policy.actions %}
|
||||
{% set perm_label = "Full Access" %}
|
||||
{% elif policy.actions|length >= 19 %}
|
||||
{% set perm_label = "Full Access" %}
|
||||
{% elif "list" in policy.actions and "read" in policy.actions and "write" in policy.actions and "delete" in policy.actions %}
|
||||
{% set perm_label = "Read + Write + Delete" %}
|
||||
{% elif "list" in policy.actions and "read" in policy.actions and "write" in policy.actions %}
|
||||
{% set perm_label = "Read + Write" %}
|
||||
{% elif "list" in policy.actions and "read" in policy.actions %}
|
||||
{% set perm_label = "Read Only" %}
|
||||
{% else %}
|
||||
{% set policy_actions_count = policy.actions | length %}
|
||||
{% set perm_label = "Custom (" ~ policy_actions_count ~ ")" %}
|
||||
{% endif %}
|
||||
<span class="iam-perm-badge">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
{{ bucket_label }} · {{ perm_label }}
|
||||
</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>
|
||||
{% endfor %}
|
||||
</div>
|
||||
</td>
|
||||
<td class="text-end">
|
||||
<div class="btn-group btn-group-sm" role="group">
|
||||
<button class="btn btn-outline-primary" type="button" data-rotate-user="{{ user.access_key }}" title="Rotate Secret">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary" type="button" data-edit-user="{{ user.access_key }}" data-display-name="{{ user.display_name }}" title="Edit User">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary" type="button" data-policy-editor data-access-key="{{ user.access_key }}" title="Edit Policies">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
|
||||
<path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-danger" type="button" data-delete-user="{{ user.access_key }}" title="Delete User">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-user-id="{{ user.user_id }}" data-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
|
||||
<path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/>
|
||||
</svg>
|
||||
Manage Policies
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
<div class="iam-no-results d-none" id="iam-no-results">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="mb-2" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<p class="mb-0">No users match your filter.</p>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state text-center py-5">
|
||||
@@ -203,7 +283,6 @@
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<!-- Create User Modal -->
|
||||
<div class="modal fade" id="createUserModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -217,13 +296,39 @@
|
||||
</h1>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.create_iam_user') }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<form method="post" action="{{ url_for(endpoint="ui.create_iam_user") }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<div class="modal-body">
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Display Name</label>
|
||||
<input class="form-control" type="text" name="display_name" placeholder="Analytics Team" required autofocus />
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Access Key <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<div class="input-group">
|
||||
<input class="form-control font-monospace" type="text" name="access_key" id="createUserAccessKey" placeholder="Leave blank to auto-generate" />
|
||||
<button class="btn btn-outline-secondary" type="button" id="generateAccessKeyBtn" title="Generate secure access key">Generate</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Secret Key <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<div class="input-group">
|
||||
<input class="form-control font-monospace" type="text" name="secret_key" id="createUserSecretKey" placeholder="Leave blank to auto-generate" />
|
||||
<button class="btn btn-outline-secondary" type="button" id="generateSecretKeyBtn" title="Generate secure secret key">Generate</button>
|
||||
</div>
|
||||
<div class="form-text">If you set a custom secret key, copy it now. It will be encrypted and cannot be recovered.</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Expiry <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<input class="form-control" type="datetime-local" name="expires_at" id="createUserExpiry" />
|
||||
<div class="form-text">Leave blank for no expiration. Expired users cannot authenticate.</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Initial Policies (JSON)</label>
|
||||
<textarea class="form-control font-monospace" name="policies" id="createUserPolicies" rows="6" spellcheck="false" placeholder='[
|
||||
@@ -236,6 +341,8 @@
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="full">Full Control</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="readonly">Read-Only</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="writer">Read + Write</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="operator">Operator</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="bucketadmin">Bucket Admin</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
@@ -252,7 +359,6 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Policy Editor Modal -->
|
||||
<div class="modal fade" id="policyEditorModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-lg modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -271,11 +377,11 @@
|
||||
<form
|
||||
id="policyEditorForm"
|
||||
method="post"
|
||||
data-action-template="{{ url_for('ui.update_iam_policies', access_key='ACCESS_KEY_PLACEHOLDER') }}"
|
||||
data-action-template="{{ url_for(endpoint="ui.update_iam_policies", user_id="USER_ID_PLACEHOLDER") }}"
|
||||
class="d-flex flex-column gap-3"
|
||||
>
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<input type="hidden" id="policyEditorUser" name="access_key" />
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<input type="hidden" id="policyEditorUserId" name="user_id" />
|
||||
|
||||
<div>
|
||||
<label class="form-label fw-medium">Inline Policies (JSON array)</label>
|
||||
@@ -287,6 +393,8 @@
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="full">Full Control</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="readonly">Read-Only</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="writer">Read + Write</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="operator">Operator</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="bucketadmin">Bucket Admin</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
@@ -303,7 +411,6 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Edit User Modal -->
|
||||
<div class="modal fade" id="editUserModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -317,7 +424,7 @@
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="post" id="editUserForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<div class="modal-body">
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Display Name</label>
|
||||
@@ -338,15 +445,14 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Delete User Modal -->
|
||||
<div class="modal fade" id="deleteUserModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h1 class="modal-title fs-5 fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M1 14s-1 0-1-1 1-4 6-4 6 3 6 4-1 1-1 1H1zm5-6a3 3 0 1 0 0-6 3 3 0 0 0 0 6z"/>
|
||||
<path fill-rule="evenodd" d="M11 1.5v1h5v1h-1v9a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2v-9H0v-1h5v-1a1 1 0 0 1 1-1h4a1 1 0 0 1 1 1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118z"/>
|
||||
<path d="M11 5a3 3 0 1 1-6 0 3 3 0 0 1 6 0M8 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4m.256 7a4.5 4.5 0 0 1-.229-1.004H3c.001-.246.154-.986.832-1.664C4.484 10.68 5.711 10 8 10q.39 0 .74.025c.226-.341.496-.65.804-.918Q9.077 9.014 8 9c-5 0-6 3-6 4s1 1 1 1h5.256Z"/>
|
||||
<path d="M12.5 16a3.5 3.5 0 1 0 0-7 3.5 3.5 0 0 0 0 7m-.646-4.854.646.647.646-.647a.5.5 0 0 1 .708.708l-.647.646.647.646a.5.5 0 0 1-.708.708l-.646-.647-.646.647a.5.5 0 0 1-.708-.708l.647-.646-.647-.646a.5.5 0 0 1 .708-.708"/>
|
||||
</svg>
|
||||
Delete User
|
||||
</h1>
|
||||
@@ -368,7 +474,7 @@
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<form method="post" id="deleteUserForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<button class="btn btn-danger" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
@@ -382,7 +488,6 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Rotate Secret Modal -->
|
||||
<div class="modal fade" id="rotateSecretModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
@@ -440,277 +545,72 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script id="iamUsersJson" type="application/json">{{ users | tojson }}</script>
|
||||
<div class="modal fade" id="expiryModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h1 class="modal-title fs-5 fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
|
||||
</svg>
|
||||
Set Expiry
|
||||
</h1>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="post" id="expiryForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<div class="modal-body">
|
||||
<p class="text-muted small mb-3">Set expiration for <code id="expiryUserLabel"></code></p>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Expires at</label>
|
||||
<input class="form-control" type="datetime-local" name="expires_at" id="expiryDateInput" />
|
||||
<div class="form-text">Leave blank to remove expiration (never expires).</div>
|
||||
</div>
|
||||
<div class="d-flex flex-wrap gap-2">
|
||||
<span class="text-muted small me-2 align-self-center">Quick presets:</span>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="1h">1 hour</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="24h">24 hours</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="7d">7 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="30d">30 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="90d">90 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm text-danger" type="button" data-expiry-preset="clear">Never</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<button class="btn btn-primary" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save Expiry
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script id="iamUsersJson" type="application/json">{{ users | json_encode | safe }}</script>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
{{ super() }}
|
||||
<script src="{{ url_for(endpoint="static", filename="js/iam-management.js") }}"></script>
|
||||
<script>
|
||||
(function () {
|
||||
const currentUserKey = {{ principal.access_key | tojson }};
|
||||
const configCopyButtons = document.querySelectorAll('.config-copy');
|
||||
configCopyButtons.forEach((button) => {
|
||||
button.addEventListener('click', async () => {
|
||||
const targetId = button.dataset.copyTarget;
|
||||
const target = document.getElementById(targetId);
|
||||
if (!target) return;
|
||||
const text = target.innerText;
|
||||
try {
|
||||
await navigator.clipboard.writeText(text);
|
||||
button.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
button.textContent = 'Copy JSON';
|
||||
}, 1500);
|
||||
} catch (err) {
|
||||
console.error('Unable to copy IAM config', err);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
const secretCopyButton = document.querySelector('[data-secret-copy]');
|
||||
if (secretCopyButton) {
|
||||
secretCopyButton.addEventListener('click', async () => {
|
||||
const secretInput = document.getElementById('disclosedSecretValue');
|
||||
if (!secretInput) return;
|
||||
try {
|
||||
await navigator.clipboard.writeText(secretInput.value);
|
||||
secretCopyButton.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
secretCopyButton.textContent = 'Copy';
|
||||
}, 1500);
|
||||
} catch (err) {
|
||||
console.error('Unable to copy IAM secret', err);
|
||||
}
|
||||
});
|
||||
IAMManagement.init({
|
||||
users: JSON.parse(document.getElementById('iamUsersJson').textContent || '[]'),
|
||||
currentUserKey: {{ principal.access_key | json_encode | safe }},
|
||||
iamLocked: {{ iam_locked | json_encode | safe }},
|
||||
csrfToken: "{{ csrf_token_value }}",
|
||||
endpoints: {
|
||||
createUser: "{{ url_for(endpoint="ui.create_iam_user") }}",
|
||||
updateUser: "{{ url_for(endpoint="ui.update_iam_user", user_id="USER_ID") }}",
|
||||
deleteUser: "{{ url_for(endpoint="ui.delete_iam_user", user_id="USER_ID") }}",
|
||||
updatePolicies: "{{ url_for(endpoint="ui.update_iam_policies", user_id="USER_ID") }}",
|
||||
rotateSecret: "{{ url_for(endpoint="ui.rotate_iam_secret", user_id="USER_ID") }}",
|
||||
updateExpiry: "{{ url_for(endpoint="ui.update_iam_expiry", user_id="USER_ID") }}"
|
||||
}
|
||||
|
||||
const iamUsersData = document.getElementById('iamUsersJson');
|
||||
const users = iamUsersData ? JSON.parse(iamUsersData.textContent || '[]') : [];
|
||||
|
||||
// Policy Editor Logic
|
||||
const policyModalEl = document.getElementById('policyEditorModal');
|
||||
const policyModal = new bootstrap.Modal(policyModalEl);
|
||||
const userLabelEl = document.getElementById('policyEditorUserLabel');
|
||||
const userInputEl = document.getElementById('policyEditorUser');
|
||||
const textareaEl = document.getElementById('policyEditorDocument');
|
||||
const formEl = document.getElementById('policyEditorForm');
|
||||
const templateButtons = document.querySelectorAll('[data-policy-template]');
|
||||
const iamLocked = {{ iam_locked | tojson }};
|
||||
|
||||
if (iamLocked) return;
|
||||
|
||||
const userPolicies = (accessKey) => {
|
||||
const target = users.find((user) => user.access_key === accessKey);
|
||||
return target ? JSON.stringify(target.policies, null, 2) : '';
|
||||
};
|
||||
|
||||
const applyTemplate = (name) => {
|
||||
const templates = {
|
||||
full: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'iam:list_users', 'iam:*'],
|
||||
},
|
||||
],
|
||||
readonly: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read'],
|
||||
},
|
||||
],
|
||||
writer: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write'],
|
||||
},
|
||||
],
|
||||
};
|
||||
if (templates[name]) {
|
||||
textareaEl.value = JSON.stringify(templates[name], null, 2);
|
||||
}
|
||||
};
|
||||
|
||||
templateButtons.forEach((button) => {
|
||||
button.addEventListener('click', () => applyTemplate(button.dataset.policyTemplate));
|
||||
});
|
||||
|
||||
// Create User modal template buttons
|
||||
const createUserPoliciesEl = document.getElementById('createUserPolicies');
|
||||
const createTemplateButtons = document.querySelectorAll('[data-create-policy-template]');
|
||||
|
||||
const applyCreateTemplate = (name) => {
|
||||
const templates = {
|
||||
full: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'iam:list_users', 'iam:*'],
|
||||
},
|
||||
],
|
||||
readonly: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read'],
|
||||
},
|
||||
],
|
||||
writer: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write'],
|
||||
},
|
||||
],
|
||||
};
|
||||
if (templates[name] && createUserPoliciesEl) {
|
||||
createUserPoliciesEl.value = JSON.stringify(templates[name], null, 2);
|
||||
}
|
||||
};
|
||||
|
||||
createTemplateButtons.forEach((button) => {
|
||||
button.addEventListener('click', () => applyCreateTemplate(button.dataset.createPolicyTemplate));
|
||||
});
|
||||
|
||||
formEl?.addEventListener('submit', (event) => {
|
||||
const key = userInputEl.value;
|
||||
if (!key) {
|
||||
event.preventDefault();
|
||||
return;
|
||||
}
|
||||
const template = formEl.dataset.actionTemplate;
|
||||
formEl.action = template.replace('ACCESS_KEY_PLACEHOLDER', key);
|
||||
});
|
||||
|
||||
document.querySelectorAll('[data-policy-editor]').forEach((button) => {
|
||||
button.addEventListener('click', () => {
|
||||
const key = button.getAttribute('data-access-key');
|
||||
if (!key) return;
|
||||
|
||||
userLabelEl.textContent = key;
|
||||
userInputEl.value = key;
|
||||
textareaEl.value = userPolicies(key);
|
||||
|
||||
policyModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
// Edit User Logic
|
||||
const editUserModal = new bootstrap.Modal(document.getElementById('editUserModal'));
|
||||
const editUserForm = document.getElementById('editUserForm');
|
||||
const editUserDisplayName = document.getElementById('editUserDisplayName');
|
||||
|
||||
document.querySelectorAll('[data-edit-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
const key = btn.dataset.editUser;
|
||||
const name = btn.dataset.displayName;
|
||||
editUserDisplayName.value = name;
|
||||
editUserForm.action = "{{ url_for('ui.update_iam_user', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', key);
|
||||
editUserModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
// Delete User Logic
|
||||
const deleteUserModal = new bootstrap.Modal(document.getElementById('deleteUserModal'));
|
||||
const deleteUserForm = document.getElementById('deleteUserForm');
|
||||
const deleteUserLabel = document.getElementById('deleteUserLabel');
|
||||
const deleteSelfWarning = document.getElementById('deleteSelfWarning');
|
||||
|
||||
document.querySelectorAll('[data-delete-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
const key = btn.dataset.deleteUser;
|
||||
deleteUserLabel.textContent = key;
|
||||
deleteUserForm.action = "{{ url_for('ui.delete_iam_user', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', key);
|
||||
|
||||
if (key === currentUserKey) {
|
||||
deleteSelfWarning.classList.remove('d-none');
|
||||
} else {
|
||||
deleteSelfWarning.classList.add('d-none');
|
||||
}
|
||||
|
||||
deleteUserModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
// Rotate Secret Logic
|
||||
const rotateSecretModal = new bootstrap.Modal(document.getElementById('rotateSecretModal'));
|
||||
const rotateUserLabel = document.getElementById('rotateUserLabel');
|
||||
const confirmRotateBtn = document.getElementById('confirmRotateBtn');
|
||||
const rotateCancelBtn = document.getElementById('rotateCancelBtn');
|
||||
const rotateDoneBtn = document.getElementById('rotateDoneBtn');
|
||||
const rotateSecretConfirm = document.getElementById('rotateSecretConfirm');
|
||||
const rotateSecretResult = document.getElementById('rotateSecretResult');
|
||||
const newSecretKeyInput = document.getElementById('newSecretKey');
|
||||
const copyNewSecretBtn = document.getElementById('copyNewSecret');
|
||||
let currentRotateKey = null;
|
||||
|
||||
document.querySelectorAll('[data-rotate-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
currentRotateKey = btn.dataset.rotateUser;
|
||||
rotateUserLabel.textContent = currentRotateKey;
|
||||
|
||||
// Reset Modal State
|
||||
rotateSecretConfirm.classList.remove('d-none');
|
||||
rotateSecretResult.classList.add('d-none');
|
||||
confirmRotateBtn.classList.remove('d-none');
|
||||
rotateCancelBtn.classList.remove('d-none');
|
||||
rotateDoneBtn.classList.add('d-none');
|
||||
|
||||
rotateSecretModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
confirmRotateBtn.addEventListener('click', async () => {
|
||||
if (!currentRotateKey) return;
|
||||
|
||||
confirmRotateBtn.disabled = true;
|
||||
confirmRotateBtn.textContent = "Rotating...";
|
||||
|
||||
try {
|
||||
const url = "{{ url_for('ui.rotate_iam_secret', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', currentRotateKey);
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'X-CSRFToken': "{{ csrf_token() }}"
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const data = await response.json();
|
||||
throw new Error(data.error || 'Failed to rotate secret');
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
newSecretKeyInput.value = data.secret_key;
|
||||
|
||||
// Show Result
|
||||
rotateSecretConfirm.classList.add('d-none');
|
||||
rotateSecretResult.classList.remove('d-none');
|
||||
confirmRotateBtn.classList.add('d-none');
|
||||
rotateCancelBtn.classList.add('d-none');
|
||||
rotateDoneBtn.classList.remove('d-none');
|
||||
|
||||
} catch (err) {
|
||||
if (window.showToast) {
|
||||
window.showToast(err.message, 'Error', 'danger');
|
||||
}
|
||||
rotateSecretModal.hide();
|
||||
} finally {
|
||||
confirmRotateBtn.disabled = false;
|
||||
confirmRotateBtn.textContent = "Rotate Key";
|
||||
}
|
||||
});
|
||||
|
||||
copyNewSecretBtn.addEventListener('click', async () => {
|
||||
try {
|
||||
await navigator.clipboard.writeText(newSecretKeyInput.value);
|
||||
copyNewSecretBtn.textContent = 'Copied!';
|
||||
setTimeout(() => copyNewSecretBtn.textContent = 'Copy', 1500);
|
||||
} catch (err) {
|
||||
console.error('Failed to copy', err);
|
||||
}
|
||||
});
|
||||
|
||||
rotateDoneBtn.addEventListener('click', () => {
|
||||
window.location.reload();
|
||||
});
|
||||
})();
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
|
||||
@@ -35,13 +35,13 @@
|
||||
<div class="card shadow-lg login-card position-relative">
|
||||
<div class="card-body p-4 p-md-5">
|
||||
<div class="text-center mb-4 d-lg-none">
|
||||
<img src="{{ url_for('static', filename='images/MyFISO.png') }}" alt="MyFSIO" width="48" height="48" class="mb-3 rounded-3">
|
||||
<img src="{{ url_for(endpoint="static", filename="images/MyFSIO.png") }}" alt="MyFSIO" width="48" height="48" class="mb-3 rounded-3">
|
||||
<h2 class="h4 fw-bold">MyFSIO</h2>
|
||||
</div>
|
||||
<h2 class="h4 mb-1 d-none d-lg-block">Sign in</h2>
|
||||
<p class="text-muted mb-4 d-none d-lg-block">Enter your credentials to continue</p>
|
||||
<form method="post" action="{{ url_for('ui.login') }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<form method="post" action="{{ url_for(endpoint="ui.login") }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token_value }}" />
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Access key</label>
|
||||
<div class="input-group">
|
||||
@@ -73,9 +73,6 @@
|
||||
</svg>
|
||||
</button>
|
||||
</form>
|
||||
<div class="text-center mt-4">
|
||||
<small class="text-muted">Need help? Check the <a href="#" class="text-decoration-none">documentation</a></small>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
1159
crates/myfsio-server/templates/metrics.html
Normal file
1159
crates/myfsio-server/templates/metrics.html
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user