Compare commits: v0.1.7...9ec5797919 (196 commits)

.gitignore (vendored): 7 additions

@@ -26,6 +26,13 @@ dist/
 *.egg-info/
 .eggs/
+
+# Rust / maturin build artifacts
+python/myfsio_core/target/
+python/myfsio_core/Cargo.lock
+
+# Rust engine build artifacts
+rust/myfsio-engine/target/
 
 # Local runtime artifacts
 logs/
 *.log
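
The new ignore patterns can be spot-checked with `git check-ignore`; this is just a verification sketch, not part of the change, and the sample paths below are hypothetical:

```bash
# -v prints the .gitignore source and the pattern that matches each path
git check-ignore -v python/myfsio_core/target/release/libmyfsio.so
git check-ignore -v rust/myfsio-engine/target/debug/myfsio-server
git check-ignore -v logs/server.log
```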

Dockerfile: 37 deletions (file removed)

@@ -1,37 +0,0 @@
-# syntax=docker/dockerfile:1.7
-FROM python:3.11-slim
-
-ENV PYTHONDONTWRITEBYTECODE=1 \
-    PYTHONUNBUFFERED=1
-
-WORKDIR /app
-
-# Install build deps for any wheels that need compilation, then clean up
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends build-essential \
-    && rm -rf /var/lib/apt/lists/*
-
-COPY requirements.txt ./
-RUN pip install --no-cache-dir -r requirements.txt
-
-COPY . .
-
-# Make entrypoint executable
-RUN chmod +x docker-entrypoint.sh
-
-# Create data directory and set permissions
-RUN mkdir -p /app/data \
-    && useradd -m -u 1000 myfsio \
-    && chown -R myfsio:myfsio /app
-
-USER myfsio
-
-EXPOSE 5000 5100
-ENV APP_HOST=0.0.0.0 \
-    FLASK_ENV=production \
-    FLASK_DEBUG=0
-
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
-    CMD python -c "import requests; requests.get('http://localhost:5000/healthz', timeout=2)"
-
-CMD ["./docker-entrypoint.sh"]

LICENSE: 661 additions (new file)

@@ -0,0 +1,661 @@
+GNU AFFERO GENERAL PUBLIC LICENSE
+Version 3, 19 November 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+[... remainder of the standard, unmodified GNU AGPL v3.0 text: Preamble, Terms and Conditions sections 0-17, "END OF TERMS AND CONDITIONS", and the "How to Apply These Terms to Your New Programs" appendix ...]
259
README.md
259
README.md
@@ -1,117 +1,212 @@
|
||||
# MyFSIO (Flask S3 + IAM)
|
||||
# MyFSIO
|
||||
|
||||
MyFSIO is a batteries-included, Flask-based recreation of Amazon S3 and IAM workflows built for local development. The design mirrors the [AWS S3 documentation](https://docs.aws.amazon.com/s3/) wherever practical: bucket naming, Signature Version 4 presigning, Version 2012-10-17 bucket policies, IAM-style users, and familiar REST endpoints.
|
||||
MyFSIO is an S3-compatible object storage server with a Rust runtime and a filesystem-backed storage engine. The active server lives under `rust/myfsio-engine` and serves both the S3 API and the built-in web UI from a single process.
|
||||
|
||||
## Why MyFSIO?
|
||||
The `python/` implementation is deprecated as of 2026-04-21. It remains in the repository for migration reference and legacy tests, but new development and supported runtime usage should target the Rust server.
|
||||
|
||||
- **Dual servers:** Run both the API (port 5000) and UI (port 5100) with a single command: `python run.py`.
|
||||
- **IAM + access keys:** Users, access keys, key rotation, and bucket-scoped actions (`list/read/write/delete/policy`) now live in `data/.myfsio.sys/config/iam.json` and are editable from the IAM dashboard.
|
||||
- **Bucket policies + hot reload:** `data/.myfsio.sys/config/bucket_policies.json` uses AWS' policy grammar (Version `2012-10-17`) with a built-in watcher, so editing the JSON file applies immediately. The UI also ships Public/Private/Custom presets for faster edits.
|
||||
- **Presigned URLs everywhere:** Signature Version 4 presigned URLs respect IAM + bucket policies and replace the now-removed "share link" feature for public access scenarios.
|
||||
- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly. The object browser supports folder navigation, infinite scroll pagination, bulk operations, and automatic retry on load failures.
|
||||
- **Tests & health:** `/healthz` for smoke checks and `pytest` coverage for IAM, CRUD, presign, and policy flows.
|
||||
## Features
|
||||
|
||||
## Architecture at a Glance
|
||||
- S3-compatible REST API with Signature Version 4 authentication
|
||||
- Browser UI for buckets, objects, IAM users, policies, replication, metrics, and site administration
|
||||
- Filesystem-backed storage rooted at `data/`
|
||||
- Bucket versioning, multipart uploads, presigned URLs, CORS, object and bucket tagging
|
||||
- Server-side encryption and built-in KMS support
|
||||
- Optional background services for lifecycle, garbage collection, integrity scanning, operation metrics, and system metrics history
|
||||
- Replication, site sync, and static website hosting support
|
||||
|
||||
```
|
||||
+-----------------+ +----------------+
|
||||
| API Server |<----->| Object storage |
|
||||
| (port 5000) | | (filesystem) |
|
||||
| - S3 routes | +----------------+
|
||||
| - Presigned URLs |
|
||||
| - Bucket policy |
|
||||
+-----------------+
|
||||
^
|
||||
|
|
||||
+-----------------+
|
||||
| UI Server |
|
||||
| (port 5100) |
|
||||
| - Auth console |
|
||||
| - IAM dashboard|
|
||||
| - Bucket editor|
|
||||
+-----------------+
|
||||
```
|
||||
## Runtime Model
|
||||
|
||||
Both apps load the same configuration via `AppConfig` so IAM data and bucket policies stay consistent no matter which process you run.
|
||||
Bucket policies are automatically reloaded whenever `bucket_policies.json` changes—no restarts required.
|
||||
MyFSIO now runs as one Rust process:
|
||||
|
||||
## Getting Started
|
||||
- API listener on `HOST` + `PORT` (default `127.0.0.1:5000`)
|
||||
- UI listener on `HOST` + `UI_PORT` (default `127.0.0.1:5100`)
|
||||
- Shared state for storage, IAM, policies, sessions, metrics, and background workers
|
||||
|
||||
If you want API-only mode, set `UI_ENABLED=false`. There is no separate "UI-only" runtime anymore.
|
||||
|
||||
## Quick Start
|
||||
|
||||
From the repository root:
|
||||
|
||||
```bash
|
||||
python -m venv .venv
|
||||
. .venv/Scripts/activate # PowerShell: .\.venv\Scripts\Activate.ps1
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Run both API and UI (default)
|
||||
python run.py
|
||||
|
||||
# Or run individually:
|
||||
# python run.py --mode api
|
||||
# python run.py --mode ui
|
||||
cd rust/myfsio-engine
|
||||
cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
Visit `http://127.0.0.1:5100/ui` for the console and `http://127.0.0.1:5000/` for the raw API. Override ports/hosts with the environment variables listed below.
|
||||
Useful URLs:
|
||||
|
||||
## IAM, Access Keys, and Bucket Policies
|
||||
- UI: `http://127.0.0.1:5100/ui`
|
||||
- API: `http://127.0.0.1:5000/`
|
||||
- Health: `http://127.0.0.1:5000/myfsio/health`
|
||||
|
||||
- First run creates `data/.myfsio.sys/config/iam.json` with `localadmin / localadmin` (full control). Sign in via the UI, then use the **IAM** tab to create users, rotate secrets, or edit inline policies without touching JSON by hand.
|
||||
- Bucket policies live in `data/.myfsio.sys/config/bucket_policies.json` and follow the AWS `arn:aws:s3:::bucket/key` resource syntax with Version `2012-10-17`. Attach/replace/remove policies from the bucket detail page or edit the JSON by hand—changes hot reload automatically.
|
||||
- IAM actions include extended verbs (`iam:list_users`, `iam:create_user`, `iam:update_policy`, etc.) so you can control who is allowed to manage other users and policies.
|
||||
On first boot, MyFSIO creates `data/.myfsio.sys/config/iam.json` and prints the generated admin access key and secret key to the console.
|
||||
|
||||
### Bucket Policy Presets & Hot Reload
|
||||
### Common CLI commands
|
||||
|
||||
- **Presets:** Every bucket detail view includes Public (read-only), Private (detach policy), and Custom presets. Public auto-populates a policy that grants anonymous `s3:ListBucket` + `s3:GetObject` access to the entire bucket.
|
||||
- **Custom drafts:** Switching back to Custom restores your last manual edit so you can toggle between presets without losing work.
|
||||
- **Hot reload:** The server watches `bucket_policies.json` and reloads statements on-the-fly—ideal for editing policies in your favorite editor while testing Via curl or the UI.
|
||||
```bash
|
||||
# Show resolved configuration
|
||||
cargo run -p myfsio-server -- --show-config
|
||||
|
||||
## Presigned URLs
|
||||
# Validate configuration and exit non-zero on critical issues
|
||||
cargo run -p myfsio-server -- --check-config
|
||||
|
||||
Presigned URLs follow the AWS CLI playbook:
|
||||
# Reset admin credentials
|
||||
cargo run -p myfsio-server -- --reset-cred
|
||||
|
||||
- Call `POST /presign/<bucket>/<key>` (or use the "Presign" button in the UI) to request a Signature Version 4 URL valid for 1 second to 7 days.
|
||||
- The generated URL honors IAM permissions and bucket-policy decisions at generation-time and again when somebody fetches it.
|
||||
- Because presigned URLs cover both authenticated and public sharing scenarios, the legacy "share link" feature has been removed.
|
||||
# API only
|
||||
UI_ENABLED=false cargo run -p myfsio-server --
|
||||
```
|
||||
|
||||
## Building a Binary
|
||||
|
||||
```bash
|
||||
cd rust/myfsio-engine
|
||||
cargo build --release -p myfsio-server
|
||||
```
|
||||
|
||||
Binary locations:
|
||||
|
||||
- Linux/macOS: `rust/myfsio-engine/target/release/myfsio-server`
|
||||
- Windows: `rust/myfsio-engine/target/release/myfsio-server.exe`
|
||||
|
||||
Run the built binary directly:
|
||||
|
||||
```bash
|
||||
./target/release/myfsio-server
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The server reads environment variables from the process environment and also loads, when present:
|
||||
|
||||
- `/opt/myfsio/myfsio.env`
|
||||
- `.env`
|
||||
- `myfsio.env`
|
||||
|
||||
Core settings:
|
||||
|
||||
| Variable | Default | Description |
|
||||
| --- | --- | --- |
|
||||
| `STORAGE_ROOT` | `<project>/data` | Filesystem root for bucket directories |
|
||||
| `MAX_UPLOAD_SIZE` | `1073741824` | Maximum upload size (bytes) |
|
||||
| `UI_PAGE_SIZE` | `100` | `MaxKeys` hint for listings |
|
||||
| `SECRET_KEY` | `dev-secret-key` | Flask session secret for the UI |
|
||||
| `IAM_CONFIG` | `<project>/data/.myfsio.sys/config/iam.json` | IAM user + policy store |
|
||||
| `BUCKET_POLICY_PATH` | `<project>/data/.myfsio.sys/config/bucket_policies.json` | Bucket policy store |
|
||||
| `API_BASE_URL` | `http://127.0.0.1:5000` | Used by the UI when calling API endpoints (presign, bucket policy) |
|
||||
| `AWS_REGION` | `us-east-1` | Region used in Signature V4 scope |
|
||||
| `AWS_SERVICE` | `s3` | Service used in Signature V4 scope |
|
||||
| `HOST` | `127.0.0.1` | Bind address for API and UI listeners |
|
||||
| `PORT` | `5000` | API port |
|
||||
| `UI_PORT` | `5100` | UI port |
|
||||
| `UI_ENABLED` | `true` | Disable to run API-only |
|
||||
| `STORAGE_ROOT` | `./data` | Root directory for buckets and system metadata |
|
||||
| `IAM_CONFIG` | `<STORAGE_ROOT>/.myfsio.sys/config/iam.json` | IAM config path |
|
||||
| `API_BASE_URL` | unset | Public API base used by the UI and presigned URL generation |
|
||||
| `AWS_REGION` | `us-east-1` | Region used in SigV4 scope |
|
||||
| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Allowed request time skew |
|
||||
| `PRESIGNED_URL_MIN_EXPIRY_SECONDS` | `1` | Minimum presigned URL expiry |
|
||||
| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Maximum presigned URL expiry |
|
||||
| `SECRET_KEY` | loaded from `.myfsio.sys/config/.secret` if present | Session signing key and IAM-at-rest encryption key |
|
||||
| `ADMIN_ACCESS_KEY` | unset | Optional first-run or reset access key |
|
||||
| `ADMIN_SECRET_KEY` | unset | Optional first-run or reset secret key |
|
||||
|
||||
> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`.
|
||||
Feature toggles:
|
||||
|
||||
## API Cheatsheet (IAM headers required)
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `ENCRYPTION_ENABLED` | `false` |
|
||||
| `KMS_ENABLED` | `false` |
|
||||
| `GC_ENABLED` | `false` |
|
||||
| `INTEGRITY_ENABLED` | `false` |
|
||||
| `LIFECYCLE_ENABLED` | `false` |
|
||||
| `METRICS_HISTORY_ENABLED` | `false` |
|
||||
| `OPERATION_METRICS_ENABLED` | `false` |
|
||||
| `WEBSITE_HOSTING_ENABLED` | `false` |
|
||||
| `SITE_SYNC_ENABLED` | `false` |
|
||||
|
||||
Metrics and replication tuning:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` |
|
||||
| `OPERATION_METRICS_RETENTION_HOURS` | `24` |
|
||||
| `METRICS_HISTORY_INTERVAL_MINUTES` | `5` |
|
||||
| `METRICS_HISTORY_RETENTION_HOURS` | `24` |
|
||||
| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` |
|
||||
| `REPLICATION_READ_TIMEOUT_SECONDS` | `30` |
|
||||
| `REPLICATION_MAX_RETRIES` | `2` |
|
||||
| `REPLICATION_STREAMING_THRESHOLD_BYTES` | `10485760` |
|
||||
| `REPLICATION_MAX_FAILURES_PER_BUCKET` | `50` |
|
||||
| `SITE_SYNC_INTERVAL_SECONDS` | `60` |
|
||||
| `SITE_SYNC_BATCH_SIZE` | `100` |
|
||||
| `SITE_SYNC_CONNECT_TIMEOUT_SECONDS` | `10` |
|
||||
| `SITE_SYNC_READ_TIMEOUT_SECONDS` | `120` |
|
||||
| `SITE_SYNC_MAX_RETRIES` | `2` |
|
||||
| `SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS` | `1.0` |
|
||||
|
||||
UI asset overrides:
|
||||
|
||||
| Variable | Default |
|
||||
| --- | --- |
|
||||
| `TEMPLATES_DIR` | built-in crate templates directory |
|
||||
| `STATIC_DIR` | built-in crate static directory |
|
||||
|
||||
See [docs.md](./docs.md) for the full Rust-side operations guide.
|
||||
|
||||
## Data Layout
|
||||
|
||||
```text
|
||||
data/
|
||||
<bucket>/
|
||||
.myfsio.sys/
|
||||
config/
|
||||
iam.json
|
||||
bucket_policies.json
|
||||
connections.json
|
||||
operation_metrics.json
|
||||
metrics_history.json
|
||||
buckets/<bucket>/
|
||||
meta/
|
||||
versions/
|
||||
multipart/
|
||||
keys/
|
||||
```
|
||||
GET / -> List buckets (XML)
|
||||
PUT /<bucket> -> Create bucket
|
||||
DELETE /<bucket> -> Delete bucket (must be empty)
|
||||
GET /<bucket> -> List objects (XML)
|
||||
PUT /<bucket>/<key> -> Upload object (binary stream)
|
||||
GET /<bucket>/<key> -> Download object
|
||||
DELETE /<bucket>/<key> -> Delete object
|
||||
POST /presign/<bucket>/<key> -> Generate AWS SigV4 presigned URL (JSON)
|
||||
GET /bucket-policy/<bucket> -> Fetch bucket policy (JSON)
|
||||
PUT /bucket-policy/<bucket> -> Attach/replace bucket policy (JSON)
|
||||
DELETE /bucket-policy/<bucket> -> Remove bucket policy
|
||||
|
||||
## Docker
|
||||
|
||||
Build the Rust image from the `rust/` directory:
|
||||
|
||||
```bash
|
||||
docker build -t myfsio ./rust
|
||||
docker run --rm -p 5000:5000 -p 5100:5100 -v "${PWD}/data:/app/data" myfsio
|
||||
```
|
||||
|
||||
If the instance sits behind a reverse proxy, set `API_BASE_URL` to the public S3 endpoint.
|
||||
|
||||
## Linux Installation

The repository includes `scripts/install.sh` for systemd-style Linux installs. Build the Rust binary first, then pass it to the installer:

```bash
cd rust/myfsio-engine
cargo build --release -p myfsio-server

cd ../..
sudo ./scripts/install.sh --binary ./rust/myfsio-engine/target/release/myfsio-server
```

The installer copies the binary into `/opt/myfsio/myfsio`, writes `/opt/myfsio/myfsio.env`, and can register a `myfsio.service` unit.
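If you let the installer register the service unit, the standard systemd workflow applies (unit name from the paragraph above):

```bash
sudo systemctl enable --now myfsio
sudo systemctl status myfsio
journalctl -u myfsio -f
```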
## Testing

Run the Rust test suite from the workspace:

```bash
cd rust/myfsio-engine
cargo test
```
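To iterate on a single crate rather than the whole workspace, the run can be scoped (crate name taken from the build command in the Linux Installation section):

```bash
cargo test -p myfsio-server
```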
## References

- [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/)
- [Signature Version 4 Signing Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
- [Amazon S3 Bucket Policy Examples](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)

## Health Check

`GET /myfsio/health` returns:

```json
{
  "status": "ok",
  "version": "0.5.0"
}
```

The `version` field comes from the Rust crate version in `rust/myfsio-engine/crates/myfsio-server/Cargo.toml`.
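A quick smoke test against a local instance (port assumed from the Docker example above):

```bash
curl -s http://localhost:5000/myfsio/health
# Expected shape: {"status":"ok","version":"0.5.0"}
```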
app/__init__.py
@@ -1,316 +0,0 @@

```python
"""Application factory for the mini S3-compatible object store."""
from __future__ import annotations

import logging
import shutil
import sys
import time
import uuid
from logging.handlers import RotatingFileHandler
from pathlib import Path
from datetime import timedelta
from typing import Any, Dict, List, Optional

from flask import Flask, g, has_request_context, redirect, render_template, request, url_for
from flask_cors import CORS
from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix

from .bucket_policies import BucketPolicyStore
from .config import AppConfig
from .connections import ConnectionStore
from .encryption import EncryptionManager
from .extensions import limiter, csrf
from .iam import IamService
from .kms import KMSManager
from .replication import ReplicationManager
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage
from .version import get_version


def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
    """Migrate config file from legacy locations to the active path.

    Checks each legacy path in order and moves the first one found to the active path.
    This ensures backward compatibility for users upgrading from older versions.
    """
    active_path.parent.mkdir(parents=True, exist_ok=True)

    if active_path.exists():
        return active_path

    for legacy_path in legacy_paths:
        if legacy_path.exists():
            try:
                shutil.move(str(legacy_path), str(active_path))
            except OSError:
                # Fall back to copy + delete if move fails (e.g., cross-device)
                shutil.copy2(legacy_path, active_path)
                try:
                    legacy_path.unlink(missing_ok=True)
                except OSError:
                    pass
            break

    return active_path


def create_app(
    test_config: Optional[Dict[str, Any]] = None,
    *,
    include_api: bool = True,
    include_ui: bool = True,
) -> Flask:
    """Create and configure the Flask application."""
    config = AppConfig.from_env(test_config)

    if getattr(sys, "frozen", False):
        project_root = Path(sys._MEIPASS)
    else:
        project_root = Path(__file__).resolve().parent.parent

    app = Flask(
        __name__,
        static_folder=str(project_root / "static"),
        template_folder=str(project_root / "templates"),
    )
    app.config.update(config.to_flask_config())
    if test_config:
        app.config.update(test_config)
    app.config.setdefault("APP_VERSION", get_version())
    app.permanent_session_lifetime = timedelta(days=int(app.config.get("SESSION_LIFETIME_DAYS", 30)))
    if app.config.get("TESTING"):
        app.config.setdefault("WTF_CSRF_ENABLED", False)

    # Trust X-Forwarded-* headers from proxies
    app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_prefix=1)

    _configure_cors(app)
    _configure_logging(app)

    limiter.init_app(app)
    csrf.init_app(app)

    storage = ObjectStorage(Path(app.config["STORAGE_ROOT"]))
    iam = IamService(
        Path(app.config["IAM_CONFIG"]),
        auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
        auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
    )
    bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
    secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))

    # Initialize replication components.
    # Store config files in the system config directory for consistency.
    storage_root = Path(app.config["STORAGE_ROOT"])
    config_dir = storage_root / ".myfsio.sys" / "config"
    config_dir.mkdir(parents=True, exist_ok=True)

    # Define paths with migration from legacy locations
    connections_path = _migrate_config_file(
        active_path=config_dir / "connections.json",
        legacy_paths=[
            storage_root / ".myfsio.sys" / "connections.json",  # Previous location
            storage_root / ".connections.json",  # Original legacy location
        ],
    )
    replication_rules_path = _migrate_config_file(
        active_path=config_dir / "replication_rules.json",
        legacy_paths=[
            storage_root / ".myfsio.sys" / "replication_rules.json",  # Previous location
            storage_root / ".replication_rules.json",  # Original legacy location
        ],
    )

    connections = ConnectionStore(connections_path)
    replication = ReplicationManager(storage, connections, replication_rules_path)

    # Initialize encryption and KMS
    encryption_config = {
        "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
        "encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
        "default_encryption_algorithm": app.config.get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"),
    }
    encryption_manager = EncryptionManager(encryption_config)

    kms_manager = None
    if app.config.get("KMS_ENABLED", False):
        kms_keys_path = Path(app.config.get("KMS_KEYS_PATH", ""))
        kms_master_key_path = Path(app.config.get("ENCRYPTION_MASTER_KEY_PATH", ""))
        kms_manager = KMSManager(kms_keys_path, kms_master_key_path)
        encryption_manager.set_kms_provider(kms_manager)

    # Wrap storage with encryption layer if encryption is enabled
    if app.config.get("ENCRYPTION_ENABLED", False):
        from .encrypted_storage import EncryptedObjectStorage
        storage = EncryptedObjectStorage(storage, encryption_manager)

    app.extensions["object_storage"] = storage
    app.extensions["iam"] = iam
    app.extensions["bucket_policies"] = bucket_policies
    app.extensions["secret_store"] = secret_store
    app.extensions["limiter"] = limiter
    app.extensions["connections"] = connections
    app.extensions["replication"] = replication
    app.extensions["encryption"] = encryption_manager
    app.extensions["kms"] = kms_manager

    @app.errorhandler(500)
    def internal_error(error):
        return render_template('500.html'), 500

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        return render_template('csrf_error.html', reason=e.description), 400

    @app.template_filter("filesizeformat")
    def filesizeformat(value: int) -> str:
        """Format bytes as human-readable file size."""
        for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
            if abs(value) < 1024.0 or unit == "PB":
                if unit == "B":
                    return f"{int(value)} {unit}"
                return f"{value:.1f} {unit}"
            value /= 1024.0
        return f"{value:.1f} PB"

    @app.template_filter("timestamp_to_datetime")
    def timestamp_to_datetime(value: float) -> str:
        """Format Unix timestamp as human-readable datetime."""
        from datetime import datetime
        if not value:
            return "Never"
        try:
            dt = datetime.fromtimestamp(value)
            return dt.strftime("%Y-%m-%d %H:%M:%S")
        except (ValueError, OSError):
            return "Unknown"

    if include_api:
        from .s3_api import s3_api_bp
        from .kms_api import kms_api_bp

        app.register_blueprint(s3_api_bp)
        app.register_blueprint(kms_api_bp)
        csrf.exempt(s3_api_bp)
        csrf.exempt(kms_api_bp)

    if include_ui:
        from .ui import ui_bp

        app.register_blueprint(ui_bp)
        if not include_api:
            @app.get("/")
            def ui_root_redirect():
                return redirect(url_for("ui.buckets_overview"))

    @app.errorhandler(404)
    def handle_not_found(error):
        wants_html = request.accept_mimetypes.accept_html
        path = request.path or ""
        if include_ui and wants_html:
            if not include_api or path.startswith("/ui") or path == "/":
                return render_template("404.html"), 404
        return error

    @app.get("/healthz")
    def healthcheck() -> Dict[str, str]:
        return {"status": "ok", "version": app.config.get("APP_VERSION", "unknown")}

    return app


def create_api_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    return create_app(test_config, include_api=True, include_ui=False)


def create_ui_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    return create_app(test_config, include_api=False, include_ui=True)


def _configure_cors(app: Flask) -> None:
    origins = app.config.get("CORS_ORIGINS", ["*"])
    methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
    allow_headers = app.config.get("CORS_ALLOW_HEADERS", ["*"])
    expose_headers = app.config.get("CORS_EXPOSE_HEADERS", ["*"])
    CORS(
        app,
        resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers, "expose_headers": expose_headers}},
        supports_credentials=True,
    )


class _RequestContextFilter(logging.Filter):
    """Inject request-specific attributes into log records."""

    def filter(self, record: logging.LogRecord) -> bool:  # pragma: no cover - simple boilerplate
        if has_request_context():
            record.request_id = getattr(g, "request_id", "-")
            record.path = request.path
            record.method = request.method
            record.remote_addr = request.remote_addr or "-"
        else:
            record.request_id = getattr(record, "request_id", "-")
            record.path = getattr(record, "path", "-")
            record.method = getattr(record, "method", "-")
            record.remote_addr = getattr(record, "remote_addr", "-")
        return True


def _configure_logging(app: Flask) -> None:
    formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(request_id)s | %(method)s %(path)s | %(message)s"
    )

    # Stream handler (stdout) - primary for Docker
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    stream_handler.addFilter(_RequestContextFilter())

    logger = app.logger
    logger.handlers.clear()
    logger.addHandler(stream_handler)

    # File handler (optional, if configured)
    if app.config.get("LOG_TO_FILE"):
        log_file = Path(app.config["LOG_FILE"])
        log_file.parent.mkdir(parents=True, exist_ok=True)
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=int(app.config.get("LOG_MAX_BYTES", 5 * 1024 * 1024)),
            backupCount=int(app.config.get("LOG_BACKUP_COUNT", 3)),
            encoding="utf-8",
        )
        file_handler.setFormatter(formatter)
        file_handler.addFilter(_RequestContextFilter())
        logger.addHandler(file_handler)

    logger.setLevel(getattr(logging, app.config.get("LOG_LEVEL", "INFO"), logging.INFO))

    @app.before_request
    def _log_request_start() -> None:
        g.request_id = uuid.uuid4().hex
        g.request_started_at = time.perf_counter()
        app.logger.info(
            "Request started",
            extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
        )

    @app.after_request
    def _log_request_end(response):
        duration_ms = 0.0
        if hasattr(g, "request_started_at"):
            duration_ms = (time.perf_counter() - g.request_started_at) * 1000
        request_id = getattr(g, "request_id", uuid.uuid4().hex)
        response.headers.setdefault("X-Request-ID", request_id)
        app.logger.info(
            "Request completed",
            extra={
                "path": request.path,
                "method": request.method,
                "remote_addr": request.remote_addr,
            },
        )
        response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
        return response
```
app/config.py
@@ -1,333 +0,0 @@

```python
"""Configuration helpers for the S3 clone application."""
from __future__ import annotations

import os
import secrets
import shutil
import sys
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional

if getattr(sys, "frozen", False):
    # Running in a PyInstaller bundle
    PROJECT_ROOT = Path(sys._MEIPASS)
else:
    # Running in a normal Python environment
    PROJECT_ROOT = Path(__file__).resolve().parent.parent


def _prepare_config_file(active_path: Path, legacy_path: Optional[Path] = None) -> Path:
    """Ensure config directories exist and migrate legacy files when possible."""
    active_path = Path(active_path)
    active_path.parent.mkdir(parents=True, exist_ok=True)
    if legacy_path:
        legacy_path = Path(legacy_path)
        if not active_path.exists() and legacy_path.exists():
            legacy_path.parent.mkdir(parents=True, exist_ok=True)
            try:
                shutil.move(str(legacy_path), str(active_path))
            except OSError:
                shutil.copy2(legacy_path, active_path)
                try:
                    legacy_path.unlink(missing_ok=True)
                except OSError:
                    pass
    return active_path


@dataclass
class AppConfig:
    storage_root: Path
    max_upload_size: int
    ui_page_size: int
    secret_key: str
    iam_config_path: Path
    bucket_policy_path: Path
    api_base_url: Optional[str]
    aws_region: str
    aws_service: str
    ui_enforce_bucket_policies: bool
    log_level: str
    log_to_file: bool
    log_path: Path
    log_max_bytes: int
    log_backup_count: int
    ratelimit_default: str
    ratelimit_storage_uri: str
    cors_origins: list[str]
    cors_methods: list[str]
    cors_allow_headers: list[str]
    cors_expose_headers: list[str]
    session_lifetime_days: int
    auth_max_attempts: int
    auth_lockout_minutes: int
    bulk_delete_max_keys: int
    secret_ttl_seconds: int
    stream_chunk_size: int
    multipart_min_part_size: int
    bucket_stats_cache_ttl: int
    encryption_enabled: bool
    encryption_master_key_path: Path
    kms_enabled: bool
    kms_keys_path: Path
    default_encryption_algorithm: str

    @classmethod
    def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
        overrides = overrides or {}

        def _get(name: str, default: Any) -> Any:
            return overrides.get(name, os.getenv(name, default))

        storage_root = Path(_get("STORAGE_ROOT", PROJECT_ROOT / "data")).resolve()
        max_upload_size = int(_get("MAX_UPLOAD_SIZE", 1024 * 1024 * 1024))  # 1 GiB default
        ui_page_size = int(_get("UI_PAGE_SIZE", 100))
        auth_max_attempts = int(_get("AUTH_MAX_ATTEMPTS", 5))
        auth_lockout_minutes = int(_get("AUTH_LOCKOUT_MINUTES", 15))
        bulk_delete_max_keys = int(_get("BULK_DELETE_MAX_KEYS", 500))
        secret_ttl_seconds = int(_get("SECRET_TTL_SECONDS", 300))
        stream_chunk_size = int(_get("STREAM_CHUNK_SIZE", 64 * 1024))
        multipart_min_part_size = int(_get("MULTIPART_MIN_PART_SIZE", 5 * 1024 * 1024))
        default_secret = "dev-secret-key"
        secret_key = str(_get("SECRET_KEY", default_secret))

        if not secret_key or secret_key == default_secret:
            secret_file = storage_root / ".myfsio.sys" / "config" / ".secret"
            if secret_file.exists():
                secret_key = secret_file.read_text().strip()
            else:
                generated = secrets.token_urlsafe(32)
                if secret_key == default_secret:
                    warnings.warn("Using insecure default SECRET_KEY. A random value has been generated and persisted; set SECRET_KEY for production", RuntimeWarning)
                try:
                    secret_file.parent.mkdir(parents=True, exist_ok=True)
                    secret_file.write_text(generated)
                    secret_key = generated
                except OSError:
                    secret_key = generated

        iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
        bucket_policy_override = "BUCKET_POLICY_PATH" in overrides or "BUCKET_POLICY_PATH" in os.environ

        default_iam_path = storage_root / ".myfsio.sys" / "config" / "iam.json"
        default_bucket_policy_path = storage_root / ".myfsio.sys" / "config" / "bucket_policies.json"

        iam_config_path = Path(_get("IAM_CONFIG", default_iam_path)).resolve()
        bucket_policy_path = Path(_get("BUCKET_POLICY_PATH", default_bucket_policy_path)).resolve()

        iam_config_path = _prepare_config_file(
            iam_config_path,
            legacy_path=None if iam_env_override else storage_root / "iam.json",
        )
        bucket_policy_path = _prepare_config_file(
            bucket_policy_path,
            legacy_path=None if bucket_policy_override else storage_root / "bucket_policies.json",
        )
        api_base_url = _get("API_BASE_URL", None)
        if api_base_url:
            api_base_url = str(api_base_url)

        aws_region = str(_get("AWS_REGION", "us-east-1"))
        aws_service = str(_get("AWS_SERVICE", "s3"))
        enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
        log_level = str(_get("LOG_LEVEL", "INFO")).upper()
        log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
        log_dir = Path(_get("LOG_DIR", storage_root.parent / "logs")).resolve()
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / str(_get("LOG_FILE", "app.log"))
        log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
        log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
        ratelimit_default = str(_get("RATE_LIMIT_DEFAULT", "200 per minute"))
        ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))

        def _csv(value: str, default: list[str]) -> list[str]:
            if not value:
                return default
            parts = [segment.strip() for segment in value.split(",") if segment.strip()]
            return parts or default

        cors_origins = _csv(str(_get("CORS_ORIGINS", "*")), ["*"])
        cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD")), ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
        cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
        cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
        session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
        bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))  # Default 60 seconds

        # Encryption settings
        encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
        encryption_master_key_path = Path(_get("ENCRYPTION_MASTER_KEY_PATH", encryption_keys_dir / "master.key")).resolve()
        kms_enabled = str(_get("KMS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
        default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))

        return cls(storage_root=storage_root,
                   max_upload_size=max_upload_size,
                   ui_page_size=ui_page_size,
                   secret_key=secret_key,
                   iam_config_path=iam_config_path,
                   bucket_policy_path=bucket_policy_path,
                   api_base_url=api_base_url,
                   aws_region=aws_region,
                   aws_service=aws_service,
                   ui_enforce_bucket_policies=enforce_ui_policies,
                   log_level=log_level,
                   log_to_file=log_to_file,
                   log_path=log_path,
                   log_max_bytes=log_max_bytes,
                   log_backup_count=log_backup_count,
                   ratelimit_default=ratelimit_default,
                   ratelimit_storage_uri=ratelimit_storage_uri,
                   cors_origins=cors_origins,
                   cors_methods=cors_methods,
                   cors_allow_headers=cors_allow_headers,
                   cors_expose_headers=cors_expose_headers,
                   session_lifetime_days=session_lifetime_days,
                   auth_max_attempts=auth_max_attempts,
                   auth_lockout_minutes=auth_lockout_minutes,
                   bulk_delete_max_keys=bulk_delete_max_keys,
                   secret_ttl_seconds=secret_ttl_seconds,
                   stream_chunk_size=stream_chunk_size,
                   multipart_min_part_size=multipart_min_part_size,
                   bucket_stats_cache_ttl=bucket_stats_cache_ttl,
                   encryption_enabled=encryption_enabled,
                   encryption_master_key_path=encryption_master_key_path,
                   kms_enabled=kms_enabled,
                   kms_keys_path=kms_keys_path,
                   default_encryption_algorithm=default_encryption_algorithm)

    def validate_and_report(self) -> list[str]:
        """Validate configuration and return a list of warnings/issues.

        Call this at startup to detect potential misconfigurations before
        the application fully commits to running.
        """
        issues = []

        # Check if storage_root is writable
        try:
            test_file = self.storage_root / ".write_test"
            test_file.touch()
            test_file.unlink()
        except (OSError, PermissionError) as e:
            issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")

        # Check if storage_root looks like a temp directory
        storage_str = str(self.storage_root).lower()
        if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
            issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")

        # Check if IAM config path is under storage_root
        try:
            self.iam_config_path.relative_to(self.storage_root)
        except ValueError:
            issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")

        # Check if bucket policy path is under storage_root
        try:
            self.bucket_policy_path.relative_to(self.storage_root)
        except ValueError:
            issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")

        # Check if log path is writable
        try:
            self.log_path.parent.mkdir(parents=True, exist_ok=True)
            test_log = self.log_path.parent / ".write_test"
            test_log.touch()
            test_log.unlink()
        except (OSError, PermissionError) as e:
            issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")

        # Check log path location
        log_str = str(self.log_path).lower()
        if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
            issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")

        # Check if encryption keys path is under storage_root (when encryption is enabled)
        if self.encryption_enabled:
            try:
                self.encryption_master_key_path.relative_to(self.storage_root)
            except ValueError:
                issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")

        # Check if KMS keys path is under storage_root (when KMS is enabled)
        if self.kms_enabled:
            try:
                self.kms_keys_path.relative_to(self.storage_root)
            except ValueError:
                issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")

        # Warn about production settings
        if self.secret_key == "dev-secret-key":
            issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")

        if "*" in self.cors_origins:
            issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")

        return issues

    def print_startup_summary(self) -> None:
        """Print a summary of the configuration at startup."""
        print("\n" + "=" * 60)
        print("MyFSIO Configuration Summary")
        print("=" * 60)
        print(f"  STORAGE_ROOT: {self.storage_root}")
        print(f"  IAM_CONFIG: {self.iam_config_path}")
        print(f"  BUCKET_POLICY: {self.bucket_policy_path}")
        print(f"  LOG_PATH: {self.log_path}")
        if self.api_base_url:
            print(f"  API_BASE_URL: {self.api_base_url}")
        if self.encryption_enabled:
            print(f"  ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
        if self.kms_enabled:
            print(f"  KMS: Enabled (Keys: {self.kms_keys_path})")
        print("=" * 60)

        issues = self.validate_and_report()
        if issues:
            print("\nConfiguration Issues Detected:")
            for issue in issues:
                print(f"  • {issue}")
            print()
        else:
            print("  ✓ Configuration validated successfully\n")

    def to_flask_config(self) -> Dict[str, Any]:
        return {
            "STORAGE_ROOT": str(self.storage_root),
            "MAX_CONTENT_LENGTH": self.max_upload_size,
            "UI_PAGE_SIZE": self.ui_page_size,
            "SECRET_KEY": self.secret_key,
            "IAM_CONFIG": str(self.iam_config_path),
            "BUCKET_POLICY_PATH": str(self.bucket_policy_path),
            "API_BASE_URL": self.api_base_url,
            "AWS_REGION": self.aws_region,
            "AWS_SERVICE": self.aws_service,
            "UI_ENFORCE_BUCKET_POLICIES": self.ui_enforce_bucket_policies,
            "AUTH_MAX_ATTEMPTS": self.auth_max_attempts,
            "AUTH_LOCKOUT_MINUTES": self.auth_lockout_minutes,
            "BULK_DELETE_MAX_KEYS": self.bulk_delete_max_keys,
            "SECRET_TTL_SECONDS": self.secret_ttl_seconds,
            "STREAM_CHUNK_SIZE": self.stream_chunk_size,
            "MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
            "BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
            "LOG_LEVEL": self.log_level,
            "LOG_TO_FILE": self.log_to_file,
            "LOG_FILE": str(self.log_path),
            "LOG_MAX_BYTES": self.log_max_bytes,
            "LOG_BACKUP_COUNT": self.log_backup_count,
            "RATELIMIT_DEFAULT": self.ratelimit_default,
            "RATELIMIT_STORAGE_URI": self.ratelimit_storage_uri,
            "CORS_ORIGINS": self.cors_origins,
            "CORS_METHODS": self.cors_methods,
            "CORS_ALLOW_HEADERS": self.cors_allow_headers,
            "CORS_EXPOSE_HEADERS": self.cors_expose_headers,
            "SESSION_LIFETIME_DAYS": self.session_lifetime_days,
            "ENCRYPTION_ENABLED": self.encryption_enabled,
            "ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),
            "KMS_ENABLED": self.kms_enabled,
            "KMS_KEYS_PATH": str(self.kms_keys_path),
            "DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
        }
```
@@ -1,395 +0,0 @@

```python
"""Encryption providers for server-side and client-side encryption."""
from __future__ import annotations

import base64
import io
import json
import secrets
from dataclasses import dataclass
from pathlib import Path
from typing import Any, BinaryIO, Dict, Generator, Optional

from cryptography.hazmat.primitives.ciphers.aead import AESGCM


class EncryptionError(Exception):
    """Raised when encryption/decryption fails."""


@dataclass
class EncryptionResult:
    """Result of encrypting data."""
    ciphertext: bytes
    nonce: bytes
    key_id: str
    encrypted_data_key: bytes


@dataclass
class EncryptionMetadata:
    """Metadata stored with encrypted objects."""
    algorithm: str
    key_id: str
    nonce: bytes
    encrypted_data_key: bytes

    def to_dict(self) -> Dict[str, str]:
        return {
            "x-amz-server-side-encryption": self.algorithm,
            "x-amz-encryption-key-id": self.key_id,
            "x-amz-encryption-nonce": base64.b64encode(self.nonce).decode(),
            "x-amz-encrypted-data-key": base64.b64encode(self.encrypted_data_key).decode(),
        }

    @classmethod
    def from_dict(cls, data: Dict[str, str]) -> Optional["EncryptionMetadata"]:
        algorithm = data.get("x-amz-server-side-encryption")
        if not algorithm:
            return None
        try:
            return cls(
                algorithm=algorithm,
                key_id=data.get("x-amz-encryption-key-id", "local"),
                nonce=base64.b64decode(data.get("x-amz-encryption-nonce", "")),
                encrypted_data_key=base64.b64decode(data.get("x-amz-encrypted-data-key", "")),
            )
        except Exception:
            return None


class EncryptionProvider:
    """Base class for encryption providers."""

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        raise NotImplementedError

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        raise NotImplementedError

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form.

        Returns:
            Tuple of (plaintext_key, encrypted_key)
        """
        raise NotImplementedError


class LocalKeyEncryption(EncryptionProvider):
    """SSE-S3 style encryption using a local master key.

    Uses envelope encryption:
    1. Generate a unique data key for each object
    2. Encrypt the data with the data key (AES-256-GCM)
    3. Encrypt the data key with the master key
    4. Store the encrypted data key alongside the ciphertext
    """

    KEY_ID = "local"

    def __init__(self, master_key_path: Path):
        self.master_key_path = master_key_path
        self._master_key: bytes | None = None

    @property
    def master_key(self) -> bytes:
        if self._master_key is None:
            self._master_key = self._load_or_create_master_key()
        return self._master_key

    def _load_or_create_master_key(self) -> bytes:
        """Load master key from file or generate a new one."""
        if self.master_key_path.exists():
            try:
                return base64.b64decode(self.master_key_path.read_text().strip())
            except Exception as exc:
                raise EncryptionError(f"Failed to load master key: {exc}") from exc

        key = secrets.token_bytes(32)
        try:
            self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
            self.master_key_path.write_text(base64.b64encode(key).decode())
        except OSError as exc:
            raise EncryptionError(f"Failed to save master key: {exc}") from exc
        return key

    def _encrypt_data_key(self, data_key: bytes) -> bytes:
        """Encrypt the data key with the master key."""
        aesgcm = AESGCM(self.master_key)
        nonce = secrets.token_bytes(12)
        encrypted = aesgcm.encrypt(nonce, data_key, None)
        return nonce + encrypted

    def _decrypt_data_key(self, encrypted_data_key: bytes) -> bytes:
        """Decrypt the data key using the master key."""
        if len(encrypted_data_key) < 12 + 32 + 16:  # nonce + key + tag
            raise EncryptionError("Invalid encrypted data key")
        aesgcm = AESGCM(self.master_key)
        nonce = encrypted_data_key[:12]
        ciphertext = encrypted_data_key[12:]
        try:
            return aesgcm.decrypt(nonce, ciphertext, None)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data key: {exc}") from exc

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form."""
        plaintext_key = secrets.token_bytes(32)
        encrypted_key = self._encrypt_data_key(plaintext_key)
        return plaintext_key, encrypted_key

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt data using envelope encryption."""
        data_key, encrypted_data_key = self.generate_data_key()

        aesgcm = AESGCM(data_key)
        nonce = secrets.token_bytes(12)
        ciphertext = aesgcm.encrypt(nonce, plaintext, None)

        return EncryptionResult(
            ciphertext=ciphertext,
            nonce=nonce,
            key_id=self.KEY_ID,
            encrypted_data_key=encrypted_data_key,
        )

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data using envelope encryption."""
        # Decrypt the data key
        data_key = self._decrypt_data_key(encrypted_data_key)

        # Decrypt the data
        aesgcm = AESGCM(data_key)
        try:
            return aesgcm.decrypt(nonce, ciphertext, None)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data: {exc}") from exc


class StreamingEncryptor:
    """Encrypts/decrypts data in streaming fashion for large files.

    For large files, we encrypt in chunks. Each chunk is encrypted with the
    same data key but a unique nonce derived from the base nonce + chunk index.
    """

    CHUNK_SIZE = 64 * 1024
    HEADER_SIZE = 4

    def __init__(self, provider: EncryptionProvider, chunk_size: int = CHUNK_SIZE):
        self.provider = provider
        self.chunk_size = chunk_size

    def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
        """Derive a unique nonce for each chunk."""
        # XOR the base nonce with the chunk index
        nonce_int = int.from_bytes(base_nonce, "big")
        derived = nonce_int ^ chunk_index
        return derived.to_bytes(12, "big")

    def encrypt_stream(self, stream: BinaryIO,
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream and return encrypted stream + metadata."""
        data_key, encrypted_data_key = self.provider.generate_data_key()
        base_nonce = secrets.token_bytes(12)

        aesgcm = AESGCM(data_key)
        encrypted_chunks = []
        chunk_index = 0

        while True:
            chunk = stream.read(self.chunk_size)
            if not chunk:
                break

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)

            size_prefix = len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big")
            encrypted_chunks.append(size_prefix + encrypted_chunk)
            chunk_index += 1

        header = chunk_index.to_bytes(4, "big")
        encrypted_data = header + b"".join(encrypted_chunks)

        metadata = EncryptionMetadata(
            algorithm="AES256",
            key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
            nonce=base_nonce,
            encrypted_data_key=encrypted_data_key,
        )

        return io.BytesIO(encrypted_data), metadata

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream using the provided metadata."""
        if isinstance(self.provider, LocalKeyEncryption):
            data_key = self.provider._decrypt_data_key(metadata.encrypted_data_key)
        else:
            raise EncryptionError("Unsupported provider for streaming decryption")

        aesgcm = AESGCM(data_key)
        base_nonce = metadata.nonce

        chunk_count_bytes = stream.read(4)
        if len(chunk_count_bytes) < 4:
            raise EncryptionError("Invalid encrypted stream: missing header")
        chunk_count = int.from_bytes(chunk_count_bytes, "big")

        decrypted_chunks = []
        for chunk_index in range(chunk_count):
            size_bytes = stream.read(self.HEADER_SIZE)
            if len(size_bytes) < self.HEADER_SIZE:
                raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
            chunk_size = int.from_bytes(size_bytes, "big")

            encrypted_chunk = stream.read(chunk_size)
            if len(encrypted_chunk) < chunk_size:
                raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            try:
                decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
                decrypted_chunks.append(decrypted_chunk)
            except Exception as exc:
                raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc

        return io.BytesIO(b"".join(decrypted_chunks))


class EncryptionManager:
    """Manages encryption providers and operations."""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self._local_provider: LocalKeyEncryption | None = None
        self._kms_provider: Any = None  # Set by KMS module
        self._streaming_encryptor: StreamingEncryptor | None = None

    @property
    def enabled(self) -> bool:
        return self.config.get("encryption_enabled", False)

    @property
    def default_algorithm(self) -> str:
        return self.config.get("default_encryption_algorithm", "AES256")

    def get_local_provider(self) -> LocalKeyEncryption:
        if self._local_provider is None:
            key_path = Path(self.config.get("encryption_master_key_path", "data/.myfsio.sys/keys/master.key"))
            self._local_provider = LocalKeyEncryption(key_path)
        return self._local_provider

    def set_kms_provider(self, kms_provider: Any) -> None:
        """Set the KMS provider (injected from kms module)."""
        self._kms_provider = kms_provider

    def get_provider(self, algorithm: str, kms_key_id: str | None = None) -> EncryptionProvider:
        """Get the appropriate encryption provider for the algorithm."""
        if algorithm == "AES256":
            return self.get_local_provider()
        elif algorithm == "aws:kms":
            if self._kms_provider is None:
                raise EncryptionError("KMS is not configured")
            return self._kms_provider.get_provider(kms_key_id)
        else:
            raise EncryptionError(f"Unsupported encryption algorithm: {algorithm}")

    def get_streaming_encryptor(self) -> StreamingEncryptor:
        if self._streaming_encryptor is None:
            self._streaming_encryptor = StreamingEncryptor(self.get_local_provider())
        return self._streaming_encryptor

    def encrypt_object(self, data: bytes, algorithm: str = "AES256",
                       kms_key_id: str | None = None,
                       context: Dict[str, str] | None = None) -> tuple[bytes, EncryptionMetadata]:
        """Encrypt object data."""
        provider = self.get_provider(algorithm, kms_key_id)
        result = provider.encrypt(data, context)

        metadata = EncryptionMetadata(
            algorithm=algorithm,
            key_id=result.key_id,
            nonce=result.nonce,
            encrypted_data_key=result.encrypted_data_key,
        )

        return result.ciphertext, metadata

    def decrypt_object(self, ciphertext: bytes, metadata: EncryptionMetadata,
                       context: Dict[str, str] | None = None) -> bytes:
        """Decrypt object data."""
        provider = self.get_provider(metadata.algorithm, metadata.key_id)
        return provider.decrypt(
            ciphertext,
            metadata.nonce,
            metadata.encrypted_data_key,
            metadata.key_id,
            context,
        )

    def encrypt_stream(self, stream: BinaryIO, algorithm: str = "AES256",
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream for large files."""
        encryptor = self.get_streaming_encryptor()
        return encryptor.encrypt_stream(stream, context)

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream."""
        encryptor = self.get_streaming_encryptor()
        return encryptor.decrypt_stream(stream, metadata)


class ClientEncryptionHelper:
    """Helpers for client-side encryption.

    Client-side encryption is performed by the client, but this helper
    provides key generation and materials for clients that need them.
    """

    @staticmethod
    def generate_client_key() -> Dict[str, str]:
        """Generate a new client encryption key."""
        from datetime import datetime, timezone
        key = secrets.token_bytes(32)
        return {
            "key": base64.b64encode(key).decode(),
            "algorithm": "AES-256-GCM",
            "created_at": datetime.now(timezone.utc).isoformat(),
        }

    @staticmethod
    def encrypt_with_key(plaintext: bytes, key_b64: str) -> Dict[str, str]:
        """Encrypt data with a client-provided key."""
        key = base64.b64decode(key_b64)
        if len(key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        aesgcm = AESGCM(key)
        nonce = secrets.token_bytes(12)
        ciphertext = aesgcm.encrypt(nonce, plaintext, None)

        return {
            "ciphertext": base64.b64encode(ciphertext).decode(),
            "nonce": base64.b64encode(nonce).decode(),
            "algorithm": "AES-256-GCM",
        }

    @staticmethod
    def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str) -> bytes:
        """Decrypt data with a client-provided key."""
        key = base64.b64decode(key_b64)
        nonce = base64.b64decode(nonce_b64)
        ciphertext = base64.b64decode(ciphertext_b64)

        if len(key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        aesgcm = AESGCM(key)
        try:
            return aesgcm.decrypt(nonce, ciphertext, None)
        except Exception as exc:
            raise EncryptionError(f"Decryption failed: {exc}") from exc
```
app/iam.py
@@ -1,456 +0,0 @@

```python
"""Lightweight IAM-style user and policy management."""
from __future__ import annotations

import json
import math
import secrets
from collections import deque
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set


class IamError(RuntimeError):
    """Raised when authentication or authorization fails."""


S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"}
IAM_ACTIONS = {
    "iam:list_users",
    "iam:create_user",
    "iam:delete_user",
    "iam:rotate_key",
    "iam:update_policy",
}
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}

ACTION_ALIASES = {
    # List actions
    "list": "list",
    "s3:listbucket": "list",
    "s3:listallmybuckets": "list",
    "s3:listbucketversions": "list",
    "s3:listmultipartuploads": "list",
    "s3:listparts": "list",
    # Read actions
    "read": "read",
    "s3:getobject": "read",
    "s3:getobjectversion": "read",
    "s3:getobjecttagging": "read",
    "s3:getobjectversiontagging": "read",
    "s3:getobjectacl": "read",
    "s3:getbucketversioning": "read",
    "s3:headobject": "read",
    "s3:headbucket": "read",
    # Write actions
    "write": "write",
    "s3:putobject": "write",
    "s3:createbucket": "write",
    "s3:putobjecttagging": "write",
    "s3:putbucketversioning": "write",
    "s3:createmultipartupload": "write",
    "s3:uploadpart": "write",
    "s3:completemultipartupload": "write",
    "s3:abortmultipartupload": "write",
    "s3:copyobject": "write",
    # Delete actions
    "delete": "delete",
    "s3:deleteobject": "delete",
    "s3:deleteobjectversion": "delete",
    "s3:deletebucket": "delete",
    "s3:deleteobjecttagging": "delete",
    # Share actions (ACL)
    "share": "share",
    "s3:putobjectacl": "share",
    "s3:putbucketacl": "share",
    "s3:getbucketacl": "share",
    # Policy actions
    "policy": "policy",
    "s3:putbucketpolicy": "policy",
    "s3:getbucketpolicy": "policy",
    "s3:deletebucketpolicy": "policy",
    # Replication actions
    "replication": "replication",
    "s3:getreplicationconfiguration": "replication",
    "s3:putreplicationconfiguration": "replication",
    "s3:deletereplicationconfiguration": "replication",
    "s3:replicateobject": "replication",
    "s3:replicatetags": "replication",
    "s3:replicatedelete": "replication",
    # IAM actions
    "iam:listusers": "iam:list_users",
    "iam:createuser": "iam:create_user",
    "iam:deleteuser": "iam:delete_user",
    "iam:rotateaccesskey": "iam:rotate_key",
    "iam:putuserpolicy": "iam:update_policy",
    "iam:*": "iam:*",
}


@dataclass
class Policy:
    bucket: str
    actions: Set[str]


@dataclass
class Principal:
    access_key: str
    display_name: str
    policies: List[Policy]


class IamService:
    """Loads IAM configuration, manages users, and evaluates policies."""

    def __init__(self, config_path: Path, auth_max_attempts: int = 5, auth_lockout_minutes: int = 15) -> None:
        self.config_path = Path(config_path)
        self.auth_max_attempts = auth_max_attempts
        self.auth_lockout_window = timedelta(minutes=auth_lockout_minutes)
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        if not self.config_path.exists():
            self._write_default()
        self._users: Dict[str, Dict[str, Any]] = {}
        self._raw_config: Dict[str, Any] = {}
        self._failed_attempts: Dict[str, Deque[datetime]] = {}
        self._last_load_time = 0.0
        self._load()

    def _maybe_reload(self) -> None:
        """Reload configuration if the file has changed on disk."""
        try:
            if self.config_path.stat().st_mtime > self._last_load_time:
                self._load()
        except OSError:
            pass

    # ---------------------- authz helpers ----------------------
    def authenticate(self, access_key: str, secret_key: str) -> Principal:
        self._maybe_reload()
        access_key = (access_key or "").strip()
        secret_key = (secret_key or "").strip()
        if not access_key or not secret_key:
            raise IamError("Missing access credentials")
        if self._is_locked_out(access_key):
            seconds = self._seconds_until_unlock(access_key)
            raise IamError(
                f"Access temporarily locked. Try again in {seconds} seconds."
            )
        record = self._users.get(access_key)
        if not record or record["secret_key"] != secret_key:
            self._record_failed_attempt(access_key)
            raise IamError("Invalid credentials")
        self._clear_failed_attempts(access_key)
        return self._build_principal(access_key, record)

    def _record_failed_attempt(self, access_key: str) -> None:
        if not access_key:
            return
        attempts = self._failed_attempts.setdefault(access_key, deque())
        self._prune_attempts(attempts)
        attempts.append(datetime.now())

    def _clear_failed_attempts(self, access_key: str) -> None:
        if not access_key:
            return
        self._failed_attempts.pop(access_key, None)

    def _prune_attempts(self, attempts: Deque[datetime]) -> None:
        cutoff = datetime.now() - self.auth_lockout_window
        while attempts and attempts[0] < cutoff:
            attempts.popleft()

    def _is_locked_out(self, access_key: str) -> bool:
        if not access_key:
            return False
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return False
        self._prune_attempts(attempts)
        return len(attempts) >= self.auth_max_attempts

    def _seconds_until_unlock(self, access_key: str) -> int:
        attempts = self._failed_attempts.get(access_key)
        if not attempts:
            return 0
        self._prune_attempts(attempts)
        if len(attempts) < self.auth_max_attempts:
            return 0
        oldest = attempts[0]
        elapsed = (datetime.now() - oldest).total_seconds()
        return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))

    def principal_for_key(self, access_key: str) -> Principal:
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        return self._build_principal(access_key, record)

    def secret_for_key(self, access_key: str) -> str:
        self._maybe_reload()
        record = self._users.get(access_key)
        if not record:
            raise IamError("Unknown access key")
        return record["secret_key"]

    def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
        action = self._normalize_action(action)
        if action not in ALLOWED_ACTIONS:
            raise IamError(f"Unknown action '{action}'")
        bucket_name = bucket_name or "*"
        normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
        if not self._is_allowed(principal, normalized, action):
            raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")

    def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
        return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]

    def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
        bucket_name = bucket_name.lower()
        for policy in principal.policies:
            if policy.bucket not in {"*", bucket_name}:
                continue
            if "*" in policy.actions or action in policy.actions:
                return True
            if "iam:*" in policy.actions and action.startswith("iam:"):
                return True
        return False

    # ---------------------- management helpers ----------------------
    def list_users(self) -> List[Dict[str, Any]]:
        listing: List[Dict[str, Any]] = []
        for access_key, record in self._users.items():
            listing.append(
                {
                    "access_key": access_key,
                    "display_name": record["display_name"],
                    "policies": [
                        {"bucket": policy.bucket, "actions": sorted(policy.actions)}
                        for policy in record["policies"]
                    ],
                }
            )
        return listing

    def create_user(
        self,
        *,
        display_name: str,
        policies: Optional[Sequence[Dict[str, Any]]] = None,
        access_key: str | None = None,
        secret_key: str | None = None,
    ) -> Dict[str, str]:
        access_key = (access_key or self._generate_access_key()).strip()
        if not access_key:
            raise IamError("Access key cannot be empty")
        if access_key in self._users:
            raise IamError("Access key already exists")
        secret_key = secret_key or self._generate_secret_key()
        sanitized_policies = self._prepare_policy_payload(policies)
        record = {
            "access_key": access_key,
            "secret_key": secret_key,
            "display_name": display_name or access_key,
            "policies": sanitized_policies,
        }
        self._raw_config.setdefault("users", []).append(record)
        self._save()
        self._load()
        return {"access_key": access_key, "secret_key": secret_key}

    def rotate_secret(self, access_key: str) -> str:
        user = self._get_raw_user(access_key)
        new_secret = self._generate_secret_key()
        user["secret_key"] = new_secret
        self._save()
        self._load()
        return new_secret

    def update_user(self, access_key: str, display_name: str) -> None:
        user = self._get_raw_user(access_key)
        user["display_name"] = display_name
        self._save()
        self._load()

    def delete_user(self, access_key: str) -> None:
        users = self._raw_config.get("users", [])
        if len(users) <= 1:
            raise IamError("Cannot delete the only user")
        remaining = [user for user in users if user["access_key"] != access_key]
        if len(remaining) == len(users):
            raise IamError("User not found")
        self._raw_config["users"] = remaining
        self._save()
        self._load()

    def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
        user = self._get_raw_user(access_key)
        user["policies"] = self._prepare_policy_payload(policies)
        self._save()
        self._load()

    # ---------------------- config helpers ----------------------
    def _load(self) -> None:
        try:
            self._last_load_time = self.config_path.stat().st_mtime
            content = self.config_path.read_text(encoding='utf-8')
            raw = json.loads(content)
        except FileNotFoundError:
            raise IamError(f"IAM config not found: {self.config_path}")
        except json.JSONDecodeError as e:
            raise IamError(f"Corrupted IAM config (invalid JSON): {e}")
        except PermissionError as e:
            raise IamError(f"Cannot read IAM config (permission denied): {e}")
        except (OSError, ValueError) as e:
            raise IamError(f"Failed to load IAM config: {e}")

        users: Dict[str, Dict[str, Any]] = {}
        for user in raw.get("users", []):
            policies = self._build_policy_objects(user.get("policies", []))
            users[user["access_key"]] = {
                "secret_key": user["secret_key"],
                "display_name": user.get("display_name", user["access_key"]),
                "policies": policies,
            }
        if not users:
            raise IamError("IAM configuration contains no users")
        self._users = users
        self._raw_config = {
            "users": [
                {
                    "access_key": entry["access_key"],
                    "secret_key": entry["secret_key"],
                    "display_name": entry.get("display_name", entry["access_key"]),
                    "policies": entry.get("policies", []),
                }
                for entry in raw.get("users", [])
            ]
        }

    def _save(self) -> None:
        try:
            temp_path = self.config_path.with_suffix('.json.tmp')
            temp_path.write_text(json.dumps(self._raw_config, indent=2), encoding='utf-8')
            temp_path.replace(self.config_path)
        except (OSError, PermissionError) as e:
            raise IamError(f"Cannot save IAM config: {e}")

    # ---------------------- insight helpers ----------------------
    def config_summary(self) -> Dict[str, Any]:
        return {
            "path": str(self.config_path),
            "user_count": len(self._users),
            "allowed_actions": sorted(ALLOWED_ACTIONS),
        }

    def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
        payload: Dict[str, Any] = {"users": []}
        for user in self._raw_config.get("users", []):
            record = dict(user)
            if mask_secrets and "secret_key" in record:
                record["secret_key"] = "••••••••••"
            payload["users"].append(record)
        return payload

    def _build_policy_objects(self, policies: Sequence[Dict[str, Any]]) -> List[Policy]:
        entries: List[Policy] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if action_set:
                entries.append(Policy(bucket=bucket, actions=action_set))
        return entries

    def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        if not policies:
            policies = (
                {
                    "bucket": "*",
                    "actions": ["list", "read", "write", "delete", "share", "policy"],
                },
            )
        sanitized: List[Dict[str, Any]] = []
        for policy in policies:
            bucket = str(policy.get("bucket", "*")).lower()
            raw_actions = policy.get("actions", [])
            if isinstance(raw_actions, str):
                raw_actions = [raw_actions]
            action_set: Set[str] = set()
            for action in raw_actions:
                canonical = self._normalize_action(action)
                if canonical == "*":
                    action_set = set(ALLOWED_ACTIONS)
                    break
                if canonical:
                    action_set.add(canonical)
            if not action_set:
                continue
            sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
        if not sanitized:
            raise IamError("At least one policy with valid actions is required")
        return sanitized

    def _build_principal(self, access_key: str, record: Dict[str, Any]) -> Principal:
        return Principal(
            access_key=access_key,
            display_name=record["display_name"],
            policies=record["policies"],
        )

    def _normalize_action(self, action: str) -> str:
        if not action:
            return ""
        lowered = action.strip().lower()
        if lowered == "*":
            return "*"
        candidate = ACTION_ALIASES.get(lowered, lowered)
        return candidate if candidate in ALLOWED_ACTIONS else ""

    def _write_default(self) -> None:
        default = {
            "users": [
                {
                    "access_key": "localadmin",
                    "secret_key": "localadmin",
                    "display_name": "Local Admin",
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }
        self.config_path.write_text(json.dumps(default, indent=2))

    def _generate_access_key(self) -> str:
        return secrets.token_hex(8)

    def _generate_secret_key(self) -> str:
        return secrets.token_urlsafe(24)

    def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
        for user in self._raw_config.get("users", []):
            if user["access_key"] == access_key:
                return user
        raise IamError("User not found")

    def get_secret_key(self, access_key: str) -> str | None:
        self._maybe_reload()
        record = self._users.get(access_key)
        return record["secret_key"] if record else None
```
|
||||
|
||||
def get_principal(self, access_key: str) -> Principal | None:
|
||||
self._maybe_reload()
|
||||
record = self._users.get(access_key)
|
||||
return self._build_principal(access_key, record) if record else None
|
||||
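The action normalizer above is what makes user-supplied policies forgiving: aliases map to canonical actions, `*` expands to every allowed action, and unknown actions are silently dropped. A standalone sketch of the same idea (the `ALLOWED_ACTIONS` values and the `get`/`put` aliases below are illustrative, not the module's actual tables):

```python
ALLOWED_ACTIONS = {"list", "read", "write", "delete", "share", "policy"}
ACTION_ALIASES = {"get": "read", "put": "write"}  # hypothetical alias table


def normalize_action(action: str) -> str:
    """Map an alias to its canonical action; return "" for unknown actions."""
    lowered = action.strip().lower()
    if lowered == "*":
        return "*"
    candidate = ACTION_ALIASES.get(lowered, lowered)
    return candidate if candidate in ALLOWED_ACTIONS else ""


def sanitize_actions(raw_actions) -> list:
    """Mirror of the wildcard-expansion loop used by the policy sanitizer."""
    if isinstance(raw_actions, str):
        raw_actions = [raw_actions]
    action_set = set()
    for action in raw_actions:
        canonical = normalize_action(action)
        if canonical == "*":
            return sorted(ALLOWED_ACTIONS)  # "*" wins over everything else
        if canonical:
            action_set.add(canonical)
    return sorted(action_set)


print(sanitize_actions(["GET", "put", "bogus"]))  # ['read', 'write']
print(sanitize_actions("*"))                      # all six allowed actions
```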
@@ -1,356 +0,0 @@
"""Background replication worker."""
from __future__ import annotations

import json
import logging
import mimetypes
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError

from .connections import ConnectionStore, RemoteConnection
from .storage import ObjectStorage, StorageError

logger = logging.getLogger(__name__)

REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"

REPLICATION_MODE_NEW_ONLY = "new_only"
REPLICATION_MODE_ALL = "all"


@dataclass
class ReplicationStats:
    """Statistics for replication operations - computed dynamically."""
    objects_synced: int = 0  # Objects that exist in both source and destination
    objects_pending: int = 0  # Objects in source but not in destination
    objects_orphaned: int = 0  # Objects in destination but not in source (will be deleted)
    bytes_synced: int = 0  # Total bytes synced to destination
    last_sync_at: Optional[float] = None
    last_sync_key: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "objects_synced": self.objects_synced,
            "objects_pending": self.objects_pending,
            "objects_orphaned": self.objects_orphaned,
            "bytes_synced": self.bytes_synced,
            "last_sync_at": self.last_sync_at,
            "last_sync_key": self.last_sync_key,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationStats":
        return cls(
            objects_synced=data.get("objects_synced", 0),
            objects_pending=data.get("objects_pending", 0),
            objects_orphaned=data.get("objects_orphaned", 0),
            bytes_synced=data.get("bytes_synced", 0),
            last_sync_at=data.get("last_sync_at"),
            last_sync_key=data.get("last_sync_key"),
        )


@dataclass
class ReplicationRule:
    bucket_name: str
    target_connection_id: str
    target_bucket: str
    enabled: bool = True
    mode: str = REPLICATION_MODE_NEW_ONLY
    created_at: Optional[float] = None
    stats: ReplicationStats = field(default_factory=ReplicationStats)

    def to_dict(self) -> dict:
        return {
            "bucket_name": self.bucket_name,
            "target_connection_id": self.target_connection_id,
            "target_bucket": self.target_bucket,
            "enabled": self.enabled,
            "mode": self.mode,
            "created_at": self.created_at,
            "stats": self.stats.to_dict(),
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationRule":
        stats_data = data.pop("stats", {})
        # Handle old rules without mode/created_at
        if "mode" not in data:
            data["mode"] = REPLICATION_MODE_NEW_ONLY
        if "created_at" not in data:
            data["created_at"] = None
        rule = cls(**data)
        rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
        return rule


class ReplicationManager:
    def __init__(self, storage: ObjectStorage, connections: ConnectionStore, rules_path: Path) -> None:
        self.storage = storage
        self.connections = connections
        self.rules_path = rules_path
        self._rules: Dict[str, ReplicationRule] = {}
        self._stats_lock = threading.Lock()
        self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
        self.reload_rules()

    def reload_rules(self) -> None:
        if not self.rules_path.exists():
            self._rules = {}
            return
        try:
            with open(self.rules_path, "r") as f:
                data = json.load(f)
            for bucket, rule_data in data.items():
                self._rules[bucket] = ReplicationRule.from_dict(rule_data)
        except (OSError, ValueError) as e:
            logger.error(f"Failed to load replication rules: {e}")

    def save_rules(self) -> None:
        data = {b: rule.to_dict() for b, rule in self._rules.items()}
        self.rules_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.rules_path, "w") as f:
            json.dump(data, f, indent=2)

    def get_rule(self, bucket_name: str) -> Optional[ReplicationRule]:
        return self._rules.get(bucket_name)

    def set_rule(self, rule: ReplicationRule) -> None:
        self._rules[rule.bucket_name] = rule
        self.save_rules()

    def delete_rule(self, bucket_name: str) -> None:
        if bucket_name in self._rules:
            del self._rules[bucket_name]
            self.save_rules()

    def _update_last_sync(self, bucket_name: str, object_key: str = "") -> None:
        """Update last sync timestamp after a successful operation."""
        with self._stats_lock:
            rule = self._rules.get(bucket_name)
            if not rule:
                return
            rule.stats.last_sync_at = time.time()
            rule.stats.last_sync_key = object_key
            self.save_rules()

    def get_sync_status(self, bucket_name: str) -> Optional[ReplicationStats]:
        """Dynamically compute replication status by comparing source and destination buckets."""
        rule = self.get_rule(bucket_name)
        if not rule:
            return None

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            return rule.stats  # Return cached stats if connection unavailable

        try:
            # Get source objects
            source_objects = self.storage.list_objects_all(bucket_name)
            source_keys = {obj.key: obj.size for obj in source_objects}

            # Get destination objects
            s3 = boto3.client(
                "s3",
                endpoint_url=connection.endpoint_url,
                aws_access_key_id=connection.access_key,
                aws_secret_access_key=connection.secret_key,
                region_name=connection.region,
            )

            dest_keys = set()
            bytes_synced = 0
            paginator = s3.get_paginator('list_objects_v2')
            try:
                for page in paginator.paginate(Bucket=rule.target_bucket):
                    for obj in page.get('Contents', []):
                        dest_keys.add(obj['Key'])
                        if obj['Key'] in source_keys:
                            bytes_synced += obj.get('Size', 0)
            except ClientError as e:
                if e.response['Error']['Code'] == 'NoSuchBucket':
                    # Destination bucket doesn't exist yet
                    dest_keys = set()
                else:
                    raise

            # Compute stats
            synced = source_keys.keys() & dest_keys  # Objects in both
            orphaned = dest_keys - source_keys.keys()  # In dest but not source

            # For "new_only" mode, we can't determine pending since we don't know
            # which objects existed before replication was enabled. Only "all" mode
            # should show pending (objects that should be replicated but aren't yet).
            if rule.mode == REPLICATION_MODE_ALL:
                pending = source_keys.keys() - dest_keys  # In source but not dest
            else:
                pending = set()  # New-only mode: don't show pre-existing as pending

            # Update cached stats with computed values
            rule.stats.objects_synced = len(synced)
            rule.stats.objects_pending = len(pending)
            rule.stats.objects_orphaned = len(orphaned)
            rule.stats.bytes_synced = bytes_synced

            return rule.stats

        except (ClientError, StorageError) as e:
            logger.error(f"Failed to compute sync status for {bucket_name}: {e}")
            return rule.stats  # Return cached stats on error

    def replicate_existing_objects(self, bucket_name: str) -> None:
        """Trigger replication for all existing objects in a bucket."""
        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Cannot replicate existing objects: Connection {rule.target_connection_id} not found")
            return

        try:
            objects = self.storage.list_objects_all(bucket_name)
            logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
            for obj in objects:
                self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")
        except StorageError as e:
            logger.error(f"Failed to list objects for replication: {e}")

    def create_remote_bucket(self, connection_id: str, bucket_name: str) -> None:
        """Create a bucket on the remote connection."""
        connection = self.connections.get(connection_id)
        if not connection:
            raise ValueError(f"Connection {connection_id} not found")

        try:
            s3 = boto3.client(
                "s3",
                endpoint_url=connection.endpoint_url,
                aws_access_key_id=connection.access_key,
                aws_secret_access_key=connection.secret_key,
                region_name=connection.region,
            )
            s3.create_bucket(Bucket=bucket_name)
        except ClientError as e:
            logger.error(f"Failed to create remote bucket {bucket_name}: {e}")
            raise

    def trigger_replication(self, bucket_name: str, object_key: str, action: str = "write") -> None:
        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Connection {rule.target_connection_id} not found")
            return

        self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)

    def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
        if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
            logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
            return

        try:
            ObjectStorage._sanitize_object_key(object_key)
        except StorageError as e:
            logger.error(f"Object key validation failed in replication: {e}")
            return

        file_size = 0
        try:
            config = Config(user_agent_extra=REPLICATION_USER_AGENT)
            s3 = boto3.client(
                "s3",
                endpoint_url=conn.endpoint_url,
                aws_access_key_id=conn.access_key,
                aws_secret_access_key=conn.secret_key,
                region_name=conn.region,
                config=config,
            )

            if action == "delete":
                try:
                    s3.delete_object(Bucket=rule.target_bucket, Key=object_key)
                    logger.info(f"Replicated DELETE {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
                    self._update_last_sync(bucket_name, object_key)
                except ClientError as e:
                    logger.error(f"Replication DELETE failed for {bucket_name}/{object_key}: {e}")
                return

            try:
                path = self.storage.get_object_path(bucket_name, object_key)
            except StorageError:
                logger.error(f"Source object not found: {bucket_name}/{object_key}")
                return

            metadata = self.storage.get_object_metadata(bucket_name, object_key)

            extra_args = {}
            if metadata:
                extra_args["Metadata"] = metadata

            # Guess content type to prevent corruption/wrong handling
            content_type, _ = mimetypes.guess_type(path)
            file_size = path.stat().st_size

            logger.info(f"Replicating {bucket_name}/{object_key}: Size={file_size}, ContentType={content_type}")

            try:
                with path.open("rb") as f:
                    s3.put_object(
                        Bucket=rule.target_bucket,
                        Key=object_key,
                        Body=f,
                        ContentLength=file_size,
                        ContentType=content_type or "application/octet-stream",
                        Metadata=metadata or {}
                    )
            except (ClientError, S3UploadFailedError) as e:
                is_no_bucket = False
                if isinstance(e, ClientError):
                    if e.response['Error']['Code'] == 'NoSuchBucket':
                        is_no_bucket = True
                elif isinstance(e, S3UploadFailedError):
                    if "NoSuchBucket" in str(e):
                        is_no_bucket = True

                if is_no_bucket:
                    logger.info(f"Target bucket {rule.target_bucket} not found. Attempting to create it.")
                    try:
                        s3.create_bucket(Bucket=rule.target_bucket)
                        # Retry upload
                        with path.open("rb") as f:
                            s3.put_object(
                                Bucket=rule.target_bucket,
                                Key=object_key,
                                Body=f,
                                ContentLength=file_size,
                                ContentType=content_type or "application/octet-stream",
                                Metadata=metadata or {}
                            )
                    except Exception as create_err:
                        logger.error(f"Failed to create target bucket {rule.target_bucket}: {create_err}")
                        raise e  # Raise original error
                else:
                    raise e

            logger.info(f"Replicated {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
            self._update_last_sync(bucket_name, object_key)

        except (ClientError, OSError, ValueError) as e:
            logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
        except Exception:
            logger.exception(f"Unexpected error during replication for {bucket_name}/{object_key}")
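`get_sync_status` reduces to plain set arithmetic over the two key listings; a minimal sketch of the bookkeeping (the bucket contents here are invented):

```python
source = {"a.txt": 10, "b.txt": 20, "c.txt": 30}   # key -> size on the source
dest = {"a.txt", "b.txt", "stale.txt"}             # keys present on the destination

synced = source.keys() & dest        # in both: {'a.txt', 'b.txt'}
pending = source.keys() - dest       # only shown in "all" mode: {'c.txt'}
orphaned = dest - source.keys()      # in dest but not source: {'stale.txt'}
bytes_synced = sum(source[k] for k in synced)  # 10 + 20 = 30

print(len(synced), len(pending), len(orphaned), bytes_synced)  # 2 1 1 30
```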
app/s3_api.py (2404 lines): file diff suppressed because it is too large
app/storage.py (1453 lines): file diff suppressed because it is too large
@@ -1,5 +0,0 @@
#!/bin/sh
set -e

# Run both services using the python runner in production mode
exec python run.py --prod
@@ -1,3 +0,0 @@
[pytest]
testpaths = tests
norecursedirs = data .git __pycache__ .venv
@@ -11,3 +11,7 @@ htmlcov
logs
data
tmp
tests
myfsio_core/target
Dockerfile
.dockerignore
python/Dockerfile (new file, 56 lines)
@@ -0,0 +1,56 @@
FROM python:3.14.3-slim AS builder

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

WORKDIR /build

RUN apt-get update \
    && apt-get install -y --no-install-recommends build-essential curl \
    && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal \
    && rm -rf /var/lib/apt/lists/*

ENV PATH="/root/.cargo/bin:${PATH}"

RUN pip install --no-cache-dir maturin

COPY myfsio_core ./myfsio_core
RUN cd myfsio_core \
    && maturin build --release --out /wheels


FROM python:3.14.3-slim

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

WORKDIR /app

COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY --from=builder /wheels/*.whl /tmp/
RUN pip install --no-cache-dir /tmp/*.whl && rm /tmp/*.whl

COPY app ./app
COPY templates ./templates
COPY static ./static
COPY run.py ./
COPY docker-entrypoint.sh ./

RUN chmod +x docker-entrypoint.sh \
    && mkdir -p /app/data \
    && useradd -m -u 1000 myfsio \
    && chown -R myfsio:myfsio /app

USER myfsio

EXPOSE 5000 5100

ENV APP_HOST=0.0.0.0 \
    FLASK_ENV=production \
    FLASK_DEBUG=0

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"

CMD ["./docker-entrypoint.sh"]
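Assuming the build context is the `python/` directory (it must contain `myfsio_core`, `app`, `templates`, `static`, `run.py`, and `docker-entrypoint.sh` for the `COPY` steps above to succeed), a build and run might look like this; the image tag, port mapping, and volume path are illustrative:

```bash
docker build -f python/Dockerfile -t myfsio-python python/
docker run --rm -p 5000:5000 -p 5100:5100 -v "$PWD/data:/app/data" myfsio-python
```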
python/README.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Deprecated Python Implementation

The Python implementation of MyFSIO is deprecated as of 2026-04-21.

The supported server runtime now lives in `../rust/myfsio-engine` and serves the S3 API and web UI from the Rust `myfsio-server` binary. Keep this tree for migration reference, compatibility checks, and legacy tests only.

For normal development and operations, run:

```bash
cd ../rust/myfsio-engine
cargo run -p myfsio-server --
```

Do not add new product features to the Python implementation unless they are needed to unblock a migration or compare behavior with the Rust server.
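If a comparison run against this legacy tree is still needed, the shell entrypoint removed in the diff above used `exec python run.py --prod`; outside Docker that would look roughly like this (the `pip install` step is an assumption about the tree's setup):

```bash
cd python
pip install -r requirements.txt
python run.py --prod
```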
python/app/__init__.py (new file, 763 lines)
@@ -0,0 +1,763 @@
from __future__ import annotations

import io
import itertools
import logging
import mimetypes
import os
import shutil
import sys
import time
from logging.handlers import RotatingFileHandler
from pathlib import Path
from datetime import timedelta
from typing import Any, Dict, List, Optional

from flask import Flask, Response, g, has_request_context, redirect, render_template, request, url_for
from flask_cors import CORS
from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix

from .access_logging import AccessLoggingService
from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
from .acl import AclService
from .bucket_policies import BucketPolicyStore
from .config import AppConfig
from .connections import ConnectionStore
from .encryption import EncryptionManager
from .extensions import limiter, csrf
from .iam import IamService
from .kms import KMSManager
from .gc import GarbageCollector
from .integrity import IntegrityChecker
from .lifecycle import LifecycleManager
from .notifications import NotificationService
from .object_lock import ObjectLockService
from .replication import ReplicationManager
from .secret_store import EphemeralSecretStore
from .site_registry import SiteRegistry, SiteInfo
from .storage import ObjectStorage, StorageError
from .version import get_version
from .website_domains import WebsiteDomainStore

_request_counter = itertools.count(1)

class _ChunkedTransferMiddleware:
    """Normalize chunked and aws-chunked uploads so the app always sees a plain body with a Content-Length."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
            return self.app(environ, start_response)

        transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
        content_length = environ.get("CONTENT_LENGTH")

        if "chunked" in transfer_encoding.lower():
            if content_length:
                del environ["HTTP_TRANSFER_ENCODING"]
            else:
                # Buffer the chunked body so a Content-Length can be synthesized.
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))
                        del environ["HTTP_TRANSFER_ENCODING"]

        content_length = environ.get("CONTENT_LENGTH")
        if not content_length or content_length == "0":
            # SigV4 streaming uploads (aws-chunked) may arrive without a usable Content-Length.
            sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
            decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
            content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
            if ("STREAMING" in sha256.upper() or decoded_len
                    or "aws-chunked" in content_encoding.lower()):
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))

        # Rewind the input stream so downstream handlers read from the start.
        raw = environ.get("wsgi.input")
        if raw and hasattr(raw, "seek"):
            try:
                raw.seek(0)
            except Exception:
                pass

        return self.app(environ, start_response)

def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
    """Migrate config file from legacy locations to the active path.

    Checks each legacy path in order and moves the first one found to the active path.
    This ensures backward compatibility for users upgrading from older versions.
    """
    active_path.parent.mkdir(parents=True, exist_ok=True)

    if active_path.exists():
        return active_path

    for legacy_path in legacy_paths:
        if legacy_path.exists():
            try:
                shutil.move(str(legacy_path), str(active_path))
            except OSError:
                shutil.copy2(legacy_path, active_path)
                try:
                    legacy_path.unlink(missing_ok=True)
                except OSError:
                    pass
            break

    return active_path

def create_app(
    test_config: Optional[Dict[str, Any]] = None,
    *,
    include_api: bool = True,
    include_ui: bool = True,
) -> Flask:
    """Create and configure the Flask application."""
    config = AppConfig.from_env(test_config)

    if getattr(sys, "frozen", False):
        project_root = Path(sys._MEIPASS)
    else:
        project_root = Path(__file__).resolve().parent.parent

    app = Flask(
        __name__,
        static_folder=str(project_root / "static"),
        template_folder=str(project_root / "templates"),
    )
    app.config.update(config.to_flask_config())
    if test_config:
        app.config.update(test_config)
    app.config.setdefault("APP_VERSION", get_version())
    app.permanent_session_lifetime = timedelta(days=int(app.config.get("SESSION_LIFETIME_DAYS", 30)))
    if app.config.get("TESTING"):
        app.config.setdefault("WTF_CSRF_ENABLED", False)

    # Trust X-Forwarded-* headers from proxies
    num_proxies = app.config.get("NUM_TRUSTED_PROXIES", 1)
    if num_proxies:
        if "NUM_TRUSTED_PROXIES" not in os.environ:
            logging.getLogger(__name__).warning(
                "NUM_TRUSTED_PROXIES not set, defaulting to 1. "
                "Set NUM_TRUSTED_PROXIES=0 if not behind a reverse proxy."
            )
        app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)

    if app.config.get("ENABLE_GZIP", True):
        app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)

    app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)

    _configure_cors(app)
    _configure_logging(app)

    limiter.init_app(app)
    csrf.init_app(app)

    storage = ObjectStorage(
        Path(app.config["STORAGE_ROOT"]),
        cache_ttl=app.config.get("OBJECT_CACHE_TTL", 60),
        object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
        bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
        object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
        meta_read_cache_max=app.config.get("META_READ_CACHE_MAX", 2048),
    )

    if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
        storage.warm_cache_async()

    iam = IamService(
        Path(app.config["IAM_CONFIG"]),
        auth_max_attempts=app.config.get("AUTH_MAX_ATTEMPTS", 5),
        auth_lockout_minutes=app.config.get("AUTH_LOCKOUT_MINUTES", 15),
        encryption_key=app.config.get("SECRET_KEY"),
    )
    bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
    secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))

    storage_root = Path(app.config["STORAGE_ROOT"])
    config_dir = storage_root / ".myfsio.sys" / "config"
    config_dir.mkdir(parents=True, exist_ok=True)

    connections_path = _migrate_config_file(
        active_path=config_dir / "connections.json",
        legacy_paths=[
            storage_root / ".myfsio.sys" / "connections.json",
            storage_root / ".connections.json",
        ],
    )
    replication_rules_path = _migrate_config_file(
        active_path=config_dir / "replication_rules.json",
        legacy_paths=[
            storage_root / ".myfsio.sys" / "replication_rules.json",
            storage_root / ".replication_rules.json",
        ],
    )

    connections = ConnectionStore(connections_path)
    replication = ReplicationManager(
        storage,
        connections,
        replication_rules_path,
        storage_root,
        connect_timeout=app.config.get("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5),
        read_timeout=app.config.get("REPLICATION_READ_TIMEOUT_SECONDS", 30),
        max_retries=app.config.get("REPLICATION_MAX_RETRIES", 2),
        streaming_threshold_bytes=app.config.get("REPLICATION_STREAMING_THRESHOLD_BYTES", 10 * 1024 * 1024),
        max_failures_per_bucket=app.config.get("REPLICATION_MAX_FAILURES_PER_BUCKET", 50),
    )

    site_registry_path = config_dir / "site_registry.json"
    site_registry = SiteRegistry(site_registry_path)
    if app.config.get("SITE_ID") and not site_registry.get_local_site():
        site_registry.set_local_site(SiteInfo(
            site_id=app.config["SITE_ID"],
            endpoint=app.config.get("SITE_ENDPOINT") or "",
            region=app.config.get("SITE_REGION", "us-east-1"),
            priority=app.config.get("SITE_PRIORITY", 100),
        ))

    encryption_config = {
        "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
        "encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
        "default_encryption_algorithm": app.config.get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"),
        "encryption_chunk_size_bytes": app.config.get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024),
    }
    encryption_manager = EncryptionManager(encryption_config)

    kms_manager = None
    if app.config.get("KMS_ENABLED", False):
        kms_keys_path = Path(app.config.get("KMS_KEYS_PATH", ""))
        kms_master_key_path = Path(app.config.get("ENCRYPTION_MASTER_KEY_PATH", ""))
        kms_manager = KMSManager(
            kms_keys_path,
            kms_master_key_path,
            generate_data_key_min_bytes=app.config.get("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1),
            generate_data_key_max_bytes=app.config.get("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024),
        )
        encryption_manager.set_kms_provider(kms_manager)

    if app.config.get("ENCRYPTION_ENABLED", False):
        from .encrypted_storage import EncryptedObjectStorage
        storage = EncryptedObjectStorage(storage, encryption_manager)

    acl_service = AclService(storage_root)
    object_lock_service = ObjectLockService(storage_root)
    notification_service = NotificationService(
        storage_root,
        allow_internal_endpoints=app.config.get("ALLOW_INTERNAL_ENDPOINTS", False),
    )
    access_logging_service = AccessLoggingService(storage_root)
    access_logging_service.set_storage(storage)

    lifecycle_manager = None
    if app.config.get("LIFECYCLE_ENABLED", False):
        base_storage = storage.storage if hasattr(storage, 'storage') else storage
        lifecycle_manager = LifecycleManager(
            base_storage,
            interval_seconds=app.config.get("LIFECYCLE_INTERVAL_SECONDS", 3600),
            storage_root=storage_root,
            max_history_per_bucket=app.config.get("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50),
        )
        lifecycle_manager.start()

    gc_collector = None
    if app.config.get("GC_ENABLED", False):
        gc_collector = GarbageCollector(
            storage_root=storage_root,
            interval_hours=app.config.get("GC_INTERVAL_HOURS", 6.0),
            temp_file_max_age_hours=app.config.get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0),
            multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
            lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
            dry_run=app.config.get("GC_DRY_RUN", False),
            io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
        )
        gc_collector.start()

    integrity_checker = None
    if app.config.get("INTEGRITY_ENABLED", False):
        integrity_checker = IntegrityChecker(
            storage_root=storage_root,
            interval_hours=app.config.get("INTEGRITY_INTERVAL_HOURS", 24.0),
            batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
            auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
            dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
            io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
        )
        integrity_checker.start()

    app.extensions["object_storage"] = storage
    app.extensions["iam"] = iam
    app.extensions["bucket_policies"] = bucket_policies
    app.extensions["secret_store"] = secret_store
    app.extensions["limiter"] = limiter
    app.extensions["connections"] = connections
    app.extensions["replication"] = replication
    app.extensions["encryption"] = encryption_manager
    app.extensions["kms"] = kms_manager
    app.extensions["acl"] = acl_service
    app.extensions["lifecycle"] = lifecycle_manager
    app.extensions["gc"] = gc_collector
    app.extensions["integrity"] = integrity_checker
    app.extensions["object_lock"] = object_lock_service
    app.extensions["notifications"] = notification_service
    app.extensions["access_logging"] = access_logging_service
    app.extensions["site_registry"] = site_registry

    website_domains_store = None
    if app.config.get("WEBSITE_HOSTING_ENABLED", False):
        website_domains_path = config_dir / "website_domains.json"
        website_domains_store = WebsiteDomainStore(website_domains_path)
    app.extensions["website_domains"] = website_domains_store

    from .s3_client import S3ProxyClient
    api_base = app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
    app.extensions["s3_proxy"] = S3ProxyClient(
        api_base_url=api_base,
        region=app.config.get("AWS_REGION", "us-east-1"),
    )

    operation_metrics_collector = None
    if app.config.get("OPERATION_METRICS_ENABLED", False):
        operation_metrics_collector = OperationMetricsCollector(
            storage_root,
            interval_minutes=app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
            retention_hours=app.config.get("OPERATION_METRICS_RETENTION_HOURS", 24),
        )
    app.extensions["operation_metrics"] = operation_metrics_collector

    system_metrics_collector = None
    if app.config.get("METRICS_HISTORY_ENABLED", False):
        from .system_metrics import SystemMetricsCollector
        system_metrics_collector = SystemMetricsCollector(
            storage_root,
            interval_minutes=app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
            retention_hours=app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
        )
        system_metrics_collector.set_storage(storage)
    app.extensions["system_metrics"] = system_metrics_collector

    site_sync_worker = None
    if app.config.get("SITE_SYNC_ENABLED", False):
        from .site_sync import SiteSyncWorker
        site_sync_worker = SiteSyncWorker(
            storage=storage,
            connections=connections,
            replication_manager=replication,
            storage_root=storage_root,
            interval_seconds=app.config.get("SITE_SYNC_INTERVAL_SECONDS", 60),
            batch_size=app.config.get("SITE_SYNC_BATCH_SIZE", 100),
            connect_timeout=app.config.get("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10),
            read_timeout=app.config.get("SITE_SYNC_READ_TIMEOUT_SECONDS", 120),
            max_retries=app.config.get("SITE_SYNC_MAX_RETRIES", 2),
            clock_skew_tolerance_seconds=app.config.get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0),
        )
        site_sync_worker.start()
    app.extensions["site_sync"] = site_sync_worker

    @app.errorhandler(500)
    def internal_error(error):
        wants_html = request.accept_mimetypes.accept_html
        path = request.path or ""
        if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
            return render_template('500.html'), 500
        error_xml = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            '<Error>'
            '<Code>InternalError</Code>'
            '<Message>An internal server error occurred</Message>'
            f'<Resource>{path}</Resource>'
            f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
            '</Error>'
        )
        return error_xml, 500, {'Content-Type': 'application/xml'}

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        wants_html = request.accept_mimetypes.accept_html
        path = request.path or ""
        if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
            return render_template('csrf_error.html', reason=e.description), 400
        error_xml = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            '<Error>'
            '<Code>CSRFError</Code>'
            f'<Message>{e.description}</Message>'
            f'<Resource>{path}</Resource>'
            f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
            '</Error>'
        )
        return error_xml, 400, {'Content-Type': 'application/xml'}

    @app.template_filter("filesizeformat")
    def filesizeformat(value: int) -> str:
        """Format bytes as human-readable file size."""
        for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
            if abs(value) < 1024.0 or unit == "PB":
                if unit == "B":
                    return f"{int(value)} {unit}"
                return f"{value:.1f} {unit}"
            value /= 1024.0
        return f"{value:.1f} PB"

    @app.template_filter("timestamp_to_datetime")
    def timestamp_to_datetime(value: float) -> str:
        """Format Unix timestamp as human-readable datetime in configured timezone."""
        from datetime import datetime, timezone as dt_timezone
        from zoneinfo import ZoneInfo
        if not value:
            return "Never"
        try:
            dt_utc = datetime.fromtimestamp(value, dt_timezone.utc)
            display_tz = app.config.get("DISPLAY_TIMEZONE", "UTC")
            if display_tz and display_tz != "UTC":
                try:
                    tz = ZoneInfo(display_tz)
                    dt_local = dt_utc.astimezone(tz)
                    return dt_local.strftime("%Y-%m-%d %H:%M:%S")
                except (KeyError, ValueError):
                    pass
            return dt_utc.strftime("%Y-%m-%d %H:%M:%S UTC")
        except (ValueError, OSError):
            return "Unknown"

    @app.template_filter("format_datetime")
    def format_datetime_filter(dt, include_tz: bool = True) -> str:
        """Format datetime object as human-readable string in configured timezone."""
        from datetime import datetime, timezone as dt_timezone
        from zoneinfo import ZoneInfo
        if not dt:
            return ""
        try:
            display_tz = app.config.get("DISPLAY_TIMEZONE", "UTC")
            if display_tz and display_tz != "UTC":
                try:
                    tz = ZoneInfo(display_tz)
                    if dt.tzinfo is None:
                        dt = dt.replace(tzinfo=dt_timezone.utc)
                    dt = dt.astimezone(tz)
                except (KeyError, ValueError):
                    pass
            tz_abbr = dt.strftime("%Z") or "UTC"
            if include_tz:
                return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"
            return dt.strftime("%b %d, %Y %H:%M")
        except (ValueError, AttributeError):
            return str(dt)

    if include_api:
        from .s3_api import s3_api_bp
        from .kms_api import kms_api_bp
        from .admin_api import admin_api_bp

        app.register_blueprint(s3_api_bp)
        app.register_blueprint(kms_api_bp)
        app.register_blueprint(admin_api_bp)
        csrf.exempt(s3_api_bp)
        csrf.exempt(kms_api_bp)
        csrf.exempt(admin_api_bp)

    if include_ui:
        from .ui import ui_bp

        app.register_blueprint(ui_bp)
        if not include_api:
            @app.get("/")
            def ui_root_redirect():
                return redirect(url_for("ui.buckets_overview"))

    @app.errorhandler(404)
    def handle_not_found(error):
        wants_html = request.accept_mimetypes.accept_html
        path = request.path or ""
        if include_ui and wants_html:
            if not include_api or path.startswith("/ui") or path == "/":
                return render_template("404.html"), 404
        return error

    @app.get("/myfsio/health")
    def healthcheck() -> Dict[str, str]:
        return {"status": "ok"}

    return app

def create_api_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    return create_app(test_config, include_api=True, include_ui=False)


def create_ui_app(test_config: Optional[Dict[str, Any]] = None) -> Flask:
    return create_app(test_config, include_api=False, include_ui=True)


def _configure_cors(app: Flask) -> None:
    origins = app.config.get("CORS_ORIGINS", ["*"])
    methods = app.config.get("CORS_METHODS", ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
    allow_headers = app.config.get("CORS_ALLOW_HEADERS", ["*"])
    expose_headers = app.config.get("CORS_EXPOSE_HEADERS", ["*"])
    CORS(
        app,
        resources={r"/*": {"origins": origins, "methods": methods, "allow_headers": allow_headers, "expose_headers": expose_headers}},
        supports_credentials=True,
    )

class _RequestContextFilter(logging.Filter):
    """Inject request-specific attributes into log records."""

    def filter(self, record: logging.LogRecord) -> bool:
        if has_request_context():
            record.request_id = getattr(g, "request_id", "-")
            record.path = request.path
            record.method = request.method
            record.remote_addr = request.remote_addr or "-"
        else:
            record.request_id = getattr(record, "request_id", "-")
            record.path = getattr(record, "path", "-")
            record.method = getattr(record, "method", "-")
            record.remote_addr = getattr(record, "remote_addr", "-")
        return True

def _configure_logging(app: Flask) -> None:
    formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(request_id)s | %(method)s %(path)s | %(message)s"
    )

    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    stream_handler.addFilter(_RequestContextFilter())

    logger = app.logger
    for handler in logger.handlers[:]:
        handler.close()
    logger.handlers.clear()
    logger.addHandler(stream_handler)

    if app.config.get("LOG_TO_FILE"):
        log_file = Path(app.config["LOG_FILE"])
        log_file.parent.mkdir(parents=True, exist_ok=True)
        file_handler = RotatingFileHandler(
            log_file,
            maxBytes=int(app.config.get("LOG_MAX_BYTES", 5 * 1024 * 1024)),
            backupCount=int(app.config.get("LOG_BACKUP_COUNT", 3)),
            encoding="utf-8",
        )
        file_handler.setFormatter(formatter)
        file_handler.addFilter(_RequestContextFilter())
        logger.addHandler(file_handler)

    logger.setLevel(getattr(logging, app.config.get("LOG_LEVEL", "INFO"), logging.INFO))

    @app.before_request
    def _log_request_start() -> None:
        g.request_id = f"{os.getpid():x}{next(_request_counter):012x}"
        g.request_started_at = time.perf_counter()
        g.request_bytes_in = request.content_length or 0

    @app.before_request
    def _maybe_serve_website():
        if not app.config.get("WEBSITE_HOSTING_ENABLED"):
            return None
        if request.method not in {"GET", "HEAD"}:
            return None
        host = request.host
        if ":" in host:
            host = host.rsplit(":", 1)[0]
        host = host.lower()
        store = app.extensions.get("website_domains")
        if not store:
            return None
        bucket = store.get_bucket(host)
        if not bucket:
            return None
        storage = app.extensions["object_storage"]
        if not storage.bucket_exists(bucket):
            return _website_error_response(404, "Not Found")
        website_config = storage.get_bucket_website(bucket)
        if not website_config:
            return _website_error_response(404, "Not Found")
        index_doc = website_config.get("index_document", "index.html")
        error_doc = website_config.get("error_document")
        req_path = request.path.lstrip("/")
        if not req_path or req_path.endswith("/"):
            object_key = req_path + index_doc
        else:
            object_key = req_path
        try:
            obj_path = storage.get_object_path(bucket, object_key)
        except (StorageError, OSError):
            if object_key == req_path:
                try:
                    obj_path = storage.get_object_path(bucket, req_path + "/" + index_doc)
                    object_key = req_path + "/" + index_doc
                except (StorageError, OSError):
                    return _serve_website_error(storage, bucket, error_doc, 404)
            else:
                return _serve_website_error(storage, bucket, error_doc, 404)
        content_type = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
        is_encrypted = False
        try:
            metadata = storage.get_object_metadata(bucket, object_key)
            is_encrypted = "x-amz-server-side-encryption" in metadata
        except (StorageError, OSError):
            pass
        if is_encrypted and hasattr(storage, "get_object_data"):
            try:
                data, _ = storage.get_object_data(bucket, object_key)
                file_size = len(data)
            except (StorageError, OSError):
                return _website_error_response(500, "Internal Server Error")
        else:
            data = None
            try:
                stat = obj_path.stat()
                file_size = stat.st_size
            except OSError:
                return _website_error_response(500, "Internal Server Error")
        if request.method == "HEAD":
            response = Response(status=200)
            response.headers["Content-Length"] = file_size
            response.headers["Content-Type"] = content_type
            response.headers["Accept-Ranges"] = "bytes"
            return response
        from .s3_api import _parse_range_header
        range_header = request.headers.get("Range")
        if range_header:
            ranges = _parse_range_header(range_header, file_size)
            if ranges is None:
                return Response(status=416, headers={"Content-Range": f"bytes */{file_size}"})
            start, end = ranges[0]
            length = end - start + 1
            if data is not None:
                partial_data = data[start:end + 1]
                response = Response(partial_data, status=206, mimetype=content_type)
            else:
                def _stream_range(file_path, start_pos, length_to_read):
                    with file_path.open("rb") as f:
                        f.seek(start_pos)
                        remaining = length_to_read
                        while remaining > 0:
                            chunk = f.read(min(262144, remaining))
                            if not chunk:
                                break
                            remaining -= len(chunk)
                            yield chunk
                response = Response(_stream_range(obj_path, start, length), status=206, mimetype=content_type, direct_passthrough=True)
            response.headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
            response.headers["Content-Length"] = length
            response.headers["Accept-Ranges"] = "bytes"
            return response
        if data is not None:
            response = Response(data, mimetype=content_type)
            response.headers["Content-Length"] = file_size
            response.headers["Accept-Ranges"] = "bytes"
            return response
        def _stream(file_path):
            with file_path.open("rb") as f:
                while True:
                    chunk = f.read(65536)
                    if not chunk:
                        break
                    yield chunk
        response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
        response.headers["Content-Length"] = file_size
        response.headers["Accept-Ranges"] = "bytes"
        return response

    def _serve_website_error(storage, bucket, error_doc_key, status_code):
        if not error_doc_key:
            return _website_error_response(status_code, "Not Found" if status_code == 404 else "Error")
        try:
            obj_path = storage.get_object_path(bucket, error_doc_key)
        except (StorageError, OSError):
            return _website_error_response(status_code, "Not Found")
        content_type = mimetypes.guess_type(error_doc_key)[0] or "text/html"
        is_encrypted = False
        try:
            metadata = storage.get_object_metadata(bucket, error_doc_key)
            is_encrypted = "x-amz-server-side-encryption" in metadata
        except (StorageError, OSError):
            pass
        if is_encrypted and hasattr(storage, "get_object_data"):
            try:
                data, _ = storage.get_object_data(bucket, error_doc_key)
                response = Response(data, status=status_code, mimetype=content_type)
                response.headers["Content-Length"] = len(data)
                return response
            except (StorageError, OSError):
                return _website_error_response(status_code, "Not Found")
        try:
            data = obj_path.read_bytes()
            response = Response(data, status=status_code, mimetype=content_type)
            response.headers["Content-Length"] = len(data)
            return response
        except OSError:
            return _website_error_response(status_code, "Not Found")

    def _website_error_response(status_code, message):
        if status_code == 404:
            body = "<h1>404 page not found</h1>"
        else:
            body = f"{status_code} {message}"
        return Response(body, status=status_code, mimetype="text/html")

    @app.after_request
    def _log_request_end(response):
        duration_ms = 0.0
        if hasattr(g, "request_started_at"):
            duration_ms = (time.perf_counter() - g.request_started_at) * 1000
        request_id = getattr(g, "request_id", f"{os.getpid():x}{next(_request_counter):012x}")
        response.headers.setdefault("X-Request-ID", request_id)
        if app.logger.isEnabledFor(logging.INFO):
            app.logger.info(
                "Request completed",
                extra={
                    "path": request.path,
                    "method": request.method,
                    "remote_addr": request.remote_addr,
                },
            )
        response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
        response.headers["Server"] = "MyFSIO"

        operation_metrics = app.extensions.get("operation_metrics")
        if operation_metrics:
            bytes_in = getattr(g, "request_bytes_in", 0)
            bytes_out = response.content_length or 0
            error_code = getattr(g, "s3_error_code", None)
            endpoint_type = classify_endpoint(request.path)
            operation_metrics.record_request(
                method=request.method,
                endpoint_type=endpoint_type,
                status_code=response.status_code,
                latency_ms=duration_ms,
                bytes_in=bytes_in,
                bytes_out=bytes_out,
                error_code=error_code,
            )

        return response
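For local experiments the factory can be driven directly; a minimal sketch, assuming `AppConfig.from_env` fills in any keys not overridden here (the paths below are placeholders, not defaults the module defines):

```python
from app import create_app

app = create_app({
    "TESTING": True,                     # also disables CSRF via the factory
    "STORAGE_ROOT": "/tmp/myfsio-data",  # placeholder path
    "IAM_CONFIG": "/tmp/myfsio-data/iam.json",
    "BUCKET_POLICY_PATH": "/tmp/myfsio-data/bucket_policies.json",
})

with app.test_client() as client:
    print(client.get("/myfsio/health").get_json())  # {'status': 'ok'}
```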
python/app/access_logging.py (new file, 265 lines)
@@ -0,0 +1,265 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AccessLogEntry:
|
||||
bucket_owner: str = "-"
|
||||
bucket: str = "-"
|
||||
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
remote_ip: str = "-"
|
||||
requester: str = "-"
|
||||
request_id: str = field(default_factory=lambda: uuid.uuid4().hex[:16].upper())
|
||||
operation: str = "-"
|
||||
key: str = "-"
|
||||
request_uri: str = "-"
|
||||
http_status: int = 200
|
||||
error_code: str = "-"
|
||||
bytes_sent: int = 0
|
||||
object_size: int = 0
|
||||
total_time_ms: int = 0
|
||||
turn_around_time_ms: int = 0
|
||||
referrer: str = "-"
|
||||
user_agent: str = "-"
|
||||
version_id: str = "-"
|
||||
host_id: str = "-"
|
||||
signature_version: str = "SigV4"
|
||||
cipher_suite: str = "-"
|
||||
authentication_type: str = "AuthHeader"
|
||||
host_header: str = "-"
|
||||
tls_version: str = "-"
|
||||
|
||||
def to_log_line(self) -> str:
|
||||
time_str = self.timestamp.strftime("[%d/%b/%Y:%H:%M:%S %z]")
|
||||
return (
|
||||
f'{self.bucket_owner} {self.bucket} {time_str} {self.remote_ip} '
|
||||
f'{self.requester} {self.request_id} {self.operation} {self.key} '
|
||||
f'"{self.request_uri}" {self.http_status} {self.error_code or "-"} '
|
||||
f'{self.bytes_sent or "-"} {self.object_size or "-"} {self.total_time_ms or "-"} '
|
||||
f'{self.turn_around_time_ms or "-"} "{self.referrer}" "{self.user_agent}" {self.version_id}'
|
||||
)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"bucket_owner": self.bucket_owner,
|
||||
"bucket": self.bucket,
|
||||
"timestamp": self.timestamp.isoformat(),
|
||||
"remote_ip": self.remote_ip,
|
||||
"requester": self.requester,
|
||||
"request_id": self.request_id,
|
||||
"operation": self.operation,
|
||||
"key": self.key,
|
||||
"request_uri": self.request_uri,
|
||||
"http_status": self.http_status,
|
||||
"error_code": self.error_code,
|
||||
"bytes_sent": self.bytes_sent,
|
||||
"object_size": self.object_size,
|
||||
"total_time_ms": self.total_time_ms,
|
||||
"referrer": self.referrer,
|
||||
"user_agent": self.user_agent,
|
||||
"version_id": self.version_id,
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class LoggingConfiguration:
|
||||
target_bucket: str
|
||||
target_prefix: str = ""
|
||||
enabled: bool = True
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"LoggingEnabled": {
|
||||
"TargetBucket": self.target_bucket,
|
||||
"TargetPrefix": self.target_prefix,
|
||||
}
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> Optional["LoggingConfiguration"]:
|
||||
logging_enabled = data.get("LoggingEnabled")
|
||||
if not logging_enabled:
|
||||
return None
|
||||
return cls(
|
||||
target_bucket=logging_enabled.get("TargetBucket", ""),
|
||||
target_prefix=logging_enabled.get("TargetPrefix", ""),
|
||||
enabled=True,
|
||||
)


class AccessLoggingService:
    def __init__(self, storage_root: Path, flush_interval: int = 60, max_buffer_size: int = 1000):
        self.storage_root = storage_root
        self.flush_interval = flush_interval
        self.max_buffer_size = max_buffer_size
        self._configs: Dict[str, LoggingConfiguration] = {}
        self._buffer: Dict[str, List[AccessLogEntry]] = {}
        self._buffer_lock = threading.Lock()
        self._shutdown = threading.Event()
        self._storage = None

        self._flush_thread = threading.Thread(target=self._flush_loop, name="access-log-flush", daemon=True)
        self._flush_thread.start()

    def set_storage(self, storage: Any) -> None:
        self._storage = storage

    def _config_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "logging.json"

    def get_bucket_logging(self, bucket_name: str) -> Optional[LoggingConfiguration]:
        if bucket_name in self._configs:
            return self._configs[bucket_name]

        config_path = self._config_path(bucket_name)
        if not config_path.exists():
            return None

        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            config = LoggingConfiguration.from_dict(data)
            if config:
                self._configs[bucket_name] = config
            return config
        except (json.JSONDecodeError, OSError) as e:
            logger.warning(f"Failed to load logging config for {bucket_name}: {e}")
            return None

    def set_bucket_logging(self, bucket_name: str, config: LoggingConfiguration) -> None:
        config_path = self._config_path(bucket_name)
        config_path.parent.mkdir(parents=True, exist_ok=True)
        config_path.write_text(json.dumps(config.to_dict(), indent=2), encoding="utf-8")
        self._configs[bucket_name] = config

    def delete_bucket_logging(self, bucket_name: str) -> None:
        config_path = self._config_path(bucket_name)
        try:
            if config_path.exists():
                config_path.unlink()
        except OSError:
            pass
        self._configs.pop(bucket_name, None)

    def log_request(
        self,
        bucket_name: str,
        *,
        operation: str,
        key: str = "-",
        remote_ip: str = "-",
        requester: str = "-",
        request_uri: str = "-",
        http_status: int = 200,
        error_code: str = "",
        bytes_sent: int = 0,
        object_size: int = 0,
        total_time_ms: int = 0,
        referrer: str = "-",
        user_agent: str = "-",
        version_id: str = "-",
        request_id: str = "",
    ) -> None:
        config = self.get_bucket_logging(bucket_name)
        if not config or not config.enabled:
            return

        entry = AccessLogEntry(
            bucket_owner="local-owner",
            bucket=bucket_name,
            remote_ip=remote_ip,
            requester=requester,
            request_id=request_id or uuid.uuid4().hex[:16].upper(),
            operation=operation,
            key=key,
            request_uri=request_uri,
            http_status=http_status,
            error_code=error_code,
            bytes_sent=bytes_sent,
            object_size=object_size,
            total_time_ms=total_time_ms,
            referrer=referrer,
            user_agent=user_agent,
            version_id=version_id,
        )

        target_key = f"{config.target_bucket}:{config.target_prefix}"
        should_flush = False
        with self._buffer_lock:
            if target_key not in self._buffer:
                self._buffer[target_key] = []
            self._buffer[target_key].append(entry)
            should_flush = len(self._buffer[target_key]) >= self.max_buffer_size

        if should_flush:
            self._flush_buffer(target_key)

    def _flush_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.flush_interval)
            if not self._shutdown.is_set():
                self._flush_all()

    def _flush_all(self) -> None:
        with self._buffer_lock:
            targets = list(self._buffer.keys())

        for target_key in targets:
            self._flush_buffer(target_key)

    def _flush_buffer(self, target_key: str) -> None:
        with self._buffer_lock:
            entries = self._buffer.pop(target_key, [])

        if not entries or not self._storage:
            return

        try:
            bucket_name, prefix = target_key.split(":", 1)
        except ValueError:
            logger.error(f"Invalid target key: {target_key}")
            return

        now = datetime.now(timezone.utc)
        log_key = f"{prefix}{now.strftime('%Y-%m-%d-%H-%M-%S')}-{uuid.uuid4().hex[:8]}"

        log_content = "\n".join(entry.to_log_line() for entry in entries) + "\n"

        try:
            stream = io.BytesIO(log_content.encode("utf-8"))
            self._storage.put_object(bucket_name, log_key, stream, enforce_quota=False)
            logger.info(f"Flushed {len(entries)} access log entries to {bucket_name}/{log_key}")
        except Exception as e:
            logger.error(f"Failed to write access log to {bucket_name}/{log_key}: {e}")
            with self._buffer_lock:
                if target_key not in self._buffer:
                    self._buffer[target_key] = []
                self._buffer[target_key] = entries + self._buffer[target_key]

    def flush(self) -> None:
        self._flush_all()

    def shutdown(self) -> None:
        self._shutdown.set()
        self._flush_all()
        self._flush_thread.join(timeout=5.0)

    def get_stats(self) -> Dict[str, Any]:
        with self._buffer_lock:
            buffered = sum(len(entries) for entries in self._buffer.values())
            return {
                "buffered_entries": buffered,
                "target_buckets": len(self._buffer),
            }
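
A minimal usage sketch for the service above. The storage object is a hypothetical stand-in; it only needs the put_object(bucket, key, stream, enforce_quota=False) call that _flush_buffer makes:

    from pathlib import Path

    service = AccessLoggingService(Path("/srv/myfsio"), flush_interval=5)
    service.set_storage(storage)  # assumed backend exposing put_object(...)
    service.set_bucket_logging("photos", LoggingConfiguration(target_bucket="logs", target_prefix="photos/"))
    service.log_request("photos", operation="REST.GET.OBJECT", key="cat.jpg", bytes_sent=1024)
    service.shutdown()  # flushes remaining entries to logs/photos/<timestamp>-<random suffix>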
204 python/app/acl.py Normal file
@@ -0,0 +1,204 @@
from __future__ import annotations

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Set


ACL_PERMISSION_FULL_CONTROL = "FULL_CONTROL"
ACL_PERMISSION_WRITE = "WRITE"
ACL_PERMISSION_WRITE_ACP = "WRITE_ACP"
ACL_PERMISSION_READ = "READ"
ACL_PERMISSION_READ_ACP = "READ_ACP"

ALL_PERMISSIONS = {
    ACL_PERMISSION_FULL_CONTROL,
    ACL_PERMISSION_WRITE,
    ACL_PERMISSION_WRITE_ACP,
    ACL_PERMISSION_READ,
    ACL_PERMISSION_READ_ACP,
}

PERMISSION_TO_ACTIONS = {
    ACL_PERMISSION_FULL_CONTROL: {"read", "write", "delete", "list", "share"},
    ACL_PERMISSION_WRITE: {"write", "delete"},
    ACL_PERMISSION_WRITE_ACP: {"share"},
    ACL_PERMISSION_READ: {"read", "list"},
    ACL_PERMISSION_READ_ACP: {"share"},
}

GRANTEE_ALL_USERS = "*"
GRANTEE_AUTHENTICATED_USERS = "authenticated"


@dataclass
class AclGrant:
    grantee: str
    permission: str

    def to_dict(self) -> Dict[str, str]:
        return {"grantee": self.grantee, "permission": self.permission}

    @classmethod
    def from_dict(cls, data: Dict[str, str]) -> "AclGrant":
        return cls(grantee=data["grantee"], permission=data["permission"])


@dataclass
class Acl:
    owner: str
    grants: List[AclGrant] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "owner": self.owner,
            "grants": [g.to_dict() for g in self.grants],
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Acl":
        return cls(
            owner=data.get("owner", ""),
            grants=[AclGrant.from_dict(g) for g in data.get("grants", [])],
        )

    def get_allowed_actions(self, principal_id: Optional[str], is_authenticated: bool = True) -> Set[str]:
        actions: Set[str] = set()
        if principal_id and principal_id == self.owner:
            actions.update(PERMISSION_TO_ACTIONS[ACL_PERMISSION_FULL_CONTROL])
        for grant in self.grants:
            if grant.grantee == GRANTEE_ALL_USERS:
                actions.update(PERMISSION_TO_ACTIONS.get(grant.permission, set()))
            elif grant.grantee == GRANTEE_AUTHENTICATED_USERS and is_authenticated:
                actions.update(PERMISSION_TO_ACTIONS.get(grant.permission, set()))
            elif principal_id and grant.grantee == principal_id:
                actions.update(PERMISSION_TO_ACTIONS.get(grant.permission, set()))
        return actions


CANNED_ACLS = {
    "private": lambda owner: Acl(
        owner=owner,
        grants=[AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL)],
    ),
    "public-read": lambda owner: Acl(
        owner=owner,
        grants=[
            AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL),
            AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
        ],
    ),
    "public-read-write": lambda owner: Acl(
        owner=owner,
        grants=[
            AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL),
            AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
            AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_WRITE),
        ],
    ),
    "authenticated-read": lambda owner: Acl(
        owner=owner,
        grants=[
            AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL),
            AclGrant(grantee=GRANTEE_AUTHENTICATED_USERS, permission=ACL_PERMISSION_READ),
        ],
    ),
    "bucket-owner-read": lambda owner: Acl(
        owner=owner,
        grants=[
            AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL),
        ],
    ),
    "bucket-owner-full-control": lambda owner: Acl(
        owner=owner,
        grants=[
            AclGrant(grantee=owner, permission=ACL_PERMISSION_FULL_CONTROL),
        ],
    ),
}


def create_canned_acl(canned_acl: str, owner: str) -> Acl:
    factory = CANNED_ACLS.get(canned_acl)
    if not factory:
        return CANNED_ACLS["private"](owner)
    return factory(owner)


class AclService:
    def __init__(self, storage_root: Path):
        self.storage_root = storage_root
        self._bucket_acl_cache: Dict[str, Acl] = {}

    def _bucket_acl_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / ".acl.json"

    def get_bucket_acl(self, bucket_name: str) -> Optional[Acl]:
        if bucket_name in self._bucket_acl_cache:
            return self._bucket_acl_cache[bucket_name]
        acl_path = self._bucket_acl_path(bucket_name)
        if not acl_path.exists():
            return None
        try:
            data = json.loads(acl_path.read_text(encoding="utf-8"))
            acl = Acl.from_dict(data)
            self._bucket_acl_cache[bucket_name] = acl
            return acl
        except (OSError, json.JSONDecodeError):
            return None

    def set_bucket_acl(self, bucket_name: str, acl: Acl) -> None:
        acl_path = self._bucket_acl_path(bucket_name)
        acl_path.parent.mkdir(parents=True, exist_ok=True)
        acl_path.write_text(json.dumps(acl.to_dict(), indent=2), encoding="utf-8")
        self._bucket_acl_cache[bucket_name] = acl

    def set_bucket_canned_acl(self, bucket_name: str, canned_acl: str, owner: str) -> Acl:
        acl = create_canned_acl(canned_acl, owner)
        self.set_bucket_acl(bucket_name, acl)
        return acl

    def delete_bucket_acl(self, bucket_name: str) -> None:
        acl_path = self._bucket_acl_path(bucket_name)
        if acl_path.exists():
            acl_path.unlink()
        self._bucket_acl_cache.pop(bucket_name, None)

    def evaluate_bucket_acl(
        self,
        bucket_name: str,
        principal_id: Optional[str],
        action: str,
        is_authenticated: bool = True,
    ) -> bool:
        acl = self.get_bucket_acl(bucket_name)
        if not acl:
            return False
        allowed_actions = acl.get_allowed_actions(principal_id, is_authenticated)
        return action in allowed_actions

    def get_object_acl(self, bucket_name: str, object_key: str, object_metadata: Dict[str, Any]) -> Optional[Acl]:
        acl_data = object_metadata.get("__acl__")
        if not acl_data:
            return None
        try:
            return Acl.from_dict(acl_data)
        except (TypeError, KeyError):
            return None

    def create_object_acl_metadata(self, acl: Acl) -> Dict[str, Any]:
        return {"__acl__": acl.to_dict()}

    def evaluate_object_acl(
        self,
        object_metadata: Dict[str, Any],
        principal_id: Optional[str],
        action: str,
        is_authenticated: bool = True,
    ) -> bool:
        acl = self.get_object_acl("", "", object_metadata)
        if not acl:
            return False
        allowed_actions = acl.get_allowed_actions(principal_id, is_authenticated)
        return action in allowed_actions
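
A short sketch of how the canned ACLs above resolve to allowed actions (the owner name is illustrative):

    acl = create_canned_acl("public-read", owner="alice")
    # Anonymous callers only get the READ-derived actions.
    assert acl.get_allowed_actions(None, is_authenticated=False) == {"read", "list"}
    # The owner always receives the FULL_CONTROL action set.
    assert "write" in acl.get_allowed_actions("alice")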
984 python/app/admin_api.py Normal file
@@ -0,0 +1,984 @@
from __future__ import annotations

import ipaddress
import json
import logging
import re
import socket
import time
from typing import Any, Dict, Optional, Tuple
from urllib.parse import urlparse

import requests
from flask import Blueprint, Response, current_app, jsonify, request

from .connections import ConnectionStore
from .extensions import limiter
from .gc import GarbageCollector
from .integrity import IntegrityChecker
from .iam import IamError, Principal
from .replication import ReplicationManager
from .site_registry import PeerSite, SiteInfo, SiteRegistry
from .website_domains import WebsiteDomainStore, normalize_domain, is_valid_domain


def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
    """Check if a URL is safe to make requests to (not internal/private).

    Args:
        url: The URL to check.
        allow_internal: If True, allows internal/private IP addresses.
            Use for self-hosted deployments on internal networks.
    """
    try:
        parsed = urlparse(url)
        hostname = parsed.hostname
        if not hostname:
            return False
        cloud_metadata_hosts = {
            "metadata.google.internal",
            "169.254.169.254",
        }
        if hostname.lower() in cloud_metadata_hosts:
            return False
        if allow_internal:
            return True
        blocked_hosts = {
            "localhost",
            "127.0.0.1",
            "0.0.0.0",
            "::1",
            "[::1]",
        }
        if hostname.lower() in blocked_hosts:
            return False
        try:
            resolved_ip = socket.gethostbyname(hostname)
            ip = ipaddress.ip_address(resolved_ip)
            if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
                return False
        except (socket.gaierror, ValueError):
            return False
        return True
    except Exception:
        return False


def _validate_endpoint(endpoint: str) -> Optional[str]:
    """Validate endpoint URL format. Returns error message or None."""
    try:
        parsed = urlparse(endpoint)
        if not parsed.scheme or parsed.scheme not in ("http", "https"):
            return "Endpoint must be http or https URL"
        if not parsed.netloc:
            return "Endpoint must have a host"
        return None
    except Exception:
        return "Invalid endpoint URL"


def _validate_priority(priority: Any) -> Optional[str]:
    """Validate priority value. Returns error message or None."""
    try:
        p = int(priority)
        if p < 0 or p > 1000:
            return "Priority must be between 0 and 1000"
        return None
    except (TypeError, ValueError):
        return "Priority must be an integer"


def _validate_region(region: str) -> Optional[str]:
    """Validate region format. Returns error message or None."""
    if not re.match(r"^[a-z]{2,}-[a-z]+-\d+$", region):
        return "Region must match format like us-east-1"
    return None


def _validate_site_id(site_id: str) -> Optional[str]:
    """Validate site_id format. Returns error message or None."""
    if not site_id or len(site_id) > 63:
        return "site_id must be 1-63 characters"
    if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$', site_id):
        return "site_id must start with alphanumeric and contain only alphanumeric, hyphens, underscores"
    return None
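
Quick sanity checks for the validators above (each returns None on success and an error string on failure):

    assert _validate_endpoint("https://peer.example.com:9000") is None
    assert _validate_endpoint("ftp://peer") == "Endpoint must be http or https URL"
    assert _validate_priority(1001) == "Priority must be between 0 and 1000"
    assert _validate_region("us-east-1") is None
    assert _validate_site_id("site-a") is None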
logger = logging.getLogger(__name__)

admin_api_bp = Blueprint("admin_api", __name__, url_prefix="/admin")


def _require_principal() -> Tuple[Optional[Principal], Optional[Tuple[Dict[str, Any], int]]]:
    from .s3_api import _require_principal as s3_require_principal
    return s3_require_principal()


def _require_admin() -> Tuple[Optional[Principal], Optional[Tuple[Dict[str, Any], int]]]:
    principal, error = _require_principal()
    if error:
        return None, error

    try:
        _iam().authorize(principal, None, "iam:*")
        return principal, None
    except IamError:
        return None, _json_error("AccessDenied", "Admin access required", 403)


def _site_registry() -> SiteRegistry:
    return current_app.extensions["site_registry"]


def _connections() -> ConnectionStore:
    return current_app.extensions["connections"]


def _replication() -> ReplicationManager:
    return current_app.extensions["replication"]


def _iam():
    return current_app.extensions["iam"]


def _json_error(code: str, message: str, status: int) -> Tuple[Dict[str, Any], int]:
    return {"error": {"code": code, "message": message}}, status


def _get_admin_rate_limit() -> str:
    return current_app.config.get("RATE_LIMIT_ADMIN", "60 per minute")


@admin_api_bp.route("/site", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_local_site():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local_site = registry.get_local_site()

    if local_site:
        return jsonify(local_site.to_dict())

    config_site_id = current_app.config.get("SITE_ID")
    config_endpoint = current_app.config.get("SITE_ENDPOINT")

    if config_site_id:
        return jsonify({
            "site_id": config_site_id,
            "endpoint": config_endpoint or "",
            "region": current_app.config.get("SITE_REGION", "us-east-1"),
            "priority": current_app.config.get("SITE_PRIORITY", 100),
            "display_name": config_site_id,
            "source": "environment",
        })

    return _json_error("NotFound", "Local site not configured", 404)


@admin_api_bp.route("/site", methods=["PUT"])
@limiter.limit(lambda: _get_admin_rate_limit())
def update_local_site():
    principal, error = _require_admin()
    if error:
        return error

    payload = request.get_json(silent=True) or {}

    site_id = payload.get("site_id")
    endpoint = payload.get("endpoint")

    if not site_id:
        return _json_error("ValidationError", "site_id is required", 400)

    site_id_error = _validate_site_id(site_id)
    if site_id_error:
        return _json_error("ValidationError", site_id_error, 400)

    if endpoint:
        endpoint_error = _validate_endpoint(endpoint)
        if endpoint_error:
            return _json_error("ValidationError", endpoint_error, 400)

    if "priority" in payload:
        priority_error = _validate_priority(payload["priority"])
        if priority_error:
            return _json_error("ValidationError", priority_error, 400)

    if "region" in payload:
        region_error = _validate_region(payload["region"])
        if region_error:
            return _json_error("ValidationError", region_error, 400)

    registry = _site_registry()
    existing = registry.get_local_site()

    site = SiteInfo(
        site_id=site_id,
        endpoint=endpoint or "",
        region=payload.get("region", "us-east-1"),
        priority=payload.get("priority", 100),
        display_name=payload.get("display_name", site_id),
        created_at=existing.created_at if existing else None,
    )

    registry.set_local_site(site)

    logger.info("Local site updated", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(site.to_dict())


@admin_api_bp.route("/sites", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_all_sites():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local = registry.get_local_site()
    peers = registry.list_peers()

    result = {
        "local": local.to_dict() if local else None,
        "peers": [peer.to_dict() for peer in peers],
        "total_peers": len(peers),
    }

    return jsonify(result)


@admin_api_bp.route("/sites", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def register_peer_site():
    principal, error = _require_admin()
    if error:
        return error

    payload = request.get_json(silent=True) or {}

    site_id = payload.get("site_id")
    endpoint = payload.get("endpoint")

    if not site_id:
        return _json_error("ValidationError", "site_id is required", 400)

    site_id_error = _validate_site_id(site_id)
    if site_id_error:
        return _json_error("ValidationError", site_id_error, 400)

    if not endpoint:
        return _json_error("ValidationError", "endpoint is required", 400)

    endpoint_error = _validate_endpoint(endpoint)
    if endpoint_error:
        return _json_error("ValidationError", endpoint_error, 400)

    region = payload.get("region", "us-east-1")
    region_error = _validate_region(region)
    if region_error:
        return _json_error("ValidationError", region_error, 400)

    priority = payload.get("priority", 100)
    priority_error = _validate_priority(priority)
    if priority_error:
        return _json_error("ValidationError", priority_error, 400)

    registry = _site_registry()

    if registry.get_peer(site_id):
        return _json_error("AlreadyExists", f"Peer site '{site_id}' already exists", 409)

    connection_id = payload.get("connection_id")
    if connection_id:
        if not _connections().get(connection_id):
            return _json_error("ValidationError", f"Connection '{connection_id}' not found", 400)

    peer = PeerSite(
        site_id=site_id,
        endpoint=endpoint,
        region=region,
        priority=int(priority),
        display_name=payload.get("display_name", site_id),
        connection_id=connection_id,
    )

    registry.add_peer(peer)

    logger.info("Peer site registered", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(peer.to_dict()), 201


@admin_api_bp.route("/sites/<site_id>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    return jsonify(peer.to_dict())


@admin_api_bp.route("/sites/<site_id>", methods=["PUT"])
@limiter.limit(lambda: _get_admin_rate_limit())
def update_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    existing = registry.get_peer(site_id)

    if not existing:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    payload = request.get_json(silent=True) or {}

    if "endpoint" in payload:
        endpoint_error = _validate_endpoint(payload["endpoint"])
        if endpoint_error:
            return _json_error("ValidationError", endpoint_error, 400)

    if "priority" in payload:
        priority_error = _validate_priority(payload["priority"])
        if priority_error:
            return _json_error("ValidationError", priority_error, 400)

    if "region" in payload:
        region_error = _validate_region(payload["region"])
        if region_error:
            return _json_error("ValidationError", region_error, 400)

    if "connection_id" in payload:
        if payload["connection_id"] and not _connections().get(payload["connection_id"]):
            return _json_error("ValidationError", f"Connection '{payload['connection_id']}' not found", 400)

    peer = PeerSite(
        site_id=site_id,
        endpoint=payload.get("endpoint", existing.endpoint),
        region=payload.get("region", existing.region),
        priority=payload.get("priority", existing.priority),
        display_name=payload.get("display_name", existing.display_name),
        connection_id=payload.get("connection_id", existing.connection_id),
        created_at=existing.created_at,
        is_healthy=existing.is_healthy,
        last_health_check=existing.last_health_check,
    )

    registry.update_peer(peer)

    logger.info("Peer site updated", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(peer.to_dict())


@admin_api_bp.route("/sites/<site_id>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def delete_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()

    if not registry.delete_peer(site_id):
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    logger.info("Peer site deleted", extra={"site_id": site_id, "principal": principal.access_key})
    return Response(status=204)


@admin_api_bp.route("/sites/<site_id>/health", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def check_peer_health(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    is_healthy = False
    error_message = None

    if peer.connection_id:
        connection = _connections().get(peer.connection_id)
        if connection:
            is_healthy = _replication().check_endpoint_health(connection)
        else:
            error_message = f"Connection '{peer.connection_id}' not found"
    else:
        error_message = "No connection configured for this peer"

    registry.update_health(site_id, is_healthy)

    result = {
        "site_id": site_id,
        "is_healthy": is_healthy,
        "checked_at": time.time(),
    }
    if error_message:
        result["error"] = error_message

    return jsonify(result)


@admin_api_bp.route("/topology", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_topology():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local = registry.get_local_site()
    peers = registry.list_peers()

    sites = []

    if local:
        sites.append({
            **local.to_dict(),
            "is_local": True,
            "is_healthy": True,
        })

    for peer in peers:
        sites.append({
            **peer.to_dict(),
            "is_local": False,
        })

    sites.sort(key=lambda s: s.get("priority", 100))

    return jsonify({
        "sites": sites,
        "total": len(sites),
        "healthy_count": sum(1 for s in sites if s.get("is_healthy")),
    })


@admin_api_bp.route("/sites/<site_id>/bidirectional-status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def check_bidirectional_status(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    local_site = registry.get_local_site()
    replication = _replication()
    local_rules = replication.list_rules()

    local_bidir_rules = []
    for rule in local_rules:
        if rule.target_connection_id == peer.connection_id and rule.mode == "bidirectional":
            local_bidir_rules.append({
                "bucket_name": rule.bucket_name,
                "target_bucket": rule.target_bucket,
                "enabled": rule.enabled,
            })

    result = {
        "site_id": site_id,
        "local_site_id": local_site.site_id if local_site else None,
        "local_endpoint": local_site.endpoint if local_site else None,
        "local_bidirectional_rules": local_bidir_rules,
        "local_site_sync_enabled": current_app.config.get("SITE_SYNC_ENABLED", False),
        "remote_status": None,
        "issues": [],
        "is_fully_configured": False,
    }

    if not local_site or not local_site.site_id:
        result["issues"].append({
            "code": "NO_LOCAL_SITE_ID",
            "message": "Local site identity not configured",
            "severity": "error",
        })

    if not local_site or not local_site.endpoint:
        result["issues"].append({
            "code": "NO_LOCAL_ENDPOINT",
            "message": "Local site endpoint not configured (remote site cannot reach back)",
            "severity": "error",
        })

    if not peer.connection_id:
        result["issues"].append({
            "code": "NO_CONNECTION",
            "message": "No connection configured for this peer",
            "severity": "error",
        })
        return jsonify(result)

    connection = _connections().get(peer.connection_id)
    if not connection:
        result["issues"].append({
            "code": "CONNECTION_NOT_FOUND",
            "message": f"Connection '{peer.connection_id}' not found",
            "severity": "error",
        })
        return jsonify(result)

    if not local_bidir_rules:
        result["issues"].append({
            "code": "NO_LOCAL_BIDIRECTIONAL_RULES",
            "message": "No bidirectional replication rules configured on this site",
            "severity": "warning",
        })

    if not result["local_site_sync_enabled"]:
        result["issues"].append({
            "code": "SITE_SYNC_DISABLED",
            "message": "Site sync worker is disabled (SITE_SYNC_ENABLED=false). Pull operations will not work.",
            "severity": "warning",
        })

    if not replication.check_endpoint_health(connection):
        result["issues"].append({
            "code": "REMOTE_UNREACHABLE",
            "message": "Remote endpoint is not reachable",
            "severity": "error",
        })
        return jsonify(result)

    allow_internal = current_app.config.get("ALLOW_INTERNAL_ENDPOINTS", False)
    if not _is_safe_url(peer.endpoint, allow_internal=allow_internal):
        result["issues"].append({
            "code": "ENDPOINT_NOT_ALLOWED",
            "message": "Peer endpoint points to cloud metadata service (SSRF protection)",
            "severity": "error",
        })
        return jsonify(result)

    try:
        admin_url = peer.endpoint.rstrip("/") + "/admin/sites"
        resp = requests.get(
            admin_url,
            timeout=10,
            headers={
                "Accept": "application/json",
                "X-Access-Key": connection.access_key,
                "X-Secret-Key": connection.secret_key,
            },
        )

        if resp.status_code == 200:
            try:
                remote_data = resp.json()
                if not isinstance(remote_data, dict):
                    raise ValueError("Expected JSON object")
                remote_local = remote_data.get("local")
                if remote_local is not None and not isinstance(remote_local, dict):
                    raise ValueError("Expected 'local' to be an object")
                remote_peers = remote_data.get("peers", [])
                if not isinstance(remote_peers, list):
                    raise ValueError("Expected 'peers' to be a list")
            except (ValueError, json.JSONDecodeError) as e:
                logger.warning("Invalid JSON from remote admin API: %s", e)
                result["remote_status"] = {"reachable": True, "invalid_response": True}
                result["issues"].append({
                    "code": "REMOTE_INVALID_RESPONSE",
                    "message": "Remote admin API returned invalid JSON",
                    "severity": "warning",
                })
                return jsonify(result)

            result["remote_status"] = {
                "reachable": True,
                "local_site": remote_local,
                "site_sync_enabled": None,
                "has_peer_for_us": False,
                "peer_connection_configured": False,
                "has_bidirectional_rules_for_us": False,
            }

            for rp in remote_peers:
                if not isinstance(rp, dict):
                    continue
                if local_site and (
                    rp.get("site_id") == local_site.site_id or
                    rp.get("endpoint") == local_site.endpoint
                ):
                    result["remote_status"]["has_peer_for_us"] = True
                    result["remote_status"]["peer_connection_configured"] = bool(rp.get("connection_id"))
                    break

            if not result["remote_status"]["has_peer_for_us"]:
                result["issues"].append({
                    "code": "REMOTE_NO_PEER_FOR_US",
                    "message": "Remote site does not have this site registered as a peer",
                    "severity": "error",
                })
            elif not result["remote_status"]["peer_connection_configured"]:
                result["issues"].append({
                    "code": "REMOTE_NO_CONNECTION_FOR_US",
                    "message": "Remote site has us as peer but no connection configured (cannot push back)",
                    "severity": "error",
                })
        elif resp.status_code in (401, 403):
            result["remote_status"] = {
                "reachable": True,
                "admin_access_denied": True,
            }
            result["issues"].append({
                "code": "REMOTE_ADMIN_ACCESS_DENIED",
                "message": "Cannot verify remote configuration (admin access denied)",
                "severity": "warning",
            })
        else:
            result["remote_status"] = {
                "reachable": True,
                "admin_api_error": resp.status_code,
            }
            result["issues"].append({
                "code": "REMOTE_ADMIN_API_ERROR",
                "message": f"Remote admin API returned status {resp.status_code}",
                "severity": "warning",
            })
    except requests.RequestException as e:
        logger.warning("Remote admin API unreachable: %s", e)
        result["remote_status"] = {
            "reachable": False,
            "error": "Connection failed",
        }
        result["issues"].append({
            "code": "REMOTE_ADMIN_UNREACHABLE",
            "message": "Could not reach remote admin API",
            "severity": "warning",
        })
    except Exception as e:
        logger.warning("Error checking remote bidirectional status: %s", e, exc_info=True)
        result["issues"].append({
            "code": "VERIFICATION_ERROR",
            "message": "Internal error during verification",
            "severity": "warning",
        })

    error_issues = [i for i in result["issues"] if i["severity"] == "error"]
    result["is_fully_configured"] = len(error_issues) == 0 and len(local_bidir_rules) > 0

    return jsonify(result)
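
A hedged client-side sketch for the status endpoint above (host, site id and credentials are illustrative; the X-Access-Key/X-Secret-Key headers mirror the ones the handler itself sends to the remote admin API):

    import requests

    resp = requests.get(
        "http://localhost:5000/admin/sites/site-b/bidirectional-status",
        headers={"X-Access-Key": "admin", "X-Secret-Key": "secret"},
        timeout=10,
    )
    status = resp.json()
    for issue in status.get("issues", []):
        print(f"[{issue['severity']}] {issue['code']}: {issue['message']}")
    print("fully configured:", status.get("is_fully_configured"))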
def _website_domains() -> WebsiteDomainStore:
    return current_app.extensions["website_domains"]


def _storage():
    return current_app.extensions["object_storage"]


def _require_iam_action(action: str):
    principal, error = _require_principal()
    if error:
        return None, error
    try:
        _iam().authorize(principal, None, action)
        return principal, None
    except IamError:
        return None, _json_error("AccessDenied", f"Requires {action} permission", 403)


@admin_api_bp.route("/iam/users", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_list_users():
    principal, error = _require_iam_action("iam:list_users")
    if error:
        return error
    return jsonify({"users": _iam().list_users()})


@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user(identifier):
    principal, error = _require_iam_action("iam:get_user")
    if error:
        return error
    try:
        user_id = _iam().resolve_user_id(identifier)
        return jsonify(_iam().get_user_by_id(user_id))
    except IamError as exc:
        return _json_error("NotFound", str(exc), 404)


@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user_policies(identifier):
    principal, error = _require_iam_action("iam:get_policy")
    if error:
        return error
    try:
        return jsonify({"policies": _iam().get_user_policies(identifier)})
    except IamError as exc:
        return _json_error("NotFound", str(exc), 404)


@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_create_access_key(identifier):
    principal, error = _require_iam_action("iam:create_key")
    if error:
        return error
    try:
        result = _iam().create_access_key(identifier)
        logger.info("Access key created for %s by %s", identifier, principal.access_key)
        return jsonify(result), 201
    except IamError as exc:
        return _json_error("InvalidRequest", str(exc), 400)


@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_delete_access_key(identifier, access_key):
    principal, error = _require_iam_action("iam:delete_key")
    if error:
        return error
    try:
        _iam().delete_access_key(access_key)
        logger.info("Access key %s deleted by %s", access_key, principal.access_key)
        return "", 204
    except IamError as exc:
        return _json_error("InvalidRequest", str(exc), 400)


@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_disable_user(identifier):
    principal, error = _require_iam_action("iam:disable_user")
    if error:
        return error
    try:
        _iam().disable_user(identifier)
        logger.info("User %s disabled by %s", identifier, principal.access_key)
        return jsonify({"status": "disabled"})
    except IamError as exc:
        return _json_error("InvalidRequest", str(exc), 400)


@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_enable_user(identifier):
    principal, error = _require_iam_action("iam:disable_user")
    if error:
        return error
    try:
        _iam().enable_user(identifier)
        logger.info("User %s enabled by %s", identifier, principal.access_key)
        return jsonify({"status": "enabled"})
    except IamError as exc:
        return _json_error("InvalidRequest", str(exc), 400)

@admin_api_bp.route("/website-domains", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def list_website_domains():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
return jsonify(_website_domains().list_all())
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains", methods=["POST"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def create_website_domain():
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
domain = normalize_domain(payload.get("domain") or "")
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not domain:
|
||||
return _json_error("ValidationError", "domain is required", 400)
|
||||
if not is_valid_domain(domain):
|
||||
return _json_error("ValidationError", f"Invalid domain: '{domain}'", 400)
|
||||
if not bucket:
|
||||
return _json_error("ValidationError", "bucket is required", 400)
|
||||
storage = _storage()
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
|
||||
store = _website_domains()
|
||||
existing = store.get_bucket(domain)
|
||||
if existing:
|
||||
return _json_error("Conflict", f"Domain '{domain}' is already mapped to bucket '{existing}'", 409)
|
||||
store.set_mapping(domain, bucket)
|
||||
logger.info("Website domain mapping created: %s -> %s", domain, bucket)
|
||||
return jsonify({"domain": domain, "bucket": bucket}), 201
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["GET"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def get_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
bucket = _website_domains().get_bucket(domain)
|
||||
if not bucket:
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["PUT"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def update_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
payload = request.get_json(silent=True) or {}
|
||||
bucket = (payload.get("bucket") or "").strip()
|
||||
if not bucket:
|
||||
return _json_error("ValidationError", "bucket is required", 400)
|
||||
storage = _storage()
|
||||
if not storage.bucket_exists(bucket):
|
||||
return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
|
||||
store = _website_domains()
|
||||
if not store.get_bucket(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
store.set_mapping(domain, bucket)
|
||||
logger.info("Website domain mapping updated: %s -> %s", domain, bucket)
|
||||
return jsonify({"domain": domain, "bucket": bucket})
|
||||
|
||||
|
||||
@admin_api_bp.route("/website-domains/<domain>", methods=["DELETE"])
|
||||
@limiter.limit(lambda: _get_admin_rate_limit())
|
||||
def delete_website_domain(domain: str):
|
||||
principal, error = _require_admin()
|
||||
if error:
|
||||
return error
|
||||
if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
|
||||
return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
|
||||
domain = normalize_domain(domain)
|
||||
if not _website_domains().delete_mapping(domain):
|
||||
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
|
||||
logger.info("Website domain mapping deleted: %s", domain)
|
||||
return Response(status=204)
|
||||
|
||||
|
||||
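
A similar sketch for creating a domain mapping (host, values and auth headers are again illustrative):

    import requests

    resp = requests.post(
        "http://localhost:5000/admin/website-domains",
        json={"domain": "www.example.com", "bucket": "site-assets"},
        headers={"X-Access-Key": "admin", "X-Secret-Key": "secret"},
        timeout=10,
    )
    assert resp.status_code == 201  # 409 if the domain is already mapped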

def _gc() -> Optional[GarbageCollector]:
    return current_app.extensions.get("gc")


@admin_api_bp.route("/gc/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_status():
    principal, error = _require_admin()
    if error:
        return error
    gc = _gc()
    if not gc:
        return jsonify({"enabled": False, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})
    return jsonify(gc.get_status())


@admin_api_bp.route("/gc/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_run_now():
    principal, error = _require_admin()
    if error:
        return error
    gc = _gc()
    if not gc:
        return _json_error("InvalidRequest", "GC is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    started = gc.run_async(dry_run=payload.get("dry_run"))
    logger.info("GC manual run by %s", principal.access_key)
    if not started:
        return _json_error("Conflict", "GC is already in progress", 409)
    return jsonify({"status": "started"})


@admin_api_bp.route("/gc/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_history():
    principal, error = _require_admin()
    if error:
        return error
    gc = _gc()
    if not gc:
        return jsonify({"executions": []})
    limit = min(int(request.args.get("limit", 50)), 200)
    offset = int(request.args.get("offset", 0))
    records = gc.get_history(limit=limit, offset=offset)
    return jsonify({"executions": records})


def _integrity() -> Optional[IntegrityChecker]:
    return current_app.extensions.get("integrity")


@admin_api_bp.route("/integrity/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_status():
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        return jsonify({"enabled": False, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})
    return jsonify(checker.get_status())


@admin_api_bp.route("/integrity/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_run_now():
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        return _json_error("InvalidRequest", "Integrity checker is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    override_dry_run = payload.get("dry_run")
    override_auto_heal = payload.get("auto_heal")
    started = checker.run_async(
        auto_heal=override_auto_heal if override_auto_heal is not None else None,
        dry_run=override_dry_run if override_dry_run is not None else None,
    )
    logger.info("Integrity manual run by %s", principal.access_key)
    if not started:
        return _json_error("Conflict", "A scan is already in progress", 409)
    return jsonify({"status": "started"})


@admin_api_bp.route("/integrity/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_history():
    principal, error = _require_admin()
    if error:
        return error
    checker = _integrity()
    if not checker:
        return jsonify({"executions": []})
    limit = min(int(request.args.get("limit", 50)), 200)
    offset = int(request.args.get("offset", 0))
    records = checker.get_history(limit=limit, offset=offset)
    return jsonify({"executions": records})

@@ -1,23 +1,89 @@
|
||||
"""Bucket policy loader/enforcer with a subset of AWS semantics."""
|
||||
from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
from fnmatch import fnmatch
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from fnmatch import fnmatch, translate
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterable, List, Optional, Sequence
|
||||
from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
|
||||
|
||||
|
||||
RESOURCE_PREFIX = "arn:aws:s3:::"
|
||||
|
||||
|
||||
@lru_cache(maxsize=256)
|
||||
def _compile_pattern(pattern: str) -> Pattern[str]:
|
||||
return re.compile(translate(pattern), re.IGNORECASE)
|
||||
|
||||
|
||||
def _match_string_like(value: str, pattern: str) -> bool:
|
||||
compiled = _compile_pattern(pattern)
|
||||
return bool(compiled.match(value))
|
||||
|
||||
|
||||
def _ip_in_cidr(ip_str: str, cidr: str) -> bool:
|
||||
try:
|
||||
ip = ipaddress.ip_address(ip_str)
|
||||
network = ipaddress.ip_network(cidr, strict=False)
|
||||
return ip in network
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def _evaluate_condition_operator(
|
||||
operator: str,
|
||||
condition_key: str,
|
||||
condition_values: List[str],
|
||||
context: Dict[str, Any],
|
||||
) -> bool:
|
||||
context_value = context.get(condition_key)
|
||||
op_lower = operator.lower()
|
||||
if_exists = op_lower.endswith("ifexists")
|
||||
if if_exists:
|
||||
op_lower = op_lower[:-8]
|
||||
|
||||
if context_value is None:
|
||||
return if_exists
|
||||
|
||||
context_value_str = str(context_value)
|
||||
context_value_lower = context_value_str.lower()
|
||||
|
||||
if op_lower == "stringequals":
|
||||
return context_value_str in condition_values
|
||||
elif op_lower == "stringnotequals":
|
||||
return context_value_str not in condition_values
|
||||
elif op_lower == "stringequalsignorecase":
|
||||
return context_value_lower in [v.lower() for v in condition_values]
|
||||
elif op_lower == "stringnotequalsignorecase":
|
||||
return context_value_lower not in [v.lower() for v in condition_values]
|
||||
elif op_lower == "stringlike":
|
||||
return any(_match_string_like(context_value_str, p) for p in condition_values)
|
||||
elif op_lower == "stringnotlike":
|
||||
return not any(_match_string_like(context_value_str, p) for p in condition_values)
|
||||
elif op_lower == "ipaddress":
|
||||
return any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
|
||||
elif op_lower == "notipaddress":
|
||||
return not any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
|
||||
elif op_lower == "bool":
|
||||
bool_val = context_value_lower in ("true", "1", "yes")
|
||||
return str(bool_val).lower() in [v.lower() for v in condition_values]
|
||||
elif op_lower == "null":
|
||||
is_null = context_value is None or context_value == ""
|
||||
expected_null = condition_values[0].lower() in ("true", "1", "yes") if condition_values else True
|
||||
return is_null == expected_null
|
||||
|
||||
return False
|
||||
|
||||
ACTION_ALIASES = {
|
||||
# List actions
|
||||
"s3:listbucket": "list",
|
||||
"s3:listallmybuckets": "list",
|
||||
"s3:listbucketversions": "list",
|
||||
"s3:listmultipartuploads": "list",
|
||||
"s3:listparts": "list",
|
||||
# Read actions
|
||||
"s3:getobject": "read",
|
||||
"s3:getobjectversion": "read",
|
||||
"s3:getobjecttagging": "read",
|
||||
@@ -26,7 +92,6 @@ ACTION_ALIASES = {
|
||||
"s3:getbucketversioning": "read",
|
||||
"s3:headobject": "read",
|
||||
"s3:headbucket": "read",
|
||||
# Write actions
|
||||
"s3:putobject": "write",
|
||||
"s3:createbucket": "write",
|
||||
"s3:putobjecttagging": "write",
|
||||
@@ -36,26 +101,30 @@ ACTION_ALIASES = {
|
||||
"s3:completemultipartupload": "write",
|
||||
"s3:abortmultipartupload": "write",
|
||||
"s3:copyobject": "write",
|
||||
# Delete actions
|
||||
"s3:deleteobject": "delete",
|
||||
"s3:deleteobjectversion": "delete",
|
||||
"s3:deletebucket": "delete",
|
||||
"s3:deleteobjecttagging": "delete",
|
||||
# Share actions (ACL)
|
||||
"s3:putobjectacl": "share",
|
||||
"s3:putbucketacl": "share",
|
||||
"s3:getbucketacl": "share",
|
||||
# Policy actions
|
||||
"s3:putbucketpolicy": "policy",
|
||||
"s3:getbucketpolicy": "policy",
|
||||
"s3:deletebucketpolicy": "policy",
|
||||
# Replication actions
|
||||
"s3:getreplicationconfiguration": "replication",
|
||||
"s3:putreplicationconfiguration": "replication",
|
||||
"s3:deletereplicationconfiguration": "replication",
|
||||
"s3:replicateobject": "replication",
|
||||
"s3:replicatetags": "replication",
|
||||
"s3:replicatedelete": "replication",
|
||||
"s3:getlifecycleconfiguration": "lifecycle",
|
||||
"s3:putlifecycleconfiguration": "lifecycle",
|
||||
"s3:deletelifecycleconfiguration": "lifecycle",
|
||||
"s3:getbucketlifecycle": "lifecycle",
|
||||
"s3:putbucketlifecycle": "lifecycle",
|
||||
"s3:getbucketcors": "cors",
|
||||
"s3:putbucketcors": "cors",
|
||||
"s3:deletebucketcors": "cors",
|
||||
}
|
||||
|
||||
|
||||
@@ -133,7 +202,20 @@ class BucketPolicyStatement:
|
||||
effect: str
|
||||
principals: List[str] | str
|
||||
actions: List[str]
|
||||
resources: List[tuple[str | None, str | None]]
|
||||
resources: List[Tuple[str | None, str | None]]
|
||||
conditions: Dict[str, Dict[str, List[str]]] = field(default_factory=dict)
|
||||
_compiled_patterns: List[Tuple[str | None, Optional[Pattern[str]]]] | None = None
|
||||
|
||||
def _get_compiled_patterns(self) -> List[Tuple[str | None, Optional[Pattern[str]]]]:
|
||||
if self._compiled_patterns is None:
|
||||
self._compiled_patterns = []
|
||||
for resource_bucket, key_pattern in self.resources:
|
||||
if key_pattern is None:
|
||||
self._compiled_patterns.append((resource_bucket, None))
|
||||
else:
|
||||
regex_pattern = translate(key_pattern)
|
||||
self._compiled_patterns.append((resource_bucket, re.compile(regex_pattern)))
|
||||
return self._compiled_patterns
|
||||
|
||||
def matches_principal(self, access_key: Optional[str]) -> bool:
|
||||
if self.principals == "*":
|
||||
@@ -149,18 +231,29 @@ class BucketPolicyStatement:
|
||||
def matches_resource(self, bucket: Optional[str], object_key: Optional[str]) -> bool:
|
||||
bucket = (bucket or "*").lower()
|
||||
key = object_key or ""
|
||||
for resource_bucket, key_pattern in self.resources:
|
||||
for resource_bucket, compiled_pattern in self._get_compiled_patterns():
|
||||
resource_bucket = (resource_bucket or "*").lower()
|
||||
if resource_bucket not in {"*", bucket}:
|
||||
continue
|
||||
if key_pattern is None:
|
||||
if compiled_pattern is None:
|
||||
if not key:
|
||||
return True
|
||||
continue
|
||||
if fnmatch(key, key_pattern):
|
||||
if compiled_pattern.match(key):
|
||||
return True
|
||||
return False
|
||||
|
||||
def matches_condition(self, context: Optional[Dict[str, Any]]) -> bool:
|
||||
if not self.conditions:
|
||||
return True
|
||||
if context is None:
|
||||
context = {}
|
||||
for operator, key_values in self.conditions.items():
|
||||
for condition_key, condition_values in key_values.items():
|
||||
if not _evaluate_condition_operator(operator, condition_key, condition_values, context):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class BucketPolicyStore:
|
||||
"""Loads bucket policies from disk and evaluates statements."""
|
||||
@@ -174,8 +267,16 @@ class BucketPolicyStore:
|
||||
self._policies: Dict[str, List[BucketPolicyStatement]] = {}
|
||||
self._load()
|
||||
self._last_mtime = self._current_mtime()
|
||||
# Performance: Avoid stat() on every request
|
||||
self._last_stat_check = 0.0
|
||||
self._stat_check_interval = float(os.environ.get("BUCKET_POLICY_STAT_CHECK_INTERVAL_SECONDS", "2.0"))
|
||||
|
||||
def maybe_reload(self) -> None:
|
||||
# Performance: Skip stat check if we checked recently
|
||||
now = time.time()
|
||||
if now - self._last_stat_check < self._stat_check_interval:
|
||||
return
|
||||
self._last_stat_check = now
|
||||
current = self._current_mtime()
|
||||
if current is None or current == self._last_mtime:
|
||||
return
|
||||
@@ -188,13 +289,13 @@ class BucketPolicyStore:
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
def evaluate(
|
||||
self,
|
||||
access_key: Optional[str],
|
||||
bucket: Optional[str],
|
||||
object_key: Optional[str],
|
||||
action: str,
|
||||
context: Optional[Dict[str, Any]] = None,
|
||||
) -> str | None:
|
||||
bucket = (bucket or "").lower()
|
||||
statements = self._policies.get(bucket) or []
|
||||
@@ -206,6 +307,8 @@ class BucketPolicyStore:
|
||||
continue
|
||||
if not statement.matches_resource(bucket, object_key):
|
||||
continue
|
||||
if not statement.matches_condition(context):
|
||||
continue
|
||||
if statement.effect == "deny":
|
||||
return "deny"
|
||||
decision = "allow"
|
||||
@@ -229,7 +332,6 @@ class BucketPolicyStore:
|
||||
self._policies.pop(bucket, None)
|
||||
self._persist()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
def _load(self) -> None:
|
||||
try:
|
||||
content = self.policy_path.read_text(encoding='utf-8')
|
||||
@@ -271,6 +373,7 @@ class BucketPolicyStore:
|
||||
if not resources:
|
||||
continue
|
||||
effect = statement.get("Effect", "Allow").lower()
|
||||
conditions = self._normalize_conditions(statement.get("Condition", {}))
|
||||
statements.append(
|
||||
BucketPolicyStatement(
|
||||
sid=statement.get("Sid"),
|
||||
@@ -278,6 +381,24 @@ class BucketPolicyStore:
|
||||
principals=principals,
|
||||
actions=actions or ["*"],
|
||||
resources=resources,
|
||||
conditions=conditions,
|
||||
)
|
||||
)
|
||||
return statements
|
||||
return statements
|
||||
|
||||
+    def _normalize_conditions(self, condition_block: Dict[str, Any]) -> Dict[str, Dict[str, List[str]]]:
+        if not condition_block or not isinstance(condition_block, dict):
+            return {}
+        normalized: Dict[str, Dict[str, List[str]]] = {}
+        for operator, key_values in condition_block.items():
+            if not isinstance(key_values, dict):
+                continue
+            normalized[operator] = {}
+            for cond_key, cond_values in key_values.items():
+                if isinstance(cond_values, str):
+                    normalized[operator][cond_key] = [cond_values]
+                elif isinstance(cond_values, list):
+                    normalized[operator][cond_key] = [str(v) for v in cond_values]
+                else:
+                    normalized[operator][cond_key] = [str(cond_values)]
+        return normalized
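To make the normalization rules concrete, a sketch with hypothetical input (scalars become one-element lists, list items and other scalars are coerced to str, non-dict operator blocks are skipped):

# Illustrative only.
raw = {
    "StringEquals": {"s3:prefix": "logs/"},                          # -> ["logs/"]
    "NumericLessThan": {"s3:max-keys": 100},                         # -> ["100"]
    "StringLike": {"aws:Referer": ["*.example.com", "example.com"]},
    "Bool": "not-a-dict",                                            # skipped
}
# _normalize_conditions(raw) == {
#     "StringEquals": {"s3:prefix": ["logs/"]},
#     "NumericLessThan": {"s3:max-keys": ["100"]},
#     "StringLike": {"aws:Referer": ["*.example.com", "example.com"]},
# }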
python/app/compression.py (new file, 109 lines)
@@ -0,0 +1,109 @@
from __future__ import annotations

import gzip
import io
from typing import Callable, Iterable, List, Tuple

COMPRESSIBLE_MIMES = frozenset([
    'application/json',
    'application/javascript',
    'application/xml',
    'text/html',
    'text/css',
    'text/plain',
    'text/xml',
    'text/javascript',
    'application/x-ndjson',
])

MIN_SIZE_FOR_COMPRESSION = 500


class GzipMiddleware:
    def __init__(self, app: Callable, compression_level: int = 6, min_size: int = MIN_SIZE_FOR_COMPRESSION):
        self.app = app
        self.compression_level = compression_level
        self.min_size = min_size

    def __call__(self, environ: dict, start_response: Callable) -> Iterable[bytes]:
        accept_encoding = environ.get('HTTP_ACCEPT_ENCODING', '')
        if 'gzip' not in accept_encoding.lower():
            return self.app(environ, start_response)

        response_started = False
        status_line = None  # original status string, kept so the reason phrase survives
        response_headers: List[Tuple[str, str]] = []
        content_type = None
        content_length = None
        should_compress = False
        passthrough = False
        exc_info_holder = [None]

        def custom_start_response(status: str, headers: List[Tuple[str, str]], exc_info=None):
            nonlocal response_started, status_line, response_headers, content_type, content_length, should_compress, passthrough
            response_started = True
            status_line = status
            response_headers = list(headers)
            exc_info_holder[0] = exc_info

            for name, value in headers:
                name_lower = name.lower()
                if name_lower == 'content-type':
                    content_type = value.split(';')[0].strip().lower()
                elif name_lower == 'content-length':
                    try:
                        content_length = int(value)
                    except (ValueError, TypeError):
                        pass
                elif name_lower == 'content-encoding':
                    # Already encoded upstream: forward unchanged.
                    passthrough = True
                    return start_response(status, headers, exc_info)
                elif name_lower == 'x-stream-response':
                    # Streaming responses must not be buffered for compression.
                    passthrough = True
                    return start_response(status, headers, exc_info)

            if content_type and content_type in COMPRESSIBLE_MIMES:
                if content_length is None or content_length >= self.min_size:
                    should_compress = True
            else:
                passthrough = True
                return start_response(status, headers, exc_info)

            # Buffering: start_response is deferred, so no write() callable is returned.
            return None

        app_iter = self.app(environ, custom_start_response)

        if passthrough:
            return app_iter

        response_body = b''.join(app_iter)

        if not response_started:
            return [response_body]

        if should_compress and len(response_body) >= self.min_size:
            buf = io.BytesIO()
            with gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=self.compression_level) as gz:
                gz.write(response_body)
            compressed = buf.getvalue()

            # Only swap in the compressed body when it is actually smaller.
            if len(compressed) < len(response_body):
                response_body = compressed
                new_headers = []
                for name, value in response_headers:
                    if name.lower() not in ('content-length', 'content-encoding'):
                        new_headers.append((name, value))
                new_headers.append(('Content-Encoding', 'gzip'))
                new_headers.append(('Content-Length', str(len(response_body))))
                new_headers.append(('Vary', 'Accept-Encoding'))
                response_headers = new_headers

        # Reuse the original status line rather than rebuilding it from the code,
        # which previously mapped unlisted status codes to "Unknown".
        start_response(status_line, response_headers, exc_info_holder[0])
        return [response_body]
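Usage sketch (illustrative, not part of the patch; the import path is assumed from the file location). Any WSGI app can be wrapped; clients without gzip in Accept-Encoding get the untouched response:

from app.compression import GzipMiddleware  # assumed import path

def hello_app(environ, start_response):
    body = b'{"message": "hello"}' * 50  # comfortably above min_size
    start_response('200 OK', [('Content-Type', 'application/json'),
                              ('Content-Length', str(len(body)))])
    return [body]

app = GzipMiddleware(hello_app, compression_level=6, min_size=500)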
python/app/config.py (new file, 683 lines)
@@ -0,0 +1,683 @@
from __future__ import annotations

import os
import re
import secrets
import shutil
import sys
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional

import psutil


def _calculate_auto_threads() -> int:
    cpu_count = psutil.cpu_count(logical=True) or 4
    return max(1, min(cpu_count * 2, 64))


def _calculate_auto_connection_limit() -> int:
    available_mb = psutil.virtual_memory().available / (1024 * 1024)
    calculated = int(available_mb / 5)
    return max(20, min(calculated, 1000))


def _calculate_auto_backlog(connection_limit: int) -> int:
    return max(128, min(connection_limit * 2, 4096))


def _validate_rate_limit(value: str) -> str:
    pattern = r"^\d+\s+per\s+(second|minute|hour|day)$"
    if not re.match(pattern, value):
        raise ValueError(f"Invalid rate limit format: {value}. Expected format: '200 per minute'")
    return value
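Examples of strings this validator accepts and rejects (illustrative):

_validate_rate_limit("200 per minute")   # returned unchanged
_validate_rate_limit("10 per second")    # returned unchanged
_validate_rate_limit("200/minute")       # raises ValueError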

if getattr(sys, "frozen", False):
    # Running in a PyInstaller bundle
    PROJECT_ROOT = Path(sys._MEIPASS)
else:
    # Running in a normal Python environment
    PROJECT_ROOT = Path(__file__).resolve().parent.parent


def _prepare_config_file(active_path: Path, legacy_path: Optional[Path] = None) -> Path:
    """Ensure config directories exist and migrate legacy files when possible."""
    active_path = Path(active_path)
    active_path.parent.mkdir(parents=True, exist_ok=True)
    if legacy_path:
        legacy_path = Path(legacy_path)
        if not active_path.exists() and legacy_path.exists():
            legacy_path.parent.mkdir(parents=True, exist_ok=True)
            try:
                shutil.move(str(legacy_path), str(active_path))
            except OSError:
                # Fall back to copy + best-effort unlink (e.g. across filesystems).
                shutil.copy2(legacy_path, active_path)
                try:
                    legacy_path.unlink(missing_ok=True)
                except OSError:
                    pass
    return active_path

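A usage sketch with hypothetical paths, showing the one-time migration: if only the legacy file exists it is moved (or copied, then unlinked) into the active location; otherwise the active path is returned untouched.

active = Path("/srv/data/.myfsio.sys/config/iam.json")   # hypothetical
legacy = Path("/srv/data/iam.json")                       # hypothetical
path = _prepare_config_file(active, legacy_path=legacy)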
@dataclass
class AppConfig:
    storage_root: Path
    max_upload_size: int
    ui_page_size: int
    secret_key: str
    iam_config_path: Path
    bucket_policy_path: Path
    api_base_url: Optional[str]
    aws_region: str
    aws_service: str
    ui_enforce_bucket_policies: bool
    log_level: str
    log_to_file: bool
    log_path: Path
    log_max_bytes: int
    log_backup_count: int
    ratelimit_default: str
    ratelimit_storage_uri: str
    ratelimit_list_buckets: str
    ratelimit_bucket_ops: str
    ratelimit_object_ops: str
    ratelimit_head_ops: str
    cors_origins: list[str]
    cors_methods: list[str]
    cors_allow_headers: list[str]
    cors_expose_headers: list[str]
    session_lifetime_days: int
    auth_max_attempts: int
    auth_lockout_minutes: int
    bulk_delete_max_keys: int
    secret_ttl_seconds: int
    stream_chunk_size: int
    multipart_min_part_size: int
    bucket_stats_cache_ttl: int
    object_cache_ttl: int
    encryption_enabled: bool
    encryption_master_key_path: Path
    kms_enabled: bool
    kms_keys_path: Path
    default_encryption_algorithm: str
    display_timezone: str
    lifecycle_enabled: bool
    lifecycle_interval_seconds: int
    metrics_history_enabled: bool
    metrics_history_retention_hours: int
    metrics_history_interval_minutes: int
    operation_metrics_enabled: bool
    operation_metrics_interval_minutes: int
    operation_metrics_retention_hours: int
    server_threads: int
    server_connection_limit: int
    server_backlog: int
    server_channel_timeout: int
    server_max_buffer_size: int
    server_threads_auto: bool
    server_connection_limit_auto: bool
    server_backlog_auto: bool
    site_sync_enabled: bool
    site_sync_interval_seconds: int
    site_sync_batch_size: int
    sigv4_timestamp_tolerance_seconds: int
    presigned_url_min_expiry_seconds: int
    presigned_url_max_expiry_seconds: int
    replication_connect_timeout_seconds: int
    replication_read_timeout_seconds: int
    replication_max_retries: int
    replication_streaming_threshold_bytes: int
    replication_max_failures_per_bucket: int
    site_sync_connect_timeout_seconds: int
    site_sync_read_timeout_seconds: int
    site_sync_max_retries: int
    site_sync_clock_skew_tolerance_seconds: float
    object_key_max_length_bytes: int
    object_cache_max_size: int
    meta_read_cache_max: int
    bucket_config_cache_ttl_seconds: float
    object_tag_limit: int
    encryption_chunk_size_bytes: int
    kms_generate_data_key_min_bytes: int
    kms_generate_data_key_max_bytes: int
    lifecycle_max_history_per_bucket: int
    site_id: Optional[str]
    site_endpoint: Optional[str]
    site_region: str
    site_priority: int
    ratelimit_admin: str
    num_trusted_proxies: int
    allowed_redirect_hosts: list[str]
    allow_internal_endpoints: bool
    website_hosting_enabled: bool
    gc_enabled: bool
    gc_interval_hours: float
    gc_temp_file_max_age_hours: float
    gc_multipart_max_age_days: int
    gc_lock_file_max_age_hours: float
    gc_dry_run: bool
    gc_io_throttle_ms: int
    integrity_enabled: bool
    integrity_interval_hours: float
    integrity_batch_size: int
    integrity_auto_heal: bool
    integrity_dry_run: bool
    integrity_io_throttle_ms: int

    @classmethod
    def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
        overrides = overrides or {}

        def _get(name: str, default: Any) -> Any:
            return overrides.get(name, os.getenv(name, default))

|
||||
storage_root = Path(_get("STORAGE_ROOT", PROJECT_ROOT / "data")).resolve()
|
||||
max_upload_size = int(_get("MAX_UPLOAD_SIZE", 1024 * 1024 * 1024))
|
||||
ui_page_size = int(_get("UI_PAGE_SIZE", 100))
|
||||
auth_max_attempts = int(_get("AUTH_MAX_ATTEMPTS", 5))
|
||||
auth_lockout_minutes = int(_get("AUTH_LOCKOUT_MINUTES", 15))
|
||||
bulk_delete_max_keys = int(_get("BULK_DELETE_MAX_KEYS", 500))
|
||||
secret_ttl_seconds = int(_get("SECRET_TTL_SECONDS", 300))
|
||||
stream_chunk_size = int(_get("STREAM_CHUNK_SIZE", 64 * 1024))
|
||||
multipart_min_part_size = int(_get("MULTIPART_MIN_PART_SIZE", 5 * 1024 * 1024))
|
||||
lifecycle_enabled = _get("LIFECYCLE_ENABLED", "false").lower() in ("true", "1", "yes")
|
||||
lifecycle_interval_seconds = int(_get("LIFECYCLE_INTERVAL_SECONDS", 3600))
|
||||
default_secret = "dev-secret-key"
|
||||
secret_key = str(_get("SECRET_KEY", default_secret))
|
||||
|
||||
if not secret_key or secret_key == default_secret:
|
||||
secret_file = storage_root / ".myfsio.sys" / "config" / ".secret"
|
||||
if secret_file.exists():
|
||||
secret_key = secret_file.read_text().strip()
|
||||
else:
|
||||
generated = secrets.token_urlsafe(32)
|
||||
if secret_key == default_secret:
|
||||
warnings.warn("Using insecure default SECRET_KEY. A random value has been generated and persisted; set SECRET_KEY for production", RuntimeWarning)
|
||||
try:
|
||||
secret_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
secret_file.write_text(generated)
|
||||
try:
|
||||
os.chmod(secret_file, 0o600)
|
||||
except OSError:
|
||||
pass
|
||||
secret_key = generated
|
||||
except OSError:
|
||||
secret_key = generated
|
||||
        iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
        bucket_policy_override = "BUCKET_POLICY_PATH" in overrides or "BUCKET_POLICY_PATH" in os.environ

        default_iam_path = storage_root / ".myfsio.sys" / "config" / "iam.json"
        default_bucket_policy_path = storage_root / ".myfsio.sys" / "config" / "bucket_policies.json"

        iam_config_path = Path(_get("IAM_CONFIG", default_iam_path)).resolve()
        bucket_policy_path = Path(_get("BUCKET_POLICY_PATH", default_bucket_policy_path)).resolve()

        iam_config_path = _prepare_config_file(
            iam_config_path,
            legacy_path=None if iam_env_override else storage_root / "iam.json",
        )
        bucket_policy_path = _prepare_config_file(
            bucket_policy_path,
            legacy_path=None if bucket_policy_override else storage_root / "bucket_policies.json",
        )
        api_base_url = _get("API_BASE_URL", None)
        if api_base_url:
            api_base_url = str(api_base_url)

        aws_region = str(_get("AWS_REGION", "us-east-1"))
        aws_service = str(_get("AWS_SERVICE", "s3"))
        enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
        log_level = str(_get("LOG_LEVEL", "INFO")).upper()
        log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
        log_dir = Path(_get("LOG_DIR", storage_root.parent / "logs")).resolve()
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / str(_get("LOG_FILE", "app.log"))
        log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
        log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
        ratelimit_default = _validate_rate_limit(str(_get("RATE_LIMIT_DEFAULT", "200 per minute")))
        ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))
        ratelimit_list_buckets = _validate_rate_limit(str(_get("RATE_LIMIT_LIST_BUCKETS", "60 per minute")))
        ratelimit_bucket_ops = _validate_rate_limit(str(_get("RATE_LIMIT_BUCKET_OPS", "120 per minute")))
        ratelimit_object_ops = _validate_rate_limit(str(_get("RATE_LIMIT_OBJECT_OPS", "240 per minute")))
        ratelimit_head_ops = _validate_rate_limit(str(_get("RATE_LIMIT_HEAD_OPS", "100 per minute")))

        def _csv(value: str, default: list[str]) -> list[str]:
            if not value:
                return default
            parts = [segment.strip() for segment in value.split(",") if segment.strip()]
            return parts or default

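Illustrative behaviour of the helper:

_csv("https://a.example, https://b.example", ["*"])  # ["https://a.example", "https://b.example"]
_csv("", ["*"])                                      # ["*"] (falls back to default)
_csv(" , ,", ["*"])                                  # ["*"] (only empty segments)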
        cors_origins = _csv(str(_get("CORS_ORIGINS", "*")), ["*"])
        cors_methods = _csv(str(_get("CORS_METHODS", "GET,PUT,POST,DELETE,OPTIONS,HEAD")), ["GET", "PUT", "POST", "DELETE", "OPTIONS", "HEAD"])
        cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
        cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
        session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
        bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
        object_cache_ttl = int(_get("OBJECT_CACHE_TTL", 60))

        encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
        encryption_master_key_path = Path(_get("ENCRYPTION_MASTER_KEY_PATH", encryption_keys_dir / "master.key")).resolve()
        kms_enabled = str(_get("KMS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
        default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
        display_timezone = str(_get("DISPLAY_TIMEZONE", "UTC"))
        metrics_history_enabled = str(_get("METRICS_HISTORY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        metrics_history_retention_hours = int(_get("METRICS_HISTORY_RETENTION_HOURS", 24))
        metrics_history_interval_minutes = int(_get("METRICS_HISTORY_INTERVAL_MINUTES", 5))
        operation_metrics_enabled = str(_get("OPERATION_METRICS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
        operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))

        _raw_threads = int(_get("SERVER_THREADS", 0))
        if _raw_threads == 0:
            server_threads = _calculate_auto_threads()
            server_threads_auto = True
        else:
            server_threads = _raw_threads
            server_threads_auto = False

        _raw_conn_limit = int(_get("SERVER_CONNECTION_LIMIT", 0))
        if _raw_conn_limit == 0:
            server_connection_limit = _calculate_auto_connection_limit()
            server_connection_limit_auto = True
        else:
            server_connection_limit = _raw_conn_limit
            server_connection_limit_auto = False

        _raw_backlog = int(_get("SERVER_BACKLOG", 0))
        if _raw_backlog == 0:
            server_backlog = _calculate_auto_backlog(server_connection_limit)
            server_backlog_auto = True
        else:
            server_backlog = _raw_backlog
            server_backlog_auto = False

        server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
        server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
        site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
        site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))

        sigv4_timestamp_tolerance_seconds = int(_get("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900))
        presigned_url_min_expiry_seconds = int(_get("PRESIGNED_URL_MIN_EXPIRY_SECONDS", 1))
        presigned_url_max_expiry_seconds = int(_get("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604800))
        replication_connect_timeout_seconds = int(_get("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5))
        replication_read_timeout_seconds = int(_get("REPLICATION_READ_TIMEOUT_SECONDS", 30))
        replication_max_retries = int(_get("REPLICATION_MAX_RETRIES", 2))
        replication_streaming_threshold_bytes = int(_get("REPLICATION_STREAMING_THRESHOLD_BYTES", 10 * 1024 * 1024))
        replication_max_failures_per_bucket = int(_get("REPLICATION_MAX_FAILURES_PER_BUCKET", 50))
        site_sync_connect_timeout_seconds = int(_get("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10))
        site_sync_read_timeout_seconds = int(_get("SITE_SYNC_READ_TIMEOUT_SECONDS", 120))
        site_sync_max_retries = int(_get("SITE_SYNC_MAX_RETRIES", 2))
        site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
        object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
        object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
        meta_read_cache_max = int(_get("META_READ_CACHE_MAX", 2048))
        bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
        object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
        encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
        kms_generate_data_key_min_bytes = int(_get("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1))
        kms_generate_data_key_max_bytes = int(_get("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024))
        lifecycle_max_history_per_bucket = int(_get("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50))

        site_id_raw = _get("SITE_ID", None)
        site_id = str(site_id_raw).strip() if site_id_raw else None
        site_endpoint_raw = _get("SITE_ENDPOINT", None)
        site_endpoint = str(site_endpoint_raw).strip() if site_endpoint_raw else None
        site_region = str(_get("SITE_REGION", "us-east-1"))
        site_priority = int(_get("SITE_PRIORITY", 100))
        ratelimit_admin = _validate_rate_limit(str(_get("RATE_LIMIT_ADMIN", "60 per minute")))
        num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 1))
        allowed_redirect_hosts_raw = _get("ALLOWED_REDIRECT_HOSTS", "")
        allowed_redirect_hosts = [h.strip() for h in str(allowed_redirect_hosts_raw).split(",") if h.strip()]
        allow_internal_endpoints = str(_get("ALLOW_INTERNAL_ENDPOINTS", "0")).lower() in {"1", "true", "yes", "on"}
        website_hosting_enabled = str(_get("WEBSITE_HOSTING_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        gc_enabled = str(_get("GC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        gc_interval_hours = float(_get("GC_INTERVAL_HOURS", 6.0))
        gc_temp_file_max_age_hours = float(_get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0))
        gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
        gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
        gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
        gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
        integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
        integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
        integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
        integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
        integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
        integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))

        return cls(storage_root=storage_root,
                   max_upload_size=max_upload_size,
                   ui_page_size=ui_page_size,
                   secret_key=secret_key,
                   iam_config_path=iam_config_path,
                   bucket_policy_path=bucket_policy_path,
                   api_base_url=api_base_url,
                   aws_region=aws_region,
                   aws_service=aws_service,
                   ui_enforce_bucket_policies=enforce_ui_policies,
                   log_level=log_level,
                   log_to_file=log_to_file,
                   log_path=log_path,
                   log_max_bytes=log_max_bytes,
                   log_backup_count=log_backup_count,
                   ratelimit_default=ratelimit_default,
                   ratelimit_storage_uri=ratelimit_storage_uri,
                   ratelimit_list_buckets=ratelimit_list_buckets,
                   ratelimit_bucket_ops=ratelimit_bucket_ops,
                   ratelimit_object_ops=ratelimit_object_ops,
                   ratelimit_head_ops=ratelimit_head_ops,
                   cors_origins=cors_origins,
                   cors_methods=cors_methods,
                   cors_allow_headers=cors_allow_headers,
                   cors_expose_headers=cors_expose_headers,
                   session_lifetime_days=session_lifetime_days,
                   auth_max_attempts=auth_max_attempts,
                   auth_lockout_minutes=auth_lockout_minutes,
                   bulk_delete_max_keys=bulk_delete_max_keys,
                   secret_ttl_seconds=secret_ttl_seconds,
                   stream_chunk_size=stream_chunk_size,
                   multipart_min_part_size=multipart_min_part_size,
                   bucket_stats_cache_ttl=bucket_stats_cache_ttl,
                   object_cache_ttl=object_cache_ttl,
                   encryption_enabled=encryption_enabled,
                   encryption_master_key_path=encryption_master_key_path,
                   kms_enabled=kms_enabled,
                   kms_keys_path=kms_keys_path,
                   default_encryption_algorithm=default_encryption_algorithm,
                   display_timezone=display_timezone,
                   lifecycle_enabled=lifecycle_enabled,
                   lifecycle_interval_seconds=lifecycle_interval_seconds,
                   metrics_history_enabled=metrics_history_enabled,
                   metrics_history_retention_hours=metrics_history_retention_hours,
                   metrics_history_interval_minutes=metrics_history_interval_minutes,
                   operation_metrics_enabled=operation_metrics_enabled,
                   operation_metrics_interval_minutes=operation_metrics_interval_minutes,
                   operation_metrics_retention_hours=operation_metrics_retention_hours,
                   server_threads=server_threads,
                   server_connection_limit=server_connection_limit,
                   server_backlog=server_backlog,
                   server_channel_timeout=server_channel_timeout,
                   server_max_buffer_size=server_max_buffer_size,
                   server_threads_auto=server_threads_auto,
                   server_connection_limit_auto=server_connection_limit_auto,
                   server_backlog_auto=server_backlog_auto,
                   site_sync_enabled=site_sync_enabled,
                   site_sync_interval_seconds=site_sync_interval_seconds,
                   site_sync_batch_size=site_sync_batch_size,
                   sigv4_timestamp_tolerance_seconds=sigv4_timestamp_tolerance_seconds,
                   presigned_url_min_expiry_seconds=presigned_url_min_expiry_seconds,
                   presigned_url_max_expiry_seconds=presigned_url_max_expiry_seconds,
                   replication_connect_timeout_seconds=replication_connect_timeout_seconds,
                   replication_read_timeout_seconds=replication_read_timeout_seconds,
                   replication_max_retries=replication_max_retries,
                   replication_streaming_threshold_bytes=replication_streaming_threshold_bytes,
                   replication_max_failures_per_bucket=replication_max_failures_per_bucket,
                   site_sync_connect_timeout_seconds=site_sync_connect_timeout_seconds,
                   site_sync_read_timeout_seconds=site_sync_read_timeout_seconds,
                   site_sync_max_retries=site_sync_max_retries,
                   site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
                   object_key_max_length_bytes=object_key_max_length_bytes,
                   object_cache_max_size=object_cache_max_size,
                   meta_read_cache_max=meta_read_cache_max,
                   bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
                   object_tag_limit=object_tag_limit,
                   encryption_chunk_size_bytes=encryption_chunk_size_bytes,
                   kms_generate_data_key_min_bytes=kms_generate_data_key_min_bytes,
                   kms_generate_data_key_max_bytes=kms_generate_data_key_max_bytes,
                   lifecycle_max_history_per_bucket=lifecycle_max_history_per_bucket,
                   site_id=site_id,
                   site_endpoint=site_endpoint,
                   site_region=site_region,
                   site_priority=site_priority,
                   ratelimit_admin=ratelimit_admin,
                   num_trusted_proxies=num_trusted_proxies,
                   allowed_redirect_hosts=allowed_redirect_hosts,
                   allow_internal_endpoints=allow_internal_endpoints,
                   website_hosting_enabled=website_hosting_enabled,
                   gc_enabled=gc_enabled,
                   gc_interval_hours=gc_interval_hours,
                   gc_temp_file_max_age_hours=gc_temp_file_max_age_hours,
                   gc_multipart_max_age_days=gc_multipart_max_age_days,
                   gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
                   gc_dry_run=gc_dry_run,
                   gc_io_throttle_ms=gc_io_throttle_ms,
                   integrity_enabled=integrity_enabled,
                   integrity_interval_hours=integrity_interval_hours,
                   integrity_batch_size=integrity_batch_size,
                   integrity_auto_heal=integrity_auto_heal,
                   integrity_dry_run=integrity_dry_run,
                   integrity_io_throttle_ms=integrity_io_throttle_ms)

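Usage sketch (values hypothetical): entries in the overrides dict win over environment variables, per _get above.

cfg = AppConfig.from_env({
    "STORAGE_ROOT": "/srv/myfsio-data",  # hypothetical path
    "SERVER_THREADS": "8",               # explicit value -> server_threads_auto=False
})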

    def validate_and_report(self) -> list[str]:
        """Validate configuration and return a list of warnings/issues.

        Call this at startup to detect potential misconfigurations before
        the application fully commits to running.
        """
        issues = []

        try:
            test_file = self.storage_root / ".write_test"
            test_file.touch()
            test_file.unlink()
        except (OSError, PermissionError) as e:
            issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")

        storage_str = str(self.storage_root).lower()
        if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
            issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")

        try:
            self.iam_config_path.relative_to(self.storage_root)
        except ValueError:
            issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")

        try:
            self.bucket_policy_path.relative_to(self.storage_root)
        except ValueError:
            issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")

        try:
            self.log_path.parent.mkdir(parents=True, exist_ok=True)
            test_log = self.log_path.parent / ".write_test"
            test_log.touch()
            test_log.unlink()
        except (OSError, PermissionError) as e:
            issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")

        log_str = str(self.log_path).lower()
        if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
            issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")

        if self.encryption_enabled:
            try:
                self.encryption_master_key_path.relative_to(self.storage_root)
            except ValueError:
                issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")

        if self.kms_enabled:
            try:
                self.kms_keys_path.relative_to(self.storage_root)
            except ValueError:
                issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")

        if self.secret_key == "dev-secret-key":
            issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")

        if "*" in self.cors_origins:
            issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")

        if not (1 <= self.server_threads <= 64):
            issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
        if not (10 <= self.server_connection_limit <= 1000):
            issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
        if not (128 <= self.server_backlog <= 4096):
            issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
        if not (10 <= self.server_channel_timeout <= 300):
            issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
        if self.server_max_buffer_size < 1024 * 1024:
            issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")

        if sys.platform != "win32":
            try:
                import resource
                soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
                threshold = int(soft_limit * 0.8)
                if self.server_connection_limit > threshold:
                    issues.append(f"WARNING: SERVER_CONNECTION_LIMIT={self.server_connection_limit} exceeds 80% of system file descriptor limit (soft={soft_limit}). Consider running 'ulimit -n {self.server_connection_limit + 100}'.")
            except (ImportError, OSError):
                pass

        try:
            import psutil
            available_mb = psutil.virtual_memory().available / (1024 * 1024)
            estimated_mb = self.server_threads * 50
            if estimated_mb > available_mb * 0.5:
                issues.append(f"WARNING: SERVER_THREADS={self.server_threads} may require ~{estimated_mb}MB memory, exceeding 50% of available RAM ({int(available_mb)}MB).")
        except ImportError:
            pass

        return issues

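Startup sketch (illustrative): surface every issue and refuse to run when any CRITICAL one is present.

issues = cfg.validate_and_report()
for issue in issues:
    print(issue)
if any(issue.startswith("CRITICAL") for issue in issues):
    raise SystemExit("refusing to start with critical configuration errors")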

    def print_startup_summary(self) -> None:
        """Print a summary of the configuration at startup."""
        print("\n" + "=" * 60)
        print("MyFSIO Configuration Summary")
        print("=" * 60)
        print(f" STORAGE_ROOT: {self.storage_root}")
        print(f" IAM_CONFIG: {self.iam_config_path}")
        print(f" BUCKET_POLICY: {self.bucket_policy_path}")
        print(f" LOG_PATH: {self.log_path}")
        if self.api_base_url:
            print(f" API_BASE_URL: {self.api_base_url}")
        if self.encryption_enabled:
            print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
        if self.kms_enabled:
            print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
        if self.website_hosting_enabled:
            print(" WEBSITE_HOSTING: Enabled")

        def _auto(flag: bool) -> str:
            return " (auto)" if flag else ""

        print(f" SERVER_THREADS: {self.server_threads}{_auto(self.server_threads_auto)}")
        print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
        print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
        print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
        print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
        print("=" * 60)

        issues = self.validate_and_report()
        if issues:
            print("\nConfiguration Issues Detected:")
            for issue in issues:
                print(f" • {issue}")
            print()
        else:
            print(" ✓ Configuration validated successfully\n")


    def to_flask_config(self) -> Dict[str, Any]:
        return {
            "STORAGE_ROOT": str(self.storage_root),
            "MAX_CONTENT_LENGTH": self.max_upload_size,
            "UI_PAGE_SIZE": self.ui_page_size,
            "SECRET_KEY": self.secret_key,
            "IAM_CONFIG": str(self.iam_config_path),
            "BUCKET_POLICY_PATH": str(self.bucket_policy_path),
            "API_BASE_URL": self.api_base_url,
            "AWS_REGION": self.aws_region,
            "AWS_SERVICE": self.aws_service,
            "UI_ENFORCE_BUCKET_POLICIES": self.ui_enforce_bucket_policies,
            "AUTH_MAX_ATTEMPTS": self.auth_max_attempts,
            "AUTH_LOCKOUT_MINUTES": self.auth_lockout_minutes,
            "BULK_DELETE_MAX_KEYS": self.bulk_delete_max_keys,
            "SECRET_TTL_SECONDS": self.secret_ttl_seconds,
            "STREAM_CHUNK_SIZE": self.stream_chunk_size,
            "MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
            "BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
            "OBJECT_CACHE_TTL": self.object_cache_ttl,
            "LOG_LEVEL": self.log_level,
            "LOG_TO_FILE": self.log_to_file,
            "LOG_FILE": str(self.log_path),
            "LOG_MAX_BYTES": self.log_max_bytes,
            "LOG_BACKUP_COUNT": self.log_backup_count,
            "RATELIMIT_DEFAULT": self.ratelimit_default,
            "RATELIMIT_STORAGE_URI": self.ratelimit_storage_uri,
            "RATELIMIT_LIST_BUCKETS": self.ratelimit_list_buckets,
            "RATELIMIT_BUCKET_OPS": self.ratelimit_bucket_ops,
            "RATELIMIT_OBJECT_OPS": self.ratelimit_object_ops,
            "RATELIMIT_HEAD_OPS": self.ratelimit_head_ops,
            "CORS_ORIGINS": self.cors_origins,
            "CORS_METHODS": self.cors_methods,
            "CORS_ALLOW_HEADERS": self.cors_allow_headers,
            "CORS_EXPOSE_HEADERS": self.cors_expose_headers,
            "SESSION_LIFETIME_DAYS": self.session_lifetime_days,
            "ENCRYPTION_ENABLED": self.encryption_enabled,
            "ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),
            "KMS_ENABLED": self.kms_enabled,
            "KMS_KEYS_PATH": str(self.kms_keys_path),
            "DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
            "DISPLAY_TIMEZONE": self.display_timezone,
            "LIFECYCLE_ENABLED": self.lifecycle_enabled,
            "LIFECYCLE_INTERVAL_SECONDS": self.lifecycle_interval_seconds,
            "METRICS_HISTORY_ENABLED": self.metrics_history_enabled,
            "METRICS_HISTORY_RETENTION_HOURS": self.metrics_history_retention_hours,
            "METRICS_HISTORY_INTERVAL_MINUTES": self.metrics_history_interval_minutes,
            "OPERATION_METRICS_ENABLED": self.operation_metrics_enabled,
            "OPERATION_METRICS_INTERVAL_MINUTES": self.operation_metrics_interval_minutes,
            "OPERATION_METRICS_RETENTION_HOURS": self.operation_metrics_retention_hours,
            "SERVER_THREADS": self.server_threads,
            "SERVER_CONNECTION_LIMIT": self.server_connection_limit,
            "SERVER_BACKLOG": self.server_backlog,
            "SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
            "SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
            "SITE_SYNC_ENABLED": self.site_sync_enabled,
            "SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
            "SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
            "SIGV4_TIMESTAMP_TOLERANCE_SECONDS": self.sigv4_timestamp_tolerance_seconds,
            "PRESIGNED_URL_MIN_EXPIRY_SECONDS": self.presigned_url_min_expiry_seconds,
            "PRESIGNED_URL_MAX_EXPIRY_SECONDS": self.presigned_url_max_expiry_seconds,
            "REPLICATION_CONNECT_TIMEOUT_SECONDS": self.replication_connect_timeout_seconds,
            "REPLICATION_READ_TIMEOUT_SECONDS": self.replication_read_timeout_seconds,
            "REPLICATION_MAX_RETRIES": self.replication_max_retries,
            "REPLICATION_STREAMING_THRESHOLD_BYTES": self.replication_streaming_threshold_bytes,
            "REPLICATION_MAX_FAILURES_PER_BUCKET": self.replication_max_failures_per_bucket,
            "SITE_SYNC_CONNECT_TIMEOUT_SECONDS": self.site_sync_connect_timeout_seconds,
            "SITE_SYNC_READ_TIMEOUT_SECONDS": self.site_sync_read_timeout_seconds,
            "SITE_SYNC_MAX_RETRIES": self.site_sync_max_retries,
            "SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
            "OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
            "OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
            "META_READ_CACHE_MAX": self.meta_read_cache_max,
            "BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
            "OBJECT_TAG_LIMIT": self.object_tag_limit,
            "ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
            "KMS_GENERATE_DATA_KEY_MIN_BYTES": self.kms_generate_data_key_min_bytes,
            "KMS_GENERATE_DATA_KEY_MAX_BYTES": self.kms_generate_data_key_max_bytes,
            "LIFECYCLE_MAX_HISTORY_PER_BUCKET": self.lifecycle_max_history_per_bucket,
            "SITE_ID": self.site_id,
            "SITE_ENDPOINT": self.site_endpoint,
            "SITE_REGION": self.site_region,
            "SITE_PRIORITY": self.site_priority,
            "RATE_LIMIT_ADMIN": self.ratelimit_admin,
            "NUM_TRUSTED_PROXIES": self.num_trusted_proxies,
            "ALLOWED_REDIRECT_HOSTS": self.allowed_redirect_hosts,
            "ALLOW_INTERNAL_ENDPOINTS": self.allow_internal_endpoints,
            "WEBSITE_HOSTING_ENABLED": self.website_hosting_enabled,
            "GC_ENABLED": self.gc_enabled,
            "GC_INTERVAL_HOURS": self.gc_interval_hours,
            "GC_TEMP_FILE_MAX_AGE_HOURS": self.gc_temp_file_max_age_hours,
            "GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
            "GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
            "GC_DRY_RUN": self.gc_dry_run,
            "GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
            "INTEGRITY_ENABLED": self.integrity_enabled,
            "INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
            "INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
            "INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
            "INTEGRITY_DRY_RUN": self.integrity_dry_run,
            "INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
        }
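Wiring sketch (assumes a Flask app; illustrative only):

from flask import Flask

app = Flask(__name__)
app.config.update(AppConfig.from_env().to_flask_config())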
@@ -1,4 +1,3 @@
"""Manage remote S3 connections."""
from __future__ import annotations

import json
@@ -1,4 +1,3 @@
"""Encrypted storage layer that wraps ObjectStorage with encryption support."""
from __future__ import annotations

import io
@@ -79,7 +78,7 @@ class EncryptedObjectStorage:
         kms_key_id: Optional[str] = None,
     ) -> ObjectMeta:
         """Store an object, optionally with encryption.

         Args:
             bucket_name: Name of the bucket
             object_key: Key for the object
@@ -87,42 +86,41 @@ class EncryptedObjectStorage:
             metadata: Optional user metadata
             server_side_encryption: Encryption algorithm ("AES256" or "aws:kms")
             kms_key_id: KMS key ID (for aws:kms encryption)

         Returns:
             ObjectMeta with object information
+
+        Performance: Uses streaming encryption for large files to reduce memory usage.
         """
         should_encrypt, algorithm, detected_kms_key = self._should_encrypt(
             bucket_name, server_side_encryption
         )

         if kms_key_id is None:
             kms_key_id = detected_kms_key

         if should_encrypt:
-            data = stream.read()
-
             try:
-                ciphertext, enc_metadata = self.encryption.encrypt_object(
-                    data,
+                # Performance: Use streaming encryption to avoid loading entire file into memory
+                encrypted_stream, enc_metadata = self.encryption.encrypt_stream(
+                    stream,
                     algorithm=algorithm,
                     kms_key_id=kms_key_id,
                     context={"bucket": bucket_name, "key": object_key},
                 )

                 combined_metadata = metadata.copy() if metadata else {}
                 combined_metadata.update(enc_metadata.to_dict())

-                encrypted_stream = io.BytesIO(ciphertext)
-
                 result = self.storage.put_object(
                     bucket_name,
                     object_key,
                     encrypted_stream,
                     metadata=combined_metadata,
                 )

                 result.metadata = combined_metadata
                 return result

             except EncryptionError as exc:
                 raise StorageError(f"Encryption failed: {exc}") from exc
         else:
@@ -135,33 +133,34 @@ class EncryptedObjectStorage:

     def get_object_data(self, bucket_name: str, object_key: str) -> tuple[bytes, Dict[str, str]]:
         """Get object data, decrypting if necessary.

         Returns:
             Tuple of (data, metadata)
+
+        Performance: Uses streaming decryption to reduce memory usage.
         """
         path = self.storage.get_object_path(bucket_name, object_key)
         metadata = self.storage.get_object_metadata(bucket_name, object_key)

-        with path.open("rb") as f:
-            data = f.read()
-
         enc_metadata = EncryptionMetadata.from_dict(metadata)
         if enc_metadata:
             try:
-                data = self.encryption.decrypt_object(
-                    data,
-                    enc_metadata,
-                    context={"bucket": bucket_name, "key": object_key},
-                )
+                # Performance: Use streaming decryption to avoid loading entire file into memory
+                with path.open("rb") as f:
+                    decrypted_stream = self.encryption.decrypt_stream(f, enc_metadata)
+                    data = decrypted_stream.read()
             except EncryptionError as exc:
                 raise StorageError(f"Decryption failed: {exc}") from exc
+        else:
+            with path.open("rb") as f:
+                data = f.read()

         clean_metadata = {
             k: v for k, v in metadata.items()
             if not k.startswith("x-amz-encryption")
             and k != "x-amz-encrypted-data-key"
         }

         return data, clean_metadata
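Caller-side sketch (hypothetical wiring): the returned metadata has the x-amz-encryption-* bookkeeping keys stripped, so callers only see user metadata.

data, metadata = encrypted_storage.get_object_data("my-bucket", "reports/2024.csv")  # names hypothetical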

    def get_object_stream(self, bucket_name: str, object_key: str) -> tuple[BinaryIO, Dict[str, str], int]:
@@ -190,7 +189,16 @@

     def list_objects(self, bucket_name: str, **kwargs):
         return self.storage.list_objects(bucket_name, **kwargs)

     def list_objects_shallow(self, bucket_name: str, **kwargs):
         return self.storage.list_objects_shallow(bucket_name, **kwargs)

+    def iter_objects_shallow(self, bucket_name: str, **kwargs):
+        return self.storage.iter_objects_shallow(bucket_name, **kwargs)
+
+    def search_objects(self, bucket_name: str, query: str, **kwargs):
+        return self.storage.search_objects(bucket_name, query, **kwargs)
+
+    def list_objects_all(self, bucket_name: str):
+        return self.storage.list_objects_all(bucket_name)
+
@@ -271,9 +279,15 @@

     def get_bucket_quota(self, bucket_name: str):
         return self.storage.get_bucket_quota(bucket_name)

     def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
         return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)

+    def get_bucket_website(self, bucket_name: str):
+        return self.storage.get_bucket_website(bucket_name)
+
+    def set_bucket_website(self, bucket_name: str, website_config):
+        return self.storage.set_bucket_website(bucket_name, website_config)
+
     def _compute_etag(self, path: Path) -> str:
         return self.storage._compute_etag(path)
python/app/encryption.py (new file, 653 lines)
@@ -0,0 +1,653 @@
from __future__ import annotations

import base64
import io
import json
import logging
import os
import secrets
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any, BinaryIO, Dict, Generator, Optional

from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives import hashes

if sys.platform != "win32":
    import fcntl

try:
    import myfsio_core as _rc
    if not all(hasattr(_rc, f) for f in (
        "encrypt_stream_chunked", "decrypt_stream_chunked",
    )):
        raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
    _HAS_RUST = True
except ImportError:
    _rc = None
    _HAS_RUST = False

logger = logging.getLogger(__name__)


def _set_secure_file_permissions(file_path: Path) -> None:
    """Set restrictive file permissions (owner read/write only)."""
    if sys.platform == "win32":
        try:
            username = os.environ.get("USERNAME", "")
            if username:
                subprocess.run(
                    ["icacls", str(file_path), "/inheritance:r",
                     "/grant:r", f"{username}:F"],
                    check=True, capture_output=True
                )
            else:
                logger.warning("Could not set secure permissions on %s: USERNAME not set", file_path)
        except (subprocess.SubprocessError, OSError) as exc:
            logger.warning("Failed to set secure permissions on %s: %s", file_path, exc)
    else:
        os.chmod(file_path, 0o600)


class EncryptionError(Exception):
    """Raised when encryption/decryption fails."""


@dataclass
class EncryptionResult:
    """Result of encrypting data."""
    ciphertext: bytes
    nonce: bytes
    key_id: str
    encrypted_data_key: bytes


@dataclass
class EncryptionMetadata:
    """Metadata stored with encrypted objects."""
    algorithm: str
    key_id: str
    nonce: bytes
    encrypted_data_key: bytes

    def to_dict(self) -> Dict[str, str]:
        return {
            "x-amz-server-side-encryption": self.algorithm,
            "x-amz-encryption-key-id": self.key_id,
            "x-amz-encryption-nonce": base64.b64encode(self.nonce).decode(),
            "x-amz-encrypted-data-key": base64.b64encode(self.encrypted_data_key).decode(),
        }

    @classmethod
    def from_dict(cls, data: Dict[str, str]) -> Optional["EncryptionMetadata"]:
        algorithm = data.get("x-amz-server-side-encryption")
        if not algorithm:
            return None
        try:
            return cls(
                algorithm=algorithm,
                key_id=data.get("x-amz-encryption-key-id", "local"),
                nonce=base64.b64decode(data.get("x-amz-encryption-nonce", "")),
                encrypted_data_key=base64.b64decode(data.get("x-amz-encrypted-data-key", "")),
            )
        except Exception:
            return None
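Round-trip sketch (illustrative values): the metadata survives serialization into the x-amz-* string dict stored alongside the object.

meta = EncryptionMetadata(
    algorithm="AES256",
    key_id="local",
    nonce=secrets.token_bytes(12),
    encrypted_data_key=b"\x00" * 60,  # placeholder bytes, illustrative only
)
assert EncryptionMetadata.from_dict(meta.to_dict()) == meta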


class EncryptionProvider:
    """Base class for encryption providers."""

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        raise NotImplementedError

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        raise NotImplementedError

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form.

        Returns:
            Tuple of (plaintext_key, encrypted_key)
        """
        raise NotImplementedError

    def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
        """Decrypt an encrypted data key.

        Args:
            encrypted_data_key: The encrypted data key bytes
            key_id: Optional key identifier (used by KMS providers)

        Returns:
            The decrypted data key
        """
        raise NotImplementedError

class LocalKeyEncryption(EncryptionProvider):
    """SSE-S3 style encryption using a local master key.

    Uses envelope encryption:
    1. Generate a unique data key for each object
    2. Encrypt the data with the data key (AES-256-GCM)
    3. Encrypt the data key with the master key
    4. Store the encrypted data key alongside the ciphertext
    """

    KEY_ID = "local"

    def __init__(self, master_key_path: Path):
        self.master_key_path = master_key_path
        self._master_key: bytes | None = None

    @property
    def master_key(self) -> bytes:
        if self._master_key is None:
            self._master_key = self._load_or_create_master_key()
        return self._master_key

    def _load_or_create_master_key(self) -> bytes:
        """Load master key from file or generate a new one (with file locking)."""
        lock_path = self.master_key_path.with_suffix(".lock")
        lock_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            with open(lock_path, "w") as lock_file:
                if sys.platform == "win32":
                    import msvcrt
                    msvcrt.locking(lock_file.fileno(), msvcrt.LK_LOCK, 1)
                else:
                    fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
                try:
                    if self.master_key_path.exists():
                        try:
                            return base64.b64decode(self.master_key_path.read_text().strip())
                        except Exception as exc:
                            raise EncryptionError(f"Failed to load master key: {exc}") from exc
                    key = secrets.token_bytes(32)
                    try:
                        self.master_key_path.write_text(base64.b64encode(key).decode())
                        _set_secure_file_permissions(self.master_key_path)
                    except OSError as exc:
                        raise EncryptionError(f"Failed to save master key: {exc}") from exc
                    return key
                finally:
                    if sys.platform == "win32":
                        import msvcrt
                        msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
                    else:
                        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
        except OSError as exc:
            raise EncryptionError(f"Failed to acquire lock for master key: {exc}") from exc

    DATA_KEY_AAD = b'{"purpose":"data_key","version":1}'

    def _encrypt_data_key(self, data_key: bytes) -> bytes:
        """Encrypt the data key with the master key."""
        aesgcm = AESGCM(self.master_key)
        nonce = secrets.token_bytes(12)
        encrypted = aesgcm.encrypt(nonce, data_key, self.DATA_KEY_AAD)
        return nonce + encrypted

    def _decrypt_data_key(self, encrypted_data_key: bytes) -> bytes:
        """Decrypt the data key using the master key."""
        if len(encrypted_data_key) < 12 + 32 + 16:  # nonce + key + tag
            raise EncryptionError("Invalid encrypted data key")
        aesgcm = AESGCM(self.master_key)
        nonce = encrypted_data_key[:12]
        ciphertext = encrypted_data_key[12:]
        try:
            return aesgcm.decrypt(nonce, ciphertext, self.DATA_KEY_AAD)
        except Exception:
            # Legacy fallback: data keys written before the AAD was introduced.
            try:
                return aesgcm.decrypt(nonce, ciphertext, None)
            except Exception as exc:
                raise EncryptionError(f"Failed to decrypt data key: {exc}") from exc

    def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
        """Decrypt an encrypted data key (key_id ignored for local encryption)."""
        return self._decrypt_data_key(encrypted_data_key)

    def generate_data_key(self) -> tuple[bytes, bytes]:
        """Generate a data key and its encrypted form."""
        plaintext_key = secrets.token_bytes(32)
        encrypted_key = self._encrypt_data_key(plaintext_key)
        return plaintext_key, encrypted_key

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt data using envelope encryption."""
        data_key, encrypted_data_key = self.generate_data_key()

        aesgcm = AESGCM(data_key)
        nonce = secrets.token_bytes(12)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

        return EncryptionResult(
            ciphertext=ciphertext,
            nonce=nonce,
            key_id=self.KEY_ID,
            encrypted_data_key=encrypted_data_key,
        )

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data using envelope encryption."""
        data_key = self._decrypt_data_key(encrypted_data_key)
        aesgcm = AESGCM(data_key)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        try:
            return aesgcm.decrypt(nonce, ciphertext, aad)
        except Exception as exc:
            raise EncryptionError("Failed to decrypt data") from exc
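Envelope round-trip sketch (hypothetical key path). The same context must be supplied on decrypt, since it is bound into the ciphertext as AAD.

provider = LocalKeyEncryption(Path("/tmp/demo-master.key"))  # hypothetical path
ctx = {"bucket": "demo", "key": "hello.txt"}
result = provider.encrypt(b"hello world", context=ctx)
plaintext = provider.decrypt(result.ciphertext, result.nonce,
                             result.encrypted_data_key, result.key_id,
                             context=ctx)
assert plaintext == b"hello world"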


class StreamingEncryptor:
    """Encrypts/decrypts data in streaming fashion for large files.

    For large files, we encrypt in chunks. Each chunk is encrypted with the
    same data key but a unique nonce derived from the base nonce + chunk index.
    """

    CHUNK_SIZE = 64 * 1024
    HEADER_SIZE = 4

    def __init__(self, provider: EncryptionProvider, chunk_size: int = CHUNK_SIZE):
        self.provider = provider
        self.chunk_size = chunk_size

    def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
        """Derive a unique nonce for each chunk using HKDF."""
        hkdf = HKDF(
            algorithm=hashes.SHA256(),
            length=12,
            salt=base_nonce,
            info=chunk_index.to_bytes(4, "big"),
        )
        return hkdf.derive(b"chunk_nonce")

    def encrypt_stream(self, stream: BinaryIO,
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream and return encrypted stream + metadata.

        Performance: Writes chunks directly to output buffer instead of accumulating in list.
        """
        data_key, encrypted_data_key = self.provider.generate_data_key()
        base_nonce = secrets.token_bytes(12)

        aesgcm = AESGCM(data_key)
        # Performance: Write directly to BytesIO instead of accumulating chunks
        output = io.BytesIO()
        output.write(b"\x00\x00\x00\x00")  # Placeholder for chunk count
        chunk_index = 0

        while True:
            chunk = stream.read(self.chunk_size)
            if not chunk:
                break

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)

            # Write size prefix + encrypted chunk directly
            output.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
            output.write(encrypted_chunk)
            chunk_index += 1

        # Write actual chunk count to header
        output.seek(0)
        output.write(chunk_index.to_bytes(4, "big"))
        output.seek(0)

        metadata = EncryptionMetadata(
            algorithm="AES256",
            key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
            nonce=base_nonce,
            encrypted_data_key=encrypted_data_key,
        )

        return output, metadata

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream using the provided metadata.

        Performance: Writes chunks directly to output buffer instead of accumulating in list.
        """
        data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)

        aesgcm = AESGCM(data_key)
        base_nonce = metadata.nonce

        chunk_count_bytes = stream.read(4)
        if len(chunk_count_bytes) < 4:
            raise EncryptionError("Invalid encrypted stream: missing header")
        chunk_count = int.from_bytes(chunk_count_bytes, "big")

        # Performance: Write directly to BytesIO instead of accumulating chunks
        output = io.BytesIO()
        for chunk_index in range(chunk_count):
            size_bytes = stream.read(self.HEADER_SIZE)
            if len(size_bytes) < self.HEADER_SIZE:
                raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
            chunk_size = int.from_bytes(size_bytes, "big")

            encrypted_chunk = stream.read(chunk_size)
            if len(encrypted_chunk) < chunk_size:
                raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")

            chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
            try:
                decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
                output.write(decrypted_chunk)  # Write directly instead of appending to list
            except Exception as exc:
                raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc

        output.seek(0)
        return output
|
||||
    def encrypt_file(self, input_path: str, output_path: str) -> EncryptionMetadata:
        data_key, encrypted_data_key = self.provider.generate_data_key()
        base_nonce = secrets.token_bytes(12)

        if _HAS_RUST:
            _rc.encrypt_stream_chunked(
                input_path, output_path, data_key, base_nonce, self.chunk_size
            )
        else:
            aesgcm = AESGCM(data_key)
            with open(input_path, "rb") as stream, open(output_path, "wb") as out:
                out.write(b"\x00\x00\x00\x00")  # Placeholder for the chunk count
                chunk_index = 0
                while True:
                    chunk = stream.read(self.chunk_size)
                    if not chunk:
                        break
                    chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
                    encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
                    out.write(len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big"))
                    out.write(encrypted_chunk)
                    chunk_index += 1
                out.seek(0)
                out.write(chunk_index.to_bytes(4, "big"))

        return EncryptionMetadata(
            algorithm="AES256",
            key_id=getattr(self.provider, "KEY_ID", "local"),
            nonce=base_nonce,
            encrypted_data_key=encrypted_data_key,
        )
    def decrypt_file(self, input_path: str, output_path: str,
                     metadata: EncryptionMetadata) -> None:
        data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)
        base_nonce = metadata.nonce

        if _HAS_RUST:
            _rc.decrypt_stream_chunked(input_path, output_path, data_key, base_nonce)
        else:
            with open(input_path, "rb") as stream:
                chunk_count_bytes = stream.read(4)
                if len(chunk_count_bytes) < 4:
                    raise EncryptionError("Invalid encrypted stream: missing header")
                chunk_count = int.from_bytes(chunk_count_bytes, "big")
                aesgcm = AESGCM(data_key)
                with open(output_path, "wb") as out:
                    for chunk_index in range(chunk_count):
                        size_bytes = stream.read(self.HEADER_SIZE)
                        if len(size_bytes) < self.HEADER_SIZE:
                            raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
                        chunk_size = int.from_bytes(size_bytes, "big")
                        encrypted_chunk = stream.read(chunk_size)
                        if len(encrypted_chunk) < chunk_size:
                            raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")
                        chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
                        try:
                            decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
                            out.write(decrypted_chunk)
                        except Exception as exc:
                            raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc

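# Illustrative round trip through StreamingEncryptor (a sketch, not part of
# the diff): LocalKeyEncryption comes from earlier in this module and is
# assumed to implement the provider interface used above; the key path is a
# placeholder. It shows the wire format: a 4-byte big-endian chunk count,
# then [4-byte length][ciphertext + GCM tag] per chunk.
provider = LocalKeyEncryption(Path("data/.myfsio.sys/keys/master.key"))
encryptor = StreamingEncryptor(provider)

payload = b"x" * 200_000  # spans multiple 64 KiB chunks
encrypted, meta = encryptor.encrypt_stream(io.BytesIO(payload))
assert encryptor.decrypt_stream(encrypted, meta).read() == payload
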
class EncryptionManager:
    """Manages encryption providers and operations."""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self._local_provider: LocalKeyEncryption | None = None
        self._kms_provider: Any = None  # Set by KMS module
        self._streaming_encryptor: StreamingEncryptor | None = None

    @property
    def enabled(self) -> bool:
        return self.config.get("encryption_enabled", False)

    @property
    def default_algorithm(self) -> str:
        return self.config.get("default_encryption_algorithm", "AES256")

    def get_local_provider(self) -> LocalKeyEncryption:
        if self._local_provider is None:
            key_path = Path(self.config.get("encryption_master_key_path", "data/.myfsio.sys/keys/master.key"))
            self._local_provider = LocalKeyEncryption(key_path)
        return self._local_provider

    def set_kms_provider(self, kms_provider: Any) -> None:
        """Set the KMS provider (injected from kms module)."""
        self._kms_provider = kms_provider

    def get_provider(self, algorithm: str, kms_key_id: str | None = None) -> EncryptionProvider:
        """Get the appropriate encryption provider for the algorithm."""
        if algorithm == "AES256":
            return self.get_local_provider()
        elif algorithm == "aws:kms":
            if self._kms_provider is None:
                raise EncryptionError("KMS is not configured")
            return self._kms_provider.get_provider(kms_key_id)
        else:
            raise EncryptionError(f"Unsupported encryption algorithm: {algorithm}")

    def get_streaming_encryptor(self) -> StreamingEncryptor:
        if self._streaming_encryptor is None:
            chunk_size = self.config.get("encryption_chunk_size_bytes", 64 * 1024)
            self._streaming_encryptor = StreamingEncryptor(self.get_local_provider(), chunk_size=chunk_size)
        return self._streaming_encryptor

    def encrypt_object(self, data: bytes, algorithm: str = "AES256",
                       kms_key_id: str | None = None,
                       context: Dict[str, str] | None = None) -> tuple[bytes, EncryptionMetadata]:
        """Encrypt object data."""
        provider = self.get_provider(algorithm, kms_key_id)
        result = provider.encrypt(data, context)

        metadata = EncryptionMetadata(
            algorithm=algorithm,
            key_id=result.key_id,
            nonce=result.nonce,
            encrypted_data_key=result.encrypted_data_key,
        )

        return result.ciphertext, metadata

    def decrypt_object(self, ciphertext: bytes, metadata: EncryptionMetadata,
                       context: Dict[str, str] | None = None) -> bytes:
        """Decrypt object data."""
        provider = self.get_provider(metadata.algorithm, metadata.key_id)
        return provider.decrypt(
            ciphertext,
            metadata.nonce,
            metadata.encrypted_data_key,
            metadata.key_id,
            context,
        )

    def encrypt_stream(self, stream: BinaryIO, algorithm: str = "AES256",
                       context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
        """Encrypt a stream for large files."""
        encryptor = self.get_streaming_encryptor()
        return encryptor.encrypt_stream(stream, context)

    def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
        """Decrypt a stream."""
        encryptor = self.get_streaming_encryptor()
        return encryptor.decrypt_stream(stream, metadata)

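# Minimal sketch of driving the manager above (illustrative, not part of the
# diff). The config keys match the ones read by get_local_provider and
# get_streaming_encryptor; the values are placeholders. The encryption
# context is assumed to be bound as AEAD associated data by the local
# provider, so the same dict must be presented again on decrypt.
config = {
    "encryption_enabled": True,
    "default_encryption_algorithm": "AES256",
    "encryption_master_key_path": "data/.myfsio.sys/keys/master.key",
}
manager = EncryptionManager(config)

ctx = {"bucket": "demo", "key": "greeting"}
ciphertext, obj_meta = manager.encrypt_object(b"hello", context=ctx)
assert manager.decrypt_object(ciphertext, obj_meta, context=ctx) == b"hello"
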
class SSECEncryption(EncryptionProvider):
    """SSE-C: Server-Side Encryption with Customer-Provided Keys.

    The client provides the encryption key with each request. The server
    encrypts/decrypts but never stores the key.

    Required headers for PUT:
    - x-amz-server-side-encryption-customer-algorithm: AES256
    - x-amz-server-side-encryption-customer-key: Base64-encoded 256-bit key
    - x-amz-server-side-encryption-customer-key-MD5: Base64-encoded MD5 of the key
    """

    KEY_ID = "customer-provided"

    def __init__(self, customer_key: bytes):
        if len(customer_key) != 32:
            raise EncryptionError("Customer key must be exactly 256 bits (32 bytes)")
        self.customer_key = customer_key

    @classmethod
    def from_headers(cls, headers: Dict[str, str]) -> "SSECEncryption":
        algorithm = headers.get("x-amz-server-side-encryption-customer-algorithm", "")
        if algorithm.upper() != "AES256":
            raise EncryptionError(f"Unsupported SSE-C algorithm: {algorithm}. Only AES256 is supported.")

        key_b64 = headers.get("x-amz-server-side-encryption-customer-key", "")
        if not key_b64:
            raise EncryptionError("Missing x-amz-server-side-encryption-customer-key header")

        key_md5_b64 = headers.get("x-amz-server-side-encryption-customer-key-md5", "")

        try:
            customer_key = base64.b64decode(key_b64)
        except Exception as e:
            raise EncryptionError(f"Invalid base64 in customer key: {e}") from e

        if len(customer_key) != 32:
            raise EncryptionError(f"Customer key must be 256 bits, got {len(customer_key) * 8} bits")

        if key_md5_b64:
            import hashlib
            expected_md5 = base64.b64encode(hashlib.md5(customer_key).digest()).decode()
            if key_md5_b64 != expected_md5:
                raise EncryptionError("Customer key MD5 mismatch")

        return cls(customer_key)

    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        aesgcm = AESGCM(self.customer_key)
        nonce = secrets.token_bytes(12)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

        return EncryptionResult(
            ciphertext=ciphertext,
            nonce=nonce,
            key_id=self.KEY_ID,
            encrypted_data_key=b"",
        )

    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        aesgcm = AESGCM(self.customer_key)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        try:
            return aesgcm.decrypt(nonce, ciphertext, aad)
        except Exception as exc:
            raise EncryptionError("SSE-C decryption failed") from exc

    def generate_data_key(self) -> tuple[bytes, bytes]:
        return self.customer_key, b""

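# Illustrative client-side construction of the SSE-C headers parsed by
# from_headers (a sketch, not part of the diff; the key is random and the
# payload a placeholder). Note the lookup above uses the lowercased
# ...-customer-key-md5 header name.
import base64
import hashlib
import secrets

key = secrets.token_bytes(32)
headers = {
    "x-amz-server-side-encryption-customer-algorithm": "AES256",
    "x-amz-server-side-encryption-customer-key": base64.b64encode(key).decode(),
    "x-amz-server-side-encryption-customer-key-md5":
        base64.b64encode(hashlib.md5(key).digest()).decode(),
}
sse_c = SSECEncryption.from_headers(headers)
result = sse_c.encrypt(b"secret payload")
assert sse_c.decrypt(result.ciphertext, result.nonce, b"", sse_c.KEY_ID) == b"secret payload"
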
@dataclass
class SSECMetadata:
    algorithm: str = "AES256"
    nonce: bytes = b""
    key_md5: str = ""

    def to_dict(self) -> Dict[str, str]:
        return {
            "x-amz-server-side-encryption-customer-algorithm": self.algorithm,
            "x-amz-encryption-nonce": base64.b64encode(self.nonce).decode(),
            "x-amz-server-side-encryption-customer-key-MD5": self.key_md5,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, str]) -> Optional["SSECMetadata"]:
        algorithm = data.get("x-amz-server-side-encryption-customer-algorithm")
        if not algorithm:
            return None
        try:
            nonce = base64.b64decode(data.get("x-amz-encryption-nonce", ""))
            return cls(
                algorithm=algorithm,
                nonce=nonce,
                key_md5=data.get("x-amz-server-side-encryption-customer-key-MD5", ""),
            )
        except Exception:
            return None

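# Small illustrative round trip through the dataclass above (not part of the
# diff; the MD5 string is a placeholder value).
ssec_meta = SSECMetadata(nonce=secrets.token_bytes(12), key_md5="1B2M2Y8AsgTpgAmY7PhCfg==")
restored = SSECMetadata.from_dict(ssec_meta.to_dict())
assert restored is not None and restored.nonce == ssec_meta.nonce
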
class ClientEncryptionHelper:
    """Helpers for client-side encryption.

    Client-side encryption is performed by the client, but this helper
    provides key generation and materials for clients that need them.
    """

    @staticmethod
    def generate_client_key() -> Dict[str, str]:
        """Generate a new client encryption key."""
        from datetime import datetime, timezone
        key = secrets.token_bytes(32)
        return {
            "key": base64.b64encode(key).decode(),
            "algorithm": "AES-256-GCM",
            "created_at": datetime.now(timezone.utc).isoformat(),
        }

    @staticmethod
    def encrypt_with_key(plaintext: bytes, key_b64: str, context: Dict[str, str] | None = None) -> Dict[str, str]:
        """Encrypt data with a client-provided key."""
        key = base64.b64decode(key_b64)
        if len(key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        aesgcm = AESGCM(key)
        nonce = secrets.token_bytes(12)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

        return {
            "ciphertext": base64.b64encode(ciphertext).decode(),
            "nonce": base64.b64encode(nonce).decode(),
            "algorithm": "AES-256-GCM",
        }

    @staticmethod
    def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data with a client-provided key."""
        key = base64.b64decode(key_b64)
        nonce = base64.b64decode(nonce_b64)
        ciphertext = base64.b64decode(ciphertext_b64)

        if len(key) != 32:
            raise EncryptionError("Key must be 256 bits (32 bytes)")

        aesgcm = AESGCM(key)
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        try:
            return aesgcm.decrypt(nonce, ciphertext, aad)
        except Exception as exc:
            raise EncryptionError("Decryption failed") from exc

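# Illustrative round trip through the helper above (a sketch, not part of
# the diff; the plaintext is a placeholder).
material = ClientEncryptionHelper.generate_client_key()
envelope = ClientEncryptionHelper.encrypt_with_key(b"client-side secret", material["key"])
plaintext = ClientEncryptionHelper.decrypt_with_key(
    envelope["ciphertext"], envelope["nonce"], material["key"]
)
assert plaintext == b"client-side secret"
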
@@ -1,4 +1,3 @@
"""Standardized error handling for API and UI responses."""
from __future__ import annotations

import logging
@@ -7,6 +6,7 @@ from typing import Optional, Dict, Any
from xml.etree.ElementTree import Element, SubElement, tostring

from flask import Response, jsonify, request, flash, redirect, url_for, g
from flask_limiter import RateLimitExceeded

logger = logging.getLogger(__name__)

@@ -173,10 +173,30 @@ def handle_app_error(error: AppError) -> Response:
    return error.to_xml_response()


def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
    g.s3_error_code = "SlowDown"
    if request.path.startswith("/ui") or request.path.startswith("/buckets"):
        wants_json = (
            request.is_json or
            request.headers.get("X-Requested-With") == "XMLHttpRequest" or
            "application/json" in request.accept_mimetypes.values()
        )
        if wants_json:
            return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
    error = Element("Error")
    SubElement(error, "Code").text = "SlowDown"
    SubElement(error, "Message").text = "Please reduce your request rate."
    SubElement(error, "Resource").text = request.path
    SubElement(error, "RequestId").text = getattr(g, "request_id", "")
    xml_bytes = tostring(error, encoding="utf-8")
    return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")


def register_error_handlers(app):
    """Register error handlers with a Flask app."""
    app.register_error_handler(AppError, handle_app_error)

    app.register_error_handler(RateLimitExceeded, handle_rate_limit_exceeded)

    for error_class in [
        BucketNotFoundError, BucketAlreadyExistsError, BucketNotEmptyError,
        ObjectNotFoundError, InvalidObjectKeyError,
@@ -1,4 +1,3 @@
"""Application-wide extension instances."""
from flask import g
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

python/app/gc.py (Normal file, 596 lines)
@@ -0,0 +1,596 @@
from __future__ import annotations

import json
import logging
import os
import shutil
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)


@dataclass
class GCResult:
    temp_files_deleted: int = 0
    temp_bytes_freed: int = 0
    multipart_uploads_deleted: int = 0
    multipart_bytes_freed: int = 0
    lock_files_deleted: int = 0
    orphaned_metadata_deleted: int = 0
    orphaned_versions_deleted: int = 0
    orphaned_version_bytes_freed: int = 0
    empty_dirs_removed: int = 0
    errors: List[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0

    def to_dict(self) -> dict:
        return {
            "temp_files_deleted": self.temp_files_deleted,
            "temp_bytes_freed": self.temp_bytes_freed,
            "multipart_uploads_deleted": self.multipart_uploads_deleted,
            "multipart_bytes_freed": self.multipart_bytes_freed,
            "lock_files_deleted": self.lock_files_deleted,
            "orphaned_metadata_deleted": self.orphaned_metadata_deleted,
            "orphaned_versions_deleted": self.orphaned_versions_deleted,
            "orphaned_version_bytes_freed": self.orphaned_version_bytes_freed,
            "empty_dirs_removed": self.empty_dirs_removed,
            "errors": self.errors,
            "execution_time_seconds": self.execution_time_seconds,
        }

    @property
    def total_bytes_freed(self) -> int:
        return self.temp_bytes_freed + self.multipart_bytes_freed + self.orphaned_version_bytes_freed

    @property
    def has_work(self) -> bool:
        return (
            self.temp_files_deleted > 0
            or self.multipart_uploads_deleted > 0
            or self.lock_files_deleted > 0
            or self.orphaned_metadata_deleted > 0
            or self.orphaned_versions_deleted > 0
            or self.empty_dirs_removed > 0
        )

@dataclass
class GCExecutionRecord:
    timestamp: float
    result: dict
    dry_run: bool

    def to_dict(self) -> dict:
        return {
            "timestamp": self.timestamp,
            "result": self.result,
            "dry_run": self.dry_run,
        }

    @classmethod
    def from_dict(cls, data: dict) -> GCExecutionRecord:
        return cls(
            timestamp=data["timestamp"],
            result=data["result"],
            dry_run=data.get("dry_run", False),
        )


class GCHistoryStore:
    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "gc_history.json"

    def load(self) -> List[GCExecutionRecord]:
        path = self._get_path()
        if not path.exists():
            return []
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            return [GCExecutionRecord.from_dict(d) for d in data.get("executions", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error("Failed to load GC history: %s", e)
            return []

    def save(self, records: List[GCExecutionRecord]) -> None:
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"executions": [r.to_dict() for r in records[: self.max_records]]}
        try:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error("Failed to save GC history: %s", e)

    def add(self, record: GCExecutionRecord) -> None:
        with self._lock:
            records = self.load()
            records.insert(0, record)
            self.save(records)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[GCExecutionRecord]:
        return self.load()[offset : offset + limit]

def _dir_size(path: Path) -> int:
    total = 0
    try:
        for f in path.rglob("*"):
            if f.is_file():
                try:
                    total += f.stat().st_size
                except OSError:
                    pass
    except OSError:
        pass
    return total


def _file_age_hours(path: Path) -> float:
    try:
        mtime = path.stat().st_mtime
        return (time.time() - mtime) / 3600.0
    except OSError:
        return 0.0

class GarbageCollector:
    SYSTEM_ROOT = ".myfsio.sys"
    SYSTEM_TMP_DIR = "tmp"
    SYSTEM_MULTIPART_DIR = "multipart"
    SYSTEM_BUCKETS_DIR = "buckets"
    BUCKET_META_DIR = "meta"
    BUCKET_VERSIONS_DIR = "versions"
    INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}

    def __init__(
        self,
        storage_root: Path,
        interval_hours: float = 6.0,
        temp_file_max_age_hours: float = 24.0,
        multipart_max_age_days: int = 7,
        lock_file_max_age_hours: float = 1.0,
        dry_run: bool = False,
        max_history: int = 50,
        io_throttle_ms: int = 10,
    ) -> None:
        self.storage_root = Path(storage_root)
        self.interval_seconds = interval_hours * 3600.0
        self.temp_file_max_age_hours = temp_file_max_age_hours
        self.multipart_max_age_days = multipart_max_age_days
        self.lock_file_max_age_hours = lock_file_max_age_hours
        self.dry_run = dry_run
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        self._lock = threading.Lock()
        self._scanning = False
        self._scan_start_time: Optional[float] = None
        self._io_throttle = max(0, io_throttle_ms) / 1000.0
        self.history_store = GCHistoryStore(storage_root, max_records=max_history)

    def start(self) -> None:
        if self._timer is not None:
            return
        self._shutdown = False
        self._schedule_next()
        logger.info(
            "GC started: interval=%.1fh, temp_max_age=%.1fh, multipart_max_age=%dd, lock_max_age=%.1fh, dry_run=%s",
            self.interval_seconds / 3600.0,
            self.temp_file_max_age_hours,
            self.multipart_max_age_days,
            self.lock_file_max_age_hours,
            self.dry_run,
        )

    def stop(self) -> None:
        self._shutdown = True
        if self._timer:
            self._timer.cancel()
            self._timer = None
        logger.info("GC stopped")

    def _schedule_next(self) -> None:
        if self._shutdown:
            return
        self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
        self._timer.daemon = True
        self._timer.start()

    def _run_cycle(self) -> None:
        if self._shutdown:
            return
        try:
            self.run_now()
        except Exception as e:
            logger.error("GC cycle failed: %s", e)
        finally:
            self._schedule_next()

    def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("GC is already in progress")

        effective_dry_run = dry_run if dry_run is not None else self.dry_run

        try:
            self._scanning = True
            self._scan_start_time = time.time()

            start = self._scan_start_time
            result = GCResult()

            original_dry_run = self.dry_run
            self.dry_run = effective_dry_run
            try:
                self._clean_temp_files(result)
                self._clean_orphaned_multipart(result)
                self._clean_stale_locks(result)
                self._clean_orphaned_metadata(result)
                self._clean_orphaned_versions(result)
                self._clean_empty_dirs(result)
            finally:
                self.dry_run = original_dry_run

            result.execution_time_seconds = time.time() - start

            if result.has_work or result.errors:
                logger.info(
                    "GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
                    "locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
                    result.execution_time_seconds,
                    result.temp_files_deleted,
                    result.temp_bytes_freed / (1024 * 1024),
                    result.multipart_uploads_deleted,
                    result.multipart_bytes_freed / (1024 * 1024),
                    result.lock_files_deleted,
                    result.orphaned_metadata_deleted,
                    result.orphaned_versions_deleted,
                    result.orphaned_version_bytes_freed / (1024 * 1024),
                    result.empty_dirs_removed,
                    len(result.errors),
                    " (dry run)" if effective_dry_run else "",
                )

            record = GCExecutionRecord(
                timestamp=time.time(),
                result=result.to_dict(),
                dry_run=effective_dry_run,
            )
            self.history_store.add(record)

            return result
        finally:
            self._scanning = False
            self._scan_start_time = None
            self._lock.release()

    def run_async(self, dry_run: Optional[bool] = None) -> bool:
        if self._scanning:
            return False
        t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
        t.start()
        return True

    def _system_path(self) -> Path:
        return self.storage_root / self.SYSTEM_ROOT

    def _throttle(self) -> bool:
        if self._shutdown:
            return True
        if self._io_throttle > 0:
            time.sleep(self._io_throttle)
        return self._shutdown

    def _list_bucket_names(self) -> List[str]:
        names = []
        try:
            for entry in self.storage_root.iterdir():
                if entry.is_dir() and entry.name != self.SYSTEM_ROOT:
                    names.append(entry.name)
        except OSError:
            pass
        return names

    def _clean_temp_files(self, result: GCResult) -> None:
        tmp_dir = self._system_path() / self.SYSTEM_TMP_DIR
        if not tmp_dir.exists():
            return
        try:
            for entry in tmp_dir.iterdir():
                if self._throttle():
                    return
                if not entry.is_file():
                    continue
                age = _file_age_hours(entry)
                if age < self.temp_file_max_age_hours:
                    continue
                try:
                    size = entry.stat().st_size
                    if not self.dry_run:
                        entry.unlink()
                    result.temp_files_deleted += 1
                    result.temp_bytes_freed += size
                except OSError as e:
                    result.errors.append(f"temp file {entry.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan tmp dir: {e}")

    def _clean_orphaned_multipart(self, result: GCResult) -> None:
        cutoff_hours = self.multipart_max_age_days * 24.0
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            for multipart_root in (
                self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
                self.storage_root / bucket_name / ".multipart",
            ):
                if not multipart_root.exists():
                    continue
                try:
                    for upload_dir in multipart_root.iterdir():
                        if self._throttle():
                            return
                        if not upload_dir.is_dir():
                            continue
                        self._maybe_clean_upload(upload_dir, cutoff_hours, result)
                except OSError as e:
                    result.errors.append(f"scan multipart {bucket_name}: {e}")

    def _maybe_clean_upload(self, upload_dir: Path, cutoff_hours: float, result: GCResult) -> None:
        manifest_path = upload_dir / "manifest.json"
        age = _file_age_hours(manifest_path) if manifest_path.exists() else _file_age_hours(upload_dir)

        if age < cutoff_hours:
            return

        dir_bytes = _dir_size(upload_dir)
        try:
            if not self.dry_run:
                shutil.rmtree(upload_dir, ignore_errors=True)
            result.multipart_uploads_deleted += 1
            result.multipart_bytes_freed += dir_bytes
        except OSError as e:
            result.errors.append(f"multipart {upload_dir.name}: {e}")

    def _clean_stale_locks(self, result: GCResult) -> None:
        buckets_root = self._system_path() / self.SYSTEM_BUCKETS_DIR
        if not buckets_root.exists():
            return

        try:
            for bucket_dir in buckets_root.iterdir():
                if self._shutdown:
                    return
                if not bucket_dir.is_dir():
                    continue
                locks_dir = bucket_dir / "locks"
                if not locks_dir.exists():
                    continue
                try:
                    for lock_file in locks_dir.iterdir():
                        if self._throttle():
                            return
                        if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
                            continue
                        age = _file_age_hours(lock_file)
                        if age < self.lock_file_max_age_hours:
                            continue
                        try:
                            if not self.dry_run:
                                lock_file.unlink(missing_ok=True)
                            result.lock_files_deleted += 1
                        except OSError as e:
                            result.errors.append(f"lock {lock_file.name}: {e}")
                except OSError as e:
                    result.errors.append(f"scan locks {bucket_dir.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan buckets for locks: {e}")

    def _clean_orphaned_metadata(self, result: GCResult) -> None:
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            legacy_meta = self.storage_root / bucket_name / ".meta"
            if legacy_meta.exists():
                self._clean_legacy_metadata(bucket_name, legacy_meta, result)

            new_meta = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
            if new_meta.exists():
                self._clean_index_metadata(bucket_name, new_meta, result)

    def _clean_legacy_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
        bucket_path = self.storage_root / bucket_name
        try:
            for meta_file in meta_root.rglob("*.meta.json"):
                if self._throttle():
                    return
                if not meta_file.is_file():
                    continue
                try:
                    rel = meta_file.relative_to(meta_root)
                    object_key = rel.as_posix().removesuffix(".meta.json")
                    object_path = bucket_path / object_key
                    if not object_path.exists():
                        if not self.dry_run:
                            meta_file.unlink(missing_ok=True)
                        result.orphaned_metadata_deleted += 1
                except (OSError, ValueError) as e:
                    result.errors.append(f"legacy meta {bucket_name}/{meta_file.name}: {e}")
        except OSError as e:
            result.errors.append(f"scan legacy meta {bucket_name}: {e}")

    def _clean_index_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
        bucket_path = self.storage_root / bucket_name
        try:
            for index_file in meta_root.rglob("_index.json"):
                if self._throttle():
                    return
                if not index_file.is_file():
                    continue
                try:
                    with open(index_file, "r", encoding="utf-8") as f:
                        index_data = json.load(f)
                except (OSError, json.JSONDecodeError):
                    continue

                rel_dir = index_file.parent.relative_to(meta_root)
                keys_to_remove = []
                for key in index_data:
                    if rel_dir == Path("."):
                        full_key = key
                    else:
                        full_key = rel_dir.as_posix() + "/" + key
                    object_path = bucket_path / full_key
                    if not object_path.exists():
                        keys_to_remove.append(key)

                if keys_to_remove:
                    if not self.dry_run:
                        for k in keys_to_remove:
                            index_data.pop(k, None)
                        if index_data:
                            try:
                                with open(index_file, "w", encoding="utf-8") as f:
                                    json.dump(index_data, f)
                            except OSError as e:
                                result.errors.append(f"write index {bucket_name}: {e}")
                                continue
                        else:
                            try:
                                index_file.unlink(missing_ok=True)
                            except OSError:
                                pass
                    result.orphaned_metadata_deleted += len(keys_to_remove)
        except OSError as e:
            result.errors.append(f"scan index meta {bucket_name}: {e}")

    def _clean_orphaned_versions(self, result: GCResult) -> None:
        bucket_names = self._list_bucket_names()

        for bucket_name in bucket_names:
            if self._shutdown:
                return
            bucket_path = self.storage_root / bucket_name
            for versions_root in (
                self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
                self.storage_root / bucket_name / ".versions",
            ):
                if not versions_root.exists():
                    continue
                try:
                    for key_dir in versions_root.iterdir():
                        if self._throttle():
                            return
                        if not key_dir.is_dir():
                            continue
                        self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
                except OSError as e:
                    result.errors.append(f"scan versions {bucket_name}: {e}")

    def _clean_versions_for_key(
        self, bucket_path: Path, versions_root: Path, key_dir: Path, result: GCResult
    ) -> None:
        try:
            rel = key_dir.relative_to(versions_root)
        except ValueError:
            return

        object_path = bucket_path / rel
        if object_path.exists():
            return

        version_files = list(key_dir.glob("*.bin")) + list(key_dir.glob("*.json"))
        if not version_files:
            return

        for vf in version_files:
            try:
                size = vf.stat().st_size if vf.suffix == ".bin" else 0
                if not self.dry_run:
                    vf.unlink(missing_ok=True)
                if vf.suffix == ".bin":
                    result.orphaned_version_bytes_freed += size
                result.orphaned_versions_deleted += 1
            except OSError as e:
                result.errors.append(f"version file {vf.name}: {e}")

    def _clean_empty_dirs(self, result: GCResult) -> None:
        targets = [
            self._system_path() / self.SYSTEM_TMP_DIR,
            self._system_path() / self.SYSTEM_MULTIPART_DIR,
            self._system_path() / self.SYSTEM_BUCKETS_DIR,
        ]
        for bucket_name in self._list_bucket_names():
            targets.append(self.storage_root / bucket_name / ".meta")
            targets.append(self.storage_root / bucket_name / ".versions")
            targets.append(self.storage_root / bucket_name / ".multipart")

        for root in targets:
            if not root.exists():
                continue
            self._remove_empty_dirs_recursive(root, root, result)

    def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
        if self._shutdown:
            return False
        if not path.is_dir():
            return False

        try:
            children = list(path.iterdir())
        except OSError:
            return False

        all_empty = True
        for child in children:
            if self._throttle():
                return False
            if child.is_dir():
                if not self._remove_empty_dirs_recursive(child, stop_at, result):
                    all_empty = False
            else:
                all_empty = False

        if all_empty and path != stop_at:
            try:
                if not self.dry_run:
                    path.rmdir()
                result.empty_dirs_removed += 1
                return True
            except OSError:
                return False
        return all_empty

    def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
        records = self.history_store.get_history(limit, offset)
        return [r.to_dict() for r in records]

    def get_status(self) -> dict:
        status: Dict[str, Any] = {
            "enabled": (not self._shutdown) or (self._timer is not None),
            "running": self._timer is not None and not self._shutdown,
            "scanning": self._scanning,
            "interval_hours": self.interval_seconds / 3600.0,
            "temp_file_max_age_hours": self.temp_file_max_age_hours,
            "multipart_max_age_days": self.multipart_max_age_days,
            "lock_file_max_age_hours": self.lock_file_max_age_hours,
            "dry_run": self.dry_run,
            "io_throttle_ms": round(self._io_throttle * 1000),
        }
        if self._scanning and self._scan_start_time:
            status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
        return status

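# Illustrative wiring of the collector above (a sketch, not part of the
# diff): a synchronous dry-run pass, then the recurring background schedule.
# The storage root is a placeholder.
gc = GarbageCollector(Path("data"), interval_hours=6.0)
report = gc.run_now(dry_run=True)   # nothing is deleted on a dry run
print(report.total_bytes_freed, report.to_dict()["errors"])

gc.start()                          # daemonized background timer
# ... application runs ...
gc.stop()
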
python/app/iam.py (Normal file, 1095 lines)
File diff suppressed because it is too large

python/app/integrity.py (Normal file, 995 lines)
@@ -0,0 +1,995 @@
from __future__ import annotations

import hashlib
import json
import logging
import os
import threading
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

try:
    import myfsio_core as _rc
    if not hasattr(_rc, "md5_file"):
        raise ImportError("myfsio_core is outdated, rebuild with: cd myfsio_core && maturin develop --release")
    _HAS_RUST = True
except ImportError:
    _HAS_RUST = False

logger = logging.getLogger(__name__)

def _compute_etag(path: Path) -> str:
    if _HAS_RUST:
        return _rc.md5_file(str(path))
    checksum = hashlib.md5()
    with path.open("rb") as handle:
        for chunk in iter(lambda: handle.read(8192), b""):
            checksum.update(chunk)
    return checksum.hexdigest()

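# The fallback branch is the standard incremental-MD5 idiom, so both branches
# are expected to return the same hex digest (assuming the Rust md5_file
# agrees with hashlib, which the Python path guarantees). A quick
# illustrative equivalence check; the path is a placeholder:
#
#   p = Path("/tmp/etag-demo.bin")
#   p.write_bytes(b"abc" * 10_000)
#   assert _compute_etag(p) == hashlib.md5(p.read_bytes()).hexdigest()
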
@dataclass
class IntegrityIssue:
    issue_type: str
    bucket: str
    key: str
    detail: str
    healed: bool = False
    heal_action: str = ""

    def to_dict(self) -> dict:
        return {
            "issue_type": self.issue_type,
            "bucket": self.bucket,
            "key": self.key,
            "detail": self.detail,
            "healed": self.healed,
            "heal_action": self.heal_action,
        }


@dataclass
class IntegrityResult:
    corrupted_objects: int = 0
    orphaned_objects: int = 0
    phantom_metadata: int = 0
    stale_versions: int = 0
    etag_cache_inconsistencies: int = 0
    legacy_metadata_drifts: int = 0
    issues_healed: int = 0
    issues: List[IntegrityIssue] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    objects_scanned: int = 0
    buckets_scanned: int = 0
    execution_time_seconds: float = 0.0

    def to_dict(self) -> dict:
        return {
            "corrupted_objects": self.corrupted_objects,
            "orphaned_objects": self.orphaned_objects,
            "phantom_metadata": self.phantom_metadata,
            "stale_versions": self.stale_versions,
            "etag_cache_inconsistencies": self.etag_cache_inconsistencies,
            "legacy_metadata_drifts": self.legacy_metadata_drifts,
            "issues_healed": self.issues_healed,
            "issues": [i.to_dict() for i in self.issues],
            "errors": self.errors,
            "objects_scanned": self.objects_scanned,
            "buckets_scanned": self.buckets_scanned,
            "execution_time_seconds": self.execution_time_seconds,
        }

    @property
    def total_issues(self) -> int:
        return (
            self.corrupted_objects
            + self.orphaned_objects
            + self.phantom_metadata
            + self.stale_versions
            + self.etag_cache_inconsistencies
            + self.legacy_metadata_drifts
        )

    @property
    def has_issues(self) -> bool:
        return self.total_issues > 0


@dataclass
class IntegrityExecutionRecord:
    timestamp: float
    result: dict
    dry_run: bool
    auto_heal: bool

    def to_dict(self) -> dict:
        return {
            "timestamp": self.timestamp,
            "result": self.result,
            "dry_run": self.dry_run,
            "auto_heal": self.auto_heal,
        }

    @classmethod
    def from_dict(cls, data: dict) -> IntegrityExecutionRecord:
        return cls(
            timestamp=data["timestamp"],
            result=data["result"],
            dry_run=data.get("dry_run", False),
            auto_heal=data.get("auto_heal", False),
        )

class IntegrityHistoryStore:
    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_history.json"

    def load(self) -> List[IntegrityExecutionRecord]:
        path = self._get_path()
        if not path.exists():
            return []
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            return [IntegrityExecutionRecord.from_dict(d) for d in data.get("executions", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error("Failed to load integrity history: %s", e)
            return []

    def save(self, records: List[IntegrityExecutionRecord]) -> None:
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"executions": [r.to_dict() for r in records[: self.max_records]]}
        try:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error("Failed to save integrity history: %s", e)

    def add(self, record: IntegrityExecutionRecord) -> None:
        with self._lock:
            records = self.load()
            records.insert(0, record)
            self.save(records)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[IntegrityExecutionRecord]:
        return self.load()[offset : offset + limit]

class IntegrityCursorStore:
    def __init__(self, storage_root: Path) -> None:
        self.storage_root = storage_root
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"

    def load(self) -> Dict[str, Any]:
        path = self._get_path()
        if not path.exists():
            return {"buckets": {}}
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            if not isinstance(data.get("buckets"), dict):
                return {"buckets": {}}
            return data
        except (OSError, ValueError, KeyError):
            return {"buckets": {}}

    def save(self, data: Dict[str, Any]) -> None:
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        try:
            with open(path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error("Failed to save integrity cursor: %s", e)

    def update_bucket(
        self,
        bucket_name: str,
        timestamp: float,
        last_key: Optional[str] = None,
        completed: bool = False,
    ) -> None:
        with self._lock:
            data = self.load()
            entry = data["buckets"].get(bucket_name, {})
            if completed:
                entry["last_scanned"] = timestamp
                entry.pop("last_key", None)
                entry["completed"] = True
            else:
                entry["last_scanned"] = timestamp
                if last_key is not None:
                    entry["last_key"] = last_key
                entry["completed"] = False
            data["buckets"][bucket_name] = entry
            self.save(data)

    def clean_stale(self, existing_buckets: List[str]) -> None:
        with self._lock:
            data = self.load()
            existing_set = set(existing_buckets)
            stale_keys = [k for k in data["buckets"] if k not in existing_set]
            if stale_keys:
                for k in stale_keys:
                    del data["buckets"][k]
                self.save(data)

    def get_last_key(self, bucket_name: str) -> Optional[str]:
        data = self.load()
        entry = data.get("buckets", {}).get(bucket_name)
        if entry is None:
            return None
        return entry.get("last_key")

    def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
        data = self.load()
        buckets_info = data.get("buckets", {})

        incomplete = []
        complete = []
        for name in bucket_names:
            entry = buckets_info.get(name)
            if entry is None:
                incomplete.append((name, 0.0))
            elif entry.get("last_key") is not None:
                incomplete.append((name, entry.get("last_scanned", 0.0)))
            else:
                complete.append((name, entry.get("last_scanned", 0.0)))

        incomplete.sort(key=lambda x: x[1])
        complete.sort(key=lambda x: x[1])

        return [n for n, _ in incomplete] + [n for n, _ in complete]

    def get_info(self) -> Dict[str, Any]:
        data = self.load()
        buckets = data.get("buckets", {})
        return {
            "tracked_buckets": len(buckets),
            "buckets": {
                name: {
                    "last_scanned": info.get("last_scanned"),
                    "last_key": info.get("last_key"),
                    "completed": info.get("completed", False),
                }
                for name, info in buckets.items()
            },
        }

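# Worked (illustrative) example of the ordering get_bucket_order produces.
# Given this hypothetical cursor state:
#   {"buckets": {"a": {"last_scanned": 100.0, "completed": True},
#                "b": {"last_scanned": 50.0, "last_key": "photos/img9.png"},
#                "c": {"last_scanned": 200.0, "completed": True}}}
# and bucket_names == ["a", "b", "c", "d"] where d has never been scanned,
# the result is ["d", "b", "a", "c"]: never-scanned and interrupted buckets
# come first (oldest scan first), then fully scanned buckets, oldest first.
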
MAX_ISSUES = 500


class IntegrityChecker:
    SYSTEM_ROOT = ".myfsio.sys"
    SYSTEM_BUCKETS_DIR = "buckets"
    BUCKET_META_DIR = "meta"
    BUCKET_VERSIONS_DIR = "versions"
    INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}

    def __init__(
        self,
        storage_root: Path,
        interval_hours: float = 24.0,
        batch_size: int = 1000,
        auto_heal: bool = False,
        dry_run: bool = False,
        max_history: int = 50,
        io_throttle_ms: int = 10,
    ) -> None:
        self.storage_root = Path(storage_root)
        self.interval_seconds = interval_hours * 3600.0
        self.batch_size = batch_size
        self.auto_heal = auto_heal
        self.dry_run = dry_run
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        self._lock = threading.Lock()
        self._scanning = False
        self._scan_start_time: Optional[float] = None
        self._io_throttle = max(0, io_throttle_ms) / 1000.0
        self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
        self.cursor_store = IntegrityCursorStore(self.storage_root)

    def start(self) -> None:
        if self._timer is not None:
            return
        self._shutdown = False
        self._schedule_next()
        logger.info(
            "Integrity checker started: interval=%.1fh, batch_size=%d, auto_heal=%s, dry_run=%s",
            self.interval_seconds / 3600.0,
            self.batch_size,
            self.auto_heal,
            self.dry_run,
        )

    def stop(self) -> None:
        self._shutdown = True
        if self._timer:
            self._timer.cancel()
            self._timer = None
        logger.info("Integrity checker stopped")

    def _schedule_next(self) -> None:
        if self._shutdown:
            return
        self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
        self._timer.daemon = True
        self._timer.start()

    def _run_cycle(self) -> None:
        if self._shutdown:
            return
        try:
            self.run_now()
        except Exception as e:
            logger.error("Integrity check cycle failed: %s", e)
        finally:
            self._schedule_next()

    def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("Integrity scan is already in progress")

        try:
            self._scanning = True
            self._scan_start_time = time.time()

            effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
            effective_dry_run = dry_run if dry_run is not None else self.dry_run

            start = self._scan_start_time
            result = IntegrityResult()

            bucket_names = self._list_bucket_names()
            self.cursor_store.clean_stale(bucket_names)
            ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)

            for bucket_name in ordered_buckets:
                if self._batch_exhausted(result):
                    break
                result.buckets_scanned += 1
                cursor_key = self.cursor_store.get_last_key(bucket_name)
                key_corrupted = self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
                key_orphaned = self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
                key_phantom = self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run, cursor_key)
                self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
                returned_keys = [k for k in (key_corrupted, key_orphaned, key_phantom) if k is not None]
                bucket_exhausted = self._batch_exhausted(result)
                if bucket_exhausted and returned_keys:
                    self.cursor_store.update_bucket(bucket_name, time.time(), last_key=min(returned_keys))
                else:
                    self.cursor_store.update_bucket(bucket_name, time.time(), completed=True)

            result.execution_time_seconds = time.time() - start

            if result.has_issues or result.errors:
                logger.info(
                    "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
                    "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
                    result.execution_time_seconds,
                    result.corrupted_objects,
                    result.orphaned_objects,
                    result.phantom_metadata,
                    result.stale_versions,
                    result.etag_cache_inconsistencies,
                    result.legacy_metadata_drifts,
                    result.issues_healed,
                    len(result.errors),
                    " (dry run)" if effective_dry_run else "",
                )

            record = IntegrityExecutionRecord(
                timestamp=time.time(),
                result=result.to_dict(),
                dry_run=effective_dry_run,
                auto_heal=effective_auto_heal,
            )
            self.history_store.add(record)

            return result
        finally:
            self._scanning = False
            self._scan_start_time = None
            self._lock.release()

    def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
        if self._scanning:
            return False
        t = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
        t.start()
        return True

    def _system_path(self) -> Path:
        return self.storage_root / self.SYSTEM_ROOT

    def _list_bucket_names(self) -> List[str]:
        names = []
        try:
            for entry in self.storage_root.iterdir():
                if entry.is_dir() and entry.name != self.SYSTEM_ROOT:
                    names.append(entry.name)
        except OSError:
            pass
        return names

    def _throttle(self) -> bool:
        if self._shutdown:
            return True
        if self._io_throttle > 0:
            time.sleep(self._io_throttle)
        return self._shutdown

    def _batch_exhausted(self, result: IntegrityResult) -> bool:
        return self._shutdown or result.objects_scanned >= self.batch_size

    def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
        if len(result.issues) < MAX_ISSUES:
            result.issues.append(issue)

    def _collect_index_keys(
        self, meta_root: Path, cursor_key: Optional[str] = None,
    ) -> Dict[str, Dict[str, Any]]:
        all_keys: Dict[str, Dict[str, Any]] = {}
        if not meta_root.exists():
            return all_keys
        try:
            for index_file in meta_root.rglob("_index.json"):
                if not index_file.is_file():
                    continue
                rel_dir = index_file.parent.relative_to(meta_root)
                dir_prefix = "" if rel_dir == Path(".") else rel_dir.as_posix()
                if cursor_key is not None and dir_prefix:
                    full_prefix = dir_prefix + "/"
                    if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                        continue
                try:
                    index_data = json.loads(index_file.read_text(encoding="utf-8"))
                except (OSError, json.JSONDecodeError):
                    continue
                for key_name, entry in index_data.items():
                    full_key = (dir_prefix + "/" + key_name) if dir_prefix else key_name
                    if cursor_key is not None and full_key <= cursor_key:
                        continue
                    all_keys[full_key] = {
                        "entry": entry,
                        "index_file": index_file,
                        "key_name": key_name,
                    }
        except OSError:
            pass
        return all_keys

    def _walk_bucket_files_sorted(
        self, bucket_path: Path, cursor_key: Optional[str] = None,
    ):
        def _walk(dir_path: Path, prefix: str):
            try:
                entries = list(os.scandir(dir_path))
            except OSError:
                return

            def _sort_key(e):
                if e.is_dir(follow_symlinks=False):
                    return e.name + "/"
                return e.name

            entries.sort(key=_sort_key)

            for entry in entries:
                if entry.is_dir(follow_symlinks=False):
                    if not prefix and entry.name in self.INTERNAL_FOLDERS:
                        continue
                    new_prefix = (prefix + "/" + entry.name) if prefix else entry.name
                    if cursor_key is not None:
                        full_prefix = new_prefix + "/"
                        if not cursor_key.startswith(full_prefix) and cursor_key > full_prefix:
                            continue
                    yield from _walk(Path(entry.path), new_prefix)
                elif entry.is_file(follow_symlinks=False):
                    full_key = (prefix + "/" + entry.name) if prefix else entry.name
                    if cursor_key is not None and full_key <= cursor_key:
                        continue
                    yield full_key

        yield from _walk(bucket_path, "")

    def _check_corrupted_objects(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
        cursor_key: Optional[str] = None,
    ) -> Optional[str]:
        if self._batch_exhausted(result):
            return None
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

        if not meta_root.exists():
            return None

        last_key = None
        try:
            all_keys = self._collect_index_keys(meta_root, cursor_key)
            sorted_keys = sorted(all_keys.keys())

            for full_key in sorted_keys:
                if self._throttle():
                    return last_key
                if self._batch_exhausted(result):
                    return last_key

                info = all_keys[full_key]
                entry = info["entry"]
                index_file = info["index_file"]
                key_name = info["key_name"]

                object_path = bucket_path / full_key
                if not object_path.exists():
                    continue

                result.objects_scanned += 1
                last_key = full_key

                meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
                stored_etag = meta.get("__etag__")
                if not stored_etag:
                    continue

                try:
                    actual_etag = _compute_etag(object_path)
                except OSError:
                    continue

                if actual_etag != stored_etag:
                    result.corrupted_objects += 1
                    issue = IntegrityIssue(
                        issue_type="corrupted_object",
                        bucket=bucket_name,
                        key=full_key,
                        detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
                    )

                    if auto_heal and not dry_run:
                        try:
                            stat = object_path.stat()
                            meta["__etag__"] = actual_etag
                            meta["__size__"] = str(stat.st_size)
                            meta["__last_modified__"] = str(stat.st_mtime)
                            try:
                                index_data = json.loads(index_file.read_text(encoding="utf-8"))
                            except (OSError, json.JSONDecodeError):
                                index_data = {}
                            index_data[key_name] = {"metadata": meta}
                            self._atomic_write_index(index_file, index_data)
                            issue.healed = True
                            issue.heal_action = "updated etag in index"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")

                    self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check corrupted {bucket_name}: {e}")
        return last_key

    def _check_orphaned_objects(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
        cursor_key: Optional[str] = None,
    ) -> Optional[str]:
        if self._batch_exhausted(result):
            return None
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

        last_key = None
        try:
            for full_key in self._walk_bucket_files_sorted(bucket_path, cursor_key):
                if self._throttle():
                    return last_key
                if self._batch_exhausted(result):
                    return last_key

                result.objects_scanned += 1
                last_key = full_key
                key_path = Path(full_key)
                key_name = key_path.name
                parent = key_path.parent

                if parent == Path("."):
                    index_path = meta_root / "_index.json"
                else:
                    index_path = meta_root / parent / "_index.json"

                has_entry = False
                if index_path.exists():
                    try:
                        index_data = json.loads(index_path.read_text(encoding="utf-8"))
                        has_entry = key_name in index_data
                    except (OSError, json.JSONDecodeError):
                        pass

                if not has_entry:
                    result.orphaned_objects += 1
                    issue = IntegrityIssue(
                        issue_type="orphaned_object",
                        bucket=bucket_name,
                        key=full_key,
                        detail="file exists without metadata entry",
                    )

                    if auto_heal and not dry_run:
                        try:
                            object_path = bucket_path / full_key
                            etag = _compute_etag(object_path)
                            stat = object_path.stat()
                            meta = {
                                "__etag__": etag,
                                "__size__": str(stat.st_size),
                                "__last_modified__": str(stat.st_mtime),
                            }
                            index_data = {}
                            if index_path.exists():
                                try:
                                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                                except (OSError, json.JSONDecodeError):
                                    pass
                            index_data[key_name] = {"metadata": meta}
                            self._atomic_write_index(index_path, index_data)
                            issue.healed = True
                            issue.heal_action = "created metadata entry"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal orphaned {bucket_name}/{full_key}: {e}")

                    self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check orphaned {bucket_name}: {e}")
        return last_key

    def _check_phantom_metadata(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool,
        cursor_key: Optional[str] = None,
    ) -> Optional[str]:
        if self._batch_exhausted(result):
            return None
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

        if not meta_root.exists():
            return None

        last_key = None
        try:
            all_keys = self._collect_index_keys(meta_root, cursor_key)
            sorted_keys = sorted(all_keys.keys())

            heal_by_index: Dict[Path, List[str]] = {}

            for full_key in sorted_keys:
                if self._batch_exhausted(result):
                    break

                result.objects_scanned += 1
                last_key = full_key

                object_path = bucket_path / full_key
                if not object_path.exists():
                    result.phantom_metadata += 1
                    info = all_keys[full_key]
                    issue = IntegrityIssue(
                        issue_type="phantom_metadata",
                        bucket=bucket_name,
                        key=full_key,
                        detail="metadata entry without file on disk",
                    )
                    if auto_heal and not dry_run:
                        index_file = info["index_file"]
                        heal_by_index.setdefault(index_file, []).append(info["key_name"])
                        issue.healed = True
                        issue.heal_action = "removed stale index entry"
                        result.issues_healed += 1
                    self._add_issue(result, issue)

            if heal_by_index and auto_heal and not dry_run:
                for index_file, keys_to_remove in heal_by_index.items():
                    try:
                        index_data = json.loads(index_file.read_text(encoding="utf-8"))
                        for k in keys_to_remove:
                            index_data.pop(k, None)
                        if index_data:
                            self._atomic_write_index(index_file, index_data)
                        else:
                            index_file.unlink(missing_ok=True)
                    except OSError as e:
                        result.errors.append(f"heal phantom {bucket_name}: {e}")
        except OSError as e:
            result.errors.append(f"check phantom {bucket_name}: {e}")
        return last_key

    def _check_stale_versions(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        if self._batch_exhausted(result):
            return
        versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR

        if not versions_root.exists():
            return

        try:
            for key_dir in versions_root.rglob("*"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not key_dir.is_dir():
                    continue

                bin_files = {f.stem: f for f in key_dir.glob("*.bin")}
                json_files = {f.stem: f for f in key_dir.glob("*.json")}

                for stem, bin_file in bin_files.items():
                    if self._batch_exhausted(result):
                        return
                    result.objects_scanned += 1
                    if stem not in json_files:
                        result.stale_versions += 1
                        issue = IntegrityIssue(
                            issue_type="stale_version",
                            bucket=bucket_name,
                            key=f"{key_dir.relative_to(versions_root).as_posix()}/{bin_file.name}",
                            detail="version data without manifest",
                        )
                        if auto_heal and not dry_run:
                            try:
                                bin_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "removed orphaned version data"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal stale version {bin_file}: {e}")
                        self._add_issue(result, issue)

                for stem, json_file in json_files.items():
                    if self._batch_exhausted(result):
                        return
                    result.objects_scanned += 1
                    if stem not in bin_files:
                        result.stale_versions += 1
                        issue = IntegrityIssue(
                            issue_type="stale_version",
                            bucket=bucket_name,
                            key=f"{key_dir.relative_to(versions_root).as_posix()}/{json_file.name}",
                            detail="version manifest without data",
                        )
                        if auto_heal and not dry_run:
                            try:
                                json_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "removed orphaned version manifest"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal stale version {json_file}: {e}")
                        self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check stale versions {bucket_name}: {e}")

    def _check_etag_cache(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        if self._batch_exhausted(result):
            return
        etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"

        if not etag_index_path.exists():
            return

        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        if not meta_root.exists():
            return

        try:
            etag_cache = json.loads(etag_index_path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            return

        found_mismatch = False

        for full_key, cached_etag in etag_cache.items():
            if self._batch_exhausted(result):
                break
            result.objects_scanned += 1
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent

            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"

            if not index_path.exists():
                continue

            try:
                index_data = json.loads(index_path.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                continue

            entry = index_data.get(key_name)
            if not entry:
                continue

            meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
            stored_etag = meta.get("__etag__")

            if stored_etag and cached_etag != stored_etag:
                result.etag_cache_inconsistencies += 1
                found_mismatch = True
                issue = IntegrityIssue(
                    issue_type="etag_cache_inconsistency",
                    bucket=bucket_name,
                    key=full_key,
                    detail=f"cached_etag={cached_etag} index_etag={stored_etag}",
                )
                self._add_issue(result, issue)

        if found_mismatch and auto_heal and not dry_run:
            try:
                etag_index_path.unlink(missing_ok=True)
                for issue in result.issues:
                    if issue.issue_type == "etag_cache_inconsistency" and issue.bucket == bucket_name and not issue.healed:
                        issue.healed = True
                        issue.heal_action = "deleted etag_index.json"
                        result.issues_healed += 1
            except OSError as e:
                result.errors.append(f"heal etag cache {bucket_name}: {e}")

    def _check_legacy_metadata(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        if self._batch_exhausted(result):
            return
        legacy_meta_root = self.storage_root / bucket_name / ".meta"
        if not legacy_meta_root.exists():
            return

        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR

        try:
            for meta_file in legacy_meta_root.rglob("*.meta.json"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not meta_file.is_file():
                    continue

                result.objects_scanned += 1
                try:
                    rel = meta_file.relative_to(legacy_meta_root)
                except ValueError:
                    continue

                full_key = rel.as_posix().removesuffix(".meta.json")
                key_path = Path(full_key)
                key_name = key_path.name
                parent = key_path.parent

                if parent == Path("."):
                    index_path = meta_root / "_index.json"
                else:
                    index_path = meta_root / parent / "_index.json"

                try:
                    legacy_data = json.loads(meta_file.read_text(encoding="utf-8"))
                except (OSError, json.JSONDecodeError):
                    continue

                index_entry = None
                if index_path.exists():
                    try:
                        index_data = json.loads(index_path.read_text(encoding="utf-8"))
                        index_entry = index_data.get(key_name)
                    except (OSError, json.JSONDecodeError):
                        pass

                if index_entry is None:
                    result.legacy_metadata_drifts += 1
                    issue = IntegrityIssue(
                        issue_type="legacy_metadata_drift",
                        bucket=bucket_name,
                        key=full_key,
                        detail="unmigrated legacy .meta.json",
                    )

                    if auto_heal and not dry_run:
                        try:
                            index_data = {}
                            if index_path.exists():
                                try:
                                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                                except (OSError, json.JSONDecodeError):
                                    pass
                            index_data[key_name] = {"metadata": legacy_data}
                            self._atomic_write_index(index_path, index_data)
                            meta_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "migrated to index and deleted legacy file"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal legacy {bucket_name}/{full_key}: {e}")

                    self._add_issue(result, issue)
                else:
                    index_meta = index_entry.get("metadata", {}) if isinstance(index_entry, dict) else {}
                    if legacy_data != index_meta:
                        result.legacy_metadata_drifts += 1
                        issue = IntegrityIssue(
                            issue_type="legacy_metadata_drift",
                            bucket=bucket_name,
                            key=full_key,
                            detail="legacy .meta.json differs from index entry",
                        )

                        if auto_heal and not dry_run:
                            try:
                                meta_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "deleted legacy file (index is authoritative)"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal legacy drift {bucket_name}/{full_key}: {e}")

                        self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check legacy meta {bucket_name}: {e}")

    @staticmethod
    def _atomic_write_index(index_path: Path, data: Dict[str, Any]) -> None:
        index_path.parent.mkdir(parents=True, exist_ok=True)
        tmp_path = index_path.with_suffix(".tmp")
        try:
            with open(tmp_path, "w", encoding="utf-8") as f:
                json.dump(data, f)
            os.replace(str(tmp_path), str(index_path))
        except BaseException:
            try:
                tmp_path.unlink(missing_ok=True)
            except OSError:
                pass
            raise
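
    # The helper above is the standard temp-file-plus-rename pattern: the index
    # is serialized to "<name>.tmp" first and then moved over the real path with
    # os.replace(), which is an atomic rename on both POSIX and Windows, so a
    # concurrent reader sees either the old index or the new one, never a torn
    # file. A minimal standalone sketch of the same idea (illustrative paths and
    # payload only, not part of this class):
    #
    #     payload = {"key.txt": {"metadata": {"__etag__": "abc"}}}
    #     tmp = Path("meta/_index.tmp")
    #     tmp.parent.mkdir(parents=True, exist_ok=True)
    #     tmp.write_text(json.dumps(payload), encoding="utf-8")
    #     os.replace(tmp, Path("meta/_index.json"))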

    def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
        records = self.history_store.get_history(limit, offset)
        return [r.to_dict() for r in records]

    def get_status(self) -> dict:
        status: Dict[str, Any] = {
            "enabled": not self._shutdown or self._timer is not None,
            "running": self._timer is not None and not self._shutdown,
            "scanning": self._scanning,
            "interval_hours": self.interval_seconds / 3600.0,
            "batch_size": self.batch_size,
            "auto_heal": self.auto_heal,
            "dry_run": self.dry_run,
            "io_throttle_ms": round(self._io_throttle * 1000),
        }
        if self._scanning and self._scan_start_time is not None:
            status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
        status["cursor"] = self.cursor_store.get_info()
        return status

@@ -1,9 +1,12 @@
"""Key Management Service (KMS) for encryption key management."""
from __future__ import annotations

import base64
import json
import logging
import os
import secrets
import subprocess
import sys
import uuid
from dataclasses import dataclass, field
from datetime import datetime, timezone
@@ -14,6 +17,30 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM

from .encryption import EncryptionError, EncryptionProvider, EncryptionResult

if sys.platform != "win32":
    import fcntl

logger = logging.getLogger(__name__)


def _set_secure_file_permissions(file_path: Path) -> None:
    """Set restrictive file permissions (owner read/write only)."""
    if sys.platform == "win32":
        try:
            username = os.environ.get("USERNAME", "")
            if username:
                subprocess.run(
                    ["icacls", str(file_path), "/inheritance:r",
                     "/grant:r", f"{username}:F"],
                    check=True, capture_output=True
                )
            else:
                logger.warning("Could not set secure permissions on %s: USERNAME not set", file_path)
        except (subprocess.SubprocessError, OSError) as exc:
            logger.warning("Failed to set secure permissions on %s: %s", file_path, exc)
    else:
        os.chmod(file_path, 0o600)
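
# On POSIX the os.chmod(file_path, 0o600) above is the whole story: owner
# read/write, nothing for group or world. The Windows branch approximates the
# same policy with icacls: "/inheritance:r" strips inherited ACEs and
# "/grant:r <user>:F" leaves the current user as the only grantee. A quick
# POSIX-only sanity check (illustrative, assumes `file_path` exists):
#
#     import stat
#     assert stat.S_IMODE(os.stat(file_path).st_mode) == 0o600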


@dataclass
class KMSKey:
@@ -75,11 +102,11 @@ class KMSEncryptionProvider(EncryptionProvider):
    def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
        """Encrypt data using envelope encryption with KMS."""
        data_key, encrypted_data_key = self.generate_data_key()

        aesgcm = AESGCM(data_key)
        nonce = secrets.token_bytes(12)
        ciphertext = aesgcm.encrypt(nonce, plaintext,
                                    json.dumps(context).encode() if context else None)
        ciphertext = aesgcm.encrypt(nonce, plaintext,
                                    json.dumps(context, sort_keys=True).encode() if context else None)

        return EncryptionResult(
            ciphertext=ciphertext,
@@ -91,15 +118,26 @@ class KMSEncryptionProvider(EncryptionProvider):
    def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                key_id: str, context: Dict[str, str] | None = None) -> bytes:
        """Decrypt data using envelope encryption with KMS."""
        # Note: Data key is encrypted without context (AAD), so we decrypt without context
        data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)

        if len(data_key) != 32:
            raise EncryptionError("Invalid data key size")

        aesgcm = AESGCM(data_key)
        try:
            return aesgcm.decrypt(nonce, ciphertext,
                                  json.dumps(context).encode() if context else None)
                                  json.dumps(context, sort_keys=True).encode() if context else None)
        except Exception as exc:
            raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
            logger.debug("KMS decryption failed: %s", exc)
            raise EncryptionError("Failed to decrypt data") from exc

    def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
        """Decrypt an encrypted data key using KMS."""
        if key_id is None:
            key_id = self.key_id
        data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)
        if len(data_key) != 32:
            raise EncryptionError("Invalid data key size")
        return data_key


class KMSManager:
@@ -109,27 +147,52 @@ class KMSManager:

    Keys are stored encrypted on disk.
    """

    def __init__(self, keys_path: Path, master_key_path: Path):
    def __init__(
        self,
        keys_path: Path,
        master_key_path: Path,
        generate_data_key_min_bytes: int = 1,
        generate_data_key_max_bytes: int = 1024,
    ):
        self.keys_path = keys_path
        self.master_key_path = master_key_path
        self.generate_data_key_min_bytes = generate_data_key_min_bytes
        self.generate_data_key_max_bytes = generate_data_key_max_bytes
        self._keys: Dict[str, KMSKey] = {}
        self._master_key: bytes | None = None
        self._master_aesgcm: AESGCM | None = None
        self._loaded = False

    @property
    def master_key(self) -> bytes:
        """Load or create the master key for encrypting KMS keys."""
        """Load or create the master key for encrypting KMS keys (with file locking)."""
        if self._master_key is None:
            if self.master_key_path.exists():
                self._master_key = base64.b64decode(
                    self.master_key_path.read_text().strip()
                )
            else:
                self._master_key = secrets.token_bytes(32)
                self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
                self.master_key_path.write_text(
                    base64.b64encode(self._master_key).decode()
                )
            lock_path = self.master_key_path.with_suffix(".lock")
            lock_path.parent.mkdir(parents=True, exist_ok=True)
            with open(lock_path, "w") as lock_file:
                if sys.platform == "win32":
                    import msvcrt
                    msvcrt.locking(lock_file.fileno(), msvcrt.LK_LOCK, 1)
                else:
                    fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
                try:
                    if self.master_key_path.exists():
                        self._master_key = base64.b64decode(
                            self.master_key_path.read_text().strip()
                        )
                    else:
                        self._master_key = secrets.token_bytes(32)
                        self.master_key_path.write_text(
                            base64.b64encode(self._master_key).decode()
                        )
                        _set_secure_file_permissions(self.master_key_path)
                finally:
                    if sys.platform == "win32":
                        import msvcrt
                        msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
                    else:
                        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
            self._master_aesgcm = AESGCM(self._master_key)
        return self._master_key
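
    # The second exists() check inside the locked region is the point of the
    # rewrite: two processes racing to create the master key both serialize on
    # the exclusive lock over the ".lock" sidecar, and whichever loses the race
    # re-reads the winner's key instead of generating (and persisting) a second
    # one. The same idea as a reusable POSIX-only helper (a sketch under that
    # assumption, not part of this module):
    #
    #     from contextlib import contextmanager
    #
    #     @contextmanager
    #     def exclusive_file_lock(lock_path: Path):
    #         with open(lock_path, "w") as fh:
    #             fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
    #             try:
    #                 yield
    #             finally:
    #                 fcntl.flock(fh.fileno(), fcntl.LOCK_UN)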

    def _load_keys(self) -> None:
@@ -146,8 +209,10 @@ class KMSManager:
                encrypted = base64.b64decode(key_data["EncryptedKeyMaterial"])
                key.key_material = self._decrypt_key_material(encrypted)
                self._keys[key.key_id] = key
        except Exception:
            pass
        except json.JSONDecodeError as exc:
            logger.error("Failed to parse KMS keys file: %s", exc)
        except (ValueError, KeyError) as exc:
            logger.error("Invalid KMS key data: %s", exc)

        self._loaded = True

@@ -159,26 +224,25 @@ class KMSManager:
            encrypted = self._encrypt_key_material(key.key_material)
            data["EncryptedKeyMaterial"] = base64.b64encode(encrypted).decode()
            keys_data.append(data)

        self.keys_path.parent.mkdir(parents=True, exist_ok=True)
        self.keys_path.write_text(
            json.dumps({"keys": keys_data}, indent=2),
            encoding="utf-8"
        )
        _set_secure_file_permissions(self.keys_path)

    def _encrypt_key_material(self, key_material: bytes) -> bytes:
        """Encrypt key material with the master key."""
        aesgcm = AESGCM(self.master_key)
        _ = self.master_key
        nonce = secrets.token_bytes(12)
        ciphertext = aesgcm.encrypt(nonce, key_material, None)
        ciphertext = self._master_aesgcm.encrypt(nonce, key_material, None)
        return nonce + ciphertext

    def _decrypt_key_material(self, encrypted: bytes) -> bytes:
        """Decrypt key material with the master key."""
        aesgcm = AESGCM(self.master_key)
        _ = self.master_key
        nonce = encrypted[:12]
        ciphertext = encrypted[12:]
        return aesgcm.decrypt(nonce, ciphertext, None)
        return self._master_aesgcm.decrypt(nonce, ciphertext, None)

    def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
        """Create a new KMS key."""
@@ -211,7 +275,27 @@ class KMSManager:
        """List all keys."""
        self._load_keys()
        return list(self._keys.values())

    def get_default_key_id(self) -> str:
        """Get the default KMS key ID, creating one if none exist."""
        self._load_keys()
        for key in self._keys.values():
            if key.enabled:
                return key.key_id
        default_key = self.create_key(description="Default KMS Key")
        return default_key.key_id

    def get_provider(self, key_id: str | None = None) -> "KMSEncryptionProvider":
        """Get a KMS encryption provider for the specified key."""
        if key_id is None:
            key_id = self.get_default_key_id()
        key = self.get_key(key_id)
        if not key:
            raise EncryptionError(f"Key not found: {key_id}")
        if not key.enabled:
            raise EncryptionError(f"Key is disabled: {key_id}")
        return KMSEncryptionProvider(self, key_id)

    def enable_key(self, key_id: str) -> None:
        """Enable a key."""
        self._load_keys()
@@ -250,7 +334,7 @@ class KMSManager:

        aesgcm = AESGCM(key.key_material)
        nonce = secrets.token_bytes(12)
        aad = json.dumps(context).encode() if context else None
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

        key_id_bytes = key_id.encode("utf-8")
@@ -279,17 +363,24 @@ class KMSManager:
        encrypted = rest[12:]

        aesgcm = AESGCM(key.key_material)
        aad = json.dumps(context).encode() if context else None
        aad = json.dumps(context, sort_keys=True).encode() if context else None
        try:
            plaintext = aesgcm.decrypt(nonce, encrypted, aad)
            return plaintext, key_id
        except Exception as exc:
            raise EncryptionError(f"Decryption failed: {exc}") from exc
            logger.debug("KMS decrypt operation failed: %s", exc)
            raise EncryptionError("Decryption failed") from exc

    def generate_data_key(self, key_id: str,
                          context: Dict[str, str] | None = None) -> tuple[bytes, bytes]:
                          context: Dict[str, str] | None = None,
                          key_spec: str = "AES_256") -> tuple[bytes, bytes]:
        """Generate a data key and return both plaintext and encrypted versions.

        Args:
            key_id: The KMS key ID to use for encryption
            context: Optional encryption context
            key_spec: Key specification - AES_128 or AES_256 (default)

        Returns:
            Tuple of (plaintext_key, encrypted_key)
        """
@@ -299,11 +390,12 @@ class KMSManager:
            raise EncryptionError(f"Key not found: {key_id}")
        if not key.enabled:
            raise EncryptionError(f"Key is disabled: {key_id}")

        plaintext_key = secrets.token_bytes(32)
        key_bytes = 32 if key_spec == "AES_256" else 16
        plaintext_key = secrets.token_bytes(key_bytes)

        encrypted_key = self.encrypt(key_id, plaintext_key, context)

        return plaintext_key, encrypted_key
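
    # Typical envelope-encryption round trip with the pair returned above
    # (a usage sketch; `manager` and `key_id` are assumed to exist, and
    # decrypt_data_key is called without an explicit context):
    #
    #     plaintext_key, encrypted_key = manager.generate_data_key(key_id)
    #     aesgcm = AESGCM(plaintext_key)          # encrypt bulk data locally
    #     # persist `encrypted_key` alongside the ciphertext, then later:
    #     plaintext_key = manager.decrypt_data_key(key_id, encrypted_key)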

    def decrypt_data_key(self, key_id: str, encrypted_key: bytes,
@@ -312,22 +404,6 @@ class KMSManager:
        plaintext, _ = self.decrypt(encrypted_key, context)
        return plaintext

    def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
        """Get an encryption provider for a specific key."""
        self._load_keys()

        if key_id is None:
            if not self._keys:
                key = self.create_key("Default KMS Key")
                key_id = key.key_id
            else:
                key_id = next(iter(self._keys.keys()))

        if key_id not in self._keys:
            raise EncryptionError(f"Key not found: {key_id}")

        return KMSEncryptionProvider(self, key_id)

    def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
                   source_context: Dict[str, str] | None = None,
                   destination_context: Dict[str, str] | None = None) -> bytes:
@@ -339,6 +415,8 @@ class KMSManager:

    def generate_random(self, num_bytes: int = 32) -> bytes:
        """Generate cryptographically secure random bytes."""
        if num_bytes < 1 or num_bytes > 1024:
            raise EncryptionError("Number of bytes must be between 1 and 1024")
        if num_bytes < self.generate_data_key_min_bytes or num_bytes > self.generate_data_key_max_bytes:
            raise EncryptionError(
                f"Number of bytes must be between {self.generate_data_key_min_bytes} and {self.generate_data_key_max_bytes}"
            )
        return secrets.token_bytes(num_bytes)
@@ -1,4 +1,3 @@
"""KMS and encryption API endpoints."""
from __future__ import annotations

import base64
@@ -33,9 +32,6 @@ def _encryption():
def _error_response(code: str, message: str, status: int) -> tuple[Dict[str, Any], int]:
    return {"__type": code, "message": message}, status


# ---------------------- Key Management ----------------------

@kms_api_bp.route("/keys", methods=["GET", "POST"])
@limiter.limit("30 per minute")
def list_or_create_keys():
@@ -65,7 +61,6 @@ def list_or_create_keys():
    except EncryptionError as exc:
        return _error_response("KMSInternalException", str(exc), 400)

    # GET - List keys
    keys = kms.list_keys()
    return jsonify({
        "Keys": [{"KeyId": k.key_id, "KeyArn": k.arn} for k in keys],
@@ -96,7 +91,6 @@ def get_or_delete_key(key_id: str):
    except EncryptionError as exc:
        return _error_response("NotFoundException", str(exc), 404)

    # GET
    key = kms.get_key(key_id)
    if not key:
        return _error_response("NotFoundException", f"Key not found: {key_id}", 404)
@@ -149,9 +143,6 @@ def disable_key(key_id: str):
    except EncryptionError as exc:
        return _error_response("NotFoundException", str(exc), 404)


# ---------------------- Encryption Operations ----------------------

@kms_api_bp.route("/encrypt", methods=["POST"])
@limiter.limit("60 per minute")
def encrypt_data():
@@ -251,7 +242,6 @@ def generate_data_key():
    try:
        plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)

        # Trim key if AES_128 requested
        if key_spec == "AES_128":
            plaintext_key = plaintext_key[:16]

@@ -322,10 +312,7 @@ def re_encrypt():
        return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)

    try:
        # First decrypt, get source key id
        plaintext, source_key_id = kms.decrypt(ciphertext, source_context)

        # Re-encrypt with destination key
        new_ciphertext = kms.encrypt(destination_key_id, plaintext, destination_context)

        return jsonify({
@@ -365,9 +352,6 @@ def generate_random():
    except EncryptionError as exc:
        return _error_response("ValidationException", str(exc), 400)


# ---------------------- Client-Side Encryption Helpers ----------------------

@kms_api_bp.route("/client/generate-key", methods=["POST"])
@limiter.limit("30 per minute")
def generate_client_key():
@@ -427,9 +411,6 @@ def client_decrypt():
    except Exception as exc:
        return _error_response("DecryptionError", str(exc), 400)


# ---------------------- Encryption Materials for S3 Client-Side Encryption ----------------------

@kms_api_bp.route("/materials/<key_id>", methods=["POST"])
@limiter.limit("60 per minute")
def get_encryption_materials(key_id: str):

340
python/app/lifecycle.py
Normal file
@@ -0,0 +1,340 @@
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

from .storage import ObjectStorage, StorageError

logger = logging.getLogger(__name__)


@dataclass
class LifecycleResult:
    bucket_name: str
    objects_deleted: int = 0
    versions_deleted: int = 0
    uploads_aborted: int = 0
    errors: List[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0


@dataclass
class LifecycleExecutionRecord:
    timestamp: float
    bucket_name: str
    objects_deleted: int
    versions_deleted: int
    uploads_aborted: int
    errors: List[str]
    execution_time_seconds: float

    def to_dict(self) -> dict:
        return {
            "timestamp": self.timestamp,
            "bucket_name": self.bucket_name,
            "objects_deleted": self.objects_deleted,
            "versions_deleted": self.versions_deleted,
            "uploads_aborted": self.uploads_aborted,
            "errors": self.errors,
            "execution_time_seconds": self.execution_time_seconds,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "LifecycleExecutionRecord":
        return cls(
            timestamp=data["timestamp"],
            bucket_name=data["bucket_name"],
            objects_deleted=data["objects_deleted"],
            versions_deleted=data["versions_deleted"],
            uploads_aborted=data["uploads_aborted"],
            errors=data.get("errors", []),
            execution_time_seconds=data["execution_time_seconds"],
        )

    @classmethod
    def from_result(cls, result: LifecycleResult) -> "LifecycleExecutionRecord":
        return cls(
            timestamp=time.time(),
            bucket_name=result.bucket_name,
            objects_deleted=result.objects_deleted,
            versions_deleted=result.versions_deleted,
            uploads_aborted=result.uploads_aborted,
            errors=result.errors.copy(),
            execution_time_seconds=result.execution_time_seconds,
        )


class LifecycleHistoryStore:
    def __init__(self, storage_root: Path, max_history_per_bucket: int = 50) -> None:
        self.storage_root = storage_root
        self.max_history_per_bucket = max_history_per_bucket
        self._lock = threading.Lock()

    def _get_history_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "lifecycle_history.json"

    def load_history(self, bucket_name: str) -> List[LifecycleExecutionRecord]:
        path = self._get_history_path(bucket_name)
        if not path.exists():
            return []
        try:
            with open(path, "r") as f:
                data = json.load(f)
            return [LifecycleExecutionRecord.from_dict(d) for d in data.get("executions", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error(f"Failed to load lifecycle history for {bucket_name}: {e}")
            return []

    def save_history(self, bucket_name: str, records: List[LifecycleExecutionRecord]) -> None:
        path = self._get_history_path(bucket_name)
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"executions": [r.to_dict() for r in records[:self.max_history_per_bucket]]}
        try:
            with open(path, "w") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error(f"Failed to save lifecycle history for {bucket_name}: {e}")

    def add_record(self, bucket_name: str, record: LifecycleExecutionRecord) -> None:
        with self._lock:
            records = self.load_history(bucket_name)
            records.insert(0, record)
            self.save_history(bucket_name, records)

    def get_history(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[LifecycleExecutionRecord]:
        records = self.load_history(bucket_name)
        return records[offset:offset + limit]


class LifecycleManager:
    def __init__(
        self,
        storage: ObjectStorage,
        interval_seconds: int = 3600,
        storage_root: Optional[Path] = None,
        max_history_per_bucket: int = 50,
    ):
        self.storage = storage
        self.interval_seconds = interval_seconds
        self.storage_root = storage_root
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        self._lock = threading.Lock()
        self.history_store = LifecycleHistoryStore(storage_root, max_history_per_bucket) if storage_root else None

    def start(self) -> None:
        if self._timer is not None:
            return
        self._shutdown = False
        self._schedule_next()
        logger.info(f"Lifecycle manager started with interval {self.interval_seconds}s")

    def stop(self) -> None:
        self._shutdown = True
        if self._timer:
            self._timer.cancel()
            self._timer = None
        logger.info("Lifecycle manager stopped")

    def _schedule_next(self) -> None:
        if self._shutdown:
            return
        self._timer = threading.Timer(self.interval_seconds, self._run_enforcement)
        self._timer.daemon = True
        self._timer.start()

    def _run_enforcement(self) -> None:
        if self._shutdown:
            return
        try:
            self.enforce_all_buckets()
        except Exception as e:
            logger.error(f"Lifecycle enforcement failed: {e}")
        finally:
            self._schedule_next()

    def enforce_all_buckets(self) -> Dict[str, LifecycleResult]:
        results = {}
        try:
            buckets = self.storage.list_buckets()
            for bucket in buckets:
                result = self.enforce_rules(bucket.name)
                if result.objects_deleted > 0 or result.versions_deleted > 0 or result.uploads_aborted > 0:
                    results[bucket.name] = result
        except StorageError as e:
            logger.error(f"Failed to list buckets for lifecycle: {e}")
        return results

    def enforce_rules(self, bucket_name: str) -> LifecycleResult:
        start_time = time.time()
        result = LifecycleResult(bucket_name=bucket_name)

        try:
            lifecycle = self.storage.get_bucket_lifecycle(bucket_name)
            if not lifecycle:
                return result

            for rule in lifecycle:
                if rule.get("Status") != "Enabled":
                    continue
                rule_id = rule.get("ID", "unknown")
                prefix = rule.get("Prefix", rule.get("Filter", {}).get("Prefix", ""))

                self._enforce_expiration(bucket_name, rule, prefix, result)
                self._enforce_noncurrent_expiration(bucket_name, rule, prefix, result)
                self._enforce_abort_multipart(bucket_name, rule, result)

        except StorageError as e:
            result.errors.append(str(e))
            logger.error(f"Lifecycle enforcement error for {bucket_name}: {e}")

        result.execution_time_seconds = time.time() - start_time
        if result.objects_deleted > 0 or result.versions_deleted > 0 or result.uploads_aborted > 0 or result.errors:
            logger.info(
                f"Lifecycle enforcement for {bucket_name}: "
                f"deleted={result.objects_deleted}, versions={result.versions_deleted}, "
                f"aborted={result.uploads_aborted}, time={result.execution_time_seconds:.2f}s"
            )
            if self.history_store:
                record = LifecycleExecutionRecord.from_result(result)
                self.history_store.add_record(bucket_name, record)
        return result
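
    # Shape of the per-rule configuration the loop above consumes, mirroring
    # the keys it reads (an illustrative example, not a stored default):
    #
    #     [{"ID": "expire-tmp", "Status": "Enabled",
    #       "Filter": {"Prefix": "tmp/"},
    #       "Expiration": {"Days": 30},
    #       "NoncurrentVersionExpiration": {"NoncurrentDays": 7},
    #       "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 2}}]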

    def _enforce_expiration(
        self, bucket_name: str, rule: Dict[str, Any], prefix: str, result: LifecycleResult
    ) -> None:
        expiration = rule.get("Expiration", {})
        if not expiration:
            return

        days = expiration.get("Days")
        date_str = expiration.get("Date")

        if days:
            cutoff = datetime.now(timezone.utc) - timedelta(days=days)
        elif date_str:
            try:
                cutoff = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
            except ValueError:
                return
        else:
            return

        try:
            objects = self.storage.list_objects_all(bucket_name)
            for obj in objects:
                if prefix and not obj.key.startswith(prefix):
                    continue
                if obj.last_modified < cutoff:
                    try:
                        self.storage.delete_object(bucket_name, obj.key)
                        result.objects_deleted += 1
                    except StorageError as e:
                        result.errors.append(f"Failed to delete {obj.key}: {e}")
        except StorageError as e:
            result.errors.append(f"Failed to list objects: {e}")

    def _enforce_noncurrent_expiration(
        self, bucket_name: str, rule: Dict[str, Any], prefix: str, result: LifecycleResult
    ) -> None:
        noncurrent = rule.get("NoncurrentVersionExpiration", {})
        noncurrent_days = noncurrent.get("NoncurrentDays")
        if not noncurrent_days:
            return

        cutoff = datetime.now(timezone.utc) - timedelta(days=noncurrent_days)

        try:
            objects = self.storage.list_objects_all(bucket_name)
            for obj in objects:
                if prefix and not obj.key.startswith(prefix):
                    continue
                try:
                    versions = self.storage.list_object_versions(bucket_name, obj.key)
                    for version in versions:
                        archived_at_str = version.get("archived_at", "")
                        if not archived_at_str:
                            continue
                        try:
                            archived_at = datetime.fromisoformat(archived_at_str.replace("Z", "+00:00"))
                            if archived_at < cutoff:
                                version_id = version.get("version_id")
                                if version_id:
                                    self.storage.delete_object_version(bucket_name, obj.key, version_id)
                                    result.versions_deleted += 1
                        except (ValueError, StorageError) as e:
                            result.errors.append(f"Failed to process version: {e}")
                except StorageError:
                    pass
        except StorageError as e:
            result.errors.append(f"Failed to list objects: {e}")

        try:
            orphaned = self.storage.list_orphaned_objects(bucket_name)
            for item in orphaned:
                obj_key = item.get("key", "")
                if prefix and not obj_key.startswith(prefix):
                    continue
                try:
                    versions = self.storage.list_object_versions(bucket_name, obj_key)
                    for version in versions:
                        archived_at_str = version.get("archived_at", "")
                        if not archived_at_str:
                            continue
                        try:
                            archived_at = datetime.fromisoformat(archived_at_str.replace("Z", "+00:00"))
                            if archived_at < cutoff:
                                version_id = version.get("version_id")
                                if version_id:
                                    self.storage.delete_object_version(bucket_name, obj_key, version_id)
                                    result.versions_deleted += 1
                        except (ValueError, StorageError) as e:
                            result.errors.append(f"Failed to process orphaned version: {e}")
                except StorageError:
                    pass
        except StorageError as e:
            result.errors.append(f"Failed to list orphaned objects: {e}")

    def _enforce_abort_multipart(
        self, bucket_name: str, rule: Dict[str, Any], result: LifecycleResult
    ) -> None:
        abort_config = rule.get("AbortIncompleteMultipartUpload", {})
        days_after = abort_config.get("DaysAfterInitiation")
        if not days_after:
            return

        cutoff = datetime.now(timezone.utc) - timedelta(days=days_after)

        try:
            uploads = self.storage.list_multipart_uploads(bucket_name)
            for upload in uploads:
                created_at_str = upload.get("created_at", "")
                if not created_at_str:
                    continue
                try:
                    created_at = datetime.fromisoformat(created_at_str.replace("Z", "+00:00"))
                    if created_at < cutoff:
                        upload_id = upload.get("upload_id")
                        if upload_id:
                            self.storage.abort_multipart_upload(bucket_name, upload_id)
                            result.uploads_aborted += 1
                except (ValueError, StorageError) as e:
                    result.errors.append(f"Failed to abort upload: {e}")
        except StorageError as e:
            result.errors.append(f"Failed to list multipart uploads: {e}")

    def run_now(self, bucket_name: Optional[str] = None) -> Dict[str, LifecycleResult]:
        if bucket_name:
            return {bucket_name: self.enforce_rules(bucket_name)}
        return self.enforce_all_buckets()

    def get_execution_history(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[LifecycleExecutionRecord]:
        if not self.history_store:
            return []
        return self.history_store.get_history(bucket_name, limit, offset)

406
python/app/notifications.py
Normal file
@@ -0,0 +1,406 @@
from __future__ import annotations

import ipaddress
import json
import logging
import queue
import socket
import threading
import time
import uuid
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse

import requests
from urllib3.util.connection import create_connection as _urllib3_create_connection


def _resolve_and_check_url(url: str, allow_internal: bool = False) -> Optional[str]:
    try:
        parsed = urlparse(url)
        hostname = parsed.hostname
        if not hostname:
            return None
        cloud_metadata_hosts = {
            "metadata.google.internal",
            "169.254.169.254",
        }
        if hostname.lower() in cloud_metadata_hosts:
            return None
        if allow_internal:
            return hostname
        blocked_hosts = {
            "localhost",
            "127.0.0.1",
            "0.0.0.0",
            "::1",
            "[::1]",
        }
        if hostname.lower() in blocked_hosts:
            return None
        try:
            resolved_ip = socket.gethostbyname(hostname)
            ip = ipaddress.ip_address(resolved_ip)
            if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
                return None
            return resolved_ip
        except (socket.gaierror, ValueError):
            return None
    except Exception:
        return None


def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
    return _resolve_and_check_url(url, allow_internal) is not None


_dns_pin_lock = threading.Lock()


def _pinned_post(url: str, pinned_ip: str, **kwargs: Any) -> requests.Response:
    parsed = urlparse(url)
    hostname = parsed.hostname or ""
    session = requests.Session()
    original_create = _urllib3_create_connection

    def _create_pinned(address: Any, *args: Any, **kw: Any) -> Any:
        host, req_port = address
        if host == hostname:
            return original_create((pinned_ip, req_port), *args, **kw)
        return original_create(address, *args, **kw)

    import urllib3.util.connection as _conn_mod
    with _dns_pin_lock:
        _conn_mod.create_connection = _create_pinned
        try:
            return session.post(url, **kwargs)
        finally:
            _conn_mod.create_connection = original_create
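
# _pinned_post closes the classic check-then-connect (TOCTOU) gap behind
# _resolve_and_check_url: the IP address that passed the SSRF checks is the
# address actually dialed, so a DNS answer that later rebinds the hostname to
# an internal address cannot redirect the webhook POST. Note the trade-off:
# urllib3's create_connection is patched process-wide, so _dns_pin_lock
# serializes all outbound webhook deliveries for the duration of each request.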


logger = logging.getLogger(__name__)


@dataclass
class NotificationEvent:
    event_name: str
    bucket_name: str
    object_key: str
    object_size: int = 0
    etag: str = ""
    version_id: Optional[str] = None
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    request_id: str = field(default_factory=lambda: uuid.uuid4().hex)
    source_ip: str = ""
    user_identity: str = ""

    def to_s3_event(self) -> Dict[str, Any]:
        return {
            "Records": [
                {
                    "eventVersion": "2.1",
                    "eventSource": "myfsio:s3",
                    "awsRegion": "local",
                    "eventTime": self.timestamp.strftime("%Y-%m-%dT%H:%M:%S.000Z"),
                    "eventName": self.event_name,
                    "userIdentity": {
                        "principalId": self.user_identity or "ANONYMOUS",
                    },
                    "requestParameters": {
                        "sourceIPAddress": self.source_ip or "127.0.0.1",
                    },
                    "responseElements": {
                        "x-amz-request-id": self.request_id,
                        "x-amz-id-2": self.request_id,
                    },
                    "s3": {
                        "s3SchemaVersion": "1.0",
                        "configurationId": "notification",
                        "bucket": {
                            "name": self.bucket_name,
                            "ownerIdentity": {"principalId": "local"},
                            "arn": f"arn:aws:s3:::{self.bucket_name}",
                        },
                        "object": {
                            "key": self.object_key,
                            "size": self.object_size,
                            "eTag": self.etag,
                            "versionId": self.version_id or "null",
                            "sequencer": f"{int(time.time() * 1000):016X}",
                        },
                    },
                }
            ]
        }


@dataclass
class WebhookDestination:
    url: str
    headers: Dict[str, str] = field(default_factory=dict)
    timeout_seconds: int = 30
    retry_count: int = 3
    retry_delay_seconds: int = 1

    def to_dict(self) -> Dict[str, Any]:
        return {
            "url": self.url,
            "headers": self.headers,
            "timeout_seconds": self.timeout_seconds,
            "retry_count": self.retry_count,
            "retry_delay_seconds": self.retry_delay_seconds,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WebhookDestination":
        return cls(
            url=data.get("url", ""),
            headers=data.get("headers", {}),
            timeout_seconds=data.get("timeout_seconds", 30),
            retry_count=data.get("retry_count", 3),
            retry_delay_seconds=data.get("retry_delay_seconds", 1),
        )


@dataclass
class NotificationConfiguration:
    id: str
    events: List[str]
    destination: WebhookDestination
    prefix_filter: str = ""
    suffix_filter: str = ""

    def matches_event(self, event_name: str, object_key: str) -> bool:
        event_match = False
        for pattern in self.events:
            if pattern.endswith("*"):
                base = pattern[:-1]
                if event_name.startswith(base):
                    event_match = True
                    break
            elif pattern == event_name:
                event_match = True
                break

        if not event_match:
            return False

        if self.prefix_filter and not object_key.startswith(self.prefix_filter):
            return False
        if self.suffix_filter and not object_key.endswith(self.suffix_filter):
            return False

        return True
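
    # Matching behaviour of the method above, with illustrative values:
    #
    #     cfg = NotificationConfiguration(
    #         id="cfg1",
    #         events=["s3:ObjectCreated:*"],
    #         destination=WebhookDestination(url="https://hooks.example.com/s3"),
    #         prefix_filter="logs/", suffix_filter=".json",
    #     )
    #     cfg.matches_event("s3:ObjectCreated:Put", "logs/a.json")      # True
    #     cfg.matches_event("s3:ObjectCreated:Put", "data/a.json")      # False (prefix)
    #     cfg.matches_event("s3:ObjectRemoved:Delete", "logs/a.json")   # False (event)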

    def to_dict(self) -> Dict[str, Any]:
        return {
            "Id": self.id,
            "Events": self.events,
            "Destination": self.destination.to_dict(),
            "Filter": {
                "Key": {
                    "FilterRules": [
                        {"Name": "prefix", "Value": self.prefix_filter},
                        {"Name": "suffix", "Value": self.suffix_filter},
                    ]
                }
            },
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "NotificationConfiguration":
        prefix = ""
        suffix = ""
        filter_data = data.get("Filter", {})
        key_filter = filter_data.get("Key", {})
        for rule in key_filter.get("FilterRules", []):
            if rule.get("Name") == "prefix":
                prefix = rule.get("Value", "")
            elif rule.get("Name") == "suffix":
                suffix = rule.get("Value", "")

        return cls(
            id=data.get("Id", uuid.uuid4().hex),
            events=data.get("Events", []),
            destination=WebhookDestination.from_dict(data.get("Destination", {})),
            prefix_filter=prefix,
            suffix_filter=suffix,
        )


class NotificationService:
    def __init__(self, storage_root: Path, worker_count: int = 2, allow_internal_endpoints: bool = False):
        self.storage_root = storage_root
        self._allow_internal_endpoints = allow_internal_endpoints
        self._configs: Dict[str, List[NotificationConfiguration]] = {}
        self._queue: queue.Queue[tuple[NotificationEvent, WebhookDestination]] = queue.Queue()
        self._workers: List[threading.Thread] = []
        self._shutdown = threading.Event()
        self._stats = {
            "events_queued": 0,
            "events_sent": 0,
            "events_failed": 0,
        }

        for i in range(worker_count):
            worker = threading.Thread(target=self._worker_loop, name=f"notification-worker-{i}", daemon=True)
            worker.start()
            self._workers.append(worker)

    def _config_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "notifications.json"

    def get_bucket_notifications(self, bucket_name: str) -> List[NotificationConfiguration]:
        if bucket_name in self._configs:
            return self._configs[bucket_name]

        config_path = self._config_path(bucket_name)
        if not config_path.exists():
            return []

        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            configs = [NotificationConfiguration.from_dict(c) for c in data.get("configurations", [])]
            self._configs[bucket_name] = configs
            return configs
        except (json.JSONDecodeError, OSError) as e:
            logger.warning(f"Failed to load notification config for {bucket_name}: {e}")
            return []

    def set_bucket_notifications(
        self, bucket_name: str, configurations: List[NotificationConfiguration]
    ) -> None:
        config_path = self._config_path(bucket_name)
        config_path.parent.mkdir(parents=True, exist_ok=True)

        data = {"configurations": [c.to_dict() for c in configurations]}
        config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        self._configs[bucket_name] = configurations

    def delete_bucket_notifications(self, bucket_name: str) -> None:
        config_path = self._config_path(bucket_name)
        try:
            if config_path.exists():
                config_path.unlink()
        except OSError:
            pass
        self._configs.pop(bucket_name, None)

    def emit_event(self, event: NotificationEvent) -> None:
        configurations = self.get_bucket_notifications(event.bucket_name)
        if not configurations:
            return

        for config in configurations:
            if config.matches_event(event.event_name, event.object_key):
                self._queue.put((event, config.destination))
                self._stats["events_queued"] += 1
                logger.debug(
                    f"Queued notification for {event.event_name} on {event.bucket_name}/{event.object_key}"
                )

    def emit_object_created(
        self,
        bucket_name: str,
        object_key: str,
        *,
        size: int = 0,
        etag: str = "",
        version_id: Optional[str] = None,
        request_id: str = "",
        source_ip: str = "",
        user_identity: str = "",
        operation: str = "Put",
    ) -> None:
        event = NotificationEvent(
            event_name=f"s3:ObjectCreated:{operation}",
            bucket_name=bucket_name,
            object_key=object_key,
            object_size=size,
            etag=etag,
            version_id=version_id,
            request_id=request_id or uuid.uuid4().hex,
            source_ip=source_ip,
            user_identity=user_identity,
        )
        self.emit_event(event)

    def emit_object_removed(
        self,
        bucket_name: str,
        object_key: str,
        *,
        version_id: Optional[str] = None,
        request_id: str = "",
        source_ip: str = "",
        user_identity: str = "",
        operation: str = "Delete",
    ) -> None:
        event = NotificationEvent(
            event_name=f"s3:ObjectRemoved:{operation}",
            bucket_name=bucket_name,
            object_key=object_key,
            version_id=version_id,
            request_id=request_id or uuid.uuid4().hex,
            source_ip=source_ip,
            user_identity=user_identity,
        )
        self.emit_event(event)

    def _worker_loop(self) -> None:
        while not self._shutdown.is_set():
            try:
                event, destination = self._queue.get(timeout=1.0)
            except queue.Empty:
                continue

            try:
                self._send_notification(event, destination)
                self._stats["events_sent"] += 1
            except Exception as e:
                self._stats["events_failed"] += 1
                logger.error(f"Failed to send notification: {e}")
            finally:
                self._queue.task_done()

    def _send_notification(self, event: NotificationEvent, destination: WebhookDestination) -> None:
        resolved_ip = _resolve_and_check_url(destination.url, allow_internal=self._allow_internal_endpoints)
        if not resolved_ip:
            raise RuntimeError(f"Blocked request (SSRF protection): {destination.url}")
        payload = event.to_s3_event()
        headers = {"Content-Type": "application/json", **destination.headers}

        last_error = None
        for attempt in range(destination.retry_count):
            try:
                response = _pinned_post(
                    destination.url,
                    resolved_ip,
                    json=payload,
                    headers=headers,
                    timeout=destination.timeout_seconds,
                )
                if response.status_code < 400:
                    logger.info(
                        f"Notification sent: {event.event_name} -> {destination.url} (status={response.status_code})"
                    )
                    return
                last_error = f"HTTP {response.status_code}: {response.text[:200]}"
            except requests.RequestException as e:
                last_error = str(e)

            if attempt < destination.retry_count - 1:
                time.sleep(destination.retry_delay_seconds * (attempt + 1))

        raise RuntimeError(f"Failed after {destination.retry_count} attempts: {last_error}")

    def get_stats(self) -> Dict[str, int]:
        return dict(self._stats)

    def shutdown(self) -> None:
        self._shutdown.set()
        for worker in self._workers:
            worker.join(timeout=5.0)

234
python/app/object_lock.py
Normal file
@@ -0,0 +1,234 @@
from __future__ import annotations

import json
from dataclasses import dataclass
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional


class RetentionMode(Enum):
    GOVERNANCE = "GOVERNANCE"
    COMPLIANCE = "COMPLIANCE"


class ObjectLockError(Exception):
    pass


@dataclass
class ObjectLockRetention:
    mode: RetentionMode
    retain_until_date: datetime

    def to_dict(self) -> Dict[str, str]:
        return {
            "Mode": self.mode.value,
            "RetainUntilDate": self.retain_until_date.isoformat(),
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> Optional["ObjectLockRetention"]:
        if not data:
            return None
        mode_str = data.get("Mode")
        date_str = data.get("RetainUntilDate")
        if not mode_str or not date_str:
            return None
        try:
            mode = RetentionMode(mode_str)
            retain_until = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
            return cls(mode=mode, retain_until_date=retain_until)
        except (ValueError, KeyError):
            return None

    def is_expired(self) -> bool:
        return datetime.now(timezone.utc) > self.retain_until_date


@dataclass
class ObjectLockConfig:
    enabled: bool = False
    default_retention: Optional[ObjectLockRetention] = None

    def to_dict(self) -> Dict[str, Any]:
        result: Dict[str, Any] = {"ObjectLockEnabled": "Enabled" if self.enabled else "Disabled"}
        if self.default_retention:
            result["Rule"] = {
                "DefaultRetention": {
                    "Mode": self.default_retention.mode.value,
                    "Days": None,
                    "Years": None,
                }
            }
        return result

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ObjectLockConfig":
        enabled = data.get("ObjectLockEnabled") == "Enabled"
        default_retention = None
        rule = data.get("Rule")
        if rule and "DefaultRetention" in rule:
            dr = rule["DefaultRetention"]
            mode_str = dr.get("Mode", "GOVERNANCE")
            days = dr.get("Days")
            years = dr.get("Years")
            if days or years:
                from datetime import timedelta
                now = datetime.now(timezone.utc)
                if years:
                    delta = timedelta(days=int(years) * 365)
                else:
                    delta = timedelta(days=int(days))
                default_retention = ObjectLockRetention(
                    mode=RetentionMode(mode_str),
                    retain_until_date=now + delta,
                )
        return cls(enabled=enabled, default_retention=default_retention)
|
||||
|
||||
|
||||
class ObjectLockService:
|
||||
def __init__(self, storage_root: Path):
|
||||
self.storage_root = storage_root
|
||||
self._config_cache: Dict[str, ObjectLockConfig] = {}
|
||||
|
||||
def _bucket_lock_config_path(self, bucket_name: str) -> Path:
|
||||
return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "object_lock.json"
|
||||
|
||||
def _object_lock_meta_path(self, bucket_name: str, object_key: str) -> Path:
|
||||
safe_key = object_key.replace("/", "_").replace("\\", "_")
|
||||
return (
|
||||
self.storage_root / ".myfsio.sys" / "buckets" / bucket_name /
|
||||
"locks" / f"{safe_key}.lock.json"
|
||||
)
|
||||
|
||||
def get_bucket_lock_config(self, bucket_name: str) -> ObjectLockConfig:
|
||||
if bucket_name in self._config_cache:
|
||||
return self._config_cache[bucket_name]
|
||||
|
||||
config_path = self._bucket_lock_config_path(bucket_name)
|
||||
if not config_path.exists():
|
||||
return ObjectLockConfig(enabled=False)
|
||||
|
||||
try:
|
||||
data = json.loads(config_path.read_text(encoding="utf-8"))
|
||||
config = ObjectLockConfig.from_dict(data)
|
||||
self._config_cache[bucket_name] = config
|
||||
return config
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return ObjectLockConfig(enabled=False)
|
||||
|
||||
def set_bucket_lock_config(self, bucket_name: str, config: ObjectLockConfig) -> None:
|
||||
config_path = self._bucket_lock_config_path(bucket_name)
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
config_path.write_text(json.dumps(config.to_dict()), encoding="utf-8")
|
||||
self._config_cache[bucket_name] = config
|
||||
|
||||
def enable_bucket_lock(self, bucket_name: str) -> None:
|
||||
config = self.get_bucket_lock_config(bucket_name)
|
||||
config.enabled = True
|
||||
self.set_bucket_lock_config(bucket_name, config)
|
||||
|
||||
def is_bucket_lock_enabled(self, bucket_name: str) -> bool:
|
||||
return self.get_bucket_lock_config(bucket_name).enabled
|
||||
|
||||
def get_object_retention(self, bucket_name: str, object_key: str) -> Optional[ObjectLockRetention]:
|
||||
meta_path = self._object_lock_meta_path(bucket_name, object_key)
|
||||
if not meta_path.exists():
|
||||
return None
|
||||
try:
|
||||
data = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
return ObjectLockRetention.from_dict(data.get("retention", {}))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return None
|
||||
|
||||
def set_object_retention(
|
||||
self,
|
||||
bucket_name: str,
|
||||
object_key: str,
|
||||
retention: ObjectLockRetention,
|
||||
bypass_governance: bool = False,
|
||||
) -> None:
|
||||
existing = self.get_object_retention(bucket_name, object_key)
|
||||
if existing and not existing.is_expired():
|
||||
if existing.mode == RetentionMode.COMPLIANCE:
|
||||
raise ObjectLockError(
|
||||
"Cannot modify retention on object with COMPLIANCE mode until retention expires"
|
||||
)
|
||||
if existing.mode == RetentionMode.GOVERNANCE and not bypass_governance:
|
||||
raise ObjectLockError(
|
||||
"Cannot modify GOVERNANCE retention without bypass-governance permission"
|
||||
)
|
||||
|
||||
meta_path = self._object_lock_meta_path(bucket_name, object_key)
|
||||
meta_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
existing_data: Dict[str, Any] = {}
|
||||
if meta_path.exists():
|
||||
try:
|
||||
existing_data = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
pass
|
||||
|
||||
existing_data["retention"] = retention.to_dict()
|
||||
meta_path.write_text(json.dumps(existing_data), encoding="utf-8")
|
||||
|
||||
def get_legal_hold(self, bucket_name: str, object_key: str) -> bool:
|
||||
meta_path = self._object_lock_meta_path(bucket_name, object_key)
|
||||
if not meta_path.exists():
|
||||
return False
|
||||
try:
|
||||
data = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
return data.get("legal_hold", False)
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return False
|
||||
|
||||
def set_legal_hold(self, bucket_name: str, object_key: str, enabled: bool) -> None:
|
||||
meta_path = self._object_lock_meta_path(bucket_name, object_key)
|
||||
meta_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
existing_data: Dict[str, Any] = {}
|
||||
if meta_path.exists():
|
||||
try:
|
||||
existing_data = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
pass
|
||||
|
||||
existing_data["legal_hold"] = enabled
|
||||
meta_path.write_text(json.dumps(existing_data), encoding="utf-8")
|
||||
|
||||
def can_delete_object(
|
||||
self,
|
||||
bucket_name: str,
|
||||
object_key: str,
|
||||
bypass_governance: bool = False,
|
||||
) -> tuple[bool, str]:
|
||||
if self.get_legal_hold(bucket_name, object_key):
|
||||
return False, "Object is under legal hold"
|
||||
|
||||
retention = self.get_object_retention(bucket_name, object_key)
|
||||
if retention and not retention.is_expired():
|
||||
if retention.mode == RetentionMode.COMPLIANCE:
|
||||
return False, f"Object is locked in COMPLIANCE mode until {retention.retain_until_date.isoformat()}"
|
||||
if retention.mode == RetentionMode.GOVERNANCE:
|
||||
if not bypass_governance:
|
||||
return False, f"Object is locked in GOVERNANCE mode until {retention.retain_until_date.isoformat()}"
|
||||
|
||||
return True, ""
|
||||
|
||||
def can_overwrite_object(
|
||||
self,
|
||||
bucket_name: str,
|
||||
object_key: str,
|
||||
bypass_governance: bool = False,
|
||||
) -> tuple[bool, str]:
|
||||
return self.can_delete_object(bucket_name, object_key, bypass_governance)
|
||||
|
||||
def delete_object_lock_metadata(self, bucket_name: str, object_key: str) -> None:
|
||||
meta_path = self._object_lock_meta_path(bucket_name, object_key)
|
||||
try:
|
||||
if meta_path.exists():
|
||||
meta_path.unlink()
|
||||
except OSError:
|
||||
pass
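A short usage sketch of the service above (bucket and key names are made up; the semantics shown — GOVERNANCE is bypassable, COMPLIANCE is not — follow can_delete_object):

from datetime import datetime, timezone
from pathlib import Path

svc = ObjectLockService(storage_root=Path("/tmp/myfsio-data"))  # illustrative path
svc.enable_bucket_lock("audit-logs")
svc.set_object_retention(
    "audit-logs",
    "2030/ledger.csv",
    ObjectLockRetention(
        mode=RetentionMode.GOVERNANCE,
        retain_until_date=datetime(2031, 1, 1, tzinfo=timezone.utc),
    ),
)
ok, reason = svc.can_delete_object("audit-logs", "2030/ledger.csv")
assert not ok  # blocked while GOVERNANCE retention is active
ok, _ = svc.can_delete_object("audit-logs", "2030/ledger.csv", bypass_governance=True)
assert ok  # GOVERNANCE can be bypassed; COMPLIANCE never can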
296 python/app/operation_metrics.py Normal file
@@ -0,0 +1,296 @@
from __future__ import annotations

import json
import logging
import random
import threading
import time
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

MAX_LATENCY_SAMPLES = 5000

logger = logging.getLogger(__name__)


@dataclass
class OperationStats:
    count: int = 0
    success_count: int = 0
    error_count: int = 0
    latency_sum_ms: float = 0.0
    latency_min_ms: float = float("inf")
    latency_max_ms: float = 0.0
    bytes_in: int = 0
    bytes_out: int = 0
    latency_samples: List[float] = field(default_factory=list)

    @staticmethod
    def _compute_percentile(sorted_data: List[float], p: float) -> float:
        if not sorted_data:
            return 0.0
        k = (len(sorted_data) - 1) * (p / 100.0)
        f = int(k)
        c = min(f + 1, len(sorted_data) - 1)
        d = k - f
        # Linear interpolation between the two samples bracketing rank k.
        return sorted_data[f] + d * (sorted_data[c] - sorted_data[f])

    def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
        self.count += 1
        if success:
            self.success_count += 1
        else:
            self.error_count += 1
        self.latency_sum_ms += latency_ms
        if latency_ms < self.latency_min_ms:
            self.latency_min_ms = latency_ms
        if latency_ms > self.latency_max_ms:
            self.latency_max_ms = latency_ms
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out
        if len(self.latency_samples) < MAX_LATENCY_SAMPLES:
            self.latency_samples.append(latency_ms)
        else:
            # Reservoir sampling (Algorithm R): once the buffer is full, every
            # observation keeps an equal chance of being retained.
            j = random.randint(0, self.count - 1)
            if j < MAX_LATENCY_SAMPLES:
                self.latency_samples[j] = latency_ms

    def to_dict(self) -> Dict[str, Any]:
        avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
        min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
        sorted_latencies = sorted(self.latency_samples)
        return {
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "latency_avg_ms": round(avg_latency, 2),
            "latency_min_ms": round(min_latency, 2),
            "latency_max_ms": round(self.latency_max_ms, 2),
            "latency_p50_ms": round(self._compute_percentile(sorted_latencies, 50), 2),
            "latency_p95_ms": round(self._compute_percentile(sorted_latencies, 95), 2),
            "latency_p99_ms": round(self._compute_percentile(sorted_latencies, 99), 2),
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
        }

    def merge(self, other: "OperationStats") -> None:
        self.count += other.count
        self.success_count += other.success_count
        self.error_count += other.error_count
        self.latency_sum_ms += other.latency_sum_ms
        if other.latency_min_ms < self.latency_min_ms:
            self.latency_min_ms = other.latency_min_ms
        if other.latency_max_ms > self.latency_max_ms:
            self.latency_max_ms = other.latency_max_ms
        self.bytes_in += other.bytes_in
        self.bytes_out += other.bytes_out
        combined = self.latency_samples + other.latency_samples
        if len(combined) > MAX_LATENCY_SAMPLES:
            random.shuffle(combined)
            combined = combined[:MAX_LATENCY_SAMPLES]
        self.latency_samples = combined


@dataclass
class MetricsSnapshot:
    timestamp: datetime
    window_seconds: int
    by_method: Dict[str, Dict[str, Any]]
    by_endpoint: Dict[str, Dict[str, Any]]
    by_status_class: Dict[str, int]
    error_codes: Dict[str, int]
    totals: Dict[str, Any]

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.isoformat(),
            "window_seconds": self.window_seconds,
            "by_method": self.by_method,
            "by_endpoint": self.by_endpoint,
            "by_status_class": self.by_status_class,
            "error_codes": self.error_codes,
            "totals": self.totals,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MetricsSnapshot":
        return cls(
            timestamp=datetime.fromisoformat(data["timestamp"]),
            window_seconds=data.get("window_seconds", 300),
            by_method=data.get("by_method", {}),
            by_endpoint=data.get("by_endpoint", {}),
            by_status_class=data.get("by_status_class", {}),
            error_codes=data.get("error_codes", {}),
            totals=data.get("totals", {}),
        )


class OperationMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._by_method: Dict[str, OperationStats] = defaultdict(OperationStats)
        self._by_endpoint: Dict[str, OperationStats] = defaultdict(OperationStats)
        self._by_status_class: Dict[str, int] = {}
        self._error_codes: Dict[str, int] = {}
        self._totals = OperationStats()
        self._window_start = time.time()
        self._shutdown = threading.Event()
        self._snapshots: List[MetricsSnapshot] = []

        self._load_history()

        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop, name="operation-metrics-snapshot", daemon=True
        )
        self._snapshot_thread.start()

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "operation_metrics.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            snapshots_data = data.get("snapshots", [])
            self._snapshots = [MetricsSnapshot.from_dict(s) for s in snapshots_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load operation metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"snapshots": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save operation metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        with self._lock:
            now = datetime.now(timezone.utc)
            window_seconds = int(time.time() - self._window_start)

            snapshot = MetricsSnapshot(
                timestamp=now,
                window_seconds=window_seconds,
                by_method={k: v.to_dict() for k, v in self._by_method.items()},
                by_endpoint={k: v.to_dict() for k, v in self._by_endpoint.items()},
                by_status_class=dict(self._by_status_class),
                error_codes=dict(self._error_codes),
                totals=self._totals.to_dict(),
            )

            self._snapshots.append(snapshot)
            self._prune_old_snapshots()
            self._save_history()

            self._by_method = defaultdict(OperationStats)
            self._by_endpoint = defaultdict(OperationStats)
            self._by_status_class.clear()
            self._error_codes.clear()
            self._totals = OperationStats()
            self._window_start = time.time()

    def record_request(
        self,
        method: str,
        endpoint_type: str,
        status_code: int,
        latency_ms: float,
        bytes_in: int = 0,
        bytes_out: int = 0,
        error_code: Optional[str] = None,
    ) -> None:
        success = 200 <= status_code < 400
        status_class = f"{status_code // 100}xx"

        with self._lock:
            self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
            self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)

            self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1

            if error_code:
                self._error_codes[error_code] = self._error_codes.get(error_code, 0) + 1

            self._totals.record(latency_ms, success, bytes_in, bytes_out)

    def get_current_stats(self) -> Dict[str, Any]:
        with self._lock:
            window_seconds = int(time.time() - self._window_start)
            return {
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "window_seconds": window_seconds,
                "by_method": {k: v.to_dict() for k, v in self._by_method.items()},
                "by_endpoint": {k: v.to_dict() for k, v in self._by_endpoint.items()},
                "by_status_class": dict(self._by_status_class),
                "error_codes": dict(self._error_codes),
                "totals": self._totals.to_dict(),
            }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)

        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]

        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)


def classify_endpoint(path: str) -> str:
    if not path or path == "/":
        return "service"

    path = path.rstrip("/")

    if path.startswith("/ui"):
        return "ui"

    if path.startswith("/kms"):
        return "kms"

    if path.startswith("/myfsio"):
        return "service"

    # Drop empty segments so a path like "//" classifies as "service" rather
    # than as a bucket (str.split never returns an empty list, so the previous
    # len == 0 check could never fire).
    parts = [p for p in path.lstrip("/").split("/") if p]
    if len(parts) == 0:
        return "service"
    elif len(parts) == 1:
        return "bucket"
    else:
        return "object"
667 python/app/replication.py Normal file
@@ -0,0 +1,667 @@
from __future__ import annotations

import json
import logging
import mimetypes
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError

from .connections import ConnectionStore, RemoteConnection
from .storage import ObjectStorage, StorageError

logger = logging.getLogger(__name__)

REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"

REPLICATION_MODE_NEW_ONLY = "new_only"
REPLICATION_MODE_ALL = "all"
REPLICATION_MODE_BIDIRECTIONAL = "bidirectional"


def _create_s3_client(
    connection: RemoteConnection,
    *,
    health_check: bool = False,
    connect_timeout: int = 5,
    read_timeout: int = 30,
    max_retries: int = 2,
) -> Any:
    """Create a boto3 S3 client for the given connection.

    Args:
        connection: Remote S3 connection configuration
        health_check: If True, use minimal retries for quick health checks
    """
    config = Config(
        user_agent_extra=REPLICATION_USER_AGENT,
        connect_timeout=connect_timeout,
        read_timeout=read_timeout,
        retries={'max_attempts': 1 if health_check else max_retries},
        signature_version='s3v4',
        s3={'addressing_style': 'path'},
        request_checksum_calculation='when_required',
        response_checksum_validation='when_required',
    )
    return boto3.client(
        "s3",
        endpoint_url=connection.endpoint_url,
        aws_access_key_id=connection.access_key,
        aws_secret_access_key=connection.secret_key,
        region_name=connection.region or 'us-east-1',
        config=config,
    )


@dataclass
class ReplicationStats:
    """Statistics for replication operations - computed dynamically."""
    objects_synced: int = 0
    objects_pending: int = 0
    objects_orphaned: int = 0
    bytes_synced: int = 0
    last_sync_at: Optional[float] = None
    last_sync_key: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "objects_synced": self.objects_synced,
            "objects_pending": self.objects_pending,
            "objects_orphaned": self.objects_orphaned,
            "bytes_synced": self.bytes_synced,
            "last_sync_at": self.last_sync_at,
            "last_sync_key": self.last_sync_key,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationStats":
        return cls(
            objects_synced=data.get("objects_synced", 0),
            objects_pending=data.get("objects_pending", 0),
            objects_orphaned=data.get("objects_orphaned", 0),
            bytes_synced=data.get("bytes_synced", 0),
            last_sync_at=data.get("last_sync_at"),
            last_sync_key=data.get("last_sync_key"),
        )


@dataclass
class ReplicationFailure:
    object_key: str
    error_message: str
    timestamp: float
    failure_count: int
    bucket_name: str
    action: str
    last_error_code: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "object_key": self.object_key,
            "error_message": self.error_message,
            "timestamp": self.timestamp,
            "failure_count": self.failure_count,
            "bucket_name": self.bucket_name,
            "action": self.action,
            "last_error_code": self.last_error_code,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationFailure":
        return cls(
            object_key=data["object_key"],
            error_message=data["error_message"],
            timestamp=data["timestamp"],
            failure_count=data["failure_count"],
            bucket_name=data["bucket_name"],
            action=data["action"],
            last_error_code=data.get("last_error_code"),
        )


@dataclass
class ReplicationRule:
    bucket_name: str
    target_connection_id: str
    target_bucket: str
    enabled: bool = True
    mode: str = REPLICATION_MODE_NEW_ONLY
    created_at: Optional[float] = None
    stats: ReplicationStats = field(default_factory=ReplicationStats)
    sync_deletions: bool = True
    last_pull_at: Optional[float] = None
    filter_prefix: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "bucket_name": self.bucket_name,
            "target_connection_id": self.target_connection_id,
            "target_bucket": self.target_bucket,
            "enabled": self.enabled,
            "mode": self.mode,
            "created_at": self.created_at,
            "stats": self.stats.to_dict(),
            "sync_deletions": self.sync_deletions,
            "last_pull_at": self.last_pull_at,
            "filter_prefix": self.filter_prefix,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationRule":
        stats_data = data.pop("stats", {})
        if "mode" not in data:
            data["mode"] = REPLICATION_MODE_NEW_ONLY
        if "created_at" not in data:
            data["created_at"] = None
        if "sync_deletions" not in data:
            data["sync_deletions"] = True
        if "last_pull_at" not in data:
            data["last_pull_at"] = None
        if "filter_prefix" not in data:
            data["filter_prefix"] = None
        rule = cls(**data)
        rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
        return rule


class ReplicationFailureStore:
    def __init__(self, storage_root: Path, max_failures_per_bucket: int = 50) -> None:
        self.storage_root = storage_root
        self.max_failures_per_bucket = max_failures_per_bucket
        self._lock = threading.Lock()
        self._cache: Dict[str, List[ReplicationFailure]] = {}

    def _get_failures_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "replication_failures.json"

    def _load_from_disk(self, bucket_name: str) -> List[ReplicationFailure]:
        path = self._get_failures_path(bucket_name)
        if not path.exists():
            return []
        try:
            with open(path, "r") as f:
                data = json.load(f)
            return [ReplicationFailure.from_dict(d) for d in data.get("failures", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error(f"Failed to load replication failures for {bucket_name}: {e}")
            return []

    def _save_to_disk(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
        path = self._get_failures_path(bucket_name)
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"failures": [f.to_dict() for f in failures[:self.max_failures_per_bucket]]}
        try:
            with open(path, "w") as f:
                json.dump(data, f, indent=2)
        except OSError as e:
            logger.error(f"Failed to save replication failures for {bucket_name}: {e}")

    def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
        if bucket_name in self._cache:
            return list(self._cache[bucket_name])
        failures = self._load_from_disk(bucket_name)
        self._cache[bucket_name] = failures
        return list(failures)

    def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
        trimmed = failures[:self.max_failures_per_bucket]
        self._cache[bucket_name] = trimmed
        self._save_to_disk(bucket_name, trimmed)

    def add_failure(self, bucket_name: str, failure: ReplicationFailure) -> None:
        with self._lock:
            failures = self.load_failures(bucket_name)
            existing = next((f for f in failures if f.object_key == failure.object_key), None)
            if existing:
                existing.failure_count += 1
                existing.timestamp = failure.timestamp
                existing.error_message = failure.error_message
                existing.last_error_code = failure.last_error_code
            else:
                failures.insert(0, failure)
            self.save_failures(bucket_name, failures)

    def remove_failure(self, bucket_name: str, object_key: str) -> bool:
        with self._lock:
            failures = self.load_failures(bucket_name)
            original_len = len(failures)
            failures = [f for f in failures if f.object_key != object_key]
            if len(failures) < original_len:
                self.save_failures(bucket_name, failures)
                return True
            return False

    def clear_failures(self, bucket_name: str) -> None:
        with self._lock:
            self._cache.pop(bucket_name, None)
            path = self._get_failures_path(bucket_name)
            if path.exists():
                path.unlink()

    def get_failure(self, bucket_name: str, object_key: str) -> Optional[ReplicationFailure]:
        failures = self.load_failures(bucket_name)
        return next((f for f in failures if f.object_key == object_key), None)

    def get_failure_count(self, bucket_name: str) -> int:
        return len(self.load_failures(bucket_name))


class ReplicationManager:
    def __init__(
        self,
        storage: ObjectStorage,
        connections: ConnectionStore,
        rules_path: Path,
        storage_root: Path,
        connect_timeout: int = 5,
        read_timeout: int = 30,
        max_retries: int = 2,
        streaming_threshold_bytes: int = 10 * 1024 * 1024,
        max_failures_per_bucket: int = 50,
    ) -> None:
        self.storage = storage
        self.connections = connections
        self.rules_path = rules_path
        self.storage_root = storage_root
        self.connect_timeout = connect_timeout
        self.read_timeout = read_timeout
        self.max_retries = max_retries
        self.streaming_threshold_bytes = streaming_threshold_bytes
        self._rules: Dict[str, ReplicationRule] = {}
        self._stats_lock = threading.Lock()
        self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
        self._shutdown = False
        self.failure_store = ReplicationFailureStore(storage_root, max_failures_per_bucket)
        self.reload_rules()

    def _create_client(self, connection: RemoteConnection, *, health_check: bool = False) -> Any:
        """Create an S3 client with the manager's configured timeouts."""
        return _create_s3_client(
            connection,
            health_check=health_check,
            connect_timeout=self.connect_timeout,
            read_timeout=self.read_timeout,
            max_retries=self.max_retries,
        )

    def shutdown(self, wait: bool = True) -> None:
        """Shutdown the replication executor gracefully.

        Args:
            wait: If True, wait for pending tasks to complete
        """
        self._shutdown = True
        self._executor.shutdown(wait=wait)
        logger.info("Replication manager shut down")

    def reload_rules(self) -> None:
        if not self.rules_path.exists():
            self._rules = {}
            return
        try:
            with open(self.rules_path, "r") as f:
                data = json.load(f)
            # Build a fresh mapping so rules removed from the file do not linger.
            rules: Dict[str, ReplicationRule] = {}
            for bucket, rule_data in data.items():
                rules[bucket] = ReplicationRule.from_dict(rule_data)
            self._rules = rules
        except (OSError, ValueError) as e:
            logger.error(f"Failed to load replication rules: {e}")

    def save_rules(self) -> None:
        data = {b: rule.to_dict() for b, rule in self._rules.items()}
        self.rules_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.rules_path, "w") as f:
            json.dump(data, f, indent=2)

    def check_endpoint_health(self, connection: RemoteConnection) -> bool:
        """Check if a remote endpoint is reachable and responsive.

        Returns True if endpoint is healthy, False otherwise.
        Uses short timeouts to prevent blocking.
        """
        try:
            s3 = self._create_client(connection, health_check=True)
            s3.list_buckets()
            return True
        except Exception as e:
            logger.warning(f"Endpoint health check failed for {connection.name} ({connection.endpoint_url}): {e}")
            return False

    def get_rule(self, bucket_name: str) -> Optional[ReplicationRule]:
        return self._rules.get(bucket_name)

    def list_rules(self) -> List[ReplicationRule]:
        return list(self._rules.values())

    def set_rule(self, rule: ReplicationRule) -> None:
        old_rule = self._rules.get(rule.bucket_name)
        was_all_mode = old_rule is not None and old_rule.mode == REPLICATION_MODE_ALL
        self._rules[rule.bucket_name] = rule
        self.save_rules()

        if rule.mode == REPLICATION_MODE_ALL and rule.enabled and not was_all_mode:
            logger.info(f"Replication mode ALL enabled for {rule.bucket_name}, triggering sync of existing objects")
            self._executor.submit(self.replicate_existing_objects, rule.bucket_name)

    def delete_rule(self, bucket_name: str) -> None:
        if bucket_name in self._rules:
            del self._rules[bucket_name]
            self.save_rules()

    def _update_last_sync(self, bucket_name: str, object_key: str = "") -> None:
        """Update last sync timestamp after a successful operation."""
        with self._stats_lock:
            rule = self._rules.get(bucket_name)
            if not rule:
                return
            rule.stats.last_sync_at = time.time()
            rule.stats.last_sync_key = object_key
            self.save_rules()

    def get_sync_status(self, bucket_name: str) -> Optional[ReplicationStats]:
        """Dynamically compute replication status by comparing source and destination buckets."""
        rule = self.get_rule(bucket_name)
        if not rule:
            return None

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            return rule.stats

        try:
            source_objects = self.storage.list_objects_all(bucket_name)
            source_keys = {obj.key: obj.size for obj in source_objects}

            s3 = self._create_client(connection)

            dest_keys = set()
            bytes_synced = 0
            paginator = s3.get_paginator('list_objects_v2')
            try:
                for page in paginator.paginate(Bucket=rule.target_bucket):
                    for obj in page.get('Contents', []):
                        dest_keys.add(obj['Key'])
                        if obj['Key'] in source_keys:
                            bytes_synced += obj.get('Size', 0)
            except ClientError as e:
                if e.response['Error']['Code'] == 'NoSuchBucket':
                    dest_keys = set()
                else:
                    raise

            synced = source_keys.keys() & dest_keys
            orphaned = dest_keys - source_keys.keys()

            if rule.mode == REPLICATION_MODE_ALL:
                pending = source_keys.keys() - dest_keys
            else:
                pending = set()

            rule.stats.objects_synced = len(synced)
            rule.stats.objects_pending = len(pending)
            rule.stats.objects_orphaned = len(orphaned)
            rule.stats.bytes_synced = bytes_synced

            return rule.stats

        except (ClientError, StorageError) as e:
            logger.error(f"Failed to compute sync status for {bucket_name}: {e}")
            return rule.stats

    def replicate_existing_objects(self, bucket_name: str) -> None:
        """Trigger replication for all existing objects in a bucket."""
        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Cannot replicate existing objects: Connection {rule.target_connection_id} not found")
            return

        if not self.check_endpoint_health(connection):
            logger.warning(f"Cannot replicate existing objects: Endpoint {connection.name} ({connection.endpoint_url}) is not reachable")
            return

        try:
            objects = self.storage.list_objects_all(bucket_name)
            logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
            for obj in objects:
                self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")
        except StorageError as e:
            logger.error(f"Failed to list objects for replication: {e}")

    def create_remote_bucket(self, connection_id: str, bucket_name: str) -> None:
        """Create a bucket on the remote connection."""
        connection = self.connections.get(connection_id)
        if not connection:
            raise ValueError(f"Connection {connection_id} not found")

        try:
            s3 = self._create_client(connection)
            s3.create_bucket(Bucket=bucket_name)
        except ClientError as e:
            logger.error(f"Failed to create remote bucket {bucket_name}: {e}")
            raise

    def trigger_replication(self, bucket_name: str, object_key: str, action: str = "write") -> None:
        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Connection {rule.target_connection_id} not found")
            return

        if not self.check_endpoint_health(connection):
            logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Endpoint {connection.name} ({connection.endpoint_url}) is not reachable")
            return

        self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)

    def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
        if self._shutdown:
            return

        current_rule = self.get_rule(bucket_name)
        if not current_rule or not current_rule.enabled:
            logger.debug(f"Replication skipped for {bucket_name}/{object_key}: rule disabled or removed")
            return

        if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
            logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
            return

        try:
            # ObjectStorage is already imported at module level.
            ObjectStorage._sanitize_object_key(object_key)
        except StorageError as e:
            logger.error(f"Object key validation failed in replication: {e}")
            return

        try:
            s3 = self._create_client(conn)

            if action == "delete":
                try:
                    s3.delete_object(Bucket=rule.target_bucket, Key=object_key)
                    logger.info(f"Replicated DELETE {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
                    self._update_last_sync(bucket_name, object_key)
                    self.failure_store.remove_failure(bucket_name, object_key)
                except ClientError as e:
                    error_code = e.response.get('Error', {}).get('Code')
                    logger.error(f"Replication DELETE failed for {bucket_name}/{object_key}: {e}")
                    self.failure_store.add_failure(bucket_name, ReplicationFailure(
                        object_key=object_key,
                        error_message=str(e),
                        timestamp=time.time(),
                        failure_count=1,
                        bucket_name=bucket_name,
                        action="delete",
                        last_error_code=error_code,
                    ))
                return

            try:
                path = self.storage.get_object_path(bucket_name, object_key)
            except StorageError:
                logger.error(f"Source object not found: {bucket_name}/{object_key}")
                return

            content_type, _ = mimetypes.guess_type(path)
            file_size = path.stat().st_size

            logger.info(f"Replicating {bucket_name}/{object_key}: Size={file_size}, ContentType={content_type}")

            def do_upload() -> None:
                """Upload object using appropriate method based on file size.

                For small files (< 10 MiB): Read into memory for simpler handling
                For large files: Use streaming upload to avoid memory issues
                """
                extra_args = {}
                if content_type:
                    extra_args["ContentType"] = content_type

                if file_size >= self.streaming_threshold_bytes:
                    s3.upload_file(
                        str(path),
                        rule.target_bucket,
                        object_key,
                        ExtraArgs=extra_args if extra_args else None,
                    )
                else:
                    file_content = path.read_bytes()
                    put_kwargs = {
                        "Bucket": rule.target_bucket,
                        "Key": object_key,
                        "Body": file_content,
                        **extra_args,
                    }
                    s3.put_object(**put_kwargs)

            try:
                do_upload()
            except (ClientError, S3UploadFailedError) as e:
                error_code = None
                if isinstance(e, ClientError):
                    error_code = e.response['Error']['Code']
                elif isinstance(e, S3UploadFailedError):
                    if "NoSuchBucket" in str(e):
                        error_code = 'NoSuchBucket'

                if error_code == 'NoSuchBucket':
                    logger.info(f"Target bucket {rule.target_bucket} not found. Attempting to create it.")
                    bucket_ready = False
                    try:
                        s3.create_bucket(Bucket=rule.target_bucket)
                        bucket_ready = True
                        logger.info(f"Created target bucket {rule.target_bucket}")
                    except ClientError as bucket_err:
                        if bucket_err.response['Error']['Code'] in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
                            logger.debug(f"Bucket {rule.target_bucket} already exists (created by another thread)")
                            bucket_ready = True
                        else:
                            logger.error(f"Failed to create target bucket {rule.target_bucket}: {bucket_err}")
                            raise e

                    if bucket_ready:
                        do_upload()
                else:
                    raise e

            logger.info(f"Replicated {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
            self._update_last_sync(bucket_name, object_key)
            self.failure_store.remove_failure(bucket_name, object_key)

        except (ClientError, OSError, ValueError) as e:
            error_code = None
            if isinstance(e, ClientError):
                error_code = e.response.get('Error', {}).get('Code')
            logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
            self.failure_store.add_failure(bucket_name, ReplicationFailure(
                object_key=object_key,
                error_message=str(e),
                timestamp=time.time(),
                failure_count=1,
                bucket_name=bucket_name,
                action=action,
                last_error_code=error_code,
            ))
        except Exception as e:
            logger.exception(f"Unexpected error during replication for {bucket_name}/{object_key}")
            self.failure_store.add_failure(bucket_name, ReplicationFailure(
                object_key=object_key,
                error_message=str(e),
                timestamp=time.time(),
                failure_count=1,
                bucket_name=bucket_name,
                action=action,
                last_error_code=None,
            ))

    def get_failed_items(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[ReplicationFailure]:
        failures = self.failure_store.load_failures(bucket_name)
        return failures[offset:offset + limit]

    def get_failure_count(self, bucket_name: str) -> int:
        return self.failure_store.get_failure_count(bucket_name)

    def retry_failed_item(self, bucket_name: str, object_key: str) -> bool:
        failure = self.failure_store.get_failure(bucket_name, object_key)
        if not failure:
            return False

        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return False

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Cannot retry: Connection {rule.target_connection_id} not found")
            return False

        if not self.check_endpoint_health(connection):
            logger.warning(f"Cannot retry: Endpoint {connection.name} is not reachable")
            return False

        self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, failure.action)
        return True

    def retry_all_failed(self, bucket_name: str) -> Dict[str, int]:
        failures = self.failure_store.load_failures(bucket_name)
        if not failures:
            return {"submitted": 0, "skipped": 0}

        rule = self.get_rule(bucket_name)
        if not rule or not rule.enabled:
            return {"submitted": 0, "skipped": len(failures)}

        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning(f"Cannot retry: Connection {rule.target_connection_id} not found")
            return {"submitted": 0, "skipped": len(failures)}

        if not self.check_endpoint_health(connection):
            logger.warning(f"Cannot retry: Endpoint {connection.name} is not reachable")
            return {"submitted": 0, "skipped": len(failures)}

        submitted = 0
        for failure in failures:
            self._executor.submit(self._replicate_task, bucket_name, failure.object_key, rule, connection, failure.action)
            submitted += 1

        return {"submitted": submitted, "skipped": 0}

    def dismiss_failure(self, bucket_name: str, object_key: str) -> bool:
        return self.failure_store.remove_failure(bucket_name, object_key)

    def clear_failures(self, bucket_name: str) -> None:
        self.failure_store.clear_failures(bucket_name)
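ReplicationRule round-trips through plain dicts, which is what lets save_rules/reload_rules persist rules as JSON. A small sketch (bucket and connection names are hypothetical):

rule = ReplicationRule(
    bucket_name="photos",
    target_connection_id="conn-1",
    target_bucket="photos-replica",
    mode=REPLICATION_MODE_ALL,
)
restored = ReplicationRule.from_dict(json.loads(json.dumps(rule.to_dict())))
assert restored.target_bucket == "photos-replica"
assert restored.stats.objects_synced == 0  # stats rebuilt from the nested dict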
3975 python/app/s3_api.py Normal file
File diff suppressed because it is too large
296 python/app/s3_client.py Normal file
@@ -0,0 +1,296 @@
from __future__ import annotations

import json
import logging
import threading
import time
from typing import Any, Generator, Optional

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, EndpointConnectionError, ConnectionClosedError
from flask import current_app, session

logger = logging.getLogger(__name__)

UI_PROXY_USER_AGENT = "MyFSIO-UIProxy/1.0"

_BOTO_ERROR_MAP = {
    "NoSuchBucket": 404,
    "NoSuchKey": 404,
    "NoSuchUpload": 404,
    "BucketAlreadyExists": 409,
    "BucketAlreadyOwnedByYou": 409,
    "BucketNotEmpty": 409,
    "AccessDenied": 403,
    "InvalidAccessKeyId": 403,
    "SignatureDoesNotMatch": 403,
    "InvalidBucketName": 400,
    "InvalidArgument": 400,
    "MalformedXML": 400,
    "EntityTooLarge": 400,
    "QuotaExceeded": 403,
}

_UPLOAD_REGISTRY_MAX_AGE = 86400
_UPLOAD_REGISTRY_CLEANUP_INTERVAL = 3600


class UploadRegistry:
    def __init__(self) -> None:
        self._entries: dict[str, tuple[str, str, float]] = {}
        self._lock = threading.Lock()
        self._last_cleanup = time.monotonic()

    def register(self, upload_id: str, bucket_name: str, object_key: str) -> None:
        with self._lock:
            self._entries[upload_id] = (bucket_name, object_key, time.monotonic())
            self._maybe_cleanup()

    def get_key(self, upload_id: str, bucket_name: str) -> Optional[str]:
        with self._lock:
            entry = self._entries.get(upload_id)
            if entry is None:
                return None
            stored_bucket, key, created_at = entry
            if stored_bucket != bucket_name:
                return None
            if time.monotonic() - created_at > _UPLOAD_REGISTRY_MAX_AGE:
                del self._entries[upload_id]
                return None
            return key

    def remove(self, upload_id: str) -> None:
        with self._lock:
            self._entries.pop(upload_id, None)

    def _maybe_cleanup(self) -> None:
        now = time.monotonic()
        if now - self._last_cleanup < _UPLOAD_REGISTRY_CLEANUP_INTERVAL:
            return
        self._last_cleanup = now
        cutoff = now - _UPLOAD_REGISTRY_MAX_AGE
        stale = [uid for uid, (_, _, ts) in self._entries.items() if ts < cutoff]
        for uid in stale:
            del self._entries[uid]


class S3ProxyClient:
    def __init__(self, api_base_url: str, region: str = "us-east-1") -> None:
        if not api_base_url:
            raise ValueError("api_base_url is required for S3ProxyClient")
        self._api_base_url = api_base_url.rstrip("/")
        self._region = region
        self.upload_registry = UploadRegistry()

    @property
    def api_base_url(self) -> str:
        return self._api_base_url

    def get_client(self, access_key: str, secret_key: str) -> Any:
        if not access_key or not secret_key:
            raise ValueError("Both access_key and secret_key are required")
        config = Config(
            user_agent_extra=UI_PROXY_USER_AGENT,
            connect_timeout=5,
            read_timeout=30,
            retries={"max_attempts": 0},
            signature_version="s3v4",
            s3={"addressing_style": "path"},
            request_checksum_calculation="when_required",
            response_checksum_validation="when_required",
        )
        return boto3.client(
            "s3",
            endpoint_url=self._api_base_url,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            region_name=self._region,
            config=config,
        )


def _get_proxy() -> S3ProxyClient:
    proxy = current_app.extensions.get("s3_proxy")
    if proxy is None:
        raise RuntimeError(
            "S3 proxy not configured. Set API_BASE_URL or run both API and UI servers."
        )
    return proxy


def _get_session_creds() -> tuple[str, str]:
    secret_store = current_app.extensions["secret_store"]
    secret_store.purge_expired()
    token = session.get("cred_token")
    if not token:
        raise PermissionError("Not authenticated")
    creds = secret_store.peek(token)
    if not creds:
        raise PermissionError("Session expired")
    access_key = creds.get("access_key", "")
    secret_key = creds.get("secret_key", "")
    if not access_key or not secret_key:
        raise PermissionError("Invalid session credentials")
    return access_key, secret_key


def get_session_s3_client() -> Any:
    proxy = _get_proxy()
    access_key, secret_key = _get_session_creds()
    return proxy.get_client(access_key, secret_key)


def get_upload_registry() -> UploadRegistry:
    return _get_proxy().upload_registry


def handle_client_error(exc: ClientError) -> tuple[dict[str, str], int]:
    error_info = exc.response.get("Error", {})
    code = error_info.get("Code", "InternalError")
    message = error_info.get("Message") or "S3 operation failed"
    http_status = _BOTO_ERROR_MAP.get(code)
    if http_status is None:
        http_status = exc.response.get("ResponseMetadata", {}).get("HTTPStatusCode", 500)
    return {"error": message}, http_status


def handle_connection_error(exc: Exception) -> tuple[dict[str, str], int]:
    logger.error("S3 API connection failed: %s", exc)
    return {"error": "S3 API server is unreachable. Ensure the API server is running."}, 502


def format_datetime_display(dt: Any, display_tz: str = "UTC") -> str:
    from .ui import _format_datetime_display
    return _format_datetime_display(dt, display_tz)


def format_datetime_iso(dt: Any, display_tz: str = "UTC") -> str:
    from .ui import _format_datetime_iso
    return _format_datetime_iso(dt, display_tz)


def build_url_templates(bucket_name: str) -> dict[str, str]:
    from flask import url_for
    preview_t = url_for("ui.object_preview", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    delete_t = url_for("ui.delete_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    presign_t = url_for("ui.object_presign", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    versions_t = url_for("ui.object_versions", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    restore_t = url_for(
        "ui.restore_object_version",
        bucket_name=bucket_name,
        object_key="KEY_PLACEHOLDER",
        version_id="VERSION_ID_PLACEHOLDER",
    )
    tags_t = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    copy_t = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    move_t = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    metadata_t = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    return {
        "preview": preview_t,
        "download": preview_t + "?download=1",
        "presign": presign_t,
        "delete": delete_t,
        "versions": versions_t,
        "restore": restore_t,
        "tags": tags_t,
        "copy": copy_t,
        "move": move_t,
        "metadata": metadata_t,
    }


def translate_list_objects(
    boto3_response: dict[str, Any],
    url_templates: dict[str, str],
    display_tz: str = "UTC",
    versioning_enabled: bool = False,
) -> dict[str, Any]:
    objects_data = []
    for obj in boto3_response.get("Contents", []):
        last_mod = obj["LastModified"]
        objects_data.append({
            "key": obj["Key"],
            "size": obj["Size"],
            "last_modified": last_mod.isoformat(),
            "last_modified_display": format_datetime_display(last_mod, display_tz),
            "last_modified_iso": format_datetime_iso(last_mod, display_tz),
            "etag": obj.get("ETag", "").strip('"'),
        })
    return {
        "objects": objects_data,
        "is_truncated": boto3_response.get("IsTruncated", False),
        "next_continuation_token": boto3_response.get("NextContinuationToken"),
        "total_count": boto3_response.get("KeyCount", len(objects_data)),
        "versioning_enabled": versioning_enabled,
        "url_templates": url_templates,
    }


def get_versioning_via_s3(client: Any, bucket_name: str) -> bool:
    try:
        resp = client.get_bucket_versioning(Bucket=bucket_name)
        return resp.get("Status") == "Enabled"
    except ClientError as exc:
        code = exc.response.get("Error", {}).get("Code", "")
        if code != "NoSuchBucket":
            logger.warning("Failed to check versioning for %s: %s", bucket_name, code)
        return False


def stream_objects_ndjson(
    client: Any,
    bucket_name: str,
    prefix: Optional[str],
    url_templates: dict[str, str],
    display_tz: str = "UTC",
    versioning_enabled: bool = False,
    delimiter: Optional[str] = None,
) -> Generator[str, None, None]:
    meta_line = json.dumps({
        "type": "meta",
        "versioning_enabled": versioning_enabled,
        "url_templates": url_templates,
    }) + "\n"
    yield meta_line

    yield json.dumps({"type": "count", "total_count": 0}) + "\n"

    kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
    if prefix:
        kwargs["Prefix"] = prefix
    if delimiter:
        kwargs["Delimiter"] = delimiter

    running_count = 0
    try:
        paginator = client.get_paginator("list_objects_v2")
        for page in paginator.paginate(**kwargs):
            for cp in page.get("CommonPrefixes", []):
                yield json.dumps({
                    "type": "folder",
                    "prefix": cp["Prefix"],
                }) + "\n"
            page_contents = page.get("Contents", [])
            for obj in page_contents:
                last_mod = obj["LastModified"]
                yield json.dumps({
                    "type": "object",
                    "key": obj["Key"],
                    "size": obj["Size"],
                    "last_modified": last_mod.isoformat(),
                    "last_modified_display": format_datetime_display(last_mod, display_tz),
                    "last_modified_iso": format_datetime_iso(last_mod, display_tz),
                    "etag": obj.get("ETag", "").strip('"'),
                }) + "\n"
            running_count += len(page_contents)
            yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
    except ClientError as exc:
        error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
        yield json.dumps({"type": "error", "error": error_msg}) + "\n"
        return
    except (EndpointConnectionError, ConnectionClosedError):
        yield json.dumps({"type": "error", "error": "S3 API server is unreachable"}) + "\n"
        return

    yield json.dumps({"type": "done"}) + "\n"
@@ -1,4 +1,3 @@
"""Ephemeral store for one-time secrets communicated to the UI."""
from __future__ import annotations

import secrets
@@ -19,6 +18,18 @@ class EphemeralSecretStore:
        self._store[token] = (payload, expires_at)
        return token

    def peek(self, token: str | None) -> Any | None:
        if not token:
            return None
        entry = self._store.get(token)
        if not entry:
            return None
        payload, expires_at = entry
        if expires_at < time.time():
            self._store.pop(token, None)
            return None
        return payload

    def pop(self, token: str | None) -> Any | None:
        if not token:
            return None
171 python/app/select_content.py Normal file
@@ -0,0 +1,171 @@
"""S3 SelectObjectContent SQL query execution using DuckDB."""
from __future__ import annotations

import json
from pathlib import Path
from typing import Any, Dict, Generator, Optional

try:
    import duckdb
    DUCKDB_AVAILABLE = True
except ImportError:
    DUCKDB_AVAILABLE = False


class SelectError(Exception):
    """Error during SELECT query execution."""
    pass


def execute_select_query(
    file_path: Path,
    expression: str,
    input_format: str,
    input_config: Dict[str, Any],
    output_format: str,
    output_config: Dict[str, Any],
    chunk_size: int = 65536,
) -> Generator[bytes, None, None]:
    """Execute SQL query on object content."""
    if not DUCKDB_AVAILABLE:
        raise SelectError("DuckDB is not installed. Install with: pip install duckdb")

    conn = duckdb.connect(":memory:")

    try:
        if input_format == "CSV":
            _load_csv(conn, file_path, input_config)
        elif input_format == "JSON":
            _load_json(conn, file_path, input_config)
        elif input_format == "Parquet":
            _load_parquet(conn, file_path)
        else:
            raise SelectError(f"Unsupported input format: {input_format}")

        # Simple textual rewrite: S3 Select expressions reference "s3object",
        # while the content is loaded into a DuckDB table named "data".
        normalized_expression = expression.replace("s3object", "data").replace("S3Object", "data")

        try:
            result = conn.execute(normalized_expression)
        except duckdb.Error as exc:
            raise SelectError(f"SQL execution error: {exc}") from exc

        if output_format == "CSV":
            yield from _output_csv(result, output_config, chunk_size)
        elif output_format == "JSON":
            yield from _output_json(result, output_config, chunk_size)
        else:
            raise SelectError(f"Unsupported output format: {output_format}")

    finally:
        conn.close()


def _load_csv(conn, file_path: Path, config: Dict[str, Any]) -> None:
    """Load CSV file into DuckDB."""
    file_header_info = config.get("file_header_info", "NONE")
    delimiter = config.get("field_delimiter", ",")
    quote = config.get("quote_character", '"')

    header = file_header_info in ("USE", "IGNORE")
    path_str = str(file_path).replace("\\", "/")

    # NOTE: the path, delimiter, and quote are interpolated directly into the
    # SQL text; callers must validate these values before passing them in.
    conn.execute(f"""
        CREATE TABLE data AS
        SELECT * FROM read_csv('{path_str}',
            header={header},
            delim='{delimiter}',
            quote='{quote}'
        )
    """)


def _load_json(conn, file_path: Path, config: Dict[str, Any]) -> None:
    """Load JSON file into DuckDB."""
    json_type = config.get("type", "DOCUMENT")
    path_str = str(file_path).replace("\\", "/")

    if json_type == "LINES":
        conn.execute(f"""
            CREATE TABLE data AS
            SELECT * FROM read_json_auto('{path_str}', format='newline_delimited')
        """)
    else:
        conn.execute(f"""
            CREATE TABLE data AS
            SELECT * FROM read_json_auto('{path_str}', format='array')
        """)


def _load_parquet(conn, file_path: Path) -> None:
    """Load Parquet file into DuckDB."""
    path_str = str(file_path).replace("\\", "/")
    conn.execute(f"CREATE TABLE data AS SELECT * FROM read_parquet('{path_str}')")


def _output_csv(
    result,
    config: Dict[str, Any],
    chunk_size: int,
) -> Generator[bytes, None, None]:
    """Output query results as CSV."""
    delimiter = config.get("field_delimiter", ",")
    record_delimiter = config.get("record_delimiter", "\n")
    quote = config.get("quote_character", '"')

    buffer = ""

    while True:
        rows = result.fetchmany(1000)
        if not rows:
            break

        for row in rows:
            fields = []
            for value in row:
                if value is None:
                    fields.append("")
                elif isinstance(value, str):
                    if delimiter in value or quote in value or record_delimiter in value:
                        escaped = value.replace(quote, quote + quote)
                        fields.append(f'{quote}{escaped}{quote}')
                    else:
                        fields.append(value)
                else:
                    fields.append(str(value))

            buffer += delimiter.join(fields) + record_delimiter

        while len(buffer) >= chunk_size:
            yield buffer[:chunk_size].encode("utf-8")
            buffer = buffer[chunk_size:]

    if buffer:
        yield buffer.encode("utf-8")


def _output_json(
    result,
    config: Dict[str, Any],
    chunk_size: int,
) -> Generator[bytes, None, None]:
    """Output query results as JSON Lines."""
    record_delimiter = config.get("record_delimiter", "\n")
    columns = [desc[0] for desc in result.description]

    buffer = ""

    while True:
        rows = result.fetchmany(1000)
        if not rows:
            break

        for row in rows:
            record = dict(zip(columns, row))
            buffer += json.dumps(record, default=str) + record_delimiter

        while len(buffer) >= chunk_size:
            yield buffer[:chunk_size].encode("utf-8")
            buffer = buffer[chunk_size:]

    if buffer:
        yield buffer.encode("utf-8")
|
||||
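A minimal sketch of driving execute_select_query end to end, assuming a local data.csv with a header row (the file name is hypothetical); the config keys mirror what _load_csv and _output_json read above:

    from pathlib import Path

    chunks = execute_select_query(
        file_path=Path("data.csv"),                    # hypothetical input file
        expression="SELECT * FROM s3object LIMIT 10",  # s3object is rewritten to the data table
        input_format="CSV",
        input_config={"file_header_info": "USE"},
        output_format="JSON",
        output_config={},
    )
    body = b"".join(chunks)  # JSON Lines, one record per line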
177 python/app/site_registry.py Normal file
@@ -0,0 +1,177 @@
from __future__ import annotations

import json
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional


@dataclass
class SiteInfo:
    site_id: str
    endpoint: str
    region: str = "us-east-1"
    priority: int = 100
    display_name: str = ""
    created_at: Optional[float] = None
    updated_at: Optional[float] = None

    def __post_init__(self) -> None:
        if not self.display_name:
            self.display_name = self.site_id
        if self.created_at is None:
            self.created_at = time.time()

    def to_dict(self) -> Dict[str, Any]:
        return {
            "site_id": self.site_id,
            "endpoint": self.endpoint,
            "region": self.region,
            "priority": self.priority,
            "display_name": self.display_name,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> SiteInfo:
        return cls(
            site_id=data["site_id"],
            endpoint=data.get("endpoint", ""),
            region=data.get("region", "us-east-1"),
            priority=data.get("priority", 100),
            display_name=data.get("display_name", ""),
            created_at=data.get("created_at"),
            updated_at=data.get("updated_at"),
        )


@dataclass
class PeerSite:
    site_id: str
    endpoint: str
    region: str = "us-east-1"
    priority: int = 100
    display_name: str = ""
    created_at: Optional[float] = None
    updated_at: Optional[float] = None
    connection_id: Optional[str] = None
    is_healthy: Optional[bool] = None
    last_health_check: Optional[float] = None

    def __post_init__(self) -> None:
        if not self.display_name:
            self.display_name = self.site_id
        if self.created_at is None:
            self.created_at = time.time()

    def to_dict(self) -> Dict[str, Any]:
        return {
            "site_id": self.site_id,
            "endpoint": self.endpoint,
            "region": self.region,
            "priority": self.priority,
            "display_name": self.display_name,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "connection_id": self.connection_id,
            "is_healthy": self.is_healthy,
            "last_health_check": self.last_health_check,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> PeerSite:
        return cls(
            site_id=data["site_id"],
            endpoint=data.get("endpoint", ""),
            region=data.get("region", "us-east-1"),
            priority=data.get("priority", 100),
            display_name=data.get("display_name", ""),
            created_at=data.get("created_at"),
            updated_at=data.get("updated_at"),
            connection_id=data.get("connection_id"),
            is_healthy=data.get("is_healthy"),
            last_health_check=data.get("last_health_check"),
        )


class SiteRegistry:
    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._local_site: Optional[SiteInfo] = None
        self._peers: Dict[str, PeerSite] = {}
        self.reload()

    def reload(self) -> None:
        if not self.config_path.exists():
            self._local_site = None
            self._peers = {}
            return

        try:
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            if data.get("local"):
                self._local_site = SiteInfo.from_dict(data["local"])
            else:
                self._local_site = None

            self._peers = {}
            for peer_data in data.get("peers", []):
                peer = PeerSite.from_dict(peer_data)
                self._peers[peer.site_id] = peer

        except (OSError, json.JSONDecodeError, KeyError):
            self._local_site = None
            self._peers = {}

    def save(self) -> None:
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        data = {
            "local": self._local_site.to_dict() if self._local_site else None,
            "peers": [peer.to_dict() for peer in self._peers.values()],
        }
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)

    def get_local_site(self) -> Optional[SiteInfo]:
        return self._local_site

    def set_local_site(self, site: SiteInfo) -> None:
        site.updated_at = time.time()
        self._local_site = site
        self.save()

    def list_peers(self) -> List[PeerSite]:
        return list(self._peers.values())

    def get_peer(self, site_id: str) -> Optional[PeerSite]:
        return self._peers.get(site_id)

    def add_peer(self, peer: PeerSite) -> None:
        peer.created_at = peer.created_at or time.time()
        self._peers[peer.site_id] = peer
        self.save()

    def update_peer(self, peer: PeerSite) -> None:
        if peer.site_id not in self._peers:
            raise ValueError(f"Peer {peer.site_id} not found")
        peer.updated_at = time.time()
        self._peers[peer.site_id] = peer
        self.save()

    def delete_peer(self, site_id: str) -> bool:
        if site_id in self._peers:
            del self._peers[site_id]
            self.save()
            return True
        return False

    def update_health(self, site_id: str, is_healthy: bool) -> None:
        peer = self._peers.get(site_id)
        if peer:
            peer.is_healthy = is_healthy
            peer.last_health_check = time.time()
            self.save()
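A short usage sketch for SiteRegistry, assuming a throwaway config path; every mutating call persists through save() as shown above:

    from pathlib import Path

    registry = SiteRegistry(Path("/tmp/sites.json"))   # hypothetical path
    registry.set_local_site(SiteInfo(site_id="site-a", endpoint="http://localhost:9000"))
    registry.add_peer(PeerSite(site_id="site-b", endpoint="http://peer:9000"))
    registry.update_health("site-b", is_healthy=True)
    print([p.site_id for p in registry.list_peers()])  # ['site-b']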
416 python/app/site_sync.py Normal file
@@ -0,0 +1,416 @@
from __future__ import annotations

import json
import logging
import tempfile
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError

if TYPE_CHECKING:
    from .connections import ConnectionStore, RemoteConnection
    from .replication import ReplicationManager, ReplicationRule
    from .storage import ObjectStorage

logger = logging.getLogger(__name__)

SITE_SYNC_USER_AGENT = "SiteSyncAgent/1.0"


@dataclass
class SyncedObjectInfo:
    last_synced_at: float
    remote_etag: str
    source: str

    def to_dict(self) -> Dict[str, Any]:
        return {
            "last_synced_at": self.last_synced_at,
            "remote_etag": self.remote_etag,
            "source": self.source,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SyncedObjectInfo":
        return cls(
            last_synced_at=data["last_synced_at"],
            remote_etag=data["remote_etag"],
            source=data["source"],
        )


@dataclass
class SyncState:
    synced_objects: Dict[str, SyncedObjectInfo] = field(default_factory=dict)
    last_full_sync: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        return {
            "synced_objects": {k: v.to_dict() for k, v in self.synced_objects.items()},
            "last_full_sync": self.last_full_sync,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SyncState":
        synced_objects = {}
        for k, v in data.get("synced_objects", {}).items():
            synced_objects[k] = SyncedObjectInfo.from_dict(v)
        return cls(
            synced_objects=synced_objects,
            last_full_sync=data.get("last_full_sync"),
        )


@dataclass
class SiteSyncStats:
    last_sync_at: Optional[float] = None
    objects_pulled: int = 0
    objects_skipped: int = 0
    conflicts_resolved: int = 0
    deletions_applied: int = 0
    errors: int = 0

    def to_dict(self) -> Dict[str, Any]:
        return {
            "last_sync_at": self.last_sync_at,
            "objects_pulled": self.objects_pulled,
            "objects_skipped": self.objects_skipped,
            "conflicts_resolved": self.conflicts_resolved,
            "deletions_applied": self.deletions_applied,
            "errors": self.errors,
        }


@dataclass
class RemoteObjectMeta:
    key: str
    size: int
    last_modified: datetime
    etag: str

    @classmethod
    def from_s3_object(cls, obj: Dict[str, Any]) -> "RemoteObjectMeta":
        return cls(
            key=obj["Key"],
            size=obj.get("Size", 0),
            last_modified=obj["LastModified"],
            etag=obj.get("ETag", "").strip('"'),
        )


def _create_sync_client(
    connection: "RemoteConnection",
    *,
    connect_timeout: int = 10,
    read_timeout: int = 120,
    max_retries: int = 2,
) -> Any:
    config = Config(
        user_agent_extra=SITE_SYNC_USER_AGENT,
        connect_timeout=connect_timeout,
        read_timeout=read_timeout,
        retries={"max_attempts": max_retries},
        signature_version="s3v4",
        s3={"addressing_style": "path"},
        request_checksum_calculation="when_required",
        response_checksum_validation="when_required",
    )
    return boto3.client(
        "s3",
        endpoint_url=connection.endpoint_url,
        aws_access_key_id=connection.access_key,
        aws_secret_access_key=connection.secret_key,
        region_name=connection.region or "us-east-1",
        config=config,
    )


class SiteSyncWorker:
    def __init__(
        self,
        storage: "ObjectStorage",
        connections: "ConnectionStore",
        replication_manager: "ReplicationManager",
        storage_root: Path,
        interval_seconds: int = 60,
        batch_size: int = 100,
        connect_timeout: int = 10,
        read_timeout: int = 120,
        max_retries: int = 2,
        clock_skew_tolerance_seconds: float = 1.0,
    ):
        self.storage = storage
        self.connections = connections
        self.replication_manager = replication_manager
        self.storage_root = storage_root
        self.interval_seconds = interval_seconds
        self.batch_size = batch_size
        self.connect_timeout = connect_timeout
        self.read_timeout = read_timeout
        self.max_retries = max_retries
        self.clock_skew_tolerance_seconds = clock_skew_tolerance_seconds
        self._lock = threading.Lock()
        self._shutdown = threading.Event()
        self._sync_thread: Optional[threading.Thread] = None
        self._bucket_stats: Dict[str, SiteSyncStats] = {}

    def _create_client(self, connection: "RemoteConnection") -> Any:
        """Create an S3 client with the worker's configured timeouts."""
        return _create_sync_client(
            connection,
            connect_timeout=self.connect_timeout,
            read_timeout=self.read_timeout,
            max_retries=self.max_retries,
        )

    def start(self) -> None:
        if self._sync_thread is not None and self._sync_thread.is_alive():
            return
        self._shutdown.clear()
        self._sync_thread = threading.Thread(
            target=self._sync_loop, name="site-sync-worker", daemon=True
        )
        self._sync_thread.start()
        logger.info("Site sync worker started (interval=%ds)", self.interval_seconds)

    def shutdown(self) -> None:
        self._shutdown.set()
        if self._sync_thread is not None:
            self._sync_thread.join(timeout=10.0)
        logger.info("Site sync worker shut down")

    def trigger_sync(self, bucket_name: str) -> Optional[SiteSyncStats]:
        from .replication import REPLICATION_MODE_BIDIRECTIONAL
        rule = self.replication_manager.get_rule(bucket_name)
        if not rule or rule.mode != REPLICATION_MODE_BIDIRECTIONAL or not rule.enabled:
            return None
        return self._sync_bucket(rule)

    def get_stats(self, bucket_name: str) -> Optional[SiteSyncStats]:
        with self._lock:
            return self._bucket_stats.get(bucket_name)

    def _sync_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if self._shutdown.is_set():
                break
            self._run_sync_cycle()

    def _run_sync_cycle(self) -> None:
        from .replication import REPLICATION_MODE_BIDIRECTIONAL
        for bucket_name, rule in list(self.replication_manager._rules.items()):
            if self._shutdown.is_set():
                break
            if rule.mode != REPLICATION_MODE_BIDIRECTIONAL or not rule.enabled:
                continue
            try:
                stats = self._sync_bucket(rule)
                with self._lock:
                    self._bucket_stats[bucket_name] = stats
            except Exception as e:
                logger.exception("Site sync failed for bucket %s: %s", bucket_name, e)

    def _sync_bucket(self, rule: "ReplicationRule") -> SiteSyncStats:
        stats = SiteSyncStats()
        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning("Connection %s not found for bucket %s", rule.target_connection_id, rule.bucket_name)
            stats.errors += 1
            return stats

        try:
            local_objects = self._list_local_objects(rule.bucket_name)
        except Exception as e:
            logger.error("Failed to list local objects for %s: %s", rule.bucket_name, e)
            stats.errors += 1
            return stats

        try:
            remote_objects = self._list_remote_objects(rule, connection)
        except Exception as e:
            logger.error("Failed to list remote objects for %s: %s", rule.bucket_name, e)
            stats.errors += 1
            return stats

        sync_state = self._load_sync_state(rule.bucket_name)
        local_keys = set(local_objects.keys())
        remote_keys = set(remote_objects.keys())

        to_pull = []
        for key in remote_keys:
            remote_meta = remote_objects[key]
            local_meta = local_objects.get(key)
            if local_meta is None:
                to_pull.append(key)
            else:
                resolution = self._resolve_conflict(local_meta, remote_meta)
                if resolution == "pull":
                    to_pull.append(key)
                    stats.conflicts_resolved += 1
                else:
                    stats.objects_skipped += 1

        pulled_count = 0
        for key in to_pull:
            if self._shutdown.is_set():
                break
            if pulled_count >= self.batch_size:
                break
            remote_meta = remote_objects[key]
            success = self._pull_object(rule, key, connection, remote_meta)
            if success:
                stats.objects_pulled += 1
                pulled_count += 1
                sync_state.synced_objects[key] = SyncedObjectInfo(
                    last_synced_at=time.time(),
                    remote_etag=remote_meta.etag,
                    source="remote",
                )
            else:
                stats.errors += 1

        if rule.sync_deletions:
            for key in list(sync_state.synced_objects.keys()):
                if key not in remote_keys and key in local_keys:
                    tracked = sync_state.synced_objects[key]
                    if tracked.source == "remote":
                        local_meta = local_objects.get(key)
                        if local_meta and local_meta.last_modified.timestamp() <= tracked.last_synced_at:
                            success = self._apply_remote_deletion(rule.bucket_name, key)
                            if success:
                                stats.deletions_applied += 1
                                del sync_state.synced_objects[key]

        sync_state.last_full_sync = time.time()
        self._save_sync_state(rule.bucket_name, sync_state)

        with self.replication_manager._stats_lock:
            rule.last_pull_at = time.time()
            self.replication_manager.save_rules()

        stats.last_sync_at = time.time()
        logger.info(
            "Site sync completed for %s: pulled=%d, skipped=%d, conflicts=%d, deletions=%d, errors=%d",
            rule.bucket_name,
            stats.objects_pulled,
            stats.objects_skipped,
            stats.conflicts_resolved,
            stats.deletions_applied,
            stats.errors,
        )
        return stats

    def _list_local_objects(self, bucket_name: str) -> Dict[str, Any]:
        from .storage import ObjectMeta
        objects = self.storage.list_objects_all(bucket_name)
        return {obj.key: obj for obj in objects}

    def _list_remote_objects(self, rule: "ReplicationRule", connection: "RemoteConnection") -> Dict[str, RemoteObjectMeta]:
        s3 = self._create_client(connection)
        result: Dict[str, RemoteObjectMeta] = {}
        paginator = s3.get_paginator("list_objects_v2")
        try:
            for page in paginator.paginate(Bucket=rule.target_bucket):
                for obj in page.get("Contents", []):
                    meta = RemoteObjectMeta.from_s3_object(obj)
                    result[meta.key] = meta
        except ClientError as e:
            if e.response["Error"]["Code"] == "NoSuchBucket":
                return {}
            raise
        return result

    def _resolve_conflict(self, local_meta: Any, remote_meta: RemoteObjectMeta) -> str:
        local_ts = local_meta.last_modified.timestamp()
        remote_ts = remote_meta.last_modified.timestamp()

        if abs(remote_ts - local_ts) < self.clock_skew_tolerance_seconds:
            local_etag = local_meta.etag or ""
            if remote_meta.etag == local_etag:
                return "skip"
            return "pull" if remote_meta.etag > local_etag else "keep"

        return "pull" if remote_ts > local_ts else "keep"

    def _pull_object(
        self,
        rule: "ReplicationRule",
        object_key: str,
        connection: "RemoteConnection",
        remote_meta: RemoteObjectMeta,
    ) -> bool:
        s3 = self._create_client(connection)
        tmp_path = None
        try:
            tmp_dir = self.storage_root / ".myfsio.sys" / "tmp"
            tmp_dir.mkdir(parents=True, exist_ok=True)
            with tempfile.NamedTemporaryFile(dir=tmp_dir, delete=False) as tmp_file:
                tmp_path = Path(tmp_file.name)

            s3.download_file(rule.target_bucket, object_key, str(tmp_path))

            head_response = s3.head_object(Bucket=rule.target_bucket, Key=object_key)
            user_metadata = head_response.get("Metadata", {})

            with open(tmp_path, "rb") as f:
                self.storage.put_object(
                    rule.bucket_name,
                    object_key,
                    f,
                    metadata=user_metadata if user_metadata else None,
                )

            logger.debug("Pulled object %s/%s from remote", rule.bucket_name, object_key)
            return True

        except ClientError as e:
            logger.error("Failed to pull %s/%s: %s", rule.bucket_name, object_key, e)
            return False
        except Exception as e:
            logger.error("Failed to store pulled object %s/%s: %s", rule.bucket_name, object_key, e)
            return False
        finally:
            if tmp_path and tmp_path.exists():
                try:
                    tmp_path.unlink()
                except OSError:
                    pass

    def _apply_remote_deletion(self, bucket_name: str, object_key: str) -> bool:
        try:
            self.storage.delete_object(bucket_name, object_key)
            logger.debug("Applied remote deletion for %s/%s", bucket_name, object_key)
            return True
        except Exception as e:
            logger.error("Failed to apply remote deletion for %s/%s: %s", bucket_name, object_key, e)
            return False

    def _sync_state_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "site_sync_state.json"

    def _load_sync_state(self, bucket_name: str) -> SyncState:
        path = self._sync_state_path(bucket_name)
        if not path.exists():
            return SyncState()
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
            return SyncState.from_dict(data)
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning("Failed to load sync state for %s: %s", bucket_name, e)
            return SyncState()

    def _save_sync_state(self, bucket_name: str, state: SyncState) -> None:
        path = self._sync_state_path(bucket_name)
        path.parent.mkdir(parents=True, exist_ok=True)
        try:
            path.write_text(json.dumps(state.to_dict(), indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning("Failed to save sync state for %s: %s", bucket_name, e)
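The _resolve_conflict rule above is last-writer-wins with an ETag tiebreak inside the clock-skew window. A sketch exercising it directly, using a stub for self that carries only the one attribute the method reads:

    from datetime import datetime, timedelta, timezone
    from types import SimpleNamespace

    stub = SimpleNamespace(clock_skew_tolerance_seconds=1.0)
    now = datetime.now(timezone.utc)
    local = SimpleNamespace(last_modified=now, etag="abc")

    newer = RemoteObjectMeta(key="k", size=1, last_modified=now + timedelta(seconds=5), etag="def")
    assert SiteSyncWorker._resolve_conflict(stub, local, newer) == "pull"  # newer remote wins

    same = RemoteObjectMeta(key="k", size=1, last_modified=now, etag="abc")
    assert SiteSyncWorker._resolve_conflict(stub, local, same) == "skip"   # identical within the window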
2904 python/app/storage.py Normal file
File diff suppressed because it is too large
215 python/app/system_metrics.py Normal file
@@ -0,0 +1,215 @@
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import psutil

if TYPE_CHECKING:
    from .storage import ObjectStorage

logger = logging.getLogger(__name__)


@dataclass
class SystemMetricsSnapshot:
    timestamp: datetime
    cpu_percent: float
    memory_percent: float
    disk_percent: float
    storage_bytes: int

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "cpu_percent": round(self.cpu_percent, 2),
            "memory_percent": round(self.memory_percent, 2),
            "disk_percent": round(self.disk_percent, 2),
            "storage_bytes": self.storage_bytes,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SystemMetricsSnapshot":
        timestamp_str = data["timestamp"]
        if timestamp_str.endswith("Z"):
            timestamp_str = timestamp_str[:-1] + "+00:00"
        return cls(
            timestamp=datetime.fromisoformat(timestamp_str),
            cpu_percent=data.get("cpu_percent", 0.0),
            memory_percent=data.get("memory_percent", 0.0),
            disk_percent=data.get("disk_percent", 0.0),
            storage_bytes=data.get("storage_bytes", 0),
        )


class SystemMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._shutdown = threading.Event()
        self._snapshots: List[SystemMetricsSnapshot] = []
        self._storage_ref: Optional["ObjectStorage"] = None

        self._load_history()

        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop,
            name="system-metrics-snapshot",
            daemon=True,
        )
        self._snapshot_thread.start()

    def set_storage(self, storage: "ObjectStorage") -> None:
        with self._lock:
            self._storage_ref = storage

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "metrics_history.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            history_data = data.get("history", [])
            self._snapshots = [SystemMetricsSnapshot.from_dict(s) for s in history_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load system metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"history": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save system metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        try:
            cpu_percent = psutil.cpu_percent(interval=0.1)
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage(str(self.storage_root))

            storage_bytes = 0
            with self._lock:
                storage = self._storage_ref
                if storage:
                    try:
                        buckets = storage.list_buckets()
                        for bucket in buckets:
                            stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                            storage_bytes += stats.get("total_bytes", stats.get("bytes", 0))
                    except Exception as e:
                        logger.warning(f"Failed to collect bucket stats: {e}")

            snapshot = SystemMetricsSnapshot(
                timestamp=datetime.now(timezone.utc),
                cpu_percent=cpu_percent,
                memory_percent=memory.percent,
                disk_percent=disk.percent,
                storage_bytes=storage_bytes,
            )

            with self._lock:
                self._snapshots.append(snapshot)
                self._prune_old_snapshots()
                self._save_history()

            logger.debug(f"System metrics snapshot taken: CPU={cpu_percent:.1f}%, Memory={memory.percent:.1f}%")
        except Exception as e:
            logger.warning(f"Failed to take system metrics snapshot: {e}")

    def get_current(self) -> Dict[str, Any]:
        cpu_percent = psutil.cpu_percent(interval=0.1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage(str(self.storage_root))
        boot_time = psutil.boot_time()
        uptime_seconds = time.time() - boot_time
        uptime_days = int(uptime_seconds / 86400)

        total_buckets = 0
        total_objects = 0
        total_bytes_used = 0
        total_versions = 0

        with self._lock:
            storage = self._storage_ref
            if storage:
                try:
                    buckets = storage.list_buckets()
                    total_buckets = len(buckets)
                    for bucket in buckets:
                        stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                        total_objects += stats.get("total_objects", stats.get("objects", 0))
                        total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
                        total_versions += stats.get("version_count", 0)
                except Exception as e:
                    logger.warning(f"Failed to collect current bucket stats: {e}")

        return {
            "cpu_percent": round(cpu_percent, 2),
            "memory": {
                "total": memory.total,
                "available": memory.available,
                "used": memory.used,
                "percent": round(memory.percent, 2),
            },
            "disk": {
                "total": disk.total,
                "free": disk.free,
                "used": disk.used,
                "percent": round(disk.percent, 2),
            },
            "app": {
                "buckets": total_buckets,
                "objects": total_objects,
                "versions": total_versions,
                "storage_bytes": total_bytes_used,
                "uptime_days": uptime_days,
            },
        }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)

        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]

        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)
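A quick sketch of the collector's public surface; note that the app storage totals stay at zero until set_storage wires in an ObjectStorage instance, and shutdown() takes one final snapshot before joining the thread:

    from pathlib import Path

    collector = SystemMetricsCollector(Path("/tmp"), interval_minutes=1, retention_hours=1)
    current = collector.get_current()
    print(current["cpu_percent"], current["disk"]["percent"])
    print(len(collector.get_history(hours=1)), "snapshots retained")
    collector.shutdown()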
4309 python/app/ui.py Normal file
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
 """Central location for the application version string."""
 from __future__ import annotations

-APP_VERSION = "0.1.7"
+APP_VERSION = "0.4.3"


 def get_version() -> str:
108 python/app/website_domains.py Normal file
@@ -0,0 +1,108 @@
from __future__ import annotations

import json
import re
import threading
from pathlib import Path
from typing import Dict, List, Optional

_DOMAIN_RE = re.compile(
    r"^(?!-)[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
)


def normalize_domain(raw: str) -> str:
    raw = raw.strip().lower()
    for prefix in ("https://", "http://"):
        if raw.startswith(prefix):
            raw = raw[len(prefix):]
    raw = raw.split("/", 1)[0]
    raw = raw.split("?", 1)[0]
    raw = raw.split("#", 1)[0]
    if ":" in raw:
        raw = raw.rsplit(":", 1)[0]
    return raw


def is_valid_domain(domain: str) -> bool:
    if not domain or len(domain) > 253:
        return False
    return bool(_DOMAIN_RE.match(domain))


class WebsiteDomainStore:
    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._lock = threading.Lock()
        self._domains: Dict[str, str] = {}
        self._last_mtime: float = 0.0
        self.reload()

    def reload(self) -> None:
        if not self.config_path.exists():
            self._domains = {}
            self._last_mtime = 0.0
            return
        try:
            self._last_mtime = self.config_path.stat().st_mtime
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            if isinstance(data, dict):
                self._domains = {k.lower(): v for k, v in data.items()}
            else:
                self._domains = {}
        except (OSError, json.JSONDecodeError):
            self._domains = {}

    def _maybe_reload(self) -> None:
        try:
            if self.config_path.exists():
                mtime = self.config_path.stat().st_mtime
                if mtime != self._last_mtime:
                    self._last_mtime = mtime
                    with open(self.config_path, "r", encoding="utf-8") as f:
                        data = json.load(f)
                    if isinstance(data, dict):
                        self._domains = {k.lower(): v for k, v in data.items()}
                    else:
                        self._domains = {}
            elif self._domains:
                self._domains = {}
                self._last_mtime = 0.0
        except (OSError, json.JSONDecodeError):
            pass

    def _save(self) -> None:
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(self._domains, f, indent=2)
        self._last_mtime = self.config_path.stat().st_mtime

    def list_all(self) -> List[Dict[str, str]]:
        with self._lock:
            self._maybe_reload()
            return [{"domain": d, "bucket": b} for d, b in self._domains.items()]

    def get_bucket(self, domain: str) -> Optional[str]:
        with self._lock:
            self._maybe_reload()
            return self._domains.get(domain.lower())

    def get_domains_for_bucket(self, bucket: str) -> List[str]:
        with self._lock:
            self._maybe_reload()
            return [d for d, b in self._domains.items() if b == bucket]

    def set_mapping(self, domain: str, bucket: str) -> None:
        with self._lock:
            self._domains[domain.lower()] = bucket
            self._save()

    def delete_mapping(self, domain: str) -> bool:
        with self._lock:
            key = domain.lower()
            if key not in self._domains:
                return False
            del self._domains[key]
            self._save()
            return True
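A sketch of the normalization and mapping behavior above; normalize_domain strips scheme, path, query, fragment, and port before validation, and the store lower-cases keys on write:

    from pathlib import Path

    assert normalize_domain("HTTPS://Example.COM:8443/index.html") == "example.com"
    assert is_valid_domain("example.com") and not is_valid_domain("-bad-.com")

    store = WebsiteDomainStore(Path("/tmp/domains.json"))  # hypothetical path
    store.set_mapping("Example.com", "my-bucket")
    assert store.get_bucket("example.com") == "my-bucket"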
4 python/docker-entrypoint.sh Normal file
@@ -0,0 +1,4 @@
#!/bin/sh
set -e

exec python run.py --prod
24 python/myfsio_core/Cargo.toml Normal file
@@ -0,0 +1,24 @@
[package]
name = "myfsio_core"
version = "0.1.0"
edition = "2021"

[lib]
name = "myfsio_core"
crate-type = ["cdylib"]

[dependencies]
pyo3 = { version = "0.28", features = ["extension-module"] }
hmac = "0.12"
sha2 = "0.10"
md-5 = "0.10"
hex = "0.4"
unicode-normalization = "0.1"
serde_json = "1"
regex = "1"
lru = "0.14"
parking_lot = "0.12"
percent-encoding = "2"
aes-gcm = "0.10"
hkdf = "0.12"
uuid = { version = "1", features = ["v4"] }
11 python/myfsio_core/pyproject.toml Normal file
@@ -0,0 +1,11 @@
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[project]
name = "myfsio_core"
version = "0.1.0"
requires-python = ">=3.10"

[tool.maturin]
features = ["pyo3/extension-module"]
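With this layout, running maturin develop (or pip install .) inside python/myfsio_core builds the cdylib and makes it importable from Python. A minimal smoke test, assuming the functions registered in src/lib.rs further below:

    import myfsio_core

    print(myfsio_core.sha256_bytes(b"hello"))  # hex digest string
    print(myfsio_core.md5_bytes(b"hello"))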
192 python/myfsio_core/src/crypto.rs Normal file
@@ -0,0 +1,192 @@
use aes_gcm::aead::Aead;
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use hkdf::Hkdf;
use pyo3::exceptions::{PyIOError, PyValueError};
use pyo3::prelude::*;
use sha2::Sha256;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};

const DEFAULT_CHUNK_SIZE: usize = 65536;
const HEADER_SIZE: usize = 4;

fn read_exact_chunk(reader: &mut impl Read, buf: &mut [u8]) -> std::io::Result<usize> {
    let mut filled = 0;
    while filled < buf.len() {
        match reader.read(&mut buf[filled..]) {
            Ok(0) => break,
            Ok(n) => filled += n,
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(filled)
}

fn derive_chunk_nonce(base_nonce: &[u8], chunk_index: u32) -> Result<[u8; 12], String> {
    let hkdf = Hkdf::<Sha256>::new(Some(base_nonce), b"chunk_nonce");
    let mut okm = [0u8; 12];
    hkdf.expand(&chunk_index.to_be_bytes(), &mut okm)
        .map_err(|e| format!("HKDF expand failed: {}", e))?;
    Ok(okm)
}

#[pyfunction]
#[pyo3(signature = (input_path, output_path, key, base_nonce, chunk_size=DEFAULT_CHUNK_SIZE))]
pub fn encrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
    chunk_size: usize,
) -> PyResult<u32> {
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    let chunk_size = if chunk_size == 0 {
        DEFAULT_CHUNK_SIZE
    } else {
        chunk_size
    };

    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        outfile
            .write_all(&[0u8; 4])
            .map_err(|e| PyIOError::new_err(format!("Failed to write header: {}", e)))?;

        let mut buf = vec![0u8; chunk_size];
        let mut chunk_index: u32 = 0;

        loop {
            let n = read_exact_chunk(&mut infile, &mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read: {}", e)))?;
            if n == 0 {
                break;
            }

            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            let encrypted = cipher
                .encrypt(nonce, &buf[..n])
                .map_err(|e| PyValueError::new_err(format!("Encrypt failed: {}", e)))?;

            let size = encrypted.len() as u32;
            outfile
                .write_all(&size.to_be_bytes())
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk size: {}", e)))?;
            outfile
                .write_all(&encrypted)
                .map_err(|e| PyIOError::new_err(format!("Failed to write chunk: {}", e)))?;

            chunk_index += 1;
        }

        outfile
            .seek(SeekFrom::Start(0))
            .map_err(|e| PyIOError::new_err(format!("Failed to seek: {}", e)))?;
        outfile
            .write_all(&chunk_index.to_be_bytes())
            .map_err(|e| PyIOError::new_err(format!("Failed to write chunk count: {}", e)))?;

        Ok(chunk_index)
    })
}

#[pyfunction]
pub fn decrypt_stream_chunked(
    py: Python<'_>,
    input_path: &str,
    output_path: &str,
    key: &[u8],
    base_nonce: &[u8],
) -> PyResult<u32> {
    if key.len() != 32 {
        return Err(PyValueError::new_err(format!(
            "Key must be 32 bytes, got {}",
            key.len()
        )));
    }
    if base_nonce.len() != 12 {
        return Err(PyValueError::new_err(format!(
            "Base nonce must be 12 bytes, got {}",
            base_nonce.len()
        )));
    }

    let inp = input_path.to_owned();
    let out = output_path.to_owned();
    let key_arr: [u8; 32] = key.try_into().unwrap();
    let nonce_arr: [u8; 12] = base_nonce.try_into().unwrap();

    py.detach(move || {
        let cipher = Aes256Gcm::new(&key_arr.into());

        let mut infile = File::open(&inp)
            .map_err(|e| PyIOError::new_err(format!("Failed to open input: {}", e)))?;
        let mut outfile = File::create(&out)
            .map_err(|e| PyIOError::new_err(format!("Failed to create output: {}", e)))?;

        let mut header = [0u8; HEADER_SIZE];
        infile
            .read_exact(&mut header)
            .map_err(|e| PyIOError::new_err(format!("Failed to read header: {}", e)))?;
        let chunk_count = u32::from_be_bytes(header);

        let mut size_buf = [0u8; HEADER_SIZE];
        for chunk_index in 0..chunk_count {
            infile
                .read_exact(&mut size_buf)
                .map_err(|e| {
                    PyIOError::new_err(format!(
                        "Failed to read chunk {} size: {}",
                        chunk_index, e
                    ))
                })?;
            let chunk_size = u32::from_be_bytes(size_buf) as usize;

            let mut encrypted = vec![0u8; chunk_size];
            infile.read_exact(&mut encrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to read chunk {}: {}", chunk_index, e))
            })?;

            let nonce_bytes = derive_chunk_nonce(&nonce_arr, chunk_index)
                .map_err(|e| PyValueError::new_err(e))?;
            let nonce = Nonce::from_slice(&nonce_bytes);

            let decrypted = cipher.decrypt(nonce, encrypted.as_ref()).map_err(|e| {
                PyValueError::new_err(format!("Decrypt chunk {} failed: {}", chunk_index, e))
            })?;

            outfile.write_all(&decrypted).map_err(|e| {
                PyIOError::new_err(format!("Failed to write chunk {}: {}", chunk_index, e))
            })?;
        }

        Ok(chunk_count)
    })
}
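A roundtrip sketch for the chunked AES-256-GCM stream functions, called from Python and assuming the built myfsio_core module; the key must be exactly 32 bytes and the base nonce 12 (per-chunk nonces are HKDF-derived from it, as shown above):

    import os
    import tempfile

    import myfsio_core

    key, base_nonce = os.urandom(32), os.urandom(12)

    src = tempfile.NamedTemporaryFile(delete=False)
    src.write(b"payload" * 10_000)
    src.close()
    enc, dec = src.name + ".enc", src.name + ".dec"

    n_chunks = myfsio_core.encrypt_stream_chunked(src.name, enc, key, base_nonce, 65536)
    assert myfsio_core.decrypt_stream_chunked(enc, dec, key, base_nonce) == n_chunks
    assert open(dec, "rb").read() == open(src.name, "rb").read()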
90 python/myfsio_core/src/hashing.rs Normal file
@@ -0,0 +1,90 @@
use md5::{Digest, Md5};
use pyo3::exceptions::PyIOError;
use pyo3::prelude::*;
use sha2::Sha256;
use std::fs::File;
use std::io::Read;

const CHUNK_SIZE: usize = 65536;

#[pyfunction]
pub fn md5_file(py: Python<'_>, path: &str) -> PyResult<String> {
    let path = path.to_owned();
    py.detach(move || {
        let mut file = File::open(&path)
            .map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
        let mut hasher = Md5::new();
        let mut buf = vec![0u8; CHUNK_SIZE];
        loop {
            let n = file
                .read(&mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
            if n == 0 {
                break;
            }
            hasher.update(&buf[..n]);
        }
        Ok(format!("{:x}", hasher.finalize()))
    })
}

#[pyfunction]
pub fn md5_bytes(data: &[u8]) -> String {
    let mut hasher = Md5::new();
    hasher.update(data);
    format!("{:x}", hasher.finalize())
}

#[pyfunction]
pub fn sha256_file(py: Python<'_>, path: &str) -> PyResult<String> {
    let path = path.to_owned();
    py.detach(move || {
        let mut file = File::open(&path)
            .map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
        let mut hasher = Sha256::new();
        let mut buf = vec![0u8; CHUNK_SIZE];
        loop {
            let n = file
                .read(&mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
            if n == 0 {
                break;
            }
            hasher.update(&buf[..n]);
        }
        Ok(format!("{:x}", hasher.finalize()))
    })
}

#[pyfunction]
pub fn sha256_bytes(data: &[u8]) -> String {
    let mut hasher = Sha256::new();
    hasher.update(data);
    format!("{:x}", hasher.finalize())
}

#[pyfunction]
pub fn md5_sha256_file(py: Python<'_>, path: &str) -> PyResult<(String, String)> {
    let path = path.to_owned();
    py.detach(move || {
        let mut file = File::open(&path)
            .map_err(|e| PyIOError::new_err(format!("Failed to open file: {}", e)))?;
        let mut md5_hasher = Md5::new();
        let mut sha_hasher = Sha256::new();
        let mut buf = vec![0u8; CHUNK_SIZE];
        loop {
            let n = file
                .read(&mut buf)
                .map_err(|e| PyIOError::new_err(format!("Failed to read file: {}", e)))?;
            if n == 0 {
                break;
            }
            md5_hasher.update(&buf[..n]);
            sha_hasher.update(&buf[..n]);
        }
        Ok((
            format!("{:x}", md5_hasher.finalize()),
            format!("{:x}", sha_hasher.finalize()),
        ))
    })
}
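The Rust hashers should agree with hashlib; a cross-check sketch, again assuming the built module:

    import hashlib

    import myfsio_core

    data = b"x" * 100_000
    assert myfsio_core.md5_bytes(data) == hashlib.md5(data).hexdigest()
    assert myfsio_core.sha256_bytes(data) == hashlib.sha256(data).hexdigest()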
51 python/myfsio_core/src/lib.rs Normal file
@@ -0,0 +1,51 @@
mod crypto;
mod hashing;
mod metadata;
mod sigv4;
mod storage;
mod streaming;
mod validation;

use pyo3::prelude::*;

#[pymodule]
mod myfsio_core {
    use super::*;

    #[pymodule_init]
    fn init(m: &Bound<'_, PyModule>) -> PyResult<()> {
        m.add_function(wrap_pyfunction!(sigv4::verify_sigv4_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::derive_signing_key, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::compute_signature, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::build_string_to_sign, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::constant_time_compare, m)?)?;
        m.add_function(wrap_pyfunction!(sigv4::clear_signing_key_cache, m)?)?;

        m.add_function(wrap_pyfunction!(hashing::md5_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_file, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::sha256_bytes, m)?)?;
        m.add_function(wrap_pyfunction!(hashing::md5_sha256_file, m)?)?;

        m.add_function(wrap_pyfunction!(validation::validate_object_key, m)?)?;
        m.add_function(wrap_pyfunction!(validation::validate_bucket_name, m)?)?;

        m.add_function(wrap_pyfunction!(metadata::read_index_entry, m)?)?;

        m.add_function(wrap_pyfunction!(storage::write_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::delete_index_entry, m)?)?;
        m.add_function(wrap_pyfunction!(storage::check_bucket_contents, m)?)?;
        m.add_function(wrap_pyfunction!(storage::shallow_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::bucket_stats_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::search_objects_scan, m)?)?;
        m.add_function(wrap_pyfunction!(storage::build_object_cache, m)?)?;

        m.add_function(wrap_pyfunction!(streaming::stream_to_file_with_md5, m)?)?;
        m.add_function(wrap_pyfunction!(streaming::assemble_parts_with_md5, m)?)?;

        m.add_function(wrap_pyfunction!(crypto::encrypt_stream_chunked, m)?)?;
        m.add_function(wrap_pyfunction!(crypto::decrypt_stream_chunked, m)?)?;

        Ok(())
    }
}
71 python/myfsio_core/src/metadata.rs Normal file
@@ -0,0 +1,71 @@
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyList, PyString};
use serde_json::Value;
use std::fs;

const MAX_DEPTH: u32 = 64;

fn value_to_py(py: Python<'_>, v: &Value, depth: u32) -> PyResult<Py<PyAny>> {
    if depth > MAX_DEPTH {
        return Err(PyValueError::new_err("JSON nesting too deep"));
    }
    match v {
        Value::Null => Ok(py.None()),
        Value::Bool(b) => Ok((*b).into_pyobject(py)?.to_owned().into_any().unbind()),
        Value::Number(n) => {
            if let Some(i) = n.as_i64() {
                Ok(i.into_pyobject(py)?.into_any().unbind())
            } else if let Some(f) = n.as_f64() {
                Ok(f.into_pyobject(py)?.into_any().unbind())
            } else {
                Ok(py.None())
            }
        }
        Value::String(s) => Ok(PyString::new(py, s).into_any().unbind()),
        Value::Array(arr) => {
            let list = PyList::empty(py);
            for item in arr {
                list.append(value_to_py(py, item, depth + 1)?)?;
            }
            Ok(list.into_any().unbind())
        }
        Value::Object(map) => {
            let dict = PyDict::new(py);
            for (k, val) in map {
                dict.set_item(k, value_to_py(py, val, depth + 1)?)?;
            }
            Ok(dict.into_any().unbind())
        }
    }
}

#[pyfunction]
pub fn read_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
) -> PyResult<Option<Py<PyAny>>> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();

    let entry: Option<Value> = py.detach(move || -> PyResult<Option<Value>> {
        let content = match fs::read_to_string(&path_owned) {
            Ok(c) => c,
            Err(_) => return Ok(None),
        };
        let parsed: Value = match serde_json::from_str(&content) {
            Ok(v) => v,
            Err(_) => return Ok(None),
        };
        match parsed {
            Value::Object(mut map) => Ok(map.remove(&entry_owned)),
            _ => Ok(None),
        }
    })?;

    match entry {
        Some(val) => Ok(Some(value_to_py(py, &val, 0)?)),
        None => Ok(None),
    }
}
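A sketch of read_index_entry from Python, assuming the built module; it returns the named entry converted to native Python objects, or None when the file, the JSON, or the key is missing:

    import json

    import myfsio_core

    with open("index.json", "w", encoding="utf-8") as f:  # hypothetical scratch file
        json.dump({"obj/key.txt": {"__etag__": "abc123", "size": 42}}, f)

    entry = myfsio_core.read_index_entry("index.json", "obj/key.txt")
    assert entry == {"__etag__": "abc123", "size": 42}
    assert myfsio_core.read_index_entry("index.json", "missing") is None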
193
python/myfsio_core/src/sigv4.rs
Normal file
193
python/myfsio_core/src/sigv4.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use hmac::{Hmac, Mac};
|
||||
use lru::LruCache;
|
||||
use parking_lot::Mutex;
|
||||
use percent_encoding::{percent_encode, AsciiSet, NON_ALPHANUMERIC};
|
||||
use pyo3::prelude::*;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Instant;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
struct CacheEntry {
|
||||
key: Vec<u8>,
|
||||
created: Instant,
|
||||
}
|
||||
|
||||
static SIGNING_KEY_CACHE: LazyLock<Mutex<LruCache<(String, String, String, String), CacheEntry>>> =
|
||||
LazyLock::new(|| Mutex::new(LruCache::new(NonZeroUsize::new(256).unwrap())));
|
||||
|
||||
const CACHE_TTL_SECS: u64 = 60;
|
||||
|
||||
const AWS_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC
|
||||
.remove(b'-')
|
||||
.remove(b'_')
|
||||
.remove(b'.')
|
||||
.remove(b'~');
|
||||
|
||||
fn hmac_sha256(key: &[u8], msg: &[u8]) -> Vec<u8> {
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key length is always valid");
|
||||
mac.update(msg);
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
fn sha256_hex(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
fn aws_uri_encode(input: &str) -> String {
|
||||
percent_encode(input.as_bytes(), AWS_ENCODE_SET).to_string()
|
||||
}
|
||||
|
||||
fn derive_signing_key_cached(
|
||||
secret_key: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
) -> Vec<u8> {
|
||||
let cache_key = (
|
||||
secret_key.to_owned(),
|
||||
date_stamp.to_owned(),
|
||||
region.to_owned(),
|
||||
service.to_owned(),
|
||||
);
|
||||
|
||||
{
|
||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
||||
if let Some(entry) = cache.get(&cache_key) {
|
||||
if entry.created.elapsed().as_secs() < CACHE_TTL_SECS {
|
||||
return entry.key.clone();
|
||||
}
|
||||
cache.pop(&cache_key);
|
||||
}
|
||||
}
|
||||
|
||||
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date_stamp.as_bytes());
|
||||
let k_region = hmac_sha256(&k_date, region.as_bytes());
|
||||
let k_service = hmac_sha256(&k_region, service.as_bytes());
|
||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
||||
|
||||
{
|
||||
let mut cache = SIGNING_KEY_CACHE.lock();
|
||||
cache.put(
|
||||
cache_key,
|
||||
CacheEntry {
|
||||
key: k_signing.clone(),
|
||||
created: Instant::now(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
k_signing
|
||||
}
|
||||
|
||||
fn constant_time_compare_inner(a: &[u8], b: &[u8]) -> bool {
|
||||
if a.len() != b.len() {
|
||||
return false;
|
||||
}
|
||||
let mut result: u8 = 0;
|
||||
for (x, y) in a.iter().zip(b.iter()) {
|
||||
result |= x ^ y;
|
||||
}
|
||||
result == 0
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn verify_sigv4_signature(
|
||||
method: &str,
|
||||
canonical_uri: &str,
|
||||
query_params: Vec<(String, String)>,
|
||||
signed_headers_str: &str,
|
||||
header_values: Vec<(String, String)>,
|
||||
payload_hash: &str,
|
||||
amz_date: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
secret_key: &str,
|
||||
provided_signature: &str,
|
||||
) -> bool {
|
||||
let mut sorted_params = query_params;
|
||||
sorted_params.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1)));
|
||||
|
||||
let canonical_query_string = sorted_params
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{}={}", aws_uri_encode(k), aws_uri_encode(v)))
|
||||
.collect::<Vec<_>>()
|
||||
.join("&");
|
||||
|
||||
let mut canonical_headers = String::new();
|
||||
for (name, value) in &header_values {
|
||||
let lower_name = name.to_lowercase();
|
||||
let normalized = value.split_whitespace().collect::<Vec<_>>().join(" ");
|
||||
let final_value = if lower_name == "expect" && normalized.is_empty() {
|
||||
"100-continue"
|
||||
} else {
|
||||
&normalized
|
||||
};
|
||||
canonical_headers.push_str(&lower_name);
|
||||
canonical_headers.push(':');
|
||||
canonical_headers.push_str(final_value);
|
||||
canonical_headers.push('\n');
|
||||
}
|
||||
|
||||
let canonical_request = format!(
|
||||
"{}\n{}\n{}\n{}\n{}\n{}",
|
||||
method, canonical_uri, canonical_query_string, canonical_headers, signed_headers_str, payload_hash
|
||||
);
|
||||
|
||||
let credential_scope = format!("{}/{}/{}/aws4_request", date_stamp, region, service);
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
let string_to_sign = format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
);
|
||||
|
||||
let signing_key = derive_signing_key_cached(secret_key, date_stamp, region, service);
|
||||
let calculated = hmac_sha256(&signing_key, string_to_sign.as_bytes());
|
||||
let calculated_hex = hex::encode(&calculated);
|
||||
|
||||
constant_time_compare_inner(calculated_hex.as_bytes(), provided_signature.as_bytes())
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn derive_signing_key(
|
||||
secret_key: &str,
|
||||
date_stamp: &str,
|
||||
region: &str,
|
||||
service: &str,
|
||||
) -> Vec<u8> {
|
||||
derive_signing_key_cached(secret_key, date_stamp, region, service)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn compute_signature(signing_key: &[u8], string_to_sign: &str) -> String {
|
||||
let sig = hmac_sha256(signing_key, string_to_sign.as_bytes());
|
||||
hex::encode(sig)
|
||||
}
|
||||
|
||||
#[pyfunction]
|
||||
pub fn build_string_to_sign(
|
||||
amz_date: &str,
|
||||
credential_scope: &str,
|
||||
canonical_request: &str,
|
||||
) -> String {
|
||||
let cr_hash = sha256_hex(canonical_request.as_bytes());
|
||||
format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
amz_date, credential_scope, cr_hash
|
||||
)
|
||||
}
|

#[pyfunction]
pub fn constant_time_compare(a: &str, b: &str) -> bool {
    constant_time_compare_inner(a.as_bytes(), b.as_bytes())
}

#[pyfunction]
pub fn clear_signing_key_cache() {
    SIGNING_KEY_CACHE.lock().clear();
}
python/myfsio_core/src/storage.rs (new file, 817 lines)
@@ -0,0 +1,817 @@
use pyo3::exceptions::PyIOError;
use pyo3::prelude::*;
use pyo3::types::{PyDict, PyList, PyString, PyTuple};
use serde_json::Value;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::time::SystemTime;

const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];

fn system_time_to_epoch(t: SystemTime) -> f64 {
    t.duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs_f64())
        .unwrap_or(0.0)
}

fn extract_etag_from_meta_bytes(content: &[u8]) -> Option<String> {
    let marker = b"\"__etag__\"";
    let idx = content.windows(marker.len()).position(|w| w == marker)?;
    let after = &content[idx + marker.len()..];
    let start = after.iter().position(|&b| b == b'"')? + 1;
    let rest = &after[start..];
    let end = rest.iter().position(|&b| b == b'"')?;
    std::str::from_utf8(&rest[..end]).ok().map(|s| s.to_owned())
}

fn has_any_file(root: &str) -> bool {
    let root_path = Path::new(root);
    if !root_path.is_dir() {
        return false;
    }
    let mut stack = vec![root_path.to_path_buf()];
    while let Some(current) = stack.pop() {
        let entries = match fs::read_dir(&current) {
            Ok(e) => e,
            Err(_) => continue,
        };
        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_file() {
                return true;
            }
            if ft.is_dir() && !ft.is_symlink() {
                stack.push(entry.path());
            }
        }
    }
    false
}

#[pyfunction]
pub fn write_index_entry(
    py: Python<'_>,
    path: &str,
    entry_name: &str,
    entry_data_json: &str,
) -> PyResult<()> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();
    let data_owned = entry_data_json.to_owned();

    py.detach(move || -> PyResult<()> {
        let entry_value: Value = serde_json::from_str(&data_owned)
            .map_err(|e| PyIOError::new_err(format!("Failed to parse entry data: {}", e)))?;

        if let Some(parent) = Path::new(&path_owned).parent() {
            let _ = fs::create_dir_all(parent);
        }

        let mut index_data: serde_json::Map<String, Value> = match fs::read_to_string(&path_owned)
        {
            Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
            Err(_) => serde_json::Map::new(),
        };

        index_data.insert(entry_owned, entry_value);

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(())
    })
}

#[pyfunction]
pub fn delete_index_entry(py: Python<'_>, path: &str, entry_name: &str) -> PyResult<bool> {
    let path_owned = path.to_owned();
    let entry_owned = entry_name.to_owned();

    py.detach(move || -> PyResult<bool> {
        let content = match fs::read_to_string(&path_owned) {
            Ok(c) => c,
            Err(_) => return Ok(false),
        };

        let mut index_data: serde_json::Map<String, Value> =
            match serde_json::from_str(&content) {
                Ok(v) => v,
                Err(_) => return Ok(false),
            };

        if index_data.remove(&entry_owned).is_none() {
            return Ok(false);
        }

        if index_data.is_empty() {
            let _ = fs::remove_file(&path_owned);
            return Ok(true);
        }

        let serialized = serde_json::to_string(&Value::Object(index_data))
            .map_err(|e| PyIOError::new_err(format!("Failed to serialize index: {}", e)))?;

        fs::write(&path_owned, serialized)
            .map_err(|e| PyIOError::new_err(format!("Failed to write index: {}", e)))?;

        Ok(true)
    })
}
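Seen from Python, the index helpers take a pre-serialized JSON payload and return plain values, so a round trip looks like this (paths are illustrative; module name assumed):

    import json
    import myfsio_core

    idx = "data/bucket/.meta/_index.json"  # hypothetical index location
    myfsio_core.write_index_entry(idx, "photo.jpg", json.dumps({"metadata": {"__etag__": "abc123"}}))
    removed = myfsio_core.delete_index_entry(idx, "photo.jpg")
    # removed is True; the file itself is deleted once the map is empty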

#[pyfunction]
pub fn check_bucket_contents(
    py: Python<'_>,
    bucket_path: &str,
    version_roots: Vec<String>,
    multipart_roots: Vec<String>,
) -> PyResult<(bool, bool, bool)> {
    let bucket_owned = bucket_path.to_owned();

    py.detach(move || -> PyResult<(bool, bool, bool)> {
        let mut has_objects = false;
        let bucket_p = Path::new(&bucket_owned);
        if bucket_p.is_dir() {
            let mut stack = vec![bucket_p.to_path_buf()];
            'obj_scan: while let Some(current) = stack.pop() {
                let is_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if is_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    if ft.is_file() && !ft.is_symlink() {
                        has_objects = true;
                        break 'obj_scan;
                    }
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    }
                }
            }
        }

        let mut has_versions = false;
        for root in &version_roots {
            if has_versions {
                break;
            }
            has_versions = has_any_file(root);
        }

        let mut has_multipart = false;
        for root in &multipart_roots {
            if has_multipart {
                break;
            }
            has_multipart = has_any_file(root);
        }

        Ok((has_objects, has_versions, has_multipart))
    })
}
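The tuple unpacks directly on the Python side, which is how a delete-bucket guard would use it (paths are hypothetical; module name assumed):

    import myfsio_core

    has_objects, has_versions, has_multipart = myfsio_core.check_bucket_contents(
        "data/bucket",
        ["data/bucket/.versions"],   # hypothetical version roots
        ["data/bucket/.multipart"],  # hypothetical multipart roots
    )
    if not any((has_objects, has_versions, has_multipart)):
        print("bucket is empty and safe to delete")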

#[pyfunction]
pub fn shallow_scan(
    py: Python<'_>,
    target_dir: &str,
    prefix: &str,
    meta_cache_json: &str,
) -> PyResult<Py<PyAny>> {
    let target_owned = target_dir.to_owned();
    let prefix_owned = prefix.to_owned();
    let cache_owned = meta_cache_json.to_owned();

    let result: (
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    ) = py.detach(move || -> PyResult<(
        Vec<(String, u64, f64, Option<String>)>,
        Vec<String>,
        Vec<(String, bool)>,
    )> {
        let meta_cache: HashMap<String, String> =
            serde_json::from_str(&cache_owned).unwrap_or_default();

        let mut files: Vec<(String, u64, f64, Option<String>)> = Vec::new();
        let mut dirs: Vec<String> = Vec::new();

        let entries = match fs::read_dir(&target_owned) {
            Ok(e) => e,
            Err(_) => return Ok((files, dirs, Vec::new())),
        };

        for entry_result in entries {
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };
            let name = match entry.file_name().into_string() {
                Ok(n) => n,
                Err(_) => continue,
            };
            if INTERNAL_FOLDERS.contains(&name.as_str()) {
                continue;
            }
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };
            if ft.is_dir() && !ft.is_symlink() {
                let cp = format!("{}{}/", prefix_owned, name);
                dirs.push(cp);
            } else if ft.is_file() && !ft.is_symlink() {
                let key = format!("{}{}", prefix_owned, name);
                let md = match entry.metadata() {
                    Ok(m) => m,
                    Err(_) => continue,
                };
                let size = md.len();
                let mtime = md
                    .modified()
                    .map(system_time_to_epoch)
                    .unwrap_or(0.0);
                let etag = meta_cache.get(&key).cloned();
                files.push((key, size, mtime, etag));
            }
        }

        files.sort_by(|a, b| a.0.cmp(&b.0));
        dirs.sort();

        let mut merged: Vec<(String, bool)> = Vec::with_capacity(files.len() + dirs.len());
        let mut fi = 0;
        let mut di = 0;
        while fi < files.len() && di < dirs.len() {
            if files[fi].0 <= dirs[di] {
                merged.push((files[fi].0.clone(), false));
                fi += 1;
            } else {
                merged.push((dirs[di].clone(), true));
                di += 1;
            }
        }
        while fi < files.len() {
            merged.push((files[fi].0.clone(), false));
            fi += 1;
        }
        while di < dirs.len() {
            merged.push((dirs[di].clone(), true));
            di += 1;
        }

        Ok((files, dirs, merged))
    })?;

    let (files, dirs, merged) = result;

    let dict = PyDict::new(py);

    let files_list = PyList::empty(py);
    for (key, size, mtime, etag) in &files {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        files_list.append(tuple)?;
    }
    dict.set_item("files", files_list)?;

    let dirs_list = PyList::empty(py);
    for d in &dirs {
        dirs_list.append(PyString::new(py, d))?;
    }
    dict.set_item("dirs", dirs_list)?;

    let merged_list = PyList::empty(py);
    for (key, is_dir) in &merged {
        let bool_obj: Py<PyAny> = if *is_dir {
            true.into_pyobject(py)?.to_owned().into_any().unbind()
        } else {
            false.into_pyobject(py)?.to_owned().into_any().unbind()
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            bool_obj,
        ])?;
        merged_list.append(tuple)?;
    }
    dict.set_item("merged_keys", merged_list)?;

    Ok(dict.into_any().unbind())
}
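From Python, shallow_scan lists one directory level at a time, with the ETag cache injected as JSON (a sketch; paths illustrative, module name assumed):

    import json
    import myfsio_core

    listing = myfsio_core.shallow_scan("data/bucket/photos", "photos/", json.dumps({}))
    for key, size, mtime, etag in listing["files"]:
        print(key, size, etag)
    print(listing["dirs"])         # e.g. ["photos/2024/"]
    print(listing["merged_keys"])  # [(key, is_dir), ...], already sorted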

#[pyfunction]
pub fn bucket_stats_scan(
    py: Python<'_>,
    bucket_path: &str,
    versions_root: &str,
) -> PyResult<(u64, u64, u64, u64)> {
    let bucket_owned = bucket_path.to_owned();
    let versions_owned = versions_root.to_owned();

    py.detach(move || -> PyResult<(u64, u64, u64, u64)> {
        let mut object_count: u64 = 0;
        let mut total_bytes: u64 = 0;

        let bucket_p = Path::new(&bucket_owned);
        if bucket_p.is_dir() {
            let mut stack = vec![bucket_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let is_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    if is_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        object_count += 1;
                        if let Ok(md) = entry.metadata() {
                            total_bytes += md.len();
                        }
                    }
                }
            }
        }

        let mut version_count: u64 = 0;
        let mut version_bytes: u64 = 0;

        let versions_p = Path::new(&versions_owned);
        if versions_p.is_dir() {
            let mut stack = vec![versions_p.to_path_buf()];
            while let Some(current) = stack.pop() {
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        if let Some(name) = entry.file_name().to_str() {
                            if name.ends_with(".bin") {
                                version_count += 1;
                                if let Ok(md) = entry.metadata() {
                                    version_bytes += md.len();
                                }
                            }
                        }
                    }
                }
            }
        }

        Ok((object_count, total_bytes, version_count, version_bytes))
    })
}
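Again a thin tuple on the Python side (sketch; paths illustrative, module name assumed):

    import myfsio_core

    objects, obj_bytes, versions, ver_bytes = myfsio_core.bucket_stats_scan(
        "data/bucket", "data/bucket/.versions"
    )
    print(f"{objects} objects ({obj_bytes} bytes), {versions} versions ({ver_bytes} bytes)")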

#[pyfunction]
#[pyo3(signature = (bucket_path, search_root, query, limit))]
pub fn search_objects_scan(
    py: Python<'_>,
    bucket_path: &str,
    search_root: &str,
    query: &str,
    limit: usize,
) -> PyResult<Py<PyAny>> {
    let bucket_owned = bucket_path.to_owned();
    let search_owned = search_root.to_owned();
    let query_owned = query.to_owned();

    let result: (Vec<(String, u64, f64)>, bool) = py.detach(
        move || -> PyResult<(Vec<(String, u64, f64)>, bool)> {
            let query_lower = query_owned.to_lowercase();
            let bucket_len = bucket_owned.len() + 1;
            let scan_limit = limit * 4;
            let mut matched: usize = 0;
            let mut results: Vec<(String, u64, f64)> = Vec::new();

            let search_p = Path::new(&search_owned);
            if !search_p.is_dir() {
                return Ok((results, false));
            }

            let bucket_p = Path::new(&bucket_owned);
            let mut stack = vec![search_p.to_path_buf()];

            'scan: while let Some(current) = stack.pop() {
                let is_bucket_root = current == bucket_p;
                let entries = match fs::read_dir(&current) {
                    Ok(e) => e,
                    Err(_) => continue,
                };
                for entry_result in entries {
                    let entry = match entry_result {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    if is_bucket_root {
                        if let Some(name) = entry.file_name().to_str() {
                            if INTERNAL_FOLDERS.contains(&name) {
                                continue;
                            }
                        }
                    }
                    let ft = match entry.file_type() {
                        Ok(ft) => ft,
                        Err(_) => continue,
                    };
                    if ft.is_dir() && !ft.is_symlink() {
                        stack.push(entry.path());
                    } else if ft.is_file() && !ft.is_symlink() {
                        let full_path = entry.path();
                        let full_str = full_path.to_string_lossy();
                        if full_str.len() <= bucket_len {
                            continue;
                        }
                        let key = full_str[bucket_len..].replace('\\', "/");
                        if key.to_lowercase().contains(&query_lower) {
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                results.push((key, size, mtime));
                                matched += 1;
                            }
                        }
                        if matched >= scan_limit {
                            break 'scan;
                        }
                    }
                }
            }

            results.sort_by(|a, b| a.0.cmp(&b.0));
            let truncated = results.len() > limit;
            results.truncate(limit);

            Ok((results, truncated))
        },
    )?;

    let (results, truncated) = result;

    let dict = PyDict::new(py);

    let results_list = PyList::empty(py);
    for (key, size, mtime) in &results {
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
        ])?;
        results_list.append(tuple)?;
    }
    dict.set_item("results", results_list)?;
    dict.set_item("truncated", truncated)?;

    Ok(dict.into_any().unbind())
}
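The scan over-collects by a factor of four before sorting, so the truncated flag reflects matches beyond the requested page (usage sketch; module name assumed):

    import myfsio_core

    out = myfsio_core.search_objects_scan("data/bucket", "data/bucket", "report", 50)
    for key, size, mtime in out["results"]:
        print(key)
    if out["truncated"]:
        print("more matches exist; refine the query")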

#[pyfunction]
pub fn build_object_cache(
    py: Python<'_>,
    bucket_path: &str,
    meta_root: &str,
    etag_index_path: &str,
) -> PyResult<Py<PyAny>> {
    let bucket_owned = bucket_path.to_owned();
    let meta_owned = meta_root.to_owned();
    let index_path_owned = etag_index_path.to_owned();

    let result: (HashMap<String, String>, Vec<(String, u64, f64, Option<String>)>, bool) =
        py.detach(move || -> PyResult<(
            HashMap<String, String>,
            Vec<(String, u64, f64, Option<String>)>,
            bool,
        )> {
            let mut meta_cache: HashMap<String, String> = HashMap::new();
            let mut index_mtime: f64 = 0.0;
            let mut etag_cache_changed = false;

            let index_p = Path::new(&index_path_owned);
            if index_p.is_file() {
                if let Ok(md) = fs::metadata(&index_path_owned) {
                    index_mtime = md
                        .modified()
                        .map(system_time_to_epoch)
                        .unwrap_or(0.0);
                }
                if let Ok(content) = fs::read_to_string(&index_path_owned) {
                    if let Ok(parsed) = serde_json::from_str::<HashMap<String, String>>(&content) {
                        meta_cache = parsed;
                    }
                }
            }

            let meta_p = Path::new(&meta_owned);
            let mut needs_rebuild = false;

            if meta_p.is_dir() && index_mtime > 0.0 {
                fn check_newer(dir: &Path, index_mtime: f64) -> bool {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return false,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            if check_newer(&entry.path(), index_mtime) {
                                return true;
                            }
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                if name.ends_with(".meta.json") || name == "_index.json" {
                                    if let Ok(md) = entry.metadata() {
                                        let mt = md
                                            .modified()
                                            .map(system_time_to_epoch)
                                            .unwrap_or(0.0);
                                        if mt > index_mtime {
                                            return true;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    false
                }
                needs_rebuild = check_newer(meta_p, index_mtime);
            } else if meta_cache.is_empty() {
                needs_rebuild = true;
            }

            if needs_rebuild && meta_p.is_dir() {
                let meta_str = meta_owned.clone();
                let meta_len = meta_str.len() + 1;
                let mut index_files: Vec<String> = Vec::new();
                let mut legacy_meta_files: Vec<(String, String)> = Vec::new();

                fn collect_meta(
                    dir: &Path,
                    meta_len: usize,
                    index_files: &mut Vec<String>,
                    legacy_meta_files: &mut Vec<(String, String)>,
                ) {
                    let entries = match fs::read_dir(dir) {
                        Ok(e) => e,
                        Err(_) => return,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            collect_meta(&entry.path(), meta_len, index_files, legacy_meta_files);
                        } else if ft.is_file() {
                            if let Some(name) = entry.file_name().to_str() {
                                let full = entry.path().to_string_lossy().to_string();
                                if name == "_index.json" {
                                    index_files.push(full);
                                } else if name.ends_with(".meta.json") {
                                    if full.len() > meta_len {
                                        let rel = &full[meta_len..];
                                        let key = if rel.len() > 10 {
                                            rel[..rel.len() - 10].replace('\\', "/")
                                        } else {
                                            continue;
                                        };
                                        legacy_meta_files.push((key, full));
                                    }
                                }
                            }
                        }
                    }
                }

                collect_meta(
                    meta_p,
                    meta_len,
                    &mut index_files,
                    &mut legacy_meta_files,
                );

                meta_cache.clear();

                for idx_path in &index_files {
                    if let Ok(content) = fs::read_to_string(idx_path) {
                        if let Ok(idx_data) = serde_json::from_str::<HashMap<String, Value>>(&content) {
                            let rel_dir = if idx_path.len() > meta_len {
                                let r = &idx_path[meta_len..];
                                r.replace('\\', "/")
                            } else {
                                String::new()
                            };
                            let dir_prefix = if rel_dir.ends_with("/_index.json") {
                                &rel_dir[..rel_dir.len() - "/_index.json".len()]
                            } else {
                                ""
                            };
                            for (entry_name, entry_data) in &idx_data {
                                let key = if dir_prefix.is_empty() {
                                    entry_name.clone()
                                } else {
                                    format!("{}/{}", dir_prefix, entry_name)
                                };
                                if let Some(meta_obj) = entry_data.get("metadata") {
                                    if let Some(etag) = meta_obj.get("__etag__") {
                                        if let Some(etag_str) = etag.as_str() {
                                            meta_cache.insert(key, etag_str.to_owned());
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                for (key, path) in &legacy_meta_files {
                    if meta_cache.contains_key(key) {
                        continue;
                    }
                    if let Ok(content) = fs::read(path) {
                        if let Some(etag) = extract_etag_from_meta_bytes(&content) {
                            meta_cache.insert(key.clone(), etag);
                        }
                    }
                }

                etag_cache_changed = true;
            }

            let bucket_p = Path::new(&bucket_owned);
            let bucket_len = bucket_owned.len() + 1;
            let mut objects: Vec<(String, u64, f64, Option<String>)> = Vec::new();

            if bucket_p.is_dir() {
                let mut stack = vec![bucket_p.to_path_buf()];
                while let Some(current) = stack.pop() {
                    let entries = match fs::read_dir(&current) {
                        Ok(e) => e,
                        Err(_) => continue,
                    };
                    for entry_result in entries {
                        let entry = match entry_result {
                            Ok(e) => e,
                            Err(_) => continue,
                        };
                        let ft = match entry.file_type() {
                            Ok(ft) => ft,
                            Err(_) => continue,
                        };
                        if ft.is_dir() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            if full_str.len() > bucket_len {
                                let first_part: &str = if let Some(sep_pos) =
                                    full_str[bucket_len..].find(|c: char| c == '\\' || c == '/')
                                {
                                    &full_str[bucket_len..bucket_len + sep_pos]
                                } else {
                                    &full_str[bucket_len..]
                                };
                                if INTERNAL_FOLDERS.contains(&first_part) {
                                    continue;
                                }
                            } else if let Some(name) = entry.file_name().to_str() {
                                if INTERNAL_FOLDERS.contains(&name) {
                                    continue;
                                }
                            }
                            stack.push(full);
                        } else if ft.is_file() && !ft.is_symlink() {
                            let full = entry.path();
                            let full_str = full.to_string_lossy();
                            if full_str.len() <= bucket_len {
                                continue;
                            }
                            let rel = &full_str[bucket_len..];
                            let first_part: &str =
                                if let Some(sep_pos) = rel.find(|c: char| c == '\\' || c == '/') {
                                    &rel[..sep_pos]
                                } else {
                                    rel
                                };
                            if INTERNAL_FOLDERS.contains(&first_part) {
                                continue;
                            }
                            let key = rel.replace('\\', "/");
                            if let Ok(md) = entry.metadata() {
                                let size = md.len();
                                let mtime = md
                                    .modified()
                                    .map(system_time_to_epoch)
                                    .unwrap_or(0.0);
                                let etag = meta_cache.get(&key).cloned();
                                objects.push((key, size, mtime, etag));
                            }
                        }
                    }
                }
            }

            Ok((meta_cache, objects, etag_cache_changed))
        })?;

    let (meta_cache, objects, etag_cache_changed) = result;

    let dict = PyDict::new(py);

    let cache_dict = PyDict::new(py);
    for (k, v) in &meta_cache {
        cache_dict.set_item(k, v)?;
    }
    dict.set_item("etag_cache", cache_dict)?;

    let objects_list = PyList::empty(py);
    for (key, size, mtime, etag) in &objects {
        let etag_py: Py<PyAny> = match etag {
            Some(e) => PyString::new(py, e).into_any().unbind(),
            None => py.None(),
        };
        let tuple = PyTuple::new(py, &[
            PyString::new(py, key).into_any().unbind(),
            size.into_pyobject(py)?.into_any().unbind(),
            mtime.into_pyobject(py)?.into_any().unbind(),
            etag_py,
        ])?;
        objects_list.append(tuple)?;
    }
    dict.set_item("objects", objects_list)?;
    dict.set_item("etag_cache_changed", etag_cache_changed)?;

    Ok(dict.into_any().unbind())
}
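One call hydrates the ETag cache (rebuilding it only when a .meta.json or _index.json file is newer than the index) and enumerates the bucket. A sketch; the index path and the persistence hook are hypothetical, module name assumed:

    import myfsio_core

    cache = myfsio_core.build_object_cache(
        "data/bucket",
        "data/bucket/.meta",
        "data/bucket/.meta/etag-index.json",  # hypothetical index file
    )
    if cache["etag_cache_changed"]:
        persist(cache["etag_cache"])          # hypothetical persistence hook
    for key, size, mtime, etag in cache["objects"]:
        pass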
python/myfsio_core/src/streaming.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
use md5::{Digest, Md5};
use pyo3::exceptions::{PyIOError, PyValueError};
use pyo3::prelude::*;
use std::fs::{self, File};
use std::io::{Read, Write};
use uuid::Uuid;

const DEFAULT_CHUNK_SIZE: usize = 262144;

#[pyfunction]
#[pyo3(signature = (stream, tmp_dir, chunk_size=DEFAULT_CHUNK_SIZE))]
pub fn stream_to_file_with_md5(
    py: Python<'_>,
    stream: &Bound<'_, PyAny>,
    tmp_dir: &str,
    chunk_size: usize,
) -> PyResult<(String, String, u64)> {
    let chunk_size = if chunk_size == 0 {
        DEFAULT_CHUNK_SIZE
    } else {
        chunk_size
    };

    fs::create_dir_all(tmp_dir)
        .map_err(|e| PyIOError::new_err(format!("Failed to create tmp dir: {}", e)))?;

    let tmp_name = format!("{}.tmp", Uuid::new_v4().as_hyphenated());
    let tmp_path_buf = std::path::PathBuf::from(tmp_dir).join(&tmp_name);
    let tmp_path = tmp_path_buf.to_string_lossy().into_owned();

    let mut file = File::create(&tmp_path)
        .map_err(|e| PyIOError::new_err(format!("Failed to create temp file: {}", e)))?;
    let mut hasher = Md5::new();
    let mut total_bytes: u64 = 0;

    let result: PyResult<()> = (|| {
        loop {
            let chunk: Vec<u8> = stream.call_method1("read", (chunk_size,))?.extract()?;
            if chunk.is_empty() {
                break;
            }
            hasher.update(&chunk);
            file.write_all(&chunk)
                .map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
            total_bytes += chunk.len() as u64;

            py.check_signals()?;
        }
        file.sync_all()
            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;
        Ok(())
    })();

    if let Err(e) = result {
        drop(file);
        let _ = fs::remove_file(&tmp_path);
        return Err(e);
    }

    drop(file);

    let md5_hex = format!("{:x}", hasher.finalize());
    Ok((tmp_path, md5_hex, total_bytes))
}
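Because the function only calls read(n) on the stream object, any binary file-like object works from Python (a sketch; the tmp directory is illustrative, module name assumed):

    import io
    import myfsio_core

    body = io.BytesIO(b"hello world")
    tmp_path, md5_hex, size = myfsio_core.stream_to_file_with_md5(body, "data/tmp")
    assert size == 11
    assert md5_hex == "5eb63bbbe01eeed093cb22bb8f5acdc3"  # MD5 of b"hello world"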

#[pyfunction]
pub fn assemble_parts_with_md5(
    py: Python<'_>,
    part_paths: Vec<String>,
    dest_path: &str,
) -> PyResult<String> {
    if part_paths.is_empty() {
        return Err(PyValueError::new_err("No parts to assemble"));
    }

    let dest = dest_path.to_owned();
    let parts = part_paths;

    py.detach(move || {
        if let Some(parent) = std::path::Path::new(&dest).parent() {
            fs::create_dir_all(parent)
                .map_err(|e| PyIOError::new_err(format!("Failed to create dest dir: {}", e)))?;
        }

        let mut target = File::create(&dest)
            .map_err(|e| PyIOError::new_err(format!("Failed to create dest file: {}", e)))?;
        let mut hasher = Md5::new();
        let mut buf = vec![0u8; 1024 * 1024];

        for part_path in &parts {
            let mut part = File::open(part_path)
                .map_err(|e| PyIOError::new_err(format!("Failed to open part {}: {}", part_path, e)))?;
            loop {
                let n = part
                    .read(&mut buf)
                    .map_err(|e| PyIOError::new_err(format!("Failed to read part: {}", e)))?;
                if n == 0 {
                    break;
                }
                hasher.update(&buf[..n]);
                target
                    .write_all(&buf[..n])
                    .map_err(|e| PyIOError::new_err(format!("Failed to write: {}", e)))?;
            }
        }

        target.sync_all()
            .map_err(|e| PyIOError::new_err(format!("Failed to fsync: {}", e)))?;

        Ok(format!("{:x}", hasher.finalize()))
    })
}
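Note that the returned digest covers the concatenated bytes, not an S3-style multipart ETag built from per-part digests. Completing a multipart upload from Python is then a single call (sketch; part paths hypothetical, module name assumed):

    import myfsio_core

    parts = [
        "data/bucket/.multipart/u1/part-1.bin",  # hypothetical part files
        "data/bucket/.multipart/u1/part-2.bin",
    ]
    md5_hex = myfsio_core.assemble_parts_with_md5(parts, "data/bucket/big-object")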
python/myfsio_core/src/validation.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
use pyo3::prelude::*;
use std::sync::LazyLock;
use unicode_normalization::UnicodeNormalization;

const WINDOWS_RESERVED: &[&str] = &[
    "CON", "PRN", "AUX", "NUL", "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7",
    "COM8", "COM9", "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
    "LPT9",
];

const WINDOWS_ILLEGAL_CHARS: &[char] = &['<', '>', ':', '"', '/', '\\', '|', '?', '*'];

const INTERNAL_FOLDERS: &[&str] = &[".meta", ".versions", ".multipart"];
const SYSTEM_ROOT: &str = ".myfsio.sys";

static IP_REGEX: LazyLock<regex::Regex> =
    LazyLock::new(|| regex::Regex::new(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$").unwrap());

#[pyfunction]
#[pyo3(signature = (object_key, max_length_bytes=1024, is_windows=false, reserved_prefixes=None))]
pub fn validate_object_key(
    object_key: &str,
    max_length_bytes: usize,
    is_windows: bool,
    reserved_prefixes: Option<Vec<String>>,
) -> PyResult<Option<String>> {
    if object_key.is_empty() {
        return Ok(Some("Object key required".to_string()));
    }

    if object_key.contains('\0') {
        return Ok(Some("Object key contains null bytes".to_string()));
    }

    let normalized: String = object_key.nfc().collect();

    if normalized.len() > max_length_bytes {
        return Ok(Some(format!(
            "Object key exceeds maximum length of {} bytes",
            max_length_bytes
        )));
    }

    if normalized.starts_with('/') || normalized.starts_with('\\') {
        return Ok(Some("Object key cannot start with a slash".to_string()));
    }

    let parts: Vec<&str> = if cfg!(windows) || is_windows {
        normalized.split(['/', '\\']).collect()
    } else {
        normalized.split('/').collect()
    };

    for part in &parts {
        if part.is_empty() {
            continue;
        }

        if *part == ".." {
            return Ok(Some(
                "Object key contains parent directory references".to_string(),
            ));
        }

        if *part == "." {
            return Ok(Some("Object key contains invalid segments".to_string()));
        }

        if part.chars().any(|c| (c as u32) < 32) {
            return Ok(Some(
                "Object key contains control characters".to_string(),
            ));
        }

        if is_windows {
            if part.chars().any(|c| WINDOWS_ILLEGAL_CHARS.contains(&c)) {
                return Ok(Some(
                    "Object key contains characters not supported on Windows filesystems"
                        .to_string(),
                ));
            }
            if part.ends_with(' ') || part.ends_with('.') {
                return Ok(Some(
                    "Object key segments cannot end with spaces or periods on Windows".to_string(),
                ));
            }
            let trimmed = part.trim_end_matches(['.', ' ']).to_uppercase();
            if WINDOWS_RESERVED.contains(&trimmed.as_str()) {
                return Ok(Some(format!("Invalid filename segment: {}", part)));
            }
        }
    }

    let non_empty_parts: Vec<&str> = parts.iter().filter(|p| !p.is_empty()).copied().collect();
    if let Some(top) = non_empty_parts.first() {
        if INTERNAL_FOLDERS.contains(top) || *top == SYSTEM_ROOT {
            return Ok(Some("Object key uses a reserved prefix".to_string()));
        }

        if let Some(ref prefixes) = reserved_prefixes {
            for prefix in prefixes {
                if *top == prefix.as_str() {
                    return Ok(Some("Object key uses a reserved prefix".to_string()));
                }
            }
        }
    }

    Ok(None)
}
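The validator returns None on success and a human-readable reason on failure, which keeps the Python call sites simple (sketch; module name assumed):

    import myfsio_core

    assert myfsio_core.validate_object_key("photos/2024/cat.jpg") is None
    print(myfsio_core.validate_object_key("../etc/passwd"))
    # -> "Object key contains parent directory references"
    print(myfsio_core.validate_object_key("docs/COM1", is_windows=True))
    # -> "Invalid filename segment: COM1"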

#[pyfunction]
pub fn validate_bucket_name(bucket_name: &str) -> Option<String> {
    let len = bucket_name.len();
    if len < 3 || len > 63 {
        return Some("Bucket name must be between 3 and 63 characters".to_string());
    }

    let bytes = bucket_name.as_bytes();
    if !bytes[0].is_ascii_lowercase() && !bytes[0].is_ascii_digit() {
        return Some(
            "Bucket name must start and end with a lowercase letter or digit".to_string(),
        );
    }
    if !bytes[len - 1].is_ascii_lowercase() && !bytes[len - 1].is_ascii_digit() {
        return Some(
            "Bucket name must start and end with a lowercase letter or digit".to_string(),
        );
    }

    for &b in bytes {
        if !b.is_ascii_lowercase() && !b.is_ascii_digit() && b != b'.' && b != b'-' {
            return Some(
                "Bucket name can only contain lowercase letters, digits, dots, and hyphens"
                    .to_string(),
            );
        }
    }

    if bucket_name.contains("..") {
        return Some("Bucket name must not contain consecutive periods".to_string());
    }

    if IP_REGEX.is_match(bucket_name) {
        return Some("Bucket name must not be formatted as an IP address".to_string());
    }

    None
}
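The bucket-name rules mirror the familiar S3 constraints (sketch; module name assumed):

    import myfsio_core

    assert myfsio_core.validate_bucket_name("my-bucket.backup") is None
    print(myfsio_core.validate_bucket_name("ab"))
    # -> "Bucket name must be between 3 and 63 characters"
    print(myfsio_core.validate_bucket_name("192.168.0.1"))
    # -> "Bucket name must not be formatted as an IP address"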
python/pytest.ini (new file, 5 lines)
@@ -0,0 +1,5 @@
[pytest]
testpaths = tests
norecursedirs = data .git __pycache__ .venv
markers =
    integration: marks tests as integration tests (may require external services)
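The registered marker is opted into per test and filtered on the command line with pytest -m "not integration". An illustrative test (the name is hypothetical):

    import pytest

    @pytest.mark.integration
    def test_presigned_url_roundtrip():
        ...  # talks to a running API server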
python/requirements.txt (new file, 13 lines)
@@ -0,0 +1,13 @@
Flask>=3.1.3
Flask-Limiter>=4.1.1
Flask-Cors>=6.0.2
Flask-WTF>=1.2.2
python-dotenv>=1.2.1
pytest>=9.0.2
requests>=2.32.5
boto3>=1.42.14
granian>=2.7.2
psutil>=7.2.2
cryptography>=46.0.5
defusedxml>=0.7.1
duckdb>=1.5.1
python/run.py (new file, 315 lines)
@@ -0,0 +1,315 @@
"""Helper script to run the API server, UI server, or both."""
from __future__ import annotations

import argparse
import atexit
import os
import signal
import sys
import warnings
import multiprocessing
from multiprocessing import Process
from pathlib import Path

from dotenv import load_dotenv

for _env_file in [
    Path("/opt/myfsio/myfsio.env"),
    Path.cwd() / ".env",
    Path.cwd() / "myfsio.env",
]:
    if _env_file.exists():
        load_dotenv(_env_file, override=True)

from typing import Optional

from app import create_api_app, create_ui_app
from app.config import AppConfig
from app.iam import IamService, IamError, ALLOWED_ACTIONS, _derive_fernet_key
from app.version import get_version

PYTHON_DEPRECATION_MESSAGE = (
    "The Python MyFSIO runtime is deprecated as of 2026-04-21. "
    "Use the Rust server in rust/myfsio-engine for supported development and production usage."
)


def _server_host() -> str:
    """Return the bind host for API and UI servers."""
    return os.getenv("APP_HOST", "0.0.0.0")


def _is_debug_enabled() -> bool:
    return os.getenv("FLASK_DEBUG", "0").lower() in ("1", "true", "yes")


def _is_frozen() -> bool:
    """Check if running as a compiled binary (PyInstaller/Nuitka)."""
    return getattr(sys, 'frozen', False) or '__compiled__' in globals()


def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
    from granian import Granian
    from granian.constants import Interfaces
    from granian.http import HTTP1Settings

    kwargs: dict = {
        "target": target,
        "address": _server_host(),
        "port": port,
        "interface": Interfaces.WSGI,
        "factory": True,
        "workers": 1,
    }

    if config:
        kwargs["blocking_threads"] = config.server_threads
        kwargs["backlog"] = config.server_backlog
        kwargs["backpressure"] = config.server_connection_limit
        kwargs["http1_settings"] = HTTP1Settings(
            header_read_timeout=config.server_channel_timeout * 1000,
            max_buffer_size=config.server_max_buffer_size,
        )
    else:
        kwargs["http1_settings"] = HTTP1Settings(
            max_buffer_size=1024 * 1024 * 128,
        )

    server = Granian(**kwargs)
    server.serve()


def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    if prod:
        _serve_granian("app:create_api_app", port, config)
    else:
        app = create_api_app()
        debug = _is_debug_enabled()
        if debug:
            warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
        app.run(host=_server_host(), port=port, debug=debug)


def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    if prod:
        _serve_granian("app:create_ui_app", port, config)
    else:
        app = create_ui_app()
        debug = _is_debug_enabled()
        if debug:
            warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
        app.run(host=_server_host(), port=port, debug=debug)


def reset_credentials() -> None:
    import json
    import secrets
    from cryptography.fernet import Fernet

    config = AppConfig.from_env()
    iam_path = config.iam_config_path
    encryption_key = config.secret_key

    access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
    secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
    custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())

    fernet = Fernet(_derive_fernet_key(encryption_key)) if encryption_key else None

    raw_config = None
    if iam_path.exists():
        try:
            raw_bytes = iam_path.read_bytes()
            from app.iam import _IAM_ENCRYPTED_PREFIX
            if raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX):
                if fernet:
                    try:
                        content = fernet.decrypt(raw_bytes[len(_IAM_ENCRYPTED_PREFIX):]).decode("utf-8")
                        raw_config = json.loads(content)
                    except Exception:
                        print("WARNING: Could not decrypt existing IAM config. Creating fresh config.")
                else:
                    print("WARNING: IAM config is encrypted but no SECRET_KEY available. Creating fresh config.")
            else:
                try:
                    raw_config = json.loads(raw_bytes.decode("utf-8"))
                except json.JSONDecodeError:
                    print("WARNING: Existing IAM config is corrupted. Creating fresh config.")
        except OSError:
            pass

    if raw_config and raw_config.get("users"):
        is_v2 = raw_config.get("version", 1) >= 2
        admin_user = None
        for user in raw_config["users"]:
            policies = user.get("policies", [])
            for p in policies:
                actions = p.get("actions", [])
                if "iam:*" in actions or "*" in actions:
                    admin_user = user
                    break
            if admin_user:
                break
        if not admin_user:
            admin_user = raw_config["users"][0]

        if is_v2:
            admin_keys = admin_user.get("access_keys", [])
            if admin_keys:
                admin_keys[0]["access_key"] = access_key
                admin_keys[0]["secret_key"] = secret_key
            else:
                from datetime import datetime as _dt, timezone as _tz
                admin_user["access_keys"] = [{
                    "access_key": access_key,
                    "secret_key": secret_key,
                    "status": "active",
                    "created_at": _dt.now(_tz.utc).isoformat(),
                }]
        else:
            admin_user["access_key"] = access_key
            admin_user["secret_key"] = secret_key
    else:
        from datetime import datetime as _dt, timezone as _tz
        raw_config = {
            "version": 2,
            "users": [
                {
                    "user_id": f"u-{secrets.token_hex(8)}",
                    "display_name": "Local Admin",
                    "enabled": True,
                    "access_keys": [
                        {
                            "access_key": access_key,
                            "secret_key": secret_key,
                            "status": "active",
                            "created_at": _dt.now(_tz.utc).isoformat(),
                        }
                    ],
                    "policies": [
                        {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
                    ],
                }
            ]
        }

    json_text = json.dumps(raw_config, indent=2)
    iam_path.parent.mkdir(parents=True, exist_ok=True)
    temp_path = iam_path.with_suffix(".json.tmp")
    if fernet:
        from app.iam import _IAM_ENCRYPTED_PREFIX
        encrypted = fernet.encrypt(json_text.encode("utf-8"))
        temp_path.write_bytes(_IAM_ENCRYPTED_PREFIX + encrypted)
    else:
        temp_path.write_text(json_text, encoding="utf-8")
    temp_path.replace(iam_path)

    print(f"\n{'='*60}")
    print("MYFSIO - ADMIN CREDENTIALS RESET")
    print(f"{'='*60}")
    if custom_keys:
        print(f"Access Key: {access_key} (from ADMIN_ACCESS_KEY)")
        print(f"Secret Key: {'(from ADMIN_SECRET_KEY)' if os.environ.get('ADMIN_SECRET_KEY', '').strip() else secret_key}")
    else:
        print(f"Access Key: {access_key}")
        print(f"Secret Key: {secret_key}")
    print(f"{'='*60}")
    if fernet:
        print("IAM config saved (encrypted).")
    else:
        print(f"IAM config saved to: {iam_path}")
    print(f"{'='*60}\n")


if __name__ == "__main__":
    multiprocessing.freeze_support()
    if _is_frozen():
        multiprocessing.set_start_method("spawn", force=True)

    parser = argparse.ArgumentParser(description="Run the S3 clone services.")
    parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
    parser.add_argument("--api-port", type=int, default=5000)
    parser.add_argument("--ui-port", type=int, default=5100)
    parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
    parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
    parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
    parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
    parser.add_argument("--reset-cred", action="store_true", help="Reset admin credentials and exit")
    parser.add_argument("--version", action="version", version=f"MyFSIO {get_version()}")
    args = parser.parse_args()

    warnings.warn(PYTHON_DEPRECATION_MESSAGE, DeprecationWarning, stacklevel=1)

    if args.reset_cred or args.mode == "reset-cred":
        reset_credentials()
        sys.exit(0)

    if args.check_config or args.show_config:
        config = AppConfig.from_env()
        config.print_startup_summary()
        if args.check_config:
            issues = config.validate_and_report()
            critical = [i for i in issues if i.startswith("CRITICAL:")]
            sys.exit(1 if critical else 0)
        sys.exit(0)

    prod_mode = args.prod or (_is_frozen() and not args.dev)

    config = AppConfig.from_env()

    first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
    is_first_run = not first_run_marker.exists()

    if is_first_run:
        config.print_startup_summary()

        issues = config.validate_and_report()
        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
        if critical_issues:
            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
            sys.exit(1)

        try:
            first_run_marker.parent.mkdir(parents=True, exist_ok=True)
            first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
        except OSError:
            pass

    if prod_mode:
        print("Running in production mode (Granian)")
        issues = config.validate_and_report()
        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
        if critical_issues:
            for issue in critical_issues:
                print(f" {issue}")
            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
            sys.exit(1)
    else:
        print("Running in development mode (Flask dev server)")

    if args.mode in {"api", "both"}:
        print(f"Starting API server on port {args.api_port}...")
        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
        api_proc.start()
    else:
        api_proc = None

    def _cleanup_api():
        if api_proc and api_proc.is_alive():
            api_proc.terminate()
            api_proc.join(timeout=5)
            if api_proc.is_alive():
                api_proc.kill()

    if api_proc:
        atexit.register(_cleanup_api)
        signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))

    if args.mode in {"ui", "both"}:
        print(f"Starting UI server on port {args.ui_port}...")
        serve_ui(args.ui_port, prod_mode, config)
    elif api_proc:
        try:
            api_proc.join()
        except KeyboardInterrupt:
            pass
(file diff suppressed because it is too large)

BIN python/static/images/MyFSIO.ico (new binary file, not shown; 200 KiB)
BIN python/static/images/MyFSIO.png (new binary file, not shown; 872 KiB)
python/static/js/bucket-detail-main.js (new file, 4831 lines; diff suppressed because it is too large)
python/static/js/bucket-detail-operations.js (new file, 192 lines)
@@ -0,0 +1,192 @@
window.BucketDetailOperations = (function() {
    'use strict';

    let showMessage = function() {};
    let escapeHtml = function(s) { return s; };

    function init(config) {
        showMessage = config.showMessage || showMessage;
        escapeHtml = config.escapeHtml || escapeHtml;
    }

    async function loadLifecycleRules(card, endpoint) {
        if (!card || !endpoint) return;
        const body = card.querySelector('[data-lifecycle-body]');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const rules = data.rules || [];
            if (rules.length === 0) {
                body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No lifecycle rules configured</td></tr>';
                return;
            }

            body.innerHTML = rules.map(rule => {
                const actions = [];
                if (rule.expiration_days) actions.push(`Delete after ${rule.expiration_days} days`);
                if (rule.noncurrent_days) actions.push(`Delete old versions after ${rule.noncurrent_days} days`);
                if (rule.abort_mpu_days) actions.push(`Abort incomplete MPU after ${rule.abort_mpu_days} days`);

                return `
                    <tr>
                        <td class="fw-medium">${escapeHtml(rule.id)}</td>
                        <td><code>${escapeHtml(rule.prefix || '(all)')}</code></td>
                        <td>${actions.map(a => `<div class="small">${escapeHtml(a)}</div>`).join('')}</td>
                        <td>
                            <span class="badge ${rule.status === 'Enabled' ? 'text-bg-success' : 'text-bg-secondary'}">${escapeHtml(rule.status)}</span>
                        </td>
                        <td class="text-end">
                            <button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteLifecycleRule('${escapeHtml(rule.id)}')">
                                <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
                                    <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                                    <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                                </svg>
                            </button>
                        </td>
                    </tr>
                `;
            }).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    async function loadCorsRules(card, endpoint) {
        if (!card || !endpoint) return;
        const body = document.getElementById('cors-rules-body');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const rules = data.rules || [];
            if (rules.length === 0) {
                body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No CORS rules configured</td></tr>';
                return;
            }

            body.innerHTML = rules.map((rule, idx) => `
                <tr>
                    <td>${(rule.allowed_origins || []).map(o => `<code class="d-block">${escapeHtml(o)}</code>`).join('')}</td>
                    <td>${(rule.allowed_methods || []).map(m => `<span class="badge text-bg-secondary me-1">${escapeHtml(m)}</span>`).join('')}</td>
                    <td class="small text-muted">${(rule.allowed_headers || []).slice(0, 3).join(', ')}${(rule.allowed_headers || []).length > 3 ? '...' : ''}</td>
                    <td class="text-muted">${rule.max_age_seconds || 0}s</td>
                    <td class="text-end">
                        <button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteCorsRule(${idx})">
                            <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
                                <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                                <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                            </svg>
                        </button>
                    </td>
                </tr>
            `).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    async function loadAcl(card, endpoint) {
        if (!card || !endpoint) return;
        const body = card.querySelector('[data-acl-body]');
        if (!body) return;

        try {
            const response = await fetch(endpoint);
            const data = await response.json();

            if (!response.ok) {
                body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
                return;
            }

            const grants = data.grants || [];
            if (grants.length === 0) {
                body.innerHTML = '<tr><td colspan="3" class="text-center text-muted py-3">No ACL grants configured</td></tr>';
                return;
            }

            body.innerHTML = grants.map(grant => {
                const grantee = grant.grantee_type === 'CanonicalUser'
                    ? grant.display_name || grant.grantee_id
                    : grant.grantee_uri || grant.grantee_type;
                return `
                    <tr>
                        <td class="fw-medium">${escapeHtml(grantee)}</td>
                        <td><span class="badge text-bg-info">${escapeHtml(grant.permission)}</span></td>
                        <td class="text-muted small">${escapeHtml(grant.grantee_type)}</td>
                    </tr>
                `;
            }).join('');
        } catch (err) {
            body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
        }
    }

    async function deleteLifecycleRule(ruleId) {
        if (!confirm(`Delete lifecycle rule "${ruleId}"?`)) return;
        const card = document.getElementById('lifecycle-rules-card');
        if (!card) return;
        const endpoint = card.dataset.lifecycleUrl;
        const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';

        try {
            const resp = await fetch(endpoint, {
                method: 'DELETE',
                headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
                body: JSON.stringify({ rule_id: ruleId })
            });
            const data = await resp.json();
            if (!resp.ok) throw new Error(data.error || 'Failed to delete');
            showMessage({ title: 'Rule deleted', body: `Lifecycle rule "${ruleId}" has been deleted.`, variant: 'success' });
            loadLifecycleRules(card, endpoint);
        } catch (err) {
            showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
        }
    }

    async function deleteCorsRule(index) {
        if (!confirm('Delete this CORS rule?')) return;
        const card = document.getElementById('cors-rules-card');
        if (!card) return;
        const endpoint = card.dataset.corsUrl;
        const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';

        try {
            const resp = await fetch(endpoint, {
                method: 'DELETE',
                headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
                body: JSON.stringify({ rule_index: index })
            });
            const data = await resp.json();
            if (!resp.ok) throw new Error(data.error || 'Failed to delete');
            showMessage({ title: 'Rule deleted', body: 'CORS rule has been deleted.', variant: 'success' });
            loadCorsRules(card, endpoint);
        } catch (err) {
            showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
        }
    }

    return {
        init: init,
        loadLifecycleRules: loadLifecycleRules,
        loadCorsRules: loadCorsRules,
        loadAcl: loadAcl,
        deleteLifecycleRule: deleteLifecycleRule,
        deleteCorsRule: deleteCorsRule
    };
})();
python/static/js/bucket-detail-upload.js (new file, 599 lines)
@@ -0,0 +1,599 @@
|
||||
window.BucketDetailUpload = (function() {
|
||||
'use strict';
|
||||
|
||||
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
|
||||
const CHUNK_SIZE = 8 * 1024 * 1024;
|
||||
const MAX_PART_RETRIES = 3;
|
||||
const RETRY_BASE_DELAY_MS = 1000;
|
||||
|
||||
let state = {
|
||||
isUploading: false,
|
||||
uploadProgress: { current: 0, total: 0, currentFile: '' }
|
||||
};
|
||||
|
||||
let elements = {};
|
||||
let callbacks = {};
|
||||
|
||||
    function init(config) {
        elements = {
            uploadForm: config.uploadForm,
            uploadFileInput: config.uploadFileInput,
            uploadModal: config.uploadModal,
            uploadModalEl: config.uploadModalEl,
            uploadSubmitBtn: config.uploadSubmitBtn,
            uploadCancelBtn: config.uploadCancelBtn,
            uploadBtnText: config.uploadBtnText,
            uploadDropZone: config.uploadDropZone,
            uploadDropZoneLabel: config.uploadDropZoneLabel,
            uploadProgressStack: config.uploadProgressStack,
            uploadKeyPrefix: config.uploadKeyPrefix,
            singleFileOptions: config.singleFileOptions,
            bulkUploadProgress: config.bulkUploadProgress,
            bulkUploadStatus: config.bulkUploadStatus,
            bulkUploadCounter: config.bulkUploadCounter,
            bulkUploadProgressBar: config.bulkUploadProgressBar,
            bulkUploadCurrentFile: config.bulkUploadCurrentFile,
            bulkUploadResults: config.bulkUploadResults,
            bulkUploadSuccessAlert: config.bulkUploadSuccessAlert,
            bulkUploadErrorAlert: config.bulkUploadErrorAlert,
            bulkUploadSuccessCount: config.bulkUploadSuccessCount,
            bulkUploadErrorCount: config.bulkUploadErrorCount,
            bulkUploadErrorList: config.bulkUploadErrorList,
            floatingProgress: config.floatingProgress,
            floatingProgressBar: config.floatingProgressBar,
            floatingProgressStatus: config.floatingProgressStatus,
            floatingProgressTitle: config.floatingProgressTitle,
            floatingProgressExpand: config.floatingProgressExpand
        };

        callbacks = {
            showMessage: config.showMessage || function() {},
            formatBytes: config.formatBytes || function(b) { return b + ' bytes'; },
            escapeHtml: config.escapeHtml || function(s) { return s; },
            onUploadComplete: config.onUploadComplete || function() {},
            hasFolders: config.hasFolders || function() { return false; },
            getCurrentPrefix: config.getCurrentPrefix || function() { return ''; }
        };

        setupEventListeners();
        setupBeforeUnload();
    }

    function isUploading() {
        return state.isUploading;
    }

    function setupBeforeUnload() {
        window.addEventListener('beforeunload', (e) => {
            if (state.isUploading) {
                e.preventDefault();
                e.returnValue = 'Upload in progress. Are you sure you want to leave?';
                return e.returnValue;
            }
        });
    }

    function showFloatingProgress() {
        if (elements.floatingProgress) {
            elements.floatingProgress.classList.remove('d-none');
        }
    }

    function hideFloatingProgress() {
        if (elements.floatingProgress) {
            elements.floatingProgress.classList.add('d-none');
        }
    }

    function updateFloatingProgress(current, total, currentFile) {
        state.uploadProgress = { current, total, currentFile: currentFile || '' };
        if (elements.floatingProgressBar && total > 0) {
            const percent = Math.round((current / total) * 100);
            elements.floatingProgressBar.style.width = `${percent}%`;
        }
        if (elements.floatingProgressStatus) {
            if (currentFile) {
                elements.floatingProgressStatus.textContent = `${current}/${total} files - ${currentFile}`;
            } else {
                elements.floatingProgressStatus.textContent = `${current}/${total} files completed`;
            }
        }
        if (elements.floatingProgressTitle) {
            elements.floatingProgressTitle.textContent = `Uploading ${total} file${total !== 1 ? 's' : ''}...`;
        }
    }

    function refreshUploadDropLabel() {
        if (!elements.uploadDropZoneLabel || !elements.uploadFileInput) return;
        const files = elements.uploadFileInput.files;
        if (!files || files.length === 0) {
            elements.uploadDropZoneLabel.textContent = 'No file selected';
            if (elements.singleFileOptions) elements.singleFileOptions.classList.remove('d-none');
            return;
        }
        elements.uploadDropZoneLabel.textContent = files.length === 1 ? files[0].name : `${files.length} files selected`;
        if (elements.singleFileOptions) {
            elements.singleFileOptions.classList.toggle('d-none', files.length > 1);
        }
    }

    function updateUploadBtnText() {
        if (!elements.uploadBtnText || !elements.uploadFileInput) return;
        const files = elements.uploadFileInput.files;
        if (!files || files.length <= 1) {
            elements.uploadBtnText.textContent = 'Upload';
        } else {
            elements.uploadBtnText.textContent = `Upload ${files.length} files`;
        }
    }

    function resetUploadUI() {
        if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.add('d-none');
        if (elements.bulkUploadResults) elements.bulkUploadResults.classList.add('d-none');
        if (elements.bulkUploadSuccessAlert) elements.bulkUploadSuccessAlert.classList.remove('d-none');
        if (elements.bulkUploadErrorAlert) elements.bulkUploadErrorAlert.classList.add('d-none');
        if (elements.bulkUploadErrorList) elements.bulkUploadErrorList.innerHTML = '';
        if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
        if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
        if (elements.uploadProgressStack) elements.uploadProgressStack.innerHTML = '';
        if (elements.uploadDropZone) {
            elements.uploadDropZone.classList.remove('upload-locked');
            elements.uploadDropZone.style.pointerEvents = '';
        }
        state.isUploading = false;
        hideFloatingProgress();
    }

    function setUploadLockState(locked) {
        if (elements.uploadDropZone) {
            elements.uploadDropZone.classList.toggle('upload-locked', locked);
            elements.uploadDropZone.style.pointerEvents = locked ? 'none' : '';
        }
        if (elements.uploadFileInput) {
            elements.uploadFileInput.disabled = locked;
        }
    }

    function createProgressItem(file) {
        const item = document.createElement('div');
        item.className = 'upload-progress-item';
        item.dataset.state = 'uploading';
        item.innerHTML = `
            <div class="d-flex justify-content-between align-items-start">
                <div class="min-width-0 flex-grow-1">
                    <div class="file-name">${callbacks.escapeHtml(file.name)}</div>
                    <div class="file-size">${callbacks.formatBytes(file.size)}</div>
                </div>
                <div class="upload-status text-end ms-2">Preparing...</div>
            </div>
            <div class="progress-container">
                <div class="progress">
                    <div class="progress-bar bg-primary" role="progressbar" style="width: 0%"></div>
                </div>
                <div class="progress-text">
                    <span class="progress-loaded">0 B</span>
                    <span class="progress-percent">0%</span>
                </div>
            </div>
        `;
        return item;
    }

    function updateProgressItem(item, { loaded, total, status, progressState, error }) {
        if (progressState) item.dataset.state = progressState;
        const statusEl = item.querySelector('.upload-status');
        const progressBar = item.querySelector('.progress-bar');
        const progressLoaded = item.querySelector('.progress-loaded');
        const progressPercent = item.querySelector('.progress-percent');

        if (status) {
            statusEl.textContent = status;
            statusEl.className = 'upload-status text-end ms-2';
            if (progressState === 'success') statusEl.classList.add('success');
            if (progressState === 'error') statusEl.classList.add('error');
        }
        if (typeof loaded === 'number' && typeof total === 'number' && total > 0) {
            const percent = Math.round((loaded / total) * 100);
            progressBar.style.width = `${percent}%`;
            progressLoaded.textContent = `${callbacks.formatBytes(loaded)} / ${callbacks.formatBytes(total)}`;
            progressPercent.textContent = `${percent}%`;
        }
        if (error) {
            const progressContainer = item.querySelector('.progress-container');
            if (progressContainer) {
                progressContainer.innerHTML = `<div class="text-danger small mt-1">${callbacks.escapeHtml(error)}</div>`;
            }
        }
    }

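    // Upload one part with XMLHttpRequest rather than fetch(): XHR's
    // upload.onprogress gives byte-level progress (fetch has no equivalent
    // upload progress event). baseBytes is the running total from earlier
    // parts, so the bar reflects whole-file position, not just this chunk.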
    function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
        return new Promise((resolve, reject) => {
            const xhr = new XMLHttpRequest();
            xhr.open('PUT', url, true);
            xhr.setRequestHeader('X-CSRFToken', csrfToken || '');

            xhr.upload.addEventListener('progress', (e) => {
                if (e.lengthComputable) {
                    updateProgressItem(progressItem, {
                        status: `Part ${partNumber}/${totalParts}`,
                        loaded: baseBytes + e.loaded,
                        total: fileSize
                    });
                }
            });

            xhr.addEventListener('load', () => {
                if (xhr.status >= 200 && xhr.status < 300) {
                    try {
                        resolve(JSON.parse(xhr.responseText));
                    } catch {
                        reject(new Error(`Part ${partNumber}: invalid response`));
                    }
                } else {
                    try {
                        const data = JSON.parse(xhr.responseText);
                        reject(new Error(data.error || `Part ${partNumber} failed (${xhr.status})`));
                    } catch {
                        reject(new Error(`Part ${partNumber} failed (${xhr.status})`));
                    }
                }
            });

            xhr.addEventListener('error', () => reject(new Error(`Part ${partNumber}: network error`)));
            xhr.addEventListener('abort', () => reject(new Error(`Part ${partNumber}: aborted`)));

            xhr.send(chunk);
        });
    }

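    // Retry a failed part with exponential backoff: with MAX_PART_RETRIES = 3
    // and RETRY_BASE_DELAY_MS = 1000 the delays are 1 s, 2 s, then 4 s before
    // the last error is rethrown to the caller.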
    async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
        let lastError;
        for (let attempt = 0; attempt <= MAX_PART_RETRIES; attempt++) {
            try {
                return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
            } catch (err) {
                lastError = err;
                if (attempt < MAX_PART_RETRIES) {
                    const delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
                    updateProgressItem(progressItem, {
                        status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
                        loaded: baseBytes,
                        total: fileSize
                    });
                    await new Promise(r => setTimeout(r, delay));
                }
            }
        }
        throw lastError;
    }

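    // S3-style multipart flow against the backend's endpoints: initiate to get
    // an upload_id, PUT each CHUNK_SIZE slice in order while collecting part
    // ETags, then complete with the part list; any failure triggers a
    // best-effort abort so the server can drop the partial upload.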
    async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
        const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;

        updateProgressItem(progressItem, { status: 'Initiating...', loaded: 0, total: file.size });
        const initResp = await fetch(urls.initUrl, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
            body: JSON.stringify({ object_key: objectKey, metadata })
        });
        if (!initResp.ok) {
            const err = await initResp.json().catch(() => ({}));
            throw new Error(err.error || 'Failed to initiate upload');
        }
        const { upload_id } = await initResp.json();

        const partUrl = urls.partTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
        const completeUrl = urls.completeTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
        const abortUrl = urls.abortTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);

        const parts = [];
        const totalParts = Math.ceil(file.size / CHUNK_SIZE);
        let uploadedBytes = 0;

        try {
            for (let partNumber = 1; partNumber <= totalParts; partNumber++) {
                const start = (partNumber - 1) * CHUNK_SIZE;
                const end = Math.min(start + CHUNK_SIZE, file.size);
                const chunk = file.slice(start, end);

                const partData = await uploadPartWithRetry(
                    `${partUrl}?partNumber=${partNumber}`,
                    chunk, csrfToken, uploadedBytes, file.size,
                    progressItem, partNumber, totalParts
                );

                parts.push({ part_number: partNumber, etag: partData.etag });
                uploadedBytes += (end - start);

                updateProgressItem(progressItem, {
                    loaded: uploadedBytes,
                    total: file.size
                });
            }

            updateProgressItem(progressItem, { status: 'Completing...', loaded: file.size, total: file.size });
            const completeResp = await fetch(completeUrl, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
                body: JSON.stringify({ parts })
            });

            if (!completeResp.ok) {
                const err = await completeResp.json().catch(() => ({}));
                throw new Error(err.error || 'Failed to complete upload');
            }

            return await completeResp.json();
        } catch (err) {
            try {
                await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
            } catch {}
            throw err;
        }
    }

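    // Single-request path for files under the multipart threshold: a
    // multipart/form-data POST via XHR, again chosen for its upload progress
    // events.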
    async function uploadRegular(file, objectKey, metadata, progressItem, formAction) {
        return new Promise((resolve, reject) => {
            const formData = new FormData();
            formData.append('object', file);
            formData.append('object_key', objectKey);
            if (metadata) formData.append('metadata', JSON.stringify(metadata));
            const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
            if (csrfToken) formData.append('csrf_token', csrfToken);

            const xhr = new XMLHttpRequest();
            xhr.open('POST', formAction, true);
            xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest');

            xhr.upload.addEventListener('progress', (e) => {
                if (e.lengthComputable) {
                    updateProgressItem(progressItem, {
                        status: 'Uploading...',
                        loaded: e.loaded,
                        total: e.total
                    });
                }
            });

            xhr.addEventListener('load', () => {
                if (xhr.status >= 200 && xhr.status < 300) {
                    try {
                        const data = JSON.parse(xhr.responseText);
                        if (data.status === 'error') {
                            reject(new Error(data.message || 'Upload failed'));
                        } else {
                            resolve(data);
                        }
                    } catch {
                        resolve({});
                    }
                } else {
                    try {
                        const data = JSON.parse(xhr.responseText);
                        reject(new Error(data.message || `Upload failed (${xhr.status})`));
                    } catch {
                        reject(new Error(`Upload failed (${xhr.status})`));
                    }
                }
            });

            xhr.addEventListener('error', () => reject(new Error('Network error')));
            xhr.addEventListener('abort', () => reject(new Error('Upload aborted')));

            xhr.send(formData);
        });
    }

    async function uploadSingleFile(file, keyPrefix, metadata, progressItem, urls) {
        const objectKey = keyPrefix ? `${keyPrefix}${file.name}` : file.name;
        const shouldUseMultipart = file.size >= MULTIPART_THRESHOLD && urls.initUrl;

        if (!progressItem && elements.uploadProgressStack) {
            progressItem = createProgressItem(file);
            elements.uploadProgressStack.appendChild(progressItem);
        }

        try {
            let result;
            if (shouldUseMultipart) {
                updateProgressItem(progressItem, { status: 'Multipart upload...', loaded: 0, total: file.size });
                result = await uploadMultipart(file, objectKey, metadata, progressItem, urls);
            } else {
                updateProgressItem(progressItem, { status: 'Uploading...', loaded: 0, total: file.size });
                result = await uploadRegular(file, objectKey, metadata, progressItem, urls.formAction);
            }
            updateProgressItem(progressItem, { progressState: 'success', status: 'Complete', loaded: file.size, total: file.size });
            return result;
        } catch (err) {
            updateProgressItem(progressItem, { progressState: 'error', status: 'Failed', error: err.message });
            throw err;
        }
    }

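    // Drive the whole upload run: validate the optional metadata JSON once up
    // front, lock the UI, then upload the files one at a time, collecting
    // successes and failures separately so one bad file does not fail the batch.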
    async function performBulkUpload(files, urls) {
        if (state.isUploading || !files || files.length === 0) return;

        state.isUploading = true;
        setUploadLockState(true);
        const keyPrefix = (elements.uploadKeyPrefix?.value || '').trim();
        const metadataRaw = elements.uploadForm?.querySelector('textarea[name="metadata"]')?.value?.trim();
        let metadata = null;
        if (metadataRaw) {
            try {
                metadata = JSON.parse(metadataRaw);
            } catch {
                callbacks.showMessage({ title: 'Invalid metadata', body: 'Metadata must be valid JSON.', variant: 'danger' });
                resetUploadUI();
                return;
            }
        }

        if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.remove('d-none');
        if (elements.bulkUploadResults) elements.bulkUploadResults.classList.add('d-none');
        if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = true;
        if (elements.uploadFileInput) elements.uploadFileInput.disabled = true;

        const successFiles = [];
        const errorFiles = [];
        const total = files.length;

        updateFloatingProgress(0, total, files[0]?.name || '');

        for (let i = 0; i < total; i++) {
            const file = files[i];
            const current = i + 1;

            if (elements.bulkUploadCounter) elements.bulkUploadCounter.textContent = `${current}/${total}`;
            if (elements.bulkUploadCurrentFile) elements.bulkUploadCurrentFile.textContent = `Uploading: ${file.name}`;
            if (elements.bulkUploadProgressBar) {
                const percent = Math.round((current / total) * 100);
                elements.bulkUploadProgressBar.style.width = `${percent}%`;
            }
            updateFloatingProgress(i, total, file.name);

            try {
                await uploadSingleFile(file, keyPrefix, metadata, null, urls);
                successFiles.push(file.name);
            } catch (error) {
                errorFiles.push({ name: file.name, error: error.message || 'Unknown error' });
            }
        }
        updateFloatingProgress(total, total);

        if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.add('d-none');
        if (elements.bulkUploadResults) elements.bulkUploadResults.classList.remove('d-none');

        if (elements.bulkUploadSuccessCount) elements.bulkUploadSuccessCount.textContent = successFiles.length;
        if (successFiles.length === 0 && elements.bulkUploadSuccessAlert) {
            elements.bulkUploadSuccessAlert.classList.add('d-none');
        }

        if (errorFiles.length > 0) {
            if (elements.bulkUploadErrorCount) elements.bulkUploadErrorCount.textContent = errorFiles.length;
            if (elements.bulkUploadErrorAlert) elements.bulkUploadErrorAlert.classList.remove('d-none');
            if (elements.bulkUploadErrorList) {
                elements.bulkUploadErrorList.innerHTML = errorFiles
                    .map(f => `<li><strong>${callbacks.escapeHtml(f.name)}</strong>: ${callbacks.escapeHtml(f.error)}</li>`)
                    .join('');
            }
        }

        state.isUploading = false;
        setUploadLockState(false);

        if (successFiles.length > 0) {
            if (elements.uploadBtnText) elements.uploadBtnText.textContent = 'Refreshing...';
            callbacks.onUploadComplete(successFiles, errorFiles);
        } else {
            if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
            if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
        }
    }

    function setupEventListeners() {
        if (elements.uploadFileInput) {
            elements.uploadFileInput.addEventListener('change', () => {
                if (state.isUploading) return;
                refreshUploadDropLabel();
                updateUploadBtnText();
                resetUploadUI();
            });
        }

        if (elements.uploadDropZone) {
            elements.uploadDropZone.addEventListener('click', () => {
                if (state.isUploading) return;
                elements.uploadFileInput?.click();
            });
        }

        if (elements.floatingProgressExpand) {
            elements.floatingProgressExpand.addEventListener('click', () => {
                if (elements.uploadModal) {
                    elements.uploadModal.show();
                }
            });
        }

        if (elements.uploadModalEl) {
            elements.uploadModalEl.addEventListener('hide.bs.modal', () => {
                if (state.isUploading) {
                    showFloatingProgress();
                }
            });

            elements.uploadModalEl.addEventListener('hidden.bs.modal', () => {
                if (!state.isUploading) {
                    resetUploadUI();
                    if (elements.uploadFileInput) elements.uploadFileInput.value = '';
                    refreshUploadDropLabel();
                    updateUploadBtnText();
                }
            });

            elements.uploadModalEl.addEventListener('show.bs.modal', () => {
                if (state.isUploading) {
                    hideFloatingProgress();
                }
                if (callbacks.hasFolders() && callbacks.getCurrentPrefix()) {
                    if (elements.uploadKeyPrefix) {
                        elements.uploadKeyPrefix.value = callbacks.getCurrentPrefix();
                    }
                } else if (elements.uploadKeyPrefix) {
                    elements.uploadKeyPrefix.value = '';
                }
            });
        }
    }

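    // Make any element a drag-and-drop target: highlight on dragenter/over,
    // and on drop copy the dropped FileList into the file input and fire a
    // synthetic 'change' event so the normal selection path runs.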
    function wireDropTarget(target, options) {
        const { highlightClass = '', autoOpenModal = false } = options || {};
        if (!target) return;

        const preventDefaults = (event) => {
            event.preventDefault();
            event.stopPropagation();
        };

        ['dragenter', 'dragover'].forEach((eventName) => {
            target.addEventListener(eventName, (event) => {
                preventDefaults(event);
                if (state.isUploading) return;
                if (highlightClass) {
                    target.classList.add(highlightClass);
                }
            });
        });

        ['dragleave', 'drop'].forEach((eventName) => {
            target.addEventListener(eventName, (event) => {
                preventDefaults(event);
                if (highlightClass) {
                    target.classList.remove(highlightClass);
                }
            });
        });

        target.addEventListener('drop', (event) => {
            if (state.isUploading) return;
            if (!event.dataTransfer?.files?.length || !elements.uploadFileInput) {
                return;
            }
            elements.uploadFileInput.files = event.dataTransfer.files;
            elements.uploadFileInput.dispatchEvent(new Event('change', { bubbles: true }));
            if (autoOpenModal && elements.uploadModal) {
                elements.uploadModal.show();
            }
        });
    }

    return {
        init: init,
        isUploading: isUploading,
        performBulkUpload: performBulkUpload,
        wireDropTarget: wireDropTarget,
        resetUploadUI: resetUploadUI,
        refreshUploadDropLabel: refreshUploadDropLabel,
        updateUploadBtnText: updateUploadBtnText
    };
})();
120 python/static/js/bucket-detail-utils.js Normal file
@@ -0,0 +1,120 @@
window.BucketDetailUtils = (function() {
    'use strict';

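    // Lightweight JSON editing aids for a plain <textarea>: Enter re-applies
    // the current line's indentation (plus one level after "{" or "["), and
    // Tab / Shift+Tab indent or outdent by two spaces instead of moving focus.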
    function setupJsonAutoIndent(textarea) {
        if (!textarea) return;

        textarea.addEventListener('keydown', function(e) {
            if (e.key === 'Enter') {
                e.preventDefault();

                const start = this.selectionStart;
                const end = this.selectionEnd;
                const value = this.value;

                const lineStart = value.lastIndexOf('\n', start - 1) + 1;
                const currentLine = value.substring(lineStart, start);

                const indentMatch = currentLine.match(/^(\s*)/);
                let indent = indentMatch ? indentMatch[1] : '';

                const trimmedLine = currentLine.trim();
                const lastChar = trimmedLine.slice(-1);

                let newIndent = indent;
                let insertAfter = '';

                if (lastChar === '{' || lastChar === '[') {
                    newIndent = indent + '  ';

                    const charAfterCursor = value.substring(start, start + 1).trim();
                    if ((lastChar === '{' && charAfterCursor === '}') ||
                        (lastChar === '[' && charAfterCursor === ']')) {
                        insertAfter = '\n' + indent;
                    }
                } else if (lastChar === ',' || lastChar === ':') {
                    newIndent = indent;
                }

                const insertion = '\n' + newIndent + insertAfter;
                const newValue = value.substring(0, start) + insertion + value.substring(end);

                this.value = newValue;

                const newCursorPos = start + 1 + newIndent.length;
                this.selectionStart = this.selectionEnd = newCursorPos;

                this.dispatchEvent(new Event('input', { bubbles: true }));
            }

            if (e.key === 'Tab') {
                e.preventDefault();
                const start = this.selectionStart;
                const end = this.selectionEnd;

                if (e.shiftKey) {
                    const lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
                    const lineContent = this.value.substring(lineStart, start);
                    if (lineContent.startsWith('  ')) {
                        this.value = this.value.substring(0, lineStart) +
                            this.value.substring(lineStart + 2);
                        this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
                    }
                } else {
                    this.value = this.value.substring(0, start) + '  ' + this.value.substring(end);
                    this.selectionStart = this.selectionEnd = start + 2;
                }

                this.dispatchEvent(new Event('input', { bubbles: true }));
            }
        });
    }

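    // Render a byte count with a binary (1024-based) unit, e.g.
    // formatBytes(1536) -> "1.5 KB" and formatBytes(512) -> "512 bytes".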
    function formatBytes(bytes) {
        if (!Number.isFinite(bytes)) return `${bytes} bytes`;
        const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
        let i = 0;
        let size = bytes;
        while (size >= 1024 && i < units.length - 1) {
            size /= 1024;
            i++;
        }
        return `${size.toFixed(i === 0 ? 0 : 1)} ${units[i]}`;
    }

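    // Escape the five HTML-significant characters so untrusted strings can be
    // interpolated into innerHTML or attribute values safely.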
    function escapeHtml(value) {
        if (value === null || value === undefined) return '';
        return String(value)
            .replace(/&/g, '&amp;')
            .replace(/</g, '&lt;')
            .replace(/>/g, '&gt;')
            .replace(/"/g, '&quot;')
            .replace(/'/g, '&#39;');
    }

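    // Clipboard fallback via a hidden textarea and document.execCommand('copy'),
    // for contexts where navigator.clipboard is unavailable (e.g. plain HTTP,
    // where there is no secure context).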
    function fallbackCopy(text) {
        const textArea = document.createElement('textarea');
        textArea.value = text;
        textArea.style.position = 'fixed';
        textArea.style.left = '-9999px';
        textArea.style.top = '-9999px';
        document.body.appendChild(textArea);
        textArea.focus();
        textArea.select();
        let success = false;
        try {
            success = document.execCommand('copy');
        } catch {
            success = false;
        }
        document.body.removeChild(textArea);
        return success;
    }

    return {
        setupJsonAutoIndent: setupJsonAutoIndent,
        formatBytes: formatBytes,
        escapeHtml: escapeHtml,
        fallbackCopy: fallbackCopy
    };
})();
343 python/static/js/connections-management.js Normal file
@@ -0,0 +1,343 @@
window.ConnectionsManagement = (function() {
    'use strict';

    var endpoints = {};
    var csrfToken = '';

    function init(config) {
        endpoints = config.endpoints || {};
        csrfToken = config.csrfToken || '';

        setupEventListeners();
        checkAllConnectionHealth();
    }

    function togglePassword(id) {
        var input = document.getElementById(id);
        if (input) {
            input.type = input.type === 'password' ? 'text' : 'password';
        }
    }

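    // POST the form fields (minus the CSRF token) as JSON to the test
    // endpoint, with an AbortController that cancels the request after 20 s so
    // an unreachable endpoint fails fast instead of hanging the UI.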
    async function testConnection(formId, resultId) {
        var form = document.getElementById(formId);
        var resultDiv = document.getElementById(resultId);
        if (!form || !resultDiv) return;

        var formData = new FormData(form);
        var data = {};
        formData.forEach(function(value, key) {
            if (key !== 'csrf_token') {
                data[key] = value;
            }
        });

        resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing connection...</div>';

        var controller = new AbortController();
        var timeoutId = setTimeout(function() { controller.abort(); }, 20000);

        try {
            var response = await fetch(endpoints.test, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'X-CSRFToken': csrfToken
                },
                body: JSON.stringify(data),
                signal: controller.signal
            });
            clearTimeout(timeoutId);

            var result = await response.json();
            if (response.ok) {
                resultDiv.innerHTML = '<div class="text-success">' +
                    '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>' +
                    '</svg>' + window.UICore.escapeHtml(result.message) + '</div>';
            } else {
                resultDiv.innerHTML = '<div class="text-danger">' +
                    '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>' +
                    '</svg>' + window.UICore.escapeHtml(result.message) + '</div>';
            }
        } catch (error) {
            clearTimeout(timeoutId);
            var message = error.name === 'AbortError'
                ? 'Connection test timed out - endpoint may be unreachable'
                : 'Connection failed: Network error';
            resultDiv.innerHTML = '<div class="text-danger">' +
                '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
                '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>' +
                '</svg>' + message + '</div>';
        }
    }

    async function checkConnectionHealth(connectionId, statusEl) {
        if (!statusEl) return;

        try {
            var controller = new AbortController();
            var timeoutId = setTimeout(function() { controller.abort(); }, 10000);

            var response = await fetch(endpoints.healthTemplate.replace('CONNECTION_ID', connectionId), {
                signal: controller.signal
            });
            clearTimeout(timeoutId);

            var data = await response.json();
            if (data.healthy) {
                statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/></svg>';
                statusEl.setAttribute('data-status', 'healthy');
                statusEl.setAttribute('title', 'Connected');
            } else {
                statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">' +
                    '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>';
                statusEl.setAttribute('data-status', 'unhealthy');
                statusEl.setAttribute('title', data.error || 'Unreachable');
            }
        } catch (error) {
            statusEl.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-warning" viewBox="0 0 16 16">' +
                '<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/></svg>';
            statusEl.setAttribute('data-status', 'unknown');
            statusEl.setAttribute('title', 'Could not check status');
        }
    }

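    // Kick off a health probe for every listed connection, staggered 200 ms
    // apart so a long list does not fire a burst of simultaneous requests.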
    function checkAllConnectionHealth() {
        var rows = document.querySelectorAll('tr[data-connection-id]');
        rows.forEach(function(row, index) {
            var connectionId = row.getAttribute('data-connection-id');
            var statusEl = row.querySelector('.connection-status');
            if (statusEl) {
                setTimeout(function() {
                    checkConnectionHealth(connectionId, statusEl);
                }, index * 200);
            }
        });
    }

    function updateConnectionCount() {
        var countBadge = document.querySelector('.badge.bg-primary.bg-opacity-10.text-primary.fs-6');
        if (countBadge) {
            var remaining = document.querySelectorAll('tr[data-connection-id]').length;
            countBadge.textContent = remaining + ' connection' + (remaining !== 1 ? 's' : '');
        }
    }

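    // Render a <tr> for a newly created connection, mirroring the server-side
    // template: masked access key (first 8 / last 4 characters) and a spinner
    // placeholder until the first health check resolves.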
    function createConnectionRowHtml(conn) {
        var ak = conn.access_key || '';
        var maskedKey = ak.length > 12 ? ak.slice(0, 8) + '...' + ak.slice(-4) : ak;

        return '<tr data-connection-id="' + window.UICore.escapeHtml(conn.id) + '">' +
            '<td class="text-center">' +
            '<span class="connection-status" data-status="checking" title="Checking...">' +
            '<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 12px; height: 12px;"></span>' +
            '</span></td>' +
            '<td><div class="d-flex align-items-center gap-2">' +
            '<div class="connection-icon"><svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/></svg></div>' +
            '<span class="fw-medium">' + window.UICore.escapeHtml(conn.name) + '</span>' +
            '</div></td>' +
            '<td><span class="text-muted small text-truncate d-inline-block" style="max-width: 200px;" title="' + window.UICore.escapeHtml(conn.endpoint_url) + '">' + window.UICore.escapeHtml(conn.endpoint_url) + '</span></td>' +
            '<td><span class="badge bg-primary bg-opacity-10 text-primary">' + window.UICore.escapeHtml(conn.region) + '</span></td>' +
            '<td><code class="small">' + window.UICore.escapeHtml(maskedKey) + '</code></td>' +
            '<td class="text-end"><div class="btn-group btn-group-sm" role="group">' +
            '<button type="button" class="btn btn-outline-secondary" data-bs-toggle="modal" data-bs-target="#editConnectionModal" ' +
            'data-id="' + window.UICore.escapeHtml(conn.id) + '" data-name="' + window.UICore.escapeHtml(conn.name) + '" ' +
            'data-endpoint="' + window.UICore.escapeHtml(conn.endpoint_url) + '" data-region="' + window.UICore.escapeHtml(conn.region) + '" ' +
            'data-access="' + window.UICore.escapeHtml(conn.access_key) + '" title="Edit connection">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg></button>' +
            '<button type="button" class="btn btn-outline-danger" data-bs-toggle="modal" data-bs-target="#deleteConnectionModal" ' +
            'data-id="' + window.UICore.escapeHtml(conn.id) + '" data-name="' + window.UICore.escapeHtml(conn.name) + '" title="Delete connection">' +
            '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
            '<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>' +
            '<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/></svg></button>' +
            '</div></td></tr>';
    }

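    // Wire the create/edit/delete modals and forms. Submissions go through
    // window.UICore.submitFormAjax, and on success the table is patched in
    // place (row added, updated, or removed); it falls back to
    // location.reload() when the response lacks row data or the last row
    // is removed.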
    function setupEventListeners() {
        var testBtn = document.getElementById('testConnectionBtn');
        if (testBtn) {
            testBtn.addEventListener('click', function() {
                testConnection('createConnectionForm', 'testResult');
            });
        }

        var editTestBtn = document.getElementById('editTestConnectionBtn');
        if (editTestBtn) {
            editTestBtn.addEventListener('click', function() {
                testConnection('editConnectionForm', 'editTestResult');
            });
        }

        var editModal = document.getElementById('editConnectionModal');
        if (editModal) {
            editModal.addEventListener('show.bs.modal', function(event) {
                var button = event.relatedTarget;
                if (!button) return;

                var id = button.getAttribute('data-id');

                document.getElementById('edit_name').value = button.getAttribute('data-name') || '';
                document.getElementById('edit_endpoint_url').value = button.getAttribute('data-endpoint') || '';
                document.getElementById('edit_region').value = button.getAttribute('data-region') || '';
                document.getElementById('edit_access_key').value = button.getAttribute('data-access') || '';
                document.getElementById('edit_secret_key').value = '';
                document.getElementById('edit_secret_key').placeholder = '(unchanged — leave blank to keep current)';
                document.getElementById('edit_secret_key').required = false;
                document.getElementById('editTestResult').innerHTML = '';

                var form = document.getElementById('editConnectionForm');
                form.action = endpoints.updateTemplate.replace('CONNECTION_ID', id);
            });
        }

        var deleteModal = document.getElementById('deleteConnectionModal');
        if (deleteModal) {
            deleteModal.addEventListener('show.bs.modal', function(event) {
                var button = event.relatedTarget;
                if (!button) return;

                var id = button.getAttribute('data-id');
                var name = button.getAttribute('data-name');

                document.getElementById('deleteConnectionName').textContent = name;
                var form = document.getElementById('deleteConnectionForm');
                form.action = endpoints.deleteTemplate.replace('CONNECTION_ID', id);
            });
        }

        var createForm = document.getElementById('createConnectionForm');
        if (createForm) {
            createForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(createForm, {
                    successMessage: 'Connection created',
                    onSuccess: function(data) {
                        createForm.reset();
                        document.getElementById('testResult').innerHTML = '';

                        if (data.connection) {
                            var emptyState = document.querySelector('.empty-state');
                            if (emptyState) {
                                var cardBody = emptyState.closest('.card-body');
                                if (cardBody) {
                                    cardBody.innerHTML = '<div class="table-responsive"><table class="table table-hover align-middle mb-0">' +
                                        '<thead class="table-light"><tr>' +
                                        '<th scope="col" style="width: 50px;">Status</th>' +
                                        '<th scope="col">Name</th><th scope="col">Endpoint</th>' +
                                        '<th scope="col">Region</th><th scope="col">Access Key</th>' +
                                        '<th scope="col" class="text-end">Actions</th></tr></thead>' +
                                        '<tbody></tbody></table></div>';
                                }
                            }

                            var tbody = document.querySelector('table tbody');
                            if (tbody) {
                                tbody.insertAdjacentHTML('beforeend', createConnectionRowHtml(data.connection));
                                var newRow = tbody.lastElementChild;
                                var statusEl = newRow.querySelector('.connection-status');
                                if (statusEl) {
                                    checkConnectionHealth(data.connection.id, statusEl);
                                }
                            }
                            updateConnectionCount();
                        } else {
                            location.reload();
                        }
                    }
                });
            });
        }

        var editForm = document.getElementById('editConnectionForm');
        if (editForm) {
            editForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(editForm, {
                    successMessage: 'Connection updated',
                    onSuccess: function(data) {
                        var modal = bootstrap.Modal.getInstance(document.getElementById('editConnectionModal'));
                        if (modal) modal.hide();

                        var connId = editForm.action.split('/').slice(-2)[0];
                        var row = document.querySelector('tr[data-connection-id="' + connId + '"]');
                        if (row && data.connection) {
                            var nameCell = row.querySelector('.fw-medium');
                            if (nameCell) nameCell.textContent = data.connection.name;

                            var endpointCell = row.querySelector('.text-truncate');
                            if (endpointCell) {
                                endpointCell.textContent = data.connection.endpoint_url;
                                endpointCell.title = data.connection.endpoint_url;
                            }

                            var regionBadge = row.querySelector('.badge.bg-primary');
                            if (regionBadge) regionBadge.textContent = data.connection.region;

                            var accessCode = row.querySelector('code.small');
                            if (accessCode && data.connection.access_key) {
                                var ak = data.connection.access_key;
                                accessCode.textContent = ak.slice(0, 8) + '...' + ak.slice(-4);
                            }

                            var editBtn = row.querySelector('[data-bs-target="#editConnectionModal"]');
                            if (editBtn) {
                                editBtn.setAttribute('data-name', data.connection.name);
                                editBtn.setAttribute('data-endpoint', data.connection.endpoint_url);
                                editBtn.setAttribute('data-region', data.connection.region);
                                editBtn.setAttribute('data-access', data.connection.access_key);
                            }

                            var deleteBtn = row.querySelector('[data-bs-target="#deleteConnectionModal"]');
                            if (deleteBtn) {
                                deleteBtn.setAttribute('data-name', data.connection.name);
                            }

                            var statusEl = row.querySelector('.connection-status');
                            if (statusEl) {
                                checkConnectionHealth(connId, statusEl);
                            }
                        }
                    }
                });
            });
        }

        var deleteForm = document.getElementById('deleteConnectionForm');
        if (deleteForm) {
            deleteForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(deleteForm, {
                    successMessage: 'Connection deleted',
                    onSuccess: function(data) {
                        var modal = bootstrap.Modal.getInstance(document.getElementById('deleteConnectionModal'));
                        if (modal) modal.hide();

                        var connId = deleteForm.action.split('/').slice(-2)[0];
                        var row = document.querySelector('tr[data-connection-id="' + connId + '"]');
                        if (row) {
                            row.remove();
                        }

                        updateConnectionCount();

                        if (document.querySelectorAll('tr[data-connection-id]').length === 0) {
                            location.reload();
                        }
                    }
                });
            });
        }
    }

    return {
        init: init,
        togglePassword: togglePassword,
        testConnection: testConnection,
        checkConnectionHealth: checkConnectionHealth
    };
})();
807 python/static/js/iam-management.js Normal file
@@ -0,0 +1,807 @@
window.IAMManagement = (function() {
    'use strict';

    var users = [];
    var currentUserKey = null;
    var endpoints = {};
    var csrfToken = '';
    var iamLocked = false;

    var policyModal = null;
    var editUserModal = null;
    var deleteUserModal = null;
    var rotateSecretModal = null;
    var expiryModal = null;
    var currentRotateKey = null;
    var currentEditKey = null;
    var currentDeleteKey = null;
    var currentExpiryKey = null;

    var ALL_S3_ACTIONS = [
        'list', 'read', 'write', 'delete', 'share', 'policy',
        'replication', 'lifecycle', 'cors',
        'create_bucket', 'delete_bucket',
        'versioning', 'tagging', 'encryption', 'quota',
        'object_lock', 'notification', 'logging', 'website'
    ];

    var policyTemplates = {
        full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
        readonly: [{ bucket: '*', actions: ['list', 'read'] }],
        writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
        operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
        bucketadmin: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'versioning', 'tagging', 'encryption', 'cors', 'lifecycle', 'quota', 'object_lock', 'notification', 'logging', 'website', 'replication'] }]
    };

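    // A user counts as an admin if any of their policies grants 'iam:*' or the
    // global '*' wildcard; admin cards get the distinct badge and styling
    // applied in createUserCardHtml below.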
    function isAdminUser(policies) {
        if (!policies || !policies.length) return false;
        return policies.some(function(p) {
            return p.actions && (p.actions.indexOf('iam:*') >= 0 || p.actions.indexOf('*') >= 0);
        });
    }

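    // Collapse an action list into a short label: '*' or the complete
    // ALL_S3_ACTIONS set reads as "Full Access", common subsets map to
    // "Read Only" / "Read + Write" / "Read + Write + Delete", and anything
    // else falls back to "Custom (n)".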
    function getPermissionLevel(actions) {
        if (!actions || !actions.length) return 'Custom (0)';
        if (actions.indexOf('*') >= 0) return 'Full Access';
        if (actions.length >= ALL_S3_ACTIONS.length) {
            var hasAll = ALL_S3_ACTIONS.every(function(a) { return actions.indexOf(a) >= 0; });
            if (hasAll) return 'Full Access';
        }
        var has = function(a) { return actions.indexOf(a) >= 0; };
        if (has('list') && has('read') && has('write') && has('delete')) return 'Read + Write + Delete';
        if (has('list') && has('read') && has('write')) return 'Read + Write';
        if (has('list') && has('read')) return 'Read Only';
        return 'Custom (' + actions.length + ')';
    }

    function getBucketLabel(bucket) {
        return bucket === '*' ? 'All Buckets' : bucket;
    }

    function init(config) {
        users = config.users || [];
        currentUserKey = config.currentUserKey || null;
        endpoints = config.endpoints || {};
        csrfToken = config.csrfToken || '';
        iamLocked = config.iamLocked || false;

        if (iamLocked) return;

        initModals();
        setupJsonAutoIndent();
        setupCopyButtons();
        setupPolicyEditor();
        setupCreateUserModal();
        setupEditUserModal();
        setupDeleteUserModal();
        setupRotateSecretModal();
        setupExpiryModal();
        setupFormHandlers();
        setupSearch();
        setupCopyAccessKeyButtons();
    }

    function initModals() {
        var policyModalEl = document.getElementById('policyEditorModal');
        var editModalEl = document.getElementById('editUserModal');
        var deleteModalEl = document.getElementById('deleteUserModal');
        var rotateModalEl = document.getElementById('rotateSecretModal');
        var expiryModalEl = document.getElementById('expiryModal');

        if (policyModalEl) policyModal = new bootstrap.Modal(policyModalEl);
        if (editModalEl) editUserModal = new bootstrap.Modal(editModalEl);
        if (deleteModalEl) deleteUserModal = new bootstrap.Modal(deleteModalEl);
        if (rotateModalEl) rotateSecretModal = new bootstrap.Modal(rotateModalEl);
        if (expiryModalEl) expiryModal = new bootstrap.Modal(expiryModalEl);
    }

    function setupJsonAutoIndent() {
        window.UICore.setupJsonAutoIndent(document.getElementById('policyEditorDocument'));
        window.UICore.setupJsonAutoIndent(document.getElementById('createUserPolicies'));
    }

    function setupCopyButtons() {
        document.querySelectorAll('.config-copy').forEach(function(button) {
            button.addEventListener('click', async function() {
                var targetId = button.dataset.copyTarget;
                var target = document.getElementById(targetId);
                if (!target) return;
                await window.UICore.copyToClipboard(target.innerText, button, 'Copy JSON');
            });
        });

        var accessKeyCopyButton = document.querySelector('[data-access-key-copy]');
        if (accessKeyCopyButton) {
            accessKeyCopyButton.addEventListener('click', async function() {
                var accessKeyInput = document.getElementById('disclosedAccessKeyValue');
                if (!accessKeyInput) return;
                await window.UICore.copyToClipboard(accessKeyInput.value, accessKeyCopyButton, 'Copy');
            });
        }

        var secretCopyButton = document.querySelector('[data-secret-copy]');
        if (secretCopyButton) {
            secretCopyButton.addEventListener('click', async function() {
                var secretInput = document.getElementById('disclosedSecretValue');
                if (!secretInput) return;
                await window.UICore.copyToClipboard(secretInput.value, secretCopyButton, 'Copy');
            });
        }
    }

    function getUserPolicies(accessKey) {
        var user = users.find(function(u) { return u.access_key === accessKey; });
        return user ? JSON.stringify(user.policies, null, 2) : '';
    }

    function applyPolicyTemplate(name, textareaEl) {
        if (policyTemplates[name] && textareaEl) {
            textareaEl.value = JSON.stringify(policyTemplates[name], null, 2);
        }
    }

    function setupPolicyEditor() {
        var userLabelEl = document.getElementById('policyEditorUserLabel');
        var userInputEl = document.getElementById('policyEditorUser');
        var textareaEl = document.getElementById('policyEditorDocument');

        document.querySelectorAll('[data-policy-template]').forEach(function(button) {
            button.addEventListener('click', function() {
                applyPolicyTemplate(button.dataset.policyTemplate, textareaEl);
            });
        });

        document.querySelectorAll('[data-policy-editor]').forEach(function(button) {
            button.addEventListener('click', function() {
                var key = button.getAttribute('data-access-key');
                if (!key) return;

                userLabelEl.textContent = key;
                userInputEl.value = key;
                textareaEl.value = getUserPolicies(key);

                policyModal.show();
            });
        });
    }

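    // Credential generators backed by crypto.getRandomValues: hex for access
    // keys, and base64url (the '+/' alphabet swapped for '-_', padding
    // stripped) for secrets so they stay URL- and copy/paste-safe.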
    function generateSecureHex(byteCount) {
        var arr = new Uint8Array(byteCount);
        crypto.getRandomValues(arr);
        return Array.from(arr).map(function(b) { return b.toString(16).padStart(2, '0'); }).join('');
    }

    function generateSecureBase64(byteCount) {
        var arr = new Uint8Array(byteCount);
        crypto.getRandomValues(arr);
        var binary = '';
        for (var i = 0; i < arr.length; i++) {
            binary += String.fromCharCode(arr[i]);
        }
        return btoa(binary).replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
    }

    function setupCreateUserModal() {
        var createUserPoliciesEl = document.getElementById('createUserPolicies');

        document.querySelectorAll('[data-create-policy-template]').forEach(function(button) {
            button.addEventListener('click', function() {
                applyPolicyTemplate(button.dataset.createPolicyTemplate, createUserPoliciesEl);
            });
        });

        var genAccessKeyBtn = document.getElementById('generateAccessKeyBtn');
        if (genAccessKeyBtn) {
            genAccessKeyBtn.addEventListener('click', function() {
                var input = document.getElementById('createUserAccessKey');
                if (input) input.value = generateSecureHex(8);
            });
        }

        var genSecretKeyBtn = document.getElementById('generateSecretKeyBtn');
        if (genSecretKeyBtn) {
            genSecretKeyBtn.addEventListener('click', function() {
                var input = document.getElementById('createUserSecretKey');
                if (input) input.value = generateSecureBase64(24);
            });
        }
    }

    function setupEditUserModal() {
        var editUserForm = document.getElementById('editUserForm');
        var editUserDisplayName = document.getElementById('editUserDisplayName');

        document.querySelectorAll('[data-edit-user]').forEach(function(btn) {
            btn.addEventListener('click', function() {
                var key = btn.dataset.editUser;
                var name = btn.dataset.displayName;
                currentEditKey = key;
                editUserDisplayName.value = name;
                editUserForm.action = endpoints.updateUser.replace('ACCESS_KEY', key);
                editUserModal.show();
            });
        });
    }

    function setupDeleteUserModal() {
        var deleteUserForm = document.getElementById('deleteUserForm');
        var deleteUserLabel = document.getElementById('deleteUserLabel');
        var deleteSelfWarning = document.getElementById('deleteSelfWarning');

        document.querySelectorAll('[data-delete-user]').forEach(function(btn) {
            btn.addEventListener('click', function() {
                var key = btn.dataset.deleteUser;
                currentDeleteKey = key;
                deleteUserLabel.textContent = key;
                deleteUserForm.action = endpoints.deleteUser.replace('ACCESS_KEY', key);

                if (key === currentUserKey) {
                    deleteSelfWarning.classList.remove('d-none');
                } else {
                    deleteSelfWarning.classList.add('d-none');
                }

                deleteUserModal.show();
            });
        });
    }

    function setupRotateSecretModal() {
        var rotateUserLabel = document.getElementById('rotateUserLabel');
        var confirmRotateBtn = document.getElementById('confirmRotateBtn');
        var rotateCancelBtn = document.getElementById('rotateCancelBtn');
        var rotateDoneBtn = document.getElementById('rotateDoneBtn');
        var rotateSecretConfirm = document.getElementById('rotateSecretConfirm');
        var rotateSecretResult = document.getElementById('rotateSecretResult');
        var newSecretKeyInput = document.getElementById('newSecretKey');
        var copyNewSecretBtn = document.getElementById('copyNewSecret');

        document.querySelectorAll('[data-rotate-user]').forEach(function(btn) {
            btn.addEventListener('click', function() {
                currentRotateKey = btn.dataset.rotateUser;
                rotateUserLabel.textContent = currentRotateKey;

                rotateSecretConfirm.classList.remove('d-none');
                rotateSecretResult.classList.add('d-none');
                confirmRotateBtn.classList.remove('d-none');
                rotateCancelBtn.classList.remove('d-none');
                rotateDoneBtn.classList.add('d-none');

                rotateSecretModal.show();
            });
        });

        if (confirmRotateBtn) {
            confirmRotateBtn.addEventListener('click', async function() {
                if (!currentRotateKey) return;

                window.UICore.setButtonLoading(confirmRotateBtn, true, 'Rotating...');

                try {
                    var url = endpoints.rotateSecret.replace('ACCESS_KEY', currentRotateKey);
                    var response = await fetch(url, {
                        method: 'POST',
                        headers: {
                            'Accept': 'application/json',
                            'X-CSRFToken': csrfToken
                        }
                    });

                    if (!response.ok) {
                        var data = await response.json();
                        throw new Error(data.error || 'Failed to rotate secret');
                    }

                    var data = await response.json();
                    newSecretKeyInput.value = data.secret_key;

                    rotateSecretConfirm.classList.add('d-none');
                    rotateSecretResult.classList.remove('d-none');
                    confirmRotateBtn.classList.add('d-none');
                    rotateCancelBtn.classList.add('d-none');
                    rotateDoneBtn.classList.remove('d-none');

                } catch (err) {
                    if (window.showToast) {
                        window.showToast(err.message, 'Error', 'danger');
                    }
                    rotateSecretModal.hide();
                } finally {
                    window.UICore.setButtonLoading(confirmRotateBtn, false);
                }
            });
        }

        if (copyNewSecretBtn) {
            copyNewSecretBtn.addEventListener('click', async function() {
                await window.UICore.copyToClipboard(newSecretKeyInput.value, copyNewSecretBtn, 'Copy');
            });
        }

        if (rotateDoneBtn) {
            rotateDoneBtn.addEventListener('click', function() {
                window.location.reload();
            });
        }
    }

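    // Populate the expiry modal for a user. <input type="datetime-local"> has
    // no timezone, so the stored timestamp is shifted by the local offset
    // before being sliced into the "YYYY-MM-DDTHH:MM" format the input expects.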
    function openExpiryModal(key, expiresAt) {
        currentExpiryKey = key;
        var label = document.getElementById('expiryUserLabel');
        var input = document.getElementById('expiryDateInput');
        var form = document.getElementById('expiryForm');
        if (label) label.textContent = key;
        if (expiresAt) {
            try {
                var dt = new Date(expiresAt);
                var local = new Date(dt.getTime() - dt.getTimezoneOffset() * 60000);
                if (input) input.value = local.toISOString().slice(0, 16);
            } catch (e) {
                if (input) input.value = '';
            }
        } else {
            if (input) input.value = '';
        }
        if (form) form.action = endpoints.updateExpiry.replace('ACCESS_KEY', key);
        var modalEl = document.getElementById('expiryModal');
        if (modalEl) {
            var modal = bootstrap.Modal.getOrCreateInstance(modalEl);
            modal.show();
        }
    }

    function setupExpiryModal() {
        document.querySelectorAll('[data-expiry-user]').forEach(function(btn) {
            btn.addEventListener('click', function(e) {
                e.preventDefault();
                openExpiryModal(btn.dataset.expiryUser, btn.dataset.expiresAt || '');
            });
        });

        document.querySelectorAll('[data-expiry-preset]').forEach(function(btn) {
            btn.addEventListener('click', function() {
                var preset = btn.dataset.expiryPreset;
                var input = document.getElementById('expiryDateInput');
                if (!input) return;
                if (preset === 'clear') {
                    input.value = '';
                    return;
                }
                var now = new Date();
                var ms = 0;
                if (preset === '1h') ms = 3600000;
                else if (preset === '24h') ms = 86400000;
                else if (preset === '7d') ms = 7 * 86400000;
                else if (preset === '30d') ms = 30 * 86400000;
                else if (preset === '90d') ms = 90 * 86400000;
                var future = new Date(now.getTime() + ms);
                var local = new Date(future.getTime() - future.getTimezoneOffset() * 60000);
                input.value = local.toISOString().slice(0, 16);
            });
        });

        var expiryForm = document.getElementById('expiryForm');
        if (expiryForm) {
            expiryForm.addEventListener('submit', function(e) {
                e.preventDefault();
                window.UICore.submitFormAjax(expiryForm, {
                    successMessage: 'Expiry updated',
                    onSuccess: function() {
                        var modalEl = document.getElementById('expiryModal');
                        if (modalEl) bootstrap.Modal.getOrCreateInstance(modalEl).hide();
                        window.location.reload();
                    }
                });
            });
        }
    }

function createUserCardHtml(accessKey, displayName, policies) {
|
||||
var admin = isAdminUser(policies);
|
||||
var cardClass = 'card h-100 iam-user-card' + (admin ? ' iam-admin-card' : '');
|
||||
var roleBadge = admin
|
||||
? '<span class="iam-role-badge iam-role-admin" data-role-badge>Admin</span>'
|
||||
: '<span class="iam-role-badge iam-role-user" data-role-badge>User</span>';
|
||||
|
||||
var policyBadges = '';
|
||||
if (policies && policies.length > 0) {
|
||||
policyBadges = policies.map(function(p) {
|
||||
var bucketLabel = getBucketLabel(p.bucket);
|
||||
var permLevel = getPermissionLevel(p.actions);
|
||||
return '<span class="iam-perm-badge">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
|
||||
'<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
|
||||
'</svg>' + window.UICore.escapeHtml(bucketLabel) + ' · ' + window.UICore.escapeHtml(permLevel) + '</span>';
|
||||
}).join('');
|
||||
} else {
|
||||
policyBadges = '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
|
||||
}
|
||||
|
||||
var esc = window.UICore.escapeHtml;
|
||||
return '<div class="col-md-6 col-xl-4 iam-user-item" data-display-name="' + esc(displayName.toLowerCase()) + '" data-access-key-filter="' + esc(accessKey.toLowerCase()) + '">' +
|
||||
'<div class="' + cardClass + '">' +
|
||||
'<div class="card-body">' +
|
||||
'<div class="d-flex align-items-start justify-content-between mb-3">' +
|
||||
'<div class="d-flex align-items-center gap-3 min-width-0 overflow-hidden">' +
|
||||
'<div class="user-avatar user-avatar-lg flex-shrink-0">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>' +
|
||||
'</svg></div>' +
|
||||
'<div class="min-width-0">' +
|
||||
'<div class="d-flex align-items-center gap-2 mb-0">' +
|
||||
'<h6 class="fw-semibold mb-0 text-truncate" title="' + esc(displayName) + '">' + esc(displayName) + '</h6>' +
|
||||
roleBadge +
|
||||
'</div>' +
|
||||
'<div class="d-flex align-items-center gap-1">' +
|
||||
'<code class="small text-muted text-truncate" title="' + esc(accessKey) + '">' + esc(accessKey) + '</code>' +
|
||||
'<button type="button" class="iam-copy-key" title="Copy access key" data-copy-access-key="' + esc(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>' +
|
||||
'<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>' +
|
||||
'</svg></button>' +
|
||||
'</div>' +
|
||||
'</div></div>' +
|
||||
'<div class="dropdown flex-shrink-0">' +
|
||||
'<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>' +
|
||||
'</svg></button>' +
|
||||
'<ul class="dropdown-menu dropdown-menu-end">' +
|
||||
'<li><button class="dropdown-item" type="button" data-edit-user="' + esc(accessKey) + '" data-display-name="' + esc(displayName) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/></svg>Edit Name</button></li>' +
|
||||
'<li><button class="dropdown-item" type="button" data-expiry-user="' + esc(accessKey) + '" data-expires-at="">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/><path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/></svg>Set Expiry</button></li>' +
|
||||
'<li><button class="dropdown-item" type="button" data-rotate-user="' + esc(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/><path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/></svg>Rotate Secret</button></li>' +
|
||||
'<li><hr class="dropdown-divider"></li>' +
|
||||
'<li><button class="dropdown-item text-danger" type="button" data-delete-user="' + esc(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16"><path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/><path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/></svg>Delete User</button></li>' +
|
||||
'</ul></div></div>' +
|
||||
'<div class="mb-3">' +
|
||||
'<div class="small text-muted mb-2">Bucket Permissions</div>' +
|
||||
'<div class="d-flex flex-wrap gap-1" data-policy-badges>' + policyBadges + '</div></div>' +
|
||||
'<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-access-key="' + esc(accessKey) + '">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/><path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/></svg>Manage Policies</button>' +
|
||||
'</div></div></div>';
|
||||
}
|
||||
|
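// createUserCardHtml above assembles markup by string concatenation, so every
// interpolated value is routed through window.UICore.escapeHtml to keep user-controlled
// display names and access keys XSS-safe, e.g.:
//   window.UICore.escapeHtml('<img onerror=alert(1)>')  // => '&lt;img onerror=alert(1)&gt;'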
||||
function attachUserCardHandlers(cardElement, accessKey, displayName) {
|
||||
var editBtn = cardElement.querySelector('[data-edit-user]');
|
||||
if (editBtn) {
|
||||
editBtn.addEventListener('click', function() {
|
||||
currentEditKey = accessKey;
|
||||
document.getElementById('editUserDisplayName').value = displayName;
|
||||
document.getElementById('editUserForm').action = endpoints.updateUser.replace('ACCESS_KEY', accessKey);
|
||||
editUserModal.show();
|
||||
});
|
||||
}
|
||||
|
||||
var deleteBtn = cardElement.querySelector('[data-delete-user]');
|
||||
if (deleteBtn) {
|
||||
deleteBtn.addEventListener('click', function() {
|
||||
currentDeleteKey = accessKey;
|
||||
document.getElementById('deleteUserLabel').textContent = accessKey;
|
||||
document.getElementById('deleteUserForm').action = endpoints.deleteUser.replace('ACCESS_KEY', accessKey);
|
||||
var deleteSelfWarning = document.getElementById('deleteSelfWarning');
|
||||
if (accessKey === currentUserKey) {
|
||||
deleteSelfWarning.classList.remove('d-none');
|
||||
} else {
|
||||
deleteSelfWarning.classList.add('d-none');
|
||||
}
|
||||
deleteUserModal.show();
|
||||
});
|
||||
}
|
||||
|
||||
var rotateBtn = cardElement.querySelector('[data-rotate-user]');
|
||||
if (rotateBtn) {
|
||||
rotateBtn.addEventListener('click', function() {
|
||||
currentRotateKey = accessKey;
|
||||
document.getElementById('rotateUserLabel').textContent = accessKey;
|
||||
document.getElementById('rotateSecretConfirm').classList.remove('d-none');
|
||||
document.getElementById('rotateSecretResult').classList.add('d-none');
|
||||
document.getElementById('confirmRotateBtn').classList.remove('d-none');
|
||||
document.getElementById('rotateCancelBtn').classList.remove('d-none');
|
||||
document.getElementById('rotateDoneBtn').classList.add('d-none');
|
||||
rotateSecretModal.show();
|
||||
});
|
||||
}
|
||||
|
||||
var expiryBtn = cardElement.querySelector('[data-expiry-user]');
|
||||
if (expiryBtn) {
|
||||
expiryBtn.addEventListener('click', function(e) {
|
||||
e.preventDefault();
|
||||
openExpiryModal(accessKey, '');
|
||||
});
|
||||
}
|
||||
|
||||
var policyBtn = cardElement.querySelector('[data-policy-editor]');
|
||||
if (policyBtn) {
|
||||
policyBtn.addEventListener('click', function() {
|
||||
document.getElementById('policyEditorUserLabel').textContent = accessKey;
|
||||
document.getElementById('policyEditorUser').value = accessKey;
|
||||
document.getElementById('policyEditorDocument').value = getUserPolicies(accessKey);
|
||||
policyModal.show();
|
||||
});
|
||||
}
|
||||
|
||||
var copyBtn = cardElement.querySelector('[data-copy-access-key]');
|
||||
if (copyBtn) {
|
||||
copyBtn.addEventListener('click', function() {
|
||||
copyAccessKey(copyBtn);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function updateUserCount() {
|
||||
var countEl = document.querySelector('.card-header .text-muted.small');
|
||||
if (countEl) {
|
||||
var count = document.querySelectorAll('.iam-user-card').length;
|
||||
countEl.textContent = count + ' user' + (count !== 1 ? 's' : '') + ' configured';
|
||||
}
|
||||
}
|
||||
|
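// The form handlers below share one pattern: intercept submit, post via
// window.UICore.submitFormAjax, then either patch the affected card in onSuccess or
// fall back to a full reload where that is simpler. Minimal sketch of the shape
// (hypothetical form element):
//   form.addEventListener('submit', function(e) {
//     e.preventDefault();
//     window.UICore.submitFormAjax(form, {
//       successMessage: 'Saved',
//       onSuccess: function(data) { /* update the DOM in place */ }
//     });
//   });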
||||
function setupFormHandlers() {
|
||||
var createUserForm = document.querySelector('#createUserModal form');
|
||||
if (createUserForm) {
|
||||
createUserForm.addEventListener('submit', function(e) {
|
||||
e.preventDefault();
|
||||
window.UICore.submitFormAjax(createUserForm, {
|
||||
successMessage: 'User created',
|
||||
onSuccess: function(data) {
|
||||
var modal = bootstrap.Modal.getInstance(document.getElementById('createUserModal'));
|
||||
if (modal) modal.hide();
|
||||
createUserForm.reset();
|
||||
|
||||
var existingAlert = document.querySelector('.alert.alert-info.border-0.shadow-sm');
|
||||
if (existingAlert) existingAlert.remove();
|
||||
|
||||
if (data.secret_key) {
|
||||
var alertHtml = '<div class="alert alert-info border-0 shadow-sm mb-4" role="alert" id="newUserSecretAlert">' +
|
||||
'<div class="d-flex align-items-start gap-2 mb-2">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="bi bi-key flex-shrink-0 mt-1" viewBox="0 0 16 16">' +
|
||||
'<path d="M0 8a4 4 0 0 1 7.465-2H14a.5.5 0 0 1 .354.146l1.5 1.5a.5.5 0 0 1 0 .708l-1.5 1.5a.5.5 0 0 1-.708 0L13 9.207l-.646.647a.5.5 0 0 1-.708 0L11 9.207l-.646.647a.5.5 0 0 1-.708 0L9 9.207l-.646.647A.5.5 0 0 1 8 10h-.535A4 4 0 0 1 0 8zm4-3a3 3 0 1 0 2.712 4.285A.5.5 0 0 1 7.163 9h.63l.853-.854a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.793-.793-1-1h-6.63a.5.5 0 0 1-.451-.285A3 3 0 0 0 4 5z"/><path d="M4 8a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>' +
|
||||
'</svg>' +
|
||||
'<div class="flex-grow-1">' +
|
||||
'<div class="fw-semibold">New user created: <code>' + window.UICore.escapeHtml(data.access_key) + '</code></div>' +
|
||||
'<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>' +
|
||||
'</div>' +
|
||||
'<button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button>' +
|
||||
'</div>' +
|
||||
'<div class="input-group mb-2">' +
|
||||
'<span class="input-group-text"><strong>Access key</strong></span>' +
|
||||
'<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.access_key) + '" readonly />' +
|
||||
'<button class="btn btn-outline-primary" type="button" id="copyNewUserAccessKey">Copy</button>' +
|
||||
'</div>' +
|
||||
'<div class="input-group">' +
|
||||
'<span class="input-group-text"><strong>Secret key</strong></span>' +
|
||||
'<input class="form-control font-monospace" type="text" value="' + window.UICore.escapeHtml(data.secret_key) + '" readonly id="newUserSecret" />' +
|
||||
'<button class="btn btn-outline-primary" type="button" id="copyNewUserSecret">Copy</button>' +
|
||||
'</div></div>';
|
||||
var container = document.querySelector('.page-header');
|
||||
if (container) {
|
||||
container.insertAdjacentHTML('afterend', alertHtml);
|
||||
document.getElementById('copyNewUserAccessKey').addEventListener('click', async function() {
|
||||
await window.UICore.copyToClipboard(data.access_key, this, 'Copy');
|
||||
});
|
||||
document.getElementById('copyNewUserSecret').addEventListener('click', async function() {
|
||||
await window.UICore.copyToClipboard(data.secret_key, this, 'Copy');
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
var usersGrid = document.querySelector('.row.g-3');
|
||||
var emptyState = document.querySelector('.empty-state');
|
||||
if (emptyState) {
|
||||
var emptyCol = emptyState.closest('.col-12');
|
||||
if (emptyCol) emptyCol.remove();
|
||||
if (!usersGrid) {
|
||||
var cardBody = document.querySelector('.card-body.px-4.pb-4');
|
||||
if (cardBody) {
|
||||
cardBody.innerHTML = '<div class="row g-3"></div>';
|
||||
usersGrid = cardBody.querySelector('.row.g-3');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (usersGrid) {
|
||||
var cardHtml = createUserCardHtml(data.access_key, data.display_name, data.policies);
|
||||
usersGrid.insertAdjacentHTML('beforeend', cardHtml);
|
||||
var newCard = usersGrid.lastElementChild;
|
||||
attachUserCardHandlers(newCard, data.access_key, data.display_name);
|
||||
users.push({
|
||||
access_key: data.access_key,
|
||||
display_name: data.display_name,
|
||||
policies: data.policies || []
|
||||
});
|
||||
updateUserCount();
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
var policyEditorForm = document.getElementById('policyEditorForm');
|
||||
if (policyEditorForm) {
|
||||
policyEditorForm.addEventListener('submit', function(e) {
|
||||
e.preventDefault();
|
||||
var userInputEl = document.getElementById('policyEditorUser');
|
||||
var key = userInputEl.value;
|
||||
if (!key) return;
|
||||
|
||||
var template = policyEditorForm.dataset.actionTemplate;
|
||||
policyEditorForm.action = template.replace('ACCESS_KEY_PLACEHOLDER', key);
|
||||
|
||||
window.UICore.submitFormAjax(policyEditorForm, {
|
||||
successMessage: 'Policies updated',
|
||||
onSuccess: function(data) {
|
||||
policyModal.hide();
|
||||
|
||||
var userCard = document.querySelector('[data-access-key="' + key + '"]');
|
||||
if (userCard) {
|
||||
var cardEl = userCard.closest('.iam-user-card');
|
||||
var badgeContainer = cardEl ? cardEl.querySelector('[data-policy-badges]') : null;
|
||||
if (badgeContainer && data.policies) {
|
||||
var badges = data.policies.map(function(p) {
|
||||
var bl = getBucketLabel(p.bucket);
|
||||
var pl = getPermissionLevel(p.actions);
|
||||
return '<span class="iam-perm-badge">' +
|
||||
'<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">' +
|
||||
'<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>' +
|
||||
'</svg>' + window.UICore.escapeHtml(bl) + ' · ' + window.UICore.escapeHtml(pl) + '</span>';
|
||||
}).join('');
|
||||
badgeContainer.innerHTML = badges || '<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>';
|
||||
}
|
||||
if (cardEl) {
|
||||
var nowAdmin = isAdminUser(data.policies);
|
||||
cardEl.classList.toggle('iam-admin-card', nowAdmin);
|
||||
var roleBadgeEl = cardEl.querySelector('[data-role-badge]');
|
||||
if (roleBadgeEl) {
|
||||
if (nowAdmin) {
|
||||
roleBadgeEl.className = 'iam-role-badge iam-role-admin';
|
||||
roleBadgeEl.textContent = 'Admin';
|
||||
} else {
|
||||
roleBadgeEl.className = 'iam-role-badge iam-role-user';
|
||||
roleBadgeEl.textContent = 'User';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var userIndex = users.findIndex(function(u) { return u.access_key === key; });
|
||||
if (userIndex >= 0 && data.policies) {
|
||||
users[userIndex].policies = data.policies;
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
var editUserForm = document.getElementById('editUserForm');
|
||||
if (editUserForm) {
|
||||
editUserForm.addEventListener('submit', function(e) {
|
||||
e.preventDefault();
|
||||
var key = currentEditKey;
|
||||
window.UICore.submitFormAjax(editUserForm, {
|
||||
successMessage: 'User updated',
|
||||
onSuccess: function(data) {
|
||||
editUserModal.hide();
|
||||
|
||||
var newName = data.display_name || document.getElementById('editUserDisplayName').value;
|
||||
var editBtn = document.querySelector('[data-edit-user="' + key + '"]');
|
||||
if (editBtn) {
|
||||
editBtn.setAttribute('data-display-name', newName);
|
||||
var card = editBtn.closest('.iam-user-card');
|
||||
if (card) {
|
||||
var nameEl = card.querySelector('h6');
|
||||
if (nameEl) {
|
||||
nameEl.textContent = newName;
|
||||
nameEl.title = newName;
|
||||
}
|
||||
var itemWrapper = card.closest('.iam-user-item');
|
||||
if (itemWrapper) {
|
||||
itemWrapper.setAttribute('data-display-name', newName.toLowerCase());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var userIndex = users.findIndex(function(u) { return u.access_key === key; });
|
||||
if (userIndex >= 0) {
|
||||
users[userIndex].display_name = newName;
|
||||
}
|
||||
|
||||
if (key === currentUserKey) {
|
||||
document.querySelectorAll('.sidebar-user .user-name').forEach(function(el) {
|
||||
var truncated = newName.length > 16 ? newName.substring(0, 16) + '...' : newName;
|
||||
el.textContent = truncated;
|
||||
el.title = newName;
|
||||
});
|
||||
document.querySelectorAll('.sidebar-user[data-username]').forEach(function(el) {
|
||||
el.setAttribute('data-username', newName);
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
var deleteUserForm = document.getElementById('deleteUserForm');
|
||||
if (deleteUserForm) {
|
||||
deleteUserForm.addEventListener('submit', function(e) {
|
||||
e.preventDefault();
|
||||
var key = currentDeleteKey;
|
||||
window.UICore.submitFormAjax(deleteUserForm, {
|
||||
successMessage: 'User deleted',
|
||||
onSuccess: function(data) {
|
||||
deleteUserModal.hide();
|
||||
|
||||
if (key === currentUserKey) {
|
||||
window.location.href = '/ui/';
|
||||
return;
|
||||
}
|
||||
|
||||
var deleteBtn = document.querySelector('[data-delete-user="' + key + '"]');
|
||||
if (deleteBtn) {
|
||||
var cardCol = deleteBtn.closest('[class*="col-"]');
|
||||
if (cardCol) {
|
||||
cardCol.remove();
|
||||
}
|
||||
}
|
||||
|
||||
users = users.filter(function(u) { return u.access_key !== key; });
|
||||
updateUserCount();
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
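// setupSearch filters entirely client-side: each card wrapper carries pre-lowercased
// data-display-name / data-access-key-filter attributes (written by createUserCardHtml),
// so every keystroke reduces to two indexOf checks per card with no server round-trip.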
||||
function setupSearch() {
|
||||
var searchInput = document.getElementById('iam-user-search');
|
||||
if (!searchInput) return;
|
||||
|
||||
searchInput.addEventListener('input', function() {
|
||||
var query = searchInput.value.toLowerCase().trim();
|
||||
var items = document.querySelectorAll('.iam-user-item');
|
||||
var noResults = document.getElementById('iam-no-results');
|
||||
var visibleCount = 0;
|
||||
|
||||
items.forEach(function(item) {
|
||||
var name = item.getAttribute('data-display-name') || '';
|
||||
var key = item.getAttribute('data-access-key-filter') || '';
|
||||
var matches = !query || name.indexOf(query) >= 0 || key.indexOf(query) >= 0;
|
||||
item.classList.toggle('d-none', !matches);
|
||||
if (matches) visibleCount++;
|
||||
});
|
||||
|
||||
if (noResults) {
|
||||
noResults.classList.toggle('d-none', visibleCount > 0);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
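// copyAccessKey uses the async Clipboard API, which is only available in secure
// contexts (HTTPS or localhost); the empty .catch() below swallows permission and
// insecure-context failures rather than surfacing an error state.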
||||
function copyAccessKey(btn) {
|
||||
var key = btn.getAttribute('data-copy-access-key');
|
||||
if (!key) return;
|
||||
var originalHtml = btn.innerHTML;
|
||||
navigator.clipboard.writeText(key).then(function() {
|
||||
btn.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16"><path d="M13.854 3.646a.5.5 0 0 1 0 .708l-7 7a.5.5 0 0 1-.708 0l-3.5-3.5a.5.5 0 1 1 .708-.708L6.5 10.293l6.646-6.647a.5.5 0 0 1 .708 0z"/></svg>';
|
||||
btn.style.color = '#22c55e';
|
||||
setTimeout(function() {
|
||||
btn.innerHTML = originalHtml;
|
||||
btn.style.color = '';
|
||||
}, 1200);
|
||||
}).catch(function() {});
|
||||
}
|
||||
|
||||
function setupCopyAccessKeyButtons() {
|
||||
document.querySelectorAll('[data-copy-access-key]').forEach(function(btn) {
|
||||
btn.addEventListener('click', function() {
|
||||
copyAccessKey(btn);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
init: init
|
||||
};
|
||||
})();
|
||||
328
python/static/js/ui-core.js
Normal file
@@ -0,0 +1,328 @@
|
||||
window.UICore = (function() {
|
||||
'use strict';
|
||||
|
||||
function getCsrfToken() {
|
||||
const meta = document.querySelector('meta[name="csrf-token"]');
|
||||
return meta ? meta.getAttribute('content') : '';
|
||||
}
|
||||
|
||||
function formatBytes(bytes) {
|
||||
if (!Number.isFinite(bytes)) return bytes + ' bytes';
|
||||
const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
|
||||
let i = 0;
|
||||
let size = bytes;
|
||||
while (size >= 1024 && i < units.length - 1) {
|
||||
size /= 1024;
|
||||
i++;
|
||||
}
|
||||
return size.toFixed(i === 0 ? 0 : 1) + ' ' + units[i];
|
||||
}
|
||||
|
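// Worked examples for formatBytes:
//   formatBytes(0)           // => '0 bytes'   (values below 1 KB keep 0 decimals)
//   formatBytes(1536)        // => '1.5 KB'
//   formatBytes(1073741824)  // => '1.0 GB'
//   formatBytes(NaN)         // => 'NaN bytes' (non-finite input is passed through)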
||||
function escapeHtml(value) {
|
||||
if (value === null || value === undefined) return '';
|
||||
return String(value)
|
||||
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
|
||||
}
|
||||
|
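// Ampersands are escaped first so the entities produced by the later replacements are
// not themselves re-escaped, e.g.:
//   escapeHtml('a & "b"')  // => 'a &amp; &quot;b&quot;'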
||||
async function submitFormAjax(form, options) {
|
||||
options = options || {};
|
||||
var onSuccess = options.onSuccess || function() {};
|
||||
var onError = options.onError || function() {};
|
||||
var successMessage = options.successMessage || 'Operation completed';
|
||||
|
||||
var formData = new FormData(form);
|
||||
var csrfToken = getCsrfToken();
|
||||
var submitBtn = form.querySelector('[type="submit"]');
|
||||
var originalHtml = submitBtn ? submitBtn.innerHTML : '';
|
||||
|
||||
try {
|
||||
if (submitBtn) {
|
||||
submitBtn.disabled = true;
|
||||
submitBtn.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>Saving...';
|
||||
}
|
||||
|
||||
var formAction = form.getAttribute('action') || form.action;
|
||||
var response = await fetch(formAction, {
|
||||
method: form.getAttribute('method') || 'POST',
|
||||
headers: {
|
||||
'X-CSRFToken': csrfToken,
|
||||
'Accept': 'application/json',
|
||||
'X-Requested-With': 'XMLHttpRequest'
|
||||
},
|
||||
body: formData,
|
||||
redirect: 'follow'
|
||||
});
|
||||
|
||||
var contentType = response.headers.get('content-type') || '';
|
||||
if (!contentType.includes('application/json')) {
|
||||
throw new Error('Server returned an unexpected response. Please try again.');
|
||||
}
|
||||
|
||||
var data = await response.json();
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || 'HTTP ' + response.status);
|
||||
}
|
||||
|
||||
window.showToast(data.message || successMessage, 'Success', 'success');
|
||||
onSuccess(data);
|
||||
|
||||
} catch (err) {
|
||||
window.showToast(err.message, 'Error', 'error');
|
||||
onError(err);
|
||||
} finally {
|
||||
if (submitBtn) {
|
||||
submitBtn.disabled = false;
|
||||
submitBtn.innerHTML = originalHtml;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
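// Minimal usage sketch for submitFormAjax (assumes a hypothetical form whose endpoint
// answers with JSON; non-JSON and non-2xx responses both land in onError):
//   var form = document.getElementById('exampleForm');  // hypothetical id
//   form.addEventListener('submit', function(e) {
//     e.preventDefault();
//     submitFormAjax(form, {
//       successMessage: 'Saved',
//       onSuccess: function(data) { console.log('server response', data); },
//       onError: function(err) { console.warn(err.message); }
//     });
//   });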
||||
function PollingManager() {
|
||||
this.intervals = {};
|
||||
this.callbacks = {};
|
||||
this.timers = {};
|
||||
this.defaults = {
|
||||
replication: 30000,
|
||||
lifecycle: 60000,
|
||||
connectionHealth: 60000,
|
||||
bucketStats: 120000
|
||||
};
|
||||
this._loadSettings();
|
||||
}
|
||||
|
||||
PollingManager.prototype._loadSettings = function() {
|
||||
try {
|
||||
var stored = localStorage.getItem('myfsio-polling-intervals');
|
||||
if (stored) {
|
||||
var settings = JSON.parse(stored);
|
||||
for (var key in settings) {
|
||||
if (settings.hasOwnProperty(key)) {
|
||||
this.defaults[key] = settings[key];
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
console.warn('Failed to load polling settings:', e);
|
||||
}
|
||||
};
|
||||
|
||||
PollingManager.prototype.saveSettings = function(settings) {
|
||||
try {
|
||||
for (var key in settings) {
|
||||
if (settings.hasOwnProperty(key)) {
|
||||
this.defaults[key] = settings[key];
|
||||
}
|
||||
}
|
||||
localStorage.setItem('myfsio-polling-intervals', JSON.stringify(this.defaults));
|
||||
} catch (e) {
|
||||
console.warn('Failed to save polling settings:', e);
|
||||
}
|
||||
};
|
||||
|
||||
PollingManager.prototype.start = function(key, callback, interval) {
|
||||
this.stop(key);
|
||||
var ms = interval !== undefined ? interval : (this.defaults[key] || 30000);
|
||||
if (ms <= 0) return;
|
||||
|
||||
this.callbacks[key] = callback;
|
||||
this.intervals[key] = ms;
|
||||
|
||||
callback();
|
||||
|
||||
var self = this;
|
||||
this.timers[key] = setInterval(function() {
|
||||
if (!document.hidden) {
|
||||
callback();
|
||||
}
|
||||
}, ms);
|
||||
};
|
||||
|
||||
PollingManager.prototype.stop = function(key) {
|
||||
if (this.timers[key]) {
|
||||
clearInterval(this.timers[key]);
|
||||
delete this.timers[key];
|
||||
}
|
||||
};
|
||||
|
||||
PollingManager.prototype.stopAll = function() {
|
||||
for (var key in this.timers) {
|
||||
if (this.timers.hasOwnProperty(key)) {
|
||||
clearInterval(this.timers[key]);
|
||||
}
|
||||
}
|
||||
this.timers = {};
|
||||
};
|
||||
|
||||
PollingManager.prototype.updateInterval = function(key, newInterval) {
|
||||
var callback = this.callbacks[key];
|
||||
this.defaults[key] = newInterval;
|
||||
this.saveSettings(this.defaults);
|
||||
if (callback) {
|
||||
this.start(key, callback, newInterval);
|
||||
}
|
||||
};
|
||||
|
||||
PollingManager.prototype.getSettings = function() {
|
||||
var result = {};
|
||||
for (var key in this.defaults) {
|
||||
if (this.defaults.hasOwnProperty(key)) {
|
||||
result[key] = this.defaults[key];
|
||||
}
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
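// Usage sketch for the shared instance created below ('replication' is one of the keys
// seeded in this.defaults; the callback fires once immediately, then on every tick
// while the tab is visible):
//   pollingManager.start('replication', function() { /* fetch + render status */ });
//   pollingManager.updateInterval('replication', 10000);  // persisted to localStorage
//   pollingManager.stop('replication');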
||||
var pollingManager = new PollingManager();
|
||||
|
||||
document.addEventListener('visibilitychange', function() {
|
||||
if (document.hidden) {
|
||||
pollingManager.stopAll();
|
||||
} else {
|
||||
for (var key in pollingManager.callbacks) {
|
||||
if (pollingManager.callbacks.hasOwnProperty(key)) {
|
||||
pollingManager.start(key, pollingManager.callbacks[key], pollingManager.intervals[key]);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
window.addEventListener('beforeunload', function() {
|
||||
pollingManager.stopAll();
|
||||
});
|
||||
|
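// Design note: polling halts entirely while the tab is hidden and restarts when it
// becomes visible again; because start() invokes the callback immediately, returning
// to the tab also triggers an immediate refresh.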
||||
return {
|
||||
getCsrfToken: getCsrfToken,
|
||||
formatBytes: formatBytes,
|
||||
escapeHtml: escapeHtml,
|
||||
submitFormAjax: submitFormAjax,
|
||||
PollingManager: PollingManager,
|
||||
pollingManager: pollingManager
|
||||
};
|
||||
})();
|
||||
|
||||
window.pollingManager = window.UICore.pollingManager;
|
||||
|
||||
window.UICore.copyToClipboard = async function(text, button, originalText) {
|
||||
try {
|
||||
await navigator.clipboard.writeText(text);
|
||||
if (button) {
|
||||
var prevText = button.textContent;
|
||||
button.textContent = 'Copied!';
|
||||
setTimeout(function() {
|
||||
button.textContent = originalText || prevText;
|
||||
}, 1500);
|
||||
}
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.error('Copy failed:', err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
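// copyToClipboard resolves to true/false rather than throwing, and briefly swaps the
// button label to 'Copied!' when a button element is supplied. Sketch, inside an
// async handler:
//   var ok = await window.UICore.copyToClipboard(secretValue, btn, 'Copy');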
||||
window.UICore.setButtonLoading = function(button, isLoading, loadingText) {
|
||||
if (!button) return;
|
||||
if (isLoading) {
|
||||
button._originalHtml = button.innerHTML;
|
||||
button._originalDisabled = button.disabled;
|
||||
button.disabled = true;
|
||||
button.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>' + (loadingText || 'Loading...');
|
||||
} else {
|
||||
button.disabled = button._originalDisabled || false;
|
||||
button.innerHTML = button._originalHtml || button.innerHTML;
|
||||
}
|
||||
};
|
||||
|
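// setButtonLoading stashes the original markup and disabled state on the element
// itself and restores them on the matching false call. Sketch with a hypothetical
// button element:
//   window.UICore.setButtonLoading(btn, true, 'Saving...');
//   // ... await some work ...
//   window.UICore.setButtonLoading(btn, false);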
||||
window.UICore.updateBadgeCount = function(selector, count, singular, plural) {
|
||||
var badge = document.querySelector(selector);
|
||||
if (badge) {
|
||||
var label = count === 1 ? (singular || '') : (plural || 's');
|
||||
badge.textContent = count + ' ' + label;
|
||||
}
|
||||
};
|
||||
|
||||
window.UICore.setupJsonAutoIndent = function(textarea) {
|
||||
if (!textarea) return;
|
||||
|
||||
textarea.addEventListener('keydown', function(e) {
|
||||
if (e.key === 'Enter') {
|
||||
e.preventDefault();
|
||||
|
||||
var start = this.selectionStart;
|
||||
var end = this.selectionEnd;
|
||||
var value = this.value;
|
||||
|
||||
var lineStart = value.lastIndexOf('\n', start - 1) + 1;
|
||||
var currentLine = value.substring(lineStart, start);
|
||||
|
||||
var indentMatch = currentLine.match(/^(\s*)/);
|
||||
var indent = indentMatch ? indentMatch[1] : '';
|
||||
|
||||
var trimmedLine = currentLine.trim();
|
||||
var lastChar = trimmedLine.slice(-1);
|
||||
|
||||
var newIndent = indent;
|
||||
var insertAfter = '';
|
||||
|
||||
if (lastChar === '{' || lastChar === '[') {
|
||||
newIndent = indent + ' ';
|
||||
|
||||
var charAfterCursor = value.substring(start, start + 1).trim();
|
||||
if ((lastChar === '{' && charAfterCursor === '}') ||
|
||||
(lastChar === '[' && charAfterCursor === ']')) {
|
||||
insertAfter = '\n' + indent;
|
||||
}
|
||||
} else if (lastChar === ',' || lastChar === ':') {
|
||||
newIndent = indent;
|
||||
}
|
||||
|
||||
var insertion = '\n' + newIndent + insertAfter;
|
||||
var newValue = value.substring(0, start) + insertion + value.substring(end);
|
||||
|
||||
this.value = newValue;
|
||||
|
||||
var newCursorPos = start + 1 + newIndent.length;
|
||||
this.selectionStart = this.selectionEnd = newCursorPos;
|
||||
|
||||
this.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
}
|
||||
|
||||
if (e.key === 'Tab') {
|
||||
e.preventDefault();
|
||||
var start = this.selectionStart;
|
||||
var end = this.selectionEnd;
|
||||
|
||||
if (e.shiftKey) {
|
||||
var lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
|
||||
var lineContent = this.value.substring(lineStart, start);
|
||||
if (lineContent.startsWith(' ')) {
|
||||
this.value = this.value.substring(0, lineStart) +
|
||||
this.value.substring(lineStart + 2);
|
||||
this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
|
||||
}
|
||||
} else {
|
||||
this.value = this.value.substring(0, start) + ' ' + this.value.substring(end);
|
||||
this.selectionStart = this.selectionEnd = start + 2;
|
||||
}
|
||||
|
||||
this.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
}
|
||||
});
|
||||
};
|
||||
|
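// setupJsonAutoIndent behavior summary: Enter preserves the current line's indentation
// (adding two spaces after an opening '{' or '[', and dropping a matching closer onto
// its own line), Tab inserts two spaces, Shift+Tab removes two leading spaces, and each
// edit dispatches a synthetic 'input' event so other listeners stay in sync.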
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
var flashMessage = sessionStorage.getItem('flashMessage');
|
||||
if (flashMessage) {
|
||||
sessionStorage.removeItem('flashMessage');
|
||||
try {
|
||||
var msg = JSON.parse(flashMessage);
|
||||
if (window.showToast) {
|
||||
window.showToast(msg.body || msg.title, msg.title, msg.variant || 'info');
|
||||
}
|
||||
} catch (e) {}
|
||||
}
|
||||
});
|
||||
446
python/templates/base.html
Normal file
@@ -0,0 +1,446 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
{% if principal %}<meta name="csrf-token" content="{{ csrf_token() }}" />{% endif %}
|
||||
<title>MyFSIO Console</title>
|
||||
<link rel="icon" type="image/png" href="{{ url_for('static', filename='images/MyFSIO.png') }}" />
|
||||
<link rel="icon" type="image/x-icon" href="{{ url_for('static', filename='images/MyFSIO.ico') }}" />
|
||||
<link
|
||||
href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.2/dist/css/bootstrap.min.css"
|
||||
rel="stylesheet"
|
||||
integrity="sha384-T3c6CoIi6uLrA9TneNEoa7RxnatzjcDSCmG1MXxSR1GAsXEV/Dwwykc2MPK8M2HN"
|
||||
crossorigin="anonymous"
|
||||
/>
|
||||
<script>
|
||||
(function () {
|
||||
try {
|
||||
const stored = localStorage.getItem('myfsio-theme');
|
||||
const theme = stored === 'dark' ? 'dark' : 'light';
|
||||
document.documentElement.dataset.bsTheme = theme;
|
||||
document.documentElement.dataset.theme = theme;
|
||||
} catch (err) {
|
||||
document.documentElement.dataset.bsTheme = 'light';
|
||||
document.documentElement.dataset.theme = 'light';
|
||||
}
|
||||
try {
|
||||
if (localStorage.getItem('myfsio-sidebar-collapsed') === 'true') {
|
||||
document.documentElement.classList.add('sidebar-will-collapse');
|
||||
}
|
||||
} catch (err) {}
|
||||
})();
|
||||
</script>
|
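<!-- The inline script above runs synchronously in <head>, so the saved theme and
     collapsed-sidebar state are applied before first paint, avoiding a flash of the
     wrong theme between page loads. -->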
||||
<link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}" />
|
||||
</head>
|
||||
<body>
|
||||
<header class="mobile-header d-lg-none">
|
||||
<button class="sidebar-toggle-btn" type="button" data-bs-toggle="offcanvas" data-bs-target="#mobileSidebar" aria-controls="mobileSidebar" aria-label="Toggle navigation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M2.5 12a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<a class="mobile-brand" href="{{ url_for('ui.buckets_overview') }}">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" width="28" height="28" />
|
||||
<span>MyFSIO</span>
|
||||
</a>
|
||||
<button class="theme-toggle-mobile" type="button" id="themeToggleMobile" aria-label="Toggle dark mode">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleSunMobile" viewBox="0 0 16 16">
|
||||
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleMoonMobile" viewBox="0 0 16 16">
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</header>
|
||||
|
||||
<div class="offcanvas offcanvas-start sidebar-offcanvas" tabindex="-1" id="mobileSidebar" aria-labelledby="mobileSidebarLabel">
|
||||
<div class="offcanvas-header sidebar-header">
|
||||
<a class="sidebar-brand" href="{{ url_for('ui.buckets_overview') }}">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</a>
|
||||
<button type="button" class="btn-close btn-close-white" data-bs-dismiss="offcanvas" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="offcanvas-body sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for('ui.buckets_overview') }}" class="sidebar-link {% if request.endpoint == 'ui.buckets_overview' or request.endpoint == 'ui.bucket_detail' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span>Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.iam_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.iam_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<span>IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.connections_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.connections_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
<span>Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.metrics_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.metrics_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span>Metrics</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.sites_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.sites_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
<span>Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span>Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span>System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for('ui.docs_page') }}" class="sidebar-link {% if request.endpoint == 'ui.docs_page' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span>Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
|
||||
{% if principal %}
|
||||
<div class="sidebar-footer">
|
||||
<div class="sidebar-user">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
|
||||
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div class="user-info">
|
||||
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(16, true) }}</div>
|
||||
<div class="user-key">{{ principal.access_key | truncate(12, true) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.logout') }}" class="w-100">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<button class="sidebar-logout-btn" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
|
||||
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
|
||||
</svg>
|
||||
<span>Sign out</span>
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<aside class="sidebar d-none d-lg-flex" id="desktopSidebar">
|
||||
<div class="sidebar-header">
|
||||
<div class="sidebar-brand" id="sidebarBrand">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</div>
|
||||
<button class="sidebar-collapse-btn" type="button" id="sidebarCollapseBtn" aria-label="Collapse sidebar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
<div class="sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for('ui.buckets_overview') }}" class="sidebar-link {% if request.endpoint == 'ui.buckets_overview' or request.endpoint == 'ui.bucket_detail' %}active{% endif %}" data-tooltip="Buckets">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.iam_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.iam_dashboard' %}active{% endif %}" data-tooltip="IAM">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.connections_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.connections_dashboard' %}active{% endif %}" data-tooltip="Connections">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.metrics_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.metrics_dashboard' %}active{% endif %}" data-tooltip="Metrics">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Metrics</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.sites_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.sites_dashboard' %}active{% endif %}" data-tooltip="Sites">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Sites</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if website_hosting_nav %}
|
||||
<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}" data-tooltip="Domains">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
|
||||
<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Domains</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}" data-tooltip="System">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">System</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for('ui.docs_page') }}" class="sidebar-link {% if request.endpoint == 'ui.docs_page' %}active{% endif %}" data-tooltip="Documentation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
</div>
<div class="sidebar-footer">
<button class="theme-toggle-sidebar" type="button" id="themeToggle" aria-label="Toggle dark mode">
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleSun" viewBox="0 0 16 16">
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleMoon" viewBox="0 0 16 16">
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
</svg>
<span class="theme-toggle-text">Toggle theme</span>
</button>
{% if principal %}
<div class="sidebar-user" data-username="{{ principal.display_name }}">
<div class="user-avatar">
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
</svg>
</div>
<div class="user-info">
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(16, true) }}</div>
<div class="user-key">{{ principal.access_key | truncate(12, true) }}</div>
</div>
</div>
<form method="post" action="{{ url_for('ui.logout') }}" class="w-100">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
<button class="sidebar-logout-btn" type="submit">
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
</svg>
<span class="logout-text">Sign out</span>
</button>
</form>
{% endif %}
</div>
</aside>

<div class="main-wrapper">
|
||||
<main class="main-content">
|
||||
{% block content %}{% endblock %}
|
||||
</main>
|
||||
</div>
|
||||
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
||||
<div id="liveToast" class="toast" role="alert" aria-live="assertive" aria-atomic="true">
|
||||
<div class="toast-header">
|
||||
<strong class="me-auto" id="toastTitle">Notification</strong>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="toast" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="toast-body" id="toastMessage">
|
||||
Hello, world! This is a toast message.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script
|
||||
src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.2/dist/js/bootstrap.bundle.min.js"
|
||||
integrity="sha384-C6RzsynM9kWDrMNeT87bh95OGNyZPhcTNXj1NW7RuBCsyN/o0jlpcV8Qyq46cDfL"
|
||||
crossorigin="anonymous"
|
||||
></script>
|
||||
<script>
window.myfsioCsrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
window.getCsrfToken = () => window.myfsioCsrfToken;
(function () {
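// Wrap window.fetch so every state-changing request automatically carries the CSRF token header.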
const originalFetch = window.fetch;
window.fetch = function (input, init) {
init = init || {};
const method = (init.method || 'GET').toUpperCase();
if (method !== 'GET' && method !== 'HEAD' && method !== 'OPTIONS') {
const headers = new Headers(init.headers || {});
if (window.myfsioCsrfToken) {
headers.set('X-CSRFToken', window.myfsioCsrfToken);
}
init.headers = headers;
}
return originalFetch.call(this, input, init);
};
})();
</script>
<script>
(function () {
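// Theme switcher: applies the selected theme, persists it in localStorage, and keeps the desktop and mobile toggles in sync.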
const storageKey = 'myfsio-theme';
const toggle = document.getElementById('themeToggle');
const toggleMobile = document.getElementById('themeToggleMobile');
const sunIcon = document.getElementById('themeToggleSun');
const moonIcon = document.getElementById('themeToggleMoon');
const sunIconMobile = document.getElementById('themeToggleSunMobile');
const moonIconMobile = document.getElementById('themeToggleMoonMobile');

const applyTheme = (theme) => {
document.documentElement.dataset.bsTheme = theme;
document.documentElement.dataset.theme = theme;
try {
localStorage.setItem(storageKey, theme);
} catch (err) {
console.log("Error: local storage not available, cannot save theme preference.");
}
const isDark = theme === 'dark';
if (sunIcon && moonIcon) {
sunIcon.classList.toggle('d-none', !isDark);
moonIcon.classList.toggle('d-none', isDark);
}
if (sunIconMobile && moonIconMobile) {
sunIconMobile.classList.toggle('d-none', !isDark);
moonIconMobile.classList.toggle('d-none', isDark);
}
[toggle, toggleMobile].forEach(btn => {
if (btn) {
btn.setAttribute('aria-pressed', isDark ? 'true' : 'false');
btn.setAttribute('title', isDark ? 'Switch to light mode' : 'Switch to dark mode');
btn.setAttribute('aria-label', isDark ? 'Switch to light mode' : 'Switch to dark mode');
}
});
};

const current = document.documentElement.dataset.bsTheme || 'light';
applyTheme(current);

const handleToggle = () => {
const next = document.documentElement.dataset.bsTheme === 'dark' ? 'light' : 'dark';
applyTheme(next);
};

toggle?.addEventListener('click', handleToggle);
toggleMobile?.addEventListener('click', handleToggle);
})();
</script>
<script>
(function () {
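// Desktop sidebar collapse: restores the saved state from localStorage and persists every toggle.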
const sidebar = document.getElementById('desktopSidebar');
const collapseBtn = document.getElementById('sidebarCollapseBtn');
const sidebarBrand = document.getElementById('sidebarBrand');
const storageKey = 'myfsio-sidebar-collapsed';

if (!sidebar || !collapseBtn) return;

const applyCollapsed = (collapsed) => {
sidebar.classList.toggle('sidebar-collapsed', collapsed);
document.body.classList.toggle('sidebar-is-collapsed', collapsed);
document.documentElement.classList.remove('sidebar-will-collapse');
try {
localStorage.setItem(storageKey, collapsed ? 'true' : 'false');
} catch (err) {}
};

try {
const stored = localStorage.getItem(storageKey);
applyCollapsed(stored === 'true');
} catch (err) {
document.documentElement.classList.remove('sidebar-will-collapse');
}

collapseBtn.addEventListener('click', () => {
const isCollapsed = sidebar.classList.contains('sidebar-collapsed');
applyCollapsed(!isCollapsed);
});

sidebarBrand?.addEventListener('click', (e) => {
const isCollapsed = sidebar.classList.contains('sidebar-collapsed');
if (isCollapsed) {
e.preventDefault();
applyCollapsed(false);
}
});
})();
</script>
<script>

window.showToast = function(message, title = 'Notification', type = 'info') {
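// Set the title and body, apply a contextual colour class, then show the Bootstrap toast.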
const toastEl = document.getElementById('liveToast');
const toastTitle = document.getElementById('toastTitle');
const toastMessage = document.getElementById('toastMessage');

toastTitle.textContent = title;
toastMessage.textContent = message;

toastEl.classList.remove('text-bg-primary', 'text-bg-success', 'text-bg-danger', 'text-bg-warning');

if (type === 'success') toastEl.classList.add('text-bg-success');
if (type === 'error') toastEl.classList.add('text-bg-danger');
if (type === 'warning') toastEl.classList.add('text-bg-warning');

const toast = new bootstrap.Toast(toastEl);
toast.show();
};
</script>
<script>
(function () {
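// Replay any server-side flash messages through the toast helper once the page loads.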

{% with messages = get_flashed_messages(with_categories=true) %}
{% if messages %}
{% for category, message in messages %}

var type = "{{ category }}";
if (type === "danger") type = "error";
window.showToast({{ message | tojson | safe }}, "Notification", type);
{% endfor %}
{% endif %}
{% endwith %}
})();
</script>
<script src="{{ url_for('static', filename='js/ui-core.js') }}"></script>
{% block extra_scripts %}{% endblock %}

</body>
</html>
File diff suppressed because it is too large
@@ -46,13 +46,12 @@
<div class="d-flex align-items-center gap-3">
<div class="bucket-icon">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="currentColor" viewBox="0 0 16 16">
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H11a.5.5 0 0 1 0 1h-1v1h1a.5.5 0 0 1 0 1h-1v1a.5.5 0 0 1-1 0v-1H6v1a.5.5 0 0 1-1 0v-1H4a.5.5 0 0 1 0-1h1v-1H4a.5.5 0 0 1 0-1h1.5A1.5 1.5 0 0 1 7 10.5V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1zm5 7.5v1h3v-1a.5.5 0 0 0-.5-.5h-2a.5.5 0 0 0-.5.5z"/>
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
</svg>
</div>
<div>
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
<small class="text-muted">Created {{ bucket.meta.created_at.strftime('%b %d, %Y') }}</small>
|
||||
<small class="text-muted">Created {{ bucket.meta.creation_date | format_datetime }}</small>
</div>
</div>
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>
@@ -90,6 +89,14 @@
</div>
</div>
{% endfor %}
<div class="col-12 d-none" id="bucket-no-results">
<div class="text-center py-5 text-muted">
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="mb-3 opacity-50" viewBox="0 0 16 16">
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
</svg>
<p class="mb-0 fw-medium">No buckets match your filter.</p>
</div>
</div>
</div>

<div class="modal fade" id="createBucketModal" tabindex="-1" aria-hidden="true">
@@ -105,7 +112,7 @@
</h1>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<form method="post" action="{{ url_for('ui.create_bucket') }}">
|
||||
<form method="post" action="{{ url_for('ui.create_bucket') }}" id="createBucketForm">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
<div class="modal-body pt-0">
<label class="form-label fw-medium">Bucket name</label>
@@ -134,7 +141,7 @@

const searchInput = document.getElementById('bucket-search');
const bucketItems = document.querySelectorAll('.bucket-item');
-const noBucketsMsg = document.querySelector('.text-center.py-5'); // The "No buckets found" empty state
+const noBucketsMsg = document.querySelector('.text-center.py-5');

if (searchInput) {
searchInput.addEventListener('input', (e) => {
@@ -142,7 +149,7 @@
let visibleCount = 0;

bucketItems.forEach(item => {
-const name = item.querySelector('.card-title').textContent.toLowerCase();
+const name = item.querySelector('.bucket-name').textContent.toLowerCase();
if (name.includes(term)) {
item.classList.remove('d-none');
visibleCount++;
@@ -150,6 +157,15 @@
item.classList.add('d-none');
}
});

var noResults = document.getElementById('bucket-no-results');
if (noResults) {
if (term && visibleCount === 0) {
noResults.classList.remove('d-none');
} else {
noResults.classList.add('d-none');
}
}
});
}

@@ -206,6 +222,25 @@
});
row.style.cursor = 'pointer';
});

var createForm = document.getElementById('createBucketForm');
if (createForm) {
createForm.addEventListener('submit', function(e) {
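// Intercept native submission and post the form through UICore.submitFormAjax instead.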
e.preventDefault();
window.UICore.submitFormAjax(createForm, {
successMessage: 'Bucket created',
onSuccess: function(data) {
var modal = bootstrap.Modal.getInstance(document.getElementById('createBucketModal'));
if (modal) modal.hide();
if (data.bucket_name) {
window.location.href = '{{ url_for("ui.bucket_detail", bucket_name="__BUCKET__") }}'.replace('__BUCKET__', data.bucket_name);
} else {
location.reload();
}
}
});
});
}
})();
</script>
{% endblock %}
@@ -8,8 +8,8 @@
<p class="text-uppercase text-muted small mb-1">Replication</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
<path d="M10.232 8.768l.546-.353a.25.25 0 0 0 0-.418l-.546-.354a.25.25 0 0 1-.116-.21V6.25a.25.25 0 0 0-.25-.25h-.5a.25.25 0 0 0-.25.25v1.183a.25.25 0 0 1-.116.21l-.546.354a.25.25 0 0 0 0 .418l.546.353a.25.25 0 0 1 .116.21v1.183a.25.25 0 0 0 .25.25h.5a.25.25 0 0 0 .25-.25V8.978a.25.25 0 0 1 .116-.21z"/>
</svg>
Remote Connections
</h1>
@@ -57,7 +57,7 @@
<label for="secret_key" class="form-label fw-medium">Secret Key</label>
<div class="input-group">
<input type="password" class="form-control font-monospace" id="secret_key" name="secret_key" required>
<button class="btn btn-outline-secondary" type="button" onclick="togglePassword('secret_key')" title="Toggle visibility">
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="ConnectionsManagement.togglePassword('secret_key')" title="Toggle visibility">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
<path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
<path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
@@ -104,6 +104,7 @@
<table class="table table-hover align-middle mb-0">
<thead class="table-light">
<tr>
<th scope="col" style="width: 50px;">Status</th>
<th scope="col">Name</th>
<th scope="col">Endpoint</th>
<th scope="col">Region</th>
@@ -113,13 +114,17 @@
</thead>
<tbody>
{% for conn in connections %}
-<tr>
+<tr data-connection-id="{{ conn.id }}">
<td class="text-center">
<span class="connection-status" data-status="checking" title="Checking...">
<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 12px; height: 12px;"></span>
</span>
</td>
<td>
<div class="d-flex align-items-center gap-2">
<div class="connection-icon">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
</svg>
</div>
<span class="fw-medium">{{ conn.name }}</span>
@@ -140,7 +145,6 @@
data-endpoint="{{ conn.endpoint_url }}"
data-region="{{ conn.region }}"
data-access="{{ conn.access_key }}"
-data-secret="{{ conn.secret_key }}"
title="Edit connection">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
@@ -168,8 +172,7 @@
|
||||
<div class="empty-state text-center py-5">
|
||||
<div class="empty-state-icon mx-auto mb-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4.5 5a.5.5 0 1 0 0-1 .5.5 0 0 0 0 1zM3 4.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"/>
|
||||
<path d="M0 4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v1a2 2 0 0 1-2 2H8.5v3a1.5 1.5 0 0 1 1.5 1.5H12a.5.5 0 0 1 0 1H4a.5.5 0 0 1 0-1h2A1.5 1.5 0 0 1 7.5 10V7H2a2 2 0 0 1-2-2V4zm1 0v1a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H2a1 1 0 0 0-1 1z"/>
|
||||
<path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<h5 class="fw-semibold mb-2">No connections yet</h5>
|
||||
@@ -216,7 +219,7 @@
|
||||
<label for="edit_secret_key" class="form-label fw-medium">Secret Key</label>
|
||||
<div class="input-group">
|
||||
<input type="password" class="form-control font-monospace" id="edit_secret_key" name="secret_key" required>
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="togglePassword('edit_secret_key')">
|
||||
<button class="btn btn-outline-secondary" type="button" onclick="ConnectionsManagement.togglePassword('edit_secret_key')">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
<path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
<path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
@@ -285,78 +288,16 @@
</div>
</div>

<script src="{{ url_for('static', filename='js/connections-management.js') }}"></script>
<script>
function togglePassword(id) {
const input = document.getElementById(id);
if (input.type === "password") {
input.type = "text";
} else {
input.type = "password";
}
ConnectionsManagement.init({
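// Wire the page's CSRF token and resolved endpoint URLs into connections-management.js.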
csrfToken: "{{ csrf_token() }}",
endpoints: {
test: "{{ url_for('ui.test_connection') }}",
updateTemplate: "{{ url_for('ui.update_connection', connection_id='CONNECTION_ID') }}",
deleteTemplate: "{{ url_for('ui.delete_connection', connection_id='CONNECTION_ID') }}",
healthTemplate: "/ui/connections/CONNECTION_ID/health"
}

async function testConnection(formId, resultId) {
const form = document.getElementById(formId);
const resultDiv = document.getElementById(resultId);
const formData = new FormData(form);
const data = Object.fromEntries(formData.entries());

resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing...</div>';

try {
const response = await fetch("{{ url_for('ui.test_connection') }}", {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-CSRFToken": "{{ csrf_token() }}"
},
body: JSON.stringify(data)
});

const result = await response.json();
if (response.ok) {
resultDiv.innerHTML = `<div class="text-success"><i class="bi bi-check-circle"></i> ${result.message}</div>`;
} else {
resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> ${result.message}</div>`;
}
} catch (error) {
resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> Connection failed</div>`;
}
}

document.getElementById('testConnectionBtn').addEventListener('click', () => {
testConnection('createConnectionForm', 'testResult');
});

document.getElementById('editTestConnectionBtn').addEventListener('click', () => {
testConnection('editConnectionForm', 'editTestResult');
});

const editModal = document.getElementById('editConnectionModal');
editModal.addEventListener('show.bs.modal', event => {
const button = event.relatedTarget;
const id = button.getAttribute('data-id');

document.getElementById('edit_name').value = button.getAttribute('data-name');
document.getElementById('edit_endpoint_url').value = button.getAttribute('data-endpoint');
document.getElementById('edit_region').value = button.getAttribute('data-region');
document.getElementById('edit_access_key').value = button.getAttribute('data-access');
document.getElementById('edit_secret_key').value = button.getAttribute('data-secret');
document.getElementById('editTestResult').innerHTML = '';

const form = document.getElementById('editConnectionForm');
form.action = "{{ url_for('ui.update_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id);
});

const deleteModal = document.getElementById('deleteConnectionModal');
deleteModal.addEventListener('show.bs.modal', event => {
const button = event.relatedTarget;
const id = button.getAttribute('data-id');
const name = button.getAttribute('data-name');

document.getElementById('deleteConnectionName').textContent = name;
const form = document.getElementById('deleteConnectionForm');
form.action = "{{ url_for('ui.delete_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id);
});
});
</script>
{% endblock %}
python/templates/docs.html · new file · 2905 lines (diff suppressed because it is too large)
@@ -10,6 +10,7 @@
|
||||
</svg>
|
||||
IAM Configuration
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Create and manage users with fine-grained bucket permissions.</p>
|
||||
</div>
|
||||
<div class="d-flex gap-2">
|
||||
{% if not iam_locked %}
|
||||
@@ -49,9 +50,20 @@
|
||||
New user created: <code>{{ disclosed_secret.access_key }}</code>
|
||||
{% endif %}
|
||||
</div>
|
||||
<p class="mb-2 small">⚠️ This secret is only shown once. Copy it now and store it securely.</p>
|
||||
<p class="mb-2 small">These credentials are only shown once. Copy them now and store them securely.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="input-group mb-2">
|
||||
<span class="input-group-text"><strong>Access key</strong></span>
|
||||
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.access_key }}" readonly id="disclosedAccessKeyValue" />
|
||||
<button class="btn btn-outline-primary" type="button" data-access-key-copy>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-clipboard" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<div class="input-group">
|
||||
<span class="input-group-text"><strong>Secret key</strong></span>
|
||||
<input class="form-control font-monospace" type="text" value="{{ disclosed_secret.secret_key }}" readonly id="disclosedSecretValue" />
|
||||
@@ -78,7 +90,7 @@
|
||||
<pre class="policy-preview mb-0" id="iamConfigPreview">{{ config_document }}</pre>
|
||||
<button class="btn btn-outline-light btn-sm config-copy" type="button" data-copy-target="iamConfigPreview">Copy JSON</button>
|
||||
</div>
|
||||
<p class="text-muted small mt-2 mb-0">Secrets are masked above. Access <code>{{ config_summary.path }}</code> directly to view full credentials.</p>
|
||||
<p class="text-muted small mt-2 mb-0">Secrets are masked above. IAM config is encrypted at rest.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -109,78 +121,159 @@
|
||||
{% else %}
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if users %}
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col">User</th>
|
||||
<th scope="col">Policies</th>
|
||||
<th scope="col" class="text-end">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for user in users %}
|
||||
<tr>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-3">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
{% if users|length > 1 %}
|
||||
<div class="mb-3">
|
||||
<div class="search-input-wrapper">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="search-icon" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<input type="text" class="form-control" id="iam-user-search" placeholder="Filter users by name or access key..." autocomplete="off" />
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
<div class="row g-3">
|
||||
{% for user in users %}
|
||||
{% set ns = namespace(is_admin=false, is_expired=false, is_expiring_soon=false) %}
|
||||
{% for policy in user.policies %}
|
||||
{% if 'iam:*' in policy.actions or '*' in policy.actions %}
|
||||
{% set ns.is_admin = true %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if user.expires_at %}
|
||||
{% set exp_str = user.expires_at %}
|
||||
{% if exp_str <= now_iso %}
|
||||
{% set ns.is_expired = true %}
|
||||
{% elif exp_str <= soon_iso %}
|
||||
{% set ns.is_expiring_soon = true %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
<div class="col-md-6 col-xl-4 iam-user-item" data-display-name="{{ user.display_name|lower }}" data-access-key-filter="{{ user.access_key|lower }}">
|
||||
<div class="card h-100 iam-user-card{{ ' iam-admin-card' if ns.is_admin else '' }}">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-start justify-content-between mb-3">
|
||||
<div class="d-flex align-items-center gap-3 min-width-0 overflow-hidden">
|
||||
<div class="user-avatar user-avatar-lg flex-shrink-0">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 8a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm2-3a2 2 0 1 1-4 0 2 2 0 0 1 4 0zm4 8c0 1-1 1-1 1H3s-1 0-1-1 1-4 6-4 6 3 6 4zm-1-.004c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664h10z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<div class="fw-medium">{{ user.display_name }}</div>
|
||||
<code class="small text-muted">{{ user.access_key }}</code>
|
||||
<div class="min-width-0">
|
||||
<div class="d-flex align-items-center gap-2 mb-0">
|
||||
<h6 class="fw-semibold mb-0 text-truncate" title="{{ user.display_name }}">{{ user.display_name }}</h6>
|
||||
{% if ns.is_admin %}
|
||||
<span class="iam-role-badge iam-role-admin" data-role-badge>Admin</span>
|
||||
{% else %}
|
||||
<span class="iam-role-badge iam-role-user" data-role-badge>User</span>
|
||||
{% endif %}
|
||||
{% if ns.is_expired %}
|
||||
<span class="badge text-bg-danger" style="font-size: .65rem">Expired</span>
|
||||
{% elif ns.is_expiring_soon %}
|
||||
<span class="badge text-bg-warning" style="font-size: .65rem">Expiring soon</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="d-flex align-items-center gap-1">
|
||||
<code class="small text-muted text-truncate" title="{{ user.access_key }}">{{ user.access_key }}</code>
|
||||
<button type="button" class="iam-copy-key" title="Copy access key" data-copy-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex flex-wrap gap-1">
|
||||
<div class="dropdown flex-shrink-0">
|
||||
<button class="btn btn-sm btn-icon" type="button" data-bs-toggle="dropdown" aria-expanded="false">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M9.5 13a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0zm0-5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<ul class="dropdown-menu dropdown-menu-end">
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-edit-user="{{ user.access_key }}" data-display-name="{{ user.display_name }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
Edit Name
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-expiry-user="{{ user.access_key }}" data-expires-at="{{ user.expires_at or '' }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
|
||||
</svg>
|
||||
Set Expiry
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button class="dropdown-item" type="button" data-rotate-user="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
Rotate Secret
|
||||
</button>
|
||||
</li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li>
|
||||
<button class="dropdown-item text-danger" type="button" data-delete-user="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete User
|
||||
</button>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<div class="small text-muted mb-2">Bucket Permissions</div>
|
||||
<div class="d-flex flex-wrap gap-1" data-policy-badges>
|
||||
{% for policy in user.policies %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">
|
||||
{{ policy.bucket }}
|
||||
{% if '*' in policy.actions %}
|
||||
<span class="opacity-75">(full)</span>
|
||||
{% else %}
|
||||
<span class="opacity-75">({{ policy.actions|length }})</span>
|
||||
{% endif %}
|
||||
{% set bucket_label = 'All Buckets' if policy.bucket == '*' else policy.bucket %}
|
||||
{% if '*' in policy.actions %}
|
||||
{% set perm_label = 'Full Access' %}
|
||||
{% elif policy.actions|length >= 19 %}
|
||||
{% set perm_label = 'Full Access' %}
|
||||
{% elif 'list' in policy.actions and 'read' in policy.actions and 'write' in policy.actions and 'delete' in policy.actions %}
|
||||
{% set perm_label = 'Read + Write + Delete' %}
|
||||
{% elif 'list' in policy.actions and 'read' in policy.actions and 'write' in policy.actions %}
|
||||
{% set perm_label = 'Read + Write' %}
|
||||
{% elif 'list' in policy.actions and 'read' in policy.actions %}
|
||||
{% set perm_label = 'Read Only' %}
|
||||
{% else %}
|
||||
{% set perm_label = 'Custom (' ~ policy.actions|length ~ ')' %}
|
||||
{% endif %}
|
||||
<span class="iam-perm-badge">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
{{ bucket_label }} · {{ perm_label }}
|
||||
</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">No policies</span>
|
||||
{% endfor %}
|
||||
</div>
|
||||
</td>
|
||||
<td class="text-end">
|
||||
<div class="btn-group btn-group-sm" role="group">
|
||||
<button class="btn btn-outline-primary" type="button" data-rotate-user="{{ user.access_key }}" title="Rotate Secret">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary" type="button" data-edit-user="{{ user.access_key }}" data-display-name="{{ user.display_name }}" title="Edit User">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary" type="button" data-policy-editor data-access-key="{{ user.access_key }}" title="Edit Policies">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
|
||||
<path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<button class="btn btn-outline-danger" type="button" data-delete-user="{{ user.access_key }}" title="Delete User">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 .5-.5zm3 .5v6a.5.5 0 0 1-1 0v-6a.5.5 0 0 1 1 0z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<button class="btn btn-outline-primary btn-sm w-100" type="button" data-policy-editor data-access-key="{{ user.access_key }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M8 4.754a3.246 3.246 0 1 0 0 6.492 3.246 3.246 0 0 0 0-6.492zM5.754 8a2.246 2.246 0 1 1 4.492 0 2.246 2.246 0 0 1-4.492 0z"/>
|
||||
<path d="M9.796 1.343c-.527-1.79-3.065-1.79-3.592 0l-.094.319a.873.873 0 0 1-1.255.52l-.292-.16c-1.64-.892-3.433.902-2.54 2.541l.159.292a.873.873 0 0 1-.52 1.255l-.319.094c-1.79.527-1.79 3.065 0 3.592l.319.094a.873.873 0 0 1 .52 1.255l-.16.292c-.892 1.64.901 3.434 2.541 2.54l.292-.159a.873.873 0 0 1 1.255.52l.094.319c.527 1.79 3.065 1.79 3.592 0l.094-.319a.873.873 0 0 1 1.255-.52l.292.16c1.64.893 3.434-.902 2.54-2.541l-.159-.292a.873.873 0 0 1 .52-1.255l.319-.094c1.79-.527 1.79-3.065 0-3.592l-.319-.094a.873.873 0 0 1-.52-1.255l.16-.292c.893-1.64-.902-3.433-2.541-2.54l-.292.159a.873.873 0 0 1-1.255-.52l-.094-.319z"/>
|
||||
</svg>
|
||||
Manage Policies
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
<div class="iam-no-results d-none" id="iam-no-results">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="mb-2" viewBox="0 0 16 16">
|
||||
<path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
|
||||
</svg>
|
||||
<p class="mb-0">No users match your filter.</p>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state text-center py-5">
|
||||
@@ -223,6 +316,32 @@
|
||||
<label class="form-label fw-medium">Display Name</label>
|
||||
<input class="form-control" type="text" name="display_name" placeholder="Analytics Team" required autofocus />
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Access Key <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<div class="input-group">
|
||||
<input class="form-control font-monospace" type="text" name="access_key" id="createUserAccessKey" placeholder="Leave blank to auto-generate" />
|
||||
<button class="btn btn-outline-secondary" type="button" id="generateAccessKeyBtn" title="Generate secure access key">Generate</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Secret Key <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<div class="input-group">
|
||||
<input class="form-control font-monospace" type="text" name="secret_key" id="createUserSecretKey" placeholder="Leave blank to auto-generate" />
|
||||
<button class="btn btn-outline-secondary" type="button" id="generateSecretKeyBtn" title="Generate secure secret key">Generate</button>
|
||||
</div>
|
||||
<div class="form-text">If you set a custom secret key, copy it now. It will be encrypted and cannot be recovered.</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium d-flex justify-content-between align-items-center">
|
||||
Expiry <span class="text-muted fw-normal small">optional</span>
|
||||
</label>
|
||||
<input class="form-control" type="datetime-local" name="expires_at" id="createUserExpiry" />
|
||||
<div class="form-text">Leave blank for no expiration. Expired users cannot authenticate.</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Initial Policies (JSON)</label>
|
||||
<textarea class="form-control font-monospace" name="policies" id="createUserPolicies" rows="6" spellcheck="false" placeholder='[
|
||||
@@ -235,6 +354,8 @@
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="full">Full Control</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="readonly">Read-Only</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="writer">Read + Write</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="operator">Operator</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="bucketadmin">Bucket Admin</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
@@ -285,6 +406,8 @@
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="full">Full Control</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="readonly">Read-Only</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="writer">Read + Write</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="operator">Operator</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="bucketadmin">Bucket Admin</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
@@ -341,8 +464,8 @@
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h1 class="modal-title fs-5 fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M1 14s-1 0-1-1 1-4 6-4 6 3 6 4-1 1-1 1H1zm5-6a3 3 0 1 0 0-6 3 3 0 0 0 0 6z"/>
|
||||
<path fill-rule="evenodd" d="M11 1.5v1h5v1h-1v9a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2v-9H0v-1h5v-1a1 1 0 0 1 1-1h4a1 1 0 0 1 1 1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118z"/>
|
||||
<path d="M11 5a3 3 0 1 1-6 0 3 3 0 0 1 6 0M8 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4m.256 7a4.5 4.5 0 0 1-.229-1.004H3c.001-.246.154-.986.832-1.664C4.484 10.68 5.711 10 8 10q.39 0 .74.025c.226-.341.496-.65.804-.918Q9.077 9.014 8 9c-5 0-6 3-6 4s1 1 1 1h5.256Z"/>
|
||||
<path d="M12.5 16a3.5 3.5 0 1 0 0-7 3.5 3.5 0 0 0 0 7m-.646-4.854.646.647.646-.647a.5.5 0 0 1 .708.708l-.647.646.647.646a.5.5 0 0 1-.708.708l-.646-.647-.646.647a.5.5 0 0 1-.708-.708l.647-.646-.647-.646a.5.5 0 0 1 .708-.708"/>
|
||||
</svg>
|
||||
Delete User
|
||||
</h1>
|
||||
@@ -435,270 +558,72 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="expiryModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h1 class="modal-title fs-5 fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M8 3.5a.5.5 0 0 0-1 0V9a.5.5 0 0 0 .252.434l3.5 2a.5.5 0 0 0 .496-.868L8 8.71V3.5z"/>
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm7-8A7 7 0 1 1 1 8a7 7 0 0 1 14 0z"/>
|
||||
</svg>
|
||||
Set Expiry
|
||||
</h1>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="post" id="expiryForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<div class="modal-body">
|
||||
<p class="text-muted small mb-3">Set expiration for <code id="expiryUserLabel"></code></p>
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Expires at</label>
|
||||
<input class="form-control" type="datetime-local" name="expires_at" id="expiryDateInput" />
|
||||
<div class="form-text">Leave blank to remove expiration (never expires).</div>
|
||||
</div>
|
||||
<div class="d-flex flex-wrap gap-2">
|
||||
<span class="text-muted small me-2 align-self-center">Quick presets:</span>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="1h">1 hour</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="24h">24 hours</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="7d">7 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="30d">30 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-expiry-preset="90d">90 days</button>
|
||||
<button class="btn btn-outline-secondary btn-sm text-danger" type="button" data-expiry-preset="clear">Never</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<button class="btn btn-primary" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save Expiry
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script id="iamUsersJson" type="application/json">{{ users | tojson }}</script>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
{{ super() }}
|
||||
<script src="{{ url_for('static', filename='js/iam-management.js') }}"></script>
|
||||
<script>
|
||||
(function () {
|
||||
const currentUserKey = {{ principal.access_key | tojson }};
|
||||
const configCopyButtons = document.querySelectorAll('.config-copy');
|
||||
configCopyButtons.forEach((button) => {
|
||||
button.addEventListener('click', async () => {
|
||||
const targetId = button.dataset.copyTarget;
|
||||
const target = document.getElementById(targetId);
|
||||
if (!target) return;
|
||||
const text = target.innerText;
|
||||
try {
|
||||
await navigator.clipboard.writeText(text);
|
||||
button.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
button.textContent = 'Copy JSON';
|
||||
}, 1500);
|
||||
} catch (err) {
|
||||
console.error('Unable to copy IAM config', err);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
const secretCopyButton = document.querySelector('[data-secret-copy]');
|
||||
if (secretCopyButton) {
|
||||
secretCopyButton.addEventListener('click', async () => {
|
||||
const secretInput = document.getElementById('disclosedSecretValue');
|
||||
if (!secretInput) return;
|
||||
try {
|
||||
await navigator.clipboard.writeText(secretInput.value);
|
||||
secretCopyButton.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
secretCopyButton.textContent = 'Copy';
|
||||
}, 1500);
|
||||
} catch (err) {
|
||||
console.error('Unable to copy IAM secret', err);
|
||||
}
|
||||
});
|
||||
IAMManagement.init({
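// Boot the IAM page module with the server-rendered user list, lock state, and endpoint templates.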
users: JSON.parse(document.getElementById('iamUsersJson').textContent || '[]'),
currentUserKey: {{ principal.access_key | tojson }},
iamLocked: {{ iam_locked | tojson }},
csrfToken: "{{ csrf_token() }}",
endpoints: {
createUser: "{{ url_for('ui.create_iam_user') }}",
updateUser: "{{ url_for('ui.update_iam_user', access_key='ACCESS_KEY') }}",
deleteUser: "{{ url_for('ui.delete_iam_user', access_key='ACCESS_KEY') }}",
updatePolicies: "{{ url_for('ui.update_iam_policies', access_key='ACCESS_KEY') }}",
rotateSecret: "{{ url_for('ui.rotate_iam_secret', access_key='ACCESS_KEY') }}",
updateExpiry: "{{ url_for('ui.update_iam_expiry', access_key='ACCESS_KEY') }}"
}
|
||||
const iamUsersData = document.getElementById('iamUsersJson');
|
||||
const users = iamUsersData ? JSON.parse(iamUsersData.textContent || '[]') : [];
|
||||
|
||||
const policyModalEl = document.getElementById('policyEditorModal');
|
||||
const policyModal = new bootstrap.Modal(policyModalEl);
|
||||
const userLabelEl = document.getElementById('policyEditorUserLabel');
|
||||
const userInputEl = document.getElementById('policyEditorUser');
|
||||
const textareaEl = document.getElementById('policyEditorDocument');
|
||||
const formEl = document.getElementById('policyEditorForm');
|
||||
const templateButtons = document.querySelectorAll('[data-policy-template]');
|
||||
const iamLocked = {{ iam_locked | tojson }};
|
||||
|
||||
if (iamLocked) return;
|
||||
|
||||
const userPolicies = (accessKey) => {
|
||||
const target = users.find((user) => user.access_key === accessKey);
|
||||
return target ? JSON.stringify(target.policies, null, 2) : '';
|
||||
};
|
||||
|
||||
const applyTemplate = (name) => {
|
||||
const templates = {
|
||||
full: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'iam:list_users', 'iam:*'],
|
||||
},
|
||||
],
|
||||
readonly: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read'],
|
||||
},
|
||||
],
|
||||
writer: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write'],
|
||||
},
|
||||
],
|
||||
};
|
||||
if (templates[name]) {
|
||||
textareaEl.value = JSON.stringify(templates[name], null, 2);
|
||||
}
|
||||
};
|
||||
|
||||
templateButtons.forEach((button) => {
|
||||
button.addEventListener('click', () => applyTemplate(button.dataset.policyTemplate));
|
||||
});
|
||||
|
||||
const createUserPoliciesEl = document.getElementById('createUserPolicies');
|
||||
const createTemplateButtons = document.querySelectorAll('[data-create-policy-template]');
|
||||
|
||||
const applyCreateTemplate = (name) => {
|
||||
const templates = {
|
||||
full: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'iam:list_users', 'iam:*'],
|
||||
},
|
||||
],
|
||||
readonly: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read'],
|
||||
},
|
||||
],
|
||||
writer: [
|
||||
{
|
||||
bucket: '*',
|
||||
actions: ['list', 'read', 'write'],
|
||||
},
|
||||
],
|
||||
};
|
||||
if (templates[name] && createUserPoliciesEl) {
|
||||
createUserPoliciesEl.value = JSON.stringify(templates[name], null, 2);
|
||||
}
|
||||
};
|
||||
|
||||
createTemplateButtons.forEach((button) => {
|
||||
button.addEventListener('click', () => applyCreateTemplate(button.dataset.createPolicyTemplate));
|
||||
});
|
||||
|
||||
formEl?.addEventListener('submit', (event) => {
|
||||
const key = userInputEl.value;
|
||||
if (!key) {
|
||||
event.preventDefault();
|
||||
return;
|
||||
}
|
||||
const template = formEl.dataset.actionTemplate;
|
||||
formEl.action = template.replace('ACCESS_KEY_PLACEHOLDER', key);
|
||||
});
|
||||
|
||||
document.querySelectorAll('[data-policy-editor]').forEach((button) => {
|
||||
button.addEventListener('click', () => {
|
||||
const key = button.getAttribute('data-access-key');
|
||||
if (!key) return;
|
||||
|
||||
userLabelEl.textContent = key;
|
||||
userInputEl.value = key;
|
||||
textareaEl.value = userPolicies(key);
|
||||
|
||||
policyModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
const editUserModal = new bootstrap.Modal(document.getElementById('editUserModal'));
|
||||
const editUserForm = document.getElementById('editUserForm');
|
||||
const editUserDisplayName = document.getElementById('editUserDisplayName');
|
||||
|
||||
document.querySelectorAll('[data-edit-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
const key = btn.dataset.editUser;
|
||||
const name = btn.dataset.displayName;
|
||||
editUserDisplayName.value = name;
|
||||
editUserForm.action = "{{ url_for('ui.update_iam_user', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', key);
|
||||
editUserModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
const deleteUserModal = new bootstrap.Modal(document.getElementById('deleteUserModal'));
|
||||
const deleteUserForm = document.getElementById('deleteUserForm');
|
||||
const deleteUserLabel = document.getElementById('deleteUserLabel');
|
||||
const deleteSelfWarning = document.getElementById('deleteSelfWarning');
|
||||
|
||||
document.querySelectorAll('[data-delete-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
const key = btn.dataset.deleteUser;
|
||||
deleteUserLabel.textContent = key;
|
||||
deleteUserForm.action = "{{ url_for('ui.delete_iam_user', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', key);
|
||||
|
||||
if (key === currentUserKey) {
|
||||
deleteSelfWarning.classList.remove('d-none');
|
||||
} else {
|
||||
deleteSelfWarning.classList.add('d-none');
|
||||
}
|
||||
|
||||
deleteUserModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
const rotateSecretModal = new bootstrap.Modal(document.getElementById('rotateSecretModal'));
|
||||
const rotateUserLabel = document.getElementById('rotateUserLabel');
|
||||
const confirmRotateBtn = document.getElementById('confirmRotateBtn');
|
||||
const rotateCancelBtn = document.getElementById('rotateCancelBtn');
|
||||
const rotateDoneBtn = document.getElementById('rotateDoneBtn');
|
||||
const rotateSecretConfirm = document.getElementById('rotateSecretConfirm');
|
||||
const rotateSecretResult = document.getElementById('rotateSecretResult');
|
||||
const newSecretKeyInput = document.getElementById('newSecretKey');
|
||||
const copyNewSecretBtn = document.getElementById('copyNewSecret');
|
||||
let currentRotateKey = null;
|
||||
|
||||
document.querySelectorAll('[data-rotate-user]').forEach(btn => {
|
||||
btn.addEventListener('click', () => {
|
||||
currentRotateKey = btn.dataset.rotateUser;
|
||||
rotateUserLabel.textContent = currentRotateKey;
|
||||
|
||||
rotateSecretConfirm.classList.remove('d-none');
|
||||
rotateSecretResult.classList.add('d-none');
|
||||
confirmRotateBtn.classList.remove('d-none');
|
||||
rotateCancelBtn.classList.remove('d-none');
|
||||
rotateDoneBtn.classList.add('d-none');
|
||||
|
||||
rotateSecretModal.show();
|
||||
});
|
||||
});
|
||||
|
||||
confirmRotateBtn.addEventListener('click', async () => {
|
||||
if (!currentRotateKey) return;
|
||||
|
||||
confirmRotateBtn.disabled = true;
|
||||
confirmRotateBtn.textContent = "Rotating...";
|
||||
|
||||
try {
|
||||
const url = "{{ url_for('ui.rotate_iam_secret', access_key='ACCESS_KEY') }}".replace('ACCESS_KEY', currentRotateKey);
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'X-CSRFToken': "{{ csrf_token() }}"
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const data = await response.json();
|
||||
throw new Error(data.error || 'Failed to rotate secret');
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
newSecretKeyInput.value = data.secret_key;
|
||||
|
||||
rotateSecretConfirm.classList.add('d-none');
|
||||
rotateSecretResult.classList.remove('d-none');
|
||||
confirmRotateBtn.classList.add('d-none');
|
||||
rotateCancelBtn.classList.add('d-none');
|
||||
rotateDoneBtn.classList.remove('d-none');
|
||||
|
||||
} catch (err) {
|
||||
if (window.showToast) {
|
||||
window.showToast(err.message, 'Error', 'danger');
|
||||
}
|
||||
rotateSecretModal.hide();
|
||||
} finally {
|
||||
confirmRotateBtn.disabled = false;
|
||||
confirmRotateBtn.textContent = "Rotate Key";
|
||||
}
|
||||
});
|
||||
|
||||
copyNewSecretBtn.addEventListener('click', async () => {
|
||||
try {
|
||||
await navigator.clipboard.writeText(newSecretKeyInput.value);
|
||||
copyNewSecretBtn.textContent = 'Copied!';
|
||||
setTimeout(() => copyNewSecretBtn.textContent = 'Copy', 1500);
|
||||
} catch (err) {
|
||||
console.error('Failed to copy', err);
|
||||
}
|
||||
});
|
||||
|
||||
rotateDoneBtn.addEventListener('click', () => {
|
||||
window.location.reload();
|
||||
});
|
||||
})();
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
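
For reference, a minimal sketch of the user payload the script above round-trips. Only access_key and policies are actually read in this diff; the concrete values below are hypothetical:

// One entry of the iamUsersJson payload consumed above (users[].access_key,
// users[].policies); policies is an array of { bucket, actions } statements,
// matching the full/readonly/writer templates in the script.
const exampleUser = {
  access_key: 'EXAMPLEKEY',                                // hypothetical value
  policies: [{ bucket: '*', actions: ['list', 'read'] }]   // readonly template shape
};
// The rotate endpoint is expected to answer with JSON containing secret_key,
// as read by newSecretKeyInput.value = data.secret_key above.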
|
||||
@@ -35,7 +35,7 @@
 <div class="card shadow-lg login-card position-relative">
 <div class="card-body p-4 p-md-5">
 <div class="text-center mb-4 d-lg-none">
-<img src="{{ url_for('static', filename='images/MyFISO.png') }}" alt="MyFSIO" width="48" height="48" class="mb-3 rounded-3">
+<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO" width="48" height="48" class="mb-3 rounded-3">
 <h2 class="h4 fw-bold">MyFSIO</h2>
 </div>
 <h2 class="h4 mb-1 d-none d-lg-block">Sign in</h2>
@@ -73,9 +73,6 @@
 </svg>
 </button>
 </form>
-<div class="text-center mt-4">
-<small class="text-muted">Need help? Check the <a href="#" class="text-decoration-none">documentation</a></small>
-</div>
 </div>
 </div>
 </div>
|
||||
python/templates/metrics.html (new file, 1159 lines; diff suppressed because it is too large)

python/templates/replication_wizard.html (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}Set Up Replication - S3 Compatible Storage{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<nav aria-label="breadcrumb">
|
||||
<ol class="breadcrumb mb-1">
|
||||
<li class="breadcrumb-item"><a href="{{ url_for('ui.sites_dashboard') }}">Sites</a></li>
|
||||
<li class="breadcrumb-item active" aria-current="page">Replication Wizard</li>
|
||||
</ol>
|
||||
</nav>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
Set Up Replication
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Configure bucket replication to <strong>{{ peer.display_name or peer.site_id }}</strong></p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4">
|
||||
<div class="col-lg-4 col-md-5">
|
||||
<div class="card shadow-sm border-0 mb-4" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8m7.5-6.923c-.67.204-1.335.82-1.887 1.855A8 8 0 0 0 5.145 4H7.5zM4.09 4a9.3 9.3 0 0 1 .64-1.539 7 7 0 0 1 .597-.933A7.03 7.03 0 0 0 2.255 4zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a7 7 0 0 0-.656 2.5zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5zM8.5 5v2.5h2.99a12.5 12.5 0 0 0-.337-2.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5zM5.145 12q.208.58.468 1.068c.552 1.035 1.218 1.65 1.887 1.855V12zm.182 2.472a7 7 0 0 1-.597-.933A9.3 9.3 0 0 1 4.09 12H2.255a7 7 0 0 0 3.072 2.472M3.82 11a13.7 13.7 0 0 1-.312-2.5h-2.49a7 7 0 0 0 .656 2.5zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855q.26-.487.468-1.068zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.7 13.7 0 0 1-.312 2.5m2.802-3.5a7 7 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7 7 0 0 0-3.072-2.472c.218.284.418.598.597.933M10.855 4a8 8 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4z"/>
|
||||
</svg>
|
||||
Peer Site
|
||||
</h5>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<dl class="mb-0">
|
||||
<dt class="text-muted small">Site ID</dt>
|
||||
<dd class="mb-2">{{ peer.site_id }}</dd>
|
||||
<dt class="text-muted small">Endpoint</dt>
|
||||
<dd class="mb-2 text-truncate" title="{{ peer.endpoint }}">{{ peer.endpoint }}</dd>
|
||||
<dt class="text-muted small">Region</dt>
|
||||
<dd class="mb-2"><span class="badge bg-primary bg-opacity-10 text-primary">{{ peer.region }}</span></dd>
|
||||
<dt class="text-muted small">Connection</dt>
|
||||
<dd class="mb-0"><span class="badge bg-secondary bg-opacity-10 text-secondary">{{ connection.name }}</span></dd>
|
||||
</dl>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||
</svg>
|
||||
Replication Modes
|
||||
</h5>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4 small">
|
||||
<p class="mb-2"><strong>New Only:</strong> Only replicate new objects uploaded after the rule is created.</p>
|
||||
<p class="mb-2"><strong>All Objects:</strong> Replicate all existing objects plus new uploads.</p>
|
||||
<p class="mb-0"><strong>Bidirectional:</strong> Two-way sync between sites. Changes on either side are synchronized.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-8 col-md-7">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
Select Buckets to Replicate
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Choose which buckets should be replicated to this peer site</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if buckets %}
|
||||
<form method="POST" action="{{ url_for('ui.create_peer_replication_rules', site_id=peer.site_id) }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
|
||||
<div class="mb-4">
|
||||
<label for="mode" class="form-label fw-medium">Replication Mode</label>
|
||||
<select class="form-select" id="mode" name="mode">
|
||||
<option value="new_only">New Objects Only</option>
|
||||
<option value="all">All Objects (includes existing)</option>
|
||||
<option value="bidirectional">Bidirectional Sync</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div id="bidirWarning" class="alert alert-warning d-none mb-4" role="alert">
|
||||
<h6 class="alert-heading fw-bold d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
Bidirectional Sync Requires Configuration on Both Sites
|
||||
</h6>
|
||||
<p class="mb-2">For bidirectional sync to work properly, you must configure <strong>both</strong> sites. This wizard only configures one direction.</p>
|
||||
<hr class="my-2">
|
||||
<p class="mb-2 fw-semibold">After completing this wizard, you must also:</p>
|
||||
<ol class="mb-2 ps-3">
|
||||
<li>Go to <strong>{{ peer.display_name or peer.site_id }}</strong>'s admin UI</li>
|
||||
<li>Register <strong>this site</strong> as a peer (with a connection)</li>
|
||||
<li>Create matching bidirectional replication rules pointing back to this site</li>
|
||||
<li>Ensure <code>SITE_SYNC_ENABLED=true</code> is set on both sites</li>
|
||||
</ol>
|
||||
<div class="d-flex align-items-center gap-2 mt-3">
|
||||
<span class="badge bg-light text-dark border">Local Site ID: <strong>{{ local_site.site_id if local_site else 'Not configured' }}</strong></span>
|
||||
<span class="badge bg-light text-dark border">Local Endpoint: <strong>{{ local_site.endpoint if local_site and local_site.endpoint else 'Not configured' }}</strong></span>
|
||||
</div>
|
||||
{% if not local_site or not local_site.site_id or not local_site.endpoint %}
|
||||
<div class="alert alert-danger mt-3 mb-0 py-2">
|
||||
<small><strong>Warning:</strong> Your local site identity is not fully configured. The remote site won't be able to connect back. <a href="{{ url_for('ui.sites_dashboard') }}">Configure it now</a>.</small>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col" style="width: 40px;">
|
||||
<input type="checkbox" class="form-check-input" id="selectAll">
|
||||
</th>
|
||||
<th scope="col">Local Bucket</th>
|
||||
<th scope="col">Target Bucket Name</th>
|
||||
<th scope="col">Status</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for bucket in buckets %}
|
||||
<tr>
|
||||
<td>
|
||||
<input type="checkbox" class="form-check-input bucket-checkbox"
|
||||
name="buckets" value="{{ bucket.name }}"
|
||||
{% if bucket.has_rule %}disabled{% endif %}>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span class="fw-medium">{{ bucket.name }}</span>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<input type="text" class="form-control form-control-sm"
|
||||
name="target_{{ bucket.name }}"
|
||||
value="{{ bucket.existing_target or bucket.name }}"
|
||||
placeholder="{{ bucket.name }}"
|
||||
{% if bucket.has_rule %}disabled{% endif %}>
|
||||
</td>
|
||||
<td>
|
||||
{% if bucket.has_rule %}
|
||||
<span class="badge bg-info bg-opacity-10 text-info">
|
||||
Already configured ({{ bucket.existing_mode }})
|
||||
</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">
|
||||
Not configured
|
||||
</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="d-flex gap-2 mt-4 pt-3 border-top">
|
||||
<button type="submit" class="btn btn-primary" id="submitBtn" disabled>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Create Replication Rules
|
||||
</button>
|
||||
<a href="{{ url_for('ui.sites_dashboard') }}" class="btn btn-outline-secondary">
|
||||
Skip for Now
|
||||
</a>
|
||||
</div>
|
||||
</form>
|
||||
{% else %}
|
||||
<div class="empty-state text-center py-5">
|
||||
<div class="empty-state-icon mx-auto mb-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<h5 class="fw-semibold mb-2">No buckets yet</h5>
|
||||
<p class="text-muted mb-3">Create some buckets first, then come back to set up replication.</p>
|
||||
<a href="{{ url_for('ui.buckets_overview') }}" class="btn btn-primary">
|
||||
Go to Buckets
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
(function() {
  const selectAllCheckbox = document.getElementById('selectAll');
  const bucketCheckboxes = document.querySelectorAll('.bucket-checkbox:not(:disabled)');
  const submitBtn = document.getElementById('submitBtn');
  const modeSelect = document.getElementById('mode');
  const bidirWarning = document.getElementById('bidirWarning');

  function updateBidirWarning() {
    if (modeSelect && bidirWarning) {
      if (modeSelect.value === 'bidirectional') {
        bidirWarning.classList.remove('d-none');
      } else {
        bidirWarning.classList.add('d-none');
      }
    }
  }

  if (modeSelect) {
    modeSelect.addEventListener('change', updateBidirWarning);
    updateBidirWarning();
  }

  function updateSubmitButton() {
    const checkedCount = document.querySelectorAll('.bucket-checkbox:checked').length;
    if (submitBtn) {
      submitBtn.disabled = checkedCount === 0;
      const text = checkedCount > 0
        ? `Create ${checkedCount} Replication Rule${checkedCount > 1 ? 's' : ''}`
        : 'Create Replication Rules';
      submitBtn.innerHTML = `
        <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
          <path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
        </svg>
        ${text}
      `;
    }
  }

  function updateSelectAll() {
    if (selectAllCheckbox && bucketCheckboxes.length > 0) {
      const allChecked = Array.from(bucketCheckboxes).every(cb => cb.checked);
      const someChecked = Array.from(bucketCheckboxes).some(cb => cb.checked);
      selectAllCheckbox.checked = allChecked;
      selectAllCheckbox.indeterminate = someChecked && !allChecked;
    }
  }

  if (selectAllCheckbox) {
    selectAllCheckbox.addEventListener('change', function() {
      bucketCheckboxes.forEach(cb => {
        cb.checked = this.checked;
      });
      updateSubmitButton();
    });
  }

  bucketCheckboxes.forEach(cb => {
    cb.addEventListener('change', function() {
      updateSelectAll();
      updateSubmitButton();
    });
  });

  updateSelectAll();
  updateSubmitButton();
})();
</script>
{% endblock %}
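
As a usage sketch, this is roughly the form submission the wizard above produces when two buckets are selected in bidirectional mode. The field names (csrf_token, mode, buckets, target_<bucket>) come from the template; the variable name and bucket values are illustrative only:

// Hypothetical reconstruction of the wizard's POST body; the real action URL
// comes from url_for('ui.create_peer_replication_rules', site_id=...).
const body = new URLSearchParams();
body.append('csrf_token', '...');         // injected by the template
body.append('mode', 'bidirectional');     // new_only | all | bidirectional
body.append('buckets', 'photos');
body.append('target_photos', 'photos');   // per-bucket target name
body.append('buckets', 'logs');
body.append('target_logs', 'logs-dr');
fetch(wizardFormAction, { method: 'POST', body });  // wizardFormAction is assumed
// Note: bidirectional mode also needs matching rules created on the peer site
// and SITE_SYNC_ENABLED=true on both sites, per the warning in the template.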
|
||||
python/templates/sites.html (new file, 891 lines)
@@ -0,0 +1,891 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}Sites - S3 Compatible Storage{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<p class="text-uppercase text-muted small mb-1">Geo-Distribution</p>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
|
||||
</svg>
|
||||
Site Registry
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Configure this site's identity and manage peer sites for geo-distribution.</p>
|
||||
</div>
|
||||
<div class="d-none d-md-flex align-items-center gap-2">
|
||||
{% if local_site and local_site.site_id %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary fs-6 px-3 py-2">
|
||||
{{ local_site.site_id }}
|
||||
</span>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 8a.5.5 0 0 1 .5-.5h11.793l-3.147-3.146a.5.5 0 0 1 .708-.708l4 4a.5.5 0 0 1 0 .708l-4 4a.5.5 0 0 1-.708-.708L13.293 8.5H1.5A.5.5 0 0 1 1 8z"/>
|
||||
</svg>
|
||||
{% endif %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
|
||||
{{ peers|length }} peer{{ 's' if peers|length != 1 else '' }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4">
|
||||
<div class="col-lg-4 col-md-5">
|
||||
<div class="card shadow-sm border-0 mb-4" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M8 16s6-5.686 6-10A6 6 0 0 0 2 6c0 4.314 6 10 6 10zm0-7a3 3 0 1 1 0-6 3 3 0 0 1 0 6z"/>
|
||||
</svg>
|
||||
Local Site Identity
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">This site's configuration</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<form method="POST" action="{{ url_for('ui.update_local_site') }}" id="localSiteForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="mb-3">
|
||||
<label for="site_id" class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="site_id" name="site_id" required
|
||||
value="{{ local_site.site_id if local_site else config_site_id or '' }}"
|
||||
placeholder="us-west-1">
|
||||
<div class="form-text">Unique identifier for this site</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="endpoint" name="endpoint"
|
||||
value="{{ local_site.endpoint if local_site else config_site_endpoint or '' }}"
|
||||
placeholder="https://s3.us-west-1.example.com">
|
||||
<div class="form-text">Public URL for this site</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="region" name="region"
|
||||
value="{{ local_site.region if local_site else config_site_region }}">
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="priority" name="priority"
|
||||
value="{{ local_site.priority if local_site else 100 }}" min="0">
|
||||
<div class="form-text">Lower = preferred</div>
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="display_name" name="display_name"
|
||||
value="{{ local_site.display_name if local_site else '' }}"
|
||||
placeholder="US West Primary">
|
||||
</div>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save Local Site
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-3 pb-0 px-4">
|
||||
<button class="btn btn-link text-decoration-none p-0 w-100 d-flex align-items-center justify-content-between"
|
||||
type="button" data-bs-toggle="collapse" data-bs-target="#addPeerCollapse"
|
||||
aria-expanded="false" aria-controls="addPeerCollapse">
|
||||
<span class="d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
|
||||
</svg>
|
||||
<span class="fw-semibold h5 mb-0">Add Peer Site</span>
|
||||
</span>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted add-peer-chevron" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<p class="text-muted small mb-0 mt-1">Register a remote site</p>
|
||||
</div>
|
||||
<div class="collapse" id="addPeerCollapse">
|
||||
<div class="card-body px-4 pb-4">
|
||||
<form method="POST" action="{{ url_for('ui.add_peer_site') }}" id="addPeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="mb-3">
|
||||
<label for="peer_site_id" class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="peer_site_id" name="site_id" required placeholder="us-east-1">
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="peer_endpoint" name="endpoint" required placeholder="https://s3.us-east-1.example.com">
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="peer_region" name="region" value="us-east-1">
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="peer_priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="peer_priority" name="priority" value="100" min="0">
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="peer_display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="peer_display_name" name="display_name" placeholder="US East DR">
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="peer_connection_id" class="form-label fw-medium">Connection</label>
|
||||
<select class="form-select" id="peer_connection_id" name="connection_id">
|
||||
<option value="">No connection</option>
|
||||
{% for conn in connections %}
|
||||
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
<div class="form-text">Link to a remote connection for health checks</div>
|
||||
</div>
|
||||
<div class="d-grid">
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
|
||||
</svg>
|
||||
Add Peer Site
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-8 col-md-7">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4 d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
Peer Sites
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Known remote sites in the cluster</p>
|
||||
</div>
|
||||
{% if peers %}
|
||||
<button type="button" class="btn btn-outline-secondary btn-sm" id="btnCheckAllHealth" title="Check health of all peers">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M11.251.068a.5.5 0 0 1 .227.58L9.677 6.5H13a.5.5 0 0 1 .364.843l-8 8.5a.5.5 0 0 1-.842-.49L6.323 9.5H3a.5.5 0 0 1-.364-.843l8-8.5a.5.5 0 0 1 .615-.09z"/>
|
||||
</svg>
|
||||
Check All
|
||||
</button>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if peers %}
|
||||
<div class="table-responsive">
|
||||
<table class="table table-hover align-middle mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th scope="col" style="width: 50px;">Health</th>
|
||||
<th scope="col">Site ID</th>
|
||||
<th scope="col">Endpoint</th>
|
||||
<th scope="col">Region</th>
|
||||
<th scope="col">Priority</th>
|
||||
<th scope="col">Sync Status</th>
|
||||
<th scope="col" class="text-end">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for item in peers_with_stats %}
|
||||
{% set peer = item.peer %}
|
||||
<tr data-site-id="{{ peer.site_id }}">
|
||||
<td class="text-center">
|
||||
<span class="peer-health-status" data-site-id="{{ peer.site_id }}"
|
||||
data-last-checked="{{ peer.last_health_check or '' }}"
|
||||
title="{% if peer.is_healthy == true %}Healthy{% elif peer.is_healthy == false %}Unhealthy{% else %}Not checked{% endif %}{% if peer.last_health_check %} (checked {{ peer.last_health_check }}){% endif %}"
|
||||
style="cursor: help;">
|
||||
{% if peer.is_healthy == true %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||
</svg>
|
||||
{% elif peer.is_healthy == false %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
|
||||
</svg>
|
||||
{% else %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/>
|
||||
</svg>
|
||||
{% endif %}
|
||||
</span>
|
||||
</td>
|
||||
<td>
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<div class="peer-icon">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<span class="fw-medium">{{ peer.display_name or peer.site_id }}</span>
|
||||
{% if peer.display_name and peer.display_name != peer.site_id %}
|
||||
<br><small class="text-muted">{{ peer.site_id }}</small>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<span class="endpoint-display text-muted small" data-full-url="{{ peer.endpoint }}" title="{{ peer.endpoint }}" style="cursor: pointer;">
|
||||
{% set parsed = peer.endpoint.split('//') %}
|
||||
{% if parsed|length > 1 %}{{ parsed[1].split('/')[0] }}{% else %}{{ peer.endpoint }}{% endif %}
|
||||
</span>
|
||||
<button type="button" class="btn btn-link btn-sm p-0 ms-1 btn-copy-endpoint" data-url="{{ peer.endpoint }}" title="Copy full URL">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"/>
|
||||
<path d="M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</td>
|
||||
<td><span class="text-muted small">{{ peer.region }}</span></td>
|
||||
<td><span class="text-muted small">{{ peer.priority }}</span></td>
|
||||
<td class="sync-stats-cell" data-site-id="{{ peer.site_id }}">
|
||||
{% if item.has_connection %}
|
||||
<div class="d-flex align-items-center gap-2">
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">{{ item.buckets_syncing }} bucket{{ 's' if item.buckets_syncing != 1 else '' }}</span>
|
||||
{% if item.has_bidirectional %}
|
||||
<span class="bidir-status-icon" data-site-id="{{ peer.site_id }}" title="Bidirectional sync - click to verify">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-info" viewBox="0 0 16 16" style="cursor: pointer;">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="sync-stats-detail d-none mt-2 small" id="stats-{{ peer.site_id }}">
|
||||
<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span>
|
||||
</div>
|
||||
{% else %}
|
||||
<a href="#" class="text-muted small link-no-connection"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
title="Click to link a connection">Link a connection</a>
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="text-end">
|
||||
<div class="d-flex align-items-center justify-content-end gap-1">
|
||||
<button type="button" class="btn btn-outline-secondary btn-sm"
|
||||
data-bs-toggle="modal"
|
||||
data-bs-target="#editPeerModal"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-endpoint="{{ peer.endpoint }}"
|
||||
data-region="{{ peer.region }}"
|
||||
data-priority="{{ peer.priority }}"
|
||||
data-display-name="{{ peer.display_name }}"
|
||||
data-connection-id="{{ peer.connection_id or '' }}"
|
||||
title="Edit peer">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<div class="dropdown peer-actions-dropdown">
|
||||
<button class="btn btn-outline-secondary btn-sm" type="button" data-bs-toggle="dropdown" aria-expanded="false" title="More actions">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M3 9.5a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<ul class="dropdown-menu dropdown-menu-end">
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-check-health" data-site-id="{{ peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-warning" viewBox="0 0 16 16">
|
||||
<path d="M11.251.068a.5.5 0 0 1 .227.58L9.677 6.5H13a.5.5 0 0 1 .364.843l-8 8.5a.5.5 0 0 1-.842-.49L6.323 9.5H3a.5.5 0 0 1-.364-.843l8-8.5a.5.5 0 0 1 .615-.09z"/>
|
||||
</svg>
|
||||
Check Health
|
||||
</button>
|
||||
</li>
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-check-bidir {% if not item.has_connection %}disabled{% endif %}"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-display-name="{{ peer.display_name or peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-info" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
Bidirectional Status
|
||||
</button>
|
||||
</li>
|
||||
{% if item.has_connection and item.buckets_syncing > 0 %}
|
||||
<li>
|
||||
<button type="button" class="dropdown-item btn-load-stats" data-site-id="{{ peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Load Sync Stats
|
||||
</button>
|
||||
</li>
|
||||
{% endif %}
|
||||
<li>
|
||||
<a href="{{ url_for('ui.replication_wizard', site_id=peer.site_id) }}"
|
||||
class="dropdown-item {% if not item.has_connection %}disabled{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2 text-primary" viewBox="0 0 16 16">
|
||||
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||
</svg>
|
||||
Replication Wizard
|
||||
</a>
|
||||
</li>
|
||||
<li><hr class="dropdown-divider"></li>
|
||||
<li>
|
||||
<button type="button" class="dropdown-item text-danger"
|
||||
data-bs-toggle="modal"
|
||||
data-bs-target="#deletePeerModal"
|
||||
data-site-id="{{ peer.site_id }}"
|
||||
data-display-name="{{ peer.display_name or peer.site_id }}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-2" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete Peer
|
||||
</button>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state text-center py-5">
|
||||
<div class="empty-state-icon mx-auto mb-3">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<h5 class="fw-semibold mb-2">No peer sites yet</h5>
|
||||
<p class="text-muted mb-0">Add peer sites to enable geo-distribution and site-to-site replication.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="editPeerModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5zm-9.761 5.175-.106.106-1.528 3.821 3.821-1.528.106-.106A.5.5 0 0 1 5 12.5V12h-.5a.5.5 0 0 1-.5-.5V11h-.5a.5.5 0 0 1-.468-.325z"/>
|
||||
</svg>
|
||||
Edit Peer Site
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<form method="POST" id="editPeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<div class="modal-body">
|
||||
<div class="mb-3">
|
||||
<label class="form-label fw-medium">Site ID</label>
|
||||
<input type="text" class="form-control" id="edit_site_id" readonly>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||
<input type="url" class="form-control" id="edit_endpoint" name="endpoint" required>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_region" class="form-label fw-medium">Region</label>
|
||||
<input type="text" class="form-control" id="edit_region" name="region" required>
|
||||
</div>
|
||||
<div class="row mb-3">
|
||||
<div class="col-6">
|
||||
<label for="edit_priority" class="form-label fw-medium">Priority</label>
|
||||
<input type="number" class="form-control" id="edit_priority" name="priority" min="0">
|
||||
</div>
|
||||
<div class="col-6">
|
||||
<label for="edit_display_name" class="form-label fw-medium">Display Name</label>
|
||||
<input type="text" class="form-control" id="edit_display_name" name="display_name">
|
||||
</div>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<label for="edit_connection_id" class="form-label fw-medium">Connection</label>
|
||||
<select class="form-select" id="edit_connection_id" name="connection_id">
|
||||
<option value="">No connection</option>
|
||||
{% for conn in connections %}
|
||||
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||
</svg>
|
||||
Save
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="deletePeerModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete Peer Site
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<p>Are you sure you want to delete <strong id="deletePeerName"></strong>?</p>
|
||||
<div class="alert alert-warning d-flex align-items-start small" role="alert">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
|
||||
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||
</svg>
|
||||
<div>This will remove the peer from the site registry. Any site sync configurations may be affected.</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||
<form method="POST" id="deletePeerForm">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="btn btn-danger">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
Delete
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="modal fade" id="bidirStatusModal" tabindex="-1" aria-hidden="true">
|
||||
<div class="modal-dialog modal-dialog-centered modal-lg">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h5 class="modal-title fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-info me-2" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||
</svg>
|
||||
Bidirectional Sync Status
|
||||
</h5>
|
||||
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div id="bidirStatusContent">
|
||||
<div class="text-center py-4">
|
||||
<span class="spinner-border text-primary" role="status"></span>
|
||||
<p class="text-muted mt-2 mb-0">Checking configuration...</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Close</button>
|
||||
<a href="#" id="bidirWizardLink" class="btn btn-primary d-none">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path d="M9.828.722a.5.5 0 0 1 .354.146l4.95 4.95a.5.5 0 0 1 0 .707c-.48.48-1.072.588-1.503.588-.177 0-.335-.018-.46-.039l-3.134 3.134a5.927 5.927 0 0 1 .16 1.013c.046.702-.032 1.687-.72 2.375a.5.5 0 0 1-.707 0l-2.829-2.828-3.182 3.182c-.195.195-1.219.902-1.414.707-.195-.195.512-1.22.707-1.414l3.182-3.182-2.828-2.829a.5.5 0 0 1 0-.707c.688-.688 1.673-.767 2.375-.72a5.922 5.922 0 0 1 1.013.16l3.134-3.133a2.772 2.772 0 0 1-.04-.461c0-.43.108-1.022.589-1.503a.5.5 0 0 1 .353-.146z"/>
|
||||
</svg>
|
||||
Run Setup Wizard
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
(function() {
  var escapeHtml = window.UICore ? window.UICore.escapeHtml : function(s) { return s; };

  var editPeerModal = document.getElementById('editPeerModal');
  if (editPeerModal) {
    editPeerModal.addEventListener('show.bs.modal', function (event) {
      var button = event.relatedTarget;
      var siteId = button.getAttribute('data-site-id');
      document.getElementById('edit_site_id').value = siteId;
      document.getElementById('edit_endpoint').value = button.getAttribute('data-endpoint');
      document.getElementById('edit_region').value = button.getAttribute('data-region');
      document.getElementById('edit_priority').value = button.getAttribute('data-priority');
      document.getElementById('edit_display_name').value = button.getAttribute('data-display-name');
      document.getElementById('edit_connection_id').value = button.getAttribute('data-connection-id');
      document.getElementById('editPeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/update';
    });
  }

  document.querySelectorAll('.link-no-connection').forEach(function(link) {
    link.addEventListener('click', function(e) {
      e.preventDefault();
      var siteId = this.getAttribute('data-site-id');
      var row = this.closest('tr[data-site-id]');
      if (row) {
        var btn = row.querySelector('.btn[data-bs-target="#editPeerModal"]');
        if (btn) btn.click();
      }
    });
  });

  var deletePeerModal = document.getElementById('deletePeerModal');
  if (deletePeerModal) {
    deletePeerModal.addEventListener('show.bs.modal', function (event) {
      var button = event.relatedTarget;
      var siteId = button.getAttribute('data-site-id');
      var displayName = button.getAttribute('data-display-name');
      document.getElementById('deletePeerName').textContent = displayName;
      document.getElementById('deletePeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/delete';
    });
  }

  function formatTimeAgo(date) {
    var seconds = Math.floor((new Date() - date) / 1000);
    if (seconds < 60) return 'just now';
    var minutes = Math.floor(seconds / 60);
    if (minutes < 60) return minutes + 'm ago';
    var hours = Math.floor(minutes / 60);
    if (hours < 24) return hours + 'h ago';
    return Math.floor(hours / 24) + 'd ago';
  }

  function doHealthCheck(siteId) {
    var row = document.querySelector('tr[data-site-id="' + CSS.escape(siteId) + '"]');
    var statusSpan = row ? row.querySelector('.peer-health-status') : null;
    if (!statusSpan) return Promise.resolve();

    statusSpan.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 14px; height: 14px;"></span>';

    return fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/health')
      .then(function(response) { return response.json(); })
      .then(function(data) {
        var now = new Date();
        statusSpan.setAttribute('data-last-checked', now.toISOString());
        if (data.is_healthy) {
          statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/></svg>';
          statusSpan.title = 'Healthy (checked ' + formatTimeAgo(now) + ')';
          return { siteId: siteId, healthy: true };
        } else {
          statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>';
          statusSpan.title = 'Unhealthy' + (data.error ? ': ' + data.error : '') + ' (checked ' + formatTimeAgo(now) + ')';
          return { siteId: siteId, healthy: false, error: data.error };
        }
      })
      .catch(function(err) {
        statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16"><path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/><path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/></svg>';
        statusSpan.title = 'Check failed';
        return { siteId: siteId, healthy: null };
      });
  }

// Per-row "Check Health" buttons: run one check and report the outcome as a toast.
document.querySelectorAll('.btn-check-health').forEach(function(btn) {
btn.addEventListener('click', function() {
var siteId = this.getAttribute('data-site-id');
doHealthCheck(siteId).then(function(result) {
if (!result) return;
if (result.healthy === true) {
if (window.showToast) window.showToast('Peer site is healthy', 'Health Check', 'success');
} else if (result.healthy === false) {
if (window.showToast) window.showToast(result.error || 'Peer site is unhealthy', 'Health Check', 'error');
} else {
if (window.showToast) window.showToast('Failed to check health', 'Health Check', 'error');
}
});
});
});

// "Check All" fans out one request per row and summarizes the counts once they settle.
var checkAllBtn = document.getElementById('btnCheckAllHealth');
if (checkAllBtn) {
checkAllBtn.addEventListener('click', function() {
var btn = this;
var originalHtml = btn.innerHTML;
btn.disabled = true;
btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1"></span>Checking...';

var siteIds = [];
document.querySelectorAll('.peer-health-status').forEach(function(el) {
siteIds.push(el.getAttribute('data-site-id'));
});

var promises = siteIds.map(function(id) { return doHealthCheck(id); });
Promise.all(promises).then(function(results) {
var healthy = results.filter(function(r) { return r && r.healthy === true; }).length;
var unhealthy = results.filter(function(r) { return r && r.healthy === false; }).length;
var failed = results.filter(function(r) { return r && r.healthy === null; }).length;

var msg = healthy + ' healthy';
if (unhealthy > 0) msg += ', ' + unhealthy + ' unhealthy';
if (failed > 0) msg += ', ' + failed + ' failed';
if (window.showToast) window.showToast(msg, 'Health Check', unhealthy > 0 ? 'warning' : 'success');

btn.disabled = false;
btn.innerHTML = originalHtml;
});
});
}

// Sync statistics are fetched lazily, only when a row's stats button is clicked.
document.querySelectorAll('.btn-load-stats').forEach(function(btn) {
btn.addEventListener('click', function() {
var siteId = this.getAttribute('data-site-id');
var detailDiv = document.getElementById('stats-' + siteId);
if (!detailDiv) return;

detailDiv.classList.remove('d-none');
detailDiv.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span> Loading...';

fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/sync-stats')
.then(function(response) { return response.json(); })
.then(function(data) {
if (data.error) {
detailDiv.innerHTML = '<span class="text-danger">' + escapeHtml(data.error) + '</span>';
} else {
var lastSync = data.last_sync_at
? new Date(data.last_sync_at * 1000).toLocaleString()
: 'Never';
detailDiv.innerHTML =
'<div class="d-flex flex-wrap gap-2 mb-1">' +
'<span class="text-success"><strong>' + escapeHtml(String(data.objects_synced)) + '</strong> synced</span>' +
'<span class="text-warning"><strong>' + escapeHtml(String(data.objects_pending)) + '</strong> pending</span>' +
'<span class="text-danger"><strong>' + escapeHtml(String(data.objects_failed)) + '</strong> failed</span>' +
'</div>' +
'<div class="text-muted" style="font-size: 0.75rem;">Last sync: ' + escapeHtml(lastSync) + '</div>';
}
})
.catch(function() {
detailDiv.innerHTML = '<span class="text-danger">Failed to load stats</span>';
});
});
});

// Clicking the status icon delegates to the row's "check bidirectional" button.
document.querySelectorAll('.bidir-status-icon').forEach(function(icon) {
icon.addEventListener('click', function() {
var row = this.closest('tr[data-site-id]');
var btn = row ? row.querySelector('.btn-check-bidir') : null;
if (btn) btn.click();
});
});

// Bidirectional status check: open the modal with a spinner right away, then
// render the local/remote configuration summary once the API responds.
document.querySelectorAll('.btn-check-bidir').forEach(function(btn) {
btn.addEventListener('click', function() {
var siteId = this.getAttribute('data-site-id');
var displayName = this.getAttribute('data-display-name');
var modal = new bootstrap.Modal(document.getElementById('bidirStatusModal'));
var contentDiv = document.getElementById('bidirStatusContent');
var wizardLink = document.getElementById('bidirWizardLink');

contentDiv.innerHTML =
'<div class="text-center py-4">' +
'<span class="spinner-border text-primary" role="status"></span>' +
'<p class="text-muted mt-2 mb-0">Checking bidirectional configuration with ' + escapeHtml(displayName) + '...</p>' +
'</div>';
wizardLink.classList.add('d-none');
modal.show();

fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/bidirectional-status')
.then(function(response) { return response.json(); })
.then(function(data) {
var html = '';

if (data.is_fully_configured) {
html += '<div class="alert alert-success d-flex align-items-center mb-4" role="alert">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="flex-shrink-0 me-2" viewBox="0 0 16 16">' +
'<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>' +
'</svg>' +
'<div><strong>Bidirectional sync is fully configured!</strong><br><small>Both sites are set up to sync data in both directions.</small></div>' +
'</div>';
} else if (data.issues && data.issues.length > 0) {
var errors = data.issues.filter(function(i) { return i.severity === 'error'; });
var warnings = data.issues.filter(function(i) { return i.severity === 'warning'; });

if (errors.length > 0) {
html += '<div class="alert alert-danger mb-3" role="alert">' +
'<h6 class="alert-heading fw-bold mb-2">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>' +
' Configuration Errors</h6><ul class="mb-0 ps-3">';
errors.forEach(function(issue) {
html += '<li><strong>' + escapeHtml(issue.code) + ':</strong> ' + escapeHtml(issue.message) + '</li>';
});
html += '</ul></div>';
}

if (warnings.length > 0) {
html += '<div class="alert alert-warning mb-3" role="alert">' +
'<h6 class="alert-heading fw-bold mb-2">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16"><path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/></svg>' +
' Warnings</h6><ul class="mb-0 ps-3">';
warnings.forEach(function(issue) {
html += '<li><strong>' + escapeHtml(issue.code) + ':</strong> ' + escapeHtml(issue.message) + '</li>';
});
html += '</ul></div>';
}
}

html += '<div class="row g-3">';
html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>This Site (Local)</strong></div>' +
'<div class="card-body small">' +
'<p class="mb-1"><strong>Site ID:</strong> ' + (data.local_site_id ? escapeHtml(data.local_site_id) : '<span class="text-danger">Not configured</span>') + '</p>' +
'<p class="mb-1"><strong>Endpoint:</strong> ' + (data.local_endpoint ? escapeHtml(data.local_endpoint) : '<span class="text-danger">Not configured</span>') + '</p>' +
'<p class="mb-1"><strong>Site Sync Worker:</strong> ' + (data.local_site_sync_enabled ? '<span class="text-success">Enabled</span>' : '<span class="text-warning">Disabled</span>') + '</p>' +
'<p class="mb-0"><strong>Bidirectional Rules:</strong> ' + (data.local_bidirectional_rules ? data.local_bidirectional_rules.length : 0) + '</p>' +
'</div></div></div>';

if (data.remote_status) {
var rs = data.remote_status;
html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>Remote Site (' + escapeHtml(displayName) + ')</strong></div>' +
'<div class="card-body small">';
if (rs.admin_access_denied) {
html += '<p class="text-warning mb-0">Admin access denied - cannot verify remote configuration</p>';
} else if (rs.reachable === false) {
html += '<p class="text-danger mb-0">Could not reach remote admin API</p>';
} else {
html += '<p class="mb-1"><strong>Has Peer Entry for Us:</strong> ' + (rs.has_peer_for_us ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>') + '</p>' +
'<p class="mb-1"><strong>Connection Configured:</strong> ' + (rs.peer_connection_configured ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>') + '</p>';
}
html += '</div></div></div>';
} else {
html += '<div class="col-md-6"><div class="card h-100"><div class="card-header bg-light py-2"><strong>Remote Site (' + escapeHtml(displayName) + ')</strong></div>' +
'<div class="card-body small"><p class="text-muted mb-0">Could not check remote status</p></div></div></div>';
}
html += '</div>';

if (data.local_bidirectional_rules && data.local_bidirectional_rules.length > 0) {
html += '<div class="mt-3"><h6 class="fw-semibold">Local Bidirectional Rules</h6>' +
'<table class="table table-sm table-bordered mb-0"><thead class="table-light"><tr><th>Source Bucket</th><th>Target Bucket</th><th>Status</th></tr></thead><tbody>';
data.local_bidirectional_rules.forEach(function(rule) {
html += '<tr><td>' + escapeHtml(rule.bucket_name) + '</td><td>' + escapeHtml(rule.target_bucket) + '</td>' +
'<td>' + (rule.enabled ? '<span class="badge bg-success">Enabled</span>' : '<span class="badge bg-secondary">Disabled</span>') + '</td></tr>';
});
html += '</tbody></table></div>';
}

if (!data.is_fully_configured) {
html += '<div class="alert alert-info mt-3 mb-0" role="alert">' +
'<h6 class="alert-heading fw-bold">How to Fix</h6>' +
'<ol class="mb-0 ps-3">' +
'<li>Ensure this site has a Site ID and Endpoint URL configured</li>' +
'<li>On the remote site, register this site as a peer with a connection</li>' +
'<li>Create bidirectional replication rules on both sites</li>' +
'<li>Enable SITE_SYNC_ENABLED=true on both sites</li>' +
'</ol></div>';
// Offer the replication wizard only when no blocking connectivity error was reported.
var blockingErrors = ['NO_CONNECTION', 'CONNECTION_NOT_FOUND', 'REMOTE_UNREACHABLE', 'ENDPOINT_NOT_ALLOWED'];
var hasBlockingError = data.issues && data.issues.some(function(i) { return blockingErrors.indexOf(i.code) !== -1; });
if (!hasBlockingError) {
wizardLink.href = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/replication-wizard';
wizardLink.classList.remove('d-none');
}
}

contentDiv.innerHTML = html;
})
.catch(function(err) {
contentDiv.innerHTML = '<div class="alert alert-danger" role="alert"><strong>Error:</strong> Failed to check bidirectional status. ' + escapeHtml(err.message || '') + '</div>';
});
});
});

// Copy the endpoint URL without triggering the row's other click handlers.
document.querySelectorAll('.btn-copy-endpoint').forEach(function(btn) {
btn.addEventListener('click', function(e) {
e.stopPropagation();
var url = this.getAttribute('data-url');
if (window.UICore && window.UICore.copyToClipboard) {
window.UICore.copyToClipboard(url).then(function(ok) {
if (ok && window.showToast) window.showToast('Endpoint URL copied', 'Copied', 'success');
});
}
});
});

// The forms below submit via AJAX; on success the page reloads (or follows a
// server-provided redirect) shortly after the confirmation toast appears.
var localSiteForm = document.getElementById('localSiteForm');
if (localSiteForm) {
localSiteForm.addEventListener('submit', function(e) {
e.preventDefault();
window.UICore.submitFormAjax(this, {
successMessage: 'Local site configuration updated',
onSuccess: function() {
setTimeout(function() { location.reload(); }, 800);
}
});
});
}

var addPeerForm = document.getElementById('addPeerForm');
if (addPeerForm) {
addPeerForm.addEventListener('submit', function(e) {
e.preventDefault();
window.UICore.submitFormAjax(this, {
successMessage: 'Peer site added',
onSuccess: function(data) {
if (data.redirect) {
setTimeout(function() { window.location.href = data.redirect; }, 800);
} else {
setTimeout(function() { location.reload(); }, 800);
}
}
});
});
}

var editPeerForm = document.getElementById('editPeerForm');
if (editPeerForm) {
editPeerForm.addEventListener('submit', function(e) {
e.preventDefault();
var modal = bootstrap.Modal.getInstance(document.getElementById('editPeerModal'));
window.UICore.submitFormAjax(this, {
successMessage: 'Peer site updated',
onSuccess: function() {
if (modal) modal.hide();
setTimeout(function() { location.reload(); }, 800);
}
});
});
}

var deletePeerForm = document.getElementById('deletePeerForm');
if (deletePeerForm) {
deletePeerForm.addEventListener('submit', function(e) {
e.preventDefault();
var modal = bootstrap.Modal.getInstance(document.getElementById('deletePeerModal'));
window.UICore.submitFormAjax(this, {
successMessage: 'Peer site deleted',
onSuccess: function() {
if (modal) modal.hide();
setTimeout(function() { location.reload(); }, 800);
}
});
});
}
// The action menus are rendered position:fixed (see CSS below) so they are not
// clipped by the table's overflow; re-anchor each menu to its toggle when shown.
document.querySelectorAll('.peer-actions-dropdown').forEach(function(dd) {
dd.addEventListener('shown.bs.dropdown', function() {
var toggle = dd.querySelector('[data-bs-toggle="dropdown"]');
var menu = dd.querySelector('.dropdown-menu');
if (!toggle || !menu) return;
var rect = toggle.getBoundingClientRect();
menu.style.top = rect.bottom + 'px';
menu.style.left = (rect.right - menu.offsetWidth) + 'px';
});
});
})();
</script>

<style>
.add-peer-chevron {
transition: transform 0.2s ease;
}
[aria-expanded="true"] .add-peer-chevron {
transform: rotate(180deg);
}
.endpoint-display:hover {
text-decoration: underline;
}
.peer-actions-dropdown .dropdown-menu {
position: fixed !important;
inset: auto !important;
transform: none !important;
}
</style>
{% endblock %}

750
python/templates/system.html
Normal file
@@ -0,0 +1,750 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}System - MyFSIO Console{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="page-header d-flex justify-content-between align-items-center mb-4">
|
||||
<div>
|
||||
<p class="text-uppercase text-muted small mb-1">Administration</p>
|
||||
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
System
|
||||
</h1>
|
||||
<p class="text-muted mb-0 mt-1">Server information, feature flags, and maintenance tools.</p>
|
||||
</div>
|
||||
<div class="d-none d-md-block">
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">v{{ app_version }}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4 mb-4">
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M5 0a.5.5 0 0 1 .5.5V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2A2.5 2.5 0 0 1 14 4.5h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14a2.5 2.5 0 0 1-2.5 2.5v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14A2.5 2.5 0 0 1 2 11.5H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2A2.5 2.5 0 0 1 4.5 2V.5A.5.5 0 0 1 5 0zm-.5 3A1.5 1.5 0 0 0 3 4.5v7A1.5 1.5 0 0 0 4.5 13h7a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 11.5 3h-7zM5 6.5A1.5 1.5 0 0 1 6.5 5h3A1.5 1.5 0 0 1 11 6.5v3A1.5 1.5 0 0 1 9.5 11h-3A1.5 1.5 0 0 1 5 9.5v-3zM6.5 6a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
|
||||
</svg>
|
||||
Server Information
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Runtime environment and configuration</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<table class="table table-sm mb-0">
|
||||
<tbody>
|
||||
<tr><td class="text-muted" style="width:40%">Version</td><td class="fw-medium">{{ app_version }}</td></tr>
|
||||
<tr><td class="text-muted">Storage Root</td><td><code>{{ storage_root }}</code></td></tr>
|
||||
<tr><td class="text-muted">Platform</td><td>{{ platform }}</td></tr>
|
||||
<tr><td class="text-muted">Python</td><td>{{ python_version }}</td></tr>
|
||||
<tr><td class="text-muted">Rust Extension</td><td>
|
||||
{% if has_rust %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Loaded</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Not loaded</span>
|
||||
{% endif %}
|
||||
</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M11.5 2a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM9.05 3a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0V3h9.05zM4.5 7a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM2.05 8a2.5 2.5 0 0 1 4.9 0H16v1H6.95a2.5 2.5 0 0 1-4.9 0H0V8h2.05zm9.45 4a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zm-2.45 1a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0v-1h9.05z"/>
|
||||
</svg>
|
||||
Feature Flags
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Features configured via environment variables</p>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
<table class="table table-sm mb-0">
|
||||
<tbody>
|
||||
{% for feat in features %}
|
||||
<tr>
|
||||
<td class="text-muted" style="width:55%">{{ feat.label }}</td>
|
||||
<td class="text-end">
|
||||
{% if feat.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Enabled</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row g-4 mb-4">
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
|
||||
</svg>
|
||||
Garbage Collection
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Clean up temporary files, orphaned uploads, and stale locks</p>
|
||||
</div>
|
||||
<div>
|
||||
{% if gc_status.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if gc_status.enabled %}
|
||||
<div class="d-flex gap-2 mb-3">
|
||||
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="gcRunBtn" onclick="runGC(false)">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Run Now
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" id="gcDryRunBtn" onclick="runGC(true)">
|
||||
Dry Run
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
|
||||
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
|
||||
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
|
||||
<span>GC in progress<span id="gcScanElapsed"></span></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="gcResult" class="mb-3 d-none">
|
||||
<div class="alert mb-0 small" id="gcResultAlert">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div class="fw-semibold mb-1" id="gcResultTitle"></div>
|
||||
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('gcResult').classList.add('d-none')"></button>
|
||||
</div>
|
||||
<div id="gcResultBody"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
|
||||
<div class="d-flex align-items-center gap-2 mb-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="small fw-semibold text-muted">Configuration</span>
|
||||
</div>
|
||||
<div class="row small">
|
||||
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ gc_status.interval_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if gc_status.dry_run else "No" }}</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Temp max age:</span> {{ gc_status.temp_file_max_age_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Lock max age:</span> {{ gc_status.lock_file_max_age_hours }}h</div>
|
||||
<div class="col-6"><span class="text-muted">Multipart max age:</span> {{ gc_status.multipart_max_age_days }}d</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="gcHistoryContainer">
|
||||
{% if gc_history %}
|
||||
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
|
||||
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
|
||||
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
|
||||
</svg>
|
||||
Recent Executions
|
||||
</h6>
|
||||
<div class="table-responsive">
|
||||
<table class="table table-sm small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Time</th>
|
||||
<th class="text-center">Cleaned</th>
|
||||
<th class="text-center">Freed</th>
|
||||
<th class="text-center">Mode</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for exec in gc_history %}
|
||||
<tr>
|
||||
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
|
||||
<td class="text-center">
|
||||
{% set r = exec.result %}
|
||||
{{ (r.temp_files_deleted|d(0)) + (r.multipart_uploads_deleted|d(0)) + (r.lock_files_deleted|d(0)) + (r.orphaned_metadata_deleted|d(0)) + (r.orphaned_versions_deleted|d(0)) + (r.empty_dirs_removed|d(0)) }}
|
||||
</td>
|
||||
<td class="text-center">{{ exec.bytes_freed_display }}</td>
|
||||
<td class="text-center">
|
||||
{% if exec.dry_run %}
|
||||
<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>
|
||||
{% else %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="text-center py-2">
|
||||
<p class="text-muted small mb-0">No executions recorded yet.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
{% else %}
|
||||
<div class="text-center py-4">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
|
||||
</svg>
|
||||
<p class="text-muted mb-1">Garbage collection is not enabled.</p>
|
||||
<p class="text-muted small mb-0">Set <code>GC_ENABLED=true</code> to enable automatic cleanup.</p>
|
||||
</div>
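{# A minimal sketch of enabling the collector via environment, e.g. in a
dotenv-style file loaded at startup (the file location is deployment-specific;
only the GC_ENABLED flag itself is taken from this page):
GC_ENABLED=true
#}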
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-lg-6">
|
||||
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
|
||||
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div>
|
||||
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
|
||||
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
Integrity Scanner
|
||||
</h5>
|
||||
<p class="text-muted small mb-0">Detect and heal corrupted objects, orphaned files, and metadata drift</p>
|
||||
</div>
|
||||
<div>
|
||||
{% if integrity_status.enabled %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
|
||||
{% else %}
|
||||
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body px-4 pb-4">
|
||||
{% if integrity_status.enabled %}
|
||||
<div class="d-flex gap-2 flex-wrap mb-3">
|
||||
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||
</svg>
|
||||
Scan Now
|
||||
</button>
|
||||
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
Scan & Heal
|
||||
</button>
|
||||
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
|
||||
Dry Run
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
|
||||
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
|
||||
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
|
||||
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="integrityResult" class="mb-3 d-none">
|
||||
<div class="alert mb-0 small" id="integrityResultAlert">
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div class="fw-semibold mb-1" id="integrityResultTitle"></div>
|
||||
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('integrityResult').classList.add('d-none')"></button>
|
||||
</div>
|
||||
<div id="integrityResultBody"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
|
||||
<div class="d-flex align-items-center gap-2 mb-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
|
||||
</svg>
|
||||
<span class="small fw-semibold text-muted">Configuration</span>
|
||||
</div>
|
||||
<div class="row small">
|
||||
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ integrity_status.interval_hours }}h</div>
|
||||
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if integrity_status.dry_run else "No" }}</div>
|
||||
<div class="col-6"><span class="text-muted">Batch size:</span> {{ integrity_status.batch_size }}</div>
|
||||
<div class="col-6"><span class="text-muted">Auto-heal:</span> {{ "Yes" if integrity_status.auto_heal else "No" }}</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="integrityHistoryContainer">
|
||||
{% if integrity_history %}
|
||||
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
|
||||
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
|
||||
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
|
||||
</svg>
|
||||
Recent Scans
|
||||
</h6>
|
||||
<div class="table-responsive">
|
||||
<table class="table table-sm small mb-0">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Time</th>
|
||||
<th class="text-center">Scanned</th>
|
||||
<th class="text-center">Issues</th>
|
||||
<th class="text-center">Healed</th>
|
||||
<th class="text-center">Mode</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for exec in integrity_history %}
|
||||
<tr>
|
||||
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
|
||||
<td class="text-center">{{ exec.result.objects_scanned|d(0) }}</td>
|
||||
<td class="text-center">
|
||||
{% set total_issues = (exec.result.corrupted_objects|d(0)) + (exec.result.orphaned_objects|d(0)) + (exec.result.phantom_metadata|d(0)) + (exec.result.stale_versions|d(0)) + (exec.result.etag_cache_inconsistencies|d(0)) + (exec.result.legacy_metadata_drifts|d(0)) %}
|
||||
{% if total_issues > 0 %}
|
||||
<span class="text-danger fw-medium">{{ total_issues }}</span>
|
||||
{% else %}
|
||||
<span class="text-success">0</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="text-center">{{ exec.result.issues_healed|d(0) }}</td>
|
||||
<td class="text-center">
|
||||
{% if exec.dry_run %}
|
||||
<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>
|
||||
{% elif exec.auto_heal %}
|
||||
<span class="badge bg-success bg-opacity-10 text-success">Heal</span>
|
||||
{% else %}
|
||||
<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="text-center py-2">
|
||||
<p class="text-muted small mb-0">No scans recorded yet.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
{% else %}
|
||||
<div class="text-center py-4">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
|
||||
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
|
||||
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
<p class="text-muted mb-1">Integrity scanner is not enabled.</p>
|
||||
<p class="text-muted small mb-0">Set <code>INTEGRITY_ENABLED=true</code> to enable automatic scanning.</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
<script>
|
||||
(function () {
|
||||
var csrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
|
||||
|
||||
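// Disable/enable a button; unless spinnerOnly is set, also swap its label for a spinner while loading.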
function setLoading(btnId, loading, spinnerOnly) {
|
||||
var btn = document.getElementById(btnId);
|
||||
if (!btn) return;
|
||||
btn.disabled = loading;
|
||||
if (loading && !spinnerOnly) {
|
||||
btn.dataset.originalHtml = btn.innerHTML;
|
||||
btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status"></span>Running...';
|
||||
} else if (!loading && btn.dataset.originalHtml) {
|
||||
btn.innerHTML = btn.dataset.originalHtml;
|
||||
}
|
||||
}
|
||||
|
||||
// Human-readable size: 0 B, 1.5 KB, ... (falsy input is treated as zero).
function formatBytes(bytes) {
if (!bytes) return '0 B';
var units = ['B', 'KB', 'MB', 'GB'];
var i = 0;
var b = bytes;
while (b >= 1024 && i < units.length - 1) { b /= 1024; i++; }
return (i === 0 ? b : b.toFixed(1)) + ' ' + units[i];
}
|
||||
|
||||
var _displayTimezone = {{ display_timezone|tojson }};
|
||||
|
||||
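// Format a unix timestamp in the configured display timezone, falling back to
// manual UTC formatting if the timezone name is not supported by the browser.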
function formatTimestamp(ts) {
|
||||
var d = new Date(ts * 1000);
|
||||
try {
|
||||
var opts = {year: 'numeric', month: 'short', day: '2-digit', hour: '2-digit', minute: '2-digit', hour12: false, timeZone: _displayTimezone, timeZoneName: 'short'};
|
||||
return d.toLocaleString('en-US', opts);
|
||||
} catch (e) {
|
||||
var pad = function (n) { return n < 10 ? '0' + n : '' + n; };
|
||||
return d.getUTCFullYear() + '-' + pad(d.getUTCMonth() + 1) + '-' + pad(d.getUTCDate()) +
|
||||
' ' + pad(d.getUTCHours()) + ':' + pad(d.getUTCMinutes()) + ' UTC';
|
||||
}
|
||||
}
|
||||
|
||||
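// Clock-history icon shared by the "Recent Executions" and "Recent Scans" headers.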
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
|
||||
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
|
||||
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
|
||||
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
|
||||
|
||||
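// Re-render the GC "Recent Executions" table from the history endpoint.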
function _gcRefreshHistory() {
|
||||
fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (hist) {
|
||||
var container = document.getElementById('gcHistoryContainer');
|
||||
if (!container) return;
|
||||
var execs = hist.executions || [];
|
||||
if (execs.length === 0) {
|
||||
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
|
||||
return;
|
||||
}
|
||||
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
|
||||
_gcHistoryIcon + ' Recent Executions</h6>' +
|
||||
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
|
||||
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
|
||||
'<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
|
||||
execs.forEach(function (exec) {
|
||||
var r = exec.result || {};
|
||||
var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
|
||||
(r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
|
||||
(r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
|
||||
var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
|
||||
(r.orphaned_version_bytes_freed || 0);
|
||||
var mode = exec.dry_run
|
||||
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
|
||||
: '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
|
||||
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
|
||||
'<td class="text-center">' + cleaned + '</td>' +
|
||||
'<td class="text-center">' + formatBytes(freed) + '</td>' +
|
||||
'<td class="text-center">' + mode + '</td></tr>';
|
||||
});
|
||||
html += '</tbody></table></div>';
|
||||
container.innerHTML = html;
|
||||
})
|
||||
.catch(function () {});
|
||||
}
|
||||
|
||||
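// Re-render the integrity "Recent Scans" table from the history endpoint.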
function _integrityRefreshHistory() {
|
||||
fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (hist) {
|
||||
var container = document.getElementById('integrityHistoryContainer');
|
||||
if (!container) return;
|
||||
var execs = hist.executions || [];
|
||||
if (execs.length === 0) {
|
||||
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
|
||||
return;
|
||||
}
|
||||
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
|
||||
_gcHistoryIcon + ' Recent Scans</h6>' +
|
||||
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
|
||||
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
|
||||
'<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
|
||||
'<th class="text-center">Mode</th></tr></thead><tbody>';
|
||||
execs.forEach(function (exec) {
|
||||
var r = exec.result || {};
|
||||
var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
|
||||
(r.phantom_metadata || 0) + (r.stale_versions || 0) +
|
||||
(r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
|
||||
var issueHtml = issues > 0
|
||||
? '<span class="text-danger fw-medium">' + issues + '</span>'
|
||||
: '<span class="text-success">0</span>';
|
||||
var mode = exec.dry_run
|
||||
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
|
||||
: (exec.auto_heal
|
||||
? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
|
||||
: '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
|
||||
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
|
||||
'<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
|
||||
'<td class="text-center">' + issueHtml + '</td>' +
|
||||
'<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
|
||||
'<td class="text-center">' + mode + '</td></tr>';
|
||||
});
|
||||
html += '</tbody></table></div>';
|
||||
container.innerHTML = html;
|
||||
})
|
||||
.catch(function () {});
|
||||
}
|
||||
|
||||
var _gcPollTimer = null;
|
||||
var _gcLastDryRun = false;
|
||||
|
||||
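// Show or hide the GC progress banner and lock the run buttons while a cycle is active.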
function _gcSetScanning(scanning) {
|
||||
var banner = document.getElementById('gcScanningBanner');
|
||||
var btns = ['gcRunBtn', 'gcDryRunBtn'];
|
||||
if (scanning) {
|
||||
banner.classList.remove('d-none');
|
||||
btns.forEach(function (id) {
|
||||
var el = document.getElementById(id);
|
||||
if (el) el.disabled = true;
|
||||
});
|
||||
} else {
|
||||
banner.classList.add('d-none');
|
||||
document.getElementById('gcScanElapsed').textContent = '';
|
||||
btns.forEach(function (id) {
|
||||
var el = document.getElementById(id);
|
||||
if (el) el.disabled = false;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
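// Summarize a finished GC run: one line per category that actually cleaned
// something, plus total bytes freed and any reported errors.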
function _gcShowResult(data, dryRun) {
|
||||
var container = document.getElementById('gcResult');
|
||||
var alert = document.getElementById('gcResultAlert');
|
||||
var title = document.getElementById('gcResultTitle');
|
||||
var body = document.getElementById('gcResultBody');
|
||||
container.classList.remove('d-none');
|
||||
|
||||
var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
|
||||
(data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
|
||||
(data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
|
||||
var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
|
||||
(data.orphaned_version_bytes_freed || 0);
|
||||
|
||||
alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
|
||||
title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
|
||||
|
||||
var lines = [];
|
||||
if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
|
||||
if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
|
||||
if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
|
||||
if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
|
||||
if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
|
||||
if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
|
||||
if (totalItems === 0) lines.push('Nothing to clean up.');
|
||||
if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
|
||||
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
|
||||
|
||||
body.innerHTML = lines.join('<br>');
|
||||
}
|
||||
|
||||
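// Poll GC status every 2s while a run is in flight; once it finishes, refresh
// the history table and surface the latest execution's result in the alert box.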
function _gcPoll() {
|
||||
fetch('{{ url_for("ui.system_gc_status") }}', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (status) {
|
||||
if (status.scanning) {
|
||||
var elapsed = status.scan_elapsed_seconds || 0;
|
||||
document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
|
||||
_gcPollTimer = setTimeout(_gcPoll, 2000);
|
||||
} else {
|
||||
_gcSetScanning(false);
|
||||
_gcRefreshHistory();
|
||||
fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (hist) {
|
||||
if (hist.executions && hist.executions.length > 0) {
|
||||
var latest = hist.executions[0];
|
||||
_gcShowResult(latest.result, latest.dry_run);
|
||||
}
|
||||
})
|
||||
.catch(function () {});
|
||||
}
|
||||
})
|
||||
.catch(function () {
|
||||
_gcPollTimer = setTimeout(_gcPoll, 3000);
|
||||
});
|
||||
}
|
||||
|
||||
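// Kick off a GC run (optionally as a dry run); progress and results arrive via polling.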
window.runGC = function (dryRun) {
|
||||
_gcLastDryRun = dryRun;
|
||||
document.getElementById('gcResult').classList.add('d-none');
|
||||
_gcSetScanning(true);
|
||||
|
||||
fetch('{{ url_for("ui.system_gc_run") }}', {
|
||||
method: 'POST',
|
||||
headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
|
||||
body: JSON.stringify({dry_run: dryRun})
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (data) {
|
||||
if (data.error) {
|
||||
_gcSetScanning(false);
|
||||
var container = document.getElementById('gcResult');
|
||||
var alert = document.getElementById('gcResultAlert');
|
||||
var title = document.getElementById('gcResultTitle');
|
||||
var body = document.getElementById('gcResultBody');
|
||||
container.classList.remove('d-none');
|
||||
alert.className = 'alert alert-danger mb-0 small';
|
||||
title.textContent = 'Error';
|
||||
body.textContent = data.error;
|
||||
return;
|
||||
}
|
||||
_gcPollTimer = setTimeout(_gcPoll, 2000);
|
||||
})
|
||||
.catch(function (err) {
|
||||
_gcSetScanning(false);
|
||||
var container = document.getElementById('gcResult');
|
||||
var alert = document.getElementById('gcResultAlert');
|
||||
var title = document.getElementById('gcResultTitle');
|
||||
var body = document.getElementById('gcResultBody');
|
||||
container.classList.remove('d-none');
|
||||
alert.className = 'alert alert-danger mb-0 small';
|
||||
title.textContent = 'Error';
|
||||
body.textContent = err.message;
|
||||
});
|
||||
};
|
||||
|
||||
{% if gc_status.scanning %}
|
||||
_gcSetScanning(true);
|
||||
_gcPollTimer = setTimeout(_gcPoll, 2000);
|
||||
{% endif %}
|
||||
|
||||
var _integrityPollTimer = null;
|
||||
var _integrityLastMode = {dryRun: false, autoHeal: false};
|
||||
|
||||
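// Integrity counterpart of _gcSetScanning: banner plus locking for the three scan buttons.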
function _integritySetScanning(scanning) {
|
||||
var banner = document.getElementById('integrityScanningBanner');
|
||||
var btns = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
|
||||
if (scanning) {
|
||||
banner.classList.remove('d-none');
|
||||
btns.forEach(function (id) {
|
||||
var el = document.getElementById(id);
|
||||
if (el) el.disabled = true;
|
||||
});
|
||||
} else {
|
||||
banner.classList.add('d-none');
|
||||
document.getElementById('integrityScanElapsed').textContent = '';
|
||||
btns.forEach(function (id) {
|
||||
var el = document.getElementById(id);
|
||||
if (el) el.disabled = false;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
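// Summarize a finished integrity scan; any detected issues switch the alert to warning styling.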
function _integrityShowResult(data, dryRun, autoHeal) {
|
||||
var container = document.getElementById('integrityResult');
|
||||
var alert = document.getElementById('integrityResultAlert');
|
||||
var title = document.getElementById('integrityResultTitle');
|
||||
var body = document.getElementById('integrityResultBody');
|
||||
container.classList.remove('d-none');
|
||||
|
||||
var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
|
||||
(data.phantom_metadata || 0) + (data.stale_versions || 0) +
|
||||
(data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);
|
||||
|
||||
var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
|
||||
alert.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
|
||||
title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
|
||||
|
||||
var lines = [];
|
||||
lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
|
||||
if (totalIssues === 0) {
|
||||
lines.push('No issues found.');
|
||||
} else {
|
||||
if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
|
||||
if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
|
||||
if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
|
||||
if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
|
||||
if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
|
||||
if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
|
||||
if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
|
||||
}
|
||||
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
|
||||
|
||||
body.innerHTML = lines.join('<br>');
|
||||
}
|
||||
|
||||
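// Same polling loop as GC: check scan status every 2s, then refresh history and show the latest result.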
function _integrityPoll() {
|
||||
fetch('{{ url_for("ui.system_integrity_status") }}', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (status) {
|
||||
if (status.scanning) {
|
||||
var elapsed = status.scan_elapsed_seconds || 0;
|
||||
document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
|
||||
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
|
||||
} else {
|
||||
_integritySetScanning(false);
|
||||
_integrityRefreshHistory();
|
||||
fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
|
||||
headers: {'X-CSRFToken': csrfToken}
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (hist) {
|
||||
if (hist.executions && hist.executions.length > 0) {
|
||||
var latest = hist.executions[0];
|
||||
_integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
|
||||
}
|
||||
})
|
||||
.catch(function () {});
|
||||
}
|
||||
})
|
||||
.catch(function () {
|
||||
_integrityPollTimer = setTimeout(_integrityPoll, 3000);
|
||||
});
|
||||
}
|
||||
|
||||
window.runIntegrity = function (dryRun, autoHeal) {
|
||||
_integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
|
||||
document.getElementById('integrityResult').classList.add('d-none');
|
||||
_integritySetScanning(true);
|
||||
|
||||
fetch('{{ url_for("ui.system_integrity_run") }}', {
|
||||
method: 'POST',
|
||||
headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
|
||||
body: JSON.stringify({dry_run: dryRun, auto_heal: autoHeal})
|
||||
})
|
||||
.then(function (r) { return r.json(); })
|
||||
.then(function (data) {
|
||||
if (data.error) {
|
||||
_integritySetScanning(false);
|
||||
var container = document.getElementById('integrityResult');
|
||||
var alert = document.getElementById('integrityResultAlert');
|
||||
var title = document.getElementById('integrityResultTitle');
|
||||
var body = document.getElementById('integrityResultBody');
|
||||
container.classList.remove('d-none');
|
||||
alert.className = 'alert alert-danger mb-0 small';
|
||||
title.textContent = 'Error';
|
||||
body.textContent = data.error;
|
||||
return;
|
||||
}
|
||||
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
|
||||
})
|
||||
.catch(function (err) {
|
||||
_integritySetScanning(false);
|
||||
var container = document.getElementById('integrityResult');
|
||||
var alert = document.getElementById('integrityResultAlert');
|
||||
var title = document.getElementById('integrityResultTitle');
|
||||
var body = document.getElementById('integrityResultBody');
|
||||
container.classList.remove('d-none');
|
||||
alert.className = 'alert alert-danger mb-0 small';
|
||||
title.textContent = 'Error';
|
||||
body.textContent = err.message;
|
||||
});
|
||||
};
|
||||
|
||||
{% if integrity_status.scanning %}
|
||||
_integritySetScanning(true);
|
||||
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
|
||||
{% endif %}
|
||||
})();
|
||||
</script>
|
||||
{% endblock %}
|
||||
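The run/poll handlers above share one pattern: POST to start the scan, poll the status endpoint every couple of seconds until `scanning` goes false, then fetch the newest history entry for display. A minimal Python sketch of the same loop, for scripting against the console; the endpoint paths are assumptions standing in for the template's `url_for` targets, and `requests` is an external dependency:

import time
import requests

def run_integrity_scan(base_url, csrf_token, dry_run=False, auto_heal=False, timeout=300):
    """Poll-until-done sketch mirroring the template's setTimeout loop."""
    headers = {"Content-Type": "application/json", "X-CSRFToken": csrf_token}
    # Assumed path for ui.system_integrity_run; substitute the real route.
    requests.post(base_url + "/system/integrity/run",
                  json={"dry_run": dry_run, "auto_heal": auto_heal},
                  headers=headers).raise_for_status()
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        # Assumed path for ui.system_integrity_status.
        status = requests.get(base_url + "/system/integrity/status",
                              headers={"X-CSRFToken": csrf_token}).json()
        if not status.get("scanning"):
            # Mirrors the JS: once scanning stops, read the newest history entry.
            hist = requests.get(base_url + "/system/integrity/history?limit=1",
                                headers={"X-CSRFToken": csrf_token}).json()
            executions = hist.get("executions") or []
            return executions[0] if executions else None
        time.sleep(2)
    raise TimeoutError("integrity scan did not finish in time")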
python/templates/website_domains.html (new file, 367 lines)
@@ -0,0 +1,367 @@
{% extends "base.html" %}

{% block title %}Website Domains - MyFSIO Console{% endblock %}

{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
  <div>
    <p class="text-uppercase text-muted small mb-1">Website Hosting</p>
    <h1 class="h3 mb-1 d-flex align-items-center gap-2">
      <svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
        <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
      </svg>
      Domain Mappings
    </h1>
    <p class="text-muted mb-0 mt-1">Map custom domains to buckets for static website hosting.</p>
  </div>
  <div class="d-none d-md-block">
    <span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
      {{ mappings|length }} mapping{{ 's' if mappings|length != 1 else '' }}
    </span>
  </div>
</div>

<div class="row g-4">
  <div class="col-lg-4 col-md-5">
    <div class="card shadow-sm border-0" style="border-radius: 1rem;">
      <div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
        <h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
            <path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
          </svg>
          Add Domain Mapping
        </h5>
        <p class="text-muted small mb-0">Point a custom domain to a bucket</p>
      </div>
      <div class="card-body px-4 pb-4">
        <form method="POST" action="{{ url_for('ui.create_website_domain') }}" id="createDomainForm">
          <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
          <div class="mb-3">
            <label for="domain" class="form-label fw-medium">Domain</label>
            <input type="text" class="form-control" id="domain" name="domain" required
                   placeholder="www.example.com"
                   pattern="^[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?)*$"
                   title="Enter a valid hostname (e.g. www.example.com). Do not include http:// or trailing slashes.">
            <div class="form-text">Hostname only — no <code>http://</code> prefix or trailing slash.</div>
            <div class="invalid-feedback">Enter a valid hostname like www.example.com</div>
          </div>
          <div id="domainPreview" class="alert alert-light border small py-2 px-3 mb-3 d-none">
            <span class="text-muted">Will be accessible at:</span>
            <code id="domainPreviewUrl" class="ms-1"></code>
          </div>
          <div class="mb-3">
            <label for="bucket" class="form-label fw-medium">Bucket</label>
            {% if buckets %}
            <select class="form-select" id="bucket" name="bucket" required>
              <option value="" selected disabled>Select a bucket</option>
              {% for b in buckets %}
              <option value="{{ b }}">{{ b }}</option>
              {% endfor %}
            </select>
            {% else %}
            <input type="text" class="form-control" id="bucket" name="bucket" required placeholder="my-site-bucket">
            {% endif %}
            <div class="form-text">The bucket must have website hosting enabled.</div>
          </div>
          <div class="d-grid">
            <button type="submit" class="btn btn-primary" id="addMappingBtn">
              <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
                <path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
              </svg>
              Add Mapping
            </button>
          </div>
        </form>
      </div>
    </div>

    <div class="card shadow-sm border-0 mt-4" style="border-radius: 1rem;">
      <div class="card-body px-4 py-3">
        <h6 class="fw-semibold mb-2 d-flex align-items-center gap-2">
          <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
            <path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
          </svg>
          How it works
        </h6>
        <ol class="small text-muted mb-0 ps-3">
          <li class="mb-1">Enable website hosting on a bucket (Properties tab)</li>
          <li class="mb-1">Create a domain mapping here</li>
          <li>Point your DNS (A/CNAME) to this server</li>
        </ol>
      </div>
    </div>
  </div>

  <div class="col-lg-8 col-md-7">
    <div class="card shadow-sm border-0" style="border-radius: 1rem;">
      <div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
        <div class="d-flex justify-content-between align-items-center mb-1">
          <h5 class="fw-semibold d-flex align-items-center gap-2 mb-0">
            <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
              <path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
              <path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
            </svg>
            Active Mappings
          </h5>
        </div>
        <p class="text-muted small mb-0">Domains currently serving website content</p>
        {% if mappings|length > 3 %}
        <div class="mt-3">
          <div class="search-input-wrapper">
            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="search-icon" viewBox="0 0 16 16">
              <path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
            </svg>
            <input type="text" class="form-control" id="domainSearch" placeholder="Filter by domain or bucket..." autocomplete="off" />
          </div>
        </div>
        {% endif %}
      </div>
      <div class="card-body px-4 pb-4">
        {% if mappings %}
        <div class="table-responsive">
          <table class="table table-hover align-middle mb-0" id="domainTable">
            <thead class="table-light">
              <tr>
                <th scope="col">Domain</th>
                <th scope="col">Bucket</th>
                <th scope="col" class="text-end">Actions</th>
              </tr>
            </thead>
            <tbody>
              {% for m in mappings %}
              <tr data-domain="{{ m.domain }}" data-bucket="{{ m.bucket }}">
                <td>
                  <div class="d-flex align-items-center gap-2">
                    <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
                      <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
                    </svg>
                    <div>
                      <code class="fw-medium">{{ m.domain }}</code>
                      <div class="text-muted small">http://{{ m.domain }}</div>
                    </div>
                  </div>
                </td>
                <td><span class="badge bg-primary bg-opacity-10 text-primary">{{ m.bucket }}</span></td>
                <td class="text-end">
                  <div class="btn-group btn-group-sm" role="group">
                    <button type="button" class="btn btn-outline-secondary"
                            data-bs-toggle="modal"
                            data-bs-target="#editDomainModal"
                            data-domain="{{ m.domain }}"
                            data-bucket="{{ m.bucket }}"
                            title="Edit mapping">
                      <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
                        <path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
                      </svg>
                    </button>
                    <button type="button" class="btn btn-outline-danger"
                            data-bs-toggle="modal"
                            data-bs-target="#deleteDomainModal"
                            data-domain="{{ m.domain }}"
                            data-bucket="{{ m.bucket }}"
                            title="Delete mapping">
                      <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
                        <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                        <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                      </svg>
                    </button>
                  </div>
                </td>
              </tr>
              {% endfor %}
            </tbody>
          </table>
        </div>
        <div id="noSearchResults" class="text-center py-4 d-none">
          <p class="text-muted mb-0">No mappings match your search.</p>
        </div>
        {% else %}
        <div class="empty-state text-center py-5">
          <div class="empty-state-icon mx-auto mb-3">
            <svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
              <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
            </svg>
          </div>
          <h5 class="fw-semibold mb-2">No domain mappings yet</h5>
          <p class="text-muted mb-0">Add your first domain mapping to serve a bucket as a static website.</p>
        </div>
        {% endif %}
      </div>
    </div>
  </div>
</div>

<div class="modal fade" id="editDomainModal" tabindex="-1" aria-hidden="true">
  <div class="modal-dialog modal-dialog-centered">
    <div class="modal-content">
      <div class="modal-header border-0 pb-0">
        <h5 class="modal-title fw-semibold">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
            <path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5zm-9.761 5.175-.106.106-1.528 3.821 3.821-1.528.106-.106A.5.5 0 0 1 5 12.5V12h-.5a.5.5 0 0 1-.5-.5V11h-.5a.5.5 0 0 1-.468-.325z"/>
          </svg>
          Edit Domain Mapping
        </h5>
        <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
      </div>
      <form method="POST" id="editDomainForm">
        <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
        <div class="modal-body">
          <div class="mb-3">
            <label class="form-label fw-medium">Domain</label>
            <input type="text" class="form-control bg-light" id="editDomainName" disabled>
          </div>
          <div class="mb-3">
            <label for="editBucket" class="form-label fw-medium">Bucket</label>
            {% if buckets %}
            <select class="form-select" id="editBucket" name="bucket" required>
              {% for b in buckets %}
              <option value="{{ b }}">{{ b }}</option>
              {% endfor %}
            </select>
            {% else %}
            <input type="text" class="form-control" id="editBucket" name="bucket" required>
            {% endif %}
          </div>
        </div>
        <div class="modal-footer">
          <button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
          <button type="submit" class="btn btn-primary">
            <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
              <path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
            </svg>
            Save
          </button>
        </div>
      </form>
    </div>
  </div>
</div>

<div class="modal fade" id="deleteDomainModal" tabindex="-1" aria-hidden="true">
  <div class="modal-dialog modal-dialog-centered">
    <div class="modal-content">
      <form method="POST" id="deleteDomainForm">
        <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
        <div class="modal-header border-0 pb-0">
          <h5 class="modal-title fw-semibold">
            <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
              <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
              <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
            </svg>
            Delete Domain Mapping
          </h5>
          <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
        </div>
        <div class="modal-body">
          <p>Are you sure you want to delete the mapping for <strong><code id="deleteDomainName"></code></strong>?</p>
          <p class="text-muted small mb-0">Mapped to bucket: <code id="deleteBucketName"></code></p>
          <div class="alert alert-warning d-flex align-items-start small mt-3 mb-0" role="alert">
            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
              <path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
            </svg>
            <div>This domain will stop serving website content immediately.</div>
          </div>
        </div>
        <div class="modal-footer">
          <button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
          <button type="submit" class="btn btn-danger">
            <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
              <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
              <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
            </svg>
            Delete
          </button>
        </div>
      </form>
    </div>
  </div>
</div>
{% endblock %}

{% block extra_scripts %}
<script>
(function () {
  function normalizeDomain(val) {
    val = val.trim().toLowerCase();
    if (val.indexOf('https://') === 0) val = val.substring(8);
    else if (val.indexOf('http://') === 0) val = val.substring(7);
    var slashIdx = val.indexOf('/');
    if (slashIdx !== -1) val = val.substring(0, slashIdx);
    var qIdx = val.indexOf('?');
    if (qIdx !== -1) val = val.substring(0, qIdx);
    var hIdx = val.indexOf('#');
    if (hIdx !== -1) val = val.substring(0, hIdx);
    var colonIdx = val.indexOf(':');
    if (colonIdx !== -1) val = val.substring(0, colonIdx);
    return val;
  }

  var domainInput = document.getElementById('domain');
  var preview = document.getElementById('domainPreview');
  var previewUrl = document.getElementById('domainPreviewUrl');
  if (domainInput && preview) {
    domainInput.addEventListener('input', function () {
      var clean = normalizeDomain(this.value);
      if (clean && /^[a-z0-9]([a-z0-9\-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9\-]*[a-z0-9])?)*$/.test(clean)) {
        previewUrl.textContent = 'http://' + clean;
        preview.classList.remove('d-none');
      } else {
        preview.classList.add('d-none');
      }
    });

    var createForm = document.getElementById('createDomainForm');
    if (createForm) {
      createForm.addEventListener('submit', function () {
        domainInput.value = normalizeDomain(domainInput.value);
      });
    }
  }

  var editModal = document.getElementById('editDomainModal');
  if (editModal) {
    editModal.addEventListener('show.bs.modal', function (event) {
      var btn = event.relatedTarget;
      var domain = btn.getAttribute('data-domain');
      var bucket = btn.getAttribute('data-bucket');
      document.getElementById('editDomainName').value = domain;
      var editBucket = document.getElementById('editBucket');
      editBucket.value = bucket;
      document.getElementById('editDomainForm').action = '{{ url_for("ui.update_website_domain", domain="__DOMAIN__") }}'.replace('__DOMAIN__', encodeURIComponent(domain));
    });
  }

  var deleteModal = document.getElementById('deleteDomainModal');
  if (deleteModal) {
    deleteModal.addEventListener('show.bs.modal', function (event) {
      var btn = event.relatedTarget;
      var domain = btn.getAttribute('data-domain');
      var bucket = btn.getAttribute('data-bucket') || '';
      document.getElementById('deleteDomainName').textContent = domain;
      document.getElementById('deleteBucketName').textContent = bucket;
      document.getElementById('deleteDomainForm').action = '{{ url_for("ui.delete_website_domain", domain="__DOMAIN__") }}'.replace('__DOMAIN__', encodeURIComponent(domain));
    });
  }

  var searchInput = document.getElementById('domainSearch');
  if (searchInput) {
    searchInput.addEventListener('input', function () {
      var q = this.value.toLowerCase();
      var rows = document.querySelectorAll('#domainTable tbody tr');
      var visible = 0;
      rows.forEach(function (row) {
        var domain = (row.getAttribute('data-domain') || '').toLowerCase();
        var bucket = (row.getAttribute('data-bucket') || '').toLowerCase();
        var match = !q || domain.indexOf(q) !== -1 || bucket.indexOf(q) !== -1;
        row.style.display = match ? '' : 'none';
        if (match) visible++;
      });
      var noResults = document.getElementById('noSearchResults');
      if (noResults) {
        noResults.classList.toggle('d-none', visible > 0);
      }
    });
  }
})();
</script>
{% endblock %}
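For server-side validation it can be useful to normalize submitted hostnames the same way the form does. A Python mirror of the template's normalizeDomain() helper (a sketch, not part of this diff):

from urllib.parse import urlsplit

def normalize_domain(value: str) -> str:
    # Mirrors the JS helper: lowercase, then strip scheme, path, query,
    # fragment, and port, leaving only the hostname.
    value = value.strip().lower()
    if "://" not in value:
        value = "http://" + value  # urlsplit needs a scheme to parse a netloc
    return urlsplit(value).hostname or ""

assert normalize_domain("HTTPS://WWW.Example.com/path?q=1#frag") == "www.example.com"
assert normalize_domain("www.example.com:8080") == "www.example.com"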
@@ -27,7 +27,10 @@ def app(tmp_path: Path):
                "access_key": "test",
                "secret_key": "secret",
                "display_name": "Test User",
                "policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy"]}],
                "policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy",
                                                         "create_bucket", "delete_bucket", "share", "versioning", "tagging",
                                                         "encryption", "cors", "lifecycle", "replication", "quota",
                                                         "object_lock", "notification", "logging", "website"]}],
            }
        ]
    }
@@ -35,6 +38,7 @@ def app(tmp_path: Path):
    flask_app = create_api_app(
        {
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": bucket_policies,
@@ -42,6 +46,11 @@ def app(tmp_path: Path):
        }
    )
    yield flask_app
    storage = flask_app.extensions.get("object_storage")
    if storage:
        base = getattr(storage, "storage", storage)
        if hasattr(base, "shutdown_stats"):
            base.shutdown_stats()


@pytest.fixture()
python/tests/test_access_logging.py (new file, 339 lines)
@@ -0,0 +1,339 @@
import io
import json
import time
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from app.access_logging import (
    AccessLogEntry,
    AccessLoggingService,
    LoggingConfiguration,
)
from app.storage import ObjectStorage


class TestAccessLogEntry:
    def test_default_values(self):
        entry = AccessLogEntry()
        assert entry.bucket_owner == "-"
        assert entry.bucket == "-"
        assert entry.remote_ip == "-"
        assert entry.requester == "-"
        assert entry.operation == "-"
        assert entry.http_status == 200
        assert len(entry.request_id) == 16

    def test_to_log_line(self):
        entry = AccessLogEntry(
            bucket_owner="owner123",
            bucket="my-bucket",
            remote_ip="192.168.1.1",
            requester="user456",
            request_id="REQ123456789012",
            operation="REST.PUT.OBJECT",
            key="test/key.txt",
            request_uri="PUT /my-bucket/test/key.txt HTTP/1.1",
            http_status=200,
            bytes_sent=1024,
            object_size=2048,
            total_time_ms=150,
            referrer="http://example.com",
            user_agent="aws-cli/2.0",
            version_id="v1",
        )
        log_line = entry.to_log_line()

        assert "owner123" in log_line
        assert "my-bucket" in log_line
        assert "192.168.1.1" in log_line
        assert "user456" in log_line
        assert "REST.PUT.OBJECT" in log_line
        assert "test/key.txt" in log_line
        assert "200" in log_line

    def test_to_dict(self):
        entry = AccessLogEntry(
            bucket_owner="owner",
            bucket="bucket",
            remote_ip="10.0.0.1",
            requester="admin",
            request_id="ABC123",
            operation="REST.GET.OBJECT",
            key="file.txt",
            request_uri="GET /bucket/file.txt HTTP/1.1",
            http_status=200,
            bytes_sent=512,
            object_size=512,
            total_time_ms=50,
        )
        result = entry.to_dict()

        assert result["bucket_owner"] == "owner"
        assert result["bucket"] == "bucket"
        assert result["remote_ip"] == "10.0.0.1"
        assert result["requester"] == "admin"
        assert result["operation"] == "REST.GET.OBJECT"
        assert result["key"] == "file.txt"
        assert result["http_status"] == 200
        assert result["bytes_sent"] == 512


class TestLoggingConfiguration:
    def test_default_values(self):
        config = LoggingConfiguration(target_bucket="log-bucket")
        assert config.target_bucket == "log-bucket"
        assert config.target_prefix == ""
        assert config.enabled is True

    def test_to_dict(self):
        config = LoggingConfiguration(
            target_bucket="logs",
            target_prefix="access-logs/",
            enabled=True,
        )
        result = config.to_dict()

        assert "LoggingEnabled" in result
        assert result["LoggingEnabled"]["TargetBucket"] == "logs"
        assert result["LoggingEnabled"]["TargetPrefix"] == "access-logs/"

    def test_from_dict(self):
        data = {
            "LoggingEnabled": {
                "TargetBucket": "my-logs",
                "TargetPrefix": "bucket-logs/",
            }
        }
        config = LoggingConfiguration.from_dict(data)

        assert config is not None
        assert config.target_bucket == "my-logs"
        assert config.target_prefix == "bucket-logs/"
        assert config.enabled is True

    def test_from_dict_no_logging(self):
        data = {}
        config = LoggingConfiguration.from_dict(data)
        assert config is None
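The two tests above pin down the wire shape: to_dict() emits the S3-style "LoggingEnabled" document and from_dict() parses it back, so the pair round-trips. A quick sketch (variable names here are illustrative):

cfg = LoggingConfiguration(target_bucket="logs", target_prefix="access/")
restored = LoggingConfiguration.from_dict(cfg.to_dict())
assert restored is not None
assert (restored.target_bucket, restored.target_prefix) == ("logs", "access/")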
@pytest.fixture
def storage(tmp_path: Path):
    storage_root = tmp_path / "data"
    storage_root.mkdir(parents=True)
    return ObjectStorage(storage_root)


@pytest.fixture
def logging_service(tmp_path: Path, storage):
    service = AccessLoggingService(
        tmp_path,
        flush_interval=3600,
        max_buffer_size=10,
    )
    service.set_storage(storage)
    yield service
    service.shutdown()


class TestAccessLoggingService:
    def test_get_bucket_logging_not_configured(self, logging_service):
        result = logging_service.get_bucket_logging("unconfigured-bucket")
        assert result is None

    def test_set_and_get_bucket_logging(self, logging_service):
        config = LoggingConfiguration(
            target_bucket="log-bucket",
            target_prefix="logs/",
        )
        logging_service.set_bucket_logging("source-bucket", config)

        retrieved = logging_service.get_bucket_logging("source-bucket")
        assert retrieved is not None
        assert retrieved.target_bucket == "log-bucket"
        assert retrieved.target_prefix == "logs/"

    def test_delete_bucket_logging(self, logging_service):
        config = LoggingConfiguration(target_bucket="logs")
        logging_service.set_bucket_logging("to-delete", config)
        assert logging_service.get_bucket_logging("to-delete") is not None

        logging_service.delete_bucket_logging("to-delete")
        logging_service._configs.clear()
        assert logging_service.get_bucket_logging("to-delete") is None

    def test_log_request_no_config(self, logging_service):
        logging_service.log_request(
            "no-config-bucket",
            operation="REST.GET.OBJECT",
            key="test.txt",
        )
        stats = logging_service.get_stats()
        assert stats["buffered_entries"] == 0

    def test_log_request_with_config(self, logging_service, storage):
        storage.create_bucket("log-target")

        config = LoggingConfiguration(
            target_bucket="log-target",
            target_prefix="access/",
        )
        logging_service.set_bucket_logging("source-bucket", config)

        logging_service.log_request(
            "source-bucket",
            operation="REST.PUT.OBJECT",
            key="uploaded.txt",
            remote_ip="192.168.1.100",
            requester="test-user",
            http_status=200,
            bytes_sent=1024,
        )

        stats = logging_service.get_stats()
        assert stats["buffered_entries"] == 1

    def test_log_request_disabled_config(self, logging_service):
        config = LoggingConfiguration(
            target_bucket="logs",
            enabled=False,
        )
        logging_service.set_bucket_logging("disabled-bucket", config)

        logging_service.log_request(
            "disabled-bucket",
            operation="REST.GET.OBJECT",
            key="test.txt",
        )

        stats = logging_service.get_stats()
        assert stats["buffered_entries"] == 0

    def test_flush_buffer(self, logging_service, storage):
        storage.create_bucket("flush-target")

        config = LoggingConfiguration(
            target_bucket="flush-target",
            target_prefix="logs/",
        )
        logging_service.set_bucket_logging("flush-source", config)

        for i in range(3):
            logging_service.log_request(
                "flush-source",
                operation="REST.GET.OBJECT",
                key=f"file{i}.txt",
            )

        logging_service.flush()

        objects = storage.list_objects_all("flush-target")
        assert len(objects) >= 1

    def test_auto_flush_on_buffer_size(self, logging_service, storage):
        storage.create_bucket("auto-flush-target")

        config = LoggingConfiguration(
            target_bucket="auto-flush-target",
            target_prefix="",
        )
        logging_service.set_bucket_logging("auto-source", config)

        for i in range(15):
            logging_service.log_request(
                "auto-source",
                operation="REST.GET.OBJECT",
                key=f"file{i}.txt",
            )

        objects = storage.list_objects_all("auto-flush-target")
        assert len(objects) >= 1

    def test_get_stats(self, logging_service, storage):
        storage.create_bucket("stats-target")
        config = LoggingConfiguration(target_bucket="stats-target")
        logging_service.set_bucket_logging("stats-bucket", config)

        logging_service.log_request(
            "stats-bucket",
            operation="REST.GET.OBJECT",
            key="test.txt",
        )

        stats = logging_service.get_stats()
        assert "buffered_entries" in stats
        assert "target_buckets" in stats
        assert stats["buffered_entries"] >= 1

    def test_shutdown_flushes_buffer(self, tmp_path, storage):
        storage.create_bucket("shutdown-target")

        service = AccessLoggingService(tmp_path, flush_interval=3600, max_buffer_size=100)
        service.set_storage(storage)

        config = LoggingConfiguration(target_bucket="shutdown-target")
        service.set_bucket_logging("shutdown-source", config)

        service.log_request(
            "shutdown-source",
            operation="REST.PUT.OBJECT",
            key="final.txt",
        )

        service.shutdown()

        objects = storage.list_objects_all("shutdown-target")
        assert len(objects) >= 1

    def test_logging_caching(self, logging_service):
        config = LoggingConfiguration(target_bucket="cached-logs")
        logging_service.set_bucket_logging("cached-bucket", config)

        logging_service.get_bucket_logging("cached-bucket")
        assert "cached-bucket" in logging_service._configs

    def test_log_request_all_fields(self, logging_service, storage):
        storage.create_bucket("detailed-target")

        config = LoggingConfiguration(target_bucket="detailed-target", target_prefix="detailed/")
        logging_service.set_bucket_logging("detailed-source", config)

        logging_service.log_request(
            "detailed-source",
            operation="REST.PUT.OBJECT",
            key="detailed/file.txt",
            remote_ip="10.0.0.1",
            requester="admin-user",
            request_uri="PUT /detailed-source/detailed/file.txt HTTP/1.1",
            http_status=201,
            error_code="",
            bytes_sent=2048,
            object_size=2048,
            total_time_ms=100,
            referrer="http://admin.example.com",
            user_agent="curl/7.68.0",
            version_id="v1.0",
            request_id="CUSTOM_REQ_ID",
        )

        stats = logging_service.get_stats()
        assert stats["buffered_entries"] == 1

    def test_failed_flush_returns_to_buffer(self, logging_service):
        config = LoggingConfiguration(target_bucket="nonexistent-target")
        logging_service.set_bucket_logging("fail-source", config)

        logging_service.log_request(
            "fail-source",
            operation="REST.GET.OBJECT",
            key="test.txt",
        )

        initial_count = logging_service.get_stats()["buffered_entries"]
        logging_service.flush()

        final_count = logging_service.get_stats()["buffered_entries"]
        assert final_count >= initial_count
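Taken together, the fixtures and tests above reduce to the following usage pattern. A condensed end-to-end sketch, using only calls that appear in the tests; the filesystem path is illustrative:

from pathlib import Path
from app.access_logging import AccessLoggingService, LoggingConfiguration
from app.storage import ObjectStorage

root = Path("/tmp/myfsio-demo")            # illustrative location
(root / "data").mkdir(parents=True, exist_ok=True)
storage = ObjectStorage(root / "data")
storage.create_bucket("log-target")

service = AccessLoggingService(root, flush_interval=3600, max_buffer_size=10)
service.set_storage(storage)
service.set_bucket_logging(
    "source", LoggingConfiguration(target_bucket="log-target", target_prefix="access/")
)
service.log_request("source", operation="REST.GET.OBJECT", key="hello.txt", http_status=200)
service.flush()                            # or rely on max_buffer_size / shutdown()
assert storage.list_objects_all("log-target")
service.shutdown()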
python/tests/test_acl.py (new file, 284 lines)
@@ -0,0 +1,284 @@
import json
from pathlib import Path

import pytest

from app.acl import (
    Acl,
    AclGrant,
    AclService,
    ACL_PERMISSION_FULL_CONTROL,
    ACL_PERMISSION_READ,
    ACL_PERMISSION_WRITE,
    ACL_PERMISSION_READ_ACP,
    ACL_PERMISSION_WRITE_ACP,
    GRANTEE_ALL_USERS,
    GRANTEE_AUTHENTICATED_USERS,
    PERMISSION_TO_ACTIONS,
    create_canned_acl,
    CANNED_ACLS,
)


class TestAclGrant:
    def test_to_dict(self):
        grant = AclGrant(grantee="user123", permission=ACL_PERMISSION_READ)
        result = grant.to_dict()
        assert result == {"grantee": "user123", "permission": "READ"}

    def test_from_dict(self):
        data = {"grantee": "admin", "permission": "FULL_CONTROL"}
        grant = AclGrant.from_dict(data)
        assert grant.grantee == "admin"
        assert grant.permission == ACL_PERMISSION_FULL_CONTROL


class TestAcl:
    def test_to_dict(self):
        acl = Acl(
            owner="owner-user",
            grants=[
                AclGrant(grantee="owner-user", permission=ACL_PERMISSION_FULL_CONTROL),
                AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
            ],
        )
        result = acl.to_dict()
        assert result["owner"] == "owner-user"
        assert len(result["grants"]) == 2
        assert result["grants"][0]["grantee"] == "owner-user"
        assert result["grants"][1]["grantee"] == "*"

    def test_from_dict(self):
        data = {
            "owner": "the-owner",
            "grants": [
                {"grantee": "the-owner", "permission": "FULL_CONTROL"},
                {"grantee": "authenticated", "permission": "READ"},
            ],
        }
        acl = Acl.from_dict(data)
        assert acl.owner == "the-owner"
        assert len(acl.grants) == 2
        assert acl.grants[0].grantee == "the-owner"
        assert acl.grants[1].grantee == GRANTEE_AUTHENTICATED_USERS

    def test_from_dict_empty_grants(self):
        data = {"owner": "solo-owner"}
        acl = Acl.from_dict(data)
        assert acl.owner == "solo-owner"
        assert len(acl.grants) == 0

    def test_get_allowed_actions_owner(self):
        acl = Acl(owner="owner123", grants=[])
        actions = acl.get_allowed_actions("owner123", is_authenticated=True)
        assert actions == PERMISSION_TO_ACTIONS[ACL_PERMISSION_FULL_CONTROL]

    def test_get_allowed_actions_all_users(self):
        acl = Acl(
            owner="owner",
            grants=[AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ)],
        )
        actions = acl.get_allowed_actions(None, is_authenticated=False)
        assert "read" in actions
        assert "list" in actions
        assert "write" not in actions

    def test_get_allowed_actions_authenticated_users(self):
        acl = Acl(
            owner="owner",
            grants=[AclGrant(grantee=GRANTEE_AUTHENTICATED_USERS, permission=ACL_PERMISSION_WRITE)],
        )
        actions_authenticated = acl.get_allowed_actions("some-user", is_authenticated=True)
        assert "write" in actions_authenticated
        assert "delete" in actions_authenticated

        actions_anonymous = acl.get_allowed_actions(None, is_authenticated=False)
        assert "write" not in actions_anonymous

    def test_get_allowed_actions_specific_grantee(self):
        acl = Acl(
            owner="owner",
            grants=[
                AclGrant(grantee="user-abc", permission=ACL_PERMISSION_READ),
                AclGrant(grantee="user-xyz", permission=ACL_PERMISSION_WRITE),
            ],
        )
        abc_actions = acl.get_allowed_actions("user-abc", is_authenticated=True)
        assert "read" in abc_actions
        assert "list" in abc_actions
        assert "write" not in abc_actions

        xyz_actions = acl.get_allowed_actions("user-xyz", is_authenticated=True)
        assert "write" in xyz_actions
        assert "read" not in xyz_actions

    def test_get_allowed_actions_combined(self):
        acl = Acl(
            owner="owner",
            grants=[
                AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
                AclGrant(grantee="special-user", permission=ACL_PERMISSION_WRITE),
            ],
        )
        actions = acl.get_allowed_actions("special-user", is_authenticated=True)
        assert "read" in actions
        assert "list" in actions
        assert "write" in actions
        assert "delete" in actions


class TestCannedAcls:
    def test_private_acl(self):
        acl = create_canned_acl("private", "the-owner")
        assert acl.owner == "the-owner"
        assert len(acl.grants) == 1
        assert acl.grants[0].grantee == "the-owner"
        assert acl.grants[0].permission == ACL_PERMISSION_FULL_CONTROL

    def test_public_read_acl(self):
        acl = create_canned_acl("public-read", "owner")
        assert acl.owner == "owner"
        has_owner_full_control = any(
            g.grantee == "owner" and g.permission == ACL_PERMISSION_FULL_CONTROL for g in acl.grants
        )
        has_public_read = any(
            g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
        )
        assert has_owner_full_control
        assert has_public_read

    def test_public_read_write_acl(self):
        acl = create_canned_acl("public-read-write", "owner")
        assert acl.owner == "owner"
        has_public_read = any(
            g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
        )
        has_public_write = any(
            g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_WRITE for g in acl.grants
        )
        assert has_public_read
        assert has_public_write

    def test_authenticated_read_acl(self):
        acl = create_canned_acl("authenticated-read", "owner")
        has_authenticated_read = any(
            g.grantee == GRANTEE_AUTHENTICATED_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
        )
        assert has_authenticated_read

    def test_unknown_canned_acl_defaults_to_private(self):
        acl = create_canned_acl("unknown-acl", "owner")
        private_acl = create_canned_acl("private", "owner")
        assert acl.to_dict() == private_acl.to_dict()


@pytest.fixture
def acl_service(tmp_path: Path):
    return AclService(tmp_path)


class TestAclService:
    def test_get_bucket_acl_not_exists(self, acl_service):
        result = acl_service.get_bucket_acl("nonexistent-bucket")
        assert result is None

    def test_set_and_get_bucket_acl(self, acl_service):
        acl = Acl(
            owner="bucket-owner",
            grants=[AclGrant(grantee="bucket-owner", permission=ACL_PERMISSION_FULL_CONTROL)],
        )
        acl_service.set_bucket_acl("my-bucket", acl)

        retrieved = acl_service.get_bucket_acl("my-bucket")
        assert retrieved is not None
        assert retrieved.owner == "bucket-owner"
        assert len(retrieved.grants) == 1

    def test_bucket_acl_caching(self, acl_service):
        acl = Acl(owner="cached-owner", grants=[])
        acl_service.set_bucket_acl("cached-bucket", acl)

        acl_service.get_bucket_acl("cached-bucket")
        assert "cached-bucket" in acl_service._bucket_acl_cache

        retrieved = acl_service.get_bucket_acl("cached-bucket")
        assert retrieved.owner == "cached-owner"

    def test_set_bucket_canned_acl(self, acl_service):
        result = acl_service.set_bucket_canned_acl("new-bucket", "public-read", "the-owner")
        assert result.owner == "the-owner"

        retrieved = acl_service.get_bucket_acl("new-bucket")
        assert retrieved is not None
        has_public_read = any(
            g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in retrieved.grants
        )
        assert has_public_read

    def test_delete_bucket_acl(self, acl_service):
        acl = Acl(owner="to-delete-owner", grants=[])
        acl_service.set_bucket_acl("delete-me", acl)
        assert acl_service.get_bucket_acl("delete-me") is not None

        acl_service.delete_bucket_acl("delete-me")
        acl_service._bucket_acl_cache.clear()
        assert acl_service.get_bucket_acl("delete-me") is None

    def test_evaluate_bucket_acl_allowed(self, acl_service):
        acl = Acl(
            owner="owner",
            grants=[AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ)],
        )
        acl_service.set_bucket_acl("public-bucket", acl)

        result = acl_service.evaluate_bucket_acl("public-bucket", None, "read", is_authenticated=False)
        assert result is True

    def test_evaluate_bucket_acl_denied(self, acl_service):
        acl = Acl(
            owner="owner",
            grants=[AclGrant(grantee="owner", permission=ACL_PERMISSION_FULL_CONTROL)],
        )
        acl_service.set_bucket_acl("private-bucket", acl)

        result = acl_service.evaluate_bucket_acl("private-bucket", "other-user", "write", is_authenticated=True)
        assert result is False

    def test_evaluate_bucket_acl_no_acl(self, acl_service):
        result = acl_service.evaluate_bucket_acl("no-acl-bucket", "anyone", "read")
        assert result is False

    def test_get_object_acl_from_metadata(self, acl_service):
        metadata = {
            "__acl__": {
                "owner": "object-owner",
                "grants": [{"grantee": "object-owner", "permission": "FULL_CONTROL"}],
            }
        }
        result = acl_service.get_object_acl("bucket", "key", metadata)
        assert result is not None
        assert result.owner == "object-owner"

    def test_get_object_acl_no_acl_in_metadata(self, acl_service):
        metadata = {"Content-Type": "text/plain"}
        result = acl_service.get_object_acl("bucket", "key", metadata)
        assert result is None

    def test_create_object_acl_metadata(self, acl_service):
        acl = Acl(owner="obj-owner", grants=[])
        result = acl_service.create_object_acl_metadata(acl)
        assert "__acl__" in result
        assert result["__acl__"]["owner"] == "obj-owner"

    def test_evaluate_object_acl(self, acl_service):
        metadata = {
            "__acl__": {
                "owner": "obj-owner",
                "grants": [{"grantee": "*", "permission": "READ"}],
            }
        }
        result = acl_service.evaluate_object_acl(metadata, None, "read", is_authenticated=False)
        assert result is True

        result = acl_service.evaluate_object_acl(metadata, None, "write", is_authenticated=False)
        assert result is False
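As a quick orientation, the service-level flow exercised above reduces to: apply a canned ACL, then evaluate requests against it. A sketch using only calls from the tests; the filesystem path is illustrative:

from pathlib import Path
from app.acl import AclService

root = Path("/tmp/myfsio-acl-demo")  # illustrative location
root.mkdir(parents=True, exist_ok=True)

svc = AclService(root)
svc.set_bucket_canned_acl("site", "public-read", "owner")

# public-read: anonymous readers are allowed, but non-owners cannot write.
assert svc.evaluate_bucket_acl("site", None, "read", is_authenticated=False) is True
assert svc.evaluate_bucket_acl("site", "someone", "write", is_authenticated=True) is False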
@@ -1,4 +1,54 @@
from urllib.parse import urlsplit
import hashlib
import hmac
from datetime import datetime, timezone
from urllib.parse import quote


def _build_presigned_query(path: str, *, access_key: str = "test", secret_key: str = "secret", expires: int = 60) -> str:
    now = datetime.now(timezone.utc)
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = now.strftime("%Y%m%d")
    region = "us-east-1"
    service = "s3"
    credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"

    query_items = [
        ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
        ("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD"),
        ("X-Amz-Credential", f"{access_key}/{credential_scope}"),
        ("X-Amz-Date", amz_date),
        ("X-Amz-Expires", str(expires)),
        ("X-Amz-SignedHeaders", "host"),
    ]
    canonical_query = "&".join(
        f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}" for k, v in sorted(query_items)
    )

    canonical_request = "\n".join([
        "GET",
        quote(path, safe="/-_.~"),
        canonical_query,
        "host:localhost\n",
        "host",
        "UNSIGNED-PAYLOAD",
    ])
    hashed_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
    string_to_sign = "\n".join([
        "AWS4-HMAC-SHA256",
        amz_date,
        credential_scope,
        hashed_request,
    ])

    def _sign(key: bytes, msg: str) -> bytes:
        return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

    k_date = _sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = _sign(k_date, region)
    k_service = _sign(k_region, service)
    signing_key = _sign(k_service, "aws4_request")
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
    return canonical_query + f"&X-Amz-Signature={signature}"
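The helper builds a SigV4 query-string presigned GET: canonical request, then string to sign, then the date/region/service/aws4_request key derivation. The tests that follow consume it like this:

# Usage pattern taken from the tests below.
query = _build_presigned_query("/secure/file.txt", expires=60)
response = client.get(f"/secure/file.txt?{query}", headers={"Host": "localhost"})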
|
||||
|
||||
def test_bucket_and_object_lifecycle(client, signer):
|
||||
@@ -104,12 +154,12 @@ def test_request_id_header_present(client, signer):
|
||||
assert response.headers.get("X-Request-ID")
|
||||
|
||||
|
||||
def test_healthcheck_returns_version(client):
|
||||
response = client.get("/healthz")
|
||||
def test_healthcheck_returns_status(client):
|
||||
response = client.get("/myfsio/health")
|
||||
data = response.get_json()
|
||||
assert response.status_code == 200
|
||||
assert data["status"] == "ok"
|
||||
assert "version" in data
|
||||
assert "version" not in data
|
||||
|
||||
|
||||
def test_missing_credentials_denied(client):
|
||||
@@ -117,36 +167,59 @@ def test_missing_credentials_denied(client):
|
||||
assert response.status_code == 403
|
||||
|
||||
|
||||
def test_presign_and_bucket_policies(client, signer):
|
||||
# Create bucket and object
|
||||
def test_presigned_url_denied_for_disabled_user(client, signer):
|
||||
headers = signer("PUT", "/secure")
|
||||
assert client.put("/secure", headers=headers).status_code == 200
|
||||
|
||||
payload = b"hello"
|
||||
headers = signer("PUT", "/secure/file.txt", body=payload)
|
||||
assert client.put("/secure/file.txt", headers=headers, data=payload).status_code == 200
|
||||
|
||||
iam = client.application.extensions["iam"]
|
||||
iam.disable_user("test")
|
||||
|
||||
query = _build_presigned_query("/secure/file.txt")
|
||||
response = client.get(f"/secure/file.txt?{query}", headers={"Host": "localhost"})
|
||||
assert response.status_code == 403
|
||||
assert b"User account is disabled" in response.data
|
||||
|
||||
|
||||
def test_presigned_url_denied_for_inactive_key(client, signer):
|
||||
headers = signer("PUT", "/secure2")
|
||||
assert client.put("/secure2", headers=headers).status_code == 200
|
||||
|
||||
payload = b"hello"
|
||||
headers = signer("PUT", "/secure2/file.txt", body=payload)
|
||||
assert client.put("/secure2/file.txt", headers=headers, data=payload).status_code == 200
|
||||
|
||||
iam = client.application.extensions["iam"]
|
||||
for user in iam._raw_config.get("users", []):
|
||||
for key_info in user.get("access_keys", []):
|
||||
if key_info.get("access_key") == "test":
|
||||
key_info["status"] = "inactive"
|
||||
iam._save()
|
||||
iam._load()
|
||||
|
||||
query = _build_presigned_query("/secure2/file.txt")
|
||||
response = client.get(f"/secure2/file.txt?{query}", headers={"Host": "localhost"})
|
||||
assert response.status_code == 403
|
||||
assert b"Access key is inactive" in response.data
|
||||
|
||||
|
||||
def test_bucket_policies_deny_reads(client, signer):
|
||||
import json
|
||||
|
||||
headers = signer("PUT", "/docs")
|
||||
assert client.put("/docs", headers=headers).status_code == 200
|
||||
|
||||
|
||||
headers = signer("PUT", "/docs/readme.txt", body=b"content")
|
||||
assert client.put("/docs/readme.txt", headers=headers, data=b"content").status_code == 200
|
||||
|
||||
# Generate presigned GET URL and follow it
|
||||
json_body = {"method": "GET", "expires_in": 120}
|
||||
# Flask test client json parameter automatically sets Content-Type and serializes body
|
||||
# But for signing we need the body bytes.
|
||||
import json
|
||||
body_bytes = json.dumps(json_body).encode("utf-8")
|
||||
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
|
||||
|
||||
response = client.post(
|
||||
"/presign/docs/readme.txt",
|
||||
headers=headers,
|
||||
json=json_body,
|
||||
)
|
||||
headers = signer("GET", "/docs/readme.txt")
|
||||
response = client.get("/docs/readme.txt", headers=headers)
|
||||
assert response.status_code == 200
|
||||
presigned_url = response.get_json()["url"]
|
||||
parts = urlsplit(presigned_url)
|
||||
presigned_path = f"{parts.path}?{parts.query}"
|
||||
download = client.get(presigned_path)
|
||||
assert download.status_code == 200
|
||||
assert download.data == b"content"
|
||||
assert response.data == b"content"
|
||||
|
||||
# Attach a deny policy for GETs
|
||||
policy = {
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -160,29 +233,26 @@ def test_presign_and_bucket_policies(client, signer):
|
||||
],
|
||||
}
|
||||
policy_bytes = json.dumps(policy).encode("utf-8")
|
||||
headers = signer("PUT", "/bucket-policy/docs", headers={"Content-Type": "application/json"}, body=policy_bytes)
|
||||
assert client.put("/bucket-policy/docs", headers=headers, json=policy).status_code == 204
|
||||
|
||||
headers = signer("GET", "/bucket-policy/docs")
|
||||
fetched = client.get("/bucket-policy/docs", headers=headers)
|
||||
headers = signer("PUT", "/docs?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
|
||||
assert client.put("/docs?policy", headers=headers, json=policy).status_code == 204
|
||||
|
||||
headers = signer("GET", "/docs?policy")
|
||||
fetched = client.get("/docs?policy", headers=headers)
|
||||
assert fetched.status_code == 200
|
||||
assert fetched.get_json()["Version"] == "2012-10-17"
|
||||
|
||||
# Reads are now denied by bucket policy
|
||||
headers = signer("GET", "/docs/readme.txt")
|
||||
denied = client.get("/docs/readme.txt", headers=headers)
|
||||
assert denied.status_code == 403
|
||||
|
||||
# Presign attempts are also denied
|
||||
json_body = {"method": "GET", "expires_in": 60}
|
||||
body_bytes = json.dumps(json_body).encode("utf-8")
|
||||
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
|
||||
response = client.post(
|
||||
"/presign/docs/readme.txt",
|
||||
headers=headers,
|
||||
json=json_body,
|
||||
)
|
||||
assert response.status_code == 403
|
||||
headers = signer("DELETE", "/docs?policy")
|
||||
assert client.delete("/docs?policy", headers=headers).status_code == 204
|
||||
|
||||
headers = signer("DELETE", "/docs/readme.txt")
|
||||
assert client.delete("/docs/readme.txt", headers=headers).status_code == 204
|
||||
|
||||
headers = signer("DELETE", "/docs")
|
||||
assert client.delete("/docs", headers=headers).status_code == 204
|
||||
|
||||
|
||||
def test_trailing_slash_returns_xml(client):
@@ -193,9 +263,11 @@ def test_trailing_slash_returns_xml(client):

def test_public_policy_allows_anonymous_list_and_read(client, signer):
    import json

    headers = signer("PUT", "/public")
    assert client.put("/public", headers=headers).status_code == 200

    headers = signer("PUT", "/public/hello.txt", body=b"hi")
    assert client.put("/public/hello.txt", headers=headers, data=b"hi").status_code == 200

@@ -221,10 +293,9 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/public", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/public", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/public?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/public?policy", headers=headers, json=policy).status_code == 204

    list_response = client.get("/public")
    assert list_response.status_code == 200
@@ -236,18 +307,20 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):

    headers = signer("DELETE", "/public/hello.txt")
    assert client.delete("/public/hello.txt", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/public")
    assert client.delete("/bucket-policy/public", headers=headers).status_code == 204

    headers = signer("DELETE", "/public?policy")
    assert client.delete("/public?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/public")
    assert client.delete("/public", headers=headers).status_code == 204

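The Statement bodies for this test are elided by the hunk above. For orientation, an anonymous list-and-read policy in the S3 policy grammar these tests follow generally takes this shape (illustrative values, not the elided originals):

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": "*",  # anonymous callers, no signature required
            "Action": ["s3:GetObject", "s3:ListBucket"],
            # The bucket ARN covers ListBucket; the /* form covers GetObject.
            "Resource": ["arn:aws:s3:::public", "arn:aws:s3:::public/*"],
        },
    ],
}
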
def test_principal_dict_with_object_get_only(client, signer):
    import json

    headers = signer("PUT", "/mixed")
    assert client.put("/mixed", headers=headers).status_code == 200

    headers = signer("PUT", "/mixed/only.txt", body=b"ok")
    assert client.put("/mixed/only.txt", headers=headers, data=b"ok").status_code == 200

@@ -270,10 +343,9 @@ def test_principal_dict_with_object_get_only(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/mixed", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/mixed", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/mixed?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/mixed?policy", headers=headers, json=policy).status_code == 204

    assert client.get("/mixed").status_code == 403
    allowed = client.get("/mixed/only.txt")
@@ -282,18 +354,20 @@ def test_principal_dict_with_object_get_only(client, signer):

    headers = signer("DELETE", "/mixed/only.txt")
    assert client.delete("/mixed/only.txt", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/mixed")
    assert client.delete("/bucket-policy/mixed", headers=headers).status_code == 204

    headers = signer("DELETE", "/mixed?policy")
    assert client.delete("/mixed?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/mixed")
    assert client.delete("/mixed", headers=headers).status_code == 204

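As the test name says, the Principal here is a dict rather than the bare "*" string, and only object-level GETs are granted. A statement with that shape (illustrative; the actual elided statement may differ):

statement = {
    "Effect": "Allow",
    "Principal": {"AWS": "*"},  # dict form instead of the plain "*" string
    "Action": "s3:GetObject",
    # Object resources only: listing the bucket itself still returns 403.
    "Resource": "arn:aws:s3:::mixed/*",
}
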
def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
    import json

    headers = signer("PUT", "/test")
    assert client.put("/test", headers=headers).status_code == 200

    headers = signer("PUT", "/test/vid.mp4", body=b"video")
    assert client.put("/test/vid.mp4", headers=headers, data=b"video").status_code == 200

@@ -314,10 +388,9 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/test", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/test", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/test?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/test?policy", headers=headers, json=policy).status_code == 204

    listing = client.get("/test")
    assert listing.status_code == 403
@@ -327,10 +400,10 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):

    headers = signer("DELETE", "/test/vid.mp4")
    assert client.delete("/test/vid.mp4", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/test")
    assert client.delete("/bucket-policy/test", headers=headers).status_code == 204

    headers = signer("DELETE", "/test?policy")
    assert client.delete("/test?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/test")
    assert client.delete("/test", headers=headers).status_code == 204

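Wildcard Resource entries are conventionally matched with shell-style globbing against the full resource name. A sketch of an evaluator consistent with the assertions above (fnmatch stands in for the project's real matcher, which this diff does not show):

from fnmatch import fnmatchcase

def resource_matches(pattern: str, resource: str) -> bool:
    # "arn:aws:s3:::test/*" matches every object in the bucket but not
    # "arn:aws:s3:::test" itself, which is why the listing above gets 403.
    return fnmatchcase(resource, pattern)

assert resource_matches("arn:aws:s3:::test/*", "arn:aws:s3:::test/vid.mp4")
assert not resource_matches("arn:aws:s3:::test/*", "arn:aws:s3:::test")
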
@@ -8,8 +8,6 @@ def client(app):

@pytest.fixture
def auth_headers(app):
    # Create a test user and return headers
    # Using the user defined in conftest.py
    return {
        "X-Access-Key": "test",
        "X-Secret-Key": "secret"
@@ -75,19 +73,16 @@ def test_multipart_upload_flow(client, auth_headers):

def test_abort_multipart_upload(client, auth_headers):
    client.put("/abort-bucket", headers=auth_headers)

    # Initiate
    resp = client.post("/abort-bucket/file.txt?uploads", headers=auth_headers)
    upload_id = fromstring(resp.data).find("UploadId").text

    # Abort
    resp = client.delete(f"/abort-bucket/file.txt?uploadId={upload_id}", headers=auth_headers)
    assert resp.status_code == 204

    # Try to upload part (should fail)
    resp = client.put(
        f"/abort-bucket/file.txt?partNumber=1&uploadId={upload_id}",
        headers=auth_headers,
        data=b"data"
    )
    assert resp.status_code == 404  # NoSuchUpload
    assert resp.status_code == 404

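For contrast with the abort path, the happy path under the same query-string conventions (?uploads to initiate, partNumber/uploadId per part) would look roughly like the sketch below. The completion body follows S3's CompleteMultipartUpload XML; the exact format this server expects is not shown in the diff:

from xml.etree.ElementTree import fromstring

def upload_in_parts(client, auth_headers):
    # Initiate: the server responds with an XML document carrying <UploadId>.
    resp = client.post("/demo-bucket/big.bin?uploads", headers=auth_headers)
    upload_id = fromstring(resp.data).find("UploadId").text

    # Upload parts; each response carries an ETag used at completion time.
    etags = []
    for number, chunk in enumerate([b"a" * 1024, b"b" * 1024], start=1):
        resp = client.put(
            f"/demo-bucket/big.bin?partNumber={number}&uploadId={upload_id}",
            headers=auth_headers,
            data=chunk,
        )
        etags.append(resp.headers["ETag"])

    # Complete: POST the ordered part list back to the upload.
    parts_xml = "".join(
        f"<Part><PartNumber>{n}</PartNumber><ETag>{etag}</ETag></Part>"
        for n, etag in enumerate(etags, start=1)
    )
    body = f"<CompleteMultipartUpload>{parts_xml}</CompleteMultipartUpload>"
    return client.post(
        f"/demo-bucket/big.bin?uploadId={upload_id}",
        headers=auth_headers,
        data=body.encode(),
    )
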
python/tests/test_conditional_headers.py (new file, 156 lines)
@@ -0,0 +1,156 @@
import hashlib
import time

import pytest


@pytest.fixture()
def bucket(client, signer):
    headers = signer("PUT", "/cond-test")
    client.put("/cond-test", headers=headers)
    return "cond-test"


@pytest.fixture()
def uploaded(client, signer, bucket):
    body = b"hello conditional"
    etag = hashlib.md5(body).hexdigest()
    headers = signer("PUT", f"/{bucket}/obj.txt", body=body)
    resp = client.put(f"/{bucket}/obj.txt", headers=headers, data=body)
    last_modified = resp.headers.get("Last-Modified")
    return {"etag": etag, "last_modified": last_modified}


class TestIfMatch:
    def test_get_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": f'"{uploaded["etag"]}"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_get_non_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": '"wrongetag"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_head_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Match": f'"{uploaded["etag"]}"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_non_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Match": '"wrongetag"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_wildcard_match(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": "*"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_multiple_etags_one_matches(self, client, signer, bucket, uploaded):
        etag_list = f'"bad1", "{uploaded["etag"]}", "bad2"'
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": etag_list})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_multiple_etags_none_match(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": '"bad1", "bad2"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

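If-Match accepts "*" or a comma-separated list of quoted ETags, which the last two tests exercise. A matcher consistent with that behavior (a sketch; the server's actual parser is not part of this diff):

def if_match_satisfied(header_value: str, current_etag: str) -> bool:
    # "*" matches any existing representation of the resource.
    if header_value.strip() == "*":
        return True
    # Split the list and strip the surrounding double quotes from each entry.
    candidates = [v.strip().strip('"') for v in header_value.split(",")]
    return current_etag in candidates

assert if_match_satisfied('"bad1", "abc123", "bad2"', "abc123")
assert not if_match_satisfied('"bad1", "bad2"', "abc123")
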
class TestIfNoneMatch:
    def test_get_matching_etag_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304
        assert uploaded["etag"] in resp.headers.get("ETag", "")

    def test_get_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_matching_etag_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304

    def test_head_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_wildcard_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": "*"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304

class TestIfModifiedSince:
    def test_not_modified_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304
        assert "ETag" in resp.headers

    def test_modified_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_not_modified(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304

    def test_if_none_match_takes_precedence(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-None-Match": '"wrongetag"',
            "If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

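test_if_none_match_takes_precedence encodes RFC 7232's evaluation order: If-Match is checked before If-Unmodified-Since, and when If-None-Match is present, If-Modified-Since is ignored entirely. A condensed sketch of that ladder for GET/HEAD (the etag_matches, modified_since, and unmodified_since predicates are assumed helpers):

def precondition_status(headers, etag_matches, modified_since, unmodified_since):
    if "If-Match" in headers:
        if not etag_matches(headers["If-Match"]):
            return 412
    elif "If-Unmodified-Since" in headers and not unmodified_since(headers["If-Unmodified-Since"]):
        return 412
    if "If-None-Match" in headers:
        # If-Modified-Since is deliberately ignored when If-None-Match is present.
        if etag_matches(headers["If-None-Match"]):
            return 304  # for GET/HEAD; state-changing methods get 412
    elif "If-Modified-Since" in headers and not modified_since(headers["If-Modified-Since"]):
        return 304
    return None  # no precondition failed; serve the request normally
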
class TestIfUnmodifiedSince:
    def test_unmodified_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_modified_returns_412(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_head_modified_returns_412(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_if_match_takes_precedence(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": f'"{uploaded["etag"]}"',
            "If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

class TestConditionalWithRange:
    def test_if_match_with_range(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": f'"{uploaded["etag"]}"',
            "Range": "bytes=0-4",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 206

    def test_if_match_fails_with_range(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": '"wrongetag"',
            "Range": "bytes=0-4",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412
@@ -53,15 +53,17 @@ def test_special_characters_in_metadata(tmp_path: Path):
    assert meta["special"] == "!@#$%^&*()"

def test_disk_full_scenario(tmp_path: Path, monkeypatch):
    # Simulate a full disk by making the underlying write fail
    import app.storage as _storage_mod
    monkeypatch.setattr(_storage_mod, "_HAS_RUST", False)

    storage = ObjectStorage(tmp_path)
    storage.create_bucket("full")

    def mock_copyfileobj(*args, **kwargs):
        raise OSError(28, "No space left on device")

    import shutil
    monkeypatch.setattr(shutil, "copyfileobj", mock_copyfileobj)

    with pytest.raises(OSError, match="No space left on device"):
        storage.put_object("full", "file", io.BytesIO(b"data"))
@@ -21,12 +21,11 @@ class TestLocalKeyEncryption:

        key_path = tmp_path / "keys" / "master.key"
        provider = LocalKeyEncryption(key_path)

        # Access master key to trigger creation
        key = provider.master_key

        assert key_path.exists()
        assert len(key) == 32  # 256-bit key
        assert len(key) == 32

    def test_load_existing_master_key(self, tmp_path):
        """Test loading an existing master key."""
@@ -49,16 +48,14 @@ class TestLocalKeyEncryption:
        provider = LocalKeyEncryption(key_path)

        plaintext = b"Hello, World! This is a test message."

        # Encrypt
        result = provider.encrypt(plaintext)

        assert result.ciphertext != plaintext
        assert result.key_id == "local"
        assert len(result.nonce) == 12
        assert len(result.encrypted_data_key) > 0

        # Decrypt
        decrypted = provider.decrypt(
            result.ciphertext,
            result.nonce,
@@ -79,12 +76,9 @@ class TestLocalKeyEncryption:

        result1 = provider.encrypt(plaintext)
        result2 = provider.encrypt(plaintext)

        # Different encrypted data keys
        assert result1.encrypted_data_key != result2.encrypted_data_key
        # Different nonces
        assert result1.nonce != result2.nonce
        # Different ciphertexts
        assert result1.ciphertext != result2.ciphertext

    def test_generate_data_key(self, tmp_path):
@@ -95,30 +89,26 @@ class TestLocalKeyEncryption:
        provider = LocalKeyEncryption(key_path)

        plaintext_key, encrypted_key = provider.generate_data_key()

        assert len(plaintext_key) == 32
        assert len(encrypted_key) > 32  # nonce + ciphertext + tag

        # Verify we can decrypt the key
        assert len(encrypted_key) > 32

        decrypted_key = provider._decrypt_data_key(encrypted_key)
        assert decrypted_key == plaintext_key

    def test_decrypt_with_wrong_key_fails(self, tmp_path):
        """Test that decryption fails with wrong master key."""
        from app.encryption import LocalKeyEncryption, EncryptionError

        # Create two providers with different keys
        key_path1 = tmp_path / "master1.key"
        key_path2 = tmp_path / "master2.key"

        provider1 = LocalKeyEncryption(key_path1)
        provider2 = LocalKeyEncryption(key_path2)

        # Encrypt with provider1
        plaintext = b"Secret message"
        result = provider1.encrypt(plaintext)

        # Try to decrypt with provider2
        with pytest.raises(EncryptionError):
            provider2.decrypt(
                result.ciphertext,
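The assertions above (a 32-byte data key, a wrapped key longer than the key itself, fresh nonces and data keys per call) point at envelope encryption: every payload is sealed under its own data key, and only the data key is encrypted with the master key. A minimal sketch with the cryptography package (an assumed dependency; LocalKeyEncryption's internals are not shown in this diff):

import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

def envelope_encrypt(master_key: bytes, plaintext: bytes):
    data_key = os.urandom(32)  # fresh per call, so repeated ciphertexts differ
    nonce = os.urandom(12)     # 96-bit GCM nonce, matching the tests' assertion
    ciphertext = AESGCM(data_key).encrypt(nonce, plaintext, None)

    # Wrap the data key under the master key. The wrapped form is
    # nonce + ciphertext + tag, hence longer than the 32-byte key itself.
    key_nonce = os.urandom(12)
    wrapped_key = key_nonce + AESGCM(master_key).encrypt(key_nonce, data_key, None)
    return ciphertext, nonce, wrapped_key
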
@@ -195,19 +185,16 @@ class TestStreamingEncryptor:
        key_path = tmp_path / "master.key"
        provider = LocalKeyEncryption(key_path)
        encryptor = StreamingEncryptor(provider, chunk_size=1024)

        # Create test data
        original_data = b"A" * 5000 + b"B" * 5000 + b"C" * 5000  # 15KB
        original_data = b"A" * 5000 + b"B" * 5000 + b"C" * 5000
        stream = io.BytesIO(original_data)

        # Encrypt
        encrypted_stream, metadata = encryptor.encrypt_stream(stream)
        encrypted_data = encrypted_stream.read()

        assert encrypted_data != original_data
        assert metadata.algorithm == "AES256"

        # Decrypt
        encrypted_stream = io.BytesIO(encrypted_data)
        decrypted_stream = encryptor.decrypt_stream(encrypted_stream, metadata)
        decrypted_data = decrypted_stream.read()
@@ -318,8 +305,7 @@ class TestClientEncryptionHelper:
        assert "key" in key_info
        assert key_info["algorithm"] == "AES-256-GCM"
        assert "created_at" in key_info

        # Verify key is 256 bits
        key = base64.b64decode(key_info["key"])
        assert len(key) == 32
@@ -424,8 +410,7 @@ class TestKMSManager:

        assert key is not None
        assert key.key_id == "test-key"

        # Non-existent key
        assert kms.get_key("non-existent") is None

    def test_enable_disable_key(self, tmp_path):
@@ -438,15 +423,12 @@ class TestKMSManager:
        kms = KMSManager(keys_path, master_key_path)

        kms.create_key("Test key", key_id="test-key")

        # Initially enabled
        assert kms.get_key("test-key").enabled

        # Disable
        kms.disable_key("test-key")
        assert not kms.get_key("test-key").enabled

        # Enable
        kms.enable_key("test-key")
        assert kms.get_key("test-key").enabled

@@ -502,12 +484,10 @@ class TestKMSManager:
        context = {"bucket": "test-bucket", "key": "test-key"}

        ciphertext = kms.encrypt("test-key", plaintext, context)

        # Decrypt with same context succeeds
        decrypted, _ = kms.decrypt(ciphertext, context)
        assert decrypted == plaintext

        # Decrypt with different context fails
        with pytest.raises(EncryptionError):
            kms.decrypt(ciphertext, {"different": "context"})

@@ -526,8 +506,7 @@ class TestKMSManager:

        assert len(plaintext_key) == 32
        assert len(encrypted_key) > 0

        # Decrypt the encrypted key
        decrypted_key = kms.decrypt_data_key("test-key", encrypted_key)

        assert decrypted_key == plaintext_key
@@ -560,14 +539,9 @@ class TestKMSManager:
        kms.create_key("Key 2", key_id="key-2")

        plaintext = b"Data to re-encrypt"

        # Encrypt with key-1
        ciphertext1 = kms.encrypt("key-1", plaintext)

        # Re-encrypt with key-2
        ciphertext2 = kms.re_encrypt(ciphertext1, "key-2")

        # Decrypt with key-2
        decrypted, key_id = kms.decrypt(ciphertext2)

        assert decrypted == plaintext
@@ -587,7 +561,7 @@ class TestKMSManager:

        assert len(random1) == 32
        assert len(random2) == 32
        assert random1 != random2  # Very unlikely to be equal
        assert random1 != random2

    def test_keys_persist_across_instances(self, tmp_path):
        """Test that keys persist and can be loaded by new instances."""
@@ -595,15 +569,13 @@ class TestKMSManager:

        keys_path = tmp_path / "kms_keys.json"
        master_key_path = tmp_path / "master.key"

        # Create key with first instance
        kms1 = KMSManager(keys_path, master_key_path)
        kms1.create_key("Test key", key_id="test-key")

        plaintext = b"Persistent encryption test"
        ciphertext = kms1.encrypt("test-key", plaintext)

        # Create new instance and verify key works
        kms2 = KMSManager(keys_path, master_key_path)

        decrypted, key_id = kms2.decrypt(ciphertext)
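The context round-trip above behaves like KMS encryption context: the dict acts as associated data, so the ciphertext only authenticates when decryption supplies the identical context. One way to get that binding, assuming canonical JSON as the AAD (a sketch, not the project's code):

import json
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

def encrypt_with_context(key: bytes, plaintext: bytes, context: dict) -> bytes:
    # Canonical serialization so key order in the dict cannot change the AAD.
    aad = json.dumps(context, sort_keys=True).encode()
    nonce = os.urandom(12)
    return nonce + AESGCM(key).encrypt(nonce, plaintext, aad)

def decrypt_with_context(key: bytes, blob: bytes, context: dict) -> bytes:
    aad = json.dumps(context, sort_keys=True).encode()
    # Raises InvalidTag when either the data or the context was altered.
    return AESGCM(key).decrypt(blob[:12], blob[12:], aad)
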
@@ -664,31 +636,27 @@ class TestEncryptedStorage:
        encryption = EncryptionManager(config)

        encrypted_storage = EncryptedObjectStorage(storage, encryption)

        # Create bucket with encryption config
        storage.create_bucket("test-bucket")
        storage.set_bucket_encryption("test-bucket", {
            "Rules": [{"SSEAlgorithm": "AES256"}]
        })

        # Put object
        original_data = b"This is secret data that should be encrypted"
        stream = io.BytesIO(original_data)

        meta = encrypted_storage.put_object(
            "test-bucket",
            "secret.txt",
            stream,
        )

        assert meta is not None

        # Verify file on disk is encrypted (not plaintext)
        file_path = storage_root / "test-bucket" / "secret.txt"
        stored_data = file_path.read_bytes()
        assert stored_data != original_data

        # Get object - should be decrypted
        data, metadata = encrypted_storage.get_object_data("test-bucket", "secret.txt")

        assert data == original_data
@@ -711,14 +679,12 @@ class TestEncryptedStorage:
        encrypted_storage = EncryptedObjectStorage(storage, encryption)

        storage.create_bucket("test-bucket")
        # No encryption config

        original_data = b"Unencrypted data"
        stream = io.BytesIO(original_data)

        encrypted_storage.put_object("test-bucket", "plain.txt", stream)

        # Verify file on disk is NOT encrypted
        file_path = storage_root / "test-bucket" / "plain.txt"
        stored_data = file_path.read_bytes()
        assert stored_data == original_data
@@ -744,20 +710,17 @@ class TestEncryptedStorage:

        original_data = b"Explicitly encrypted data"
        stream = io.BytesIO(original_data)

        # Request encryption explicitly
        encrypted_storage.put_object(
            "test-bucket",
            "encrypted.txt",
            stream,
            server_side_encryption="AES256",
        )

        # Verify file is encrypted
        file_path = storage_root / "test-bucket" / "encrypted.txt"
        stored_data = file_path.read_bytes()
        assert stored_data != original_data

        # Get object - should be decrypted
        data, _ = encrypted_storage.get_object_data("test-bucket", "encrypted.txt")
        assert data == original_data
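Taken together, the three TestEncryptedStorage cases pin down the write-path rule: encrypt when the bucket carries an encryption configuration or when the caller passes server_side_encryption explicitly; otherwise store the bytes as-is. Roughly:

def should_encrypt(bucket_encryption: dict | None, server_side_encryption: str | None = None) -> bool:
    # An explicit per-request algorithm wins; otherwise the bucket default applies.
    if server_side_encryption == "AES256":
        return True
    return bool(bucket_encryption and bucket_encryption.get("Rules"))
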
Some files were not shown because too many files have changed in this diff.