Mirror of https://github.com/NixOS/nix.git — synced 2025-11-20 01:09:37 +01:00
feat(libstore/s3-binary-cache-store): add multipart upload config settings

Add three configuration settings to `S3BinaryCacheStoreConfig` to control multipart upload behavior:

- `bool multipart-upload` (default `false`): enable/disable multipart uploads
- `uint64_t multipart-chunk-size` (default 5 MiB): size of each upload part
- `uint64_t multipart-threshold` (default 100 MiB): minimum file size for multipart

The feature is disabled by default.
This commit is contained in:
parent
2d83bc6b83
commit
bf947bfc26
2 changed files with 55 additions and 0 deletions
|
|
@ -15,6 +15,7 @@ namespace nix {
|
|||
|
||||
MakeError(UploadToS3, Error);
|
||||
|
||||
static constexpr uint64_t AWS_MIN_PART_SIZE = 5 * 1024 * 1024; // 5MiB
|
||||
static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
|
||||
|
||||
class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
|
||||
|
|
@ -253,6 +254,28 @@ S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig(
|
|||
cacheUri.query[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
if (multipartChunkSize < AWS_MIN_PART_SIZE) {
|
||||
throw UsageError(
|
||||
"multipart-chunk-size must be at least %s, got %s",
|
||||
renderSize(AWS_MIN_PART_SIZE),
|
||||
renderSize(multipartChunkSize.get()));
|
||||
}
|
||||
|
||||
if (multipartChunkSize > AWS_MAX_PART_SIZE) {
|
||||
throw UsageError(
|
||||
"multipart-chunk-size must be at most %s, got %s",
|
||||
renderSize(AWS_MAX_PART_SIZE),
|
||||
renderSize(multipartChunkSize.get()));
|
||||
}
|
||||
|
||||
if (multipartUpload && multipartThreshold < multipartChunkSize) {
|
||||
warn(
|
||||
"multipart-threshold (%s) is less than multipart-chunk-size (%s), "
|
||||
"which may result in single-part multipart uploads",
|
||||
renderSize(multipartThreshold.get()),
|
||||
renderSize(multipartChunkSize.get()));
|
||||
}
|
||||
}
|
||||
|
||||
std::string S3BinaryCacheStoreConfig::getHumanReadableURI() const
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue