
refactor(libstore/s3-binary-cache-store): implement upload()

Stop delegating to `HttpBinaryCacheStore::upsertFile` and instead
handle compression in the S3 store's `upsertFile` override, then call
our own `upload()` method. This separation is necessary for future
multipart upload support.
Bernardo Meurer Costa 2025-10-30 02:49:33 +00:00
parent 3b2186e1c8
commit e636888a09
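
For context on the separation the commit message describes: a future multipart implementation has to choose between regular and multipart upload based on the final, compressed size, which is only known once compression has already happened. A rough sketch of where that dispatch could end up, reusing the AWS_MAX_PART_SIZE constant and the upload() overloads from the diff below; uploadMultipart is hypothetical and not part of this commit:

// Sketch only, not code from this commit; uploadMultipart is hypothetical.
void S3BinaryCacheStore::upsertFile(
    const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
{
    if (auto compressionMethod = getCompressionMethod(path)) {
        CompressedSource compressed(source, *compressionMethod);
        // Compressing eagerly means the exact on-the-wire size is known here,
        // so the regular-vs-multipart choice can be made before any PUT.
        if (compressed.size() > AWS_MAX_PART_SIZE)
            uploadMultipart(path, compressed, mimeType); // hypothetical future path
        else
            upload(path, compressed, mimeType);
    } else {
        upload(path, source, sizeHint, mimeType, std::nullopt);
    }
}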

@@ -1,6 +1,10 @@
#include "nix/store/s3-binary-cache-store.hh"
#include "nix/store/http-binary-cache-store.hh"
#include "nix/store/store-registration.hh"
#include "nix/util/error.hh"
#include "nix/util/logging.hh"
#include "nix/util/serialise.hh"
#include "nix/util/util.hh"
#include <cassert>
#include <ranges>
@@ -9,6 +13,10 @@
namespace nix {
MakeError(UploadToS3, Error);
static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
{
public:
@@ -26,6 +34,26 @@ public:
private:
ref<S3BinaryCacheStoreConfig> s3Config;
/**
* Uploads a file to S3 using a regular (non-multipart) upload.
*
* This method is suitable for files up to 5GiB in size. For larger files,
* multipart upload should be used instead.
*
* @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
*/
void upload(
std::string_view path,
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding);
/**
* Uploads a file to S3 (CompressedSource overload).
*/
void upload(std::string_view path, CompressedSource & source, std::string_view mimeType);
/**
* Creates a multipart upload for large objects to S3.
*
@@ -69,7 +97,40 @@ private:
void S3BinaryCacheStore::upsertFile(
const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
{
HttpBinaryCacheStore::upsertFile(path, source, mimeType, sizeHint);
if (auto compressionMethod = getCompressionMethod(path)) {
CompressedSource compressed(source, *compressionMethod);
upload(path, compressed, mimeType);
} else {
upload(path, source, sizeHint, mimeType, std::nullopt);
}
}
void S3BinaryCacheStore::upload(
std::string_view path,
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding)
{
debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
if (sizeHint > AWS_MAX_PART_SIZE)
throw Error(
"file too large for S3 upload without multipart: %s would exceed maximum size of %s. Consider enabling multipart-upload.",
renderSize(sizeHint),
renderSize(AWS_MAX_PART_SIZE));
try {
HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding);
} catch (FileTransferError & e) {
UploadToS3 err(e.message());
err.addTrace({}, "while uploading to S3 binary cache at '%s'", config->cacheUri.to_string());
throw err;
}
}
void S3BinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType)
{
upload(path, static_cast<RestartableSource &>(source), source.size(), mimeType, source.getCompressionMethod());
}
std::string S3BinaryCacheStore::createMultipartUpload(
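
The CompressedSource overload above forwards source.size() as the size hint and source.getCompressionMethod() as the content encoding, but neither member is defined in this diff. A minimal self-contained sketch of the shape those two calls imply (compress eagerly so the encoded size is exact before the PUT); everything below is an assumption except the two member names used above and the base class implied by the static_cast:

#include <cstdint>
#include <string>
#include <string_view>

struct RestartableSource { /* stand-in for nix's restartable stream type */ };

// Assumed shape: run the compressor once at construction and buffer the
// result, so size() and getCompressionMethod() are cheap, exact queries.
class CompressedSource : public RestartableSource
{
    std::string compressed; // encoded bytes, produced up front
    std::string method;     // e.g. "xz"
public:
    CompressedSource(RestartableSource &, std::string_view method)
        : method(method)
    {
        // here the wrapped source would be drained through the configured
        // compressor, appending its output to `compressed`
    }

    uint64_t size() const { return compressed.size(); }               // exact compressed size
    std::string_view getCompressionMethod() const { return method; } // becomes Content-Encoding
};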