From e636888a094ddb642edf00bccbd7b077df5939d5 Mon Sep 17 00:00:00 2001
From: Bernardo Meurer Costa
Date: Thu, 30 Oct 2025 02:49:33 +0000
Subject: [PATCH] refactor(libstore/s3-binary-cache-store): implement
 `upload()`

Stop delegating to `HttpBinaryCacheStore::upsertFile` and instead handle
compression in the S3 store's `upsertFile` override, then call our own
`upload()` method. This separation is necessary for future multipart
upload support.
---
 src/libstore/s3-binary-cache-store.cc | 63 ++++++++++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index d3e6c7baf..9303a80f8 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -1,6 +1,10 @@
 #include "nix/store/s3-binary-cache-store.hh"
 #include "nix/store/http-binary-cache-store.hh"
 #include "nix/store/store-registration.hh"
+#include "nix/util/error.hh"
+#include "nix/util/logging.hh"
+#include "nix/util/serialise.hh"
+#include "nix/util/util.hh"
 #include
 #include
 
@@ -9,6 +13,10 @@
 
 namespace nix {
 
+MakeError(UploadToS3, Error);
+
+static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
+
 class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 {
 public:
@@ -26,6 +34,26 @@ public:
 private:
     ref<S3BinaryCacheStoreConfig> s3Config;
 
+    /**
+     * Uploads a file to S3 using a regular (non-multipart) upload.
+     *
+     * This method is suitable for files up to 5GiB in size. For larger files,
+     * multipart upload should be used instead.
+     *
+     * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+     */
+    void upload(
+        std::string_view path,
+        RestartableSource & source,
+        uint64_t sizeHint,
+        std::string_view mimeType,
+        std::optional<std::string> contentEncoding);
+
+    /**
+     * Uploads a file to S3 (CompressedSource overload).
+     */
+    void upload(std::string_view path, CompressedSource & source, std::string_view mimeType);
+
     /**
      * Creates a multipart upload for large objects to S3.
      *
@@ -69,7 +97,40 @@ private:
 void S3BinaryCacheStore::upsertFile(
     const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
 {
-    HttpBinaryCacheStore::upsertFile(path, source, mimeType, sizeHint);
+    if (auto compressionMethod = getCompressionMethod(path)) {
+        CompressedSource compressed(source, *compressionMethod);
+        upload(path, compressed, mimeType);
+    } else {
+        upload(path, source, sizeHint, mimeType, std::nullopt);
+    }
+}
+
+void S3BinaryCacheStore::upload(
+    std::string_view path,
+    RestartableSource & source,
+    uint64_t sizeHint,
+    std::string_view mimeType,
+    std::optional<std::string> contentEncoding)
+{
+    debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
+    if (sizeHint > AWS_MAX_PART_SIZE)
+        throw Error(
+            "file too large for S3 upload without multipart: %s would exceed maximum size of %s. Consider enabling multipart-upload.",
+            renderSize(sizeHint),
+            renderSize(AWS_MAX_PART_SIZE));
+
+    try {
+        HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding);
+    } catch (FileTransferError & e) {
+        UploadToS3 err(e.message());
+        err.addTrace({}, "while uploading to S3 binary cache at '%s'", config->cacheUri.to_string());
+        throw err;
+    }
+}
+
+void S3BinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType)
+{
+    upload(path, static_cast<RestartableSource &>(source), source.size(), mimeType, source.getCompressionMethod());
 }
 
 std::string S3BinaryCacheStore::createMultipartUpload(