diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 98f742c70..58cb72776 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -4,6 +4,7 @@
 
 #include <cassert>
 #include <ranges>
+#include <regex>
 
 namespace nix {
 
@@ -27,6 +28,15 @@ public:
 private:
     ref<S3BinaryCacheStoreConfig> s3Config;
 
+    /**
+     * Creates a multipart upload for large objects to S3.
+     *
+     * @see
+     * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html#API_CreateMultipartUpload_RequestSyntax
+     */
+    std::string createMultipartUpload(
+        std::string_view key, std::string_view mimeType, std::optional<std::string_view> contentEncoding);
+
     /**
      * Abort a multipart upload
      *
@@ -45,6 +55,39 @@ void S3BinaryCacheStore::upsertFile(
     HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint);
 }
 
+std::string S3BinaryCacheStore::createMultipartUpload(
+    std::string_view key, std::string_view mimeType, std::optional<std::string_view> contentEncoding)
+{
+    auto req = makeRequest(key);
+
+    // setupForS3() converts s3:// to https:// but strips query parameters
+    // So we call it first, then add our multipart parameters
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploads"] = "";
+    req.uri = VerbatimURL(url);
+
+    req.method = HttpMethod::POST;
+    req.data = "";
+    req.mimeType = mimeType;
+
+    if (contentEncoding) {
+        req.headers.emplace_back("Content-Encoding", *contentEncoding);
+    }
+
+    auto result = getFileTransfer()->enqueueFileTransfer(req).get();
+
+    std::regex uploadIdRegex("<UploadId>([^<]+)</UploadId>");
+    std::smatch match;
+
+    if (std::regex_search(result.data, match, uploadIdRegex)) {
+        return match[1];
+    }
+
+    throw Error("S3 CreateMultipartUpload response missing <UploadId>");
+}
+
 void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_view uploadId)
 {
     auto req = makeRequest(key);