1
1
Fork 0
mirror of https://github.com/NixOS/nix.git synced 2025-11-08 11:36:03 +01:00

Merge pull request #14477 from lovesegfault/http-upload-headers

refactor(libstore): pass headers into upload methods
This commit is contained in:
John Ericson 2025-11-07 20:41:14 +00:00 committed by GitHub
commit 3c2dcf42e9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 26 additions and 40 deletions

View file

@@ -4,7 +4,6 @@
#include "nix/store/nar-info-disk-cache.hh"
#include "nix/util/callback.hh"
#include "nix/store/store-registration.hh"
#include "nix/util/compression.hh"
namespace nix {
@@ -139,13 +138,14 @@ void HttpBinaryCacheStore::upload(
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding)
std::optional<Headers> headers)
{
auto req = makeRequest(path);
req.method = HttpMethod::PUT;
if (contentEncoding) {
req.headers.emplace_back("Content-Encoding", *contentEncoding);
if (headers) {
req.headers.reserve(req.headers.size() + headers->size());
std::ranges::move(std::move(*headers), std::back_inserter(req.headers));
}
req.data = {sizeHint, source};
@@ -154,18 +154,14 @@ void HttpBinaryCacheStore::upload(
getFileTransfer()->upload(req);
}
void HttpBinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType)
{
upload(path, static_cast<RestartableSource &>(source), source.size(), mimeType, source.getCompressionMethod());
}
void HttpBinaryCacheStore::upsertFile(
const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
{
try {
if (auto compressionMethod = getCompressionMethod(path)) {
CompressedSource compressed(source, *compressionMethod);
upload(path, compressed, mimeType);
Headers headers = {{"Content-Encoding", *compressionMethod}};
upload(path, compressed, compressed.size(), mimeType, std::move(headers));
} else {
upload(path, source, sizeHint, mimeType, std::nullopt);
}

View file

@@ -103,18 +103,7 @@ protected:
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding);
/**
* Uploads data to the binary cache (CompressedSource overload).
*
* This overload infers both the size and compression method from the CompressedSource.
*
* @param path The path in the binary cache to upload to
* @param source The compressed source (knows size and compression method)
* @param mimeType The MIME type of the content
*/
void upload(std::string_view path, CompressedSource & source, std::string_view mimeType);
std::optional<Headers> headers);
void getFile(const std::string & path, Sink & sink) override;

View file

@@ -50,7 +50,7 @@ private:
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding);
std::optional<Headers> headers);
/**
* Uploads a file to S3 using multipart upload.
@@ -67,7 +67,7 @@ private:
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding);
std::optional<Headers> headers);
/**
* A Sink that manages a complete S3 multipart upload lifecycle.
@@ -89,7 +89,7 @@ private:
std::string_view path,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding);
std::optional<Headers> headers);
void operator()(std::string_view data) override;
void finish();
@@ -102,8 +102,7 @@ private:
* @see
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html#API_CreateMultipartUpload_RequestSyntax
*/
std::string createMultipartUpload(
std::string_view key, std::string_view mimeType, std::optional<std::string_view> contentEncoding);
std::string createMultipartUpload(std::string_view key, std::string_view mimeType, std::optional<Headers> headers);
/**
* Uploads a single part of a multipart upload
@@ -134,18 +133,19 @@ private:
void S3BinaryCacheStore::upsertFile(
const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
{
auto doUpload = [&](RestartableSource & src, uint64_t size, std::optional<std::string_view> encoding) {
auto doUpload = [&](RestartableSource & src, uint64_t size, std::optional<Headers> headers) {
if (s3Config->multipartUpload && size > s3Config->multipartThreshold) {
uploadMultipart(path, src, size, mimeType, encoding);
uploadMultipart(path, src, size, mimeType, std::move(headers));
} else {
upload(path, src, size, mimeType, encoding);
upload(path, src, size, mimeType, std::move(headers));
}
};
try {
if (auto compressionMethod = getCompressionMethod(path)) {
CompressedSource compressed(source, *compressionMethod);
doUpload(compressed, compressed.size(), compressed.getCompressionMethod());
Headers headers = {{"Content-Encoding", *compressionMethod}};
doUpload(compressed, compressed.size(), std::move(headers));
} else {
doUpload(source, sizeHint, std::nullopt);
}
@@ -161,7 +161,7 @@ void S3BinaryCacheStore::upload(
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding)
std::optional<Headers> headers)
{
debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
if (sizeHint > AWS_MAX_PART_SIZE)
@@ -170,7 +170,7 @@ void S3BinaryCacheStore::upload(
renderSize(sizeHint),
renderSize(AWS_MAX_PART_SIZE));
HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding);
HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, std::move(headers));
}
void S3BinaryCacheStore::uploadMultipart(
@@ -178,10 +178,10 @@ void S3BinaryCacheStore::uploadMultipart(
RestartableSource & source,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding)
std::optional<Headers> headers)
{
debug("using S3 multipart upload for '%s' (%d bytes)", path, sizeHint);
MultipartSink sink(*this, path, sizeHint, mimeType, contentEncoding);
MultipartSink sink(*this, path, sizeHint, mimeType, std::move(headers));
source.drainInto(sink);
sink.finish();
}
@@ -191,7 +191,7 @@ S3BinaryCacheStore::MultipartSink::MultipartSink(
std::string_view path,
uint64_t sizeHint,
std::string_view mimeType,
std::optional<std::string_view> contentEncoding)
std::optional<Headers> headers)
: store(store)
, path(path)
{
@@ -227,7 +227,7 @@ S3BinaryCacheStore::MultipartSink::MultipartSink(
buffer.reserve(chunkSize);
partEtags.reserve(estimatedParts);
uploadId = store.createMultipartUpload(path, mimeType, contentEncoding);
uploadId = store.createMultipartUpload(path, mimeType, std::move(headers));
}
void S3BinaryCacheStore::MultipartSink::operator()(std::string_view data)
@@ -279,7 +279,7 @@ void S3BinaryCacheStore::MultipartSink::uploadChunk(std::string chunk)
}
std::string S3BinaryCacheStore::createMultipartUpload(
std::string_view key, std::string_view mimeType, std::optional<std::string_view> contentEncoding)
std::string_view key, std::string_view mimeType, std::optional<Headers> headers)
{
auto req = makeRequest(key);
@@ -296,8 +296,9 @@ std::string S3BinaryCacheStore::createMultipartUpload(
req.data = {payload};
req.mimeType = mimeType;
if (contentEncoding) {
req.headers.emplace_back("Content-Encoding", *contentEncoding);
if (headers) {
req.headers.reserve(req.headers.size() + headers->size());
std::move(headers->begin(), headers->end(), std::back_inserter(req.headers));
}
auto result = getFileTransfer()->enqueueFileTransfer(req).get();