1
1
Fork 0
Mirror of https://github.com/NixOS/nix.git, synced 2025-11-15 15:02:42 +01:00

Merge pull request #14464 from lovesegfault/nix-s3-storage-class

feat(libstore): add S3 storage class support
This commit is contained in:
John Ericson 2025-11-10 22:54:12 +00:00 committed by GitHub
commit 533db37ebc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 65 additions and 2 deletions

View file

@ -122,4 +122,22 @@ TEST(S3BinaryCacheStore, parameterFiltering)
EXPECT_EQ(ref.params["priority"], "10");
}
/**
 * With no "storage-class" parameter supplied, the setting must remain
 * unset so uploads fall back to the bucket's own default storage class.
 */
TEST(S3BinaryCacheStore, storageClassDefault)
{
    S3BinaryCacheStoreConfig defaultConfig{"s3", "test-bucket", {}};
    EXPECT_EQ(defaultConfig.storageClass.get(), std::nullopt);
}
/**
 * A "storage-class" store parameter must be parsed verbatim into the
 * storageClass setting.
 */
TEST(S3BinaryCacheStore, storageClassConfiguration)
{
    StringMap settings;
    settings["storage-class"] = "GLACIER";
    S3BinaryCacheStoreConfig configured("s3", "test-bucket", settings);
    EXPECT_EQ(configured.storageClass.get(), std::optional<std::string>("GLACIER"));
}
} // namespace nix

View file

@ -93,6 +93,26 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig
Default is 100 MiB. Only takes effect when multipart-upload is enabled.
)"};
// Optional S3 storage class applied to uploaded objects. When unset
// (std::nullopt) the bucket's default storage class applies; when set,
// the value is sent as the "x-amz-storage-class" header on uploads.
const Setting<std::optional<std::string>> storageClass{
this,
std::nullopt,
"storage-class",
R"(
The S3 storage class to use for uploaded objects. When not set (default),
uses the bucket's default storage class. Valid values include:
- STANDARD (default, frequently accessed data)
- REDUCED_REDUNDANCY (less frequently accessed data)
- STANDARD_IA (infrequent access)
- ONEZONE_IA (infrequent access, single AZ)
- INTELLIGENT_TIERING (automatic cost optimization)
- GLACIER (archival with retrieval times in minutes to hours)
- DEEP_ARCHIVE (long-term archival with 12-hour retrieval)
- GLACIER_IR (instant retrieval archival)
See AWS S3 documentation for detailed storage class descriptions and pricing:
https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html
)"};
/**
* Set of settings that are part of the S3 URI itself.
* These are needed for region specification and other S3-specific settings.

View file

@ -134,10 +134,14 @@ void S3BinaryCacheStore::upsertFile(
const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
{
auto doUpload = [&](RestartableSource & src, uint64_t size, std::optional<Headers> headers) {
Headers uploadHeaders = headers.value_or(Headers());
if (auto storageClass = s3Config->storageClass.get()) {
uploadHeaders.emplace_back("x-amz-storage-class", *storageClass);
}
if (s3Config->multipartUpload && size > s3Config->multipartThreshold) {
uploadMultipart(path, src, size, mimeType, std::move(headers));
uploadMultipart(path, src, size, mimeType, std::move(uploadHeaders));
} else {
upload(path, src, size, mimeType, std::move(headers));
upload(path, src, size, mimeType, std::move(uploadHeaders));
}
};