diff --git a/doc/manual/rl-next/s3-storage-class.md b/doc/manual/rl-next/s3-storage-class.md new file mode 100644 index 000000000..d742b5747 --- /dev/null +++ b/doc/manual/rl-next/s3-storage-class.md @@ -0,0 +1,21 @@ +--- +synopsis: "S3 binary cache stores now support storage class configuration" +prs: [14464] +issues: [7015] +--- + +S3 binary cache stores now support configuring the storage class for uploaded objects via the `storage-class` parameter. This allows users to optimize costs by selecting appropriate storage tiers based on access patterns. + +Example usage: + +```bash +# Use Glacier storage for long-term archival +nix copy --to 's3://my-bucket?storage-class=GLACIER' /nix/store/... + +# Use Intelligent Tiering for automatic cost optimization +nix copy --to 's3://my-bucket?storage-class=INTELLIGENT_TIERING' /nix/store/... +``` + +The storage class applies to both regular uploads and multipart uploads. When not specified, objects use the bucket's default storage class. + +See the [S3 storage classes documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) for available storage classes and their characteristics. 
diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index f01759771..4a38088bc 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -122,4 +122,22 @@ TEST(S3BinaryCacheStore, parameterFiltering) EXPECT_EQ(ref.params["priority"], "10"); } +/** + * Test storage class configuration + */ +TEST(S3BinaryCacheStore, storageClassDefault) +{ + S3BinaryCacheStoreConfig config{"s3", "test-bucket", {}}; + EXPECT_EQ(config.storageClass.get(), ""); +} + +TEST(S3BinaryCacheStore, storageClassConfiguration) +{ + StringMap params; + params["storage-class"] = "GLACIER"; + + S3BinaryCacheStoreConfig config("s3", "test-bucket", params); + EXPECT_EQ(config.storageClass.get(), "GLACIER"); +} + } // namespace nix diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index bf86d0671..688b06aa5 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -93,6 +93,26 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig Default is 100 MiB. Only takes effect when multipart-upload is enabled. )"}; + const Setting<std::string> storageClass{ + this, + "", + "storage-class", + R"( + The S3 storage class to use for uploaded objects. When empty (default), + uses the bucket's default storage class. 
Valid values include: + - STANDARD (default, frequently accessed data) + - REDUCED_REDUNDANCY (less frequently accessed data) + - STANDARD_IA (infrequent access) + - ONEZONE_IA (infrequent access, single AZ) + - INTELLIGENT_TIERING (automatic cost optimization) + - GLACIER (archival with retrieval times in minutes to hours) + - DEEP_ARCHIVE (long-term archival with 12-hour retrieval) + - GLACIER_IR (instant retrieval archival) + + See AWS S3 documentation for detailed storage class descriptions and pricing: + https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html + )"}; + /** * Set of settings that are part of the S3 URI itself. * These are needed for region specification and other S3-specific settings. diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 37264dfae..93cef2dfc 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -170,7 +170,19 @@ void S3BinaryCacheStore::upload( renderSize(sizeHint), renderSize(AWS_MAX_PART_SIZE)); - HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding); + auto req = makeRequest(path); + req.method = HttpMethod::PUT; + + if (contentEncoding) { + req.headers.emplace_back("Content-Encoding", *contentEncoding); + } + if (std::string_view storageClass = s3Config->storageClass.get(); !storageClass.empty()) { + req.headers.emplace_back("x-amz-storage-class", storageClass); + } + + req.data = {sizeHint, source}; + req.mimeType = mimeType; + getFileTransfer()->upload(req); } void S3BinaryCacheStore::uploadMultipart( @@ -299,6 +311,9 @@ std::string S3BinaryCacheStore::createMultipartUpload( if (contentEncoding) { req.headers.emplace_back("Content-Encoding", *contentEncoding); } + if (std::string_view storageClass = s3Config->storageClass.get(); !storageClass.empty()) { + req.headers.emplace_back("x-amz-storage-class", storageClass); + } auto result = getFileTransfer()->enqueueFileTransfer(req).get();