Mirror of https://github.com/NixOS/nix.git, synced 2025-11-08 19:46:02 +01:00
Merge pull request #14120 from lovesegfault/http-binary-cache-compression
feat(libstore/http-binary-cache-store): narinfo/ls/log compression
Commit bc66e131f8
6 changed files with 246 additions and 76 deletions
doc/manual/rl-next/http-binary-cache-compression.md (new file, 19 lines)

```diff
@@ -0,0 +1,19 @@
+---
+synopsis: "HTTP binary caches now support transparent compression for metadata"
+prs: []
+---
+
+HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them,
+reducing bandwidth usage and storage requirements. The compression is applied transparently using the
+`Content-Encoding` header, allowing compatible clients to automatically decompress the files.
+
+Three new configuration options control this behavior:
+- `narinfo-compression`: Compression method for `.narinfo` files
+- `ls-compression`: Compression method for `.ls` files
+- `log-compression`: Compression method for build logs in `log/` directory
+
+Example usage:
+```
+nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/...
+nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/...
+```
```
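To see the transparent decompression the release note describes, the served encoding can be inspected with curl (a sketch only; `cache.example.com` and the `<hash>` placeholder are illustrative, not from this PR):

```
# Headers only: the cache declares how the file is stored.
curl -sI 'http://cache.example.com/<hash>.narinfo' | grep -i '^content-encoding'
# content-encoding: gzip

# With --compressed, curl sends Accept-Encoding and decodes the body
# itself, so the narinfo arrives as plain text despite gzip storage.
curl -s --compressed 'http://cache.example.com/<hash>.narinfo'
```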
In `libstore/http-binary-cache-store`:

```diff
@@ -4,6 +4,7 @@
 #include "nix/store/nar-info-disk-cache.hh"
 #include "nix/util/callback.hh"
 #include "nix/store/store-registration.hh"
+#include "nix/util/compression.hh"
 
 namespace nix {
@@ -142,8 +143,27 @@ protected:
         const std::string & mimeType) override
     {
         auto req = makeRequest(path);
-        req.data = StreamToSourceAdapter(istream).drain();
+        auto data = StreamToSourceAdapter(istream).drain();
+
+        // Determine compression method based on file type
+        std::string compressionMethod;
+        if (hasSuffix(path, ".narinfo"))
+            compressionMethod = config->narinfoCompression;
+        else if (hasSuffix(path, ".ls"))
+            compressionMethod = config->lsCompression;
+        else if (hasPrefix(path, "log/"))
+            compressionMethod = config->logCompression;
+
+        // Apply compression if configured
+        if (!compressionMethod.empty()) {
+            data = compress(compressionMethod, data);
+            req.headers.emplace_back("Content-Encoding", compressionMethod);
+        }
+
+        req.data = std::move(data);
         req.mimeType = mimeType;
         try {
             getFileTransfer()->upload(req);
         } catch (FileTransferError & e) {
```
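On the wire, this upload path amounts to compressing the payload up front and declaring the encoding in a header; the receiving server needs no special support beyond storing the bytes. A rough shell equivalent of the resulting request for a `.narinfo` upload (hypothetical URL and file name; Nix issues the real request through its file-transfer layer):

```
# Sketch of the PUT produced when narinfo-compression=gzip is set.
# The MIME type matches what the NixOS test below serves for .narinfo.
gzip -c < example.narinfo \
  | curl -X PUT \
      -H 'Content-Encoding: gzip' \
      -H 'Content-Type: text/x-nix-narinfo' \
      --data-binary @- \
      'http://cache.example.com/example.narinfo'
```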
In `BinaryCacheStoreConfig` (the shared binary cache store settings):

```diff
@@ -59,6 +59,21 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
         The meaning and accepted values depend on the compression method selected.
         `-1` specifies that the default compression level should be used.
     )"};
 
+    const Setting<std::string> narinfoCompression{
+        this, "", "narinfo-compression", "Compression method for `.narinfo` files."};
+
+    const Setting<std::string> lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."};
+
+    const Setting<std::string> logCompression{
+        this,
+        "",
+        "log-compression",
+        R"(
+          Compression method for `log/*` files. It is recommended to
+          use a compression method supported by most web browsers
+          (e.g. `brotli`).
+        )"};
 };
 
 /**
```
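The `brotli` recommendation for `log-compression` exists because browsers advertise `Accept-Encoding: br` and can therefore display `log/*` files straight from the cache. The same works from the command line when curl is built with brotli support (placeholder URL and derivation name):

```
# The stored log is br-encoded; --compressed lets curl decode it inline.
curl -s --compressed 'http://cache.example.com/log/<drvname>.drv'
```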
tests/nixos/content-encoding.nix (new file, 190 lines)

```diff
@@ -0,0 +1,190 @@
+# Test content encoding support in Nix:
+# 1. Fetching compressed files from servers with Content-Encoding headers
+#    (e.g., fetching a zstd archive from a server using gzip Content-Encoding
+#    should preserve the zstd format, not double-decompress)
+# 2. HTTP binary cache store upload/download with compression support
+
+{ lib, config, ... }:
+
+let
+  pkgs = config.nodes.machine.nixpkgs.pkgs;
+
+  ztdCompressedFile = pkgs.stdenv.mkDerivation {
+    name = "dummy-zstd-compressed-archive";
+    dontUnpack = true;
+    nativeBuildInputs = with pkgs; [ zstd ];
+    buildPhase = ''
+      mkdir archive
+      for _ in {1..100}; do echo "lorem" > archive/file1; done
+      for _ in {1..100}; do echo "ipsum" > archive/file2; done
+      tar --zstd -cf archive.tar.zst archive
+    '';
+    installPhase = ''
+      install -Dm 644 -T archive.tar.zst $out/share/archive
+    '';
+  };
+
+  # Bare derivation for testing binary cache with logs
+  testDrv = builtins.toFile "test.nix" ''
+    derivation {
+      name = "test-package";
+      builder = "/bin/sh";
+      args = [ "-c" "echo 'Building test package...' >&2; echo 'hello from test package' > $out; echo 'Build complete!' >&2" ];
+      system = builtins.currentSystem;
+    }
+  '';
+in
+
+{
+  name = "content-encoding";
+
+  nodes = {
+    machine =
+      { pkgs, ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+
+        services.nginx.enable = true;
+        services.nginx.virtualHosts."localhost" = {
+          root = "${ztdCompressedFile}/share/";
+          # Make sure that nginx really tries to compress the
+          # file on the fly with no regard to size/mime.
+          # http://nginx.org/en/docs/http/ngx_http_gzip_module.html
+          extraConfig = ''
+            gzip on;
+            gzip_types *;
+            gzip_proxied any;
+            gzip_min_length 0;
+          '';
+
+          # Upload endpoint with WebDAV
+          locations."/cache-upload" = {
+            root = "/var/lib/nginx-cache";
+            extraConfig = ''
+              client_body_temp_path /var/lib/nginx-cache/tmp;
+              create_full_put_path on;
+              dav_methods PUT DELETE;
+              dav_access user:rw group:rw all:r;
+
+              # Don't try to compress already compressed files
+              gzip off;
+
+              # Rewrite to remove -upload suffix when writing files
+              rewrite ^/cache-upload/(.*)$ /cache/$1 break;
+            '';
+          };
+
+          # Download endpoint with Content-Encoding headers
+          locations."/cache" = {
+            root = "/var/lib/nginx-cache";
+            extraConfig = ''
+              gzip off;
+
+              # Serve .narinfo files with gzip encoding
+              location ~ \.narinfo$ {
+                add_header Content-Encoding gzip;
+                default_type "text/x-nix-narinfo";
+              }
+
+              # Serve .ls files with gzip encoding
+              location ~ \.ls$ {
+                add_header Content-Encoding gzip;
+                default_type "application/json";
+              }
+
+              # Serve log files with brotli encoding
+              location ~ ^/cache/log/ {
+                add_header Content-Encoding br;
+                default_type "text/plain";
+              }
+            '';
+          };
+        };
+
+        systemd.services.nginx = {
+          serviceConfig = {
+            StateDirectory = "nginx-cache";
+            StateDirectoryMode = "0755";
+          };
+        };
+
+        environment.systemPackages = with pkgs; [
+          file
+          gzip
+          brotli
+          curl
+        ];
+
+        virtualisation.writableStore = true;
+        nix.settings.substituters = lib.mkForce [ ];
+        nix.settings.experimental-features = [
+          "nix-command"
+          "flakes"
+        ];
+      };
+  };
+
+  # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed.
+  # Also test HTTP binary cache store with compression support.
+  testScript = ''
+    # fmt: off
+    start_all()
+
+    machine.wait_for_unit("nginx.service")
+
+    # Original test: zstd archive with gzip content-encoding
+    # Make sure that the file is properly compressed as the test would be meaningless otherwise
+    curl_output = machine.succeed("curl --compressed -v http://localhost/archive 2>&1")
+    assert "content-encoding: gzip" in curl_output.lower(), f"Expected 'content-encoding: gzip' in curl output, but got: {curl_output}"
+
+    archive_path = machine.succeed("nix-prefetch-url http://localhost/archive --print-path | tail -n1").strip()
+    mime_type = machine.succeed(f"file --brief --mime-type {archive_path}").strip()
+    assert mime_type == "application/zstd", f"Expected archive to be 'application/zstd', but got: {mime_type}"
+    machine.succeed(f"tar --zstd -xf {archive_path}")
+
+    # Test HTTP binary cache store with compression
+    outPath = machine.succeed("""
+      nix build --store /var/lib/build-store -f ${testDrv} --print-out-paths --print-build-logs
+    """).strip()
+
+    drvPath = machine.succeed(f"""
+      nix path-info --store /var/lib/build-store --derivation {outPath}
+    """).strip()
+
+    # Upload to cache with compression (use cache-upload endpoint)
+    machine.succeed(f"""
+      nix copy --store /var/lib/build-store --to 'http://localhost/cache-upload?narinfo-compression=gzip&ls-compression=gzip&write-nar-listing=1' {outPath} -vvvvv 2>&1 | tail -100
+    """)
+    machine.succeed(f"""
+      nix store copy-log --store /var/lib/build-store --to 'http://localhost/cache-upload?log-compression=br' {drvPath} -vvvvv 2>&1 | tail -100
+    """)
+
+    # List cache contents
+    print(machine.succeed("find /var/lib/nginx-cache -type f"))
+
+    narinfoHash = outPath.split('/')[3].split('-')[0]
+    drvName = drvPath.split('/')[3]
+
+    # Verify compression
+    machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.narinfo")
+    machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.ls")
+    machine.succeed(f"brotli -t /var/lib/nginx-cache/cache/log/{drvName}")
+
+    # Check Content-Encoding headers on the download endpoint
+    narinfo_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.narinfo 2>&1")
+    assert "content-encoding: gzip" in narinfo_headers.lower(), f"Expected 'content-encoding: gzip' for .narinfo file, but headers were: {narinfo_headers}"
+
+    ls_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.ls 2>&1")
+    assert "content-encoding: gzip" in ls_headers.lower(), f"Expected 'content-encoding: gzip' for .ls file, but headers were: {ls_headers}"
+
+    log_headers = machine.succeed(f"curl -I http://localhost/cache/log/{drvName} 2>&1")
+    assert "content-encoding: br" in log_headers.lower(), f"Expected 'content-encoding: br' for log file, but headers were: {log_headers}"
+
+    # Test fetching from cache
+    machine.succeed(f"nix copy --from 'http://localhost/cache' --no-check-sigs {outPath}")
+
+    # Test log retrieval
+    log_output = machine.succeed(f"nix log --store 'http://localhost/cache' {drvPath} 2>&1")
+    assert "Building test package" in log_output, f"Expected 'Building test package' in log output, but got: {log_output}"
+  '';
+}
```
In the NixOS test registration:

```diff
@@ -187,7 +187,7 @@ in
   ca-fd-leak = runNixOSTest ./ca-fd-leak;
 
-  gzip-content-encoding = runNixOSTest ./gzip-content-encoding.nix;
+  content-encoding = runNixOSTest ./content-encoding.nix;
 
   functional_user = runNixOSTest ./functional/as-user.nix;
```
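With the registration renamed, the test can be run on its own. The attribute path below assumes the usual `hydraJobs.tests.*` exposure of the entries in this file and may differ:

```
# Assumed attribute path, not taken from this PR.
nix build .#hydraJobs.tests.content-encoding
```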
tests/nixos/gzip-content-encoding.nix (deleted, 74 lines)

```diff
@@ -1,74 +0,0 @@
-# Test that compressed files fetched from server with compressed responses
-# do not get excessively decompressed.
-# E.g. fetching a zstd compressed tarball from a server,
-# which compresses the response with `Content-Encoding: gzip`.
-# The expected result is that the fetched file is a zstd archive.
-
-{ lib, config, ... }:
-
-let
-  pkgs = config.nodes.machine.nixpkgs.pkgs;
-
-  ztdCompressedFile = pkgs.stdenv.mkDerivation {
-    name = "dummy-zstd-compressed-archive";
-    dontUnpack = true;
-    nativeBuildInputs = with pkgs; [ zstd ];
-    buildPhase = ''
-      mkdir archive
-      for _ in {1..100}; do echo "lorem" > archive/file1; done
-      for _ in {1..100}; do echo "ipsum" > archive/file2; done
-      tar --zstd -cf archive.tar.zst archive
-    '';
-    installPhase = ''
-      install -Dm 644 -T archive.tar.zst $out/share/archive
-    '';
-  };
-
-  fileCmd = "${pkgs.file}/bin/file";
-in
-
-{
-  name = "gzip-content-encoding";
-
-  nodes = {
-    machine =
-      { config, pkgs, ... }:
-      {
-        networking.firewall.allowedTCPPorts = [ 80 ];
-
-        services.nginx.enable = true;
-        services.nginx.virtualHosts."localhost" = {
-          root = "${ztdCompressedFile}/share/";
-          # Make sure that nginx really tries to compress the
-          # file on the fly with no regard to size/mime.
-          # http://nginx.org/en/docs/http/ngx_http_gzip_module.html
-          extraConfig = ''
-            gzip on;
-            gzip_types *;
-            gzip_proxied any;
-            gzip_min_length 0;
-          '';
-        };
-        virtualisation.writableStore = true;
-        virtualisation.additionalPaths = with pkgs; [ file ];
-        nix.settings.substituters = lib.mkForce [ ];
-      };
-  };
-
-  # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed.
-  testScript =
-    { nodes }:
-    ''
-      # fmt: off
-      start_all()
-
-      machine.wait_for_unit("nginx.service")
-      machine.succeed("""
-        # Make sure that the file is properly compressed as the test would be meaningless otherwise
-        curl --compressed -v http://localhost/archive |& tr -s ' ' |& grep --ignore-case 'content-encoding: gzip'
-        archive_path=$(nix-prefetch-url http://localhost/archive --print-path | tail -n1)
-        [[ $(${fileCmd} --brief --mime-type $archive_path) == "application/zstd" ]]
-        tar --zstd -xf $archive_path
-      """)
-    '';
-}
```