Mirror of https://github.com/NixOS/nix.git (synced 2025-11-11 13:06:01 +01:00)
Merge pull request #11691 from NixOS/mergify/bp/2.24-maintenance/pr-11677
builtins.fetchurl: Fix segfault on s3:// URLs (backport #11677)

Commit 31df105f45 · 6 changed files with 77 additions and 1 deletion
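The segfault fixed here comes from the two C++ hunks below: downloadFile() reads the last element of res.urls (the final URL of the redirect chain), but the S3 download path never filled that vector in, so *res.urls.rbegin() dereferenced the reverse iterator of an empty vector, which is undefined behaviour and showed up as a crash. A minimal standalone sketch of the failure mode and of what the backport changes, using only the standard library (the Result type and the URL are illustrative, not Nix's actual code):

    // Minimal sketch of the failure mode; "Result" stands in for Nix's
    // FileTransferResult, and the URL is illustrative.
    #include <cassert>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Result {
        std::vector<std::string> urls;   // redirect chain; last element = final URL
    };

    int main()
    {
        Result res;   // an S3 transfer that never populated 'urls'
        // std::cout << *res.urls.rbegin() << "\n";   // undefined behaviour: empty vector

        // What the backport does, in essence: the producer records the request URI,
        // and the consumer asserts the invariant before reading the last URL.
        res.urls.push_back("s3://my-cache/nix-cache-info");
        assert(!res.urls.empty());
        std::cout << *res.urls.rbegin() << "\n";
    }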

@@ -90,6 +90,7 @@ DownloadFileResult downloadFile(
     /* Cache metadata for all URLs in the redirect chain. */
     for (auto & url : res.urls) {
         key.second.insert_or_assign("url", url);
+        assert(!res.urls.empty());
         infoAttrs.insert_or_assign("url", *res.urls.rbegin());
         getCache()->upsert(key, *store, infoAttrs, *storePath);
     }

@@ -754,12 +754,17 @@ struct curlFileTransfer : public FileTransfer
                 S3Helper s3Helper(profile, region, scheme, endpoint);

+                Activity act(*logger, lvlTalkative, actFileTransfer,
+                    fmt("downloading '%s'", request.uri),
+                    {request.uri}, request.parentAct);
+
                 // FIXME: implement ETag
                 auto s3Res = s3Helper.getObject(bucketName, key);
                 FileTransferResult res;
                 if (!s3Res.data)
                     throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
                 res.data = std::move(*s3Res.data);
+                res.urls.push_back(request.uri);
                 callback(std::move(res));
 #else
                 throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
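The one-line functional fix is res.urls.push_back(request.uri): the S3 branch now upholds the same contract as the HTTP branch, namely that a finished FileTransferResult carries at least the request URI in urls (S3 objects involve no redirect chain, so the request URI is also the final URL). A hedged sketch of that producer/consumer contract, with illustrative names rather than Nix's real types:

    // Sketch of the contract around 'urls'; names are illustrative, not Nix's API.
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct TransferResult {
        std::string data;
        std::vector<std::string> urls;   // must be non-empty when the callback runs
    };

    // Producer: fills in the data and records the request URI (the fix).
    void fetchObject(const std::string & uri,
        const std::function<void(TransferResult)> & callback)
    {
        TransferResult res;
        res.data = "<object body>";
        res.urls.push_back(uri);         // without this, consumers see an empty vector
        callback(std::move(res));
    }

    int main()
    {
        // Consumer: may now safely read the last (here, only) URL of the chain.
        fetchObject("s3://my-cache/nix-cache-info", [](TransferResult res) {
            std::cout << *res.urls.rbegin() << "\n";
        });
    }

The Activity lines added above appear to be independent of the crash fix; they make S3 downloads show up in the progress logger like other transfers.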

@@ -9,6 +9,7 @@
 #include "globals.hh"
 #include "compression.hh"
 #include "filetransfer.hh"
+#include "signals.hh"

 #include <aws/core/Aws.h>
 #include <aws/core/VersionConfig.h>

@@ -117,6 +118,7 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
 {
     bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
     {
+        checkInterrupt();
         auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
         if (retry)
             printError("AWS error '%s' (%s), will retry in %d ms",
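The checkInterrupt() call (supported by the new signals.hh include in the previous hunk) makes the retry strategy responsive to user interruption: pressing Ctrl-C while the S3 client is retrying now aborts the transfer instead of letting the retries continue. A generic sketch of the pattern, deliberately written without the AWS SDK types (the flag and the exception are illustrative stand-ins for Nix's signal handling):

    // Interrupt-aware retry decision, sketched without the AWS SDK.
    // 'interrupted' stands in for the flag that Nix's checkInterrupt() consults.
    #include <atomic>
    #include <iostream>
    #include <stdexcept>

    std::atomic<bool> interrupted{false};   // would be set from a signal handler

    void checkInterrupt()
    {
        if (interrupted)
            throw std::runtime_error("interrupted by the user");
    }

    bool shouldRetry(long attemptedRetries, long maxRetries)
    {
        checkInterrupt();                   // bail out before another backoff sleep
        return attemptedRetries < maxRetries;
    }

    int main()
    {
        long attempts = 0;
        while (shouldRetry(attempts, 3))    // pretend every attempt fails
            std::cout << "retrying (attempt " << ++attempts << ")\n";
    }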

@@ -148,4 +148,6 @@ in
   user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing;

   fetchurl = runNixOSTestFor "x86_64-linux" ./fetchurl.nix;
+
+  s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix;
 }

@@ -1,6 +1,6 @@
 # Test ‘nix-copy-closure’.

-{ lib, config, nixpkgs, hostPkgs, ... }:
+{ lib, config, nixpkgs, ... }:

 let
   pkgs = config.nodes.client.nixpkgs.pkgs;

tests/nixos/s3-binary-cache-store.nix (new file, 66 lines):

{ lib, config, nixpkgs, ... }:

let
  pkgs = config.nodes.client.nixpkgs.pkgs;

  pkgA = pkgs.cowsay;

  accessKey = "BKIKJAA5BMMU2RHO6IBB";
  secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
  env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}";

  storeUrl = "s3://my-cache?endpoint=http://server:9000&region=eu-west-1";

in {
  name = "nix-copy-closure";

  nodes =
    { server =
        { config, lib, pkgs, ... }:
        { virtualisation.writableStore = true;
          virtualisation.additionalPaths = [ pkgA ];
          environment.systemPackages = [ pkgs.minio-client ];
          nix.extraOptions = "experimental-features = nix-command";
          services.minio = {
            enable = true;
            region = "eu-west-1";
            rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
              MINIO_ROOT_USER=${accessKey}
              MINIO_ROOT_PASSWORD=${secretKey}
            '';
          };
          networking.firewall.allowedTCPPorts = [ 9000 ];
        };

      client =
        { config, pkgs, ... }:
        { virtualisation.writableStore = true;
          nix.extraOptions = "experimental-features = nix-command";
        };
    };

  testScript = { nodes }: ''
    # fmt: off
    start_all()

    # Create a binary cache.
    server.wait_for_unit("minio")

    server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
    server.succeed("mc mb minio/my-cache")

    server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}")

    # Test fetchurl on s3:// URLs while we're at it.
    client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000&region=eu-west-1\"; }'")

    # Copy a package from the binary cache.
    client.fail("nix path-info ${pkgA}")

    client.succeed("${env} nix store info --store '${storeUrl}' >&2")

    client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}")

    client.succeed("nix path-info ${pkgA}")
  '';
}