
Merge remote-tracking branch 'upstream/master' into overlayfs-store

commit 8434f23c97
John Ericson, 2023-10-25 14:23:20 -04:00
29 changed files with 479 additions and 338 deletions


@@ -386,27 +386,27 @@ void LocalDerivationGoal::cleanupPostOutputsRegisteredModeNonCheck()
     cleanupPostOutputsRegisteredModeCheck();
 }

 #if __linux__
-static void linkOrCopy(const Path & from, const Path & to)
-{
-    if (link(from.c_str(), to.c_str()) == -1) {
-        /* Hard-linking fails if we exceed the maximum link count on a
-           file (e.g. 32000 of ext3), which is quite possible after a
-           'nix-store --optimise'. FIXME: actually, why don't we just
-           bind-mount in this case?
-
-           It can also fail with EPERM in BeegFS v7 and earlier versions
-           or fail with EXDEV in OpenAFS
-           which don't allow hard-links to other directories */
-        if (errno != EMLINK && errno != EPERM && errno != EXDEV)
-            throw SysError("linking '%s' to '%s'", to, from);
-        copyPath(from, to);
+static void doBind(const Path & source, const Path & target, bool optional = false) {
+    debug("bind mounting '%1%' to '%2%'", source, target);
+    struct stat st;
+    if (stat(source.c_str(), &st) == -1) {
+        if (optional && errno == ENOENT)
+            return;
+        else
+            throw SysError("getting attributes of path '%1%'", source);
     }
-}
+    if (S_ISDIR(st.st_mode))
+        createDirs(target);
+    else {
+        createDirs(dirOf(target));
+        writeFile(target, "");
+    }
+    if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) == -1)
+        throw SysError("bind mount from '%1%' to '%2%' failed", source, target);
+};
 #endif

 void LocalDerivationGoal::startBuilder()
 {
     if ((buildUser && buildUser->getUIDCount() != 1)
@@ -581,7 +581,7 @@ void LocalDerivationGoal::startBuilder()
         /* Allow a user-configurable set of directories from the
            host file system. */
-        dirsInChroot.clear();
+        pathsInChroot.clear();

         for (auto i : settings.sandboxPaths.get()) {
             if (i.empty()) continue;
@@ -592,19 +592,19 @@ void LocalDerivationGoal::startBuilder()
             }
             size_t p = i.find('=');
             if (p == std::string::npos)
-                dirsInChroot[i] = {i, optional};
+                pathsInChroot[i] = {i, optional};
             else
-                dirsInChroot[i.substr(0, p)] = {i.substr(p + 1), optional};
+                pathsInChroot[i.substr(0, p)] = {i.substr(p + 1), optional};
         }
         if (hasPrefix(worker.store.storeDir, tmpDirInSandbox))
         {
             throw Error("`sandbox-build-dir` must not contain the storeDir");
         }
-        dirsInChroot[tmpDirInSandbox] = tmpDir;
+        pathsInChroot[tmpDirInSandbox] = tmpDir;

         /* Add the closure of store paths to the chroot. */
         StorePathSet closure;
-        for (auto & i : dirsInChroot)
+        for (auto & i : pathsInChroot)
             try {
                 if (worker.store.isInStore(i.second.source))
                     worker.store.computeFSClosure(worker.store.toStorePath(i.second.source).first, closure);
@@ -615,7 +615,7 @@ void LocalDerivationGoal::startBuilder()
             }

         for (auto & i : closure) {
             auto p = worker.store.printStorePath(i);
-            dirsInChroot.insert_or_assign(p, p);
+            pathsInChroot.insert_or_assign(p, p);
         }

         PathSet allowedPaths = settings.allowedImpureHostPrefixes;
@@ -643,7 +643,7 @@ void LocalDerivationGoal::startBuilder()
             /* Allow files in __impureHostDeps to be missing; e.g.
                macOS 11+ has no /usr/lib/libSystem*.dylib */
-            dirsInChroot[i] = {i, true};
+            pathsInChroot[i] = {i, true};
         }

 #if __linux__
@@ -711,15 +711,12 @@ void LocalDerivationGoal::startBuilder()
         for (auto & i : inputPaths) {
             auto p = worker.store.printStorePath(i);
             Path r = worker.store.toRealPath(p);
-            if (S_ISDIR(lstat(r).st_mode))
-                dirsInChroot.insert_or_assign(p, r);
-            else
-                linkOrCopy(r, chrootRootDir + p);
+            pathsInChroot.insert_or_assign(p, r);
         }

         /* If we're repairing, checking or rebuilding part of a
            multiple-outputs derivation, it's possible that we're
-           rebuilding a path that is in settings.dirsInChroot
+           rebuilding a path that is in settings.sandbox-paths
            (typically the dependencies of /bin/sh). Throw them
            out. */
         for (auto & i : drv->outputsAndOptPaths(worker.store)) {
@@ -729,7 +726,7 @@
                is already in the sandbox, so we don't need to worry about
                removing it. */
             if (i.second.second)
-                dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
+                pathsInChroot.erase(worker.store.printStorePath(*i.second.second));
         }

         if (cgroup) {
@@ -787,9 +784,9 @@
                 } else {
                     auto p = line.find('=');
                     if (p == std::string::npos)
-                        dirsInChroot[line] = line;
+                        pathsInChroot[line] = line;
                     else
-                        dirsInChroot[line.substr(0, p)] = line.substr(p + 1);
+                        pathsInChroot[line.substr(0, p)] = line.substr(p + 1);
                 }
             }
         }
@@ -1565,41 +1562,32 @@ void LocalDerivationGoal::addDependency(const StorePath & path)
             Path source = worker.store.Store::toRealPath(path);
             Path target = chrootRootDir + worker.store.printStorePath(path);
-            debug("bind-mounting %s -> %s", target, source);

-            if (pathExists(target))
+            if (pathExists(target)) {
+                // There is a similar debug message in doBind, so only run it in this block to not have double messages.
+                debug("bind-mounting %s -> %s", target, source);
                 throw Error("store path '%s' already exists in the sandbox", worker.store.printStorePath(path));
+            }

-            auto st = lstat(source);
-
-            if (S_ISDIR(st.st_mode)) {
-
-                /* Bind-mount the path into the sandbox. This requires
-                   entering its mount namespace, which is not possible
-                   in multithreaded programs. So we do this in a
-                   child process.*/
-                Pid child(startProcess([&]() {
+            /* Bind-mount the path into the sandbox. This requires
+               entering its mount namespace, which is not possible
+               in multithreaded programs. So we do this in a
+               child process.*/
+            Pid child(startProcess([&]() {

-                    if (usingUserNamespace && (setns(sandboxUserNamespace.get(), 0) == -1))
-                        throw SysError("entering sandbox user namespace");
+                if (usingUserNamespace && (setns(sandboxUserNamespace.get(), 0) == -1))
+                    throw SysError("entering sandbox user namespace");

-                    if (setns(sandboxMountNamespace.get(), 0) == -1)
-                        throw SysError("entering sandbox mount namespace");
+                if (setns(sandboxMountNamespace.get(), 0) == -1)
+                    throw SysError("entering sandbox mount namespace");

-                    createDirs(target);
-                    if (mount(source.c_str(), target.c_str(), "", MS_BIND, 0) == -1)
-                        throw SysError("bind mount from '%s' to '%s' failed", source, target);
+                doBind(source, target);

-                    _exit(0);
-                }));
+                _exit(0);
+            }));

-                int status = child.wait();
-                if (status != 0)
-                    throw Error("could not add path '%s' to sandbox", worker.store.printStorePath(path));
-
-            } else
-                linkOrCopy(source, target);
+            int status = child.wait();
+            if (status != 0)
+                throw Error("could not add path '%s' to sandbox", worker.store.printStorePath(path));

 #else
             throw Error("don't know how to make path '%s' (produced by a recursive Nix call) appear in the sandbox",
@@ -1789,7 +1777,7 @@ void LocalDerivationGoal::runChild()
                 /* Set up a nearly empty /dev, unless the user asked to
                    bind-mount the host /dev. */
                 Strings ss;
-                if (dirsInChroot.find("/dev") == dirsInChroot.end()) {
+                if (pathsInChroot.find("/dev") == pathsInChroot.end()) {
                     createDirs(chrootRootDir + "/dev/shm");
                     createDirs(chrootRootDir + "/dev/pts");
                     ss.push_back("/dev/full");
@@ -1824,34 +1812,15 @@
                         ss.push_back(path);

                 if (settings.caFile != "")
-                    dirsInChroot.try_emplace("/etc/ssl/certs/ca-certificates.crt", settings.caFile, true);
+                    pathsInChroot.try_emplace("/etc/ssl/certs/ca-certificates.crt", settings.caFile, true);
             }

-            for (auto & i : ss) dirsInChroot.emplace(i, i);
+            for (auto & i : ss) pathsInChroot.emplace(i, i);

             /* Bind-mount all the directories from the "host"
                filesystem that we want in the chroot
                environment. */
-            auto doBind = [&](const Path & source, const Path & target, bool optional = false) {
-                debug("bind mounting '%1%' to '%2%'", source, target);
-                struct stat st;
-                if (stat(source.c_str(), &st) == -1) {
-                    if (optional && errno == ENOENT)
-                        return;
-                    else
-                        throw SysError("getting attributes of path '%1%'", source);
-                }
-                if (S_ISDIR(st.st_mode))
-                    createDirs(target);
-                else {
-                    createDirs(dirOf(target));
-                    writeFile(target, "");
-                }
-                if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) == -1)
-                    throw SysError("bind mount from '%1%' to '%2%' failed", source, target);
-            };
-
-            for (auto & i : dirsInChroot) {
+            for (auto & i : pathsInChroot) {
                 if (i.second.source == "/proc") continue; // backwards compatibility

 #if HAVE_EMBEDDED_SANDBOX_SHELL
@@ -1892,7 +1861,7 @@
                if /dev/ptx/ptmx exists). */
             if (pathExists("/dev/pts/ptmx") &&
                 !pathExists(chrootRootDir + "/dev/ptmx")
-                && !dirsInChroot.count("/dev/pts"))
+                && !pathsInChroot.count("/dev/pts"))
             {
                 if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == 0)
                 {
@@ -2027,7 +1996,7 @@
             /* We build the ancestry before adding all inputPaths to the store because we know they'll
                all have the same parents (the store), and there might be lots of inputs. This isn't
                particularly efficient... I doubt it'll be a bottleneck in practice */
-            for (auto & i : dirsInChroot) {
+            for (auto & i : pathsInChroot) {
                 Path cur = i.first;
                 while (cur.compare("/") != 0) {
                     cur = dirOf(cur);
@@ -2035,7 +2004,7 @@
                 }
             }

-            /* And we want the store in there regardless of how empty dirsInChroot. We include the innermost
+            /* And we want the store in there regardless of how empty pathsInChroot. We include the innermost
                path component this time, since it's typically /nix/store and we care about that. */
             Path cur = worker.store.storeDir;
             while (cur.compare("/") != 0) {
@@ -2046,7 +2015,7 @@
             /* Add all our input paths to the chroot */
             for (auto & i : inputPaths) {
                 auto p = worker.store.printStorePath(i);
-                dirsInChroot[p] = p;
+                pathsInChroot[p] = p;
             }

             /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */
@@ -2077,7 +2046,7 @@
                without file-write* allowed, access() incorrectly returns EPERM
             */
             sandboxProfile += "(allow file-read* file-write* process-exec\n";
-            for (auto & i : dirsInChroot) {
+            for (auto & i : pathsInChroot) {
                 if (i.first != i.second.source)
                     throw Error(
                         "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin",


@@ -86,8 +86,8 @@ struct LocalDerivationGoal : public DerivationGoal
             : source(source), optional(optional)
         { }
     };

-    typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
-    DirsInChroot dirsInChroot;
+    typedef map<Path, ChrootPath> PathsInChroot; // maps target path to source path
+    PathsInChroot pathsInChroot;

     typedef map<std::string, std::string> Environment;
     Environment env;
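The rename from DirsInChroot to PathsInChroot reflects that entries may now be regular files as well as directories. A self-contained sketch of the map's shape (simplified; the entries shown are hypothetical):

```cpp
#include <map>
#include <string>

using Path = std::string;

struct ChrootPath
{
    Path source;
    bool optional = false;
};

typedef std::map<Path, ChrootPath> PathsInChroot; // maps target path to source path

int main()
{
    PathsInChroot pathsInChroot;
    // hypothetical entries: a remapped file and an optional host file
    pathsInChroot["/bin/sh"] = ChrootPath{"/run/current-system/sw/bin/sh", false};
    pathsInChroot["/etc/resolv.conf"] = ChrootPath{"/etc/resolv.conf", true};
}
```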


@@ -174,15 +174,19 @@ void builtinBuildenv(const BasicDerivation & drv)
     /* Convert the stuff we get from the environment back into a
      * coherent data type. */
     Packages pkgs;
-    auto derivations = tokenizeString<Strings>(getAttr("derivations"));
-    while (!derivations.empty()) {
-        /* !!! We're trusting the caller to structure derivations env var correctly */
-        auto active = derivations.front(); derivations.pop_front();
-        auto priority = stoi(derivations.front()); derivations.pop_front();
-        auto outputs = stoi(derivations.front()); derivations.pop_front();
-        for (auto n = 0; n < outputs; n++) {
-            auto path = derivations.front(); derivations.pop_front();
-            pkgs.emplace_back(path, active != "false", priority);
+    {
+        auto derivations = tokenizeString<Strings>(getAttr("derivations"));
+
+        auto itemIt = derivations.begin();
+        while (itemIt != derivations.end()) {
+            /* !!! We're trusting the caller to structure derivations env var correctly */
+            const bool active = "false" != *itemIt++;
+            const int priority = stoi(*itemIt++);
+            const size_t outputs = stoul(*itemIt++);
+
+            for (size_t n {0}; n < outputs; n++) {
+                pkgs.emplace_back(std::move(*itemIt++), active, priority);
+            }
         }
     }
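The rewritten loop consumes the same flat token stream as before, just through an iterator instead of destructive pop_front calls: each record is an active flag, a priority, an output count, and then that many store paths. A hypothetical illustration of one such record (paths invented):

```cpp
#include <cassert>
#include <list>
#include <string>

int main()
{
    // one record: active="true", priority=5, followed by two output paths
    std::list<std::string> derivations {
        "true", "5", "2",
        "/nix/store/aaaaaaaa-hello", "/nix/store/bbbbbbbb-hello-dev"
    };

    auto itemIt = derivations.begin();
    const bool active = "false" != *itemIt++;
    const int priority = std::stoi(*itemIt++);
    const size_t outputs = std::stoul(*itemIt++);
    assert(active && priority == 5 && outputs == 2);
}
```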


@@ -29,12 +29,13 @@ std::string ContentAddressMethod::renderPrefix() const
 ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
 {
-    ContentAddressMethod method = FileIngestionMethod::Flat;
-    if (splitPrefix(m, "r:"))
-        method = FileIngestionMethod::Recursive;
-    else if (splitPrefix(m, "text:"))
-        method = TextIngestionMethod {};
-    return method;
+    if (splitPrefix(m, "r:")) {
+        return FileIngestionMethod::Recursive;
+    }
+    else if (splitPrefix(m, "text:")) {
+        return TextIngestionMethod {};
+    }
+    return FileIngestionMethod::Flat;
 }

 std::string ContentAddressMethod::render(HashType ht) const
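parsePrefix relies on splitPrefix consuming the matched prefix from the string_view in place. A minimal re-implementation sketch of that contract (the real helper lives in Nix's libutil; this version is only illustrative):

```cpp
#include <cassert>
#include <string_view>

static bool splitPrefix(std::string_view & s, std::string_view prefix)
{
    if (s.size() >= prefix.size() && s.substr(0, prefix.size()) == prefix) {
        s.remove_prefix(prefix.size());
        return true;
    }
    return false;
}

int main()
{
    std::string_view m = "r:sha256";
    assert(splitPrefix(m, "r:"));   // method prefix consumed...
    assert(m == "sha256");          // ...leaving the hash type behind
}
```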


@@ -716,7 +716,7 @@ public:
       - `apple-virt`

-        Included on darwin if virtualization is available.
+        Included on Darwin if virtualization is available.

       - `kvm`


@@ -7,6 +7,31 @@
 namespace nix {

+#if __linux__
+static std::vector<gid_t> get_group_list(const char *username, gid_t group_id)
+{
+    std::vector<gid_t> gids;
+    gids.resize(32); // Initial guess
+
+    auto getgroupl_failed {[&] {
+        int ngroups = gids.size();
+        int err = getgrouplist(username, group_id, gids.data(), &ngroups);
+        gids.resize(ngroups);
+        return err == -1;
+    }};
+
+    // The first error means that the vector was not big enough.
+    // If it happens again, there is some different problem.
+    if (getgroupl_failed() && getgroupl_failed()) {
+        throw SysError("failed to get list of supplementary groups for '%s'", username);
+    }
+
+    return gids;
+}
+#endif
+
 struct SimpleUserLock : UserLock
 {
     AutoCloseFD fdUserLock;
@@ -67,37 +92,14 @@ struct SimpleUserLock : UserLock
                     throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);

 #if __linux__
-                /* Get the list of supplementary groups of this build
-                   user. This is usually either empty or contains a
-                   group such as "kvm". */
-                int ngroups = 32; // arbitrary initial guess
-                std::vector<gid_t> gids;
-                gids.resize(ngroups);
-
-                int err = getgrouplist(
-                    pw->pw_name, pw->pw_gid,
-                    gids.data(),
-                    &ngroups);
-
-                /* Our initial size of 32 wasn't sufficient, the
-                   correct size has been stored in ngroups, so we try
-                   again. */
-                if (err == -1) {
-                    gids.resize(ngroups);
-                    err = getgrouplist(
-                        pw->pw_name, pw->pw_gid,
-                        gids.data(),
-                        &ngroups);
-                }
-
-                // If it failed once more, then something must be broken.
-                if (err == -1)
-                    throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
+                /* Get the list of supplementary groups of this user. This is
+                 * usually either empty or contains a group such as "kvm". */

-                // Finally, trim back the GID list to its real size.
-                for (auto i = 0; i < ngroups; i++)
-                    if (gids[i] != lock->gid)
-                        lock->supplementaryGIDs.push_back(gids[i]);
+                for (auto gid : get_group_list(pw->pw_name, pw->pw_gid)) {
+                    if (gid != lock->gid)
+                        lock->supplementaryGIDs.push_back(gid);
+                }
 #endif

                 return lock;
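The new helper leans on the getgrouplist(3) contract: on Linux it returns -1 and stores the required count in *ngroups when the supplied buffer is too small, so a single resize-and-retry suffices. A standalone usage sketch of the same idiom:

```cpp
#include <cstdio>
#include <grp.h>
#include <pwd.h>
#include <unistd.h>
#include <vector>

int main()
{
    struct passwd * pw = getpwuid(getuid());
    if (!pw) return 1;

    int ngroups = 32; // initial guess, as in the helper above
    std::vector<gid_t> gids(ngroups);
    if (getgrouplist(pw->pw_name, pw->pw_gid, gids.data(), &ngroups) == -1) {
        gids.resize(ngroups); // ngroups now holds the real count
        getgrouplist(pw->pw_name, pw->pw_gid, gids.data(), &ngroups);
    }
    gids.resize(ngroups);

    for (gid_t gid : gids)
        std::printf("%u\n", (unsigned) gid);
    return 0;
}
```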


@@ -2,7 +2,103 @@ R"(
 **Store URL format**: `s3://`*bucket-name*

-This store allows reading and writing a binary cache stored in an AWS
-S3 bucket.
+This store allows reading and writing a binary cache stored in an AWS S3 (or S3-compatible service) bucket.
+This store shares many idioms with the [HTTP Binary Cache Store](#http-binary-cache-store).
+
+For AWS S3, the binary cache URL for a bucket named `example-nix-cache` will be exactly <s3://example-nix-cache>.
+For S3 compatible binary caches, consult that cache's documentation.
+
+### Anonymous reads to your S3-compatible binary cache
+
+> If your binary cache is publicly accessible and does not require authentication,
+> it is simplest to use the [HTTP Binary Cache Store] rather than S3 Binary Cache Store with
+> <https://example-nix-cache.s3.amazonaws.com> instead of <s3://example-nix-cache>.
+
+Your bucket will need a
+[bucket policy](https://docs.aws.amazon.com/AmazonS3/v1/userguide/bucket-policies.html)
+like the following to be accessible:
+
+```json
+{
+  "Id": "DirectReads",
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowDirectReads",
+      "Action": [
+        "s3:GetObject",
+        "s3:GetBucketLocation"
+      ],
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws:s3:::example-nix-cache",
+        "arn:aws:s3:::example-nix-cache/*"
+      ],
+      "Principal": "*"
+    }
+  ]
+}
+```
+
+### Authentication
+
+Nix will use the
+[default credential provider chain](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html)
+for authenticating requests to Amazon S3.
+
+Note that this means Nix will read environment variables and files with different idioms than with Nix's own settings, as implemented by the AWS SDK.
+Consult the documentation linked above for further details.
+
+### Authenticated reads to your S3 binary cache
+
+Your bucket will need a bucket policy allowing the desired users to perform the `s3:GetObject` and `s3:GetBucketLocation` action on all objects in the bucket.
+The [anonymous policy given above](#anonymous-reads-to-your-s3-compatible-binary-cache) can be updated to have a restricted `Principal` to support this.
+
+### Authenticated writes to your S3-compatible binary cache
+
+Your account will need an IAM policy to support uploading to the bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "UploadToCache",
+      "Effect": "Allow",
+      "Action": [
+        "s3:AbortMultipartUpload",
+        "s3:GetBucketLocation",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:ListBucketMultipartUploads",
+        "s3:ListMultipartUploadParts",
+        "s3:PutObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::example-nix-cache",
+        "arn:aws:s3:::example-nix-cache/*"
+      ]
+    }
+  ]
+}
+```
+
+### Examples
+
+With bucket policies and authentication set up as described above, uploading works via [`nix copy`](@docroot@/command-ref/new-cli/nix3-copy.md) (experimental).
+
+- To upload with a specific credential profile for Amazon S3:
+
+  ```console
+  $ nix copy nixpkgs.hello \
+    --to 's3://example-nix-cache?profile=cache-upload&region=eu-west-2'
+  ```
+
+- To upload to an S3-compatible binary cache:
+
+  ```console
+  $ nix copy nixpkgs.hello --to \
+    's3://example-nix-cache?profile=cache-upload&scheme=https&endpoint=minio.example.com'
+  ```
 )"