mirror of https://github.com/NixOS/nix.git

Merge branch 'path-info' into ca-drv-exotic

John Ericson 2023-01-06 10:56:22 -05:00
commit 989b8065b4
404 changed files with 14903 additions and 6731 deletions


@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
@ -193,19 +192,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
std::ostringstream jsonOut;
nlohmann::json j = {
{"version", 1},
{"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
};
{
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
{
auto res = jsonRoot.placeholder("root");
listNar(res, ref<FSAccessor>(narAccessor), "", true);
}
}
upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
}
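This hunk (like the derived-path change at the end of the diff) drops the hand-rolled JSONObject/placeholder writer in favour of nlohmann::json: the listing is assembled as an in-memory value and serialized once with dump(). A minimal standalone sketch of that pattern, using a made-up root object in place of the real listNar() result:

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    /* Hypothetical stand-in for listNar(...), which produces the actual NAR listing. */
    nlohmann::json root = { {"type", "directory"}, {"entries", nlohmann::json::object()} };

    nlohmann::json j = {
        {"version", 1},
        {"root", root},
    };

    /* Corresponds to upsertFile(..., j.dump(), "application/json") above. */
    std::cout << j.dump() << std::endl;
}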
/* Optionally maintain an index of DWARF debug info files
@ -342,6 +334,17 @@ bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
return fileExists(narInfoFileFor(storePath));
}
std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
{
auto pseudoPath = StorePath(hashPart + "-" + MissingName);
try {
auto info = queryPathInfo(pseudoPath);
return info->path;
} catch (InvalidPath &) {
return std::nullopt;
}
}
void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();
@ -354,7 +357,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) {
throw SubstituteGone(e.info());
throw SubstituteGone(std::move(e.info()));
}
decompressor->finish();


@ -95,8 +95,7 @@ public:
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;


@ -5,7 +5,7 @@
#include <string>
#include <chrono>
#include <optional>
namespace nix {
@ -78,6 +78,9 @@ struct BuildResult
was repeated). */
time_t startTime = 0, stopTime = 0;
/* User and system CPU time the build took. */
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;


@ -7,7 +7,6 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
@ -40,7 +39,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@ -135,7 +133,7 @@ void DerivationGoal::killChild()
void DerivationGoal::timedOut(Error && ex)
{
killChild();
done(BuildResult::TimedOut, {}, ex);
done(BuildResult::TimedOut, {}, std::move(ex));
}
@ -344,7 +342,7 @@ void DerivationGoal::gaveUpOnSubstitution()
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
/* Ensure that pure, non-fixed-output derivations don't
depend on impure derivations. */
if (drv->type().isPure() && !drv->type().isFixed()) {
if (settings.isExperimentalFeatureEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) {
auto inputDrv = worker.evalStore.readDerivation(i.first);
if (!inputDrv.type().isPure())
throw Error("pure derivation '%s' depends on impure derivation '%s'",
@ -502,6 +500,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
if (!attempt) {
/* TODO (impure derivations-induced tech debt) (see below):
The above attempt should have found it, but because we manage
inputDrvOutputs statefully, sometimes it gets out of sync with
the real source of truth (store). So we query the store
directly if there's a problem. */
attempt = fullDrv.tryResolve(worker.store);
}
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
@ -528,13 +534,32 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
for (auto & j : wantedDepOutputs)
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j }))
for (auto & j : wantedDepOutputs) {
/* TODO (impure derivations-induced tech debt):
Tracking input derivation outputs statefully through the
goals is error prone and has led to bugs.
For a robust nix, we need to move towards the `else` branch,
which does not rely on goal state to match up with the
reality of the store, which is our real source of truth.
However, the impure derivations feature still relies on this
fragile way of doing things, because its builds do not have
a representation in the store, which is a usability problem
in itself. When implementing this logic entirely with lookups
make sure that they're cached. */
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) {
worker.store.computeFSClosure(*outPath, inputPaths);
else
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
else {
auto outMap = worker.evalStore.queryDerivationOutputMap(depDrvPath);
auto outMapPath = outMap.find(j);
if (outMapPath == outMap.end()) {
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
worker.store.computeFSClosure(outMapPath->second, inputPaths);
}
}
}
}
@ -546,10 +571,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */
derivationType = drv->type();
/* Don't repeat fixed-output derivations since they're already
verified by their output hash.*/
nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
/* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a
build hook. */
@ -564,12 +585,11 @@ void DerivationGoal::started()
auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" :
nrRounds > 1 ? "building '%s' (round %d/%d)" :
"building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
"building '%s'", worker.store.printStorePath(drvPath));
fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
worker.updateProgress();
}
@ -705,8 +725,7 @@ static void movePath(const Path & src, const Path & dst)
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
renameFile(src, dst);
if (changePerm)
chmod_(dst, st.st_mode);
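This hunk, the matching one in local-derivation-goal.cc, and builtinUnpackChannel() below all swap the raw rename()+SysError pattern for a renameFile() helper. A minimal sketch of what such a wrapper could look like, assuming it merely centralizes the error handling (the real helper may also deal with cross-filesystem moves):

#include <cstdio>
#include <stdexcept>
#include <string>

/* Hypothetical stand-in for Nix's renameFile(): rename src to dst and throw on
   failure, so callers no longer repeat the error check at every call site. */
static void renameFile(const std::string & src, const std::string & dst)
{
    if (std::rename(src.c_str(), dst.c_str()) != 0)
        throw std::runtime_error("renaming '" + src + "' to '" + dst + "' failed");
}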
@ -786,8 +805,7 @@ void runPostBuildHook(
Store & store,
Logger & logger,
const StorePath & drvPath,
StorePathSet outputPaths
)
const StorePathSet & outputPaths)
{
auto hook = settings.postBuildHook;
if (hook == "")
@ -871,6 +889,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill();
if (buildResult.cpuUser && buildResult.cpuSystem) {
debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
worker.store.printStorePath(drvPath),
status,
((double) buildResult.cpuUser->count()) / 1000000,
((double) buildResult.cpuSystem->count()) / 1000000);
}
bool diskFull = false;
try {
@ -906,7 +932,7 @@ void DerivationGoal::buildDone()
auto builtOutputs = registerOutputs();
StorePathSet outputPaths;
for (auto & [_, output] : buildResult.builtOutputs)
for (auto & [_, output] : builtOutputs)
outputPaths.insert(output.outPath);
runPostBuildHook(
worker.store,
@ -915,22 +941,8 @@ void DerivationGoal::buildDone()
outputPaths
);
if (buildMode == bmCheck) {
cleanupPostOutputsRegisteredModeCheck();
done(BuildResult::Built, std::move(builtOutputs));
return;
}
cleanupPostOutputsRegisteredModeNonCheck();
/* Repeat the build if necessary. */
if (curRound++ < nrRounds) {
outputLocks.unlock();
state = &DerivationGoal::tryToBuild;
worker.wakeUp(shared_from_this());
return;
}
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old
@ -959,7 +971,7 @@ void DerivationGoal::buildDone()
BuildResult::PermanentFailure;
}
done(st, {}, e);
done(st, {}, std::move(e));
return;
}
}
@ -985,13 +997,32 @@ void DerivationGoal::resolvedFinished()
realWantedOutputs = resolvedDrv.outputNames();
for (auto & wantedOutput : realWantedOutputs) {
assert(initialOutputs.count(wantedOutput) != 0);
assert(resolvedHashes.count(wantedOutput) != 0);
auto realisation = resolvedResult.builtOutputs.at(
DrvOutput { resolvedHashes.at(wantedOutput), wantedOutput });
auto initialOutput = get(initialOutputs, wantedOutput);
auto resolvedHash = get(resolvedHashes, wantedOutput);
if ((!initialOutput) || (!resolvedHash))
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput);
auto realisation = [&]{
auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
if (take1) return *take1;
/* The above `get` should work. But because we track the
outputs in resolvedResult statefully, it can get out of sync with the
store, which is our actual source of truth. For now we just
check the store directly if it fails. */
auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
if (take2) return *take2;
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
}();
if (drv->type().isPure()) {
auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutputs.at(wantedOutput).outputHash, wantedOutput };
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
@ -1295,7 +1326,11 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
DrvOutputs validOutputs;
for (auto & i : queryPartialDerivationOutputMap()) {
InitialOutput & info = initialOutputs.at(i.first);
auto initialOutput = get(initialOutputs, i.first);
if (!initialOutput)
// this is an invalid output; it gets caught by the (!wantedOutputsLeft.empty()) check below
continue;
auto & info = *initialOutput;
info.wanted = wantOutput(i.first, wantedOutputs);
if (info.wanted)
wantedOutputsLeft.erase(i.first);
@ -1310,7 +1345,7 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
: PathStatus::Corrupt,
};
}
auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
auto drvOutput = DrvOutput{info.outputHash, i.first};
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
if (auto real = worker.store.queryRealisation(drvOutput)) {
info.known = {
@ -1399,7 +1434,7 @@ void DerivationGoal::done(
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
}
amDone(buildResult.success() ? ecSuccess : ecFailed, ex);
amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
}
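Many hunks in this file (and throughout the commit) replace throwing lookups such as initialOutputs.at(...) or resolvedHashes.at(...) with a get() helper whose result is checked before use, and with getOr() where a default value suffices. As a point of reference only, a minimal sketch of what such helpers typically look like; the real ones live in Nix's utility headers and may differ in detail:

#include <map>
#include <string>

/* Return a pointer to the mapped value, or nullptr if the key is absent,
   instead of the std::out_of_range that Map::at() would throw. */
template<typename Map>
const typename Map::mapped_type * get(const Map & map, const typename Map::key_type & key)
{
    auto i = map.find(key);
    return i == map.end() ? nullptr : &i->second;
}

/* Return the mapped value, or a caller-supplied default if the key is absent. */
template<typename Map>
typename Map::mapped_type getOr(const Map & map,
    const typename Map::key_type & key, const typename Map::mapped_type & def)
{
    auto i = map.find(key);
    return i == map.end() ? def : i->second;
}

Usage then follows the pattern seen above: if (auto h = get(hashes, name)) { /* use *h */ } else { /* handle the missing key explicitly */ }.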


@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
/* The current round, if we're building multiple times. */
size_t curRound = 1;
size_t nrRounds;
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act;


@ -30,7 +30,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (ex)
logError(i->ex->info());
else
ex = i->ex;
ex = std::move(i->ex);
}
if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
@ -40,7 +40,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
throw *ex;
throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
@ -109,7 +109,7 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
throw *goal->ex;
throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}


@ -7,6 +7,22 @@ HookInstance::HookInstance()
{
debug("starting build hook '%s'", settings.buildHook);
auto buildHookArgs = tokenizeString<std::list<std::string>>(settings.buildHook.get());
if (buildHookArgs.empty())
throw Error("'build-hook' setting is empty");
auto buildHook = buildHookArgs.front();
buildHookArgs.pop_front();
Strings args;
args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs)
args.push_back(arg);
args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */
fromHook.create();
@ -36,14 +52,9 @@ HookInstance::HookInstance()
if (dup2(builderOut.readSide.get(), 5) == -1)
throw SysError("dupping builder's stdout/stderr");
Strings args = {
std::string(baseNameOf(settings.buildHook.get())),
std::to_string(verbosity),
};
execv(buildHook.c_str(), stringsToCharPtrs(args).data());
execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data());
throw SysError("executing '%s'", settings.buildHook);
throw SysError("executing '%s'", buildHook);
});
pid.setSeparatePG(true);


@ -8,12 +8,14 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
#include "cgroup.hh"
#include "personality.hh"
#include <regex>
#include <queue>
@ -23,7 +25,6 @@
#include <termios.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <sys/socket.h>
@ -36,7 +37,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@ -55,8 +55,7 @@
#include <pwd.h>
#include <grp.h>
#include <nlohmann/json.hpp>
#include <iostream>
namespace nix {
@ -130,26 +129,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) {
worker.childTerminated(this);
if (buildUser) {
/* If we're using a build user, then there is a tricky
race condition: if we kill the build user before the
child has done its setuid() to the build user uid, then
it won't be killed, and we'll potentially lock up in
pid.wait(). So also send a conventional kill to the
child. */
::kill(-pid, SIGKILL); /* ignore the result */
buildUser->kill();
pid.wait();
} else
pid.kill();
/* If we're using a build user, then there is a tricky race
condition: if we kill the build user before the child has
done its setuid() to the build user uid, then it won't be
killed, and we'll potentially lock up in pid.wait(). So
also send a conventional kill to the child. */
::kill(-pid, SIGKILL); /* ignore the result */
assert(pid == -1);
killSandbox(true);
pid.wait();
}
DerivationGoal::killChild();
}
void LocalDerivationGoal::killSandbox(bool getStats)
{
if (cgroup) {
#if __linux__
auto stats = destroyCgroup(*cgroup);
if (getStats) {
buildResult.cpuUser = stats.cpuUser;
buildResult.cpuSystem = stats.cpuSystem;
}
#else
abort();
#endif
}
else if (buildUser) {
auto uid = buildUser->getUID();
assert(uid != 0);
killUser(uid);
}
}
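The cpuUser/cpuSystem values recorded here come from the statistics returned by destroyCgroup() when the build's cgroup is torn down. Purely as an illustration of where such numbers can come from (this is not the Nix implementation), cgroups v2 expose them in the cpu.stat file as user_usec/system_usec, i.e. in microseconds, matching the std::chrono::microseconds fields added to BuildResult above:

#include <chrono>
#include <fstream>
#include <optional>
#include <string>

struct CgroupCpuStats
{
    std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
};

/* Hypothetical reader: parse "user_usec N" / "system_usec N" lines from a
   cgroup v2 cpu.stat file. */
CgroupCpuStats readCgroupCpuStats(const std::string & cgroupPath)
{
    CgroupCpuStats stats;
    std::ifstream in(cgroupPath + "/cpu.stat");
    std::string key;
    long long value;
    while (in >> key >> value) {
        if (key == "user_usec") stats.cpuUser = std::chrono::microseconds(value);
        else if (key == "system_usec") stats.cpuSystem = std::chrono::microseconds(value);
    }
    return stats;
}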
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
@ -159,28 +176,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return;
}
/* If `build-users-group' is not empty, then we have to build as
one of the members of that group. */
if (settings.buildUsersGroup != "" && getuid() == 0) {
#if defined(__linux__) || defined(__APPLE__)
if (!buildUser) buildUser = std::make_unique<UserLock>();
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
if (buildUser->findFreeUser()) {
/* Make sure that no other processes are executing under this
uid. */
buildUser->kill();
} else {
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
if (useBuildUsers()) {
if (!buildUser)
buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
if (!buildUser) {
if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this());
return;
}
#else
/* Don't know how to block the creation of setuid/setgid
binaries on this platform. */
throw Error("build users are not supported on this platform for security reasons");
#endif
}
actLock.reset();
@ -194,7 +229,7 @@ void LocalDerivationGoal::tryLocalBuild() {
outputLocks.unlock();
buildUser.reset();
worker.permanentFailure = true;
done(BuildResult::InputRejected, {}, e);
done(BuildResult::InputRejected, {}, std::move(e));
return;
}
@ -224,8 +259,7 @@ static void movePath(const Path & src, const Path & dst)
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
renameFile(src, dst);
if (changePerm)
chmod_(dst, st.st_mode);
@ -272,7 +306,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to
root. */
if (buildUser) buildUser->kill();
killSandbox(true);
/* Terminate the recursive Nix daemon. */
stopDaemon();
@ -312,7 +346,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
if (buildMode != bmCheck && status.known->isValid()) continue;
auto p = worker.store.printStorePath(status.known->path);
if (pathExists(chrootRootDir + p))
rename((chrootRootDir + p).c_str(), p.c_str());
renameFile((chrootRootDir + p), p);
}
return diskFull;
@ -365,6 +399,64 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder()
{
if ((buildUser && buildUser->getUIDCount() != 1)
#if __linux__
|| settings.useCgroups
#endif
)
{
#if __linux__
settings.requireExperimentalFeature(Xp::Cgroups);
auto cgroupFS = getCgroupFS();
if (!cgroupFS)
throw Error("cannot determine the cgroups file system");
auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""];
if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup");
auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath);
static std::atomic<unsigned int> counter{0};
cgroup = buildUser
? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
: fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
debug("using cgroup '%s'", *cgroup);
/* When using a build user, record the cgroup we used for that
user so that if we got interrupted previously, we can kill
any left-over cgroup first. */
if (buildUser) {
auto cgroupsDir = settings.nixStateDir + "/cgroups";
createDirs(cgroupsDir);
auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
if (pathExists(cgroupFile)) {
auto prevCgroup = readFile(cgroupFile);
destroyCgroup(prevCgroup);
}
writeFile(cgroupFile, *cgroup);
}
#else
throw Error("cgroups are not supported on this platform");
#endif
}
/* Make sure that no other processes are executing under the
sandbox uids. This must be done before any chownToBuilder()
calls. */
killSandbox(false);
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@ -378,35 +470,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
/* Create a temporary directory where the build will take
place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@ -482,7 +545,7 @@ void LocalDerivationGoal::startBuilder()
temporary build directory. The text files have the format used
by `nix-store --register-validity'. However, the deriver
fields are left empty. */
auto s = get(drv->env, "exportReferencesGraph").value_or("");
auto s = getOr(drv->env, "exportReferencesGraph", "");
Strings ss = tokenizeString<Strings>(s);
if (ss.size() % 2 != 0)
throw BuildError("odd number of tokens in 'exportReferencesGraph': '%1%'", s);
@ -582,10 +645,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
if (mkdir(chrootRootDir.c_str(), 0750) == -1)
// FIXME: make this 0700
if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir);
if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need
@ -599,6 +663,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
@ -649,12 +717,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
}
#elif __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
if (cgroup) {
if (mkdir(cgroup->c_str(), 0755) != 0)
throw SysError("creating cgroup '%s'", *cgroup);
chownToBuilder(*cgroup);
chownToBuilder(*cgroup + "/cgroup.procs");
chownToBuilder(*cgroup + "/cgroup.threads");
//chownToBuilder(*cgroup + "/cgroup.subtree_control");
}
#else
throw Error("sandboxing builds is not supported on this platform");
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is not supported on this platform");
#if __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
#else
throw Error("sandboxing builds is not supported on this platform");
#endif
#endif
} else {
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
if (needsHashRewrite() && pathExists(homeDir))
@ -846,18 +930,43 @@ void LocalDerivationGoal::startBuilder()
/* Some distros patch Linux to not allow unprivileged
* user namespaces. If we get EPERM or EINVAL, try
* without CLONE_NEWUSER and see if that works.
* Details: https://salsa.debian.org/kernel-team/linux/-/commit/d98e00eda6bea437e39b9e80444eee84a32438a6
*/
usingUserNamespace = false;
flags &= ~CLONE_NEWUSER;
child = clone(childEntry, stack + stackSize, flags, this);
}
/* Otherwise exit with EPERM so we can handle this in the
parent. This is only done when sandbox-fallback is set
to true (the default). */
if (child == -1 && (errno == EPERM || errno == EINVAL) && settings.sandboxFallback)
_exit(1);
if (child == -1) throw SysError("cloning builder process");
if (child == -1) {
switch(errno) {
case EPERM:
case EINVAL: {
int errno_ = errno;
if (!userNamespacesEnabled && errno==EPERM)
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/user/max_user_namespaces");
if (userNamespacesEnabled) {
Path procSysKernelUnprivilegedUsernsClone = "/proc/sys/kernel/unprivileged_userns_clone";
if (pathExists(procSysKernelUnprivilegedUsernsClone)
&& trim(readFile(procSysKernelUnprivilegedUsernsClone)) == "0") {
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/kernel/unprivileged_userns_clone");
}
}
Path procSelfNsUser = "/proc/self/ns/user";
if (!pathExists(procSelfNsUser))
notice("/proc/self/ns/user does not exist; your kernel was likely built without CONFIG_USER_NS=y, which is required for sandboxing");
/* Otherwise exit with EPERM so we can handle this in the
parent. This is only done when sandbox-fallback is set
to true (the default). */
if (settings.sandboxFallback)
_exit(1);
/* Mention sandbox-fallback in the error message so the user
knows that having it disabled contributed to the
unrecoverability of this failure */
throw SysError(errno_, "creating sandboxed builder process using clone(), without sandbox-fallback");
}
default:
throw SysError("creating sandboxed builder process using clone()");
}
}
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
@ -890,14 +999,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
fmt("%d %d 1", sandboxUid(), hostUid));
fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
if (!buildUser || buildUser->getUIDCount() == 1)
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
fmt("%d %d 1", sandboxGid(), hostGid));
fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else {
debug("note: not using a user namespace");
if (!buildUser)
@ -924,6 +1035,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace");
}
/* Move the child into its own cgroup. */
if (cgroup)
writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@ -989,7 +1104,7 @@ void LocalDerivationGoal::initTmpDir() {
there is no size constraint). */
if (!parsedDrv->getStructuredAttrs()) {
StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile").value_or(""));
StringSet passAsFile = tokenizeString<StringSet>(getOr(drv->env, "passAsFile", ""));
for (auto & i : drv->env) {
if (passAsFile.find(i.first) == passAsFile.end()) {
env[i.first] = i.second;
@ -1529,6 +1644,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
printError("unable to add mips seccomp architecture");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
printError("unable to add mips64-*abin32 seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
printError("unable to add mipsel seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
printError("unable to add mips64el-*abin32 seccomp architecture");
/* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@ -1571,6 +1702,8 @@ void LocalDerivationGoal::runChild()
/* Warning: in the child we should absolutely not make any SQLite
calls! */
bool sendException = true;
try { /* child */
commonChildInit(builderOut);
@ -1718,7 +1851,19 @@ void LocalDerivationGoal::runChild()
for (auto & i : dirsInChroot) {
if (i.second.source == "/proc") continue; // backwards compatibility
doBind(i.second.source, chrootRootDir + i.first, i.second.optional);
#if HAVE_EMBEDDED_SANDBOX_SHELL
if (i.second.source == "__embedded_sandbox_shell__") {
static unsigned char sh[] = {
#include "embedded-sandbox-shell.gen.hh"
};
auto dst = chrootRootDir + i.first;
createDirs(dirOf(dst));
writeFile(dst, std::string_view((const char *) sh, sizeof(sh)));
chmod_(dst, 0555);
} else
#endif
doBind(i.second.source, chrootRootDir + i.first, i.second.optional);
}
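The HAVE_EMBEDDED_SANDBOX_SHELL branch above relies on embedded-sandbox-shell.gen.hh expanding to a comma-separated list of byte values, so the #include can sit directly inside the array initializer. A self-contained sketch of the same pattern, with hypothetical bytes written inline instead of the generated header:

#include <string_view>

/* Hypothetical bytes for "#!/bin/sh\n"; in the real code they come from
   "embedded-sandbox-shell.gen.hh", presumably generated at build time. */
static unsigned char sh[] = {
    0x23, 0x21, 0x2f, 0x62, 0x69, 0x6e, 0x2f, 0x73, 0x68, 0x0a
};

/* The same string_view construction that writeFile(dst, ...) receives above. */
static std::string_view shView { reinterpret_cast<const char *>(sh), sizeof(sh) };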
/* Bind a new instance of procfs on /proc. */
@ -1726,6 +1871,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc");
/* Mount sysfs on /sys. */
if (buildUser && buildUser->getUIDCount() != 1) {
createDirs(chrootRootDir + "/sys");
if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
throw SysError("mounting /sys");
}
/* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@ -1768,6 +1920,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace");
/* Unshare the cgroup namespace. This means
/proc/self/cgroup will show the child's cgroup as '/'
rather than whatever it is in the parent. */
if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
throw SysError("unsharing cgroup namespace");
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir);
@ -1805,33 +1963,7 @@ void LocalDerivationGoal::runChild()
/* Close all other file descriptors. */
closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO});
#if __linux__
/* Change the personality to 32-bit if we're doing an
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
if ((drv->platform == "i686-linux"
&& (settings.thisSystem == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| drv->platform == "armv7l-linux"
|| drv->platform == "armv6l-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
builds that depend on the kernel version. */
if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
}
/* Disable address space randomization for improved
determinism. */
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
#endif
setPersonality(drv->platform);
/* Disable core dumps by default. */
struct rlimit limit = { 0, RLIM_INFINITY };
@ -1853,9 +1985,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */
if (!buildUser->getSupplementaryGIDs().empty() &&
setgroups(buildUser->getSupplementaryGIDs().size(),
buildUser->getSupplementaryGIDs().data()) == -1)
auto gids = buildUser->getSupplementaryGIDs();
if (setgroups(gids.size(), gids.data()) == -1)
throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 ||
@ -1919,10 +2050,14 @@ void LocalDerivationGoal::runChild()
sandboxProfile += "(deny default (with no-log))\n";
}
sandboxProfile += "(import \"sandbox-defaults.sb\")\n";
sandboxProfile +=
#include "sandbox-defaults.sb"
;
if (!derivationType.isSandboxed())
sandboxProfile += "(import \"sandbox-network.sb\")\n";
sandboxProfile +=
#include "sandbox-network.sb"
;
/* Add the output paths we'll use at build-time to the chroot */
sandboxProfile += "(allow file-read* file-write* process-exec\n";
@ -1965,7 +2100,9 @@ void LocalDerivationGoal::runChild()
sandboxProfile += additionalSandboxProfile;
} else
sandboxProfile += "(import \"sandbox-minimal.sb\")\n";
sandboxProfile +=
#include "sandbox-minimal.sb"
;
debug("Generated sandbox profile:");
debug(sandboxProfile);
@ -1990,8 +2127,6 @@ void LocalDerivationGoal::runChild()
args.push_back(sandboxFile);
args.push_back("-D");
args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir);
args.push_back("-D");
args.push_back("IMPORT_DIR=" + settings.nixDataDir + "/nix/sandbox/");
if (allowLocalNetworking) {
args.push_back("-D");
args.push_back(std::string("_ALLOW_LOCAL_NETWORKING=1"));
@ -2015,6 +2150,8 @@ void LocalDerivationGoal::runChild()
/* Indicate that we managed to set up the build environment. */
writeFull(STDERR_FILENO, std::string("\2\n"));
sendException = false;
/* Execute the program. This should not return. */
if (drv->isBuiltin()) {
try {
@ -2068,10 +2205,13 @@ void LocalDerivationGoal::runChild()
throw SysError("executing '%1%'", drv->builder);
} catch (Error & e) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
if (sendException) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
} else
std::cerr << e.msg();
_exit(1);
}
}
@ -2097,7 +2237,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen;
Path checkSuffix = ".check";
bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException;
@ -2128,12 +2267,22 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
std::map<std::string, std::variant<AlreadyRegistered, PerhapsNeedToRegister>> outputReferencesIfUnregistered;
std::map<std::string, struct stat> outputStats;
for (auto & [outputName, _] : drv->outputs) {
auto actualPath = toRealPathChroot(worker.store.printStorePath(scratchOutputs.at(outputName)));
auto scratchOutput = get(scratchOutputs, outputName);
if (!scratchOutput)
throw BuildError(
"builder for '%s' has no scratch output for '%s'",
worker.store.printStorePath(drvPath), outputName);
auto actualPath = toRealPathChroot(worker.store.printStorePath(*scratchOutput));
outputsToSort.insert(outputName);
/* Updated wanted info to remove the outputs we definitely don't need to register */
auto & initialInfo = initialOutputs.at(outputName);
auto initialOutput = get(initialOutputs, outputName);
if (!initialOutput)
throw BuildError(
"builder for '%s' has no initial output for '%s'",
worker.store.printStorePath(drvPath), outputName);
auto & initialInfo = *initialOutput;
/* Don't register if already valid, and not checking */
initialInfo.wanted = buildMode == bmCheck
@ -2169,7 +2318,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or
something like that. */
canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
canonicalisePathMetaData(
actualPath,
buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@ -2185,6 +2337,11 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
auto sortedOutputNames = topoSort(outputsToSort,
{[&](const std::string & name) {
auto orifu = get(outputReferencesIfUnregistered, name);
if (!orifu)
throw BuildError(
"no output reference for '%s' in build of '%s'",
name, worker.store.printStorePath(drvPath));
return std::visit(overloaded {
/* Since we'll use the already installed versions of these, we
can treat them as leaves and ignore any references they
@ -2199,7 +2356,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
referencedOutputs.insert(o);
return referencedOutputs;
},
}, outputReferencesIfUnregistered.at(name));
}, *orifu);
}},
{[&](const std::string & path, const std::string & parent) {
// TODO with more -vvvv also show the temporary paths for manual inspection.
@ -2213,9 +2370,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
OutputPathMap finalOutputs;
for (auto & outputName : sortedOutputNames) {
auto output = drv->outputs.at(outputName);
auto & scratchPath = scratchOutputs.at(outputName);
auto actualPath = toRealPathChroot(worker.store.printStorePath(scratchPath));
auto output = get(drv->outputs, outputName);
auto scratchPath = get(scratchOutputs, outputName);
assert(output && scratchPath);
auto actualPath = toRealPathChroot(worker.store.printStorePath(*scratchPath));
auto finish = [&](StorePath finalStorePath) {
/* Store the final path */
@ -2223,10 +2381,13 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* The rewrite rule will be used in downstream outputs that refer to
us. This is why the topological sort is essential to do first
before this for loop. */
if (scratchPath != finalStorePath)
outputRewrites[std::string { scratchPath.hashPart() }] = std::string { finalStorePath.hashPart() };
if (*scratchPath != finalStorePath)
outputRewrites[std::string { scratchPath->hashPart() }] = std::string { finalStorePath.hashPart() };
};
auto orifu = get(outputReferencesIfUnregistered, outputName);
assert(orifu);
std::optional<StorePathSet> referencesOpt = std::visit(overloaded {
[&](const AlreadyRegistered & skippedFinalPath) -> std::optional<StorePathSet> {
finish(skippedFinalPath.path);
@ -2235,7 +2396,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
[&](const PerhapsNeedToRegister & r) -> std::optional<StorePathSet> {
return r.refs;
},
}, outputReferencesIfUnregistered.at(outputName));
}, *orifu);
if (!referencesOpt)
continue;
@ -2253,6 +2414,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, {}, inodesSeen);
}
};
@ -2267,27 +2432,31 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
for (auto & r : references) {
auto name = r.name();
auto origHash = std::string { r.hashPart() };
if (r == scratchPath)
if (r == *scratchPath) {
res.hasSelfReference = true;
else if (outputRewrites.count(origHash) == 0)
res.references.insert(r);
else {
std::string newRef = outputRewrites.at(origHash);
} else if (auto outputRewrite = get(outputRewrites, origHash)) {
std::string newRef = *outputRewrite;
newRef += '-';
newRef += name;
res.references.insert(StorePath { newRef });
} else {
res.references.insert(r);
}
}
return res;
};
auto newInfoFromCA = [&](const DerivationOutput::CAFloating outputHash) -> ValidPathInfo {
auto & st = outputStats.at(outputName);
auto st = get(outputStats, outputName);
if (!st)
throw BuildError(
"output path %1% without valid stats info",
actualPath);
if (outputHash.method == ContentAddressMethod { FileIngestionMethod::Flat } ||
outputHash.method == ContentAddressMethod { TextHashMethod {} })
{
/* The output path should be a regular file without execute permission. */
if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0)
throw BuildError(
"output path '%1%' should be a non-executable regular file "
"since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)",
@ -2295,7 +2464,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
}
rewriteOutput();
/* FIXME optimize and deduplicate with addToStore */
std::string oldHashPart { scratchPath.hashPart() };
std::string oldHashPart { scratchPath->hashPart() };
HashModuloSink caSink { outputHash.hashType, oldHashPart };
std::visit(overloaded {
[&](const TextHashMethod &) {
@ -2324,13 +2493,11 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
},
Hash::dummy,
};
if (scratchPath != newInfo0.path) {
if (*scratchPath != newInfo0.path) {
// Also rewrite the output path
auto source = sinkToSource([&](Sink & nextSink) {
StringSink sink;
dumpPath(actualPath, sink);
RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
rsink2(sink.s);
dumpPath(actualPath, rsink2);
rsink2.flush();
});
Path tmpPath = actualPath + ".tmp";
@ -2354,9 +2521,9 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
auto requiredFinalPath = output.path;
/* Preemptively add rewrite rule for final hash, as that is
what the NAR hash will use rather than normalized-self references */
if (scratchPath != requiredFinalPath)
if (*scratchPath != requiredFinalPath)
outputRewrites.insert_or_assign(
std::string { scratchPath.hashPart() },
std::string { scratchPath->hashPart() },
std::string { requiredFinalPath.hashPart() });
rewriteOutput();
auto narHashAndSize = hashPath(htSHA256, actualPath);
@ -2412,11 +2579,11 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
});
},
}, output.raw());
}, output->raw());
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, -1, inodesSeen);
canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we
will leave them where they are, for now, rather than move to
@ -2428,7 +2595,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
derivations. */
PathLocks dynamicOutputLock;
dynamicOutputLock.setDeletion(true);
auto optFixedPath = output.path(worker.store, drv->name, outputName);
auto optFixedPath = output->path(worker.store, drv->name, outputName);
if (!optFixedPath ||
worker.store.printStorePath(*optFixedPath) != finalDestPath)
{
@ -2494,17 +2661,14 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* For debugging, print out the referenced and unreferenced paths. */
for (auto & i : inputPaths) {
auto j = references.find(i);
if (j == references.end())
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
else
if (references.count(i))
debug("referenced input: '%1%'", worker.store.printStorePath(i));
else
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
}
if (curRound == nrRounds) {
localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
worker.markContentsGood(newInfo.path);
}
localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
worker.markContentsGood(newInfo.path);
newInfo.deriver = drvPath;
newInfo.ultimate = true;
@ -2533,62 +2697,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */
checkOutputs(infos);
/* Compare the result with the previous round, and report which
path is different, if any.*/
if (curRound > 1 && prevInfos != infos) {
assert(prevInfos.size() == infos.size());
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
if (!(*i == *j)) {
buildResult.isNonDeterministic = true;
Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
bool prevExists = keepPreviousRound && pathExists(prev);
hintformat hint = prevExists
? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
: hintfmt("output '%s' of '%s' differs from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
handleDiffHook(
buildUser ? buildUser->getUID() : getuid(),
buildUser ? buildUser->getGID() : getgid(),
prev, worker.store.printStorePath(i->second.path),
worker.store.printStorePath(drvPath), tmpDir);
if (settings.enforceDeterminism)
throw NotDeterministic(hint);
printError(hint);
curRound = nrRounds; // we know enough, bail out early
}
}
/* If this is the first round of several, then move the output out of the way. */
if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
for (auto & [_, outputStorePath] : finalOutputs) {
auto path = worker.store.printStorePath(outputStorePath);
Path prev = path + checkSuffix;
deletePath(prev);
Path dst = path + checkSuffix;
if (rename(path.c_str(), dst.c_str()))
throw SysError("renaming '%s' to '%s'", path, dst);
}
}
if (curRound < nrRounds) {
prevInfos = std::move(infos);
return {};
}
/* Remove the .check directories if we're done. FIXME: keep them
if the result was not deterministic? */
if (curRound == nrRounds) {
for (auto & [_, outputStorePath] : finalOutputs) {
Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
deletePath(prev);
}
}
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
@ -2615,9 +2723,11 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
DrvOutputs builtOutputs;
for (auto & [outputName, newInfo] : infos) {
auto oldinfo = get(initialOutputs, outputName);
assert(oldinfo);
auto thisRealisation = Realisation {
.id = DrvOutput {
initialOutputs.at(outputName).outputHash,
oldinfo->outputHash,
outputName
},
.outPath = newInfo.path
@ -2713,9 +2823,10 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
for (auto & i : *value) {
if (worker.store.isStorePath(i))
spec.insert(worker.store.parseStorePath(i));
else if (outputs.count(i))
spec.insert(outputs.at(i).path);
else throw BuildError("derivation contains an illegal reference specifier '%s'", i);
else if (auto output = get(outputs, i))
spec.insert(output->path);
else
throw BuildError("derivation contains an illegal reference specifier '%s'", i);
}
auto used = recursive
@ -2754,24 +2865,18 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
};
if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
auto outputChecks = structuredAttrs->find("outputChecks");
if (outputChecks != structuredAttrs->end()) {
auto output = outputChecks->find(outputName);
if (output != outputChecks->end()) {
if (auto outputChecks = get(*structuredAttrs, "outputChecks")) {
if (auto output = get(*outputChecks, outputName)) {
Checks checks;
auto maxSize = output->find("maxSize");
if (maxSize != output->end())
if (auto maxSize = get(*output, "maxSize"))
checks.maxSize = maxSize->get<uint64_t>();
auto maxClosureSize = output->find("maxClosureSize");
if (maxClosureSize != output->end())
if (auto maxClosureSize = get(*output, "maxClosureSize"))
checks.maxClosureSize = maxClosureSize->get<uint64_t>();
auto get = [&](const std::string & name) -> std::optional<Strings> {
auto i = output->find(name);
if (i != output->end()) {
auto get_ = [&](const std::string & name) -> std::optional<Strings> {
if (auto i = get(*output, name)) {
Strings res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string())
@ -2784,10 +2889,10 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
return {};
};
checks.allowedReferences = get("allowedReferences");
checks.allowedRequisites = get("allowedRequisites");
checks.disallowedReferences = get("disallowedReferences");
checks.disallowedRequisites = get("disallowedRequisites");
checks.allowedReferences = get_("allowedReferences");
checks.allowedRequisites = get_("allowedRequisites");
checks.disallowedReferences = get_("disallowedReferences");
checks.disallowedRequisites = get_("disallowedRequisites");
applyChecks(checks);
}


@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */
Pid pid;
/* The cgroup of the builder, if any. */
std::optional<Path> cgroup;
/* The temporary directory. */
Path tmpDir;
@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */
void killChild() override;
/* Kill any processes running under the build user UID or in the
cgroup of the build. */
void killSandbox(bool getStats);
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */


@ -0,0 +1,44 @@
#include "personality.hh"
#include "globals.hh"
#if __linux__
#include <sys/utsname.h>
#include <sys/personality.h>
#endif
#include <cstring>
namespace nix {
void setPersonality(std::string_view system)
{
#if __linux__
/* Change the personality to 32-bit if we're doing an
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
if ((system == "i686-linux"
&& (std::string_view(SYSTEM) == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| system == "armv7l-linux"
|| system == "armv6l-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
builds that depend on the kernel version. */
if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
}
/* Disable address space randomization for improved
determinism. */
int cur = personality(0xffffffff);
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
#endif
}
}


@ -0,0 +1,11 @@
#pragma once
#include <string>
namespace nix {
void setPersonality(std::string_view system);
}


@ -1,3 +1,5 @@
R""(
(define TMPDIR (param "_GLOBAL_TMP_DIR"))
(deny default)
@ -98,7 +100,11 @@
(allow file*
(literal "/private/var/select/sh"))
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin (and vice versa).
(allow file-read*
(subpath "/Library/Apple/usr/libexec/oah")
(subpath "/System/Library/Apple/usr/libexec/oah"))
(subpath "/System/Library/Apple/usr/libexec/oah")
(subpath "/System/Library/LaunchDaemons/com.apple.oahd.plist")
(subpath "/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist"))
)""


@ -1,5 +1,9 @@
R""(
(allow default)
; Disallow creating setuid/setgid binaries, since that
; would allow breaking build user isolation.
(deny file-write-setugid)
)""


@ -1,3 +1,5 @@
R""(
; Allow local and remote network traffic.
(allow network* (local ip) (remote ip))
@ -14,3 +16,9 @@
; Allow DNS lookups.
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))
; Allow access to trustd.
(allow mach-lookup (global-name "com.apple.trustd"))
(allow mach-lookup (global-name "com.apple.trustd.agent"))
)""


@ -157,7 +157,7 @@ void PathSubstitutionGoal::tryNext()
only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{
warn("the substitute for '%s' from '%s' is not signed by any of the keys in 'trusted-public-keys'",
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
worker.store.printStorePath(storePath), sub->getUri());
tryNext();
return;


@ -350,7 +350,7 @@ void Worker::waitForInput()
become `available'. Note that `available' (i.e., non-blocking)
includes EOF. */
std::vector<struct pollfd> pollStatus;
std::map <int, int> fdToPollStatus;
std::map<int, size_t> fdToPollStatus;
for (auto & i : children) {
for (auto & j : i.fds) {
pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
@ -380,7 +380,10 @@ void Worker::waitForInput()
std::set<int> fds2(j->fds);
std::vector<unsigned char> buffer(4096);
for (auto & k : fds2) {
if (pollStatus.at(fdToPollStatus.at(k)).revents) {
const auto fdPollStatusId = get(fdToPollStatus, k);
assert(fdPollStatusId);
assert(*fdPollStatusId < pollStatus.size());
if (pollStatus.at(*fdPollStatusId).revents) {
ssize_t rd = ::read(k, buffer.data(), buffer.size());
// FIXME: is there a cleaner way to handle pt close
// than EIO? Is this even standard?


@ -93,8 +93,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
auto prevPriority = state.priorities[dstFile];
if (prevPriority == priority)
throw Error(
"packages '%1%' and '%2%' have the same priority %3%; "
"files '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"or type 'nix profile install --help' if using 'nix profile' to find out how "
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);


@ -24,7 +24,7 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
bool unpack = get(drv.env, "unpack").value_or("") == "1";
bool unpack = getOr(drv.env, "unpack", "") == "1";
/* Note: have to use a fresh fileTransfer here because we're in
a forked process. */


@ -22,8 +22,7 @@ void builtinUnpackChannel(const BasicDerivation & drv)
auto entries = readDirectory(out);
if (entries.size() != 1)
throw Error("channel tarball '%s' contains more than one file", src);
if (rename((out + "/" + entries[0].name).c_str(), (out + "/" + channelName).c_str()) == -1)
throw SysError("renaming channel directory");
renameFile((out + "/" + entries[0].name), (out + "/" + channelName));
}
}


@ -13,12 +13,27 @@ create table if not exists Realisations (
create index if not exists IndexRealisations on Realisations(drvPath, outputName);
-- We can end up in a weird edge case where a path depends on itself because
-- it's an output of a CA derivation that happens to be the same as one of its
-- dependencies.
-- In that case we have a dependency loop (path -> realisation1 -> realisation2
-- -> path) that we need to break by removing the dependencies between the
-- realisations
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
begin
delete from RealisationsRefs where realisationReference in (
select id from Realisations where outputPath = old.id
);
end;
create table if not exists RealisationsRefs (
referrer integer not null,
realisationReference integer,
foreign key (referrer) references Realisations(id) on delete cascade,
foreign key (realisationReference) references Realisations(id) on delete restrict
);
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);
-- used by QueryRealisationReferences
create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);

View file

@ -238,7 +238,8 @@ struct ClientSettings
}
else if (trusted
|| name == settings.buildTimeout.name
|| name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
|| (name == "builders" && value == ""))
settings.set(name, value);

View file

@ -446,7 +446,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
// FIXME: remove
bool isDerivation(const std::string & fileName)
bool isDerivation(std::string_view fileName)
{
return hasSuffix(fileName, drvExtension);
}
@ -659,8 +659,10 @@ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOut
if (res.kind == DrvHash::Kind::Deferred)
kind = DrvHash::Kind::Deferred;
for (auto & outputName : inputOutputs) {
const auto h = res.hashes.at(outputName);
inputs2[h.to_string(Base16, false)].insert(outputName);
const auto h = get(res.hashes, outputName);
if (!h)
throw Error("no hash for output '%s' of derivation '%s'", outputName, drv.name);
inputs2[h->to_string(Base16, false)].insert(outputName);
}
}
@ -834,8 +836,11 @@ static void rewriteDerivation(Store & store, BasicDerivation & drv, const String
auto hashModulo = hashDerivationModulo(store, Derivation(drv), true);
for (auto & [outputName, output] : drv.outputs) {
if (std::holds_alternative<DerivationOutput::Deferred>(output.raw())) {
auto & h = hashModulo.hashes.at(outputName);
auto outPath = store.makeOutputPath(outputName, h, drv.name);
auto h = get(hashModulo.hashes, outputName);
if (!h)
throw Error("derivation '%s' output '%s' has no hash (derivations.cc/rewriteDerivation)",
drv.name, outputName);
auto outPath = store.makeOutputPath(outputName, *h, drv.name);
drv.env[outputName] = store.printStorePath(outPath);
output = DerivationOutput::InputAddressed {
.path = std::move(outPath),

View file

@ -224,7 +224,7 @@ StorePath writeDerivation(Store & store,
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove
bool isDerivation(const std::string & fileName);
bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this
output.

View file

@ -4,6 +4,8 @@
#include <nlohmann/json.hpp>
#include <optional>
namespace nix {
nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
@ -17,10 +19,11 @@ nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
res["drvPath"] = store->printStorePath(drvPath);
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so lets do it
auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
for (const auto& output : outputs) {
if (knownOutputs.at(output))
res["outputs"][output] = store->printStorePath(knownOutputs.at(output).value());
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output);
if (knownOutput && *knownOutput)
res["outputs"][output] = store->printStorePath(**knownOutput);
else
res["outputs"][output] = nullptr;
}
@ -51,28 +54,13 @@ StorePathSet BuiltPath::outPaths() const
);
}
template<typename T>
nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
auto res = nlohmann::json::array();
for (const T & t : ts) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
{ return stuffToJSON<BuiltPath>(buildables, store); }
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{ return stuffToJSON<DerivedPath>(paths, store); }
std::string DerivedPath::Opaque::to_string(const Store & store) const {
std::string DerivedPath::Opaque::to_string(const Store & store) const
{
return store.printStorePath(path);
}
std::string DerivedPath::Built::to_string(const Store & store) const {
std::string DerivedPath::Built::to_string(const Store & store) const
{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
@ -91,15 +79,16 @@ DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_
return {store.parseStorePath(s)};
}
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
{
size_t n = s.find("!");
assert(n != s.npos);
auto drvPath = store.parseStorePath(s.substr(0, n));
auto outputsS = s.substr(n + 1);
auto drvPath = store.parseStorePath(drvS);
std::set<std::string> outputs;
if (outputsS != "*")
if (outputsS != "*") {
outputs = tokenizeString<std::set<std::string>>(outputsS, ",");
if (outputs.empty())
throw Error(
"Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
}
return {drvPath, outputs};
}
@ -108,7 +97,7 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
size_t n = s.find("!");
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
: (DerivedPath) DerivedPath::Built::parse(store, s);
: (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
}
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
@ -123,10 +112,15 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
for (auto& [outputName, outputPath] : p.outputs) {
if (settings.isExperimentalFeatureEnabled(
Xp::CaDerivations)) {
auto drvOutput = get(drvHashes, outputName);
if (!drvOutput)
throw Error(
"the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)",
store.printStorePath(p.drvPath), outputName);
auto thisRealisation = store.queryRealisation(
DrvOutput{drvHashes.at(outputName), outputName});
assert(thisRealisation); // We've built it, so we must h
// ve the realisation
DrvOutput{*drvOutput, outputName});
assert(thisRealisation); // We've built it, so we must
// have the realisation
res.insert(*thisRealisation);
} else {
res.insert(outputPath);

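For readers unfamiliar with the installable syntax, a minimal usage sketch of the parser after this change (not part of the diff; the store paths are made up, reusing an arbitrary 32-character hash, and `store` is assumed to be an open Store):

```
#include "derived-path.hh"
#include "store-api.hh"

void example(const nix::Store & store)
{
    using nix::DerivedPath;

    // No '!' -> DerivedPath::Opaque (a plain store path).
    auto opaque = DerivedPath::parse(store,
        "/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-hello-2.12");

    // '!*' -> DerivedPath::Built requesting all outputs.
    auto all = DerivedPath::parse(store,
        "/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-hello-2.12.drv!*");

    // '!out,dev' -> DerivedPath::Built with an explicit output set.
    auto some = DerivedPath::parse(store,
        "/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-hello-2.12.drv!out,dev");

    // An empty list after '!' (e.g. "....drv!") is now rejected with an error
    // suggesting '*' instead.
}
```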
View file

@ -48,7 +48,7 @@ struct DerivedPathBuilt {
std::set<std::string> outputs;
std::string to_string(const Store & store) const;
static DerivedPathBuilt parse(const Store & store, std::string_view);
static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref<Store> store) const;
bool operator < (const DerivedPathBuilt & b) const
@ -126,7 +126,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
}

View file

@ -33,14 +33,6 @@ FileTransferSettings fileTransferSettings;
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
std::string resolveUri(std::string_view uri)
{
if (uri.compare(0, 8, "channel:") == 0)
return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
else
return std::string(uri);
}
struct curlFileTransfer : public FileTransfer
{
CURLM * curlm = 0;
@ -142,9 +134,9 @@ struct curlFileTransfer : public FileTransfer
}
template<class T>
void fail(const T & e)
void fail(T && e)
{
failEx(std::make_exception_ptr(e));
failEx(std::make_exception_ptr(std::move(e)));
}
LambdaSink finalSink;
@ -308,6 +300,9 @@ struct curlFileTransfer : public FileTransfer
curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
if (settings.downloadSpeed.get() > 0)
curl_easy_setopt(req, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t) (settings.downloadSpeed.get() * 1024));
if (request.head)
curl_easy_setopt(req, CURLOPT_NOBODY, 1);
@ -319,7 +314,6 @@ struct curlFileTransfer : public FileTransfer
}
if (request.verifyTLS) {
debug("verify TLS: Nix CA file = '%s'", settings.caFile);
if (settings.caFile != "")
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
} else {
@ -443,14 +437,13 @@ struct curlFileTransfer : public FileTransfer
: httpStatus != 0
? FileTransferError(err,
std::move(response),
fmt("unable to %s '%s': HTTP error %d ('%s')",
request.verb(), request.uri, httpStatus, statusMsg)
+ (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
)
"unable to %s '%s': HTTP error %d%s",
request.verb(), request.uri, httpStatus,
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
: FileTransferError(err,
std::move(response),
fmt("unable to %s '%s': %s (%d)",
request.verb(), request.uri, curl_easy_strerror(code), code));
"unable to %s '%s': %s (%d)",
request.verb(), request.uri, curl_easy_strerror(code), code);
/* If this is a transient error, then maybe retry the
download after a while. If we're writing to a
@ -471,7 +464,7 @@ struct curlFileTransfer : public FileTransfer
fileTransfer.enqueueItem(shared_from_this());
}
else
fail(exc);
fail(std::move(exc));
}
}
};
@ -693,10 +686,10 @@ struct curlFileTransfer : public FileTransfer
#if ENABLE_S3
auto [bucketName, key, params] = parseS3Uri(request.uri);
std::string profile = get(params, "profile").value_or("");
std::string region = get(params, "region").value_or(Aws::Region::US_EAST_1);
std::string scheme = get(params, "scheme").value_or("");
std::string endpoint = get(params, "endpoint").value_or("");
std::string profile = getOr(params, "profile", "");
std::string region = getOr(params, "region", Aws::Region::US_EAST_1);
std::string scheme = getOr(params, "scheme", "");
std::string endpoint = getOr(params, "endpoint", "");
S3Helper s3Helper(profile, region, scheme, endpoint);
@ -704,7 +697,7 @@ struct curlFileTransfer : public FileTransfer
auto s3Res = s3Helper.getObject(bucketName, key);
FileTransferResult res;
if (!s3Res.data)
throw FileTransferError(NotFound, {}, "S3 object '%s' does not exist", request.uri);
throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
res.data = std::move(*s3Res.data);
callback(std::move(res));
#else
@ -872,14 +865,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<st
err.msg = hf;
}
bool isUri(std::string_view s)
{
if (s.compare(0, 8, "channel:") == 0) return true;
size_t pos = s.find("://");
if (pos == std::string::npos) return false;
std::string scheme(s, 0, pos);
return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}
}

View file

@ -125,9 +125,4 @@ public:
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
};
bool isUri(std::string_view s);
/* Resolve deprecated 'channel:<foo>' URLs. */
std::string resolveUri(std::string_view uri);
}

View file

@ -39,9 +39,7 @@ static void makeSymlink(const Path & link, const Path & target)
createSymlink(target, tempLink);
/* Atomically replace the old one. */
if (rename(tempLink.c_str(), link.c_str()) == -1)
throw SysError("cannot rename '%1%' to '%2%'",
tempLink , link);
renameFile(tempLink, link);
}
@ -79,90 +77,106 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot
}
void LocalStore::addTempRoot(const StorePath & path)
void LocalStore::createTempRootsFile()
{
auto state(_state.lock());
auto fdTempRoots(_fdTempRoots.lock());
/* Create the temporary roots file for this process. */
if (!state->fdTempRoots) {
if (*fdTempRoots) return;
while (1) {
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
unlink(fnTempRoots.c_str());
while (1) {
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
unlink(fnTempRoots.c_str());
state->fdTempRoots = openLockFile(fnTempRoots, true);
*fdTempRoots = openLockFile(fnTempRoots, true);
debug("acquiring write lock on '%s'", fnTempRoots);
lockFile(state->fdTempRoots.get(), ltWrite, true);
debug("acquiring write lock on '%s'", fnTempRoots);
lockFile(fdTempRoots->get(), ltWrite, true);
/* Check whether the garbage collector didn't get in our
way. */
struct stat st;
if (fstat(state->fdTempRoots.get(), &st) == -1)
throw SysError("statting '%1%'", fnTempRoots);
if (st.st_size == 0) break;
/* Check whether the garbage collector didn't get in our
way. */
struct stat st;
if (fstat(fdTempRoots->get(), &st) == -1)
throw SysError("statting '%1%'", fnTempRoots);
if (st.st_size == 0) break;
/* The garbage collector deleted this file before we could
get a lock. (It won't delete the file after we get a
lock.) Try again. */
}
/* The garbage collector deleted this file before we could get
a lock. (It won't delete the file after we get a lock.)
Try again. */
}
}
void LocalStore::addTempRoot(const StorePath & path)
{
createTempRootsFile();
/* Open/create the global GC lock file. */
{
auto fdGCLock(_fdGCLock.lock());
if (!*fdGCLock)
*fdGCLock = openGCLock();
}
if (!state->fdGCLock)
state->fdGCLock = openGCLock();
restart:
FdLock gcLock(state->fdGCLock.get(), ltRead, false, "");
/* Try to acquire a shared global GC lock (non-blocking). This
only succeeds if the garbage collector is not currently
running. */
FdLock gcLock(_fdGCLock.lock()->get(), ltRead, false, "");
if (!gcLock.acquired) {
/* We couldn't get a shared global GC lock, so the garbage
collector is running. So we have to connect to the garbage
collector and inform it about our root. */
if (!state->fdRootsSocket) {
auto fdRootsSocket(_fdRootsSocket.lock());
if (!*fdRootsSocket) {
auto socketPath = stateDir.get() + gcSocketPath;
debug("connecting to '%s'", socketPath);
state->fdRootsSocket = createUnixDomainSocket();
*fdRootsSocket = createUnixDomainSocket();
try {
nix::connect(state->fdRootsSocket.get(), socketPath);
nix::connect(fdRootsSocket->get(), socketPath);
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == ECONNREFUSED) {
debug("GC socket connection refused");
state->fdRootsSocket.close();
fdRootsSocket->close();
goto restart;
}
throw;
}
}
try {
debug("sending GC root '%s'", printStorePath(path));
writeFull(state->fdRootsSocket.get(), printStorePath(path) + "\n", false);
writeFull(fdRootsSocket->get(), printStorePath(path) + "\n", false);
char c;
readFull(state->fdRootsSocket.get(), &c, 1);
readFull(fdRootsSocket->get(), &c, 1);
assert(c == '1');
debug("got ack for GC root '%s'", printStorePath(path));
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == EPIPE) {
if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
fdRootsSocket->close();
goto restart;
}
throw;
} catch (EndOfFile & e) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
fdRootsSocket->close();
goto restart;
}
}
/* Append the store path to the temporary roots file. */
/* Record the store path in the temporary roots file so it will be
seen by a future run of the garbage collector. */
auto s = printStorePath(path) + '\0';
writeFull(state->fdTempRoots.get(), s);
writeFull(_fdTempRoots.lock()->get(), s);
}
@ -506,6 +520,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() {
debug("GC roots server shutting down");
fdServer.close();
while (true) {
auto item = remove_begin(*connections.lock());
if (!item) break;
@ -619,6 +634,17 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Path path = storeDir + "/" + std::string(baseName);
Path realPath = realStoreDir + "/" + std::string(baseName);
/* There may be temp directories in the store that are still in use
by another process. We need to be sure that we can acquire an
exclusive lock before deleting them. */
if (baseName.find("tmp-", 0) == 0) {
AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
debug("skipping locked tempdir '%s'", realPath);
return;
}
}
printInfo("deleting '%1%'", path);
results.paths.insert(path);

View file

@ -36,7 +36,6 @@ Settings::Settings()
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR)))
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
, nixManDir(canonPath(NIX_MAN_DIR))
, nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
@ -67,12 +66,13 @@ Settings::Settings()
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
#endif
/* chroot-like behavior from Apple's sandbox */
/* chroot-like behavior from Apple's sandbox */
#if __APPLE__
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
buildHook = getSelfExe().value_or("nix") + " __build-remote";
}
void loadConfFile()
@ -114,7 +114,13 @@ std::vector<Path> getUserConfigFiles()
unsigned int Settings::getDefaultCores()
{
return std::max(1U, std::thread::hardware_concurrency());
const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency());
const unsigned int maxCPU = getMaxCPU();
if (maxCPU > 0)
return maxCPU;
else
return concurrency;
}
StringSet Settings::getDefaultSystemFeatures()
@ -124,6 +130,10 @@ StringSet Settings::getDefaultSystemFeatures()
actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
features.insert("uid-range");
#endif
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");
@ -148,13 +158,9 @@ StringSet Settings::getDefaultExtraPlatforms()
// machines. Note that we can't force processes from executing
// x86_64 in aarch64 environments or vice versa since they can
// always exec with their own binary preferences.
if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist") ||
pathExists("/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
if (std::string{SYSTEM} == "x86_64-darwin")
extraPlatforms.insert("aarch64-darwin");
else if (std::string{SYSTEM} == "aarch64-darwin")
extraPlatforms.insert("x86_64-darwin");
}
if (std::string{SYSTEM} == "aarch64-darwin" &&
runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0)
extraPlatforms.insert("x86_64-darwin");
#endif
return extraPlatforms;
@ -285,4 +291,18 @@ void initPlugins()
settings.pluginFiles.pluginsLoaded = true;
}
static bool initLibStoreDone = false;
void assertLibStoreInitialized() {
if (!initLibStoreDone) {
printError("The program must call nix::initNix() before calling any libstore library functions.");
abort();
};
}
void initLibStore() {
initLibStoreDone = true;
}
}

View file

@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
const uint32_t maxIdsPerBuild =
#if __linux__
1 << 16
#else
1
#endif
;
class Settings : public Config {
unsigned int getDefaultCores();
@ -79,9 +87,6 @@ public:
/* A list of user configuration files to load. */
std::vector<Path> nixUserConfFiles;
/* The directory where internal helper programs are stored. */
Path nixLibexecDir;
/* The directory where the main programs are stored. */
Path nixBinDir;
@ -195,7 +200,7 @@ public:
)",
{"build-timeout"}};
PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
PathSetting buildHook{this, true, "", "build-hook",
"The path of the helper program that executes builds to remote machines."};
Setting<std::string> builders{
@ -276,8 +281,69 @@ public:
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
Defaults to `nixbld` when running as root, *empty* otherwise.
)",
{}, false};
Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
R"(
Whether to select UIDs for builds automatically, instead of using the
users in `build-users-group`.
UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = auto-allocate-uids
auto-allocate-uids = true
```
)"};
Setting<uint32_t> startId{this,
#if __linux__
0x34000000,
#else
56930,
#endif
"start-id",
"The first UID and GID to use for dynamic ID allocation."};
Setting<uint32_t> uidCount{this,
#if __linux__
maxIdsPerBuild * 128,
#else
128,
#endif
"id-count",
"The number of UIDs/GIDs to use for dynamic ID allocation."};
#if __linux__
Setting<bool> useCgroups{
this, false, "use-cgroups",
R"(
Whether to execute builds inside cgroups.
This is only supported on Linux.
Cgroups are required and enabled automatically for derivations
that require the `uid-range` system feature.
> **Warning**
> This is an experimental feature.
To enable it, add the following to [`nix.conf`](#):
```
extra-experimental-features = cgroups
use-cgroups = true
```
)"};
#endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
@ -310,11 +376,6 @@ public:
)",
{"build-max-log-size"}};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
repeated builds (i.e. builds other than the first one) to
stderr. Hack to prevent Hydra logs from being polluted. */
bool printRepeatedBuilds = true;
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
@ -430,6 +491,9 @@ public:
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
only be mounted in the sandbox if it exists in the host filesystem.
If the source is in the Nix store, then its closure will be added to
the sandbox as well.
Depending on how Nix was built, the default value for this option
may be empty or provide `/bin/sh` as a bind-mount of `bash`.
)",
@ -438,19 +502,6 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<size_t> buildRepeat{
this, 0, "repeat",
R"(
How many times to repeat builds to check whether they are
deterministic. The default value is 0. If the value is non-zero,
every build is repeated the specified number of times. If the
contents of any of the runs differs from the previous ones and
`enforce-determinism` is true, the build is rejected and the
resulting store paths are not registered as valid in Nix's
database.
)",
{"build-repeat"}};
#if __linux__
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
@ -514,10 +565,6 @@ public:
configuration file, and cannot be passed at the command line.
)"};
Setting<bool> enforceDeterminism{
this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output. See `repeat`."};
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
@ -563,9 +610,15 @@ public:
R"(
If set to `true` (the default), any non-content-addressed path added
or copied to the Nix store (e.g. when substituting from a binary
cache) must have a valid signature, that is, be signed using one of
the keys listed in `trusted-public-keys` or `secret-key-files`. Set
to `false` to disable signature checking.
cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`.
Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally.
(Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.)
)"};
Setting<StringSet> extraPlatforms{
@ -616,6 +669,14 @@ public:
are tried based on their Priority value, which each substituter can set
independently. Lower value means higher priority.
The default is `https://cache.nixos.org`, with a Priority of 40.
Nix will copy a store path from a remote store only if one
of the following is true:
- the store object is signed by one of the [`trusted-public-keys`](#conf-trusted-public-keys)
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- the [`require-sigs`](#conf-require-sigs) option has been set to `false`
- the store object is [output-addressed](glossary.md#gloss-output-addressed-store-object)
)",
{"binary-caches"}};
@ -749,6 +810,13 @@ public:
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
)"};
Setting<unsigned int> downloadSpeed {
this, 0, "download-speed",
R"(
Specify the maximum transfer rate in kilobytes per second you want
Nix to use for downloads.
)"};
Setting<std::string> netrcFile{
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
R"(
@ -802,7 +870,7 @@ public:
)"};
Setting<StringSet> ignoredAcls{
this, {"security.selinux", "system.nfs4_acl"}, "ignored-acls",
this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls",
R"(
A list of ACLs that should be ignored, normally Nix attempts to
remove all ACLs from files and directories in the Nix store, but
@ -919,4 +987,12 @@ std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
/* NB: This is not sufficient. You need to call initNix() */
void initLibStore();
/* It's important to initialize before doing _anything_, which is why we
call upon the programmer to handle this correctly. However, we only add
this in a few key locations, so as not to litter the code. */
void assertLibStoreInitialized();
}

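A hedged sketch of the intended call pattern (not part of this change; whether `initNix()` in libmain ends up calling `initLibStore()` is an assumption here):

```
#include "globals.hh"

// A library entry point can guard itself against callers that skipped setup.
void myLibstoreHelper()
{
    nix::assertLibStoreInitialized();   // aborts with a clear message otherwise
    // ... safe to use libstore state from here on ...
}

int main()
{
    nix::initLibStore();   // a full program would go through nix::initNix() (assumption)
    myLibstoreHelper();
    return 0;
}
```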
View file

@ -161,7 +161,12 @@ protected:
void getFile(const std::string & path,
Callback<std::optional<std::string>> callback) noexcept override
{
checkEnabled();
try {
checkEnabled();
} catch (...) {
callback.rethrow();
return;
}
auto request(makeRequest(path));

View file

@ -254,8 +254,8 @@ private:
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
<< settings.buildRepeat
<< settings.enforceDeterminism;
<< 0 // buildRepeat hasn't worked for ages anyway
<< 0;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);

View file

@ -57,8 +57,7 @@ protected:
AutoDelete del(tmp, false);
StreamToSourceAdapter source(istream);
writeFile(tmp, source);
if (rename(tmp.c_str(), path2.c_str()))
throw SysError("renaming '%1%' to '%2%'", tmp, path2);
renameFile(tmp, path2);
del.cancel();
}
@ -69,6 +68,7 @@ protected:
} catch (SysError & e) {
if (e.errNo == ENOENT)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
throw;
}
}
@ -107,7 +107,7 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
std::set<std::string> LocalBinaryCacheStore::uriSchemes()
{
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1")
if (getEnv("_NIX_FORCE_HTTP") == "1")
return {};
else
return {"file"};

View file

@ -81,7 +81,7 @@ int getSchema(Path schemaPath)
void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
{
const int nixCASchemaVersion = 3;
const int nixCASchemaVersion = 4;
int curCASchema = getSchema(schemaPath);
if (curCASchema != nixCASchemaVersion) {
if (curCASchema > nixCASchemaVersion) {
@ -91,6 +91,7 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
if (!lockFile(lockFd.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store for ca drvs...");
lockFile(lockFd.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(lockFd.get(), ltWrite, true);
}
@ -143,7 +144,22 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
)");
txn.commit();
}
writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
if (curCASchema < 4) {
SQLiteTxn txn(db);
db.exec(R"(
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
begin
delete from RealisationsRefs where realisationReference in (
select id from Realisations where outputPath = old.id
);
end;
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);
)");
txn.commit();
}
writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, true);
lockFile(lockFd.get(), ltRead, true);
}
}
@ -266,7 +282,7 @@ LocalStore::LocalStore(const Params & params)
else if (curSchema == 0) { /* new store */
curSchema = nixSchemaVersion;
openDB(*state, true);
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
}
else if (curSchema < nixSchemaVersion) {
@ -284,6 +300,7 @@ LocalStore::LocalStore(const Params & params)
if (!lockFile(globalLock.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store...");
lockFile(globalLock.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(globalLock.get(), ltWrite, true);
}
@ -314,7 +331,7 @@ LocalStore::LocalStore(const Params & params)
txn.commit();
}
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
lockFile(globalLock.get(), ltRead, true);
}
@ -424,9 +441,9 @@ LocalStore::~LocalStore()
}
try {
auto state(_state.lock());
if (state->fdTempRoots) {
state->fdTempRoots = -1;
auto fdTempRoots(_fdTempRoots.lock());
if (*fdTempRoots) {
*fdTempRoots = -1;
unlink(fnTempRoots.c_str());
}
} catch (...) {
@ -482,18 +499,18 @@ void LocalStore::openDB(State & state, bool create)
SQLiteStmt stmt;
stmt.create(db, "pragma main.journal_mode;");
if (sqlite3_step(stmt) != SQLITE_ROW)
throwSQLiteError(db, "querying journal mode");
SQLiteError::throw_(db, "querying journal mode");
prevMode = std::string((const char *) sqlite3_column_text(stmt, 0));
}
if (prevMode != mode &&
sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "setting journal mode");
SQLiteError::throw_(db, "setting journal mode");
/* Increase the auto-checkpoint interval to 40000 pages. This
seems enough to ensure that instantiating the NixOS system
derivation is done in a single fsync(). */
if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "setting autocheckpoint interval");
SQLiteError::throw_(db, "setting autocheckpoint interval");
/* Initialise the database schema, if necessary. */
if (create) {
@ -568,7 +585,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
}
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
static void canonicalisePathMetaData_(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
checkInterrupt();
@ -615,7 +635,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
@ -648,14 +668,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
canonicalisePathMetaData_(path, fromUid, inodesSeen);
canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
@ -668,10 +691,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid)
void canonicalisePathMetaData(const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
canonicalisePathMetaData(path, fromUid, inodesSeen);
canonicalisePathMetaData(path, uidRange, inodesSeen);
}
@ -703,7 +727,11 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
// somewhat expensive so we do lazily
hashesModulo = hashDerivationModulo(*this, drv, true);
}
StorePath recomputed = makeOutputPath(i.first, hashesModulo->hashes.at(i.first), drvName);
auto currentOutputHash = get(hashesModulo->hashes, i.first);
if (!currentOutputHash)
throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'",
printStorePath(drvPath), printStorePath(doia.path), i.first);
StorePath recomputed = makeOutputPath(i.first, *currentOutputHash, drvName);
if (doia.path != recomputed)
throw Error("derivation '%s' has incorrect output '%s', should be '%s'",
printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed));
@ -732,7 +760,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
throw Error("cannot register realisation '%s' because it lacks a signature by a trusted key", info.outPath.to_string());
}
void LocalStore::registerDrvOutput(const Realisation & info)
@ -1253,7 +1281,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
if (checkSigs && pathInfoIsUntrusted(info))
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
throw Error("cannot add path '%s' because it lacks a signature by a trusted key", printStorePath(info.path));
addTempRoot(info.path);
@ -1318,7 +1346,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC();
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath()
@ -1369,13 +1397,15 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
std::unique_ptr<AutoDelete> delTempDir;
Path tempPath;
Path tempDir;
AutoCloseFD tempDirFd;
if (!inMemory) {
/* Drain what we pulled so far, and then keep on pulling */
StringSource dumpSource { dump };
ChainSource bothSource { dumpSource, source };
auto tempDir = createTempDir(realStoreDir, "add");
std::tie(tempDir, tempDirFd) = createTempDirInStore();
delTempDir = std::make_unique<AutoDelete>(tempDir);
tempPath = tempDir + "/x";
@ -1431,8 +1461,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
writeFile(realPath, dumpSource);
} else {
/* Move the temporary path we restored above. */
if (rename(tempPath.c_str(), realPath.c_str()))
throw Error("renaming '%s' to '%s'", tempPath, realPath);
moveFile(tempPath, realPath);
}
/* For computing the nar hash. In recursive SHA-256 mode, this
@ -1444,7 +1473,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish();
}
canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair);
@ -1487,7 +1516,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s);
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
StringSink sink;
dumpString(s, sink);
@ -1510,18 +1539,24 @@ StorePath LocalStore::addTextToStore(
/* Create a temporary directory in the store that won't be
garbage-collected. */
Path LocalStore::createTempDirInStore()
garbage-collected until the returned FD is closed. */
std::pair<Path, AutoCloseFD> LocalStore::createTempDirInStore()
{
Path tmpDir;
Path tmpDirFn;
AutoCloseFD tmpDirFd;
bool lockedByUs = false;
do {
/* There is a slight possibility that `tmpDir' gets deleted by
the GC between createTempDir() and addTempRoot(), so repeat
until `tmpDir' exists. */
tmpDir = createTempDir(realStoreDir);
addTempRoot(parseStorePath(tmpDir));
} while (!pathExists(tmpDir));
return tmpDir;
the GC between createTempDir() and when we acquire a lock on it.
We'll repeat until 'tmpDir' exists and we've locked it. */
tmpDirFn = createTempDir(realStoreDir, "tmp");
tmpDirFd = open(tmpDirFn.c_str(), O_RDONLY | O_DIRECTORY);
if (tmpDirFd.get() < 0) {
continue;
}
lockedByUs = lockFile(tmpDirFd.get(), ltWrite, true);
} while (!pathExists(tmpDirFn) || !lockedByUs);
return {tmpDirFn, std::move(tmpDirFd)};
}
@ -1944,8 +1979,7 @@ void LocalStore::addBuildLog(const StorePath & drvPath, std::string_view log)
writeFile(tmpFile, compress("bzip2", log));
if (rename(tmpFile.c_str(), logPath.c_str()) != 0)
throw SysError("renaming '%1%' to '%2%'", tmpFile, logPath);
renameFile(tmpFile, logPath);
}
std::optional<std::string> LocalStore::getVersion()

View file

@ -59,15 +59,6 @@ private:
struct Stmts;
std::unique_ptr<Stmts> stmts;
/* The global GC lock */
AutoCloseFD fdGCLock;
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
/* Connection to the garbage collector. */
AutoCloseFD fdRootsSocket;
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
@ -156,6 +147,21 @@ public:
void addTempRoot(const StorePath & path) override;
private:
void createTempRootsFile();
/* The file to which we write our temporary roots. */
Sync<AutoCloseFD> _fdTempRoots;
/* The global GC lock. */
Sync<AutoCloseFD> _fdGCLock;
/* Connection to the garbage collector. */
Sync<AutoCloseFD> _fdRootsSocket;
public:
void addIndirectRoot(const Path & path) override;
private:
@ -256,7 +262,7 @@ private:
void findRuntimeRoots(Roots & roots, bool censor);
Path createTempDirInStore();
std::pair<Path, AutoCloseFD> createTempDirInStore();
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
@ -310,9 +316,18 @@ typedef std::set<Inode> InodesSeen;
- the permissions are set to 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
running as root. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
void canonicalisePathMetaData(const Path & path, uid_t fromUid);
running as root.
If uidRange is not empty, this function will throw an error if it
encounters files owned by a user outside of the closed interval
[uidRange->first, uidRange->second].
*/
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path);

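To illustrate the new signature, a minimal sketch (the store path name and UID range below are made up, and the path hash is reused purely for illustration):

```
#include "local-store.hh"

void example()
{
    // Accept files owned by any UID in the closed interval [30001, 30032]
    // (e.g. a dynamically allocated build user range); anything outside it
    // makes canonicalisation throw a BuildError.
    nix::canonicalisePathMetaData(
        "/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-example",
        std::pair<uid_t, uid_t>{30001, 30032});

    // An empty optional disables the ownership check (the old fromUid == -1 case).
    nix::canonicalisePathMetaData(
        "/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-example",
        std::nullopt);
}
```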
View file

@ -13,14 +13,10 @@ ifdef HOST_LINUX
libstore_LDFLAGS += -ldl
endif
ifdef HOST_DARWIN
libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb
endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif
ifdef HOST_SOLARIS
@ -39,14 +35,23 @@ libstore_CXXFLAGS += \
-DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
-DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
-DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
-DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
-DNIX_BIN_DIR=\"$(bindir)\" \
-DNIX_MAN_DIR=\"$(mandir)\" \
-DLSOF=\"$(lsof)\"
ifeq ($(embedded_sandbox_shell),yes)
libstore_CXXFLAGS += -DSANDBOX_SHELL=\"__embedded_sandbox_shell__\"
$(d)/build/local-derivation-goal.cc: $(d)/embedded-sandbox-shell.gen.hh
$(d)/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
$(trace-gen) hexdump -v -e '1/1 "0x%x," "\n"' < $< > $@.tmp
@mv $@.tmp $@
else
ifneq ($(sandbox_shell),)
libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
endif
endif
$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

View file

@ -2,92 +2,201 @@
#include "globals.hh"
#include "pathlocks.hh"
#include <grp.h>
#include <pwd.h>
#include <fcntl.h>
#include <unistd.h>
#include <grp.h>
namespace nix {
UserLock::UserLock()
struct SimpleUserLock : UserLock
{
assert(settings.buildUsersGroup != "");
createDirs(settings.nixStateDir + "/userpool");
}
AutoCloseFD fdUserLock;
uid_t uid;
gid_t gid;
std::vector<gid_t> supplementaryGIDs;
bool UserLock::findFreeUser() {
if (enabled()) return true;
uid_t getUID() override { assert(uid); return uid; }
uid_t getUIDCount() override { return 1; }
gid_t getGID() override { assert(gid); return gid; }
/* Get the members of the build-users-group. */
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%1%' specified in 'build-users-group' does not exist",
settings.buildUsersGroup);
gid = gr->gr_gid;
std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
/* Copy the result of getgrnam. */
Strings users;
for (char * * p = gr->gr_mem; *p; ++p) {
debug("found build user '%1%'", *p);
users.push_back(*p);
}
static std::unique_ptr<UserLock> acquire()
{
assert(settings.buildUsersGroup != "");
createDirs(settings.nixStateDir + "/userpool");
if (users.empty())
throw Error("the build users group '%1%' has no members",
settings.buildUsersGroup);
/* Get the members of the build-users-group. */
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%1%'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%1%' in the group '%2%' does not exist",
i, settings.buildUsersGroup);
fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%1%'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
fdUserLock = std::move(fd);
user = i;
uid = pw->pw_uid;
/* Sanity check... */
if (uid == getuid() || uid == geteuid())
throw Error("the Nix user should not be a member of '%1%'",
settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build user. This
is usually either empty or contains a group such as "kvm". */
supplementaryGIDs.resize(10);
int ngroups = supplementaryGIDs.size();
int err = getgrouplist(pw->pw_name, pw->pw_gid,
supplementaryGIDs.data(), &ngroups);
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name);
supplementaryGIDs.resize(ngroups);
#endif
isEnabled = true;
return true;
/* Copy the result of getgrnam. */
Strings users;
for (char * * p = gr->gr_mem; *p; ++p) {
debug("found build user '%s'", *p);
users.push_back(*p);
}
if (users.empty())
throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%s'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto lock = std::make_unique<SimpleUserLock>();
lock->fdUserLock = std::move(fd);
lock->uid = pw->pw_uid;
lock->gid = gr->gr_gid;
/* Sanity check... */
if (lock->uid == getuid() || lock->uid == geteuid())
throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build
user. This is usually either empty or contains a
group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
std::vector<gid_t> gids;
gids.resize(ngroups);
int err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
/* If the initial guess of 32 wasn't sufficient,
getgrouplist() has stored the required size in
ngroups, so we resize and try again. */
if (err == -1) {
gids.resize(ngroups);
err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
// Finally, trim back the GID list to its real size.
for (auto i = 0; i < ngroups; i++)
if (gids[i] != lock->gid)
lock->supplementaryGIDs.push_back(gids[i]);
#endif
return lock;
}
}
return nullptr;
}
};
return false;
}
void UserLock::kill()
struct AutoUserLock : UserLock
{
killUser(uid);
AutoCloseFD fdUserLock;
uid_t firstUid = 0;
gid_t firstGid = 0;
uid_t nrIds = 1;
uid_t getUID() override { assert(firstUid); return firstUid; }
gid_t getUIDCount() override { return nrIds; }
gid_t getGID() override { assert(firstGid); return firstGid; }
std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useUserNamespace)
{
#if !defined(__linux__)
useUserNamespace = false;
#endif
settings.requireExperimentalFeature(Xp::AutoAllocateUids);
assert(settings.startId > 0);
assert(settings.uidCount % maxIdsPerBuild == 0);
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
assert(nrIds <= maxIdsPerBuild);
createDirs(settings.nixStateDir + "/userpool2");
size_t nrSlots = settings.uidCount / maxIdsPerBuild;
for (size_t i = 0; i < nrSlots; i++) {
debug("trying user slot '%d'", i);
createDirs(settings.nixStateDir + "/userpool2");
auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto firstUid = settings.startId + i * maxIdsPerBuild;
auto pw = getpwuid(firstUid);
if (pw)
throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
auto lock = std::make_unique<AutoUserLock>();
lock->fdUserLock = std::move(fd);
lock->firstUid = firstUid;
if (useUserNamespace)
lock->firstGid = firstUid;
else {
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
lock->firstGid = gr->gr_gid;
}
lock->nrIds = nrIds;
return lock;
}
}
return nullptr;
}
};
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useUserNamespace)
{
if (settings.autoAllocateUids)
return AutoUserLock::acquire(nrIds, useUserNamespace);
else
return SimpleUserLock::acquire();
}
bool useBuildUsers()
{
#if __linux__
static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && getuid() == 0;
return b;
#elif __APPLE__
static bool b = settings.buildUsersGroup != "" && getuid() == 0;
return b;
#else
return false;
#endif
}
}

View file

@ -1,37 +1,38 @@
#pragma once
#include "sync.hh"
#include "types.hh"
#include "util.hh"
#include <optional>
#include <sys/types.h>
namespace nix {
class UserLock
struct UserLock
{
private:
Path fnUserLock;
AutoCloseFD fdUserLock;
virtual ~UserLock() { }
bool isEnabled = false;
std::string user;
uid_t uid = 0;
gid_t gid = 0;
std::vector<gid_t> supplementaryGIDs;
/* Get the first and last UID. */
std::pair<uid_t, uid_t> getUIDRange()
{
auto first = getUID();
return {first, first + getUIDCount() - 1};
}
public:
UserLock();
/* Get the first UID. */
virtual uid_t getUID() = 0;
void kill();
virtual uid_t getUIDCount() = 0;
std::string getUser() { return user; }
uid_t getUID() { assert(uid); return uid; }
uid_t getGID() { assert(gid); return gid; }
std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
bool findFreeUser();
bool enabled() { return isEnabled; }
virtual gid_t getGID() = 0;
virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
};
/* Acquire a user lock for a UID range of size `nrIds`. Note that this
may return nullptr if no user is available. */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useUserNamespace);
bool useBuildUsers();
}

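A short, hedged usage sketch of the new interface (the header names and the retry policy shown here are assumptions, not part of the diff):

```
#include "lock.hh"      // assumed header for UserLock / acquireUserLock
#include "logging.hh"

void example()
{
    using namespace nix;

    if (!useBuildUsers()) return;           // no build users on this platform/configuration

    // Ask for a single UID without a user namespace; this may return nullptr
    // if every build user / UID slot is currently locked by another build.
    std::unique_ptr<UserLock> lock = acquireUserLock(1, false);
    if (!lock) {
        debug("all build users are busy, try again later");
        return;
    }

    debug("building with uid %d, gid %d", lock->getUID(), lock->getGID());
}
```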
View file

@ -289,11 +289,16 @@ std::map<DrvOutput, StorePath> drvOutputReferences(
std::set<Realisation> inputRealisations;
for (const auto & [inputDrv, outputNames] : drv.inputDrvs) {
auto outputHashes =
const auto outputHashes =
staticOutputHashes(store, store.readDerivation(inputDrv));
for (const auto & outputName : outputNames) {
auto outputHash = get(outputHashes, outputName);
if (!outputHash)
throw Error(
"output '%s' of derivation '%s' isn't realised", outputName,
store.printStorePath(inputDrv));
auto thisRealisation = store.queryRealisation(
DrvOutput{outputHashes.at(outputName), outputName});
DrvOutput{*outputHash, outputName});
if (!thisRealisation)
throw Error(
"output '%s' of derivation '%s' isn't built", outputName,

View file

@ -1,6 +1,5 @@
#include "nar-accessor.hh"
#include "archive.hh"
#include "json.hh"
#include <map>
#include <stack>
@ -75,6 +74,9 @@ struct NarAccessor : public FSAccessor
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
}
void closeRegularFile() override
{ }
void isExecutable() override
{
parents.top()->isExecutable = true;
@ -240,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes);
}
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse)
using nlohmann::json;
json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{
auto st = accessor->stat(path);
auto obj = res.object();
json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
obj["type"] = "regular";
obj["size"] = st.fileSize;
if (st.isExecutable)
obj.attr("executable", true);
obj["executable"] = true;
if (st.narOffset)
obj.attr("narOffset", st.narOffset);
obj["narOffset"] = st.narOffset;
break;
case FSAccessor::Type::tDirectory:
obj.attr("type", "directory");
obj["type"] = "directory";
{
auto res2 = obj.object("entries");
obj["entries"] = json::object();
json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
auto res3 = res2.placeholder(name);
listNar(res3, accessor, path + "/" + name, true);
res2[name] = listNar(accessor, path + "/" + name, true);
} else
res2.object(name);
res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
obj.attr("type", "symlink");
obj.attr("target", accessor->readLink(path));
obj["type"] = "symlink";
obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
return obj;
}
}

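A minimal sketch of how the new return value can be used (not part of the diff); it assumes a `ref<FSAccessor>` over a NAR already exists, e.g. from `makeNarAccessor()`:

```
#include "nar-accessor.hh"
#include <nlohmann/json.hpp>

nlohmann::json listingFor(nix::ref<nix::FSAccessor> narAccessor)
{
    // Recursively list the whole NAR, starting at its root.
    nlohmann::json listing = {
        {"version", 1},
        {"root", nix::listNar(narAccessor, "", true)},
    };
    // listing["root"]["type"] will be "regular", "directory" or "symlink".
    return listing;
}
```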
View file

@ -2,6 +2,7 @@
#include <functional>
#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
class JSONPlaceholder;
/* Write a JSON representation of the contents of a NAR (except file
contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse);
nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}

View file

@ -62,6 +62,9 @@ public:
/* How often to purge expired entries from the cache. */
const int purgeInterval = 24 * 3600;
/* How long to cache binary cache info (i.e. /nix-cache-info) */
const int cacheInfoTtl = 7 * 24 * 3600;
struct Cache
{
int id;
@ -98,7 +101,7 @@ public:
"insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
state->insertNAR.create(state->db,
"insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
@ -163,16 +166,37 @@ public:
return i->second;
}
std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
if (i == state.caches.end()) {
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
state.caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
return getCache(state, uri);
}
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
retrySQLite<void>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
// FIXME: race
// To avoid the race, check whether another process has created
// the cache for this URI in the meantime.
auto cache(queryCacheRaw(*state, uri));
if (cache)
return;
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
txn.commit();
});
}
@ -180,21 +204,12 @@ public:
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri));
if (!queryCache.next())
return std::nullopt;
state->caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
auto & cache(getCache(*state, uri));
auto cache(queryCacheRaw(*state, uri));
if (!cache)
return std::nullopt;
return CacheInfo {
.wantMassQuery = cache.wantMassQuery,
.priority = cache.priority
.wantMassQuery = cache->wantMassQuery,
.priority = cache->priority
};
});
}

View file

@ -69,8 +69,6 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
if (value != "unknown-deriver")
deriver = StorePath(value);
}
else if (name == "System")
system = value;
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
@ -106,9 +104,6 @@ std::string NarInfo::to_string(const Store & store) const
if (deriver)
res += "Deriver: " + std::string(deriver->to_string()) + "\n";
if (!system.empty())
res += "System: " + system + "\n";
for (auto sig : sigs)
res += "Sig: " + sig + "\n";

View file

@ -14,7 +14,6 @@ struct NarInfo : ValidPathInfo
std::string compression;
std::optional<Hash> fileHash;
uint64_t fileSize = 0;
std::string system;
NarInfo() = delete;
NarInfo(const Store & store, StorePathDescriptor && ca, Hash narHash)

View file

@ -229,7 +229,9 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
}
/* Atomically replace the old file with the new hard link. */
if (rename(tempLink.c_str(), path.c_str()) == -1) {
try {
renameFile(tempLink, path);
} catch (SysError & e) {
if (unlink(tempLink.c_str()) == -1)
printError("unable to unlink '%1%'", tempLink);
if (errno == EMLINK) {
@ -240,7 +242,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
debug("'%s' has reached maximum number of links", linkPath);
return;
}
throw SysError("cannot rename '%1%' to '%2%'", tempLink, path);
throw;
}
stats.filesLinked++;

View file

@ -2,7 +2,6 @@
#include <nlohmann/json.hpp>
#include <regex>
#include "json.hh"
namespace nix {
@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
// FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
bool ParsedDerivation::useUidRange() const
{
return getRequiredSystemFeatures().count("uid-range");
}
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@ -144,16 +149,11 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
std::ostringstream str;
{
JSONPlaceholder jsonRoot(str, true);
StorePathSet storePaths;
for (auto & p : *i)
storePaths.insert(store.parseStorePath(p.get<std::string>()));
store.pathInfoToJSON(jsonRoot,
store.exportReferences(storePaths, inputPaths), false, true);
}
json[i.key()] = nlohmann::json::parse(str.str()); // urgh
StorePathSet storePaths;
for (auto & p : *i)
storePaths.insert(store.parseStorePath(p.get<std::string>()));
json[i.key()] = store.pathInfoToJSON(
store.exportReferences(storePaths, inputPaths), false, true);
}
}

View file

@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const;
bool useUidRange() const;
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};

View file

@ -1,5 +1,8 @@
#include "path-with-outputs.hh"
#include "store-api.hh"
#include "nlohmann/json.hpp"
#include <regex>
namespace nix {
@ -68,4 +71,57 @@ StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std:
return StorePathWithOutputs { store.followLinksToStorePath(path), std::move(outputs) };
}
std::pair<std::string, OutputsSpec> parseOutputsSpec(const std::string & s)
{
static std::regex regex(R"((.*)\^((\*)|([a-z]+(,[a-z]+)*)))");
std::smatch match;
if (!std::regex_match(s, match, regex))
return {s, DefaultOutputs()};
if (match[3].matched)
return {match[1], AllOutputs()};
return {match[1], tokenizeString<OutputNames>(match[4].str(), ",")};
}
std::string printOutputsSpec(const OutputsSpec & outputsSpec)
{
if (std::get_if<DefaultOutputs>(&outputsSpec))
return "";
if (std::get_if<AllOutputs>(&outputsSpec))
return "^*";
if (auto outputNames = std::get_if<OutputNames>(&outputsSpec))
return "^" + concatStringsSep(",", *outputNames);
assert(false);
}
void to_json(nlohmann::json & json, const OutputsSpec & outputsSpec)
{
if (std::get_if<DefaultOutputs>(&outputsSpec))
json = nullptr;
else if (std::get_if<AllOutputs>(&outputsSpec))
json = std::vector<std::string>({"*"});
else if (auto outputNames = std::get_if<OutputNames>(&outputsSpec))
json = *outputNames;
}
void from_json(const nlohmann::json & json, OutputsSpec & outputsSpec)
{
if (json.is_null())
outputsSpec = DefaultOutputs();
else {
auto names = json.get<OutputNames>();
if (names == OutputNames({"*"}))
outputsSpec = AllOutputs();
else
outputsSpec = names;
}
}
}

View file

@ -4,6 +4,7 @@
#include "path.hh"
#include "derived-path.hh"
#include "nlohmann/json_fwd.hpp"
namespace nix {
@ -32,4 +33,25 @@ StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view
StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
typedef std::set<std::string> OutputNames;
struct AllOutputs {
bool operator < (const AllOutputs & _) const { return false; }
};
struct DefaultOutputs {
bool operator < (const DefaultOutputs & _) const { return false; }
};
typedef std::variant<DefaultOutputs, AllOutputs, OutputNames> OutputsSpec;
/* Parse a string of the form 'prefix^output1,...outputN' or
'prefix^*', returning the prefix and the outputs spec. */
std::pair<std::string, OutputsSpec> parseOutputsSpec(const std::string & s);
std::string printOutputsSpec(const OutputsSpec & outputsSpec);
void to_json(nlohmann::json &, const OutputsSpec &);
void from_json(const nlohmann::json &, OutputsSpec &);
}
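
A minimal usage sketch of the new OutputsSpec helpers, assuming only the declarations above and nlohmann::json; the installable name and the main() scaffolding are illustrative, not part of the change:

#include "path-with-outputs.hh"

#include <nlohmann/json.hpp>
#include <iostream>

using namespace nix;

int main()
{
    // Split "hello^out,dev" into the prefix and an outputs spec.
    auto [prefix, spec] = parseOutputsSpec("hello^out,dev");
    // prefix == "hello"; spec holds OutputNames{"dev", "out"} (a std::set).

    // printOutputsSpec() reproduces the "^..." suffix (empty for the default spec).
    std::cout << prefix << printOutputsSpec(spec) << std::endl;   // hello^dev,out

    // The to_json/from_json hooks let an OutputsSpec round-trip through JSON.
    nlohmann::json j = spec;                        // ["dev","out"]
    auto roundTripped = j.get<OutputsSpec>();
    std::cout << j.dump() << " " << printOutputsSpec(roundTripped) << std::endl;

    return 0;
}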

View file

@ -96,4 +96,14 @@ struct RealisedPath {
GENERATE_CMP(RealisedPath, me->raw);
};
class MissingRealisation : public Error
{
public:
MissingRealisation(DrvOutput & outputId)
: Error( "cannot operate on an output of the "
"unbuilt derivation '%s'",
outputId.to_string())
{}
};
}

View file

@ -67,6 +67,40 @@ void RefScanSink::operator () (std::string_view data)
}
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path,
const StorePathSet & refs)
@ -82,30 +116,13 @@ StorePathSet scanForReferences(
const Path & path,
const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
TeeSink sink { refsSink, toTee };
/* Look for the hashes in the NAR dump of the path. */
RefScanSink refsSink(std::move(hashes));
TeeSink sink { refsSink, toTee };
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : refsSink.getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
return refsSink.getResultPaths();
}
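
A rough sketch of driving the factored-out PathRefScanSink directly, for data that does not come from dumpPath(); the helper name and blob are hypothetical, and only the declarations from references.hh are assumed:

#include "references.hh"
#include "path.hh"

using namespace nix;

/* Hypothetical helper: scan an in-memory blob for references to the
   given candidate store paths. */
static StorePathSet scanBlobForReferences(
    std::string_view data,
    const StorePathSet & candidates)
{
    // Index the candidates by the hash part of their store path.
    PathRefScanSink sink = PathRefScanSink::fromPaths(candidates);

    // Feed the data; with streaming input this can be called once per chunk.
    sink(data);

    // Map the hash parts that were found back to full store paths.
    return sink.getResultPaths();
}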

View file

@ -27,6 +27,19 @@ public:
void operator () (std::string_view data) override;
};
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
struct RewritingSink : Sink
{
std::string from, to, prev;

View file

@ -1,6 +1,6 @@
#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") {
try {
std::ostringstream str;
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), str.str());
nlohmann::json j = listNar(narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), j.dump());
} catch (...) {
ignoreException();
}

View file

@ -448,7 +448,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
throw InvalidPath(e.info());
throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
@ -585,7 +585,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
try {
conn->to.written = 0;
conn->to.warn = true;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
@ -596,7 +595,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
dumpString(contents, conn->to);
}
}
conn->to.warn = false;
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
@ -678,6 +676,23 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
}
void RemoteStore::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
auto source = sinkToSource([&](Sink & sink) {
sink << pathsToCopy.size();
for (auto & [pathInfo, pathSource] : pathsToCopy) {
pathInfo.write(sink, *this, 16);
pathSource->drainInto(sink);
}
});
addMultipleToStore(*source, repair, checkSigs);
}
void RemoteStore::addMultipleToStore(
Source & source,
RepairFlag repair,
@ -723,36 +738,34 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
void RemoteStore::queryRealisationUncached(const DrvOutput & id,
Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
try {
callback(nullptr);
} catch (...) { return callback.rethrow(); }
}
conn->to << wopQueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
auto outPaths = worker_proto::read(
*this, conn->from, Phantom<std::set<StorePath>> {});
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
auto realisations = worker_proto::read(
*this, conn->from, Phantom<std::set<Realisation>> {});
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
}
}();
try {
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
return callback(nullptr);
}
conn->to << wopQueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
auto outPaths = worker_proto::read(
*this, conn->from, Phantom<std::set<StorePath>> {});
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
auto realisations = worker_proto::read(
*this, conn->from, Phantom<std::set<Realisation>> {});
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
}
}();
callback(std::shared_ptr<const Realisation>(real));
} catch (...) { return callback.rethrow(); }
}
@ -858,34 +871,32 @@ std::vector<BuildResult> RemoteStore::buildPathsWithResults(
OutputPathMap outputs;
auto drv = evalStore->readDerivation(bfd.drvPath);
auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
auto drvOutputs = drv.outputsAndOptPaths(*this);
const auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
const auto drvOutputs = drv.outputsAndOptPaths(*this);
for (auto & output : bfd.outputs) {
if (!outputHashes.count(output))
auto outputHash = get(outputHashes, output);
if (!outputHash)
throw Error(
"the derivation '%s' doesn't have an output named '%s'",
printStorePath(bfd.drvPath), output);
auto outputId =
DrvOutput{outputHashes.at(output), output};
auto outputId = DrvOutput{ *outputHash, output };
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
auto realisation =
queryRealisation(outputId);
if (!realisation)
throw Error(
"cannot operate on an output of unbuilt "
"content-addressed derivation '%s'",
outputId.to_string());
throw MissingRealisation(outputId);
res.builtOutputs.emplace(realisation->id, *realisation);
} else {
// If ca-derivations isn't enabled, assume that
// the output path is statically known.
assert(drvOutputs.count(output));
assert(drvOutputs.at(output).second);
const auto drvOutput = get(drvOutputs, output);
assert(drvOutput);
assert(drvOutput->second);
res.builtOutputs.emplace(
outputId,
Realisation {
.id = outputId,
.outPath = *drvOutputs.at(output).second
.outPath = *drvOutput->second,
});
}
}

View file

@ -89,6 +89,12 @@ public:
RepairFlag repair,
CheckSigsFlag checkSigs) override;
void addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs) override;
StorePath addTextToStore(
std::string_view name,
std::string_view s,

View file

@ -5,6 +5,7 @@
#include "ref.hh"
#include <optional>
#include <string>
namespace Aws { namespace Client { class ClientConfiguration; } }
namespace Aws { namespace S3 { class S3Client; } }

View file

@ -1,7 +1,7 @@
create table if not exists ValidPaths (
id integer primary key autoincrement not null,
path text unique not null,
hash text not null,
hash text not null, -- base16 representation
registrationTime integer not null,
deriver text,
narSize integer,

View file

@ -8,22 +8,37 @@
namespace nix {
[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs)
SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf)
: Error(""), path(path), errMsg(errMsg), errNo(errNo), extendedErrNo(extendedErrNo), offset(offset)
{
auto offsetStr = (offset == -1) ? "" : "at offset " + std::to_string(offset) + ": ";
err.msg = hintfmt("%s: %s%s, %s (in '%s')",
normaltxt(hf.str()),
offsetStr,
sqlite3_errstr(extendedErrNo),
errMsg,
path ? path : "(in-memory)");
}
[[noreturn]] void SQLiteError::throw_(sqlite3 * db, hintformat && hf)
{
int err = sqlite3_errcode(db);
int exterr = sqlite3_extended_errcode(db);
int offset = sqlite3_error_offset(db);
auto path = sqlite3_db_filename(db, nullptr);
if (!path) path = "(in-memory)";
auto errMsg = sqlite3_errmsg(db);
if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
throw SQLiteBusy(
auto exp = SQLiteBusy(path, errMsg, err, exterr, offset, std::move(hf));
exp.err.msg = hintfmt(
err == SQLITE_PROTOCOL
? fmt("SQLite database '%s' is busy (SQLITE_PROTOCOL)", path)
: fmt("SQLite database '%s' is busy", path));
}
else
throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path);
? "SQLite database '%s' is busy (SQLITE_PROTOCOL)"
: "SQLite database '%s' is busy",
path ? path : "(in-memory)");
throw exp;
} else
throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
}
SQLite::SQLite(const Path & path, bool create)
@ -32,12 +47,16 @@ SQLite::SQLite(const Path & path, bool create)
// `unix-dotfile` is needed on NFS file systems and on Windows' Subsystem
// for Linux (WSL) where useSQLiteWAL should be false by default.
const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
if (sqlite3_open_v2(path.c_str(), &db,
SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), vfs) != SQLITE_OK)
throw Error("cannot open SQLite database '%s'", path);
int flags = SQLITE_OPEN_READWRITE;
if (create) flags |= SQLITE_OPEN_CREATE;
int ret = sqlite3_open_v2(path.c_str(), &db, flags, vfs);
if (ret != SQLITE_OK) {
const char * err = sqlite3_errstr(ret);
throw Error("cannot open SQLite database '%s': %s", path, err);
}
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
throwSQLiteError(db, "setting timeout");
SQLiteError::throw_(db, "setting timeout");
exec("pragma foreign_keys = 1");
}
@ -46,7 +65,7 @@ SQLite::~SQLite()
{
try {
if (db && sqlite3_close(db) != SQLITE_OK)
throwSQLiteError(db, "closing database");
SQLiteError::throw_(db, "closing database");
} catch (...) {
ignoreException();
}
@ -62,7 +81,7 @@ void SQLite::exec(const std::string & stmt)
{
retrySQLite<void>([&]() {
if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, format("executing SQLite statement '%s'") % stmt);
SQLiteError::throw_(db, "executing SQLite statement '%s'", stmt);
});
}
@ -76,7 +95,7 @@ void SQLiteStmt::create(sqlite3 * db, const std::string & sql)
checkInterrupt();
assert(!stmt);
if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
throwSQLiteError(db, fmt("creating statement '%s'", sql));
SQLiteError::throw_(db, "creating statement '%s'", sql);
this->db = db;
this->sql = sql;
}
@ -85,7 +104,7 @@ SQLiteStmt::~SQLiteStmt()
{
try {
if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
throwSQLiteError(db, fmt("finalizing statement '%s'", sql));
SQLiteError::throw_(db, "finalizing statement '%s'", sql);
} catch (...) {
ignoreException();
}
@ -109,7 +128,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (std::string_view value, bool not
{
if (notNull) {
if (sqlite3_bind_text(stmt, curArg++, value.data(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
SQLiteError::throw_(stmt.db, "binding argument");
} else
bind();
return *this;
@ -119,7 +138,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (const unsigned char * data, size
{
if (notNull) {
if (sqlite3_bind_blob(stmt, curArg++, data, len, SQLITE_TRANSIENT) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
SQLiteError::throw_(stmt.db, "binding argument");
} else
bind();
return *this;
@ -129,7 +148,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
{
if (notNull) {
if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
SQLiteError::throw_(stmt.db, "binding argument");
} else
bind();
return *this;
@ -138,7 +157,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
SQLiteStmt::Use & SQLiteStmt::Use::bind()
{
if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
SQLiteError::throw_(stmt.db, "binding argument");
return *this;
}
@ -152,14 +171,14 @@ void SQLiteStmt::Use::exec()
int r = step();
assert(r != SQLITE_ROW);
if (r != SQLITE_DONE)
throwSQLiteError(stmt.db, fmt("executing SQLite statement '%s'", sqlite3_expanded_sql(stmt.stmt)));
SQLiteError::throw_(stmt.db, fmt("executing SQLite statement '%s'", sqlite3_expanded_sql(stmt.stmt)));
}
bool SQLiteStmt::Use::next()
{
int r = step();
if (r != SQLITE_DONE && r != SQLITE_ROW)
throwSQLiteError(stmt.db, fmt("executing SQLite query '%s'", sqlite3_expanded_sql(stmt.stmt)));
SQLiteError::throw_(stmt.db, fmt("executing SQLite query '%s'", sqlite3_expanded_sql(stmt.stmt)));
return r == SQLITE_ROW;
}
@ -185,14 +204,14 @@ SQLiteTxn::SQLiteTxn(sqlite3 * db)
{
this->db = db;
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "starting transaction");
SQLiteError::throw_(db, "starting transaction");
active = true;
}
void SQLiteTxn::commit()
{
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "committing transaction");
SQLiteError::throw_(db, "committing transaction");
active = false;
}
@ -200,7 +219,7 @@ SQLiteTxn::~SQLiteTxn()
{
try {
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "aborting transaction");
SQLiteError::throw_(db, "aborting transaction");
} catch (...) {
ignoreException();
}

View file

@ -96,10 +96,31 @@ struct SQLiteTxn
};
MakeError(SQLiteError, Error);
MakeError(SQLiteBusy, SQLiteError);
struct SQLiteError : Error
{
std::string path;
std::string errMsg;
int errNo, extendedErrNo, offset;
[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs);
template<typename... Args>
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
throw_(db, hintfmt(fs, args...));
}
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf);
protected:
template<typename... Args>
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args)
: SQLiteError(path, errMsg, errNo, extendedErrNo, offset, hintfmt(fs, args...))
{ }
[[noreturn]] static void throw_(sqlite3 * db, hintformat && hf);
};
MakeError(SQLiteBusy, SQLiteError);
void handleSQLiteBusy(const SQLiteBusy & e);
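
As a rough caller-side illustration (assumed, not part of this change), the extra context carried by SQLiteError can be surfaced directly; 'db' stands for any open nix::SQLite handle and the statement is illustrative only:

try {
    db.exec("create table if not exists Example (id integer primary key)");
} catch (SQLiteError & e) {
    // The exception now exposes the database path, SQLite message and error
    // codes as structured fields instead of only a pre-formatted string.
    printError("SQLite error %d (extended %d) in '%s': %s",
        e.errNo, e.extendedErrNo, e.path, e.errMsg);
    throw;
}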

View file

@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) {
args = { "bash", "-c" };
} else {
args = { "ssh", host.c_str(), "-x", "-a" };
args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});

View file

@ -6,33 +6,35 @@
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "json.hh"
#include "url.hh"
#include "references.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
#include <nlohmann/json.hpp>
#include <regex>
using json = nlohmann::json;
namespace nix {
bool Store::isInStore(const Path & path) const
bool Store::isInStore(PathView path) const
{
return isInDir(path, storeDir);
}
std::pair<StorePath, Path> Store::toStorePath(const Path & path) const
std::pair<StorePath, Path> Store::toStorePath(PathView path) const
{
if (!isInStore(path))
throw Error("path '%1%' is not in the Nix store", path);
Path::size_type slash = path.find('/', storeDir.size() + 1);
auto slash = path.find('/', storeDir.size() + 1);
if (slash == Path::npos)
return {parseStorePath(path), ""};
else
return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
return {parseStorePath(path.substr(0, slash)), (Path) path.substr(slash)};
}
@ -267,6 +269,84 @@ StorePath Store::addToStore(
return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
}
void Store::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
std::atomic<size_t> nrDone{0};
std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
std::atomic<uint64_t> nrRunning{0};
using PathWithInfo = std::pair<ValidPathInfo, std::unique_ptr<Source>>;
std::map<StorePath, PathWithInfo *> infosMap;
StorePathSet storePathsToAdd;
for (auto & thingToAdd : pathsToCopy) {
infosMap.insert_or_assign(thingToAdd.first.path, &thingToAdd);
storePathsToAdd.insert(thingToAdd.first.path);
}
auto showProgress = [&]() {
act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed);
};
ThreadPool pool;
processGraph<StorePath>(pool,
storePathsToAdd,
[&](const StorePath & path) {
auto & [info, _] = *infosMap.at(path);
if (isValidPath(info.path)) {
nrDone++;
showProgress();
return StorePathSet();
}
bytesExpected += info.narSize;
act.setExpected(actCopyPath, bytesExpected);
return info.references;
},
[&](const StorePath & path) {
checkInterrupt();
auto & [info_, source_] = *infosMap.at(path);
auto info = info_;
info.ultimate = false;
/* Make sure that the Source object is destroyed when
we're done. In particular, a SinkToSource object must
be destroyed to ensure that the destructors on its
stack frame are run; this includes
LegacySSHStore::narFromPath()'s connection lock. */
auto source = std::move(source_);
if (!isValidPath(info.path)) {
MaintainCount<decltype(nrRunning)> mc(nrRunning);
showProgress();
try {
addToStore(info, *source, repair, checkSigs);
} catch (Error & e) {
nrFailed++;
if (!settings.keepGoing)
throw e;
printMsg(lvlError, "could not copy %s: %s", printStorePath(path), e.what());
showProgress();
return;
}
}
nrDone++;
showProgress();
});
}
void Store::addMultipleToStore(
Source & source,
@ -396,6 +476,7 @@ Store::Store(const Params & params)
: StoreConfig(params)
, state({(size_t) pathInfoCacheSize})
{
assertLibStoreInitialized();
}
@ -778,56 +859,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths;
}
void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
json Store::pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
AllowInvalidFlag allowInvalid)
{
auto jsonList = jsonOut.list();
json::array_t jsonList = json::array();
for (auto & storePath : storePaths) {
auto jsonPath = jsonList.object();
auto& jsonPath = jsonList.emplace_back(json::object());
try {
auto info = queryPathInfo(storePath);
jsonPath.attr("path", printStorePath(info->path));
jsonPath
.attr("narHash", info->narHash.to_string(hashBase, true))
.attr("narSize", info->narSize);
jsonPath["path"] = printStorePath(info->path);
jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
jsonPath["narSize"] = info->narSize;
{
auto jsonRefs = jsonPath.list("references");
auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references)
jsonRefs.elem(printStorePath(ref));
jsonRefs.emplace_back(printStorePath(ref));
}
if (info->ca)
jsonPath.attr("ca", renderContentAddress(info->ca));
jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) {
closureSizes = getClosureSize(info->path);
jsonPath.attr("closureSize", closureSizes.first);
jsonPath["closureSize"] = closureSizes.first;
}
if (includeImpureInfo) {
if (info->deriver)
jsonPath.attr("deriver", printStorePath(*info->deriver));
jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime)
jsonPath.attr("registrationTime", info->registrationTime);
jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate)
jsonPath.attr("ultimate", info->ultimate);
jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) {
auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs)
jsonSigs.elem(sig);
jsonPath["signatures"].push_back(sig);
}
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@ -835,21 +913,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) {
if (!narInfo->url.empty())
jsonPath.attr("url", narInfo->url);
jsonPath["url"] = narInfo->url;
if (narInfo->fileHash)
jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true));
jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize)
jsonPath.attr("downloadSize", narInfo->fileSize);
jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize)
jsonPath.attr("closureDownloadSize", closureSizes.second);
jsonPath["closureDownloadSize"] = closureSizes.second;
}
}
} catch (InvalidPath &) {
jsonPath.attr("path", printStorePath(storePath));
jsonPath.attr("valid", false);
jsonPath["path"] = printStorePath(storePath);
jsonPath["valid"] = false;
}
}
return jsonList;
}
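
A minimal caller-side sketch of the new return-value API; the helper itself is hypothetical, but this is roughly how `nix path-info --json` consumes it:

#include "store-api.hh"

#include <nlohmann/json.hpp>
#include <iostream>

using namespace nix;

/* Hypothetical helper: print the closure of one store path as JSON. */
void printClosureInfo(Store & store, const StorePath & path)
{
    StorePathSet closure;
    store.computeFSClosure(path, closure);

    // The JSON document is built and returned directly, rather than being
    // streamed through a JSONPlaceholder as before.
    nlohmann::json j = store.pathInfoToJSON(
        closure,
        /* includeImpureInfo */ false,
        /* showClosureSize */ true);

    std::cout << j.dump(2) << std::endl;
}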
@ -1011,115 +1090,62 @@ std::map<StorePath, StorePath> copyPaths(
for (auto & path : storePaths)
if (!valid.count(path)) missing.insert(path);
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
// In the general case, `addMultipleToStore` requires a sorted list of
// store paths to add, so sort them right now
auto sortedMissing = srcStore.topoSortPaths(missing);
std::reverse(sortedMissing.begin(), sortedMissing.end());
std::map<StorePath, StorePath> pathsMap;
for (auto & path : storePaths)
pathsMap.insert_or_assign(path, path);
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
Store::PathsSource pathsToCopy;
auto sorted = srcStore.topoSortPaths(missing);
std::reverse(sorted.begin(), sorted.end());
auto computeStorePathForDst = [&](const ValidPathInfo & currentPathInfo) -> StorePath {
auto storePathForSrc = currentPathInfo.path;
auto storePathForDst = storePathForSrc;
if (currentPathInfo.ca && currentPathInfo.references.empty()) {
storePathForDst = dstStore.makeFixedOutputPathFromCA(
currentPathInfo.fullStorePathDescriptorOpt().value());
if (dstStore.storeDir == srcStore.storeDir)
assert(storePathForDst == storePathForSrc);
if (storePathForDst != storePathForSrc)
debug("replaced path '%s' to '%s' for substituter '%s'",
srcStore.printStorePath(storePathForSrc),
dstStore.printStorePath(storePathForDst),
dstStore.getUri());
}
return storePathForDst;
};
auto source = sinkToSource([&](Sink & sink) {
sink << sorted.size();
for (auto & storePath : sorted) {
for (auto & missingPath : sortedMissing) {
auto info = srcStore.queryPathInfo(missingPath);
auto storePathForDst = computeStorePathForDst(*info);
pathsMap.insert_or_assign(missingPath, storePathForDst);
ValidPathInfo infoForDst = *info;
infoForDst.path = storePathForDst;
auto source = sinkToSource([&](Sink & sink) {
// We can reasonably assume that the copy will happen whenever we
// read the path, so log something about that at that point
auto srcUri = srcStore.getUri();
auto dstUri = dstStore.getUri();
auto storePathS = srcStore.printStorePath(storePath);
auto storePathS = srcStore.printStorePath(missingPath);
Activity act(*logger, lvlInfo, actCopyPath,
makeCopyPathMessage(srcUri, dstUri, storePathS),
{storePathS, srcUri, dstUri});
PushActivity pact(act.id);
auto info = srcStore.queryPathInfo(storePath);
info->write(sink, srcStore, 16);
srcStore.narFromPath(storePath, sink);
}
});
dstStore.addMultipleToStore(*source, repair, checkSigs);
#if 0
std::atomic<size_t> nrDone{0};
std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
std::atomic<uint64_t> nrRunning{0};
auto showProgress = [&]() {
act.progress(nrDone, missing.size(), nrRunning, nrFailed);
};
ThreadPool pool;
processGraph<StorePath>(pool,
StorePathSet(missing.begin(), missing.end()),
[&](const StorePath & storePath) {
auto info = srcStore.queryPathInfo(storePath);
auto storePathForDst = storePath;
if (info->ca && info->references.empty()) {
storePathForDst = dstStore.makeFixedOutputPathFromCA(
info->fullStorePathDescriptorOpt().value());
if (dstStore.storeDir == srcStore.storeDir)
assert(storePathForDst == storePath);
if (storePathForDst != storePath)
debug("replaced path '%s' to '%s' for substituter '%s'",
srcStore.printStorePath(storePath),
dstStore.printStorePath(storePathForDst),
dstStore.getUri());
}
pathsMap.insert_or_assign(storePath, storePathForDst);
if (dstStore.isValidPath(storePath)) {
nrDone++;
showProgress();
return StorePathSet();
}
bytesExpected += info->narSize;
act.setExpected(actCopyPath, bytesExpected);
return info->references;
},
[&](const StorePath & storePath) {
checkInterrupt();
auto info = srcStore.queryPathInfo(storePath);
auto storePathForDst = storePath;
if (info->ca && info->references.empty()) {
storePathForDst = dstStore.makeFixedOutputPathFromCA(
info->fullStorePathDescriptorOpt().value());
if (dstStore->storeDir == srcStore->storeDir)
assert(storePathForDst == storePath);
if (storePathForDst != storePath)
debug("replaced path '%s' to '%s' for substituter '%s'",
srcStore.printStorePath(storePath),
dstStore.printStorePath(storePathForDst),
dstStore.getUri());
}
pathsMap.insert_or_assign(storePath, storePathForDst);
if (!dstStore.isValidPath(storePathForDst)) {
MaintainCount<decltype(nrRunning)> mc(nrRunning);
showProgress();
try {
copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
} catch (Error &e) {
nrFailed++;
if (!settings.keepGoing)
throw e;
printMsg(lvlError, "could not copy %s: %s", dstStore.printStorePath(storePath), e.what());
showProgress();
return;
}
}
nrDone++;
showProgress();
srcStore.narFromPath(missingPath, sink);
});
#endif
pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
}
dstStore.addMultipleToStore(pathsToCopy, act, repair, checkSigs);
return pathsMap;
}
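
For orientation, a rough sketch of a copyPaths() caller; the store URIs are placeholders and the helper is hypothetical:

#include "store-api.hh"

using namespace nix;

void copyClosureExample(const StorePath & path)
{
    // Placeholder URIs; substitute whatever source and destination you use.
    auto srcStore = openStore("https://cache.nixos.org");
    auto dstStore = openStore("daemon");

    StorePathSet closure;
    srcStore->computeFSClosure(path, closure);

    // copyPaths() now assembles a PathsSource and hands it to
    // addMultipleToStore(), which copies the missing paths in parallel
    // while respecting the reference graph.
    copyPaths(*srcStore, *dstStore, closure, NoRepair, CheckSigs);
}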
@ -1370,7 +1396,8 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri_
return {uri, params};
}
static bool isNonUriPath(const std::string & spec) {
static bool isNonUriPath(const std::string & spec)
{
return
// is not a URL
spec.find("://") == std::string::npos
@ -1382,11 +1409,36 @@ static bool isNonUriPath(const std::string & spec) {
std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Params & params)
{
if (uri == "" || uri == "auto") {
auto stateDir = get(params, "state").value_or(settings.nixStateDir);
auto stateDir = getOr(params, "state", settings.nixStateDir);
if (access(stateDir.c_str(), R_OK | W_OK) == 0)
return std::make_shared<LocalStore>(params);
else if (pathExists(settings.nixDaemonSocketFile))
return std::make_shared<UDSRemoteStore>(params);
#if __linux__
else if (!pathExists(stateDir)
&& params.empty()
&& getuid() != 0
&& !getEnv("NIX_STORE_DIR").has_value()
&& !getEnv("NIX_STATE_DIR").has_value())
{
/* If /nix doesn't exist, there is no daemon socket, and
we're not root, then automatically set up a chroot
store in ~/.local/share/nix/root. */
auto chrootStore = getDataDir() + "/nix/root";
if (!pathExists(chrootStore)) {
try {
createDirs(chrootStore);
} catch (Error & e) {
return std::make_shared<LocalStore>(params);
}
warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
} else
debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
Store::Params params2;
params2["root"] = chrootStore;
return std::make_shared<LocalStore>(params2);
}
#endif
else
return std::make_shared<LocalStore>(params);
} else if (uri == "daemon") {
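
A rough sketch of what the new "auto" fallback amounts to for a non-root user without /nix: a plain LocalStore rooted under the user's data directory. The helper name is hypothetical; only openStore() and getDataDir() are assumed:

#include "store-api.hh"
#include "util.hh"

using namespace nix;

ref<Store> openUserChrootStore()
{
    Store::Params params;
    params["root"] = getDataDir() + "/nix/root";   // e.g. ~/.local/share/nix/root
    return openStore("local", params);
}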

View file

@ -1,5 +1,6 @@
#pragma once
#include "nar-info.hh"
#include "realisation.hh"
#include "path.hh"
#include "derived-path.hh"
@ -13,6 +14,7 @@
#include "path-info.hh"
#include "repair-flag.hh"
#include <nlohmann/json_fwd.hpp>
#include <atomic>
#include <limits>
#include <map>
@ -67,7 +69,6 @@ struct Derivation;
class FSAccessor;
class NarInfoDiskCache;
class Store;
class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@ -180,7 +181,7 @@ public:
/* Return true if path is in the Nix store (but not the Nix
store itself). */
bool isInStore(const Path & path) const;
bool isInStore(PathView path) const;
/* Return true if path is a store path, i.e. a direct child of
the Nix store. */
@ -188,7 +189,7 @@ public:
/* Split a path like /nix/store/<hash>-<name>/<bla> into
/nix/store/<hash>-<name> and /<bla>. */
std::pair<StorePath, Path> toStorePath(const Path & path) const;
std::pair<StorePath, Path> toStorePath(PathView path) const;
/* Follow symlinks until we end up with a path in the Nix store. */
Path followLinksToStore(std::string_view path) const;
@ -355,12 +356,22 @@ public:
virtual void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;
// A list of paths infos along with a source providing the content of the
// associated store path
using PathsSource = std::vector<std::pair<ValidPathInfo, std::unique_ptr<Source>>>;
/* Import multiple paths into the store. */
virtual void addMultipleToStore(
Source & source,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
virtual void addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
/* Copy the contents of a path to the store and register the
validity the resulting path. The resulting path is returned.
The function object `filter' can be used to exclude files (see
@ -497,7 +508,7 @@ public:
variable elements such as the registration time are
included. If showClosureSize is true, the closure size of
each path is included. */
void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);

View file

@ -0,0 +1,46 @@
#include "path-with-outputs.hh"
#include <gtest/gtest.h>
namespace nix {
TEST(parseOutputsSpec, basic)
{
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo");
ASSERT_EQ(prefix, "foo");
ASSERT_TRUE(std::get_if<DefaultOutputs>(&outputsSpec));
}
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo^*");
ASSERT_EQ(prefix, "foo");
ASSERT_TRUE(std::get_if<AllOutputs>(&outputsSpec));
}
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo^out");
ASSERT_EQ(prefix, "foo");
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out"}));
}
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo^out,bin");
ASSERT_EQ(prefix, "foo");
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out", "bin"}));
}
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo^bar^out,bin");
ASSERT_EQ(prefix, "foo^bar");
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out", "bin"}));
}
{
auto [prefix, outputsSpec] = parseOutputsSpec("foo^&*()");
ASSERT_EQ(prefix, "foo^&*()");
ASSERT_TRUE(std::get_if<DefaultOutputs>(&outputsSpec));
}
}
}