https://github.com/NixOS/nix.git
Merge remote-tracking branch 'origin/master' into auto-uid-allocation
Commit b95faccf03: 401 changed files with 14,006 additions and 5,711 deletions
@@ -331,6 +331,17 @@ bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
    return fileExists(narInfoFileFor(storePath));
}

std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
{
    auto pseudoPath = StorePath(hashPart + "-" + MissingName);
    try {
        auto info = queryPathInfo(pseudoPath);
        return info->path;
    } catch (InvalidPath &) {
        return std::nullopt;
    }
}

void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
{
    auto info = queryPathInfo(storePath).cast<const NarInfo>();
@@ -2,6 +2,7 @@

#include "crypto.hh"
#include "store-api.hh"
#include "log-store.hh"

#include "pool.hh"
@@ -28,7 +29,9 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
        "other than -1 which we reserve to indicate Nix defaults should be used"};
};

class BinaryCacheStore : public virtual BinaryCacheStoreConfig, public virtual Store
class BinaryCacheStore : public virtual BinaryCacheStoreConfig,
    public virtual Store,
    public virtual LogStore
{

private:
@@ -92,8 +95,7 @@ public:
    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
    { unsupported("queryPathFromHashPart"); }
    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    void addToStore(const ValidPathInfo & info, Source & narSource,
        RepairFlag repair, CheckSigsFlag checkSigs) override;
src/libstore/build-result.hh (new file, 92 lines added)
@@ -0,0 +1,92 @@
#pragma once

#include "realisation.hh"
#include "derived-path.hh"

#include <string>
#include <chrono>


namespace nix {

struct BuildResult
{
    /* Note: don't remove status codes, and only add new status codes
       at the end of the list, to prevent client/server
       incompatibilities in the nix-store --serve protocol. */
    enum Status {
        Built = 0,
        Substituted,
        AlreadyValid,
        PermanentFailure,
        InputRejected,
        OutputRejected,
        TransientFailure, // possibly transient
        CachedFailure, // no longer used
        TimedOut,
        MiscFailure,
        DependencyFailed,
        LogLimitExceeded,
        NotDeterministic,
        ResolvesToAlreadyValid,
        NoSubstituters,
    } status = MiscFailure;

    // FIXME: include entire ErrorInfo object.
    std::string errorMsg;

    std::string toString() const {
        auto strStatus = [&]() {
            switch (status) {
                case Built: return "Built";
                case Substituted: return "Substituted";
                case AlreadyValid: return "AlreadyValid";
                case PermanentFailure: return "PermanentFailure";
                case InputRejected: return "InputRejected";
                case OutputRejected: return "OutputRejected";
                case TransientFailure: return "TransientFailure";
                case CachedFailure: return "CachedFailure";
                case TimedOut: return "TimedOut";
                case MiscFailure: return "MiscFailure";
                case DependencyFailed: return "DependencyFailed";
                case LogLimitExceeded: return "LogLimitExceeded";
                case NotDeterministic: return "NotDeterministic";
                case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
                default: return "Unknown";
            };
        }();
        return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
    }

    /* How many times this build was performed. */
    unsigned int timesBuilt = 0;

    /* If timesBuilt > 1, whether some builds did not produce the same
       result. (Note that 'isNonDeterministic = false' does not mean
       the build is deterministic, just that we don't have evidence of
       non-determinism.) */
    bool isNonDeterministic = false;

    /* The derivation we built or the store path we substituted. */
    DerivedPath path;

    /* For derivations, a mapping from the names of the wanted outputs
       to actual paths. */
    DrvOutputs builtOutputs;

    /* The start/stop times of the build (or one of the rounds, if it
       was repeated). */
    time_t startTime = 0, stopTime = 0;

    bool success()
    {
        return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
    }

    void rethrow()
    {
        throw Error("%s", errorMsg);
    }
};

}
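For orientation (this note is not part of the commit): a BuildResult like the one defined above is what callers receive from the new Store::buildPathsWithResults() API added later in this diff. A minimal sketch of inspecting one result follows; the printResult helper and the iostream reporting are illustrative assumptions, not code from the commit.

    // Hypothetical helper (not part of the commit): report one BuildResult.
    #include "build-result.hh"
    #include "store-api.hh"

    #include <iostream>

    void printResult(nix::Store & store, const nix::BuildResult & res)
    {
        // toString() combines the status name with any error message.
        std::cerr << res.toString() << "\n";

        if (!res.success()) return;

        // builtOutputs maps DrvOutput ids to Realisations of the wanted outputs.
        for (auto & [id, realisation] : res.builtOutputs)
            std::cerr << id.outputName << " -> "
                      << store.printStorePath(realisation.outPath) << "\n";
    }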
@ -66,7 +66,7 @@ namespace nix {
|
|||
|
||||
DerivationGoal::DerivationGoal(const StorePath & drvPath,
|
||||
const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
|
||||
: Goal(worker)
|
||||
: Goal(worker, DerivedPath::Built { .drvPath = drvPath, .outputs = wantedOutputs })
|
||||
, useDerivation(true)
|
||||
, drvPath(drvPath)
|
||||
, wantedOutputs(wantedOutputs)
|
||||
|
|
@ -85,7 +85,7 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath,
|
|||
|
||||
DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
|
||||
: Goal(worker)
|
||||
: Goal(worker, DerivedPath::Built { .drvPath = drvPath, .outputs = wantedOutputs })
|
||||
, useDerivation(false)
|
||||
, drvPath(drvPath)
|
||||
, wantedOutputs(wantedOutputs)
|
||||
|
|
@ -135,7 +135,7 @@ void DerivationGoal::killChild()
|
|||
void DerivationGoal::timedOut(Error && ex)
|
||||
{
|
||||
killChild();
|
||||
done(BuildResult::TimedOut, ex);
|
||||
done(BuildResult::TimedOut, {}, ex);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -182,7 +182,7 @@ void DerivationGoal::loadDerivation()
|
|||
trace("loading derivation");
|
||||
|
||||
if (nrFailed != 0) {
|
||||
done(BuildResult::MiscFailure, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath)));
|
||||
done(BuildResult::MiscFailure, {}, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath)));
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -204,10 +204,33 @@ void DerivationGoal::haveDerivation()
|
|||
{
|
||||
trace("have derivation");
|
||||
|
||||
if (drv->type() == DerivationType::CAFloating)
|
||||
parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
|
||||
|
||||
if (!drv->type().hasKnownOutputPaths())
|
||||
settings.requireExperimentalFeature(Xp::CaDerivations);
|
||||
|
||||
retrySubstitution = false;
|
||||
if (!drv->type().isPure()) {
|
||||
settings.requireExperimentalFeature(Xp::ImpureDerivations);
|
||||
|
||||
for (auto & [outputName, output] : drv->outputs) {
|
||||
auto randomPath = StorePath::random(outputPathName(drv->name, outputName));
|
||||
assert(!worker.store.isValidPath(randomPath));
|
||||
initialOutputs.insert({
|
||||
outputName,
|
||||
InitialOutput {
|
||||
.wanted = true,
|
||||
.outputHash = impureOutputHash,
|
||||
.known = InitialOutputStatus {
|
||||
.path = randomPath,
|
||||
.status = PathStatus::Absent
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
gaveUpOnSubstitution();
|
||||
return;
|
||||
}
|
||||
|
||||
for (auto & i : drv->outputsAndOptPaths(worker.store))
|
||||
if (i.second.second)
|
||||
|
|
@ -215,34 +238,23 @@ void DerivationGoal::haveDerivation()
|
|||
|
||||
auto outputHashes = staticOutputHashes(worker.evalStore, *drv);
|
||||
for (auto & [outputName, outputHash] : outputHashes)
|
||||
initialOutputs.insert({
|
||||
initialOutputs.insert({
|
||||
outputName,
|
||||
InitialOutput{
|
||||
InitialOutput {
|
||||
.wanted = true, // Will be refined later
|
||||
.outputHash = outputHash
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
/* Check what outputs paths are not already valid. */
|
||||
checkPathValidity();
|
||||
bool allValid = true;
|
||||
for (auto & [_, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.known || !status.known->isValid()) {
|
||||
allValid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
auto [allValid, validOutputs] = checkPathValidity();
|
||||
|
||||
/* If they are all valid, then we're done. */
|
||||
if (allValid && buildMode == bmNormal) {
|
||||
done(BuildResult::AlreadyValid);
|
||||
done(BuildResult::AlreadyValid, std::move(validOutputs));
|
||||
return;
|
||||
}
|
||||
|
||||
parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
|
||||
|
||||
|
||||
/* We are first going to try to create the invalid output paths
|
||||
through substitutes. If that doesn't work, we'll build
|
||||
them. */
|
||||
|
|
@ -276,8 +288,10 @@ void DerivationGoal::outputsSubstitutionTried()
|
|||
{
|
||||
trace("all outputs substituted (maybe)");
|
||||
|
||||
assert(drv->type().isPure());
|
||||
|
||||
if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback) {
|
||||
done(BuildResult::TransientFailure,
|
||||
done(BuildResult::TransientFailure, {},
|
||||
Error("some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ",
|
||||
worker.store.printStorePath(drvPath)));
|
||||
return;
|
||||
|
|
@ -301,23 +315,17 @@ void DerivationGoal::outputsSubstitutionTried()
|
|||
return;
|
||||
}
|
||||
|
||||
checkPathValidity();
|
||||
size_t nrInvalid = 0;
|
||||
for (auto & [_, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.known || !status.known->isValid())
|
||||
nrInvalid++;
|
||||
}
|
||||
auto [allValid, validOutputs] = checkPathValidity();
|
||||
|
||||
if (buildMode == bmNormal && nrInvalid == 0) {
|
||||
done(BuildResult::Substituted);
|
||||
if (buildMode == bmNormal && allValid) {
|
||||
done(BuildResult::Substituted, std::move(validOutputs));
|
||||
return;
|
||||
}
|
||||
if (buildMode == bmRepair && nrInvalid == 0) {
|
||||
if (buildMode == bmRepair && allValid) {
|
||||
repairClosure();
|
||||
return;
|
||||
}
|
||||
if (buildMode == bmCheck && nrInvalid > 0)
|
||||
if (buildMode == bmCheck && !allValid)
|
||||
throw Error("some outputs of '%s' are not valid, so checking is not possible",
|
||||
worker.store.printStorePath(drvPath));
|
||||
|
||||
|
|
@ -325,18 +333,27 @@ void DerivationGoal::outputsSubstitutionTried()
|
|||
gaveUpOnSubstitution();
|
||||
}
|
||||
|
||||
|
||||
/* At least one of the output paths could not be
|
||||
produced using a substitute. So we have to build instead. */
|
||||
void DerivationGoal::gaveUpOnSubstitution()
|
||||
{
|
||||
/* Make sure checkPathValidity() from now on checks all
|
||||
outputs. */
|
||||
wantedOutputs.clear();
|
||||
|
||||
/* The inputs must be built before we can build this goal. */
|
||||
inputDrvOutputs.clear();
|
||||
if (useDerivation)
|
||||
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs)
|
||||
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
|
||||
/* Ensure that pure, non-fixed-output derivations don't
|
||||
depend on impure derivations. */
|
||||
if (settings.isExperimentalFeatureEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) {
|
||||
auto inputDrv = worker.evalStore.readDerivation(i.first);
|
||||
if (!inputDrv.type().isPure())
|
||||
throw Error("pure derivation '%s' depends on impure derivation '%s'",
|
||||
worker.store.printStorePath(drvPath),
|
||||
worker.store.printStorePath(i.first));
|
||||
}
|
||||
|
||||
addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal));
|
||||
}
|
||||
|
||||
/* Copy the input sources from the eval store to the build
|
||||
store. */
|
||||
|
|
@ -364,6 +381,8 @@ void DerivationGoal::gaveUpOnSubstitution()
|
|||
|
||||
void DerivationGoal::repairClosure()
|
||||
{
|
||||
assert(drv->type().isPure());
|
||||
|
||||
/* If we're repairing, we now know that our own outputs are valid.
|
||||
Now check whether the other paths in the outputs closure are
|
||||
good. If not, then start derivation goals for the derivations
|
||||
|
|
@ -409,7 +428,7 @@ void DerivationGoal::repairClosure()
|
|||
}
|
||||
|
||||
if (waitees.empty()) {
|
||||
done(BuildResult::AlreadyValid);
|
||||
done(BuildResult::AlreadyValid, assertPathValidity());
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -423,7 +442,7 @@ void DerivationGoal::closureRepaired()
|
|||
if (nrFailed > 0)
|
||||
throw Error("some paths in the output closure of derivation '%s' could not be repaired",
|
||||
worker.store.printStorePath(drvPath));
|
||||
done(BuildResult::AlreadyValid);
|
||||
done(BuildResult::AlreadyValid, assertPathValidity());
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -434,13 +453,14 @@ void DerivationGoal::inputsRealised()
|
|||
if (nrFailed != 0) {
|
||||
if (!useDerivation)
|
||||
throw Error("some dependencies of '%s' are missing", worker.store.printStorePath(drvPath));
|
||||
done(BuildResult::DependencyFailed, Error(
|
||||
done(BuildResult::DependencyFailed, {}, Error(
|
||||
"%s dependencies of derivation '%s' failed to build",
|
||||
nrFailed, worker.store.printStorePath(drvPath)));
|
||||
return;
|
||||
}
|
||||
|
||||
if (retrySubstitution) {
|
||||
if (retrySubstitution && !retriedSubstitution) {
|
||||
retriedSubstitution = true;
|
||||
haveDerivation();
|
||||
return;
|
||||
}
|
||||
|
|
@ -454,19 +474,40 @@ void DerivationGoal::inputsRealised()
|
|||
if (useDerivation) {
|
||||
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
|
||||
|
||||
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) &&
|
||||
((!fullDrv.inputDrvs.empty() && derivationIsCA(fullDrv.type()))
|
||||
|| fullDrv.type() == DerivationType::DeferredInputAddressed)) {
|
||||
auto drvType = fullDrv.type();
|
||||
bool resolveDrv = std::visit(overloaded {
|
||||
[&](const DerivationType::InputAddressed & ia) {
|
||||
/* must resolve if deferred. */
|
||||
return ia.deferred;
|
||||
},
|
||||
[&](const DerivationType::ContentAddressed & ca) {
|
||||
return !fullDrv.inputDrvs.empty() && (
|
||||
ca.fixed
|
||||
/* Can optionally resolve if fixed, which is good
|
||||
for avoiding unnecessary rebuilds. */
|
||||
? settings.isExperimentalFeatureEnabled(Xp::CaDerivations)
|
||||
/* Must resolve if floating and there are any inputs
|
||||
drvs. */
|
||||
: true);
|
||||
},
|
||||
[&](const DerivationType::Impure &) {
|
||||
return true;
|
||||
}
|
||||
}, drvType.raw());
|
||||
|
||||
if (resolveDrv && !fullDrv.inputDrvs.empty()) {
|
||||
settings.requireExperimentalFeature(Xp::CaDerivations);
|
||||
|
||||
/* We are be able to resolve this derivation based on the
|
||||
now-known results of dependencies. If so, we become a stub goal
|
||||
aliasing that resolved derivation goal */
|
||||
std::optional attempt = fullDrv.tryResolve(worker.store);
|
||||
now-known results of dependencies. If so, we become a
|
||||
stub goal aliasing that resolved derivation goal. */
|
||||
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
|
||||
assert(attempt);
|
||||
Derivation drvResolved { *std::move(attempt) };
|
||||
|
||||
auto pathResolved = writeDerivation(worker.store, drvResolved);
|
||||
|
||||
auto msg = fmt("Resolved derivation: '%s' -> '%s'",
|
||||
auto msg = fmt("resolved derivation: '%s' -> '%s'",
|
||||
worker.store.printStorePath(drvPath),
|
||||
worker.store.printStorePath(pathResolved));
|
||||
act = std::make_unique<Activity>(*logger, lvlInfo, actBuildWaiting, msg,
|
||||
|
|
@ -487,21 +528,13 @@ void DerivationGoal::inputsRealised()
|
|||
/* Add the relevant output closures of the input derivation
|
||||
`i' as input paths. Only add the closures of output paths
|
||||
that are specified as inputs. */
|
||||
assert(worker.evalStore.isValidPath(drvPath));
|
||||
auto outputs = worker.evalStore.queryPartialDerivationOutputMap(depDrvPath);
|
||||
for (auto & j : wantedDepOutputs) {
|
||||
if (outputs.count(j) > 0) {
|
||||
auto optRealizedInput = outputs.at(j);
|
||||
if (!optRealizedInput)
|
||||
throw Error(
|
||||
"derivation '%s' requires output '%s' from input derivation '%s', which is supposedly realized already, yet we still don't know what path corresponds to that output",
|
||||
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
|
||||
worker.store.computeFSClosure(*optRealizedInput, inputPaths);
|
||||
} else
|
||||
for (auto & j : wantedDepOutputs)
|
||||
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j }))
|
||||
worker.store.computeFSClosure(*outPath, inputPaths);
|
||||
else
|
||||
throw Error(
|
||||
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
|
||||
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -515,7 +548,7 @@ void DerivationGoal::inputsRealised()
|
|||
|
||||
/* Don't repeat fixed-output derivations since they're already
|
||||
verified by their output hash.*/
|
||||
nrRounds = derivationIsFixed(derivationType) ? 1 : settings.buildRepeat + 1;
|
||||
nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
|
||||
|
||||
/* Okay, try to build. Note that here we don't wait for a build
|
||||
slot to become available, since we don't need one if there is a
|
||||
|
|
@ -523,10 +556,11 @@ void DerivationGoal::inputsRealised()
|
|||
state = &DerivationGoal::tryToBuild;
|
||||
worker.wakeUp(shared_from_this());
|
||||
|
||||
result = BuildResult();
|
||||
buildResult = BuildResult { .path = buildResult.path };
|
||||
}
|
||||
|
||||
void DerivationGoal::started() {
|
||||
void DerivationGoal::started()
|
||||
{
|
||||
auto msg = fmt(
|
||||
buildMode == bmRepair ? "repairing outputs of '%s'" :
|
||||
buildMode == bmCheck ? "checking outputs of '%s'" :
|
||||
|
|
@ -588,19 +622,12 @@ void DerivationGoal::tryToBuild()
|
|||
omitted, but that would be less efficient.) Note that since we
|
||||
now hold the locks on the output paths, no other process can
|
||||
build this derivation, so no further checks are necessary. */
|
||||
checkPathValidity();
|
||||
bool allValid = true;
|
||||
for (auto & [_, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.known || !status.known->isValid()) {
|
||||
allValid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
auto [allValid, validOutputs] = checkPathValidity();
|
||||
|
||||
if (buildMode != bmCheck && allValid) {
|
||||
debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath));
|
||||
outputLocks.setDeletion(true);
|
||||
done(BuildResult::AlreadyValid);
|
||||
done(BuildResult::AlreadyValid, std::move(validOutputs));
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -626,7 +653,7 @@ void DerivationGoal::tryToBuild()
|
|||
/* Yes, it has started doing so. Wait until we get
|
||||
EOF from the hook. */
|
||||
actLock.reset();
|
||||
result.startTime = time(0); // inexact
|
||||
buildResult.startTime = time(0); // inexact
|
||||
state = &DerivationGoal::buildDone;
|
||||
started();
|
||||
return;
|
||||
|
|
@ -678,8 +705,7 @@ static void movePath(const Path & src, const Path & dst)
|
|||
if (changePerm)
|
||||
chmod_(src, st.st_mode | S_IWUSR);
|
||||
|
||||
if (rename(src.c_str(), dst.c_str()))
|
||||
throw SysError("renaming '%1%' to '%2%'", src, dst);
|
||||
renameFile(src, dst);
|
||||
|
||||
if (changePerm)
|
||||
chmod_(dst, st.st_mode);
|
||||
|
|
@ -759,8 +785,7 @@ void runPostBuildHook(
|
|||
Store & store,
|
||||
Logger & logger,
|
||||
const StorePath & drvPath,
|
||||
StorePathSet outputPaths
|
||||
)
|
||||
const StorePathSet & outputPaths)
|
||||
{
|
||||
auto hook = settings.postBuildHook;
|
||||
if (hook == "")
|
||||
|
|
@ -830,8 +855,8 @@ void DerivationGoal::buildDone()
|
|||
|
||||
debug("builder process for '%s' finished", worker.store.printStorePath(drvPath));
|
||||
|
||||
result.timesBuilt++;
|
||||
result.stopTime = time(0);
|
||||
buildResult.timesBuilt++;
|
||||
buildResult.stopTime = time(0);
|
||||
|
||||
/* So the child is gone now. */
|
||||
worker.childTerminated(this);
|
||||
|
|
@ -876,11 +901,11 @@ void DerivationGoal::buildDone()
|
|||
|
||||
/* Compute the FS closure of the outputs and register them as
|
||||
being valid. */
|
||||
registerOutputs();
|
||||
auto builtOutputs = registerOutputs();
|
||||
|
||||
StorePathSet outputPaths;
|
||||
for (auto & [_, path] : finalOutputs)
|
||||
outputPaths.insert(path);
|
||||
for (auto & [_, output] : builtOutputs)
|
||||
outputPaths.insert(output.outPath);
|
||||
runPostBuildHook(
|
||||
worker.store,
|
||||
*logger,
|
||||
|
|
@ -888,12 +913,6 @@ void DerivationGoal::buildDone()
|
|||
outputPaths
|
||||
);
|
||||
|
||||
if (buildMode == bmCheck) {
|
||||
cleanupPostOutputsRegisteredModeCheck();
|
||||
done(BuildResult::Built);
|
||||
return;
|
||||
}
|
||||
|
||||
cleanupPostOutputsRegisteredModeNonCheck();
|
||||
|
||||
/* Repeat the build if necessary. */
|
||||
|
|
@ -911,6 +930,8 @@ void DerivationGoal::buildDone()
|
|||
outputLocks.setDeletion(true);
|
||||
outputLocks.unlock();
|
||||
|
||||
done(BuildResult::Built, std::move(builtOutputs));
|
||||
|
||||
} catch (BuildError & e) {
|
||||
outputLocks.unlock();
|
||||
|
||||
|
|
@ -926,71 +947,73 @@ void DerivationGoal::buildDone()
|
|||
st =
|
||||
dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic :
|
||||
statusOk(status) ? BuildResult::OutputRejected :
|
||||
derivationIsImpure(derivationType) || diskFull ? BuildResult::TransientFailure :
|
||||
!derivationType.isSandboxed() || diskFull ? BuildResult::TransientFailure :
|
||||
BuildResult::PermanentFailure;
|
||||
}
|
||||
|
||||
done(st, e);
|
||||
done(st, {}, e);
|
||||
return;
|
||||
}
|
||||
|
||||
done(BuildResult::Built);
|
||||
}
|
||||
|
||||
void DerivationGoal::resolvedFinished() {
|
||||
void DerivationGoal::resolvedFinished()
|
||||
{
|
||||
trace("resolved derivation finished");
|
||||
|
||||
assert(resolvedDrvGoal);
|
||||
auto resolvedDrv = *resolvedDrvGoal->drv;
|
||||
auto & resolvedResult = resolvedDrvGoal->buildResult;
|
||||
|
||||
auto resolvedHashes = staticOutputHashes(worker.store, resolvedDrv);
|
||||
DrvOutputs builtOutputs;
|
||||
|
||||
StorePathSet outputPaths;
|
||||
if (resolvedResult.success()) {
|
||||
auto resolvedHashes = staticOutputHashes(worker.store, resolvedDrv);
|
||||
|
||||
// `wantedOutputs` might be empty, which means “all the outputs”
|
||||
auto realWantedOutputs = wantedOutputs;
|
||||
if (realWantedOutputs.empty())
|
||||
realWantedOutputs = resolvedDrv.outputNames();
|
||||
StorePathSet outputPaths;
|
||||
|
||||
for (auto & wantedOutput : realWantedOutputs) {
|
||||
assert(initialOutputs.count(wantedOutput) != 0);
|
||||
assert(resolvedHashes.count(wantedOutput) != 0);
|
||||
auto realisation = worker.store.queryRealisation(
|
||||
DrvOutput{resolvedHashes.at(wantedOutput), wantedOutput}
|
||||
);
|
||||
// We've just built it, but maybe the build failed, in which case the
|
||||
// realisation won't be there
|
||||
if (realisation) {
|
||||
auto newRealisation = *realisation;
|
||||
newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
|
||||
newRealisation.signatures.clear();
|
||||
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
|
||||
signRealisation(newRealisation);
|
||||
worker.store.registerDrvOutput(newRealisation);
|
||||
// `wantedOutputs` might be empty, which means “all the outputs”
|
||||
auto realWantedOutputs = wantedOutputs;
|
||||
if (realWantedOutputs.empty())
|
||||
realWantedOutputs = resolvedDrv.outputNames();
|
||||
|
||||
for (auto & wantedOutput : realWantedOutputs) {
|
||||
auto initialOutput = get(initialOutputs, wantedOutput);
|
||||
auto resolvedHash = get(resolvedHashes, wantedOutput);
|
||||
if ((!initialOutput) || (!resolvedHash))
|
||||
throw Error(
|
||||
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
|
||||
worker.store.printStorePath(drvPath), wantedOutput);
|
||||
auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
|
||||
if (!realisation)
|
||||
throw Error(
|
||||
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
|
||||
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
|
||||
if (drv->type().isPure()) {
|
||||
auto newRealisation = *realisation;
|
||||
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
|
||||
newRealisation.signatures.clear();
|
||||
if (!drv->type().isFixed())
|
||||
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
|
||||
signRealisation(newRealisation);
|
||||
worker.store.registerDrvOutput(newRealisation);
|
||||
}
|
||||
outputPaths.insert(realisation->outPath);
|
||||
} else {
|
||||
// If we don't have a realisation, then it must mean that something
|
||||
// failed when building the resolved drv
|
||||
assert(!result.success());
|
||||
builtOutputs.emplace(realisation->id, *realisation);
|
||||
}
|
||||
|
||||
runPostBuildHook(
|
||||
worker.store,
|
||||
*logger,
|
||||
drvPath,
|
||||
outputPaths
|
||||
);
|
||||
}
|
||||
|
||||
runPostBuildHook(
|
||||
worker.store,
|
||||
*logger,
|
||||
drvPath,
|
||||
outputPaths
|
||||
);
|
||||
auto status = resolvedResult.status;
|
||||
if (status == BuildResult::AlreadyValid)
|
||||
status = BuildResult::ResolvesToAlreadyValid;
|
||||
|
||||
auto status = [&]() {
|
||||
auto resolvedResult = resolvedDrvGoal->getResult();
|
||||
switch (resolvedResult.status) {
|
||||
case BuildResult::AlreadyValid:
|
||||
return BuildResult::ResolvesToAlreadyValid;
|
||||
default:
|
||||
return resolvedResult.status;
|
||||
}
|
||||
}();
|
||||
|
||||
done(status);
|
||||
done(status, std::move(builtOutputs));
|
||||
}
|
||||
|
||||
HookReply DerivationGoal::tryBuildHook()
|
||||
|
|
@ -1100,7 +1123,7 @@ HookReply DerivationGoal::tryBuildHook()
|
|||
}
|
||||
|
||||
|
||||
void DerivationGoal::registerOutputs()
|
||||
DrvOutputs DerivationGoal::registerOutputs()
|
||||
{
|
||||
/* When using a build hook, the build hook can register the output
|
||||
as valid (by doing `nix-store --import'). If so we don't have
|
||||
|
|
@ -1109,21 +1132,7 @@ void DerivationGoal::registerOutputs()
|
|||
We can only early return when the outputs are known a priori. For
|
||||
floating content-addressed derivations this isn't the case.
|
||||
*/
|
||||
for (auto & [outputName, optOutputPath] : worker.store.queryPartialDerivationOutputMap(drvPath)) {
|
||||
if (!wantOutput(outputName, wantedOutputs))
|
||||
continue;
|
||||
if (!optOutputPath)
|
||||
throw BuildError(
|
||||
"output '%s' from derivation '%s' does not have a known output path",
|
||||
outputName, worker.store.printStorePath(drvPath));
|
||||
auto & outputPath = *optOutputPath;
|
||||
if (!worker.store.isValidPath(outputPath))
|
||||
throw BuildError(
|
||||
"output '%s' from derivation '%s' is supposed to be at '%s' but that path is not valid",
|
||||
outputName, worker.store.printStorePath(drvPath), worker.store.printStorePath(outputPath));
|
||||
|
||||
finalOutputs.insert_or_assign(outputName, outputPath);
|
||||
}
|
||||
return assertPathValidity();
|
||||
}
|
||||
|
||||
Path DerivationGoal::openLogFile()
|
||||
|
|
@ -1175,16 +1184,17 @@ bool DerivationGoal::isReadDesc(int fd)
|
|||
return fd == hook->builderOut.readSide.get();
|
||||
}
|
||||
|
||||
|
||||
void DerivationGoal::handleChildOutput(int fd, std::string_view data)
|
||||
{
|
||||
if (isReadDesc(fd))
|
||||
// local & `ssh://`-builds are dealt with here.
|
||||
auto isWrittenToLog = isReadDesc(fd);
|
||||
if (isWrittenToLog)
|
||||
{
|
||||
logSize += data.size();
|
||||
if (settings.maxLogSize && logSize > settings.maxLogSize) {
|
||||
killChild();
|
||||
done(
|
||||
BuildResult::LogLimitExceeded,
|
||||
BuildResult::LogLimitExceeded, {},
|
||||
Error("%s killed after writing more than %d bytes of log output",
|
||||
getName(), settings.maxLogSize));
|
||||
return;
|
||||
|
|
@ -1207,7 +1217,16 @@ void DerivationGoal::handleChildOutput(int fd, std::string_view data)
|
|||
if (hook && fd == hook->fromHook.readSide.get()) {
|
||||
for (auto c : data)
|
||||
if (c == '\n') {
|
||||
handleJSONLogMessage(currentHookLine, worker.act, hook->activities, true);
|
||||
auto json = parseJSONMessage(currentHookLine);
|
||||
if (json) {
|
||||
auto s = handleJSONLogMessage(*json, worker.act, hook->activities, true);
|
||||
// ensure that logs from a builder using `ssh-ng://` as protocol
|
||||
// are also available to `nix log`.
|
||||
if (s && !isWrittenToLog && logSink && (*json)["type"] == resBuildLogLine) {
|
||||
auto f = (*json)["fields"];
|
||||
(*logSink)((f.size() > 0 ? f.at(0).get<std::string>() : "") + "\n");
|
||||
}
|
||||
}
|
||||
currentHookLine.clear();
|
||||
} else
|
||||
currentHookLine += c;
|
||||
|
|
@ -1241,7 +1260,8 @@ void DerivationGoal::flushLine()
|
|||
|
||||
std::map<std::string, std::optional<StorePath>> DerivationGoal::queryPartialDerivationOutputMap()
|
||||
{
|
||||
if (!useDerivation || drv->type() != DerivationType::CAFloating) {
|
||||
assert(drv->type().isPure());
|
||||
if (!useDerivation || drv->type().hasKnownOutputPaths()) {
|
||||
std::map<std::string, std::optional<StorePath>> res;
|
||||
for (auto & [name, output] : drv->outputs)
|
||||
res.insert_or_assign(name, output.path(worker.store, drv->name, name));
|
||||
|
|
@ -1253,7 +1273,8 @@ std::map<std::string, std::optional<StorePath>> DerivationGoal::queryPartialDeri
|
|||
|
||||
OutputPathMap DerivationGoal::queryDerivationOutputMap()
|
||||
{
|
||||
if (!useDerivation || drv->type() != DerivationType::CAFloating) {
|
||||
assert(drv->type().isPure());
|
||||
if (!useDerivation || drv->type().hasKnownOutputPaths()) {
|
||||
OutputPathMap res;
|
||||
for (auto & [name, output] : drv->outputsAndOptPaths(worker.store))
|
||||
res.insert_or_assign(name, *output.second);
|
||||
|
|
@ -1264,12 +1285,20 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
|
|||
}
|
||||
|
||||
|
||||
void DerivationGoal::checkPathValidity()
|
||||
std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
|
||||
{
|
||||
if (!drv->type().isPure()) return { false, {} };
|
||||
|
||||
bool checkHash = buildMode == bmRepair;
|
||||
auto wantedOutputsLeft = wantedOutputs;
|
||||
DrvOutputs validOutputs;
|
||||
|
||||
for (auto & i : queryPartialDerivationOutputMap()) {
|
||||
InitialOutput & info = initialOutputs.at(i.first);
|
||||
auto initialOutput = get(initialOutputs, i.first);
|
||||
if (!initialOutput)
|
||||
// this is an invalid output, gets catched with (!wantedOutputsLeft.empty())
|
||||
continue;
|
||||
auto & info = *initialOutput;
|
||||
info.wanted = wantOutput(i.first, wantedOutputs);
|
||||
if (info.wanted)
|
||||
wantedOutputsLeft.erase(i.first);
|
||||
|
|
@ -1284,27 +1313,30 @@ void DerivationGoal::checkPathValidity()
|
|||
: PathStatus::Corrupt,
|
||||
};
|
||||
}
|
||||
auto drvOutput = DrvOutput{info.outputHash, i.first};
|
||||
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
|
||||
auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
|
||||
if (auto real = worker.store.queryRealisation(drvOutput)) {
|
||||
info.known = {
|
||||
.path = real->outPath,
|
||||
.status = PathStatus::Valid,
|
||||
};
|
||||
} else if (info.known && info.known->status == PathStatus::Valid) {
|
||||
// We know the output because it' a static output of the
|
||||
} else if (info.known && info.known->isValid()) {
|
||||
// We know the output because it's a static output of the
|
||||
// derivation, and the output path is valid, but we don't have
|
||||
// its realisation stored (probably because it has been built
|
||||
// without the `ca-derivations` experimental flag)
|
||||
// without the `ca-derivations` experimental flag).
|
||||
worker.store.registerDrvOutput(
|
||||
Realisation{
|
||||
Realisation {
|
||||
drvOutput,
|
||||
info.known->path,
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
if (info.wanted && info.known && info.known->isValid())
|
||||
validOutputs.emplace(drvOutput, Realisation { drvOutput, info.known->path });
|
||||
}
|
||||
|
||||
// If we requested all the outputs via the empty set, we are always fine.
|
||||
// If we requested specific elements, the loop above removes all the valid
|
||||
// ones, so any that are left must be invalid.
|
||||
|
|
@ -1312,24 +1344,48 @@ void DerivationGoal::checkPathValidity()
|
|||
throw Error("derivation '%s' does not have wanted outputs %s",
|
||||
worker.store.printStorePath(drvPath),
|
||||
concatStringsSep(", ", quoteStrings(wantedOutputsLeft)));
|
||||
|
||||
bool allValid = true;
|
||||
for (auto & [_, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.known || !status.known->isValid()) {
|
||||
allValid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return { allValid, validOutputs };
|
||||
}
|
||||
|
||||
|
||||
void DerivationGoal::done(BuildResult::Status status, std::optional<Error> ex)
|
||||
DrvOutputs DerivationGoal::assertPathValidity()
|
||||
{
|
||||
result.status = status;
|
||||
auto [allValid, validOutputs] = checkPathValidity();
|
||||
if (!allValid)
|
||||
throw Error("some outputs are unexpectedly invalid");
|
||||
return validOutputs;
|
||||
}
|
||||
|
||||
|
||||
void DerivationGoal::done(
|
||||
BuildResult::Status status,
|
||||
DrvOutputs builtOutputs,
|
||||
std::optional<Error> ex)
|
||||
{
|
||||
buildResult.status = status;
|
||||
if (ex)
|
||||
result.errorMsg = ex->what();
|
||||
amDone(result.success() ? ecSuccess : ecFailed, ex);
|
||||
if (result.status == BuildResult::TimedOut)
|
||||
buildResult.errorMsg = fmt("%s", normaltxt(ex->info().msg));
|
||||
if (buildResult.status == BuildResult::TimedOut)
|
||||
worker.timedOut = true;
|
||||
if (result.status == BuildResult::PermanentFailure)
|
||||
if (buildResult.status == BuildResult::PermanentFailure)
|
||||
worker.permanentFailure = true;
|
||||
|
||||
mcExpectedBuilds.reset();
|
||||
mcRunningBuilds.reset();
|
||||
|
||||
if (result.success()) {
|
||||
if (buildResult.success()) {
|
||||
assert(!builtOutputs.empty());
|
||||
buildResult.builtOutputs = std::move(builtOutputs);
|
||||
if (status == BuildResult::Built)
|
||||
worker.doneBuilds++;
|
||||
} else {
|
||||
|
|
@ -1343,9 +1399,23 @@ void DerivationGoal::done(BuildResult::Status status, std::optional<Error> ex)
|
|||
if (traceBuiltOutputsFile != "") {
|
||||
std::fstream fs;
|
||||
fs.open(traceBuiltOutputsFile, std::fstream::out);
|
||||
fs << worker.store.printStorePath(drvPath) << "\t" << result.toString() << std::endl;
|
||||
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
|
||||
}
|
||||
|
||||
amDone(buildResult.success() ? ecSuccess : ecFailed, ex);
|
||||
}
|
||||
|
||||
|
||||
void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result)
|
||||
{
|
||||
Goal::waiteeDone(waitee, result);
|
||||
|
||||
if (waitee->buildResult.success())
|
||||
if (auto bfd = std::get_if<DerivedPath::Built>(&waitee->buildResult.path))
|
||||
for (auto & [output, realisation] : waitee->buildResult.builtOutputs)
|
||||
inputDrvOutputs.insert_or_assign(
|
||||
{ bfd->drvPath, output.outputName },
|
||||
realisation.outPath);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -57,12 +57,21 @@ struct DerivationGoal : public Goal
       them. */
    StringSet wantedOutputs;

    /* Mapping from input derivations + output names to actual store
       paths. This is filled in by waiteeDone() as each dependency
       finishes, before inputsRealised() is reached, */
    std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;

    /* Whether additional wanted outputs have been added. */
    bool needRestart = false;

    /* Whether to retry substituting the outputs after building the
       inputs. */
    bool retrySubstitution;
       inputs. This is done in case of an incomplete closure. */
    bool retrySubstitution = false;

    /* Whether we've retried substitution, in which case we won't try
       again. */
    bool retriedSubstitution = false;

    /* The derivation stored at drvPath. */
    std::unique_ptr<Derivation> drv;
@@ -104,20 +113,8 @@ struct DerivationGoal : public Goal
    typedef void (DerivationGoal::*GoalState)();
    GoalState state;

    /* The final output paths of the build.

       - For input-addressed derivations, always the precomputed paths

       - For content-addressed derivations, calcuated from whatever the hash
         ends up being. (Note that fixed outputs derivations that produce the
         "wrong" output still install that data under its true content-address.)
    */
    OutputPathMap finalOutputs;

    BuildMode buildMode;

    BuildResult result;

    /* The current round, if we're building multiple times. */
    size_t curRound = 1;

@@ -152,8 +149,6 @@ struct DerivationGoal : public Goal
    /* Add wanted outputs to an already existing derivation goal. */
    void addWantedOutputs(const StringSet & outputs);

    BuildResult getResult() { return result; }

    /* The states. */
    void getDerivation();
    void loadDerivation();
@@ -175,7 +170,7 @@ struct DerivationGoal : public Goal

    /* Check that the derivation outputs all exist and register them
       as valid. */
    virtual void registerOutputs();
    virtual DrvOutputs registerOutputs();

    /* Open a log file and a pipe to it. */
    Path openLogFile();
@@ -210,8 +205,17 @@ struct DerivationGoal : public Goal
    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
    OutputPathMap queryDerivationOutputMap();

    /* Return the set of (in)valid paths. */
    void checkPathValidity();
    /* Update 'initialOutputs' to determine the current status of the
       outputs of the derivation. Also returns a Boolean denoting
       whether all outputs are valid and non-corrupt, and a
       'DrvOutputs' structure containing the valid and wanted
       outputs. */
    std::pair<bool, DrvOutputs> checkPathValidity();

    /* Aborts if any output is not valid or corrupt, and otherwise
       returns a 'DrvOutputs' structure containing the wanted
       outputs. */
    DrvOutputs assertPathValidity();

    /* Forcibly kill the child process, if any. */
    virtual void killChild();
@@ -222,8 +226,11 @@ struct DerivationGoal : public Goal

    void done(
        BuildResult::Status status,
        DrvOutputs builtOutputs = {},
        std::optional<Error> ex = {});

    void waiteeDone(GoalPtr waitee, ExitCode result) override;

    StorePathSet exportReferences(const StorePathSet & storePaths);
};
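As the comments above describe, checkPathValidity() now reports both whether all wanted outputs are already valid and a DrvOutputs map (DrvOutput id to Realisation) for the valid, wanted outputs, which done() then moves into buildResult.builtOutputs. A small hedged sketch of how such a map entry is built, mirroring the Realisation { drvOutput, info.known->path } pattern used in the .cc hunks of this commit; the singleOutput helper is hypothetical and not part of the diff.

    // Hypothetical helper: build a one-entry DrvOutputs map, the shape done() now accepts.
    #include "realisation.hh"
    #include "path.hh"

    nix::DrvOutputs singleOutput(
        const nix::Hash & drvHash,
        const std::string & outputName,
        const nix::StorePath & outPath)
    {
        nix::DrvOutput id { drvHash, outputName };
        // DrvOutputs is a map from DrvOutput to Realisation.
        return {{ id, nix::Realisation { id, outPath } }};
    }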
@@ -6,8 +6,12 @@

namespace nix {

DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
    : Goal(worker)
DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
    const DrvOutput & id,
    Worker & worker,
    RepairFlag repair,
    std::optional<ContentAddress> ca)
    : Goal(worker, DerivedPath::Opaque { StorePath::dummy })
    , id(id)
{
    state = &DrvOutputSubstitutionGoal::init;
@@ -32,12 +36,12 @@ void DrvOutputSubstitutionGoal::init()

void DrvOutputSubstitutionGoal::tryNext()
{
    trace("Trying next substituter");
    trace("trying next substituter");

    if (subs.size() == 0) {
        /* None left. Terminate this goal and let someone else deal
           with it. */
        debug("drv output '%s' is required, but there is no substituter that can provide it", id.to_string());
        debug("derivation output '%s' is required, but there is no substituter that can provide it", id.to_string());

        /* Hack: don't indicate failure if there were no substituters.
           In that case the calling derivation should just do a
@@ -119,7 +123,7 @@ void DrvOutputSubstitutionGoal::realisationFetched()
void DrvOutputSubstitutionGoal::outPathValid()
{
    assert(outputInfo);
    trace("Output path substituted");
    trace("output path substituted");

    if (nrFailed > 0) {
        debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
@@ -47,43 +47,51 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
    }
}

std::vector<BuildResult> Store::buildPathsWithResults(
    const std::vector<DerivedPath> & reqs,
    BuildMode buildMode,
    std::shared_ptr<Store> evalStore)
{
    Worker worker(*this, evalStore ? *evalStore : *this);

    Goals goals;
    for (const auto & br : reqs) {
        std::visit(overloaded {
            [&](const DerivedPath::Built & bfd) {
                goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
            },
            [&](const DerivedPath::Opaque & bo) {
                goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
            },
        }, br.raw());
    }

    worker.run(goals);

    std::vector<BuildResult> results;

    for (auto & i : goals)
        results.push_back(i->buildResult);

    return results;
}

BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
    BuildMode buildMode)
{
    Worker worker(*this, *this);
    auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);

    BuildResult result;

    try {
        worker.run(Goals{goal});
        result = goal->getResult();
        return goal->buildResult;
    } catch (Error & e) {
        result.status = BuildResult::MiscFailure;
        result.errorMsg = e.msg();
    }
    // XXX: Should use `goal->queryPartialDerivationOutputMap()` once it's
    // extended to return the full realisation for each output
    auto staticDrvOutputs = drv.outputsAndOptPaths(*this);
    auto outputHashes = staticOutputHashes(*this, drv);
    for (auto & [outputName, staticOutput] : staticDrvOutputs) {
        auto outputId = DrvOutput{outputHashes.at(outputName), outputName};
        if (staticOutput.second)
            result.builtOutputs.insert_or_assign(
                outputId,
                Realisation{ outputId, *staticOutput.second}
            );
        if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
            auto realisation = this->queryRealisation(outputId);
            if (realisation)
                result.builtOutputs.insert_or_assign(
                    outputId,
                    *realisation
                );
        }
    }

    return result;
        return BuildResult {
            .status = BuildResult::MiscFailure,
            .errorMsg = e.msg(),
            .path = DerivedPath::Built { .drvPath = drvPath },
        };
    };
}
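The new Store::buildPathsWithResults() shown above returns one BuildResult per requested DerivedPath instead of only throwing on the first failure. A minimal client-side sketch follows; it assumes the default arguments (bmNormal, no separate eval store) declared for this method in store-api.hh, and buildAndPrint is an illustrative name rather than code from the commit.

    #include "store-api.hh"
    #include "derived-path.hh"
    #include "build-result.hh"

    #include <iostream>
    #include <vector>

    void buildAndPrint(nix::ref<nix::Store> store, const nix::StorePath & drvPath)
    {
        std::vector<nix::DerivedPath> reqs {
            nix::DerivedPath::Built { .drvPath = drvPath, .outputs = {} }, // empty set = all outputs
        };

        for (auto & result : store->buildPathsWithResults(reqs)) {
            if (!result.success())
                result.rethrow(); // throws an Error carrying errorMsg
            for (auto & [id, realisation] : result.builtOutputs)
                std::cout << store->printStorePath(realisation.outPath) << "\n";
        }
    }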
@@ -28,7 +28,7 @@ void Goal::addWaitee(GoalPtr waitee)

void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
{
    assert(waitees.find(waitee) != waitees.end());
    assert(waitees.count(waitee));
    waitees.erase(waitee);

    trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size()));
@@ -2,6 +2,7 @@

#include "types.hh"
#include "store-api.hh"
#include "build-result.hh"

namespace nix {

@@ -39,30 +40,32 @@ struct Goal : public std::enable_shared_from_this<Goal>
    WeakGoals waiters;

    /* Number of goals we are/were waiting for that have failed. */
    unsigned int nrFailed;
    size_t nrFailed = 0;

    /* Number of substitution goals we are/were waiting for that
       failed because there are no substituters. */
    unsigned int nrNoSubstituters;
    size_t nrNoSubstituters = 0;

    /* Number of substitution goals we are/were waiting for that
       failed because they had unsubstitutable references. */
    unsigned int nrIncompleteClosure;
    size_t nrIncompleteClosure = 0;

    /* Name of this goal for debugging purposes. */
    std::string name;

    /* Whether the goal is finished. */
    ExitCode exitCode;
    ExitCode exitCode = ecBusy;

    /* Build result. */
    BuildResult buildResult;

    /* Exception containing an error message, if any. */
    std::optional<Error> ex;

    Goal(Worker & worker) : worker(worker)
    {
        nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
        exitCode = ecBusy;
    }
    Goal(Worker & worker, DerivedPath path)
        : worker(worker)
        , buildResult { .path = std::move(path) }
    { }

    virtual ~Goal()
    {
@@ -7,6 +7,22 @@ HookInstance::HookInstance()
{
    debug("starting build hook '%s'", settings.buildHook);

    auto buildHookArgs = tokenizeString<std::list<std::string>>(settings.buildHook.get());

    if (buildHookArgs.empty())
        throw Error("'build-hook' setting is empty");

    auto buildHook = buildHookArgs.front();
    buildHookArgs.pop_front();

    Strings args;
    args.push_back(std::string(baseNameOf(buildHook)));

    for (auto & arg : buildHookArgs)
        args.push_back(arg);

    args.push_back(std::to_string(verbosity));

    /* Create a pipe to get the output of the child. */
    fromHook.create();
@@ -36,14 +52,9 @@ HookInstance::HookInstance()
    if (dup2(builderOut.readSide.get(), 5) == -1)
        throw SysError("dupping builder's stdout/stderr");

    Strings args = {
        std::string(baseNameOf(settings.buildHook.get())),
        std::to_string(verbosity),
    };
    execv(buildHook.c_str(), stringsToCharPtrs(args).data());

    execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data());

    throw SysError("executing '%s'", settings.buildHook);
    throw SysError("executing '%s'", buildHook);
});

    pid.setSeparatePG(true);
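The two hunks above change the build-hook setting from a single program path into a whitespace-separated command line: the first token becomes the program, the remaining tokens become leading arguments, and the verbosity is still appended last. A standalone sketch of that tokenisation, using the same tokenizeString/baseNameOf helpers from the Nix tree; the example setting value and the printed verbosity are made up for illustration.

    #include "util.hh" // tokenizeString, baseNameOf

    #include <iostream>
    #include <list>
    #include <string>

    int main()
    {
        // Example value only; any "program arg1 arg2 ..." string works.
        std::string buildHookSetting = "/run/current-system/sw/bin/nix __build-remote";

        auto buildHookArgs = nix::tokenizeString<std::list<std::string>>(buildHookSetting);
        if (buildHookArgs.empty()) {
            std::cerr << "'build-hook' setting is empty\n";
            return 1;
        }

        auto buildHook = buildHookArgs.front();
        buildHookArgs.pop_front();

        // argv[0] is the base name of the hook, then the extra tokens, then the verbosity.
        std::list<std::string> args { std::string(nix::baseNameOf(buildHook)) };
        args.insert(args.end(), buildHookArgs.begin(), buildHookArgs.end());
        args.push_back("3"); // stand-in for std::to_string(verbosity)

        for (auto & a : args)
            std::cout << a << "\n";
        return 0;
    }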
@ -1,4 +1,5 @@
|
|||
#include "local-derivation-goal.hh"
|
||||
#include "gc-store.hh"
|
||||
#include "hook-instance.hh"
|
||||
#include "worker.hh"
|
||||
#include "builtins.hh"
|
||||
|
|
@ -13,6 +14,7 @@
|
|||
#include "worker-protocol.hh"
|
||||
#include "topo-sort.hh"
|
||||
#include "callback.hh"
|
||||
#include "json-utils.hh"
|
||||
|
||||
#include <regex>
|
||||
#include <queue>
|
||||
|
|
@ -55,8 +57,6 @@
|
|||
#include <pwd.h>
|
||||
#include <grp.h>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
void handleDiffHook(
|
||||
|
|
@ -186,7 +186,7 @@ void LocalDerivationGoal::tryLocalBuild() {
|
|||
outputLocks.unlock();
|
||||
buildUser.reset();
|
||||
worker.permanentFailure = true;
|
||||
done(BuildResult::InputRejected, e);
|
||||
done(BuildResult::InputRejected, {}, e);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -216,8 +216,7 @@ static void movePath(const Path & src, const Path & dst)
|
|||
if (changePerm)
|
||||
chmod_(src, st.st_mode | S_IWUSR);
|
||||
|
||||
if (rename(src.c_str(), dst.c_str()))
|
||||
throw SysError("renaming '%1%' to '%2%'", src, dst);
|
||||
renameFile(src, dst);
|
||||
|
||||
if (changePerm)
|
||||
chmod_(dst, st.st_mode);
|
||||
|
|
@ -304,7 +303,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
|
|||
if (buildMode != bmCheck && status.known->isValid()) continue;
|
||||
auto p = worker.store.printStorePath(status.known->path);
|
||||
if (pathExists(chrootRootDir + p))
|
||||
rename((chrootRootDir + p).c_str(), p.c_str());
|
||||
renameFile((chrootRootDir + p), p);
|
||||
}
|
||||
|
||||
return diskFull;
|
||||
|
|
@ -387,7 +386,7 @@ void LocalDerivationGoal::startBuilder()
|
|||
else if (settings.sandboxMode == smDisabled)
|
||||
useChroot = false;
|
||||
else if (settings.sandboxMode == smRelaxed)
|
||||
useChroot = !(derivationIsImpure(derivationType)) && !noChroot;
|
||||
useChroot = derivationType.isSandboxed() && !noChroot;
|
||||
}
|
||||
|
||||
auto & localStore = getLocalStore();
|
||||
|
|
@ -474,7 +473,7 @@ void LocalDerivationGoal::startBuilder()
|
|||
temporary build directory. The text files have the format used
|
||||
by `nix-store --register-validity'. However, the deriver
|
||||
fields are left empty. */
|
||||
auto s = get(drv->env, "exportReferencesGraph").value_or("");
|
||||
auto s = getOr(drv->env, "exportReferencesGraph", "");
|
||||
Strings ss = tokenizeString<Strings>(s);
|
||||
if (ss.size() % 2 != 0)
|
||||
throw BuildError("odd number of tokens in 'exportReferencesGraph': '%1%'", s);
|
||||
|
|
@ -608,7 +607,7 @@ void LocalDerivationGoal::startBuilder()
|
|||
"nogroup:x:65534:\n", sandboxGid()));
|
||||
|
||||
/* Create /etc/hosts with localhost entry. */
|
||||
if (!(derivationIsImpure(derivationType)))
|
||||
if (derivationType.isSandboxed())
|
||||
writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n::1 localhost\n");
|
||||
|
||||
/* Make the closure of the inputs available in the chroot,
|
||||
|
|
@ -724,6 +723,9 @@ void LocalDerivationGoal::startBuilder()
|
|||
|
||||
/* Run the builder. */
|
||||
printMsg(lvlChatty, "executing builder '%1%'", drv->builder);
|
||||
printMsg(lvlChatty, "using builder args '%1%'", concatStringsSep(" ", drv->args));
|
||||
for (auto & i : drv->env)
|
||||
printMsg(lvlVomit, "setting builder env variable '%1%'='%2%'", i.first, i.second);
|
||||
|
||||
/* Create the log file. */
|
||||
Path logFile = openLogFile();
|
||||
|
|
@ -776,7 +778,7 @@ void LocalDerivationGoal::startBuilder()
|
|||
if (tcsetattr(builderOut.writeSide.get(), TCSANOW, &term))
|
||||
throw SysError("putting pseudoterminal into raw mode");
|
||||
|
||||
result.startTime = time(0);
|
||||
buildResult.startTime = time(0);
|
||||
|
||||
/* Fork a child to build the package. */
|
||||
|
||||
|
|
@ -816,7 +818,7 @@ void LocalDerivationGoal::startBuilder()
|
|||
us.
|
||||
*/
|
||||
|
||||
if (!(derivationIsImpure(derivationType)))
|
||||
if (derivationType.isSandboxed())
|
||||
privateNetwork = true;
|
||||
|
||||
userNamespaceSync.create();
|
||||
|
|
@ -863,18 +865,43 @@ void LocalDerivationGoal::startBuilder()
|
|||
/* Some distros patch Linux to not allow unprivileged
|
||||
* user namespaces. If we get EPERM or EINVAL, try
|
||||
* without CLONE_NEWUSER and see if that works.
|
||||
* Details: https://salsa.debian.org/kernel-team/linux/-/commit/d98e00eda6bea437e39b9e80444eee84a32438a6
|
||||
*/
|
||||
usingUserNamespace = false;
|
||||
flags &= ~CLONE_NEWUSER;
|
||||
child = clone(childEntry, stack + stackSize, flags, this);
|
||||
}
|
||||
/* Otherwise exit with EPERM so we can handle this in the
|
||||
parent. This is only done when sandbox-fallback is set
|
||||
to true (the default). */
|
||||
if (child == -1 && (errno == EPERM || errno == EINVAL) && settings.sandboxFallback)
|
||||
_exit(1);
|
||||
if (child == -1) throw SysError("cloning builder process");
|
||||
|
||||
if (child == -1) {
|
||||
switch(errno) {
|
||||
case EPERM:
|
||||
case EINVAL: {
|
||||
int errno_ = errno;
|
||||
if (!userNamespacesEnabled && errno==EPERM)
|
||||
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/user/max_user_namespaces");
|
||||
if (userNamespacesEnabled) {
|
||||
Path procSysKernelUnprivilegedUsernsClone = "/proc/sys/kernel/unprivileged_userns_clone";
|
||||
if (pathExists(procSysKernelUnprivilegedUsernsClone)
|
||||
&& trim(readFile(procSysKernelUnprivilegedUsernsClone)) == "0") {
|
||||
notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/kernel/unprivileged_userns_clone");
|
||||
}
|
||||
}
|
||||
Path procSelfNsUser = "/proc/self/ns/user";
|
||||
if (!pathExists(procSelfNsUser))
|
||||
notice("/proc/self/ns/user does not exist; your kernel was likely built without CONFIG_USER_NS=y, which is required for sandboxing");
|
||||
/* Otherwise exit with EPERM so we can handle this in the
|
||||
parent. This is only done when sandbox-fallback is set
|
||||
to true (the default). */
|
||||
if (settings.sandboxFallback)
|
||||
_exit(1);
|
||||
/* Mention sandbox-fallback in the error message so the user
|
||||
knows that having it disabled contributed to the
|
||||
unrecoverability of this failure */
|
||||
throw SysError(errno_, "creating sandboxed builder process using clone(), without sandbox-fallback");
|
||||
}
|
||||
default:
|
||||
throw SysError("creating sandboxed builder process using clone()");
|
||||
}
|
||||
}
|
||||
writeFull(builderOut.writeSide.get(),
|
||||
fmt("%d %d\n", usingUserNamespace, child));
|
||||
_exit(0);
|
||||
|
|
@ -1014,7 +1041,7 @@ void LocalDerivationGoal::initTmpDir() {
|
|||
there is no size constraint). */
|
||||
if (!parsedDrv->getStructuredAttrs()) {
|
||||
|
||||
StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile").value_or(""));
|
||||
StringSet passAsFile = tokenizeString<StringSet>(getOr(drv->env, "passAsFile", ""));
|
||||
for (auto & i : drv->env) {
|
||||
if (passAsFile.find(i.first) == passAsFile.end()) {
|
||||
env[i.first] = i.second;
|
||||
|
|
@ -1077,7 +1104,7 @@ void LocalDerivationGoal::initEnv()
|
|||
derivation, tell the builder, so that for instance `fetchurl'
|
||||
can skip checking the output. On older Nixes, this environment
|
||||
variable won't be set, so `fetchurl' will do the check. */
|
||||
if (derivationIsFixed(derivationType)) env["NIX_OUTPUT_CHECKED"] = "1";
|
||||
if (derivationType.isFixed()) env["NIX_OUTPUT_CHECKED"] = "1";
|
||||
|
||||
/* *Only* if this is a fixed-output derivation, propagate the
|
||||
values of the environment variables specified in the
|
||||
|
|
@ -1088,7 +1115,7 @@ void LocalDerivationGoal::initEnv()
|
|||
to the builder is generally impure, but the output of
|
||||
fixed-output derivations is by definition pure (since we
|
||||
already know the cryptographic hash of the output). */
|
||||
if (derivationIsImpure(derivationType)) {
|
||||
if (!derivationType.isSandboxed()) {
|
||||
for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings()))
|
||||
env[i] = getEnv(i).value_or("");
|
||||
}
|
||||
|
|
@ -1156,7 +1183,7 @@ struct RestrictedStoreConfig : virtual LocalFSStoreConfig
|
|||
/* A wrapper around LocalStore that only allows building/querying of
|
||||
paths that are in the input closures of the build or were added via
|
||||
recursive Nix calls. */
|
||||
struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual LocalFSStore
|
||||
struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual LocalFSStore, public virtual GcStore
|
||||
{
|
||||
ref<LocalStore> next;
|
||||
|
||||
|
|
@ -1288,6 +1315,16 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
|
|||
}
|
||||
|
||||
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override
|
||||
{
|
||||
for (auto & result : buildPathsWithResults(paths, buildMode, evalStore))
|
||||
if (!result.success())
|
||||
result.rethrow();
|
||||
}
|
||||
|
||||
std::vector<BuildResult> buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
BuildMode buildMode = bmNormal,
|
||||
std::shared_ptr<Store> evalStore = nullptr) override
|
||||
{
|
||||
assert(!evalStore);
|
||||
|
||||
|
|
@ -1301,26 +1338,13 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
throw InvalidPath("cannot build '%s' in recursive Nix because path is unknown", req.to_string(*next));
}

next->buildPaths(paths, buildMode);
auto results = next->buildPathsWithResults(paths, buildMode);

for (auto & path : paths) {
auto p = std::get_if<DerivedPath::Built>(&path);
if (!p) continue;
auto & bfd = *p;
auto drv = readDerivation(bfd.drvPath);
auto drvHashes = staticOutputHashes(*this, drv);
auto outputs = next->queryDerivationOutputMap(bfd.drvPath);
for (auto & [outputName, outputPath] : outputs)
if (wantOutput(outputName, bfd.outputs)) {
newPaths.insert(outputPath);
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
auto thisRealisation = next->queryRealisation(
DrvOutput{drvHashes.at(outputName), outputName}
);
assert(thisRealisation);
newRealisations.insert(*thisRealisation);
}
}
for (auto & result : results) {
for (auto & [outputName, output] : result.builtOutputs) {
newPaths.insert(output.outPath);
newRealisations.insert(output);
}
}

StorePathSet closure;

@ -1329,6 +1353,8 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
goal.addDependency(path);
for (auto & real : Realisation::closure(*next, newRealisations))
goal.addedDrvOutputs.insert(real.id);

return results;
}

BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,

@ -1369,6 +1395,12 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
next->queryMissing(allowed, willBuild, willSubstitute,
unknown, downloadSize, narSize);
}

virtual std::optional<std::string> getBuildLog(const StorePath & path) override
{ return std::nullopt; }

virtual void addBuildLog(const StorePath & path, std::string_view log) override
{ unsupported("addBuildLog"); }
};

@ -1591,6 +1623,8 @@ void LocalDerivationGoal::runChild()
/* Warning: in the child we should absolutely not make any SQLite
calls! */

bool sendException = true;

try { /* child */

commonChildInit(builderOut);

@ -1697,7 +1731,7 @@ void LocalDerivationGoal::runChild()
/* Fixed-output derivations typically need to access the
network, so give them access to /etc/resolv.conf and so
on. */
if (derivationIsImpure(derivationType)) {
if (!derivationType.isSandboxed()) {
// Only use nss functions to resolve hosts and
// services. Don’t use it for anything else that may
// be configured for this system. This limits the

@ -1738,7 +1772,19 @@ void LocalDerivationGoal::runChild()

for (auto & i : dirsInChroot) {
if (i.second.source == "/proc") continue; // backwards compatibility
doBind(i.second.source, chrootRootDir + i.first, i.second.optional);

#if HAVE_EMBEDDED_SANDBOX_SHELL
if (i.second.source == "__embedded_sandbox_shell__") {
static unsigned char sh[] = {
#include "embedded-sandbox-shell.gen.hh"
};
auto dst = chrootRootDir + i.first;
createDirs(dirOf(dst));
writeFile(dst, std::string_view((const char *) sh, sizeof(sh)));
chmod_(dst, 0555);
} else
#endif
doBind(i.second.source, chrootRootDir + i.first, i.second.optional);
}

/* Bind a new instance of procfs on /proc. */

@ -1954,7 +2000,7 @@ void LocalDerivationGoal::runChild()

sandboxProfile += "(import \"sandbox-defaults.sb\")\n";

if (derivationIsImpure(derivationType))
if (!derivationType.isSandboxed())
sandboxProfile += "(import \"sandbox-network.sb\")\n";

/* Add the output paths we'll use at build-time to the chroot */

@ -2048,6 +2094,8 @@ void LocalDerivationGoal::runChild()
/* Indicate that we managed to set up the build environment. */
writeFull(STDERR_FILENO, std::string("\2\n"));

sendException = false;

/* Execute the program. This should not return. */
if (drv->isBuiltin()) {
try {

@ -2101,16 +2149,19 @@ void LocalDerivationGoal::runChild()
throw SysError("executing '%1%'", drv->builder);

} catch (Error & e) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
if (sendException) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
} else
std::cerr << e.msg();
_exit(1);
}
}


void LocalDerivationGoal::registerOutputs()
DrvOutputs LocalDerivationGoal::registerOutputs()
{
/* When using a build hook, the build hook can register the output
as valid (by doing `nix-store --import'). If so we don't have

@ -2119,10 +2170,8 @@ void LocalDerivationGoal::registerOutputs()
|
|||
We can only early return when the outputs are known a priori. For
|
||||
floating content-addressed derivations this isn't the case.
|
||||
*/
|
||||
if (hook) {
|
||||
DerivationGoal::registerOutputs();
|
||||
return;
|
||||
}
|
||||
if (hook)
|
||||
return DerivationGoal::registerOutputs();
|
||||
|
||||
std::map<std::string, ValidPathInfo> infos;
|
||||
|
||||
|
|
@ -2163,12 +2212,22 @@ void LocalDerivationGoal::registerOutputs()
|
|||
std::map<std::string, std::variant<AlreadyRegistered, PerhapsNeedToRegister>> outputReferencesIfUnregistered;
|
||||
std::map<std::string, struct stat> outputStats;
|
||||
for (auto & [outputName, _] : drv->outputs) {
|
||||
auto actualPath = toRealPathChroot(worker.store.printStorePath(scratchOutputs.at(outputName)));
|
||||
auto scratchOutput = get(scratchOutputs, outputName);
|
||||
if (!scratchOutput)
|
||||
throw BuildError(
|
||||
"builder for '%s' has no scratch output for '%s'",
|
||||
worker.store.printStorePath(drvPath), outputName);
|
||||
auto actualPath = toRealPathChroot(worker.store.printStorePath(*scratchOutput));
|
||||
|
||||
outputsToSort.insert(outputName);
|
||||
|
||||
/* Updated wanted info to remove the outputs we definitely don't need to register */
|
||||
auto & initialInfo = initialOutputs.at(outputName);
|
||||
auto initialOutput = get(initialOutputs, outputName);
|
||||
if (!initialOutput)
|
||||
throw BuildError(
|
||||
"builder for '%s' has no initial output for '%s'",
|
||||
worker.store.printStorePath(drvPath), outputName);
|
||||
auto & initialInfo = *initialOutput;
|
||||
|
||||
/* Don't register if already valid, and not checking */
|
||||
initialInfo.wanted = buildMode == bmCheck
|
||||
|
|
@ -2223,6 +2282,11 @@ void LocalDerivationGoal::registerOutputs()
|
|||
|
||||
auto sortedOutputNames = topoSort(outputsToSort,
|
||||
{[&](const std::string & name) {
|
||||
auto orifu = get(outputReferencesIfUnregistered, name);
|
||||
if (!orifu)
|
||||
throw BuildError(
|
||||
"no output reference for '%s' in build of '%s'",
|
||||
name, worker.store.printStorePath(drvPath));
|
||||
return std::visit(overloaded {
|
||||
/* Since we'll use the already installed versions of these, we
|
||||
can treat them as leaves and ignore any references they
|
||||
|
|
@ -2237,7 +2301,7 @@ void LocalDerivationGoal::registerOutputs()
|
|||
referencedOutputs.insert(o);
|
||||
return referencedOutputs;
|
||||
},
|
||||
}, outputReferencesIfUnregistered.at(name));
|
||||
}, *orifu);
|
||||
}},
|
||||
{[&](const std::string & path, const std::string & parent) {
|
||||
// TODO with more -vvvv also show the temporary paths for manual inspection.
|
||||
|
|
@ -2248,10 +2312,13 @@ void LocalDerivationGoal::registerOutputs()
|
|||
|
||||
std::reverse(sortedOutputNames.begin(), sortedOutputNames.end());
|
||||
|
||||
OutputPathMap finalOutputs;
|
||||
|
||||
for (auto & outputName : sortedOutputNames) {
|
||||
auto output = drv->outputs.at(outputName);
|
||||
auto & scratchPath = scratchOutputs.at(outputName);
|
||||
auto actualPath = toRealPathChroot(worker.store.printStorePath(scratchPath));
|
||||
auto output = get(drv->outputs, outputName);
|
||||
auto scratchPath = get(scratchOutputs, outputName);
|
||||
assert(output && scratchPath);
|
||||
auto actualPath = toRealPathChroot(worker.store.printStorePath(*scratchPath));
|
||||
|
||||
auto finish = [&](StorePath finalStorePath) {
|
||||
/* Store the final path */
|
||||
|
|
@ -2259,10 +2326,13 @@ void LocalDerivationGoal::registerOutputs()
|
|||
/* The rewrite rule will be used in downstream outputs that refer to
|
||||
use. This is why the topological sort is essential to do first
|
||||
before this for loop. */
|
||||
if (scratchPath != finalStorePath)
|
||||
outputRewrites[std::string { scratchPath.hashPart() }] = std::string { finalStorePath.hashPart() };
|
||||
if (*scratchPath != finalStorePath)
|
||||
outputRewrites[std::string { scratchPath->hashPart() }] = std::string { finalStorePath.hashPart() };
|
||||
};
|
||||
|
||||
auto orifu = get(outputReferencesIfUnregistered, outputName);
|
||||
assert(orifu);
|
||||
|
||||
std::optional<StorePathSet> referencesOpt = std::visit(overloaded {
|
||||
[&](const AlreadyRegistered & skippedFinalPath) -> std::optional<StorePathSet> {
|
||||
finish(skippedFinalPath.path);
|
||||
|
|
@ -2271,7 +2341,7 @@ void LocalDerivationGoal::registerOutputs()
|
|||
[&](const PerhapsNeedToRegister & r) -> std::optional<StorePathSet> {
|
||||
return r.refs;
|
||||
},
|
||||
}, outputReferencesIfUnregistered.at(outputName));
|
||||
}, *orifu);
|
||||
|
||||
if (!referencesOpt)
|
||||
continue;
|
||||
|
|
@ -2308,25 +2378,29 @@ void LocalDerivationGoal::registerOutputs()
|
|||
for (auto & r : references) {
|
||||
auto name = r.name();
|
||||
auto origHash = std::string { r.hashPart() };
|
||||
if (r == scratchPath)
|
||||
if (r == *scratchPath) {
|
||||
res.first = true;
|
||||
else if (outputRewrites.count(origHash) == 0)
|
||||
res.second.insert(r);
|
||||
else {
|
||||
std::string newRef = outputRewrites.at(origHash);
|
||||
} else if (auto outputRewrite = get(outputRewrites, origHash)) {
|
||||
std::string newRef = *outputRewrite;
|
||||
newRef += '-';
|
||||
newRef += name;
|
||||
res.second.insert(StorePath { newRef });
|
||||
} else {
|
||||
res.second.insert(r);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
};
|
||||
|
||||
auto newInfoFromCA = [&](const DerivationOutputCAFloating outputHash) -> ValidPathInfo {
|
||||
auto & st = outputStats.at(outputName);
|
||||
auto newInfoFromCA = [&](const DerivationOutput::CAFloating outputHash) -> ValidPathInfo {
|
||||
auto st = get(outputStats, outputName);
|
||||
if (!st)
|
||||
throw BuildError(
|
||||
"output path %1% without valid stats info",
|
||||
actualPath);
|
||||
if (outputHash.method == FileIngestionMethod::Flat) {
|
||||
/* The output path should be a regular file without execute permission. */
|
||||
if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
|
||||
if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0)
|
||||
throw BuildError(
|
||||
"output path '%1%' should be a non-executable regular file "
|
||||
"since recursive hashing is not enabled (outputHashMode=flat)",
|
||||
|
|
@ -2334,7 +2408,7 @@ void LocalDerivationGoal::registerOutputs()
|
|||
}
|
||||
rewriteOutput();
|
||||
/* FIXME optimize and deduplicate with addToStore */
|
||||
std::string oldHashPart { scratchPath.hashPart() };
|
||||
std::string oldHashPart { scratchPath->hashPart() };
|
||||
HashModuloSink caSink { outputHash.hashType, oldHashPart };
|
||||
switch (outputHash.method) {
|
||||
case FileIngestionMethod::Recursive:
|
||||
|
|
@ -2353,13 +2427,11 @@ void LocalDerivationGoal::registerOutputs()
|
|||
outputPathName(drv->name, outputName),
|
||||
refs.second,
|
||||
refs.first);
|
||||
if (scratchPath != finalPath) {
|
||||
if (*scratchPath != finalPath) {
|
||||
// Also rewrite the output path
|
||||
auto source = sinkToSource([&](Sink & nextSink) {
|
||||
StringSink sink;
|
||||
dumpPath(actualPath, sink);
|
||||
RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
|
||||
rsink2(sink.s);
|
||||
dumpPath(actualPath, rsink2);
|
||||
rsink2.flush();
|
||||
});
|
||||
Path tmpPath = actualPath + ".tmp";
|
||||
|
|
@ -2388,14 +2460,15 @@ void LocalDerivationGoal::registerOutputs()
|
|||
};
|
||||
|
||||
ValidPathInfo newInfo = std::visit(overloaded {
|
||||
[&](const DerivationOutputInputAddressed & output) {
|
||||
|
||||
[&](const DerivationOutput::InputAddressed & output) {
|
||||
/* input-addressed case */
|
||||
auto requiredFinalPath = output.path;
|
||||
/* Preemptively add rewrite rule for final hash, as that is
|
||||
what the NAR hash will use rather than normalized-self references */
|
||||
if (scratchPath != requiredFinalPath)
|
||||
if (*scratchPath != requiredFinalPath)
|
||||
outputRewrites.insert_or_assign(
|
||||
std::string { scratchPath.hashPart() },
|
||||
std::string { scratchPath->hashPart() },
|
||||
std::string { requiredFinalPath.hashPart() });
|
||||
rewriteOutput();
|
||||
auto narHashAndSize = hashPath(htSHA256, actualPath);
|
||||
|
|
@ -2407,8 +2480,9 @@ void LocalDerivationGoal::registerOutputs()
|
|||
newInfo0.references.insert(newInfo0.path);
|
||||
return newInfo0;
|
||||
},
|
||||
[&](const DerivationOutputCAFixed & dof) {
|
||||
auto newInfo0 = newInfoFromCA(DerivationOutputCAFloating {
|
||||
|
||||
[&](const DerivationOutput::CAFixed & dof) {
|
||||
auto newInfo0 = newInfoFromCA(DerivationOutput::CAFloating {
|
||||
.method = dof.hash.method,
|
||||
.hashType = dof.hash.hash.type,
|
||||
});
|
||||
|
|
@ -2429,15 +2503,25 @@ void LocalDerivationGoal::registerOutputs()
|
|||
}
|
||||
return newInfo0;
|
||||
},
|
||||
[&](DerivationOutputCAFloating dof) {
|
||||
|
||||
[&](const DerivationOutput::CAFloating & dof) {
|
||||
return newInfoFromCA(dof);
|
||||
},
|
||||
[&](DerivationOutputDeferred) -> ValidPathInfo {
|
||||
|
||||
[&](const DerivationOutput::Deferred &) -> ValidPathInfo {
|
||||
// No derivation should reach that point without having been
|
||||
// rewritten first
|
||||
assert(false);
|
||||
},
|
||||
}, output.output);
|
||||
|
||||
[&](const DerivationOutput::Impure & doi) {
|
||||
return newInfoFromCA(DerivationOutput::CAFloating {
|
||||
.method = doi.method,
|
||||
.hashType = doi.hashType,
|
||||
});
|
||||
},
|
||||
|
||||
}, output->raw());
|
||||
|
||||
/* FIXME: set proper permissions in restorePath() so
|
||||
we don't have to do another traversal. */
|
||||
|
|
@ -2453,7 +2537,7 @@ void LocalDerivationGoal::registerOutputs()
|
|||
derivations. */
|
||||
PathLocks dynamicOutputLock;
|
||||
dynamicOutputLock.setDeletion(true);
|
||||
auto optFixedPath = output.path(worker.store, drv->name, outputName);
|
||||
auto optFixedPath = output->path(worker.store, drv->name, outputName);
|
||||
if (!optFixedPath ||
|
||||
worker.store.printStorePath(*optFixedPath) != finalDestPath)
|
||||
{
|
||||
|
|
@ -2519,11 +2603,10 @@ void LocalDerivationGoal::registerOutputs()
|
|||
|
||||
/* For debugging, print out the referenced and unreferenced paths. */
|
||||
for (auto & i : inputPaths) {
|
||||
auto j = references.find(i);
|
||||
if (j == references.end())
|
||||
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
|
||||
else
|
||||
if (references.count(i))
|
||||
debug("referenced input: '%1%'", worker.store.printStorePath(i));
|
||||
else
|
||||
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
|
||||
}
|
||||
|
||||
if (curRound == nrRounds) {
|
||||
|
|
@ -2547,11 +2630,12 @@ void LocalDerivationGoal::registerOutputs()
|
|||
}
|
||||
|
||||
if (buildMode == bmCheck) {
|
||||
// In case of FOD mismatches on `--check` an error must be thrown as this is also
|
||||
// a source for non-determinism.
|
||||
/* In case of fixed-output derivations, if there are
|
||||
mismatches on `--check` an error must be thrown as this is
|
||||
also a source for non-determinism. */
|
||||
if (delayedException)
|
||||
std::rethrow_exception(delayedException);
|
||||
return;
|
||||
return assertPathValidity();
|
||||
}
|
||||
|
||||
/* Apply output checks. */
|
||||
|
|
@ -2563,7 +2647,7 @@ void LocalDerivationGoal::registerOutputs()
|
|||
assert(prevInfos.size() == infos.size());
|
||||
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
|
||||
if (!(*i == *j)) {
|
||||
result.isNonDeterministic = true;
|
||||
buildResult.isNonDeterministic = true;
|
||||
Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
|
||||
bool prevExists = keepPreviousRound && pathExists(prev);
|
||||
hintformat hint = prevExists
|
||||
|
|
@ -2594,14 +2678,13 @@ void LocalDerivationGoal::registerOutputs()
Path prev = path + checkSuffix;
deletePath(prev);
Path dst = path + checkSuffix;
if (rename(path.c_str(), dst.c_str()))
throw SysError("renaming '%s' to '%s'", path, dst);
renameFile(path, dst);
}
}

if (curRound < nrRounds) {
prevInfos = std::move(infos);
return;
return {};
}

/* Remove the .check directories if we're done. FIXME: keep them

@ -2636,17 +2719,29 @@ void LocalDerivationGoal::registerOutputs()
means it's safe to link the derivation to the output hash. We must do
that for floating CA derivations, which otherwise couldn't be cached,
but it's fine to do in all cases. */
DrvOutputs builtOutputs;

if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
for (auto& [outputName, newInfo] : infos) {
auto thisRealisation = Realisation{
.id = DrvOutput{initialOutputs.at(outputName).outputHash,
outputName},
.outPath = newInfo.path};
for (auto & [outputName, newInfo] : infos) {
auto oldinfo = get(initialOutputs, outputName);
assert(oldinfo);
auto thisRealisation = Realisation {
.id = DrvOutput {
oldinfo->outputHash,
outputName
},
.outPath = newInfo.path
};
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)
&& drv->type().isPure())
{
signRealisation(thisRealisation);
worker.store.registerDrvOutput(thisRealisation);
}
if (wantOutput(outputName, wantedOutputs))
builtOutputs.emplace(thisRealisation.id, thisRealisation);
}

return builtOutputs;
}

void LocalDerivationGoal::signRealisation(Realisation & realisation)

@ -2655,7 +2750,7 @@ void LocalDerivationGoal::signRealisation(Realisation & realisation)
|
|||
}
|
||||
|
||||
|
||||
void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
|
||||
void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo> & outputs)
|
||||
{
|
||||
std::map<Path, const ValidPathInfo &> outputsByPath;
|
||||
for (auto & output : outputs)
|
||||
|
|
@ -2727,9 +2822,10 @@ void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & out
|
|||
for (auto & i : *value) {
|
||||
if (worker.store.isStorePath(i))
|
||||
spec.insert(worker.store.parseStorePath(i));
|
||||
else if (finalOutputs.count(i))
|
||||
spec.insert(finalOutputs.at(i));
|
||||
else throw BuildError("derivation contains an illegal reference specifier '%s'", i);
|
||||
else if (auto output = get(outputs, i))
|
||||
spec.insert(output->path);
|
||||
else
|
||||
throw BuildError("derivation contains an illegal reference specifier '%s'", i);
|
||||
}
|
||||
|
||||
auto used = recursive
|
||||
|
|
@ -2768,24 +2864,18 @@ void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & out
|
|||
};
|
||||
|
||||
if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
|
||||
auto outputChecks = structuredAttrs->find("outputChecks");
|
||||
if (outputChecks != structuredAttrs->end()) {
|
||||
auto output = outputChecks->find(outputName);
|
||||
|
||||
if (output != outputChecks->end()) {
|
||||
if (auto outputChecks = get(*structuredAttrs, "outputChecks")) {
|
||||
if (auto output = get(*outputChecks, outputName)) {
|
||||
Checks checks;
|
||||
|
||||
auto maxSize = output->find("maxSize");
|
||||
if (maxSize != output->end())
|
||||
if (auto maxSize = get(*output, "maxSize"))
|
||||
checks.maxSize = maxSize->get<uint64_t>();
|
||||
|
||||
auto maxClosureSize = output->find("maxClosureSize");
|
||||
if (maxClosureSize != output->end())
|
||||
if (auto maxClosureSize = get(*output, "maxClosureSize"))
|
||||
checks.maxClosureSize = maxClosureSize->get<uint64_t>();
|
||||
|
||||
auto get = [&](const std::string & name) -> std::optional<Strings> {
|
||||
auto i = output->find(name);
|
||||
if (i != output->end()) {
|
||||
auto get_ = [&](const std::string & name) -> std::optional<Strings> {
|
||||
if (auto i = get(*output, name)) {
|
||||
Strings res;
|
||||
for (auto j = i->begin(); j != i->end(); ++j) {
|
||||
if (!j->is_string())
|
||||
|
|
@ -2798,10 +2888,10 @@ void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & out
|
|||
return {};
|
||||
};
|
||||
|
||||
checks.allowedReferences = get("allowedReferences");
|
||||
checks.allowedRequisites = get("allowedRequisites");
|
||||
checks.disallowedReferences = get("disallowedReferences");
|
||||
checks.disallowedRequisites = get("disallowedRequisites");
|
||||
checks.allowedReferences = get_("allowedReferences");
|
||||
checks.allowedRequisites = get_("allowedRequisites");
|
||||
checks.disallowedReferences = get_("disallowedReferences");
|
||||
checks.disallowedRequisites = get_("disallowedRequisites");
|
||||
|
||||
applyChecks(checks);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -176,7 +176,7 @@ struct LocalDerivationGoal : public DerivationGoal

/* Check that the derivation outputs all exist and register them
as valid. */
void registerOutputs() override;
DrvOutputs registerOutputs() override;

void signRealisation(Realisation &) override;

@ -6,7 +6,7 @@
namespace nix {

PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
: Goal(worker)
: Goal(worker, DerivedPath::Opaque { storePath })
, storePath(storePath)
, repair(repair)
, ca(ca)

@ -24,6 +24,20 @@ PathSubstitutionGoal::~PathSubstitutionGoal()
}


void PathSubstitutionGoal::done(
ExitCode result,
BuildResult::Status status,
std::optional<std::string> errorMsg)
{
buildResult.status = status;
if (errorMsg) {
debug(*errorMsg);
buildResult.errorMsg = *errorMsg;
}
amDone(result);
}


void PathSubstitutionGoal::work()
{
(this->*state)();

@ -38,7 +52,7 @@ void PathSubstitutionGoal::init()

/* If the path already exists we're done. */
if (!repair && worker.store.isValidPath(storePath)) {
amDone(ecSuccess);
done(ecSuccess, BuildResult::AlreadyValid);
return;
}

@ -60,12 +74,14 @@ void PathSubstitutionGoal::tryNext()
if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal
with it. */
debug("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath));

/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
amDone(substituterFailed ? ecFailed : ecNoSubstituters);
done(
substituterFailed ? ecFailed : ecNoSubstituters,
BuildResult::NoSubstituters,
fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));

if (substituterFailed) {
worker.failedSubstitutions++;

@ -138,7 +154,7 @@ void PathSubstitutionGoal::tryNext()
only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{
warn("the substitute for '%s' from '%s' is not signed by any of the keys in 'trusted-public-keys'",
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
worker.store.printStorePath(storePath), sub->getUri());
tryNext();
return;

@ -162,8 +178,10 @@ void PathSubstitutionGoal::referencesValid()
trace("all references realised");

if (nrFailed > 0) {
debug("some references of path '%s' could not be realised", worker.store.printStorePath(storePath));
amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
done(
nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed,
BuildResult::DependencyFailed,
fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)));
return;
}

@ -268,7 +286,7 @@ void PathSubstitutionGoal::finished()

worker.updateProgress();

amDone(ecSuccess);
done(ecSuccess, BuildResult::Substituted);
}

@ -53,6 +53,11 @@ struct PathSubstitutionGoal : public Goal
/* Content address for recomputing store path */
std::optional<ContentAddress> ca;

void done(
ExitCode result,
BuildResult::Status status,
std::optional<std::string> errorMsg = {});

public:
PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
~PathSubstitutionGoal();

@ -350,7 +350,7 @@ void Worker::waitForInput()
become `available'. Note that `available' (i.e., non-blocking)
includes EOF. */
std::vector<struct pollfd> pollStatus;
std::map <int, int> fdToPollStatus;
std::map<int, size_t> fdToPollStatus;
for (auto & i : children) {
for (auto & j : i.fds) {
pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });

@ -380,7 +380,10 @@ void Worker::waitForInput()
std::set<int> fds2(j->fds);
std::vector<unsigned char> buffer(4096);
for (auto & k : fds2) {
if (pollStatus.at(fdToPollStatus.at(k)).revents) {
const auto fdPollStatusId = get(fdToPollStatus, k);
assert(fdPollStatusId);
assert(*fdPollStatusId < pollStatus.size());
if (pollStatus.at(*fdPollStatusId).revents) {
ssize_t rd = ::read(k, buffer.data(), buffer.size());
// FIXME: is there a cleaner way to handle pt close
// than EIO? Is this even standard?

@ -47,9 +47,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
throw;
}

/* The files below are special-cased to that they don't show up
* in user profiles, either because they are useless, or
* because they would cauase pointless collisions (e.g., each
/* The files below are special-cased to that they don't show
* up in user profiles, either because they are useless, or
* because they would cause pointless collisions (e.g., each
* Python package brings its own
* `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
*/

@ -57,7 +57,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
hasSuffix(srcFile, "/nix-support") ||
hasSuffix(srcFile, "/perllocal.pod") ||
hasSuffix(srcFile, "/info/dir") ||
hasSuffix(srcFile, "/log"))
hasSuffix(srcFile, "/log") ||
hasSuffix(srcFile, "/manifest.nix") ||
hasSuffix(srcFile, "/manifest.json"))
continue;

else if (S_ISDIR(srcSt.st_mode)) {

@ -91,8 +93,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
auto prevPriority = state.priorities[dstFile];
if (prevPriority == priority)
throw Error(
"packages '%1%' and '%2%' have the same priority %3%; "
"files '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"or type 'nix profile install --help' if using 'nix profile' to find out how"
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);

@ -24,7 +24,7 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)

Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
bool unpack = get(drv.env, "unpack").value_or("") == "1";
bool unpack = getOr(drv.env, "unpack", "") == "1";

/* Note: have to use a fresh fileTransfer here because we're in
a forked process. */

@ -22,8 +22,7 @@ void builtinUnpackChannel(const BasicDerivation & drv)
auto entries = readDirectory(out);
if (entries.size() != 1)
throw Error("channel tarball '%s' contains more than one file", src);
if (rename((out + "/" + entries[0].name).c_str(), (out + "/" + channelName).c_str()) == -1)
throw SysError("renaming channel directory");
renameFile((out + "/" + entries[0].name), (out + "/" + channelName));
}

}

@ -13,12 +13,27 @@ create table if not exists Realisations (

create index if not exists IndexRealisations on Realisations(drvPath, outputName);

-- We can end-up in a weird edge-case where a path depends on itself because
-- it’s an output of a CA derivation, that happens to be the same as one of its
-- dependencies.
-- In that case we have a dependency loop (path -> realisation1 -> realisation2
-- -> path) that we need to break by removing the dependencies between the
-- realisations
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
begin
delete from RealisationsRefs where realisationReference in (
select id from Realisations where outputPath = old.id
);
end;

create table if not exists RealisationsRefs (
referrer integer not null,
realisationReference integer,
foreign key (referrer) references Realisations(id) on delete cascade,
foreign key (realisationReference) references Realisations(id) on delete restrict
);
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);

-- used by QueryRealisationReferences
create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);

@ -1,7 +1,11 @@
#include "daemon.hh"
#include "monitor-fd.hh"
#include "worker-protocol.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "store-cast.hh"
#include "gc-store.hh"
#include "log-store.hh"
#include "path-with-outputs.hh"
#include "finally.hh"
#include "archive.hh"

@ -235,6 +239,8 @@ struct ClientSettings
else if (trusted
|| name == settings.buildTimeout.name
|| name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
|| (name == "builders" && value == ""))
settings.set(name, value);

@ -530,6 +536,25 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}

case wopBuildPathsWithResults: {
auto drvs = readDerivedPaths(*store, clientVersion, from);
BuildMode mode = bmNormal;
mode = (BuildMode) readInt(from);

/* Repairing is not atomic, so disallowed for "untrusted"
clients. */
if (mode == bmRepair && !trusted)
throw Error("repairing is not allowed because you are not in 'trusted-users'");

logger->startWork();
auto results = store->buildPathsWithResults(drvs, mode);
logger->stopWork();

worker_proto::write(*store, to, results);

break;
}

case wopBuildDerivation: {
auto drvPath = store->parseStorePath(readString(from));
BasicDerivation drv;

@ -537,6 +562,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
BuildMode buildMode = (BuildMode) readInt(from);
|
||||
logger->startWork();
|
||||
|
||||
auto drvType = drv.type();
|
||||
|
||||
/* Content-addressed derivations are trustless because their output paths
|
||||
are verified by their content alone, so any derivation is free to
|
||||
try to produce such a path.
|
||||
|
|
@ -569,12 +596,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
derivations, we throw out the precomputed output paths and just
|
||||
store the hashes, so there aren't two competing sources of truth an
|
||||
attacker could exploit. */
|
||||
if (drv.type() == DerivationType::InputAddressed && !trusted)
|
||||
if (!(drvType.isCA() || trusted))
|
||||
throw Error("you are not privileged to build input-addressed derivations");
|
||||
|
||||
/* Make sure that the non-input-addressed derivations that got this far
|
||||
are in fact content-addressed if we don't trust them. */
|
||||
assert(derivationIsCA(drv.type()) || trusted);
|
||||
assert(drvType.isCA() || trusted);
|
||||
|
||||
/* Recompute the derivation path when we cannot trust the original. */
|
||||
if (!trusted) {
|
||||
|
|
@ -583,7 +610,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
original not-necessarily-resolved derivation to verify the drv
|
||||
derivation as adequate claim to the input-addressed output
|
||||
paths. */
|
||||
assert(derivationIsCA(drv.type()));
|
||||
assert(drvType.isCA());
|
||||
|
||||
Derivation drv2;
|
||||
static_cast<BasicDerivation &>(drv2) = drv;
|
||||
|
|
@ -622,9 +649,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
|
||||
case wopAddIndirectRoot: {
|
||||
Path path = absPath(readString(from));
|
||||
|
||||
logger->startWork();
|
||||
store->addIndirectRoot(path);
|
||||
auto & gcStore = require<GcStore>(*store);
|
||||
gcStore.addIndirectRoot(path);
|
||||
logger->stopWork();
|
||||
|
||||
to << 1;
|
||||
break;
|
||||
}
|
||||
|
|
@ -639,7 +669,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
|
||||
case wopFindRoots: {
|
||||
logger->startWork();
|
||||
Roots roots = store->findRoots(!trusted);
|
||||
auto & gcStore = require<GcStore>(*store);
|
||||
Roots roots = gcStore.findRoots(!trusted);
|
||||
logger->stopWork();
|
||||
|
||||
size_t size = 0;
|
||||
|
|
@ -670,7 +701,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->startWork();
|
||||
if (options.ignoreLiveness)
|
||||
throw Error("you are not allowed to ignore liveness");
|
||||
store->collectGarbage(options, results);
|
||||
auto & gcStore = require<GcStore>(*store);
|
||||
gcStore.collectGarbage(options, results);
|
||||
logger->stopWork();
|
||||
|
||||
to << results.paths << results.bytesFreed << 0 /* obsolete */;
|
||||
|
|
@ -927,11 +959,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->startWork();
|
||||
if (!trusted)
|
||||
throw Error("you are not privileged to add logs");
|
||||
auto & logStore = require<LogStore>(*store);
|
||||
{
|
||||
FramedSource source(from);
|
||||
StringSink sink;
|
||||
source.drainInto(sink);
|
||||
store->addBuildLog(path, sink.s);
|
||||
logStore.addBuildLog(path, sink.s);
|
||||
}
|
||||
logger->stopWork();
|
||||
to << 1;
|
||||
|
|
|
|||
|
|
@ -11,72 +11,114 @@ namespace nix {
|
|||
std::optional<StorePath> DerivationOutput::path(const Store & store, std::string_view drvName, std::string_view outputName) const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const DerivationOutputInputAddressed & doi) -> std::optional<StorePath> {
|
||||
[](const DerivationOutput::InputAddressed & doi) -> std::optional<StorePath> {
|
||||
return { doi.path };
|
||||
},
|
||||
[&](const DerivationOutputCAFixed & dof) -> std::optional<StorePath> {
|
||||
[&](const DerivationOutput::CAFixed & dof) -> std::optional<StorePath> {
|
||||
return {
|
||||
dof.path(store, drvName, outputName)
|
||||
};
|
||||
},
|
||||
[](const DerivationOutputCAFloating & dof) -> std::optional<StorePath> {
|
||||
[](const DerivationOutput::CAFloating & dof) -> std::optional<StorePath> {
|
||||
return std::nullopt;
|
||||
},
|
||||
[](const DerivationOutputDeferred &) -> std::optional<StorePath> {
|
||||
[](const DerivationOutput::Deferred &) -> std::optional<StorePath> {
|
||||
return std::nullopt;
|
||||
},
|
||||
}, output);
|
||||
[](const DerivationOutput::Impure &) -> std::optional<StorePath> {
|
||||
return std::nullopt;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
|
||||
StorePath DerivationOutputCAFixed::path(const Store & store, std::string_view drvName, std::string_view outputName) const {
|
||||
StorePath DerivationOutput::CAFixed::path(const Store & store, std::string_view drvName, std::string_view outputName) const
|
||||
{
|
||||
return store.makeFixedOutputPath(
|
||||
hash.method, hash.hash,
|
||||
outputPathName(drvName, outputName));
|
||||
}
|
||||
|
||||
|
||||
bool derivationIsCA(DerivationType dt) {
|
||||
switch (dt) {
|
||||
case DerivationType::InputAddressed: return false;
|
||||
case DerivationType::CAFixed: return true;
|
||||
case DerivationType::CAFloating: return true;
|
||||
case DerivationType::DeferredInputAddressed: return false;
|
||||
};
|
||||
// Since enums can have non-variant values, but making a `default:` would
|
||||
// disable exhaustiveness warnings.
|
||||
assert(false);
|
||||
bool DerivationType::isCA() const
|
||||
{
|
||||
/* Normally we do the full `std::visit` to make sure we have
|
||||
exhaustively handled all variants, but so long as there is a
|
||||
variant called `ContentAddressed`, it must be the only one for
|
||||
which `isCA` is true for this to make sense!. */
|
||||
return std::visit(overloaded {
|
||||
[](const InputAddressed & ia) {
|
||||
return false;
|
||||
},
|
||||
[](const ContentAddressed & ca) {
|
||||
return true;
|
||||
},
|
||||
[](const Impure &) {
|
||||
return true;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
bool derivationIsFixed(DerivationType dt) {
|
||||
switch (dt) {
|
||||
case DerivationType::InputAddressed: return false;
|
||||
case DerivationType::CAFixed: return true;
|
||||
case DerivationType::CAFloating: return false;
|
||||
case DerivationType::DeferredInputAddressed: return false;
|
||||
};
|
||||
assert(false);
|
||||
bool DerivationType::isFixed() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const InputAddressed & ia) {
|
||||
return false;
|
||||
},
|
||||
[](const ContentAddressed & ca) {
|
||||
return ca.fixed;
|
||||
},
|
||||
[](const Impure &) {
|
||||
return false;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
bool derivationHasKnownOutputPaths(DerivationType dt) {
|
||||
switch (dt) {
|
||||
case DerivationType::InputAddressed: return true;
|
||||
case DerivationType::CAFixed: return true;
|
||||
case DerivationType::CAFloating: return false;
|
||||
case DerivationType::DeferredInputAddressed: return false;
|
||||
};
|
||||
assert(false);
|
||||
bool DerivationType::hasKnownOutputPaths() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const InputAddressed & ia) {
|
||||
return !ia.deferred;
|
||||
},
|
||||
[](const ContentAddressed & ca) {
|
||||
return ca.fixed;
|
||||
},
|
||||
[](const Impure &) {
|
||||
return false;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
|
||||
bool derivationIsImpure(DerivationType dt) {
|
||||
switch (dt) {
|
||||
case DerivationType::InputAddressed: return false;
|
||||
case DerivationType::CAFixed: return true;
|
||||
case DerivationType::CAFloating: return false;
|
||||
case DerivationType::DeferredInputAddressed: return false;
|
||||
};
|
||||
assert(false);
|
||||
bool DerivationType::isSandboxed() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const InputAddressed & ia) {
|
||||
return true;
|
||||
},
|
||||
[](const ContentAddressed & ca) {
|
||||
return ca.sandboxed;
|
||||
},
|
||||
[](const Impure &) {
|
||||
return false;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
|
||||
bool DerivationType::isPure() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const InputAddressed & ia) {
|
||||
return true;
|
||||
},
|
||||
[](const ContentAddressed & ca) {
|
||||
return true;
|
||||
},
|
||||
[](const Impure &) {
|
||||
return false;
|
||||
},
|
||||
}, raw());
|
||||
}
|
||||
|
||||
|
||||
|
|
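
The DerivationType and DerivationOutput refactoring above leans heavily on the C++17 "overloaded" visitor idiom (`std::visit(overloaded { ... }, raw())`). The following self-contained sketch shows that idiom with placeholder types standing in for the real Nix variant alternatives; it illustrates the pattern, not Nix's actual definitions.

#include <iostream>
#include <variant>

// Standard C++17 helper: aggregate several lambdas into one overload set.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Placeholder stand-ins for the variant alternatives used in this commit.
struct InputAddressed { bool deferred = false; };
struct ContentAddressed { bool sandboxed = false; bool fixed = false; };
struct Impure { };

using Raw = std::variant<InputAddressed, ContentAddressed, Impure>;

// Mirrors the shape of DerivationType::isCA() above: one lambda per
// alternative, so a missing case fails to compile instead of silently
// falling through as with a switch over a plain enum.
bool isCA(const Raw & raw)
{
    return std::visit(overloaded {
        [](const InputAddressed &) { return false; },
        [](const ContentAddressed &) { return true; },
        [](const Impure &) { return true; },
    }, raw);
}

int main()
{
    std::cout << isCA(ContentAddressed{false, true}) << "\n"; // prints 1
    std::cout << isCA(InputAddressed{}) << "\n";              // prints 0
}
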
@ -177,37 +219,36 @@ static DerivationOutput parseDerivationOutput(const Store & store,
|
|||
hashAlgo = hashAlgo.substr(2);
|
||||
}
|
||||
const auto hashType = parseHashType(hashAlgo);
|
||||
if (hash != "") {
|
||||
if (hash == "impure") {
|
||||
settings.requireExperimentalFeature(Xp::ImpureDerivations);
|
||||
assert(pathS == "");
|
||||
return DerivationOutput::Impure {
|
||||
.method = std::move(method),
|
||||
.hashType = std::move(hashType),
|
||||
};
|
||||
} else if (hash != "") {
|
||||
validatePath(pathS);
|
||||
return DerivationOutput {
|
||||
.output = DerivationOutputCAFixed {
|
||||
.hash = FixedOutputHash {
|
||||
.method = std::move(method),
|
||||
.hash = Hash::parseNonSRIUnprefixed(hash, hashType),
|
||||
},
|
||||
return DerivationOutput::CAFixed {
|
||||
.hash = FixedOutputHash {
|
||||
.method = std::move(method),
|
||||
.hash = Hash::parseNonSRIUnprefixed(hash, hashType),
|
||||
},
|
||||
};
|
||||
} else {
|
||||
settings.requireExperimentalFeature(Xp::CaDerivations);
|
||||
assert(pathS == "");
|
||||
return DerivationOutput {
|
||||
.output = DerivationOutputCAFloating {
|
||||
.method = std::move(method),
|
||||
.hashType = std::move(hashType),
|
||||
},
|
||||
return DerivationOutput::CAFloating {
|
||||
.method = std::move(method),
|
||||
.hashType = std::move(hashType),
|
||||
};
|
||||
}
|
||||
} else {
|
||||
if (pathS == "") {
|
||||
return DerivationOutput {
|
||||
.output = DerivationOutputDeferred { }
|
||||
};
|
||||
return DerivationOutput::Deferred { };
|
||||
}
|
||||
validatePath(pathS);
|
||||
return DerivationOutput {
|
||||
.output = DerivationOutputInputAddressed {
|
||||
.path = store.parseStorePath(pathS),
|
||||
}
|
||||
return DerivationOutput::InputAddressed {
|
||||
.path = store.parseStorePath(pathS),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
@ -335,27 +376,33 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
|
|||
if (first) first = false; else s += ',';
|
||||
s += '('; printUnquotedString(s, i.first);
|
||||
std::visit(overloaded {
|
||||
[&](const DerivationOutputInputAddressed & doi) {
|
||||
[&](const DerivationOutput::InputAddressed & doi) {
|
||||
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(doi.path));
|
||||
s += ','; printUnquotedString(s, "");
|
||||
s += ','; printUnquotedString(s, "");
|
||||
},
|
||||
[&](const DerivationOutputCAFixed & dof) {
|
||||
[&](const DerivationOutput::CAFixed & dof) {
|
||||
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(dof.path(store, name, i.first)));
|
||||
s += ','; printUnquotedString(s, dof.hash.printMethodAlgo());
|
||||
s += ','; printUnquotedString(s, dof.hash.hash.to_string(Base16, false));
|
||||
},
|
||||
[&](const DerivationOutputCAFloating & dof) {
|
||||
[&](const DerivationOutput::CAFloating & dof) {
|
||||
s += ','; printUnquotedString(s, "");
|
||||
s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
|
||||
s += ','; printUnquotedString(s, "");
|
||||
},
|
||||
[&](const DerivationOutputDeferred &) {
|
||||
[&](const DerivationOutput::Deferred &) {
|
||||
s += ','; printUnquotedString(s, "");
|
||||
s += ','; printUnquotedString(s, "");
|
||||
s += ','; printUnquotedString(s, "");
|
||||
},
|
||||
[&](const DerivationOutputImpure & doi) {
|
||||
// FIXME
|
||||
s += ','; printUnquotedString(s, "");
|
||||
s += ','; printUnquotedString(s, makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType));
|
||||
s += ','; printUnquotedString(s, "impure");
|
||||
}
|
||||
}, i.second.output);
|
||||
}, i.second.raw());
|
||||
s += ')';
|
||||
}
|
||||
|
||||
|
|
@ -419,49 +466,100 @@ std::string outputPathName(std::string_view drvName, std::string_view outputName
|
|||
|
||||
DerivationType BasicDerivation::type() const
|
||||
{
|
||||
std::set<std::string_view> inputAddressedOutputs, fixedCAOutputs, floatingCAOutputs, deferredIAOutputs;
|
||||
std::set<std::string_view>
|
||||
inputAddressedOutputs,
|
||||
fixedCAOutputs,
|
||||
floatingCAOutputs,
|
||||
deferredIAOutputs,
|
||||
impureOutputs;
|
||||
std::optional<HashType> floatingHashType;
|
||||
|
||||
for (auto & i : outputs) {
|
||||
std::visit(overloaded {
|
||||
[&](const DerivationOutputInputAddressed &) {
|
||||
[&](const DerivationOutput::InputAddressed &) {
|
||||
inputAddressedOutputs.insert(i.first);
|
||||
},
|
||||
[&](const DerivationOutputCAFixed &) {
|
||||
[&](const DerivationOutput::CAFixed &) {
|
||||
fixedCAOutputs.insert(i.first);
|
||||
},
|
||||
[&](const DerivationOutputCAFloating & dof) {
|
||||
[&](const DerivationOutput::CAFloating & dof) {
|
||||
floatingCAOutputs.insert(i.first);
|
||||
if (!floatingHashType) {
|
||||
floatingHashType = dof.hashType;
|
||||
} else {
|
||||
if (*floatingHashType != dof.hashType)
|
||||
throw Error("All floating outputs must use the same hash type");
|
||||
throw Error("all floating outputs must use the same hash type");
|
||||
}
|
||||
},
|
||||
[&](const DerivationOutputDeferred &) {
|
||||
deferredIAOutputs.insert(i.first);
|
||||
[&](const DerivationOutput::Deferred &) {
|
||||
deferredIAOutputs.insert(i.first);
|
||||
},
|
||||
}, i.second.output);
|
||||
[&](const DerivationOutput::Impure &) {
|
||||
impureOutputs.insert(i.first);
|
||||
},
|
||||
}, i.second.raw());
|
||||
}
|
||||
|
||||
if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
|
||||
throw Error("Must have at least one output");
|
||||
} else if (! inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
|
||||
return DerivationType::InputAddressed;
|
||||
} else if (inputAddressedOutputs.empty() && ! fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
|
||||
if (inputAddressedOutputs.empty()
|
||||
&& fixedCAOutputs.empty()
|
||||
&& floatingCAOutputs.empty()
|
||||
&& deferredIAOutputs.empty()
|
||||
&& impureOutputs.empty())
|
||||
throw Error("must have at least one output");
|
||||
|
||||
if (!inputAddressedOutputs.empty()
|
||||
&& fixedCAOutputs.empty()
|
||||
&& floatingCAOutputs.empty()
|
||||
&& deferredIAOutputs.empty()
|
||||
&& impureOutputs.empty())
|
||||
return DerivationType::InputAddressed {
|
||||
.deferred = false,
|
||||
};
|
||||
|
||||
if (inputAddressedOutputs.empty()
|
||||
&& !fixedCAOutputs.empty()
|
||||
&& floatingCAOutputs.empty()
|
||||
&& deferredIAOutputs.empty()
|
||||
&& impureOutputs.empty())
|
||||
{
|
||||
if (fixedCAOutputs.size() > 1)
|
||||
// FIXME: Experimental feature?
|
||||
throw Error("Only one fixed output is allowed for now");
|
||||
throw Error("only one fixed output is allowed for now");
|
||||
if (*fixedCAOutputs.begin() != "out")
|
||||
throw Error("Single fixed output must be named \"out\"");
|
||||
return DerivationType::CAFixed;
|
||||
} else if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && ! floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
|
||||
return DerivationType::CAFloating;
|
||||
} else if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && !deferredIAOutputs.empty()) {
|
||||
return DerivationType::DeferredInputAddressed;
|
||||
} else {
|
||||
throw Error("Can't mix derivation output types");
|
||||
throw Error("single fixed output must be named \"out\"");
|
||||
return DerivationType::ContentAddressed {
|
||||
.sandboxed = false,
|
||||
.fixed = true,
|
||||
};
|
||||
}
|
||||
|
||||
if (inputAddressedOutputs.empty()
|
||||
&& fixedCAOutputs.empty()
|
||||
&& !floatingCAOutputs.empty()
|
||||
&& deferredIAOutputs.empty()
|
||||
&& impureOutputs.empty())
|
||||
return DerivationType::ContentAddressed {
|
||||
.sandboxed = true,
|
||||
.fixed = false,
|
||||
};
|
||||
|
||||
if (inputAddressedOutputs.empty()
|
||||
&& fixedCAOutputs.empty()
|
||||
&& floatingCAOutputs.empty()
|
||||
&& !deferredIAOutputs.empty()
|
||||
&& impureOutputs.empty())
|
||||
return DerivationType::InputAddressed {
|
||||
.deferred = true,
|
||||
};
|
||||
|
||||
if (inputAddressedOutputs.empty()
|
||||
&& fixedCAOutputs.empty()
|
||||
&& floatingCAOutputs.empty()
|
||||
&& deferredIAOutputs.empty()
|
||||
&& !impureOutputs.empty())
|
||||
return DerivationType::Impure { };
|
||||
|
||||
throw Error("can't mix derivation output types");
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -473,7 +571,7 @@ Sync<DrvHashes> drvHashes;
|
|||
/* Look up the derivation by value and memoize the
|
||||
`hashDerivationModulo` call.
|
||||
*/
|
||||
static const DrvHashModulo pathDerivationModulo(Store & store, const StorePath & drvPath)
|
||||
static const DrvHash pathDerivationModulo(Store & store, const StorePath & drvPath)
|
||||
{
|
||||
{
|
||||
auto hashes = drvHashes.lock();
|
||||
|
|
@ -508,88 +606,85 @@ static const DrvHashModulo pathDerivationModulo(Store & store, const StorePath &
|
|||
don't leak the provenance of fixed outputs, reducing pointless cache
|
||||
misses as the build itself won't know this.
|
||||
*/
|
||||
DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs)
|
||||
DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs)
|
||||
{
|
||||
bool isDeferred = false;
|
||||
auto type = drv.type();
|
||||
|
||||
/* Return a fixed hash for fixed-output derivations. */
|
||||
switch (drv.type()) {
|
||||
case DerivationType::CAFixed: {
|
||||
if (type.isFixed()) {
|
||||
std::map<std::string, Hash> outputHashes;
|
||||
for (const auto & i : drv.outputs) {
|
||||
auto & dof = std::get<DerivationOutputCAFixed>(i.second.output);
|
||||
auto & dof = std::get<DerivationOutput::CAFixed>(i.second.raw());
|
||||
auto hash = hashString(htSHA256, "fixed:out:"
|
||||
+ dof.hash.printMethodAlgo() + ":"
|
||||
+ dof.hash.hash.to_string(Base16, false) + ":"
|
||||
+ store.printStorePath(dof.path(store, drv.name, i.first)));
|
||||
outputHashes.insert_or_assign(i.first, std::move(hash));
|
||||
}
|
||||
return outputHashes;
|
||||
}
|
||||
case DerivationType::CAFloating:
|
||||
isDeferred = true;
|
||||
break;
|
||||
case DerivationType::InputAddressed:
|
||||
break;
|
||||
case DerivationType::DeferredInputAddressed:
|
||||
break;
|
||||
return DrvHash {
|
||||
.hashes = outputHashes,
|
||||
.kind = DrvHash::Kind::Regular,
|
||||
};
|
||||
}
|
||||
|
||||
/* For other derivations, replace the inputs paths with recursive
|
||||
calls to this function. */
|
||||
if (!type.isPure()) {
|
||||
std::map<std::string, Hash> outputHashes;
|
||||
for (const auto & [outputName, _] : drv.outputs)
|
||||
outputHashes.insert_or_assign(outputName, impureOutputHash);
|
||||
return DrvHash {
|
||||
.hashes = outputHashes,
|
||||
.kind = DrvHash::Kind::Deferred,
|
||||
};
|
||||
}
|
||||
|
||||
auto kind = std::visit(overloaded {
|
||||
[](const DerivationType::InputAddressed & ia) {
|
||||
/* This might be a "pesimistically" deferred output, so we don't
|
||||
"taint" the kind yet. */
|
||||
return DrvHash::Kind::Regular;
|
||||
},
|
||||
[](const DerivationType::ContentAddressed & ca) {
|
||||
return ca.fixed
|
||||
? DrvHash::Kind::Regular
|
||||
: DrvHash::Kind::Deferred;
|
||||
},
|
||||
[](const DerivationType::Impure &) -> DrvHash::Kind {
|
||||
assert(false);
|
||||
}
|
||||
}, drv.type().raw());
|
||||
|
||||
std::map<std::string, StringSet> inputs2;
|
||||
for (auto & i : drv.inputDrvs) {
|
||||
const auto & res = pathDerivationModulo(store, i.first);
|
||||
std::visit(overloaded {
|
||||
// Regular non-CA derivation, replace derivation
|
||||
[&](const Hash & drvHash) {
|
||||
inputs2.insert_or_assign(drvHash.to_string(Base16, false), i.second);
|
||||
},
|
||||
[&](const DeferredHash & deferredHash) {
|
||||
isDeferred = true;
|
||||
inputs2.insert_or_assign(deferredHash.hash.to_string(Base16, false), i.second);
|
||||
},
|
||||
// CA derivation's output hashes
|
||||
[&](const CaOutputHashes & outputHashes) {
|
||||
std::set<std::string> justOut = { "out" };
|
||||
for (auto & output : i.second) {
|
||||
/* Put each one in with a single "out" output.. */
|
||||
const auto h = outputHashes.at(output);
|
||||
inputs2.insert_or_assign(
|
||||
h.to_string(Base16, false),
|
||||
justOut);
|
||||
}
|
||||
},
|
||||
}, res);
|
||||
for (auto & [drvPath, inputOutputs0] : drv.inputDrvs) {
|
||||
// Avoid lambda capture restriction with standard / Clang
|
||||
auto & inputOutputs = inputOutputs0;
|
||||
const auto & res = pathDerivationModulo(store, drvPath);
|
||||
if (res.kind == DrvHash::Kind::Deferred)
|
||||
kind = DrvHash::Kind::Deferred;
|
||||
for (auto & outputName : inputOutputs) {
|
||||
const auto h = get(res.hashes, outputName);
|
||||
if (!h)
|
||||
throw Error("no hash for output '%s' of derivation '%s'", outputName, drv.name);
|
||||
inputs2[h->to_string(Base16, false)].insert(outputName);
|
||||
}
|
||||
}
|
||||
|
||||
auto hash = hashString(htSHA256, drv.unparse(store, maskOutputs, &inputs2));
|
||||
|
||||
if (isDeferred)
|
||||
return DeferredHash { hash };
|
||||
else
|
||||
return hash;
|
||||
std::map<std::string, Hash> outputHashes;
|
||||
for (const auto & [outputName, _] : drv.outputs) {
|
||||
outputHashes.insert_or_assign(outputName, hash);
|
||||
}
|
||||
|
||||
return DrvHash {
|
||||
.hashes = outputHashes,
|
||||
.kind = kind,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv)
|
||||
{
|
||||
std::map<std::string, Hash> res;
|
||||
std::visit(overloaded {
|
||||
[&](const Hash & drvHash) {
|
||||
for (auto & outputName : drv.outputNames()) {
|
||||
res.insert({outputName, drvHash});
|
||||
}
|
||||
},
|
||||
[&](const DeferredHash & deferredHash) {
|
||||
for (auto & outputName : drv.outputNames()) {
|
||||
res.insert({outputName, deferredHash.hash});
|
||||
}
|
||||
},
|
||||
[&](const CaOutputHashes & outputHashes) {
|
||||
res = outputHashes;
|
||||
},
|
||||
}, hashDerivationModulo(store, drv, true));
|
||||
return res;
|
||||
return hashDerivationModulo(store, drv, true).hashes;
|
||||
}
|
||||
|
||||
|
||||
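
The rewritten hashDerivationModulo and staticOutputHashes above build and consume a DrvHash aggregate whose definition is not part of this excerpt. The sketch below only records the shape implied by the construction sites (`.hashes`, `.kind = DrvHash::Kind::Regular/Deferred`); it is an assumption for illustration, not the actual header.

#include <map>
#include <string>

// Placeholder for nix::Hash, just to keep this sketch self-contained.
struct Hash { std::string base16; };

// Assumed shape of DrvHash: one hash per output name, plus a flag saying
// whether the hash is final or still deferred on unresolved inputs.
struct DrvHash
{
    std::map<std::string, Hash> hashes;
    enum class Kind { Regular, Deferred } kind;
};

int main()
{
    DrvHash h{ { { "out", Hash{ "dummy-digest" } } }, DrvHash::Kind::Regular };
    return h.kind == DrvHash::Kind::Deferred ? 1 : 0;
}
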
|
|
@ -616,7 +711,8 @@ StringSet BasicDerivation::outputNames() const
|
|||
return names;
|
||||
}
|
||||
|
||||
DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const Store & store) const {
|
||||
DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const Store & store) const
|
||||
{
|
||||
DerivationOutputsAndOptPaths outsAndOptPaths;
|
||||
for (auto output : outputs)
|
||||
outsAndOptPaths.insert(std::make_pair(
|
||||
|
|
@ -627,7 +723,8 @@ DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const Store & s
|
|||
return outsAndOptPaths;
|
||||
}
|
||||
|
||||
std::string_view BasicDerivation::nameFromPath(const StorePath & drvPath) {
|
||||
std::string_view BasicDerivation::nameFromPath(const StorePath & drvPath)
|
||||
{
|
||||
auto nameWithSuffix = drvPath.name();
|
||||
constexpr std::string_view extension = ".drv";
|
||||
assert(hasSuffix(nameWithSuffix, extension));
|
||||
|
|
@ -669,27 +766,32 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
|
|||
for (auto & i : drv.outputs) {
|
||||
out << i.first;
|
||||
std::visit(overloaded {
|
||||
[&](const DerivationOutputInputAddressed & doi) {
|
||||
[&](const DerivationOutput::InputAddressed & doi) {
|
||||
out << store.printStorePath(doi.path)
|
||||
<< ""
|
||||
<< "";
|
||||
},
|
||||
[&](const DerivationOutputCAFixed & dof) {
|
||||
[&](const DerivationOutput::CAFixed & dof) {
|
||||
out << store.printStorePath(dof.path(store, drv.name, i.first))
|
||||
<< dof.hash.printMethodAlgo()
|
||||
<< dof.hash.hash.to_string(Base16, false);
|
||||
},
|
||||
[&](const DerivationOutputCAFloating & dof) {
|
||||
[&](const DerivationOutput::CAFloating & dof) {
|
||||
out << ""
|
||||
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
|
||||
<< "";
|
||||
},
|
||||
[&](const DerivationOutputDeferred &) {
|
||||
[&](const DerivationOutput::Deferred &) {
|
||||
out << ""
|
||||
<< ""
|
||||
<< "";
|
||||
},
|
||||
}, i.second.output);
|
||||
[&](const DerivationOutput::Impure & doi) {
|
||||
out << ""
|
||||
<< (makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType))
|
||||
<< "impure";
|
||||
},
|
||||
}, i.second.raw());
|
||||
}
|
||||
worker_proto::write(store, out, drv.inputSrcs);
|
||||
out << drv.platform << drv.builder << drv.args;
|
||||
|
|
@ -714,21 +816,19 @@ std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath
|
|||
}
|
||||
|
||||
|
||||
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites) {
|
||||
|
||||
debug("Rewriting the derivation");
|
||||
|
||||
for (auto &rewrite: rewrites) {
|
||||
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites)
|
||||
{
|
||||
for (auto & rewrite : rewrites) {
|
||||
debug("rewriting %s as %s", rewrite.first, rewrite.second);
|
||||
}
|
||||
|
||||
drv.builder = rewriteStrings(drv.builder, rewrites);
|
||||
for (auto & arg: drv.args) {
|
||||
for (auto & arg : drv.args) {
|
||||
arg = rewriteStrings(arg, rewrites);
|
||||
}
|
||||
|
||||
StringPairs newEnv;
|
||||
for (auto & envVar: drv.env) {
|
||||
for (auto & envVar : drv.env) {
|
||||
auto envName = rewriteStrings(envVar.first, rewrites);
|
||||
auto envValue = rewriteStrings(envVar.second, rewrites);
|
||||
newEnv.emplace(envName, envValue);
|
||||
|
|
@ -737,43 +837,55 @@ static void rewriteDerivation(Store & store, BasicDerivation & drv, const String
|
|||
|
||||
auto hashModulo = hashDerivationModulo(store, Derivation(drv), true);
|
||||
for (auto & [outputName, output] : drv.outputs) {
|
||||
if (std::holds_alternative<DerivationOutputDeferred>(output.output)) {
|
||||
Hash h = std::get<Hash>(hashModulo);
|
||||
auto outPath = store.makeOutputPath(outputName, h, drv.name);
|
||||
if (std::holds_alternative<DerivationOutput::Deferred>(output.raw())) {
|
||||
auto h = get(hashModulo.hashes, outputName);
|
||||
if (!h)
|
||||
throw Error("derivation '%s' output '%s' has no hash (derivations.cc/rewriteDerivation)",
|
||||
drv.name, outputName);
|
||||
auto outPath = store.makeOutputPath(outputName, *h, drv.name);
|
||||
drv.env[outputName] = store.printStorePath(outPath);
|
||||
output = DerivationOutput {
|
||||
.output = DerivationOutputInputAddressed {
|
||||
.path = std::move(outPath),
|
||||
},
|
||||
output = DerivationOutput::InputAddressed {
|
||||
.path = std::move(outPath),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::optional<BasicDerivation> Derivation::tryResolve(Store & store) {
|
||||
std::optional<BasicDerivation> Derivation::tryResolve(Store & store) const
|
||||
{
|
||||
std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;
|
||||
|
||||
for (auto & input : inputDrvs)
|
||||
for (auto & [outputName, outputPath] : store.queryPartialDerivationOutputMap(input.first))
|
||||
if (outputPath)
|
||||
inputDrvOutputs.insert_or_assign({input.first, outputName}, *outputPath);
|
||||
|
||||
return tryResolve(store, inputDrvOutputs);
|
||||
}
|
||||
|
||||
std::optional<BasicDerivation> Derivation::tryResolve(
|
||||
Store & store,
|
||||
const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const
|
||||
{
|
||||
BasicDerivation resolved { *this };
|
||||
|
||||
// Input paths that we'll want to rewrite in the derivation
|
||||
StringMap inputRewrites;
|
||||
|
||||
for (auto & input : inputDrvs) {
|
||||
auto inputDrvOutputs = store.queryPartialDerivationOutputMap(input.first);
|
||||
StringSet newOutputNames;
|
||||
for (auto & outputName : input.second) {
|
||||
auto actualPathOpt = inputDrvOutputs.at(outputName);
|
||||
if (!actualPathOpt) {
|
||||
warn("output %s of input %s missing, aborting the resolving",
|
||||
for (auto & [inputDrv, inputOutputs] : inputDrvs) {
|
||||
for (auto & outputName : inputOutputs) {
|
||||
if (auto actualPath = get(inputDrvOutputs, { inputDrv, outputName })) {
|
||||
inputRewrites.emplace(
|
||||
downstreamPlaceholder(store, inputDrv, outputName),
|
||||
store.printStorePath(*actualPath));
|
||||
resolved.inputSrcs.insert(*actualPath);
|
||||
} else {
|
||||
warn("output '%s' of input '%s' missing, aborting the resolving",
|
||||
outputName,
|
||||
store.printStorePath(input.first)
|
||||
);
|
||||
return std::nullopt;
|
||||
store.printStorePath(inputDrv));
|
||||
return {};
|
||||
}
|
||||
auto actualPath = *actualPathOpt;
|
||||
inputRewrites.emplace(
|
||||
downstreamPlaceholder(store, input.first, outputName),
|
||||
store.printStorePath(actualPath));
|
||||
resolved.inputSrcs.insert(std::move(actualPath));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -782,4 +894,6 @@ std::optional<BasicDerivation> Derivation::tryResolve(Store & store) {
|
|||
return resolved;
|
||||
}
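As a usage illustration (not from the commit), a caller would typically resolve a derivation against the store's known realisations before handing it to a builder; the wrapper function below is a hypothetical sketch.

```cpp
// Hedged sketch: resolve input placeholders in 'drv', or signal that some
// input output still needs to be built first.
std::optional<nix::BasicDerivation> resolveForBuilding(
    nix::Store & store, const nix::Derivation & drv)
{
    if (auto resolved = drv.tryResolve(store))
        return resolved;      // all input outputs were known
    return std::nullopt;      // some input output is missing
}
```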
|
||||
|
||||
const Hash impureOutputHash = hashString(htSHA256, "impure");
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
#include "types.hh"
|
||||
#include "hash.hh"
|
||||
#include "content-address.hh"
|
||||
#include "repair-flag.hh"
|
||||
#include "sync.hh"
|
||||
|
||||
#include <map>
|
||||
|
|
@ -40,23 +41,47 @@ struct DerivationOutputCAFloating
|
|||
};
|
||||
|
||||
/* Input-addressed output which depends on a (CA) derivation whose hash isn't
|
||||
* known atm
|
||||
* known yet.
|
||||
*/
|
||||
struct DerivationOutputDeferred {};
|
||||
|
||||
struct DerivationOutput
|
||||
/* Impure output which is moved to a content-addressed location (like
|
||||
CAFloating) but isn't registered as a realization.
|
||||
*/
|
||||
struct DerivationOutputImpure
|
||||
{
|
||||
std::variant<
|
||||
DerivationOutputInputAddressed,
|
||||
DerivationOutputCAFixed,
|
||||
DerivationOutputCAFloating,
|
||||
DerivationOutputDeferred
|
||||
> output;
|
||||
/* information used for expected hash computation */
|
||||
FileIngestionMethod method;
|
||||
HashType hashType;
|
||||
};
|
||||
|
||||
typedef std::variant<
|
||||
DerivationOutputInputAddressed,
|
||||
DerivationOutputCAFixed,
|
||||
DerivationOutputCAFloating,
|
||||
DerivationOutputDeferred,
|
||||
DerivationOutputImpure
|
||||
> _DerivationOutputRaw;
|
||||
|
||||
struct DerivationOutput : _DerivationOutputRaw
|
||||
{
|
||||
using Raw = _DerivationOutputRaw;
|
||||
using Raw::Raw;
|
||||
|
||||
using InputAddressed = DerivationOutputInputAddressed;
|
||||
using CAFixed = DerivationOutputCAFixed;
|
||||
using CAFloating = DerivationOutputCAFloating;
|
||||
using Deferred = DerivationOutputDeferred;
|
||||
using Impure = DerivationOutputImpure;
|
||||
|
||||
/* Note, when you use this function you should make sure that you're passing
|
||||
the right derivation name. When in doubt, you should use the safer
|
||||
interface provided by BasicDerivation::outputsAndOptPaths */
|
||||
std::optional<StorePath> path(const Store & store, std::string_view drvName, std::string_view outputName) const;
|
||||
|
||||
inline const Raw & raw() const {
|
||||
return static_cast<const Raw &>(*this);
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, DerivationOutput> DerivationOutputs;
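To illustrate the new variant wrapper (a sketch, not part of the diff): callers keep pattern-matching on raw() with overloaded, for example to decide whether an output path is statically known. Only raw() and the nested aliases come from the header above; the helper itself is assumed.

```cpp
// Hypothetical helper showing the intended pattern.
bool outputPathKnownStatically(const nix::DerivationOutput & output)
{
    return std::visit(nix::overloaded {
        [](const nix::DerivationOutput::InputAddressed &) { return true;  },
        [](const nix::DerivationOutput::CAFixed &)        { return true;  },
        [](const nix::DerivationOutput::CAFloating &)     { return false; },
        [](const nix::DerivationOutput::Deferred &)       { return false; },
        [](const nix::DerivationOutput::Impure &)         { return false; },
    }, output.raw());
}
```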
|
||||
|
|
@ -72,30 +97,62 @@ typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePat
|
|||
output IDs we are interested in. */
|
||||
typedef std::map<StorePath, StringSet> DerivationInputs;
|
||||
|
||||
enum struct DerivationType : uint8_t {
|
||||
InputAddressed,
|
||||
DeferredInputAddressed,
|
||||
CAFixed,
|
||||
CAFloating,
|
||||
struct DerivationType_InputAddressed {
|
||||
bool deferred;
|
||||
};
|
||||
|
||||
/* Do the outputs of the derivation have paths calculated from their content,
|
||||
or from the derivation itself? */
|
||||
bool derivationIsCA(DerivationType);
|
||||
struct DerivationType_ContentAddressed {
|
||||
bool sandboxed;
|
||||
bool fixed;
|
||||
};
|
||||
|
||||
/* Is the content of the outputs fixed a-priori via a hash? Never true for
|
||||
non-CA derivations. */
|
||||
bool derivationIsFixed(DerivationType);
|
||||
struct DerivationType_Impure {
|
||||
};
|
||||
|
||||
/* Is the derivation impure and needs to access non-deterministic resources, or
|
||||
pure and can be sandboxed? Note that whether or not we actually sandbox the
|
||||
derivation is controlled separately. Never true for non-CA derivations. */
|
||||
bool derivationIsImpure(DerivationType);
|
||||
typedef std::variant<
|
||||
DerivationType_InputAddressed,
|
||||
DerivationType_ContentAddressed,
|
||||
DerivationType_Impure
|
||||
> _DerivationTypeRaw;
|
||||
|
||||
/* Does the derivation know its own output paths?
|
||||
* Only true when there's no floating-ca derivation involved in the closure.
|
||||
*/
|
||||
bool derivationHasKnownOutputPaths(DerivationType);
|
||||
struct DerivationType : _DerivationTypeRaw {
|
||||
using Raw = _DerivationTypeRaw;
|
||||
using Raw::Raw;
|
||||
using InputAddressed = DerivationType_InputAddressed;
|
||||
using ContentAddressed = DerivationType_ContentAddressed;
|
||||
using Impure = DerivationType_Impure;
|
||||
|
||||
/* Do the outputs of the derivation have paths calculated from their content,
|
||||
or from the derivation itself? */
|
||||
bool isCA() const;
|
||||
|
||||
/* Is the content of the outputs fixed a-priori via a hash? Never true for
|
||||
non-CA derivations. */
|
||||
bool isFixed() const;
|
||||
|
||||
/* Whether the derivation is fully sandboxed. If false, the
|
||||
sandbox is opened up, e.g. the derivation has access to the
|
||||
network. Note that whether or not we actually sandbox the
|
||||
derivation is controlled separately. Always true for non-CA
|
||||
derivations. */
|
||||
bool isSandboxed() const;
|
||||
|
||||
/* Whether the derivation is expected to produce the same result
|
||||
every time, and therefore it only needs to be built once. This
|
||||
is only false for derivations that have the attribute '__impure
|
||||
= true'. */
|
||||
bool isPure() const;
|
||||
|
||||
/* Does the derivation know its own output paths?
|
||||
Only true when there's no floating-ca derivation involved in the
|
||||
closure, or if fixed output.
|
||||
*/
|
||||
bool hasKnownOutputPaths() const;
|
||||
|
||||
inline const Raw & raw() const {
|
||||
return static_cast<const Raw &>(*this);
|
||||
}
|
||||
};
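For orientation (illustrative only), the new predicate methods replace the old free functions; a worker might consult them like this, assuming the BasicDerivation::type() accessor used elsewhere in libstore.

```cpp
// Hedged sketch of consuming the new DerivationType interface.
void describe(const nix::BasicDerivation & drv)
{
    auto type = drv.type();
    if (!type.isPure())
        printInfo("impure derivation: rebuilt on every request");
    else if (type.isCA())
        printInfo(type.isFixed() ? "fixed-output CA derivation"
                                 : "floating CA derivation");
    if (!type.hasKnownOutputPaths())
        printInfo("output paths are not known until the derivation is resolved");
}
```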
|
||||
|
||||
struct BasicDerivation
|
||||
{
|
||||
|
|
@ -140,7 +197,14 @@ struct Derivation : BasicDerivation
|
|||
added directly to input sources.
|
||||
|
||||
2. Input placeholders are replaced with realized input store paths. */
|
||||
std::optional<BasicDerivation> tryResolve(Store & store);
|
||||
std::optional<BasicDerivation> tryResolve(Store & store) const;
|
||||
|
||||
/* Like the above, but instead of querying the Nix database for
|
||||
realisations, uses a given mapping from input derivation paths
|
||||
+ output names to actual output store paths. */
|
||||
std::optional<BasicDerivation> tryResolve(
|
||||
Store & store,
|
||||
const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const;
|
||||
|
||||
Derivation() = default;
|
||||
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
|
||||
|
|
@ -150,8 +214,6 @@ struct Derivation : BasicDerivation
|
|||
|
||||
class Store;
|
||||
|
||||
enum RepairFlag : bool { NoRepair = false, Repair = true };
|
||||
|
||||
/* Write a derivation to the Nix store, and return its path. */
|
||||
StorePath writeDerivation(Store & store,
|
||||
const Derivation & drv,
|
||||
|
|
@ -171,17 +233,27 @@ bool isDerivation(const std::string & fileName);
|
|||
the output name is "out". */
|
||||
std::string outputPathName(std::string_view drvName, std::string_view outputName);
|
||||
|
||||
// known CA drv's output hashes, currently just for fixed-output derivations
|
||||
// whose output hashes are always known since they are fixed up-front.
|
||||
typedef std::map<std::string, Hash> CaOutputHashes;
|
||||
|
||||
struct DeferredHash { Hash hash; };
|
||||
// The hashes modulo of a derivation.
|
||||
//
|
||||
// Each output is given a hash, although in practice only the content-addressed
|
||||
// derivations (fixed-output or not) will have a different hash for each
|
||||
// output.
|
||||
struct DrvHash {
|
||||
std::map<std::string, Hash> hashes;
|
||||
|
||||
typedef std::variant<
|
||||
Hash, // regular DRV normalized hash
|
||||
CaOutputHashes, // Fixed-output derivation hashes
|
||||
DeferredHash // Deferred hashes for floating outputs drvs and their dependencies
|
||||
> DrvHashModulo;
|
||||
enum struct Kind : bool {
|
||||
// Statically determined derivations.
|
||||
// This hash will be directly used to compute the output paths
|
||||
Regular,
|
||||
// Floating-output derivations (and their reverse dependencies).
|
||||
Deferred,
|
||||
};
|
||||
|
||||
Kind kind;
|
||||
};
|
||||
|
||||
void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;
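A small hedged example of the intended |= semantics for DrvHash::Kind (my reading of the declaration, not code from the commit): Deferred is absorbing, so a derivation whose inputs include any deferred hash stays deferred.

```cpp
#include <cassert>
#include "derivations.hh"

int main()
{
    auto kind = nix::DrvHash::Kind::Regular;
    kind |= nix::DrvHash::Kind::Regular;   // still Regular
    kind |= nix::DrvHash::Kind::Deferred;  // once Deferred, stays Deferred
    assert(kind == nix::DrvHash::Kind::Deferred);
}
```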
|
||||
|
||||
/* Returns hashes with the details of fixed-output subderivations
|
||||
expunged.
|
||||
|
|
@ -206,16 +278,18 @@ typedef std::variant<
|
|||
ATerm, after subderivations have been likewise expunged from that
|
||||
derivation.
|
||||
*/
|
||||
DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);
|
||||
DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);
|
||||
|
||||
/*
|
||||
Return a map associating each output to a hash that uniquely identifies its
|
||||
derivation (modulo the self-references).
|
||||
|
||||
FIXME: what is the Hash in this map?
|
||||
*/
|
||||
std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& drv);
|
||||
std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv);
|
||||
|
||||
/* Memoisation of hashDerivationModulo(). */
|
||||
typedef std::map<StorePath, DrvHashModulo> DrvHashes;
|
||||
typedef std::map<StorePath, DrvHash> DrvHashes;
|
||||
|
||||
// FIXME: global, though at least thread-safe.
|
||||
extern Sync<DrvHashes> drvHashes;
|
||||
|
|
@ -245,4 +319,6 @@ std::string hashPlaceholder(const std::string_view outputName);
|
|||
dependency which is a CA derivation. */
|
||||
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName);
|
||||
|
||||
extern const Hash impureOutputHash;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,11 @@
|
|||
#include "derived-path.hh"
|
||||
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace nix {
|
||||
|
||||
nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
|
||||
|
|
@ -11,6 +14,21 @@ nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
|
|||
return res;
|
||||
}
|
||||
|
||||
nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
|
||||
nlohmann::json res;
|
||||
res["drvPath"] = store->printStorePath(drvPath);
|
||||
// Fallback for the input-addressed derivation case: We expect to always be
|
||||
// able to print the output paths, so let’s do it
|
||||
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
|
||||
for (const auto& output : outputs) {
|
||||
auto knownOutput = get(knownOutputs, output);
|
||||
res["outputs"][output] = (knownOutput && *knownOutput)
|
||||
? store->printStorePath(**knownOutput)
|
||||
: nullptr;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const {
|
||||
nlohmann::json res;
|
||||
res["drvPath"] = store->printStorePath(drvPath);
|
||||
|
|
@ -35,16 +53,22 @@ StorePathSet BuiltPath::outPaths() const
|
|||
);
|
||||
}
|
||||
|
||||
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store) {
|
||||
template<typename T>
|
||||
nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
|
||||
auto res = nlohmann::json::array();
|
||||
for (const BuiltPath & buildable : buildables) {
|
||||
std::visit([&res, store](const auto & buildable) {
|
||||
res.push_back(buildable.toJSON(store));
|
||||
}, buildable.raw());
|
||||
for (const T & t : ts) {
|
||||
std::visit([&res, store](const auto & t) {
|
||||
res.push_back(t.toJSON(store));
|
||||
}, t.raw());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
|
||||
{ return stuffToJSON<BuiltPath>(buildables, store); }
|
||||
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
|
||||
{ return stuffToJSON<DerivedPath>(paths, store); }
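A brief usage sketch of the new helper (not in the diff; the surrounding function is assumed context):

```cpp
#include <iostream>
#include "derived-path.hh"
#include "store-api.hh"

// Hedged sketch: serialise a list of build targets as JSON and print it.
void printTargets(nix::ref<nix::Store> store, const nix::DerivedPaths & targets)
{
    std::cout << nix::derivedPathsToJSON(targets, store).dump() << std::endl;
}
```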
|
||||
|
||||
|
||||
std::string DerivedPath::Opaque::to_string(const Store & store) const {
|
||||
return store.printStorePath(path);
|
||||
|
|
@ -101,10 +125,15 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
|
|||
for (auto& [outputName, outputPath] : p.outputs) {
|
||||
if (settings.isExperimentalFeatureEnabled(
|
||||
Xp::CaDerivations)) {
|
||||
auto drvOutput = get(drvHashes, outputName);
|
||||
if (!drvOutput)
|
||||
throw Error(
|
||||
"the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)",
|
||||
store.printStorePath(p.drvPath), outputName);
|
||||
auto thisRealisation = store.queryRealisation(
|
||||
DrvOutput{drvHashes.at(outputName), outputName});
|
||||
assert(thisRealisation); // We’ve built it, so we must have the realisation
|
||||
DrvOutput{*drvOutput, outputName});
|
||||
assert(thisRealisation); // We’ve built it, so we must
|
||||
// have the realisation
|
||||
res.insert(*thisRealisation);
|
||||
} else {
|
||||
res.insert(outputPath);
|
||||
|
|
|
|||
|
|
@ -25,6 +25,9 @@ struct DerivedPathOpaque {
|
|||
nlohmann::json toJSON(ref<Store> store) const;
|
||||
std::string to_string(const Store & store) const;
|
||||
static DerivedPathOpaque parse(const Store & store, std::string_view);
|
||||
|
||||
bool operator < (const DerivedPathOpaque & b) const
|
||||
{ return path < b.path; }
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -45,6 +48,10 @@ struct DerivedPathBuilt {
|
|||
|
||||
std::string to_string(const Store & store) const;
|
||||
static DerivedPathBuilt parse(const Store & store, std::string_view);
|
||||
nlohmann::json toJSON(ref<Store> store) const;
|
||||
|
||||
bool operator < (const DerivedPathBuilt & b) const
|
||||
{ return std::make_pair(drvPath, outputs) < std::make_pair(b.drvPath, b.outputs); }
|
||||
};
|
||||
|
||||
using _DerivedPathRaw = std::variant<
|
||||
|
|
@ -119,5 +126,6 @@ typedef std::vector<DerivedPath> DerivedPaths;
|
|||
typedef std::vector<BuiltPath> BuiltPaths;
|
||||
|
||||
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
|
||||
nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -197,7 +197,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
result.etag = "";
|
||||
result.data.clear();
|
||||
result.bodySize = 0;
|
||||
statusMsg = trim((std::string &) match[1]);
|
||||
statusMsg = trim(match.str(1));
|
||||
acceptRanges = false;
|
||||
encoding = "";
|
||||
} else {
|
||||
|
|
@ -308,6 +308,9 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
|
||||
|
||||
if (settings.downloadSpeed.get() > 0)
|
||||
curl_easy_setopt(req, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t) (settings.downloadSpeed.get() * 1024));
|
||||
|
||||
if (request.head)
|
||||
curl_easy_setopt(req, CURLOPT_NOBODY, 1);
|
||||
|
||||
|
|
@ -319,7 +322,6 @@ struct curlFileTransfer : public FileTransfer
|
|||
}
|
||||
|
||||
if (request.verifyTLS) {
|
||||
debug("verify TLS: Nix CA file = '%s'", settings.caFile);
|
||||
if (settings.caFile != "")
|
||||
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
|
||||
} else {
|
||||
|
|
@ -443,14 +445,13 @@ struct curlFileTransfer : public FileTransfer
|
|||
: httpStatus != 0
|
||||
? FileTransferError(err,
|
||||
std::move(response),
|
||||
fmt("unable to %s '%s': HTTP error %d ('%s')",
|
||||
request.verb(), request.uri, httpStatus, statusMsg)
|
||||
+ (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
|
||||
)
|
||||
"unable to %s '%s': HTTP error %d%s",
|
||||
request.verb(), request.uri, httpStatus,
|
||||
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
|
||||
: FileTransferError(err,
|
||||
std::move(response),
|
||||
fmt("unable to %s '%s': %s (%d)",
|
||||
request.verb(), request.uri, curl_easy_strerror(code), code));
|
||||
"unable to %s '%s': %s (%d)",
|
||||
request.verb(), request.uri, curl_easy_strerror(code), code);
|
||||
|
||||
/* If this is a transient error, then maybe retry the
|
||||
download after a while. If we're writing to a
|
||||
|
|
@ -693,10 +694,10 @@ struct curlFileTransfer : public FileTransfer
|
|||
#if ENABLE_S3
|
||||
auto [bucketName, key, params] = parseS3Uri(request.uri);
|
||||
|
||||
std::string profile = get(params, "profile").value_or("");
|
||||
std::string region = get(params, "region").value_or(Aws::Region::US_EAST_1);
|
||||
std::string scheme = get(params, "scheme").value_or("");
|
||||
std::string endpoint = get(params, "endpoint").value_or("");
|
||||
std::string profile = getOr(params, "profile", "");
|
||||
std::string region = getOr(params, "region", Aws::Region::US_EAST_1);
|
||||
std::string scheme = getOr(params, "scheme", "");
|
||||
std::string endpoint = getOr(params, "endpoint", "");
|
||||
|
||||
S3Helper s3Helper(profile, region, scheme, endpoint);
|
||||
|
||||
|
|
@ -704,7 +705,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
auto s3Res = s3Helper.getObject(bucketName, key);
|
||||
FileTransferResult res;
|
||||
if (!s3Res.data)
|
||||
throw FileTransferError(NotFound, {}, "S3 object '%s' does not exist", request.uri);
|
||||
throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
|
||||
res.data = std::move(*s3Res.data);
|
||||
callback(std::move(res));
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ struct FileTransferSettings : Config
|
|||
R"(
|
||||
The timeout (in seconds) for establishing connections in the
|
||||
binary cache substituter. It corresponds to `curl`’s
|
||||
`--connect-timeout` option.
|
||||
`--connect-timeout` option. A value of 0 means no limit.
|
||||
)"};
|
||||
|
||||
Setting<unsigned long> stalledDownloadTimeout{
|
||||
|
|
@ -123,8 +123,6 @@ public:
|
|||
|
||||
template<typename... Args>
|
||||
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
|
||||
|
||||
virtual const char* sname() const override { return "FileTransferError"; }
|
||||
};
|
||||
|
||||
bool isUri(std::string_view s);
|
||||
|
|
|
|||
84
src/libstore/gc-store.hh
Normal file
|
|
@ -0,0 +1,84 @@
#pragma once

#include "store-api.hh"


namespace nix {


typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;


struct GCOptions
{
    /* Garbage collector operation:

       - `gcReturnLive': return the set of paths reachable from
         (i.e. in the closure of) the roots.

       - `gcReturnDead': return the set of paths not reachable from
         the roots.

       - `gcDeleteDead': actually delete the latter set.

       - `gcDeleteSpecific': delete the paths listed in
         `pathsToDelete', insofar as they are not reachable.
    */
    typedef enum {
        gcReturnLive,
        gcReturnDead,
        gcDeleteDead,
        gcDeleteSpecific,
    } GCAction;

    GCAction action{gcDeleteDead};

    /* If `ignoreLiveness' is set, then reachability from the roots is
       ignored (dangerous!). However, the paths must still be
       unreferenced *within* the store (i.e., there can be no other
       store paths that depend on them). */
    bool ignoreLiveness{false};

    /* For `gcDeleteSpecific', the paths to delete. */
    StorePathSet pathsToDelete;

    /* Stop after at least `maxFreed' bytes have been freed. */
    uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
};


struct GCResults
{
    /* Depending on the action, the GC roots, or the paths that would
       be or have been deleted. */
    PathSet paths;

    /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
       number of bytes that would be or was freed. */
    uint64_t bytesFreed = 0;
};


struct GcStore : public virtual Store
{
    inline static std::string operationName = "Garbage collection";

    /* Add an indirect root, which is merely a symlink to `path' from
       /nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
       to be a symlink to a store path. The garbage collector will
       automatically remove the indirect root when it finds that
       `path' has disappeared. */
    virtual void addIndirectRoot(const Path & path) = 0;

    /* Find the roots of the garbage collector. Each root is a pair
       (link, storepath) where `link' is the path of the symlink
       outside of the Nix store that points to `storePath'. If
       'censor' is true, privacy-sensitive information about roots
       found in /proc is censored. */
    virtual Roots findRoots(bool censor) = 0;

    /* Perform a garbage collection. */
    virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
};

}
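To show how the extracted interface is meant to be consumed (an illustrative sketch, not part of the commit), a caller downcasts to GcStore and drives collectGarbage(); the helper below and its error message are assumptions.

```cpp
// Hedged sketch: report what a GC run would delete, without deleting anything.
void previewGarbage(nix::Store & store)
{
    auto * gcStore = dynamic_cast<nix::GcStore *>(&store);
    if (!gcStore)
        throw nix::Error("store '%s' does not support garbage collection", store.getUri());

    nix::GCOptions options;
    options.action = nix::GCOptions::gcReturnDead;  // only report, don't delete

    nix::GCResults results;
    gcStore->collectGarbage(options, results);
    printInfo("%d store paths (%d bytes) are dead", results.paths.size(), results.bytesFreed);
}
```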
|
@ -39,9 +39,7 @@ static void makeSymlink(const Path & link, const Path & target)
|
|||
createSymlink(target, tempLink);
|
||||
|
||||
/* Atomically replace the old one. */
|
||||
if (rename(tempLink.c_str(), link.c_str()) == -1)
|
||||
throw SysError("cannot rename '%1%' to '%2%'",
|
||||
tempLink , link);
|
||||
renameFile(tempLink, link);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -135,6 +133,7 @@ void LocalStore::addTempRoot(const StorePath & path)
|
|||
state->fdRootsSocket.close();
|
||||
goto restart;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -153,6 +152,7 @@ void LocalStore::addTempRoot(const StorePath & path)
|
|||
state->fdRootsSocket.close();
|
||||
goto restart;
|
||||
}
|
||||
throw;
|
||||
} catch (EndOfFile & e) {
|
||||
debug("GC socket disconnected");
|
||||
state->fdRootsSocket.close();
|
||||
|
|
@ -619,6 +619,17 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
Path path = storeDir + "/" + std::string(baseName);
|
||||
Path realPath = realStoreDir + "/" + std::string(baseName);
|
||||
|
||||
/* There may be temp directories in the store that are still in use
|
||||
by another process. We need to be sure that we can acquire an
|
||||
exclusive lock before deleting them. */
|
||||
if (baseName.find("tmp-", 0) == 0) {
|
||||
AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
|
||||
if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
|
||||
debug("skipping locked tempdir '%s'", realPath);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
printInfo("deleting '%1%'", path);
|
||||
|
||||
results.paths.insert(path);
|
||||
|
|
@ -678,7 +689,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
alive.insert(start);
|
||||
try {
|
||||
StorePathSet closure;
|
||||
computeFSClosure(*path, closure);
|
||||
computeFSClosure(*path, closure,
|
||||
/* flipDirection */ false, gcKeepOutputs, gcKeepDerivations);
|
||||
for (auto & p : closure)
|
||||
alive.insert(p);
|
||||
} catch (InvalidPath &) { }
|
||||
|
|
@ -841,7 +853,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
if (unlink(path.c_str()) == -1)
|
||||
throw SysError("deleting '%1%'", path);
|
||||
|
||||
results.bytesFreed += st.st_size;
|
||||
/* Do not account for deleted file here. Rely on deletePath()
   accounting. */
|
||||
}
|
||||
|
||||
struct stat st;
|
||||
|
|
|
|||
|
|
@ -36,7 +36,6 @@ Settings::Settings()
|
|||
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
|
||||
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
|
||||
, nixUserConfFiles(getUserConfigFiles())
|
||||
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR)))
|
||||
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
|
||||
, nixManDir(canonPath(NIX_MAN_DIR))
|
||||
, nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
|
||||
|
|
@ -67,12 +66,13 @@ Settings::Settings()
|
|||
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
|
||||
#endif
|
||||
|
||||
|
||||
/* chroot-like behavior from Apple's sandbox */
|
||||
/* chroot-like behavior from Apple's sandbox */
|
||||
#if __APPLE__
|
||||
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
|
||||
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
|
||||
#endif
|
||||
|
||||
buildHook = getSelfExe().value_or("nix") + " __build-remote";
|
||||
}
|
||||
|
||||
void loadConfFile()
|
||||
|
|
@ -114,7 +114,13 @@ std::vector<Path> getUserConfigFiles()
|
|||
|
||||
unsigned int Settings::getDefaultCores()
|
||||
{
|
||||
return std::max(1U, std::thread::hardware_concurrency());
|
||||
const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency());
|
||||
const unsigned int maxCPU = getMaxCPU();
|
||||
|
||||
if (maxCPU > 0)
|
||||
return maxCPU;
|
||||
else
|
||||
return concurrency;
|
||||
}
|
||||
|
||||
StringSet Settings::getDefaultSystemFeatures()
|
||||
|
|
@ -148,13 +154,9 @@ StringSet Settings::getDefaultExtraPlatforms()
|
|||
// machines. Note that we can’t force processes from executing
|
||||
// x86_64 in aarch64 environments or vice versa since they can
|
||||
// always exec with their own binary preferences.
|
||||
if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist") ||
|
||||
pathExists("/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
|
||||
if (std::string{SYSTEM} == "x86_64-darwin")
|
||||
extraPlatforms.insert("aarch64-darwin");
|
||||
else if (std::string{SYSTEM} == "aarch64-darwin")
|
||||
extraPlatforms.insert("x86_64-darwin");
|
||||
}
|
||||
if (std::string{SYSTEM} == "aarch64-darwin" &&
|
||||
runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0)
|
||||
extraPlatforms.insert("x86_64-darwin");
|
||||
#endif
|
||||
|
||||
return extraPlatforms;
|
||||
|
|
|
|||
|
|
@ -79,9 +79,6 @@ public:
|
|||
/* A list of user configuration files to load. */
|
||||
std::vector<Path> nixUserConfFiles;
|
||||
|
||||
/* The directory where internal helper programs are stored. */
|
||||
Path nixLibexecDir;
|
||||
|
||||
/* The directory where the main programs are stored. */
|
||||
Path nixBinDir;
|
||||
|
||||
|
|
@ -195,7 +192,7 @@ public:
|
|||
)",
|
||||
{"build-timeout"}};
|
||||
|
||||
PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
|
||||
PathSetting buildHook{this, true, "", "build-hook",
|
||||
"The path of the helper program that executes builds to remote machines."};
|
||||
|
||||
Setting<std::string> builders{
|
||||
|
|
@ -576,9 +573,15 @@ public:
|
|||
R"(
|
||||
If set to `true` (the default), any non-content-addressed path added
|
||||
or copied to the Nix store (e.g. when substituting from a binary
|
||||
cache) must have a valid signature, that is, be signed using one of
|
||||
the keys listed in `trusted-public-keys` or `secret-key-files`. Set
|
||||
to `false` to disable signature checking.
|
||||
cache) must have a signature by a trusted key. A trusted key is one
|
||||
listed in `trusted-public-keys`, or a public key counterpart to a
|
||||
private key stored in a file listed in `secret-key-files`.
|
||||
|
||||
Set to `false` to disable signature checking and trust all
|
||||
non-content-addressed paths unconditionally.
|
||||
|
||||
(Content-addressed paths are inherently trustworthy and thus
|
||||
unaffected by this configuration option.)
|
||||
)"};
|
||||
|
||||
Setting<StringSet> extraPlatforms{
|
||||
|
|
@ -629,6 +632,14 @@ public:
|
|||
are tried based on their Priority value, which each substituter can set
|
||||
independently. Lower value means higher priority.
|
||||
The default is `https://cache.nixos.org`, with a Priority of 40.
|
||||
|
||||
Nix will copy a store path from a remote store only if one
|
||||
of the following is true:
|
||||
|
||||
- the store object is signed by one of the [`trusted-public-keys`](#conf-trusted-public-keys)
|
||||
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
|
||||
- the [`require-sigs`](#conf-require-sigs) option has been set to `false`
|
||||
- the store object is [output-addressed](glossary.md#gloss-output-addressed-store-object)
|
||||
)",
|
||||
{"binary-caches"}};
|
||||
|
||||
|
|
@ -762,6 +773,13 @@ public:
|
|||
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
|
||||
)"};
|
||||
|
||||
Setting<unsigned int> downloadSpeed {
|
||||
this, 0, "download-speed",
|
||||
R"(
|
||||
Specify the maximum transfer rate in kilobytes per second you want
|
||||
Nix to use for downloads.
|
||||
)"};
|
||||
|
||||
Setting<std::string> netrcFile{
|
||||
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
||||
R"(
|
||||
|
|
@ -815,7 +833,7 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<StringSet> ignoredAcls{
|
||||
this, {"security.selinux", "system.nfs4_acl"}, "ignored-acls",
|
||||
this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls",
|
||||
R"(
|
||||
A list of ACLs that should be ignored, normally Nix attempts to
|
||||
remove all ACLs from files and directories in the Nix store, but
|
||||
|
|
@ -893,55 +911,6 @@ public:
|
|||
are loaded as plugins (non-recursively).
|
||||
)"};
|
||||
|
||||
Setting<StringMap> accessTokens{this, {}, "access-tokens",
|
||||
R"(
|
||||
Access tokens used to access protected GitHub, GitLab, or
|
||||
other locations requiring token-based authentication.
|
||||
|
||||
Access tokens are specified as a string made up of
|
||||
space-separated `host=token` values. The specific token
|
||||
used is selected by matching the `host` portion against the
|
||||
"host" specification of the input. The actual use of the
|
||||
`token` value is determined by the type of resource being
|
||||
accessed:
|
||||
|
||||
* Github: the token value is the OAUTH-TOKEN string obtained
|
||||
as the Personal Access Token from the Github server (see
|
||||
https://docs.github.com/en/developers/apps/authorizing-oath-apps).
|
||||
|
||||
* Gitlab: the token value is either the OAuth2 token or the
|
||||
Personal Access Token (these are different types of tokens
for gitlab, see
|
||||
https://docs.gitlab.com/12.10/ee/api/README.html#authentication).
|
||||
The `token` value should be `type:tokenstring` where
|
||||
`type` is either `OAuth2` or `PAT` to indicate which type
|
||||
of token is being specified.
|
||||
|
||||
Example `~/.config/nix/nix.conf`:
|
||||
|
||||
```
|
||||
access-tokens = github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk
|
||||
```
|
||||
|
||||
Example `~/code/flake.nix`:
|
||||
|
||||
```nix
|
||||
input.foo = {
|
||||
type = "gitlab";
|
||||
host = "gitlab.mycompany.com";
|
||||
owner = "mycompany";
|
||||
repo = "pro";
|
||||
};
|
||||
```
|
||||
|
||||
This example specifies three tokens, one each for accessing
github.com, gitlab.mycompany.com, and gitlab.com.

The `input.foo` uses the "gitlab" fetcher, which might
require specifying the token type along with the token
value.
|
||||
)"};
|
||||
|
||||
Setting<std::set<ExperimentalFeature>> experimentalFeatures{this, {}, "experimental-features",
|
||||
"Experimental Nix features to enable."};
|
||||
|
||||
|
|
@ -949,18 +918,9 @@ public:
|
|||
|
||||
void requireExperimentalFeature(const ExperimentalFeature &);
|
||||
|
||||
Setting<bool> allowDirty{this, true, "allow-dirty",
|
||||
"Whether to allow dirty Git/Mercurial trees."};
|
||||
|
||||
Setting<bool> warnDirty{this, true, "warn-dirty",
|
||||
"Whether to warn about dirty Git/Mercurial trees."};
|
||||
|
||||
Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
|
||||
"Maximum size of NARs before spilling them to disk."};
|
||||
|
||||
Setting<std::string> flakeRegistry{this, "https://github.com/NixOS/flake-registry/raw/master/flake-registry.json", "flake-registry",
|
||||
"Path or URI of the global flake registry."};
|
||||
|
||||
Setting<bool> allowSymlinkedStore{
|
||||
this, false, "allow-symlinked-store",
|
||||
R"(
|
||||
|
|
@ -973,19 +933,6 @@ public:
|
|||
resolves to a different location from that of the build machine. You
|
||||
can enable this setting if you are sure you're not going to do that.
|
||||
)"};
|
||||
|
||||
Setting<bool> useRegistries{this, true, "use-registries",
|
||||
"Whether to use flake registries to resolve flake references."};
|
||||
|
||||
Setting<bool> acceptFlakeConfig{this, false, "accept-flake-config",
|
||||
"Whether to accept nix configuration from a flake without prompting."};
|
||||
|
||||
Setting<std::string> commitLockFileSummary{
|
||||
this, "", "commit-lockfile-summary",
|
||||
R"(
|
||||
The commit summary to use when committing changed flake lock files. If
|
||||
empty, the summary is generated based on the action performed.
|
||||
)"};
|
||||
};
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -161,7 +161,12 @@ protected:
|
|||
void getFile(const std::string & path,
|
||||
Callback<std::optional<std::string>> callback) noexcept override
|
||||
{
|
||||
checkEnabled();
|
||||
try {
|
||||
checkEnabled();
|
||||
} catch (...) {
|
||||
callback.rethrow();
|
||||
return;
|
||||
}
|
||||
|
||||
auto request(makeRequest(path));
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
#include "pool.hh"
|
||||
#include "remote-store.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include "build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "path-with-outputs.hh"
|
||||
#include "worker-protocol.hh"
|
||||
|
|
@ -278,7 +279,7 @@ public:
|
|||
|
||||
conn->to.flush();
|
||||
|
||||
BuildResult status;
|
||||
BuildResult status { .path = DerivedPath::Built { .drvPath = drvPath } };
|
||||
status.status = (BuildResult::Status) readInt(conn->from);
|
||||
conn->from >> status.errorMsg;
|
||||
|
||||
|
|
@ -316,7 +317,7 @@ public:
|
|||
|
||||
conn->to.flush();
|
||||
|
||||
BuildResult result;
|
||||
BuildResult result { .path = DerivedPath::Opaque { StorePath::dummy } };
|
||||
result.status = (BuildResult::Status) readInt(conn->from);
|
||||
|
||||
if (!result.success()) {
|
||||
|
|
|
|||
|
|
@ -57,8 +57,7 @@ protected:
|
|||
AutoDelete del(tmp, false);
|
||||
StreamToSourceAdapter source(istream);
|
||||
writeFile(tmp, source);
|
||||
if (rename(tmp.c_str(), path2.c_str()))
|
||||
throw SysError("renaming '%1%' to '%2%'", tmp, path2);
|
||||
renameFile(tmp, path2);
|
||||
del.cancel();
|
||||
}
|
||||
|
||||
|
|
@ -69,6 +68,7 @@ protected:
|
|||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOENT)
|
||||
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -107,7 +107,7 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
|
|||
|
||||
std::set<std::string> LocalBinaryCacheStore::uriSchemes()
|
||||
{
|
||||
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1")
|
||||
if (getEnv("_NIX_FORCE_HTTP") == "1")
|
||||
return {};
|
||||
else
|
||||
return {"file"};
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
#pragma once
|
||||
|
||||
#include "store-api.hh"
|
||||
#include "gc-store.hh"
|
||||
#include "log-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
|
@ -23,7 +25,10 @@ struct LocalFSStoreConfig : virtual StoreConfig
|
|||
"physical path to the Nix store"};
|
||||
};
|
||||
|
||||
class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store
|
||||
class LocalFSStore : public virtual LocalFSStoreConfig,
|
||||
public virtual Store,
|
||||
public virtual GcStore,
|
||||
public virtual LogStore
|
||||
{
|
||||
public:
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ int getSchema(Path schemaPath)
|
|||
|
||||
void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
|
||||
{
|
||||
const int nixCASchemaVersion = 3;
|
||||
const int nixCASchemaVersion = 4;
|
||||
int curCASchema = getSchema(schemaPath);
|
||||
if (curCASchema != nixCASchemaVersion) {
|
||||
if (curCASchema > nixCASchemaVersion) {
|
||||
|
|
@ -143,7 +143,22 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
|
|||
)");
|
||||
txn.commit();
|
||||
}
|
||||
writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
|
||||
if (curCASchema < 4) {
|
||||
SQLiteTxn txn(db);
|
||||
db.exec(R"(
|
||||
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
|
||||
begin
|
||||
delete from RealisationsRefs where realisationReference in (
|
||||
select id from Realisations where outputPath = old.id
|
||||
);
|
||||
end;
|
||||
-- used by deletion trigger
|
||||
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);
|
||||
)");
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, true);
|
||||
lockFile(lockFd.get(), ltRead, true);
|
||||
}
|
||||
}
|
||||
|
|
@ -266,7 +281,7 @@ LocalStore::LocalStore(const Params & params)
|
|||
else if (curSchema == 0) { /* new store */
|
||||
curSchema = nixSchemaVersion;
|
||||
openDB(*state, true);
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
|
||||
}
|
||||
|
||||
else if (curSchema < nixSchemaVersion) {
|
||||
|
|
@ -314,7 +329,7 @@ LocalStore::LocalStore(const Params & params)
|
|||
txn.commit();
|
||||
}
|
||||
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
|
||||
|
||||
lockFile(globalLock.get(), ltRead, true);
|
||||
}
|
||||
|
|
@ -482,18 +497,18 @@ void LocalStore::openDB(State & state, bool create)
|
|||
SQLiteStmt stmt;
|
||||
stmt.create(db, "pragma main.journal_mode;");
|
||||
if (sqlite3_step(stmt) != SQLITE_ROW)
|
||||
throwSQLiteError(db, "querying journal mode");
|
||||
SQLiteError::throw_(db, "querying journal mode");
|
||||
prevMode = std::string((const char *) sqlite3_column_text(stmt, 0));
|
||||
}
|
||||
if (prevMode != mode &&
|
||||
sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting journal mode");
|
||||
SQLiteError::throw_(db, "setting journal mode");
|
||||
|
||||
/* Increase the auto-checkpoint interval to 40000 pages. This
|
||||
seems enough to ensure that instantiating the NixOS system
|
||||
derivation is done in a single fsync(). */
|
||||
if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting autocheckpoint interval");
|
||||
SQLiteError::throw_(db, "setting autocheckpoint interval");
|
||||
|
||||
/* Initialise the database schema, if necessary. */
|
||||
if (create) {
|
||||
|
|
@ -702,31 +717,38 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
|
|||
// combinations that are currently prohibited.
|
||||
drv.type();
|
||||
|
||||
std::optional<Hash> h;
|
||||
std::optional<DrvHash> hashesModulo;
|
||||
for (auto & i : drv.outputs) {
|
||||
std::visit(overloaded {
|
||||
[&](const DerivationOutputInputAddressed & doia) {
|
||||
if (!h) {
|
||||
[&](const DerivationOutput::InputAddressed & doia) {
|
||||
if (!hashesModulo) {
|
||||
// somewhat expensive so we do lazily
|
||||
auto temp = hashDerivationModulo(*this, drv, true);
|
||||
h = std::get<Hash>(temp);
|
||||
hashesModulo = hashDerivationModulo(*this, drv, true);
|
||||
}
|
||||
StorePath recomputed = makeOutputPath(i.first, *h, drvName);
|
||||
auto currentOutputHash = get(hashesModulo->hashes, i.first);
|
||||
if (!currentOutputHash)
|
||||
throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'",
|
||||
printStorePath(drvPath), printStorePath(doia.path), i.first);
|
||||
StorePath recomputed = makeOutputPath(i.first, *currentOutputHash, drvName);
|
||||
if (doia.path != recomputed)
|
||||
throw Error("derivation '%s' has incorrect output '%s', should be '%s'",
|
||||
printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed));
|
||||
envHasRightPath(doia.path, i.first);
|
||||
},
|
||||
[&](const DerivationOutputCAFixed & dof) {
|
||||
[&](const DerivationOutput::CAFixed & dof) {
|
||||
StorePath path = makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName);
|
||||
envHasRightPath(path, i.first);
|
||||
},
|
||||
[&](const DerivationOutputCAFloating &) {
|
||||
[&](const DerivationOutput::CAFloating &) {
|
||||
/* Nothing to check */
|
||||
},
|
||||
[&](const DerivationOutputDeferred &) {
|
||||
[&](const DerivationOutput::Deferred &) {
|
||||
/* Nothing to check */
|
||||
},
|
||||
}, i.second.output);
|
||||
[&](const DerivationOutput::Impure &) {
|
||||
/* Nothing to check */
|
||||
},
|
||||
}, i.second.raw());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -736,7 +758,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
|
|||
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
|
||||
registerDrvOutput(info);
|
||||
else
|
||||
throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
|
||||
throw Error("cannot register realisation '%s' because it lacks a signature by a trusted key", info.outPath.to_string());
|
||||
}
|
||||
|
||||
void LocalStore::registerDrvOutput(const Realisation & info)
|
||||
|
|
@ -1251,7 +1273,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
|
|||
RepairFlag repair, CheckSigsFlag checkSigs)
|
||||
{
|
||||
if (checkSigs && pathInfoIsUntrusted(info))
|
||||
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
|
||||
throw Error("cannot add path '%s' because it lacks a signature by a trusted key", printStorePath(info.path));
|
||||
|
||||
addTempRoot(info.path);
|
||||
|
||||
|
|
@ -1367,13 +1389,15 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
|
|||
|
||||
std::unique_ptr<AutoDelete> delTempDir;
|
||||
Path tempPath;
|
||||
Path tempDir;
|
||||
AutoCloseFD tempDirFd;
|
||||
|
||||
if (!inMemory) {
|
||||
/* Drain what we pulled so far, and then keep on pulling */
|
||||
StringSource dumpSource { dump };
|
||||
ChainSource bothSource { dumpSource, source };
|
||||
|
||||
auto tempDir = createTempDir(realStoreDir, "add");
|
||||
std::tie(tempDir, tempDirFd) = createTempDirInStore();
|
||||
delTempDir = std::make_unique<AutoDelete>(tempDir);
|
||||
tempPath = tempDir + "/x";
|
||||
|
||||
|
|
@ -1415,8 +1439,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
|
|||
writeFile(realPath, dumpSource);
|
||||
} else {
|
||||
/* Move the temporary path we restored above. */
|
||||
if (rename(tempPath.c_str(), realPath.c_str()))
|
||||
throw Error("renaming '%s' to '%s'", tempPath, realPath);
|
||||
moveFile(tempPath, realPath);
|
||||
}
|
||||
|
||||
/* For computing the nar hash. In recursive SHA-256 mode, this
|
||||
|
|
@ -1493,18 +1516,24 @@ StorePath LocalStore::addTextToStore(
|
|||
|
||||
|
||||
/* Create a temporary directory in the store that won't be
|
||||
garbage-collected. */
|
||||
Path LocalStore::createTempDirInStore()
|
||||
garbage-collected until the returned FD is closed. */
|
||||
std::pair<Path, AutoCloseFD> LocalStore::createTempDirInStore()
|
||||
{
|
||||
Path tmpDir;
|
||||
Path tmpDirFn;
|
||||
AutoCloseFD tmpDirFd;
|
||||
bool lockedByUs = false;
|
||||
do {
|
||||
/* There is a slight possibility that `tmpDir' gets deleted by
|
||||
the GC between createTempDir() and addTempRoot(), so repeat
|
||||
until `tmpDir' exists. */
|
||||
tmpDir = createTempDir(realStoreDir);
|
||||
addTempRoot(parseStorePath(tmpDir));
|
||||
} while (!pathExists(tmpDir));
|
||||
return tmpDir;
|
||||
the GC between createTempDir() and when we acquire a lock on it.
|
||||
We'll repeat until 'tmpDir' exists and we've locked it. */
|
||||
tmpDirFn = createTempDir(realStoreDir, "tmp");
|
||||
tmpDirFd = open(tmpDirFn.c_str(), O_RDONLY | O_DIRECTORY);
|
||||
if (tmpDirFd.get() < 0) {
|
||||
continue;
|
||||
}
|
||||
lockedByUs = lockFile(tmpDirFd.get(), ltWrite, true);
|
||||
} while (!pathExists(tmpDirFn) || !lockedByUs);
|
||||
return {tmpDirFn, std::move(tmpDirFd)};
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1927,8 +1956,7 @@ void LocalStore::addBuildLog(const StorePath & drvPath, std::string_view log)
|
|||
|
||||
writeFile(tmpFile, compress("bzip2", log));
|
||||
|
||||
if (rename(tmpFile.c_str(), logPath.c_str()) != 0)
|
||||
throw SysError("renaming '%1%' to '%2%'", tmpFile, logPath);
|
||||
renameFile(tmpFile, logPath);
|
||||
}
|
||||
|
||||
std::optional<std::string> LocalStore::getVersion()
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@
|
|||
#include "pathlocks.hh"
|
||||
#include "store-api.hh"
|
||||
#include "local-fs-store.hh"
|
||||
#include "gc-store.hh"
|
||||
#include "sync.hh"
|
||||
#include "util.hh"
|
||||
|
||||
|
|
@ -43,7 +44,7 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
|
|||
};
|
||||
|
||||
|
||||
class LocalStore : public virtual LocalStoreConfig, public virtual LocalFSStore
|
||||
class LocalStore : public virtual LocalStoreConfig, public virtual LocalFSStore, public virtual GcStore
|
||||
{
|
||||
private:
|
||||
|
||||
|
|
@ -255,7 +256,7 @@ private:
|
|||
|
||||
void findRuntimeRoots(Roots & roots, bool censor);
|
||||
|
||||
Path createTempDirInStore();
|
||||
std::pair<Path, AutoCloseFD> createTempDirInStore();
|
||||
|
||||
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
|
||||
|
||||
|
|
|
|||
|
|
@ -39,14 +39,23 @@ libstore_CXXFLAGS += \
|
|||
-DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
|
||||
-DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
|
||||
-DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
|
||||
-DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
|
||||
-DNIX_BIN_DIR=\"$(bindir)\" \
|
||||
-DNIX_MAN_DIR=\"$(mandir)\" \
|
||||
-DLSOF=\"$(lsof)\"
|
||||
|
||||
ifeq ($(embedded_sandbox_shell),yes)
|
||||
libstore_CXXFLAGS += -DSANDBOX_SHELL=\"__embedded_sandbox_shell__\"
|
||||
|
||||
$(d)/build/local-derivation-goal.cc: $(d)/embedded-sandbox-shell.gen.hh
|
||||
|
||||
$(d)/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
|
||||
$(trace-gen) hexdump -v -e '1/1 "0x%x," "\n"' < $< > $@.tmp
|
||||
@mv $@.tmp $@
|
||||
else
|
||||
ifneq ($(sandbox_shell),)
|
||||
libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
|
||||
endif
|
||||
endif
|
||||
|
||||
$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
|
||||
|
||||
|
|
|
|||
|
|
@ -80,13 +80,30 @@ struct SimpleUserLock : UserLock
|
|||
/* Get the list of supplementary groups of this build
|
||||
user. This is usually either empty or contains a
|
||||
group such as "kvm". */
|
||||
lock->supplementaryGIDs.resize(10);
|
||||
int ngroups = lock->supplementaryGIDs.size();
|
||||
int err = getgrouplist(pw->pw_name, pw->pw_gid,
|
||||
lock->supplementaryGIDs.data(), &ngroups);
|
||||
int ngroups = 32; // arbitrary initial guess
|
||||
lock->supplementaryGIDs.resize(ngroups);
|
||||
|
||||
int err = getgrouplist(
|
||||
pw->pw_name, pw->pw_gid,
|
||||
lock->supplementaryGIDs.data(),
|
||||
&ngroups);
|
||||
|
||||
/* Our initial size of 32 wasn't sufficient, the
|
||||
correct size has been stored in ngroups, so we try
|
||||
again. */
|
||||
if (err == -1) {
|
||||
lock->supplementaryGIDs.resize(ngroups);
|
||||
err = getgrouplist(
|
||||
pw->pw_name, pw->pw_gid,
|
||||
lock->supplementaryGIDs.data(),
|
||||
&ngroups);
|
||||
}
|
||||
|
||||
// If it failed once more, then something must be broken.
|
||||
if (err == -1)
|
||||
throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
|
||||
|
||||
// Finally, trim back the GID list to its real size.
|
||||
lock->supplementaryGIDs.resize(ngroups);
|
||||
#endif
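As a standalone illustration of the grow-and-retry pattern used above (a hedged sketch against glibc's getgrouplist(); not code from this commit):

```cpp
#include <grp.h>
#include <pwd.h>
#include <stdexcept>
#include <vector>

// Returns all supplementary groups of 'pw', retrying once with the size
// reported by getgrouplist() if the initial guess was too small.
std::vector<gid_t> supplementaryGroups(const struct passwd * pw)
{
    int ngroups = 32;                        // arbitrary initial guess
    std::vector<gid_t> gids(ngroups);
    if (getgrouplist(pw->pw_name, pw->pw_gid, gids.data(), &ngroups) == -1) {
        gids.resize(ngroups);                // ngroups now holds the real count
        if (getgrouplist(pw->pw_name, pw->pw_gid, gids.data(), &ngroups) == -1)
            throw std::runtime_error("failed to get supplementary groups");
    }
    gids.resize(ngroups);                    // trim back to the real size
    return gids;
}
```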
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,8 @@
|
|||
|
||||
#include "types.hh"
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct UserLock
|
||||
|
|
|
|||
21
src/libstore/log-store.hh
Normal file
|
|
@ -0,0 +1,21 @@
#pragma once

#include "store-api.hh"


namespace nix {

struct LogStore : public virtual Store
{
    inline static std::string operationName = "Build log storage and retrieval";

    /* Return the build log of the specified store path, if available,
       or null otherwise. */
    virtual std::optional<std::string> getBuildLog(const StorePath & path) = 0;

    virtual void addBuildLog(const StorePath & path, std::string_view log) = 0;

    static LogStore & require(Store & store);
};

}
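A short usage sketch of the new interface (illustrative only; LogStore::require is declared above, the rest is assumed context):

```cpp
#include <iostream>
#include "log-store.hh"

// Hedged sketch: fetch and print the build log for a store path, if the
// underlying store can provide one.
void showLog(nix::Store & store, const nix::StorePath & path)
{
    // require() is assumed to throw when the store lacks log support.
    auto & logStore = nix::LogStore::require(store);
    if (auto log = logStore.getBuildLog(path))
        std::cout << *log;
    else
        printInfo("no build log available for '%s'", store.printStorePath(path));
}
```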
80
src/libstore/make-content-addressed.cc
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
#include "make-content-addressed.hh"
|
||||
#include "references.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::map<StorePath, StorePath> makeContentAddressed(
|
||||
Store & srcStore,
|
||||
Store & dstStore,
|
||||
const StorePathSet & storePaths)
|
||||
{
|
||||
StorePathSet closure;
|
||||
srcStore.computeFSClosure(storePaths, closure);
|
||||
|
||||
auto paths = srcStore.topoSortPaths(closure);
|
||||
|
||||
std::reverse(paths.begin(), paths.end());
|
||||
|
||||
std::map<StorePath, StorePath> remappings;
|
||||
|
||||
for (auto & path : paths) {
|
||||
auto pathS = srcStore.printStorePath(path);
|
||||
auto oldInfo = srcStore.queryPathInfo(path);
|
||||
std::string oldHashPart(path.hashPart());
|
||||
|
||||
StringSink sink;
|
||||
srcStore.narFromPath(path, sink);
|
||||
|
||||
StringMap rewrites;
|
||||
|
||||
StorePathSet references;
|
||||
bool hasSelfReference = false;
|
||||
for (auto & ref : oldInfo->references) {
|
||||
if (ref == path)
|
||||
hasSelfReference = true;
|
||||
else {
|
||||
auto i = remappings.find(ref);
|
||||
auto replacement = i != remappings.end() ? i->second : ref;
|
||||
// FIXME: warn about unremapped paths?
|
||||
if (replacement != ref)
|
||||
rewrites.insert_or_assign(srcStore.printStorePath(ref), srcStore.printStorePath(replacement));
|
||||
references.insert(std::move(replacement));
|
||||
}
|
||||
}
|
||||
|
||||
sink.s = rewriteStrings(sink.s, rewrites);
|
||||
|
||||
HashModuloSink hashModuloSink(htSHA256, oldHashPart);
|
||||
hashModuloSink(sink.s);
|
||||
|
||||
auto narModuloHash = hashModuloSink.finish().first;
|
||||
|
||||
auto dstPath = dstStore.makeFixedOutputPath(
|
||||
FileIngestionMethod::Recursive, narModuloHash, path.name(), references, hasSelfReference);
|
||||
|
||||
printInfo("rewriting '%s' to '%s'", pathS, srcStore.printStorePath(dstPath));
|
||||
|
||||
StringSink sink2;
|
||||
RewritingSink rsink2(oldHashPart, std::string(dstPath.hashPart()), sink2);
|
||||
rsink2(sink.s);
|
||||
rsink2.flush();
|
||||
|
||||
ValidPathInfo info { dstPath, hashString(htSHA256, sink2.s) };
|
||||
info.references = std::move(references);
|
||||
if (hasSelfReference) info.references.insert(info.path);
|
||||
info.narSize = sink.s.size();
|
||||
info.ca = FixedOutputHash {
|
||||
.method = FileIngestionMethod::Recursive,
|
||||
.hash = narModuloHash,
|
||||
};
|
||||
|
||||
StringSource source(sink2.s);
|
||||
dstStore.addToStore(info, source);
|
||||
|
||||
remappings.insert_or_assign(std::move(path), std::move(info.path));
|
||||
}
|
||||
|
||||
return remappings;
|
||||
}
|
||||
|
||||
}
|
||||
12
src/libstore/make-content-addressed.hh
Normal file
|
|
@ -0,0 +1,12 @@
#pragma once

#include "store-api.hh"

namespace nix {

std::map<StorePath, StorePath> makeContentAddressed(
    Store & srcStore,
    Store & dstStore,
    const StorePathSet & storePaths);

}
|
|
@ -87,7 +87,7 @@ std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv)
|
|||
{
|
||||
auto out = drv.outputs.find("out");
|
||||
if (out != drv.outputs.end()) {
|
||||
if (auto v = std::get_if<DerivationOutputCAFixed>(&out->second.output))
|
||||
if (const auto * v = std::get_if<DerivationOutput::CAFixed>(&out->second.raw()))
|
||||
return v->hash;
|
||||
}
|
||||
return std::nullopt;
|
||||
|
|
@ -277,15 +277,20 @@ std::map<DrvOutput, StorePath> drvOutputReferences(
|
|||
{
|
||||
std::set<Realisation> inputRealisations;
|
||||
|
||||
for (const auto& [inputDrv, outputNames] : drv.inputDrvs) {
|
||||
auto outputHashes =
|
||||
for (const auto & [inputDrv, outputNames] : drv.inputDrvs) {
|
||||
const auto outputHashes =
|
||||
staticOutputHashes(store, store.readDerivation(inputDrv));
|
||||
for (const auto& outputName : outputNames) {
|
||||
for (const auto & outputName : outputNames) {
|
||||
auto outputHash = get(outputHashes, outputName);
|
||||
if (!outputHash)
|
||||
throw Error(
|
||||
"output '%s' of derivation '%s' isn't realised", outputName,
|
||||
store.printStorePath(inputDrv));
|
||||
auto thisRealisation = store.queryRealisation(
|
||||
DrvOutput{outputHashes.at(outputName), outputName});
|
||||
DrvOutput{*outputHash, outputName});
|
||||
if (!thisRealisation)
|
||||
throw Error(
|
||||
"output '%s' of derivation '%s' isn’t built", outputName,
|
||||
"output '%s' of derivation '%s' isn't built", outputName,
|
||||
store.printStorePath(inputDrv));
|
||||
inputRealisations.insert(*thisRealisation);
|
||||
}
|
||||
|
|
@ -295,4 +300,5 @@ std::map<DrvOutput, StorePath> drvOutputReferences(
|
|||
|
||||
return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -75,6 +75,9 @@ struct NarAccessor : public FSAccessor
|
|||
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
|
||||
}
|
||||
|
||||
void closeRegularFile() override
|
||||
{ }
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
parents.top()->isExecutable = true;
|
||||
|
|
|
|||
|
|
@ -62,6 +62,9 @@ public:
|
|||
/* How often to purge expired entries from the cache. */
|
||||
const int purgeInterval = 24 * 3600;
|
||||
|
||||
/* How long to cache binary cache info (i.e. /nix-cache-info) */
|
||||
const int cacheInfoTtl = 7 * 24 * 3600;
|
||||
|
||||
struct Cache
|
||||
{
|
||||
int id;
|
||||
|
|
@ -98,7 +101,7 @@ public:
|
|||
"insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
|
||||
|
||||
state->queryCache.create(state->db,
|
||||
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
|
||||
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
|
||||
|
||||
state->insertNAR.create(state->db,
|
||||
"insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
|
||||
|
|
@ -183,7 +186,7 @@ public:
|
|||
|
||||
auto i = state->caches.find(uri);
|
||||
if (i == state->caches.end()) {
|
||||
auto queryCache(state->queryCache.use()(uri));
|
||||
auto queryCache(state->queryCache.use()(uri)(time(0) - cacheInfoTtl));
|
||||
if (!queryCache.next())
|
||||
return std::nullopt;
|
||||
state->caches.emplace(uri,
|
||||
|
|
|
|||
|
|
@ -69,8 +69,6 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
|
|||
if (value != "unknown-deriver")
|
||||
deriver = StorePath(value);
|
||||
}
|
||||
else if (name == "System")
|
||||
system = value;
|
||||
else if (name == "Sig")
|
||||
sigs.insert(value);
|
||||
else if (name == "CA") {
|
||||
|
|
@ -106,9 +104,6 @@ std::string NarInfo::to_string(const Store & store) const
|
|||
if (deriver)
|
||||
res += "Deriver: " + std::string(deriver->to_string()) + "\n";
|
||||
|
||||
if (!system.empty())
|
||||
res += "System: " + system + "\n";
|
||||
|
||||
for (auto sig : sigs)
|
||||
res += "Sig: " + sig + "\n";
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@ struct NarInfo : ValidPathInfo
|
|||
std::string compression;
|
||||
std::optional<Hash> fileHash;
|
||||
uint64_t fileSize = 0;
|
||||
std::string system;
|
||||
|
||||
NarInfo() = delete;
|
||||
NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
|
||||
|
|
|
|||
|
|
@ -229,7 +229,9 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
|
|||
}
|
||||
|
||||
/* Atomically replace the old file with the new hard link. */
|
||||
if (rename(tempLink.c_str(), path.c_str()) == -1) {
|
||||
try {
|
||||
renameFile(tempLink, path);
|
||||
} catch (SysError & e) {
|
||||
if (unlink(tempLink.c_str()) == -1)
|
||||
printError("unable to unlink '%1%'", tempLink);
|
||||
if (errno == EMLINK) {
|
||||
|
|
@ -240,7 +242,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
|
|||
debug("'%s' has reached maximum number of links", linkPath);
|
||||
return;
|
||||
}
|
||||
throw SysError("cannot rename '%1%' to '%2%'", tempLink, path);
|
||||
throw;
|
||||
}
|
||||
|
||||
stats.filesLinked++;
|
||||
|
|
|
|||
|
|
@ -93,7 +93,7 @@ StringSet ParsedDerivation::getRequiredSystemFeatures() const
|
|||
StringSet res;
|
||||
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
|
||||
res.insert(i);
|
||||
if (!derivationHasKnownOutputPaths(drv.type()))
|
||||
if (!drv.type().hasKnownOutputPaths())
|
||||
res.insert("ca-derivations");
|
||||
return res;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
#pragma once
|
||||
|
||||
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <nlohmann/json_fwd.hpp>
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
#include "path-with-outputs.hh"
|
||||
#include "store-api.hh"
|
||||
#include "nlohmann/json.hpp"
|
||||
|
||||
#include <regex>
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
|
@ -22,9 +25,9 @@ DerivedPath StorePathWithOutputs::toDerivedPath() const
|
|||
|
||||
std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs> ss)
|
||||
{
|
||||
std::vector<DerivedPath> reqs;
|
||||
for (auto & s : ss) reqs.push_back(s.toDerivedPath());
|
||||
return reqs;
|
||||
std::vector<DerivedPath> reqs;
|
||||
for (auto & s : ss) reqs.push_back(s.toDerivedPath());
|
||||
return reqs;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -68,4 +71,57 @@ StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std:
|
|||
return StorePathWithOutputs { store.followLinksToStorePath(path), std::move(outputs) };
|
||||
}
|
||||
|
||||
std::pair<std::string, OutputsSpec> parseOutputsSpec(const std::string & s)
|
||||
{
|
||||
static std::regex regex(R"((.*)\^((\*)|([a-z]+(,[a-z]+)*)))");
|
||||
|
||||
std::smatch match;
|
||||
if (!std::regex_match(s, match, regex))
|
||||
return {s, DefaultOutputs()};
|
||||
|
||||
if (match[3].matched)
|
||||
return {match[1], AllOutputs()};
|
||||
|
||||
return {match[1], tokenizeString<OutputNames>(match[4].str(), ",")};
|
||||
}
|
||||
|
||||
std::string printOutputsSpec(const OutputsSpec & outputsSpec)
|
||||
{
|
||||
if (std::get_if<DefaultOutputs>(&outputsSpec))
|
||||
return "";
|
||||
|
||||
if (std::get_if<AllOutputs>(&outputsSpec))
|
||||
return "^*";
|
||||
|
||||
if (auto outputNames = std::get_if<OutputNames>(&outputsSpec))
|
||||
return "^" + concatStringsSep(",", *outputNames);
|
||||
|
||||
assert(false);
|
||||
}
|
||||
|
||||
void to_json(nlohmann::json & json, const OutputsSpec & outputsSpec)
|
||||
{
|
||||
if (std::get_if<DefaultOutputs>(&outputsSpec))
|
||||
json = nullptr;
|
||||
|
||||
else if (std::get_if<AllOutputs>(&outputsSpec))
|
||||
json = std::vector<std::string>({"*"});
|
||||
|
||||
else if (auto outputNames = std::get_if<OutputNames>(&outputsSpec))
|
||||
json = *outputNames;
|
||||
}
|
||||
|
||||
void from_json(const nlohmann::json & json, OutputsSpec & outputsSpec)
|
||||
{
|
||||
if (json.is_null())
|
||||
outputsSpec = DefaultOutputs();
|
||||
else {
|
||||
auto names = json.get<OutputNames>();
|
||||
if (names == OutputNames({"*"}))
|
||||
outputsSpec = AllOutputs();
|
||||
else
|
||||
outputsSpec = names;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
|
||||
#include "path.hh"
|
||||
#include "derived-path.hh"
|
||||
#include "nlohmann/json_fwd.hpp"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
|
@ -32,4 +33,25 @@ StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view
|
|||
|
||||
StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
|
||||
|
||||
typedef std::set<std::string> OutputNames;
|
||||
|
||||
struct AllOutputs {
|
||||
bool operator < (const AllOutputs & _) const { return false; }
|
||||
};
|
||||
|
||||
struct DefaultOutputs {
|
||||
bool operator < (const DefaultOutputs & _) const { return false; }
|
||||
};
|
||||
|
||||
typedef std::variant<DefaultOutputs, AllOutputs, OutputNames> OutputsSpec;
|
||||
|
||||
/* Parse a string of the form 'prefix^output1,...outputN' or
|
||||
'prefix^*', returning the prefix and the outputs spec. */
|
||||
std::pair<std::string, OutputsSpec> parseOutputsSpec(const std::string & s);
|
||||
|
||||
std::string printOutputsSpec(const OutputsSpec & outputsSpec);
|
||||
|
||||
void to_json(nlohmann::json &, const OutputsSpec &);
|
||||
void from_json(const nlohmann::json &, OutputsSpec &);
|
||||
|
||||
}
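parseOutputsSpec splits a reference such as `hello^out,dev` into its prefix and an OutputsSpec, and printOutputsSpec renders a spec back into the `^`-separated form. A small round-trip sketch, assuming only the declarations above (note that OutputNames is a std::set, so names come back in sorted order):

    // Sketch: parse an outputs spec and print it back.
    #include "path-with-outputs.hh"
    #include <cassert>

    void roundTrip()
    {
        auto [prefix, spec] = nix::parseOutputsSpec("hello^out,dev");
        assert(prefix == "hello");
        // The set ordering puts "dev" before "out".
        assert(nix::printOutputsSpec(spec) == "^dev,out");
    }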
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
#include "store-api.hh"
|
||||
|
||||
#include <sodium.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
static void checkName(std::string_view path, std::string_view name)
|
||||
|
|
@ -41,6 +43,13 @@ bool StorePath::isDerivation() const
|
|||
|
||||
StorePath StorePath::dummy("ffffffffffffffffffffffffffffffff-x");
|
||||
|
||||
StorePath StorePath::random(std::string_view name)
|
||||
{
|
||||
Hash hash(htSHA1);
|
||||
randombytes_buf(hash.hash, hash.hashSize);
|
||||
return StorePath(hash, name);
|
||||
}
|
||||
|
||||
StorePath Store::parseStorePath(std::string_view path) const
|
||||
{
|
||||
auto p = canonPath(std::string(path));
|
||||
|
|
|
|||
|
|
@ -58,6 +58,8 @@ public:
|
|||
}
|
||||
|
||||
static StorePath dummy;
|
||||
|
||||
static StorePath random(std::string_view name);
|
||||
};
|
||||
|
||||
typedef std::set<StorePath> StorePathSet;
|
||||
|
|
|
|||
|
|
@ -1,7 +1,9 @@
|
|||
#include "serialise.hh"
|
||||
#include "util.hh"
|
||||
#include "path-with-outputs.hh"
|
||||
#include "gc-store.hh"
|
||||
#include "remote-fs-accessor.hh"
|
||||
#include "build-result.hh"
|
||||
#include "remote-store.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "archive.hh"
|
||||
|
|
@ -89,6 +91,35 @@ void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
|
|||
}
|
||||
|
||||
|
||||
BuildResult read(const Store & store, Source & from, Phantom<BuildResult> _)
|
||||
{
|
||||
auto path = worker_proto::read(store, from, Phantom<DerivedPath> {});
|
||||
BuildResult res { .path = path };
|
||||
res.status = (BuildResult::Status) readInt(from);
|
||||
from
|
||||
>> res.errorMsg
|
||||
>> res.timesBuilt
|
||||
>> res.isNonDeterministic
|
||||
>> res.startTime
|
||||
>> res.stopTime;
|
||||
res.builtOutputs = worker_proto::read(store, from, Phantom<DrvOutputs> {});
|
||||
return res;
|
||||
}
|
||||
|
||||
void write(const Store & store, Sink & to, const BuildResult & res)
|
||||
{
|
||||
worker_proto::write(store, to, res.path);
|
||||
to
|
||||
<< res.status
|
||||
<< res.errorMsg
|
||||
<< res.timesBuilt
|
||||
<< res.isNonDeterministic
|
||||
<< res.startTime
|
||||
<< res.stopTime;
|
||||
worker_proto::write(store, to, res.builtOutputs);
|
||||
}
|
||||
|
||||
|
||||
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
|
||||
{
|
||||
auto s = readString(from);
|
||||
|
|
@ -549,7 +580,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
|
|||
|
||||
try {
|
||||
conn->to.written = 0;
|
||||
conn->to.warn = true;
|
||||
connections->incCapacity();
|
||||
{
|
||||
Finally cleanup([&]() { connections->decCapacity(); });
|
||||
|
|
@ -560,7 +590,6 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
|
|||
dumpString(contents, conn->to);
|
||||
}
|
||||
}
|
||||
conn->to.warn = false;
|
||||
conn.processStderr();
|
||||
} catch (SysError & e) {
|
||||
/* Daemon closed while we were sending the path. Probably OOM
|
||||
|
|
@ -642,6 +671,23 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
|
|||
}
|
||||
|
||||
|
||||
void RemoteStore::addMultipleToStore(
|
||||
PathsSource & pathsToCopy,
|
||||
Activity & act,
|
||||
RepairFlag repair,
|
||||
CheckSigsFlag checkSigs)
|
||||
{
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
sink << pathsToCopy.size();
|
||||
for (auto & [pathInfo, pathSource] : pathsToCopy) {
|
||||
pathInfo.write(sink, *this, 16);
|
||||
pathSource->drainInto(sink);
|
||||
}
|
||||
});
|
||||
|
||||
addMultipleToStore(*source, repair, checkSigs);
|
||||
}
|
||||
|
||||
void RemoteStore::addMultipleToStore(
|
||||
Source & source,
|
||||
RepairFlag repair,
|
||||
|
|
@ -687,36 +733,34 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
|
|||
void RemoteStore::queryRealisationUncached(const DrvOutput & id,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept
|
||||
{
|
||||
auto conn(getConnection());
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
|
||||
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
|
||||
try {
|
||||
callback(nullptr);
|
||||
} catch (...) { return callback.rethrow(); }
|
||||
}
|
||||
|
||||
conn->to << wopQueryRealisation;
|
||||
conn->to << id.to_string();
|
||||
conn.processStderr();
|
||||
|
||||
auto real = [&]() -> std::shared_ptr<const Realisation> {
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
|
||||
auto outPaths = worker_proto::read(
|
||||
*this, conn->from, Phantom<std::set<StorePath>> {});
|
||||
if (outPaths.empty())
|
||||
return nullptr;
|
||||
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
|
||||
} else {
|
||||
auto realisations = worker_proto::read(
|
||||
*this, conn->from, Phantom<std::set<Realisation>> {});
|
||||
if (realisations.empty())
|
||||
return nullptr;
|
||||
return std::make_shared<const Realisation>(*realisations.begin());
|
||||
}
|
||||
}();
|
||||
|
||||
try {
|
||||
auto conn(getConnection());
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
|
||||
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
|
||||
return callback(nullptr);
|
||||
}
|
||||
|
||||
conn->to << wopQueryRealisation;
|
||||
conn->to << id.to_string();
|
||||
conn.processStderr();
|
||||
|
||||
auto real = [&]() -> std::shared_ptr<const Realisation> {
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
|
||||
auto outPaths = worker_proto::read(
|
||||
*this, conn->from, Phantom<std::set<StorePath>> {});
|
||||
if (outPaths.empty())
|
||||
return nullptr;
|
||||
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
|
||||
} else {
|
||||
auto realisations = worker_proto::read(
|
||||
*this, conn->from, Phantom<std::set<Realisation>> {});
|
||||
if (realisations.empty())
|
||||
return nullptr;
|
||||
return std::make_shared<const Realisation>(*realisations.begin());
|
||||
}
|
||||
}();
|
||||
|
||||
callback(std::shared_ptr<const Realisation>(real));
|
||||
} catch (...) { return callback.rethrow(); }
|
||||
}
|
||||
|
|
@ -745,17 +789,24 @@ static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, cons
|
|||
}
|
||||
}
|
||||
|
||||
void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
void RemoteStore::copyDrvsFromEvalStore(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
if (evalStore && evalStore.get() != this) {
|
||||
/* The remote doesn't have a way to access evalStore, so copy
|
||||
the .drvs. */
|
||||
RealisedPath::Set drvPaths2;
|
||||
for (auto & i : drvPaths)
|
||||
for (auto & i : paths)
|
||||
if (auto p = std::get_if<DerivedPath::Built>(&i))
|
||||
drvPaths2.insert(p->drvPath);
|
||||
copyClosure(*evalStore, *this, drvPaths2);
|
||||
}
|
||||
}
|
||||
|
||||
void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
copyDrvsFromEvalStore(drvPaths, evalStore);
|
||||
|
||||
auto conn(getConnection());
|
||||
conn->to << wopBuildPaths;
|
||||
|
|
@ -772,6 +823,92 @@ void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMod
|
|||
readInt(conn->from);
|
||||
}
|
||||
|
||||
std::vector<BuildResult> RemoteStore::buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
BuildMode buildMode,
|
||||
std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
copyDrvsFromEvalStore(paths, evalStore);
|
||||
|
||||
std::optional<ConnectionHandle> conn_(getConnection());
|
||||
auto & conn = *conn_;
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
|
||||
conn->to << wopBuildPathsWithResults;
|
||||
writeDerivedPaths(*this, conn, paths);
|
||||
conn->to << buildMode;
|
||||
conn.processStderr();
|
||||
return worker_proto::read(*this, conn->from, Phantom<std::vector<BuildResult>> {});
|
||||
} else {
|
||||
// Avoid deadlock.
|
||||
conn_.reset();
|
||||
|
||||
// Note: this throws an exception if a build/substitution
|
||||
// fails, but meh.
|
||||
buildPaths(paths, buildMode, evalStore);
|
||||
|
||||
std::vector<BuildResult> results;
|
||||
|
||||
for (auto & path : paths) {
|
||||
std::visit(
|
||||
overloaded {
|
||||
[&](const DerivedPath::Opaque & bo) {
|
||||
results.push_back(BuildResult {
|
||||
.status = BuildResult::Substituted,
|
||||
.path = bo,
|
||||
});
|
||||
},
|
||||
[&](const DerivedPath::Built & bfd) {
|
||||
BuildResult res {
|
||||
.status = BuildResult::Built,
|
||||
.path = bfd,
|
||||
};
|
||||
|
||||
OutputPathMap outputs;
|
||||
auto drv = evalStore->readDerivation(bfd.drvPath);
|
||||
const auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
|
||||
const auto drvOutputs = drv.outputsAndOptPaths(*this);
|
||||
for (auto & output : bfd.outputs) {
|
||||
auto outputHash = get(outputHashes, output);
|
||||
if (!outputHash)
|
||||
throw Error(
|
||||
"the derivation '%s' doesn't have an output named '%s'",
|
||||
printStorePath(bfd.drvPath), output);
|
||||
auto outputId = DrvOutput{ *outputHash, output };
|
||||
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
|
||||
auto realisation =
|
||||
queryRealisation(outputId);
|
||||
if (!realisation)
|
||||
throw Error(
|
||||
"cannot operate on an output of unbuilt "
|
||||
"content-addressed derivation '%s'",
|
||||
outputId.to_string());
|
||||
res.builtOutputs.emplace(realisation->id, *realisation);
|
||||
} else {
|
||||
// If ca-derivations isn't enabled, assume that
|
||||
// the output path is statically known.
|
||||
const auto drvOutput = get(drvOutputs, output);
|
||||
assert(drvOutput);
|
||||
assert(drvOutput->second);
|
||||
res.builtOutputs.emplace(
|
||||
outputId,
|
||||
Realisation {
|
||||
.id = outputId,
|
||||
.outPath = *drvOutput->second,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
results.push_back(res);
|
||||
}
|
||||
},
|
||||
path.raw());
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode)
|
||||
|
|
@ -781,7 +918,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
|
|||
writeDerivation(conn->to, *this, drv);
|
||||
conn->to << buildMode;
|
||||
conn.processStderr();
|
||||
BuildResult res;
|
||||
BuildResult res { .path = DerivedPath::Built { .drvPath = drvPath } };
|
||||
res.status = (BuildResult::Status) readInt(conn->from);
|
||||
conn->from >> res.errorMsg;
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
|
||||
|
|
|
|||
|
|
@ -4,6 +4,8 @@
|
|||
#include <string>
|
||||
|
||||
#include "store-api.hh"
|
||||
#include "gc-store.hh"
|
||||
#include "log-store.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
|
@ -29,7 +31,10 @@ struct RemoteStoreConfig : virtual StoreConfig
|
|||
|
||||
/* FIXME: RemoteStore is a misnomer - should be something like
|
||||
DaemonStore. */
|
||||
class RemoteStore : public virtual RemoteStoreConfig, public virtual Store
|
||||
class RemoteStore : public virtual RemoteStoreConfig,
|
||||
public virtual Store,
|
||||
public virtual GcStore,
|
||||
public virtual LogStore
|
||||
{
|
||||
public:
|
||||
|
||||
|
|
@ -83,6 +88,12 @@ public:
|
|||
RepairFlag repair,
|
||||
CheckSigsFlag checkSigs) override;
|
||||
|
||||
void addMultipleToStore(
|
||||
PathsSource & pathsToCopy,
|
||||
Activity & act,
|
||||
RepairFlag repair,
|
||||
CheckSigsFlag checkSigs) override;
|
||||
|
||||
StorePath addTextToStore(
|
||||
std::string_view name,
|
||||
std::string_view s,
|
||||
|
|
@ -96,6 +107,11 @@ public:
|
|||
|
||||
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
|
||||
|
||||
std::vector<BuildResult> buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
BuildMode buildMode,
|
||||
std::shared_ptr<Store> evalStore) override;
|
||||
|
||||
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode) override;
|
||||
|
||||
|
|
@ -170,6 +186,9 @@ private:
|
|||
|
||||
std::atomic_bool failed{false};
|
||||
|
||||
void copyDrvsFromEvalStore(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
std::shared_ptr<Store> evalStore);
|
||||
};
|
||||
|
||||
|
||||
|
|
|
|||
7
src/libstore/repair-flag.hh
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
namespace nix {
|
||||
|
||||
enum RepairFlag : bool { NoRepair = false, Repair = true };
|
||||
|
||||
}
|
||||
|
|
@ -5,6 +5,7 @@
|
|||
#include "ref.hh"
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
namespace Aws { namespace Client { class ClientConfiguration; } }
|
||||
namespace Aws { namespace S3 { class S3Client; } }
|
||||
|
|
|
|||
|
|
@ -98,7 +98,9 @@
|
|||
(allow file*
|
||||
(literal "/private/var/select/sh"))
|
||||
|
||||
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
|
||||
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin (and vice versa).
|
||||
(allow file-read*
|
||||
(subpath "/Library/Apple/usr/libexec/oah")
|
||||
(subpath "/System/Library/Apple/usr/libexec/oah"))
|
||||
(subpath "/System/Library/Apple/usr/libexec/oah")
|
||||
(subpath "/System/Library/LaunchDaemons/com.apple.oahd.plist")
|
||||
(subpath "/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist"))
|
||||
|
|
|
|||
|
|
@ -14,3 +14,7 @@
|
|||
|
||||
; Allow DNS lookups.
|
||||
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))
|
||||
|
||||
; Allow access to trustd.
|
||||
(allow mach-lookup (global-name "com.apple.trustd"))
|
||||
(allow mach-lookup (global-name "com.apple.trustd.agent"))
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
create table if not exists ValidPaths (
|
||||
id integer primary key autoincrement not null,
|
||||
path text unique not null,
|
||||
hash text not null,
|
||||
hash text not null, -- base16 representation
|
||||
registrationTime integer not null,
|
||||
deriver text,
|
||||
narSize integer,
|
||||
|
|
|
|||
|
|
@ -8,22 +8,32 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs)
|
||||
SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf)
|
||||
: Error(""), path(path), errNo(errNo), extendedErrNo(extendedErrNo)
|
||||
{
|
||||
err.msg = hintfmt("%s: %s (in '%s')",
|
||||
normaltxt(hf.str()),
|
||||
sqlite3_errstr(extendedErrNo),
|
||||
path ? path : "(in-memory)");
|
||||
}
|
||||
|
||||
[[noreturn]] void SQLiteError::throw_(sqlite3 * db, hintformat && hf)
|
||||
{
|
||||
int err = sqlite3_errcode(db);
|
||||
int exterr = sqlite3_extended_errcode(db);
|
||||
|
||||
auto path = sqlite3_db_filename(db, nullptr);
|
||||
if (!path) path = "(in-memory)";
|
||||
|
||||
if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
|
||||
throw SQLiteBusy(
|
||||
auto exp = SQLiteBusy(path, err, exterr, std::move(hf));
|
||||
exp.err.msg = hintfmt(
|
||||
err == SQLITE_PROTOCOL
|
||||
? fmt("SQLite database '%s' is busy (SQLITE_PROTOCOL)", path)
|
||||
: fmt("SQLite database '%s' is busy", path));
|
||||
}
|
||||
else
|
||||
throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path);
|
||||
? "SQLite database '%s' is busy (SQLITE_PROTOCOL)"
|
||||
: "SQLite database '%s' is busy",
|
||||
path ? path : "(in-memory)");
|
||||
throw exp;
|
||||
} else
|
||||
throw SQLiteError(path, err, exterr, std::move(hf));
|
||||
}
|
||||
|
||||
SQLite::SQLite(const Path & path, bool create)
|
||||
|
|
@ -37,7 +47,7 @@ SQLite::SQLite(const Path & path, bool create)
|
|||
throw Error("cannot open SQLite database '%s'", path);
|
||||
|
||||
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting timeout");
|
||||
SQLiteError::throw_(db, "setting timeout");
|
||||
|
||||
exec("pragma foreign_keys = 1");
|
||||
}
|
||||
|
|
@ -46,7 +56,7 @@ SQLite::~SQLite()
|
|||
{
|
||||
try {
|
||||
if (db && sqlite3_close(db) != SQLITE_OK)
|
||||
throwSQLiteError(db, "closing database");
|
||||
SQLiteError::throw_(db, "closing database");
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
|
|
@ -62,7 +72,7 @@ void SQLite::exec(const std::string & stmt)
|
|||
{
|
||||
retrySQLite<void>([&]() {
|
||||
if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, format("executing SQLite statement '%s'") % stmt);
|
||||
SQLiteError::throw_(db, "executing SQLite statement '%s'", stmt);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
@ -76,7 +86,7 @@ void SQLiteStmt::create(sqlite3 * db, const std::string & sql)
|
|||
checkInterrupt();
|
||||
assert(!stmt);
|
||||
if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, fmt("creating statement '%s'", sql));
|
||||
SQLiteError::throw_(db, "creating statement '%s'", sql);
|
||||
this->db = db;
|
||||
this->sql = sql;
|
||||
}
|
||||
|
|
@ -85,7 +95,7 @@ SQLiteStmt::~SQLiteStmt()
|
|||
{
|
||||
try {
|
||||
if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
|
||||
throwSQLiteError(db, fmt("finalizing statement '%s'", sql));
|
||||
SQLiteError::throw_(db, "finalizing statement '%s'", sql);
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
|
|
@ -109,7 +119,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (std::string_view value, bool not
|
|||
{
|
||||
if (notNull) {
|
||||
if (sqlite3_bind_text(stmt, curArg++, value.data(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
|
||||
throwSQLiteError(stmt.db, "binding argument");
|
||||
SQLiteError::throw_(stmt.db, "binding argument");
|
||||
} else
|
||||
bind();
|
||||
return *this;
|
||||
|
|
@ -119,7 +129,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (const unsigned char * data, size
|
|||
{
|
||||
if (notNull) {
|
||||
if (sqlite3_bind_blob(stmt, curArg++, data, len, SQLITE_TRANSIENT) != SQLITE_OK)
|
||||
throwSQLiteError(stmt.db, "binding argument");
|
||||
SQLiteError::throw_(stmt.db, "binding argument");
|
||||
} else
|
||||
bind();
|
||||
return *this;
|
||||
|
|
@ -129,7 +139,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
|
|||
{
|
||||
if (notNull) {
|
||||
if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
|
||||
throwSQLiteError(stmt.db, "binding argument");
|
||||
SQLiteError::throw_(stmt.db, "binding argument");
|
||||
} else
|
||||
bind();
|
||||
return *this;
|
||||
|
|
@ -138,7 +148,7 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
|
|||
SQLiteStmt::Use & SQLiteStmt::Use::bind()
|
||||
{
|
||||
if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
|
||||
throwSQLiteError(stmt.db, "binding argument");
|
||||
SQLiteError::throw_(stmt.db, "binding argument");
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
|
@ -152,14 +162,14 @@ void SQLiteStmt::Use::exec()
|
|||
int r = step();
|
||||
assert(r != SQLITE_ROW);
|
||||
if (r != SQLITE_DONE)
|
||||
throwSQLiteError(stmt.db, fmt("executing SQLite statement '%s'", sqlite3_expanded_sql(stmt.stmt)));
|
||||
SQLiteError::throw_(stmt.db, fmt("executing SQLite statement '%s'", sqlite3_expanded_sql(stmt.stmt)));
|
||||
}
|
||||
|
||||
bool SQLiteStmt::Use::next()
|
||||
{
|
||||
int r = step();
|
||||
if (r != SQLITE_DONE && r != SQLITE_ROW)
|
||||
throwSQLiteError(stmt.db, fmt("executing SQLite query '%s'", sqlite3_expanded_sql(stmt.stmt)));
|
||||
SQLiteError::throw_(stmt.db, fmt("executing SQLite query '%s'", sqlite3_expanded_sql(stmt.stmt)));
|
||||
return r == SQLITE_ROW;
|
||||
}
|
||||
|
||||
|
|
@ -185,14 +195,14 @@ SQLiteTxn::SQLiteTxn(sqlite3 * db)
|
|||
{
|
||||
this->db = db;
|
||||
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "starting transaction");
|
||||
SQLiteError::throw_(db, "starting transaction");
|
||||
active = true;
|
||||
}
|
||||
|
||||
void SQLiteTxn::commit()
|
||||
{
|
||||
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "committing transaction");
|
||||
SQLiteError::throw_(db, "committing transaction");
|
||||
active = false;
|
||||
}
|
||||
|
||||
|
|
@ -200,7 +210,7 @@ SQLiteTxn::~SQLiteTxn()
|
|||
{
|
||||
try {
|
||||
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "aborting transaction");
|
||||
SQLiteError::throw_(db, "aborting transaction");
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
|
|
@ -215,7 +225,6 @@ void handleSQLiteBusy(const SQLiteBusy & e)
|
|||
if (now > lastWarned + 10) {
|
||||
lastWarned = now;
|
||||
logWarning({
|
||||
.name = "Sqlite busy",
|
||||
.msg = hintfmt(e.what())
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -96,10 +96,30 @@ struct SQLiteTxn
|
|||
};
|
||||
|
||||
|
||||
MakeError(SQLiteError, Error);
|
||||
MakeError(SQLiteBusy, SQLiteError);
|
||||
struct SQLiteError : Error
|
||||
{
|
||||
const char *path;
|
||||
int errNo, extendedErrNo;
|
||||
|
||||
[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs);
|
||||
template<typename... Args>
|
||||
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
|
||||
throw_(db, hintfmt(fs, args...));
|
||||
}
|
||||
|
||||
SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf);
|
||||
|
||||
protected:
|
||||
|
||||
template<typename... Args>
|
||||
SQLiteError(const char *path, int errNo, int extendedErrNo, const std::string & fs, const Args & ... args)
|
||||
: SQLiteError(path, errNo, extendedErrNo, hintfmt(fs, args...))
|
||||
{ }
|
||||
|
||||
[[noreturn]] static void throw_(sqlite3 * db, hintformat && hf);
|
||||
|
||||
};
|
||||
|
||||
MakeError(SQLiteBusy, SQLiteError);
|
||||
|
||||
void handleSQLiteBusy(const SQLiteBusy & e);
|
||||
|
||||
|
|
|
|||
|
|
@ -52,6 +52,10 @@ public:
|
|||
bool sameMachine() override
|
||||
{ return false; }
|
||||
|
||||
// FIXME extend daemon protocol, move implementation to RemoteStore
|
||||
std::optional<std::string> getBuildLog(const StorePath & path) override
|
||||
{ unsupported("getBuildLog"); }
|
||||
|
||||
private:
|
||||
|
||||
struct Connection : RemoteStore::Connection
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
|
|||
if (fakeSSH) {
|
||||
args = { "bash", "-c" };
|
||||
} else {
|
||||
args = { "ssh", host.c_str(), "-x", "-a" };
|
||||
args = { "ssh", host.c_str(), "-x" };
|
||||
addCommonSSHOpts(args);
|
||||
if (socketPath != "")
|
||||
args.insert(args.end(), {"-S", socketPath});
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
#include "crypto.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include "globals.hh"
|
||||
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
#include "util.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
|
|
@ -257,6 +258,84 @@ StorePath Store::addToStore(
|
|||
return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
|
||||
}
|
||||
|
||||
void Store::addMultipleToStore(
|
||||
PathsSource & pathsToCopy,
|
||||
Activity & act,
|
||||
RepairFlag repair,
|
||||
CheckSigsFlag checkSigs)
|
||||
{
|
||||
std::atomic<size_t> nrDone{0};
|
||||
std::atomic<size_t> nrFailed{0};
|
||||
std::atomic<uint64_t> bytesExpected{0};
|
||||
std::atomic<uint64_t> nrRunning{0};
|
||||
|
||||
using PathWithInfo = std::pair<ValidPathInfo, std::unique_ptr<Source>>;
|
||||
|
||||
std::map<StorePath, PathWithInfo *> infosMap;
|
||||
StorePathSet storePathsToAdd;
|
||||
for (auto & thingToAdd : pathsToCopy) {
|
||||
infosMap.insert_or_assign(thingToAdd.first.path, &thingToAdd);
|
||||
storePathsToAdd.insert(thingToAdd.first.path);
|
||||
}
|
||||
|
||||
auto showProgress = [&]() {
|
||||
act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed);
|
||||
};
|
||||
|
||||
ThreadPool pool;
|
||||
|
||||
processGraph<StorePath>(pool,
|
||||
storePathsToAdd,
|
||||
|
||||
[&](const StorePath & path) {
|
||||
|
||||
auto & [info, _] = *infosMap.at(path);
|
||||
|
||||
if (isValidPath(info.path)) {
|
||||
nrDone++;
|
||||
showProgress();
|
||||
return StorePathSet();
|
||||
}
|
||||
|
||||
bytesExpected += info.narSize;
|
||||
act.setExpected(actCopyPath, bytesExpected);
|
||||
|
||||
return info.references;
|
||||
},
|
||||
|
||||
[&](const StorePath & path) {
|
||||
checkInterrupt();
|
||||
|
||||
auto & [info_, source_] = *infosMap.at(path);
|
||||
auto info = info_;
|
||||
info.ultimate = false;
|
||||
|
||||
/* Make sure that the Source object is destroyed when
|
||||
we're done. In particular, a SinkToSource object must
|
||||
be destroyed to ensure that the destructors on its
|
||||
stack frame are run; this includes
|
||||
LegacySSHStore::narFromPath()'s connection lock. */
|
||||
auto source = std::move(source_);
|
||||
|
||||
if (!isValidPath(info.path)) {
|
||||
MaintainCount<decltype(nrRunning)> mc(nrRunning);
|
||||
showProgress();
|
||||
try {
|
||||
addToStore(info, *source, repair, checkSigs);
|
||||
} catch (Error & e) {
|
||||
nrFailed++;
|
||||
if (!settings.keepGoing)
|
||||
throw e;
|
||||
printMsg(lvlError, "could not copy %s: %s", printStorePath(path), e.what());
|
||||
showProgress();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
nrDone++;
|
||||
showProgress();
|
||||
});
|
||||
}
|
||||
|
||||
void Store::addMultipleToStore(
|
||||
Source & source,
|
||||
|
|
@ -991,113 +1070,61 @@ std::map<StorePath, StorePath> copyPaths(
|
|||
for (auto & path : storePaths)
|
||||
if (!valid.count(path)) missing.insert(path);
|
||||
|
||||
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
|
||||
|
||||
// In the general case, `addMultipleToStore` requires a sorted list of
|
||||
// store paths to add, so sort them right now
|
||||
auto sortedMissing = srcStore.topoSortPaths(missing);
|
||||
std::reverse(sortedMissing.begin(), sortedMissing.end());
|
||||
|
||||
std::map<StorePath, StorePath> pathsMap;
|
||||
for (auto & path : storePaths)
|
||||
pathsMap.insert_or_assign(path, path);
|
||||
|
||||
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
|
||||
Store::PathsSource pathsToCopy;
|
||||
|
||||
auto sorted = srcStore.topoSortPaths(missing);
|
||||
std::reverse(sorted.begin(), sorted.end());
|
||||
auto computeStorePathForDst = [&](const ValidPathInfo & currentPathInfo) -> StorePath {
|
||||
auto storePathForSrc = currentPathInfo.path;
|
||||
auto storePathForDst = storePathForSrc;
|
||||
if (currentPathInfo.ca && currentPathInfo.references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePathForSrc.name(), *currentPathInfo.ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePathForSrc);
|
||||
if (storePathForDst != storePathForSrc)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePathForSrc),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
return storePathForDst;
|
||||
};
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
sink << sorted.size();
|
||||
for (auto & storePath : sorted) {
|
||||
for (auto & missingPath : sortedMissing) {
|
||||
auto info = srcStore.queryPathInfo(missingPath);
|
||||
|
||||
auto storePathForDst = computeStorePathForDst(*info);
|
||||
pathsMap.insert_or_assign(missingPath, storePathForDst);
|
||||
|
||||
ValidPathInfo infoForDst = *info;
|
||||
infoForDst.path = storePathForDst;
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
// We can reasonably assume that the copy will happen whenever we
|
||||
// read the path, so log something about that at that point
|
||||
auto srcUri = srcStore.getUri();
|
||||
auto dstUri = dstStore.getUri();
|
||||
auto storePathS = srcStore.printStorePath(storePath);
|
||||
auto storePathS = srcStore.printStorePath(missingPath);
|
||||
Activity act(*logger, lvlInfo, actCopyPath,
|
||||
makeCopyPathMessage(srcUri, dstUri, storePathS),
|
||||
{storePathS, srcUri, dstUri});
|
||||
PushActivity pact(act.id);
|
||||
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
info->write(sink, srcStore, 16);
|
||||
srcStore.narFromPath(storePath, sink);
|
||||
}
|
||||
});
|
||||
|
||||
dstStore.addMultipleToStore(*source, repair, checkSigs);
|
||||
|
||||
#if 0
|
||||
std::atomic<size_t> nrDone{0};
|
||||
std::atomic<size_t> nrFailed{0};
|
||||
std::atomic<uint64_t> bytesExpected{0};
|
||||
std::atomic<uint64_t> nrRunning{0};
|
||||
|
||||
auto showProgress = [&]() {
|
||||
act.progress(nrDone, missing.size(), nrRunning, nrFailed);
|
||||
};
|
||||
|
||||
ThreadPool pool;
|
||||
|
||||
processGraph<StorePath>(pool,
|
||||
StorePathSet(missing.begin(), missing.end()),
|
||||
|
||||
[&](const StorePath & storePath) {
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
auto storePathForDst = storePath;
|
||||
if (info->ca && info->references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePath);
|
||||
if (storePathForDst != storePath)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePath),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
pathsMap.insert_or_assign(storePath, storePathForDst);
|
||||
|
||||
if (dstStore.isValidPath(storePath)) {
|
||||
nrDone++;
|
||||
showProgress();
|
||||
return StorePathSet();
|
||||
}
|
||||
|
||||
bytesExpected += info->narSize;
|
||||
act.setExpected(actCopyPath, bytesExpected);
|
||||
|
||||
return info->references;
|
||||
},
|
||||
|
||||
[&](const StorePath & storePath) {
|
||||
checkInterrupt();
|
||||
|
||||
auto info = srcStore.queryPathInfo(storePath);
|
||||
|
||||
auto storePathForDst = storePath;
|
||||
if (info->ca && info->references.empty()) {
|
||||
storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
|
||||
if (dstStore.storeDir == srcStore.storeDir)
|
||||
assert(storePathForDst == storePath);
|
||||
if (storePathForDst != storePath)
|
||||
debug("replaced path '%s' to '%s' for substituter '%s'",
|
||||
srcStore.printStorePath(storePath),
|
||||
dstStore.printStorePath(storePathForDst),
|
||||
dstStore.getUri());
|
||||
}
|
||||
pathsMap.insert_or_assign(storePath, storePathForDst);
|
||||
|
||||
if (!dstStore.isValidPath(storePathForDst)) {
|
||||
MaintainCount<decltype(nrRunning)> mc(nrRunning);
|
||||
showProgress();
|
||||
try {
|
||||
copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
|
||||
} catch (Error &e) {
|
||||
nrFailed++;
|
||||
if (!settings.keepGoing)
|
||||
throw e;
|
||||
printMsg(lvlError, "could not copy %s: %s", dstStore.printStorePath(storePath), e.what());
|
||||
showProgress();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
nrDone++;
|
||||
showProgress();
|
||||
srcStore.narFromPath(missingPath, sink);
|
||||
});
|
||||
#endif
|
||||
pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
|
||||
}
|
||||
|
||||
dstStore.addMultipleToStore(pathsToCopy, act, repair, checkSigs);
|
||||
|
||||
return pathsMap;
|
||||
}
|
||||
|
|
@ -1301,7 +1328,8 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri_
|
|||
return {uri, params};
|
||||
}
|
||||
|
||||
static bool isNonUriPath(const std::string & spec) {
|
||||
static bool isNonUriPath(const std::string & spec)
|
||||
{
|
||||
return
|
||||
// is not a URL
|
||||
spec.find("://") == std::string::npos
|
||||
|
|
@ -1313,11 +1341,36 @@ static bool isNonUriPath(const std::string & spec) {
|
|||
std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Params & params)
|
||||
{
|
||||
if (uri == "" || uri == "auto") {
|
||||
auto stateDir = get(params, "state").value_or(settings.nixStateDir);
|
||||
auto stateDir = getOr(params, "state", settings.nixStateDir);
|
||||
if (access(stateDir.c_str(), R_OK | W_OK) == 0)
|
||||
return std::make_shared<LocalStore>(params);
|
||||
else if (pathExists(settings.nixDaemonSocketFile))
|
||||
return std::make_shared<UDSRemoteStore>(params);
|
||||
#if __linux__
|
||||
else if (!pathExists(stateDir)
|
||||
&& params.empty()
|
||||
&& getuid() != 0
|
||||
&& !getEnv("NIX_STORE_DIR").has_value()
|
||||
&& !getEnv("NIX_STATE_DIR").has_value())
|
||||
{
|
||||
/* If /nix doesn't exist, there is no daemon socket, and
|
||||
we're not root, then automatically set up a chroot
|
||||
store in ~/.local/share/nix/root. */
|
||||
auto chrootStore = getDataDir() + "/nix/root";
|
||||
if (!pathExists(chrootStore)) {
|
||||
try {
|
||||
createDirs(chrootStore);
|
||||
} catch (Error & e) {
|
||||
return std::make_shared<LocalStore>(params);
|
||||
}
|
||||
warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
|
||||
} else
|
||||
debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore);
|
||||
Store::Params params2;
|
||||
params2["root"] = chrootStore;
|
||||
return std::make_shared<LocalStore>(params2);
|
||||
}
|
||||
#endif
|
||||
else
|
||||
return std::make_shared<LocalStore>(params);
|
||||
} else if (uri == "daemon") {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
#pragma once
|
||||
|
||||
#include "nar-info.hh"
|
||||
#include "realisation.hh"
|
||||
#include "path.hh"
|
||||
#include "derived-path.hh"
|
||||
|
|
@ -10,8 +11,8 @@
|
|||
#include "sync.hh"
|
||||
#include "globals.hh"
|
||||
#include "config.hh"
|
||||
#include "derivations.hh"
|
||||
#include "path-info.hh"
|
||||
#include "repair-flag.hh"
|
||||
|
||||
#include <atomic>
|
||||
#include <limits>
|
||||
|
|
@ -62,6 +63,8 @@ MakeError(BadStorePath, Error);
|
|||
|
||||
MakeError(InvalidStoreURI, Error);
|
||||
|
||||
struct BasicDerivation;
|
||||
struct Derivation;
|
||||
class FSAccessor;
|
||||
class NarInfoDiskCache;
|
||||
class Store;
|
||||
|
|
@ -76,127 +79,10 @@ enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true };
|
|||
const uint32_t exportMagic = 0x4558494e;
|
||||
|
||||
|
||||
typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;
|
||||
|
||||
|
||||
struct GCOptions
|
||||
{
|
||||
/* Garbage collector operation:
|
||||
|
||||
- `gcReturnLive': return the set of paths reachable from
|
||||
(i.e. in the closure of) the roots.
|
||||
|
||||
- `gcReturnDead': return the set of paths not reachable from
|
||||
the roots.
|
||||
|
||||
- `gcDeleteDead': actually delete the latter set.
|
||||
|
||||
- `gcDeleteSpecific': delete the paths listed in
|
||||
`pathsToDelete', insofar as they are not reachable.
|
||||
*/
|
||||
typedef enum {
|
||||
gcReturnLive,
|
||||
gcReturnDead,
|
||||
gcDeleteDead,
|
||||
gcDeleteSpecific,
|
||||
} GCAction;
|
||||
|
||||
GCAction action{gcDeleteDead};
|
||||
|
||||
/* If `ignoreLiveness' is set, then reachability from the roots is
|
||||
ignored (dangerous!). However, the paths must still be
|
||||
unreferenced *within* the store (i.e., there can be no other
|
||||
store paths that depend on them). */
|
||||
bool ignoreLiveness{false};
|
||||
|
||||
/* For `gcDeleteSpecific', the paths to delete. */
|
||||
StorePathSet pathsToDelete;
|
||||
|
||||
/* Stop after at least `maxFreed' bytes have been freed. */
|
||||
uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
|
||||
};
|
||||
|
||||
|
||||
struct GCResults
|
||||
{
|
||||
/* Depending on the action, the GC roots, or the paths that would
|
||||
be or have been deleted. */
|
||||
PathSet paths;
|
||||
|
||||
/* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
|
||||
number of bytes that would be or was freed. */
|
||||
uint64_t bytesFreed = 0;
|
||||
};
|
||||
|
||||
|
||||
enum BuildMode { bmNormal, bmRepair, bmCheck };
|
||||
|
||||
struct BuildResult;
|
||||
|
||||
struct BuildResult
|
||||
{
|
||||
/* Note: don't remove status codes, and only add new status codes
|
||||
at the end of the list, to prevent client/server
|
||||
incompatibilities in the nix-store --serve protocol. */
|
||||
enum Status {
|
||||
Built = 0,
|
||||
Substituted,
|
||||
AlreadyValid,
|
||||
PermanentFailure,
|
||||
InputRejected,
|
||||
OutputRejected,
|
||||
TransientFailure, // possibly transient
|
||||
CachedFailure, // no longer used
|
||||
TimedOut,
|
||||
MiscFailure,
|
||||
DependencyFailed,
|
||||
LogLimitExceeded,
|
||||
NotDeterministic,
|
||||
ResolvesToAlreadyValid,
|
||||
} status = MiscFailure;
|
||||
std::string errorMsg;
|
||||
|
||||
std::string toString() const {
|
||||
auto strStatus = [&]() {
|
||||
switch (status) {
|
||||
case Built: return "Built";
|
||||
case Substituted: return "Substituted";
|
||||
case AlreadyValid: return "AlreadyValid";
|
||||
case PermanentFailure: return "PermanentFailure";
|
||||
case InputRejected: return "InputRejected";
|
||||
case OutputRejected: return "OutputRejected";
|
||||
case TransientFailure: return "TransientFailure";
|
||||
case CachedFailure: return "CachedFailure";
|
||||
case TimedOut: return "TimedOut";
|
||||
case MiscFailure: return "MiscFailure";
|
||||
case DependencyFailed: return "DependencyFailed";
|
||||
case LogLimitExceeded: return "LogLimitExceeded";
|
||||
case NotDeterministic: return "NotDeterministic";
|
||||
case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
|
||||
default: return "Unknown";
|
||||
};
|
||||
}();
|
||||
return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
|
||||
}
|
||||
|
||||
/* How many times this build was performed. */
|
||||
unsigned int timesBuilt = 0;
|
||||
|
||||
/* If timesBuilt > 1, whether some builds did not produce the same
|
||||
result. (Note that 'isNonDeterministic = false' does not mean
|
||||
the build is deterministic, just that we don't have evidence of
|
||||
non-determinism.) */
|
||||
bool isNonDeterministic = false;
|
||||
|
||||
DrvOutputs builtOutputs;
|
||||
|
||||
/* The start/stop times of the build (or one of the rounds, if it
|
||||
was repeated). */
|
||||
time_t startTime = 0, stopTime = 0;
|
||||
|
||||
bool success() {
|
||||
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
|
||||
}
|
||||
};
|
||||
|
||||
struct StoreConfig : public Config
|
||||
{
|
||||
|
|
@ -474,12 +360,22 @@ public:
|
|||
virtual void addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;
|
||||
|
||||
// A list of paths infos along with a source providing the content of the
|
||||
// associated store path
|
||||
using PathsSource = std::vector<std::pair<ValidPathInfo, std::unique_ptr<Source>>>;
|
||||
|
||||
/* Import multiple paths into the store. */
|
||||
virtual void addMultipleToStore(
|
||||
Source & source,
|
||||
RepairFlag repair = NoRepair,
|
||||
CheckSigsFlag checkSigs = CheckSigs);
|
||||
|
||||
virtual void addMultipleToStore(
|
||||
PathsSource & pathsToCopy,
|
||||
Activity & act,
|
||||
RepairFlag repair = NoRepair,
|
||||
CheckSigsFlag checkSigs = CheckSigs);
|
||||
|
||||
/* Copy the contents of a path to the store and register the
|
||||
validity the resulting path. The resulting path is returned.
|
||||
The function object `filter' can be used to exclude files (see
|
||||
|
|
@ -549,6 +445,16 @@ public:
|
|||
BuildMode buildMode = bmNormal,
|
||||
std::shared_ptr<Store> evalStore = nullptr);
|
||||
|
||||
/* Like `buildPaths()`, but return a vector of `BuildResult`s
|
||||
corresponding to each element in `paths`. Note that in case of
|
||||
a build/substitution error, this function won't throw an
|
||||
exception, but return a `BuildResult` containing an error
|
||||
message. */
|
||||
virtual std::vector<BuildResult> buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & paths,
|
||||
BuildMode buildMode = bmNormal,
|
||||
std::shared_ptr<Store> evalStore = nullptr);
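Since buildPathsWithResults reports one BuildResult per requested path instead of throwing on the first failure, callers are expected to loop over the returned vector and check success() themselves. A caller-side sketch using only the API shown here (the helper name is illustrative):

    // Sketch: build several derived paths and report each outcome separately.
    #include "store-api.hh"
    #include "build-result.hh"
    #include <iostream>

    using namespace nix;

    void buildAndReport(Store & store, const std::vector<DerivedPath> & paths)
    {
        auto results = store.buildPathsWithResults(paths);
        for (auto & res : results)
            std::cout << (res.success() ? "ok:   " : "fail: ")
                      << res.toString() << "\n";
    }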
|
||||
|
||||
/* Build a single non-materialized derivation (i.e. not from an
|
||||
on-disk .drv file).
|
||||
|
||||
|
|
@ -595,26 +501,6 @@ public:
|
|||
virtual void addTempRoot(const StorePath & path)
|
||||
{ debug("not creating temporary root, store doesn't support GC"); }
|
||||
|
||||
/* Add an indirect root, which is merely a symlink to `path' from
|
||||
/nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
|
||||
to be a symlink to a store path. The garbage collector will
|
||||
automatically remove the indirect root when it finds that
|
||||
`path' has disappeared. */
|
||||
virtual void addIndirectRoot(const Path & path)
|
||||
{ unsupported("addIndirectRoot"); }
|
||||
|
||||
/* Find the roots of the garbage collector. Each root is a pair
|
||||
(link, storepath) where `link' is the path of the symlink
|
||||
outside of the Nix store that point to `storePath'. If
|
||||
'censor' is true, privacy-sensitive information about roots
|
||||
found in /proc is censored. */
|
||||
virtual Roots findRoots(bool censor)
|
||||
{ unsupported("findRoots"); }
|
||||
|
||||
/* Perform a garbage collection. */
|
||||
virtual void collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{ unsupported("collectGarbage"); }
|
||||
|
||||
/* Return a string representing information about the path that
|
||||
can be loaded into the database using `nix-store --load-db' or
|
||||
`nix-store --register-validity'. */
|
||||
|
|
@ -732,14 +618,6 @@ public:
|
|||
*/
|
||||
StorePathSet exportReferences(const StorePathSet & storePaths, const StorePathSet & inputPaths);
|
||||
|
||||
/* Return the build log of the specified store path, if available,
|
||||
or null otherwise. */
|
||||
virtual std::optional<std::string> getBuildLog(const StorePath & path)
|
||||
{ return std::nullopt; }
|
||||
|
||||
virtual void addBuildLog(const StorePath & path, std::string_view log)
|
||||
{ unsupported("addBuildLog"); }
|
||||
|
||||
/* Hack to allow long-running processes like hydra-queue-runner to
|
||||
occasionally flush their path info cache. */
|
||||
void clearPathInfoCache()
|
||||
|
|
|
|||
16
src/libstore/store-cast.hh
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
#pragma once
|
||||
|
||||
#include "store-api.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
template<typename T>
|
||||
T & require(Store & store)
|
||||
{
|
||||
auto * castedStore = dynamic_cast<T *>(&store);
|
||||
if (!castedStore)
|
||||
throw UsageError("%s not supported by store '%s'", T::operationName, store.getUri());
|
||||
return *castedStore;
|
||||
}
|
||||
|
||||
}
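The require<T> template is what helpers like LogStore::require delegate to: a checked dynamic_cast from a plain Store to the requested mixin. A usage sketch, assuming the GcStore mixin from gc-store.hh that this change references (with collectGarbage and the GCOptions/GCResults types moved out of store-api.hh); the function name is illustrative:

    // Sketch: downcast a plain Store to the GcStore mixin before collecting garbage.
    #include "store-cast.hh"
    #include "gc-store.hh"
    #include <iostream>

    using namespace nix;

    void deleteDeadPaths(Store & store)
    {
        auto & gcStore = require<GcStore>(store);  // throws UsageError otherwise
        GCOptions options;                         // default action: gcDeleteDead
        GCResults results;
        gcStore.collectGarbage(options, results);
        std::cout << "freed " << results.bytesFreed << " bytes\n";
    }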
|
||||
46
src/libstore/tests/path-with-outputs.cc
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
#include "path-with-outputs.hh"
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
TEST(parseOutputsSpec, basic)
|
||||
{
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo");
|
||||
ASSERT_EQ(prefix, "foo");
|
||||
ASSERT_TRUE(std::get_if<DefaultOutputs>(&outputsSpec));
|
||||
}
|
||||
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo^*");
|
||||
ASSERT_EQ(prefix, "foo");
|
||||
ASSERT_TRUE(std::get_if<AllOutputs>(&outputsSpec));
|
||||
}
|
||||
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo^out");
|
||||
ASSERT_EQ(prefix, "foo");
|
||||
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out"}));
|
||||
}
|
||||
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo^out,bin");
|
||||
ASSERT_EQ(prefix, "foo");
|
||||
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out", "bin"}));
|
||||
}
|
||||
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo^bar^out,bin");
|
||||
ASSERT_EQ(prefix, "foo^bar");
|
||||
ASSERT_TRUE(std::get<OutputNames>(outputsSpec) == OutputNames({"out", "bin"}));
|
||||
}
|
||||
|
||||
{
|
||||
auto [prefix, outputsSpec] = parseOutputsSpec("foo^&*()");
|
||||
ASSERT_EQ(prefix, "foo^&*()");
|
||||
ASSERT_TRUE(std::get_if<DefaultOutputs>(&outputsSpec));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -9,7 +9,7 @@ namespace nix {
|
|||
#define WORKER_MAGIC_1 0x6e697863
|
||||
#define WORKER_MAGIC_2 0x6478696f
|
||||
|
||||
#define PROTOCOL_VERSION (1 << 8 | 33)
|
||||
#define PROTOCOL_VERSION (1 << 8 | 34)
|
||||
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
|
||||
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
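The version word packs the major version into the high byte and the minor into the low byte, so this bump takes the daemon protocol from 1.33 to 1.34. A quick check of the arithmetic:

    // (1 << 8 | 34) == 0x0122: high byte 0x01 is the major version, low byte 0x22 == 34 is the minor.
    static_assert((1 << 8 | 34) == 0x0122);
    static_assert(((1 << 8 | 34) & 0xff00) == 0x0100);  // GET_PROTOCOL_MAJOR
    static_assert(((1 << 8 | 34) & 0x00ff) == 34);      // GET_PROTOCOL_MINOR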
|
||||
|
||||
|
|
@ -57,6 +57,7 @@ typedef enum {
|
|||
wopQueryRealisation = 43,
|
||||
wopAddMultipleToStore = 44,
|
||||
wopAddBuildLog = 45,
|
||||
wopBuildPathsWithResults = 46,
|
||||
} WorkerOp;
|
||||
|
||||
|
||||
|
|
@ -91,6 +92,7 @@ MAKE_WORKER_PROTO(, ContentAddress);
|
|||
MAKE_WORKER_PROTO(, DerivedPath);
|
||||
MAKE_WORKER_PROTO(, Realisation);
|
||||
MAKE_WORKER_PROTO(, DrvOutput);
|
||||
MAKE_WORKER_PROTO(, BuildResult);
|
||||
|
||||
MAKE_WORKER_PROTO(template<typename T>, std::vector<T>);
|
||||
MAKE_WORKER_PROTO(template<typename T>, std::set<T>);
|
||||
|
|
|
|||