Mirror of https://github.com/NixOS/nix.git
Merge branch 'master' into overlayfs-store
commit cb4f85f11c
253 changed files with 5623 additions and 2830 deletions
@@ -235,14 +235,14 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
         std::regex regex2("^[0-9a-f]{38}\\.debug$");
 
         for (auto & [s1, _type] : narAccessor->readDirectory(buildIdDir)) {
-            auto dir = buildIdDir + s1;
+            auto dir = buildIdDir / s1;
 
             if (narAccessor->lstat(dir).type != SourceAccessor::tDirectory
                 || !std::regex_match(s1, regex1))
                 continue;
 
             for (auto & [s2, _type] : narAccessor->readDirectory(dir)) {
-                auto debugPath = dir + s2;
+                auto debugPath = dir / s2;
 
                 if (narAccessor->lstat(debugPath).type != SourceAccessor::tRegular
                     || !std::regex_match(s2, regex2))
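Several hunks in this commit replace string concatenation with a path-joining operator (`buildIdDir + s1` becomes `buildIdDir / s1`). A minimal, std-only sketch of why an `operator/` on a canonical-path type is safer than `+` — this stand-in is illustrative, not Nix's actual `CanonPath`:

```cpp
#include <cassert>
#include <string>

// Stand-in for a canonical path type: operator/ inserts the separator
// itself and can reject bogus components, which raw `+` cannot.
struct CanonPath {
    std::string s; // always starts with '/'

    CanonPath operator/(const std::string & component) const
    {
        assert(!component.empty() && component.find('/') == std::string::npos);
        return CanonPath{s == "/" ? s + component : s + "/" + component};
    }
};

int main()
{
    CanonPath buildIdDir{"/lib/debug/.build-id"};
    assert((buildIdDir / "ab").s == "/lib/debug/.build-id/ab");
}
```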
@@ -305,7 +305,8 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
 StorePath BinaryCacheStore::addToStoreFromDump(
     Source & dump,
     std::string_view name,
-    ContentAddressMethod method,
+    FileSerialisationMethod dumpMethod,
+    ContentAddressMethod hashMethod,
     HashAlgorithm hashAlgo,
     const StorePathSet & references,
     RepairFlag repair)
@@ -313,17 +314,27 @@ StorePath BinaryCacheStore::addToStoreFromDump(
     std::optional<Hash> caHash;
     std::string nar;
+
+    // Calculating Git hash from NAR stream not yet implemented. May not
+    // be possible to implement in single-pass if the NAR is in an
+    // inconvenient order. Could fetch after uploading, however.
+    if (hashMethod.getFileIngestionMethod() == FileIngestionMethod::Git)
+        unsupported("addToStoreFromDump");
+
     if (auto * dump2p = dynamic_cast<StringSource *>(&dump)) {
         auto & dump2 = *dump2p;
         // Hack, this gives us a "replayable" source so we can compute
         // multiple hashes more easily.
-        caHash = hashString(HashAlgorithm::SHA256, dump2.s);
-        switch (method.getFileIngestionMethod()) {
-        case FileIngestionMethod::Recursive:
+        //
+        // Only calculate if the dump is in the right format, however.
+        if (static_cast<FileIngestionMethod>(dumpMethod) == hashMethod.getFileIngestionMethod())
+            caHash = hashString(HashAlgorithm::SHA256, dump2.s);
+        switch (dumpMethod) {
+        case FileSerialisationMethod::Recursive:
             // The dump is already NAR in this case, just use it.
             nar = dump2.s;
             break;
-        case FileIngestionMethod::Flat:
+        case FileSerialisationMethod::Flat:
         {
             // The dump is Flat, so we need to convert it to NAR with a
             // single file.
             StringSink s;
@@ -331,10 +342,11 @@ StorePath BinaryCacheStore::addToStoreFromDump(
             nar = std::move(s.s);
             break;
         }
         }
     } else {
         // Otherwise, we have to do th same hashing as NAR so our single
         // hash will suffice for both purposes.
-        if (method != FileIngestionMethod::Recursive || hashAlgo != HashAlgorithm::SHA256)
+        if (dumpMethod != FileSerialisationMethod::Recursive || hashAlgo != HashAlgorithm::SHA256)
             unsupported("addToStoreFromDump");
     }
     StringSource narDump { nar };
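The heart of this change is the split of the old `ContentAddressMethod method` parameter into a `FileSerialisationMethod dumpMethod` (how the incoming byte stream is encoded) and a `ContentAddressMethod hashMethod` (how the resulting store object is addressed). A self-contained sketch of the lowering that later hunks (e.g. in the daemon) perform, with both enums re-declared locally for illustration:

```cpp
#include <cassert>

enum class FileIngestionMethod { Flat, Recursive, Git };
enum class FileSerialisationMethod { Flat, Recursive };

// Git ingestion has no serialisation of its own, so a NAR (Recursive)
// dump is used to transport it; Flat and Recursive map one-to-one.
FileSerialisationMethod toSerialisation(FileIngestionMethod fim)
{
    switch (fim) {
    case FileIngestionMethod::Flat:
        return FileSerialisationMethod::Flat;
    case FileIngestionMethod::Recursive:
    case FileIngestionMethod::Git:
        return FileSerialisationMethod::Recursive;
    }
    assert(false && "unreachable");
    return FileSerialisationMethod::Recursive;
}

int main()
{
    assert(toSerialisation(FileIngestionMethod::Git) == FileSerialisationMethod::Recursive);
}
```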
@@ -349,7 +361,7 @@ StorePath BinaryCacheStore::addToStoreFromDump(
             *this,
             name,
             ContentAddressWithReferences::fromParts(
-                method,
+                hashMethod,
                 caHash ? *caHash : nar.first,
                 {
                     .others = references,
@@ -450,7 +462,7 @@ StorePath BinaryCacheStore::addToStore(
        non-recursive+sha256 so we can just use the default
        implementation of this method in terms of addToStoreFromDump. */
 
-    auto h = hashPath(accessor, path, method.getFileIngestionMethod(), hashAlgo, filter).first;
+    auto h = hashPath(accessor, path, method.getFileIngestionMethod(), hashAlgo, filter);
 
     auto source = sinkToSource([&](Sink & sink) {
         accessor.dumpPath(path, sink, filter);
@@ -125,7 +125,8 @@ public:
     StorePath addToStoreFromDump(
         Source & dump,
         std::string_view name,
-        ContentAddressMethod method,
+        FileSerialisationMethod dumpMethod,
+        ContentAddressMethod hashMethod,
         HashAlgorithm hashAlgo,
         const StorePathSet & references,
         RepairFlag repair) override;
@@ -147,7 +148,7 @@ public:
 
     void narFromPath(const StorePath & path, Sink & sink) override;
 
-    ref<SourceAccessor> getFSAccessor(bool requireValidPath) override;
+    ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;
 
     void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
 
@@ -708,7 +708,7 @@ void DerivationGoal::tryToBuild()
     if (!outputLocks.lockPaths(lockFiles, "", false)) {
         if (!actLock)
             actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
-                fmt("waiting for lock on %s", yellowtxt(showPaths(lockFiles))));
+                fmt("waiting for lock on %s", Magenta(showPaths(lockFiles))));
         worker.waitForAWhile(shared_from_this());
         return;
     }
@@ -762,7 +762,7 @@ void DerivationGoal::tryToBuild()
        the wake-up timeout expires. */
     if (!actLock)
         actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
-            fmt("waiting for a machine to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
+            fmt("waiting for a machine to build '%s'", Magenta(worker.store.printStorePath(drvPath))));
     worker.waitForAWhile(shared_from_this());
     outputLocks.unlock();
     return;
@@ -780,9 +780,13 @@ void DerivationGoal::tryToBuild()
 
 void DerivationGoal::tryLocalBuild() {
     throw Error(
-        "unable to build with a primary store that isn't a local store; "
-        "either pass a different '--store' or enable remote builds."
-        "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
+        R"(
+        Unable to build with a primary store that isn't a local store;
+        either pass a different '--store' or enable remote builds.
+
+        For more information check 'man nix.conf' and search for '/machines'.
+        )"
+    );
 }
 
 
@@ -891,7 +895,7 @@ void runPostBuildHook(
     if (hook == "")
         return;
 
-    Activity act(logger, lvlInfo, actPostBuildHook,
+    Activity act(logger, lvlTalkative, actPostBuildHook,
         fmt("running post-build-hook '%s'", settings.postBuildHook),
         Logger::Fields{store.printStorePath(drvPath)});
     PushActivity pact(act.id);
@@ -987,7 +991,7 @@ void DerivationGoal::buildDone()
     diskFull |= cleanupDecideWhetherDiskFull();
 
     auto msg = fmt("builder for '%s' %s",
-        yellowtxt(worker.store.printStorePath(drvPath)),
+        Magenta(worker.store.printStorePath(drvPath)),
         statusToString(status));
 
     if (!logger->isVerbose() && !logTail.empty()) {
@@ -1523,7 +1527,7 @@ void DerivationGoal::done(
     outputLocks.unlock();
     buildResult.status = status;
     if (ex)
-        buildResult.errorMsg = fmt("%s", normaltxt(ex->info().msg));
+        buildResult.errorMsg = fmt("%s", Uncolored(ex->info().msg));
     if (buildResult.status == BuildResult::TimedOut)
         worker.timedOut = true;
     if (buildResult.status == BuildResult::PermanentFailure)
@@ -33,7 +33,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
     }
 
     if (failed.size() == 1 && ex) {
-        ex->status = worker.failingExitStatus();
+        ex->withExitStatus(worker.failingExitStatus());
         throw std::move(*ex);
     } else if (!failed.empty()) {
         if (ex) logError(ex->info());
@@ -104,7 +104,7 @@ void Store::ensurePath(const StorePath & path)
 
     if (goal->exitCode != Goal::ecSuccess) {
         if (goal->ex) {
-            goal->ex->status = worker.failingExitStatus();
+            goal->ex->withExitStatus(worker.failingExitStatus());
             throw std::move(*goal->ex);
         } else
             throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
@@ -8,6 +8,7 @@
 #include "finally.hh"
 #include "util.hh"
 #include "archive.hh"
+#include "git.hh"
 #include "compression.hh"
 #include "daemon.hh"
 #include "topo-sort.hh"
@@ -92,7 +93,7 @@ void handleDiffHook(
     } catch (Error & error) {
         ErrorInfo ei = error.info();
         // FIXME: wrap errors.
-        ei.msg = hintfmt("diff hook execution failed: %s", ei.msg.str());
+        ei.msg = HintFmt("diff hook execution failed: %s", ei.msg.str());
         logError(ei);
     }
 }
@@ -232,7 +233,7 @@ void LocalDerivationGoal::tryLocalBuild()
     if (!buildUser) {
         if (!actLock)
             actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
-                fmt("waiting for a free build user ID for '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
+                fmt("waiting for a free build user ID for '%s'", Magenta(worker.store.printStorePath(drvPath))));
         worker.waitForAWhile(shared_from_this());
         return;
     }
@@ -1311,12 +1312,13 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual In
     StorePath addToStoreFromDump(
         Source & dump,
         std::string_view name,
-        ContentAddressMethod method,
+        FileSerialisationMethod dumpMethod,
+        ContentAddressMethod hashMethod,
         HashAlgorithm hashAlgo,
         const StorePathSet & references,
         RepairFlag repair) override
     {
-        auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, references, repair);
+        auto path = next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair);
         goal.addDependency(path);
        return path;
    }
@@ -2130,16 +2132,17 @@ void LocalDerivationGoal::runChild()
         try {
             logger = makeJSONLogger(*logger);
 
-            BasicDerivation & drv2(*drv);
-            for (auto & e : drv2.env)
-                e.second = rewriteStrings(e.second, inputRewrites);
+            std::map<std::string, Path> outputs;
+            for (auto & e : drv->outputs)
+                outputs.insert_or_assign(e.first,
+                    worker.store.printStorePath(scratchOutputs.at(e.first)));
 
             if (drv->builder == "builtin:fetchurl")
-                builtinFetchurl(drv2, netrcData);
+                builtinFetchurl(*drv, outputs, netrcData);
             else if (drv->builder == "builtin:buildenv")
-                builtinBuildenv(drv2);
+                builtinBuildenv(*drv, outputs);
             else if (drv->builder == "builtin:unpack-channel")
-                builtinUnpackChannel(drv2);
+                builtinUnpackChannel(*drv, outputs);
             else
                 throw Error("unsupported builtin builder '%1%'", drv->builder.substr(8));
             _exit(0);
@@ -2456,15 +2459,28 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
         rewriteOutput(outputRewrites);
         /* FIXME optimize and deduplicate with addToStore */
         std::string oldHashPart { scratchPath->hashPart() };
-        auto got = ({
-            HashModuloSink caSink { outputHash.hashAlgo, oldHashPart };
+        auto got = [&]{
             PosixSourceAccessor accessor;
-            dumpPath(
-                accessor, CanonPath { actualPath },
-                caSink,
-                outputHash.method.getFileIngestionMethod());
-            caSink.finish().first;
-        });
+            auto fim = outputHash.method.getFileIngestionMethod();
+            switch (fim) {
+            case FileIngestionMethod::Flat:
+            case FileIngestionMethod::Recursive:
+            {
+                HashModuloSink caSink { outputHash.hashAlgo, oldHashPart };
+                auto fim = outputHash.method.getFileIngestionMethod();
+                dumpPath(
+                    accessor, CanonPath { actualPath },
+                    caSink,
+                    (FileSerialisationMethod) fim);
+                return caSink.finish().first;
+            }
+            case FileIngestionMethod::Git: {
+                return git::dumpHash(
+                    outputHash.hashAlgo, accessor,
+                    CanonPath { tmpDir + "/tmp" }).hash;
+            }
+            }
+        }();
 
         ValidPathInfo newInfo0 {
             worker.store,
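The new `FileIngestionMethod::Git` branch above hashes the output with `git::dumpHash` instead of a NAR-based sink. For intuition: Git-style object hashes are computed over a `"<type> <size>\0"` header followed by the raw content. A std-only sketch of the blob preimage (the hash function itself is elided; illustrative only):

```cpp
#include <iostream>
#include <string>

// Build the preimage that Git hashes for a regular file ("blob") object.
std::string gitBlobPreimage(const std::string & content)
{
    std::string pre = "blob " + std::to_string(content.size());
    pre += '\0';  // NUL separates the header from the content
    pre += content;
    return pre;
}

int main()
{
    auto pre = gitBlobPreimage("hello\n");
    std::cout << pre.size() << " bytes would be fed to the hash\n"; // 13
}
```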
@@ -2490,7 +2506,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
             PosixSourceAccessor accessor;
             HashResult narHashAndSize = hashPath(
                 accessor, CanonPath { actualPath },
-                FileIngestionMethod::Recursive, HashAlgorithm::SHA256);
+                FileSerialisationMethod::Recursive, HashAlgorithm::SHA256);
             newInfo0.narHash = narHashAndSize.first;
             newInfo0.narSize = narHashAndSize.second;
         }
@@ -2514,7 +2530,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
             PosixSourceAccessor accessor;
             HashResult narHashAndSize = hashPath(
                 accessor, CanonPath { actualPath },
-                FileIngestionMethod::Recursive, HashAlgorithm::SHA256);
+                FileSerialisationMethod::Recursive, HashAlgorithm::SHA256);
             ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first };
             newInfo0.narSize = narHashAndSize.second;
             auto refs = rewriteRefs();
@@ -106,7 +106,7 @@ struct LocalDerivationGoal : public DerivationGoal
     RedirectedOutputs redirectedOutputs;
 
     /**
-     * The outputs paths used during the build.
+     * The output paths used during the build.
      *
      * - Input-addressed derivations or fixed content-addressed outputs are
     *   sometimes built when some of their outputs already exist, and can not
@@ -331,13 +331,23 @@ void Worker::run(const Goals & _topGoals)
         if (awake.empty() && 0U == settings.maxBuildJobs)
         {
             if (getMachines().empty())
-                throw Error("unable to start any build; either increase '--max-jobs' "
-                    "or enable remote builds."
-                    "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
+                throw Error(
+                    R"(
+                    Unable to start any build;
+                    either increase '--max-jobs' or enable remote builds.
+
+                    For more information run 'man nix.conf' and search for '/machines'.
+                    )"
+                );
             else
-                throw Error("unable to start any build; remote machines may not have "
-                    "all required system features."
-                    "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
+                throw Error(
+                    R"(
+                    Unable to start any build;
+                    remote machines may not have all required system features.
+
+                    For more information run 'man nix.conf' and search for '/machines'.
+                    )"
+                );
 
         }
         assert(!awake.empty());
@@ -519,11 +529,11 @@ bool Worker::pathContentsGood(const StorePath & path)
     if (!pathExists(store.printStorePath(path)))
         res = false;
     else {
-        HashResult current = hashPath(
+        Hash current = hashPath(
             *store.getFSAccessor(), CanonPath { store.printStorePath(path) },
             FileIngestionMethod::Recursive, info->narHash.algo);
         Hash nullHash(HashAlgorithm::SHA256);
-        res = info->narHash == nullHash || info->narHash == current.first;
+        res = info->narHash == nullHash || info->narHash == current;
     }
     pathContentsGoodCache.insert_or_assign(path, res);
     if (!res)
@@ -6,6 +6,7 @@
 namespace nix {
 
 // TODO: make pluggable.
-void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
-void builtinUnpackChannel(const BasicDerivation & drv);
+void builtinFetchurl(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs,
+    const std::string & netrcData);
+
+void builtinUnpackChannel(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs);
 
 }
@@ -161,7 +161,9 @@ void buildProfile(const Path & out, Packages && pkgs)
     debug("created %d symlinks in user environment", state.symlinks);
 }
 
-void builtinBuildenv(const BasicDerivation & drv)
+void builtinBuildenv(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs)
 {
     auto getAttr = [&](const std::string & name) {
         auto i = drv.env.find(name);
@@ -169,7 +171,7 @@ void builtinBuildenv(const BasicDerivation & drv)
         return i->second;
     };
 
-    Path out = getAttr("out");
+    auto out = outputs.at("out");
     createDirs(out);
 
     /* Convert the stuff we get from the environment back into a
@@ -45,6 +45,8 @@ typedef std::vector<Package> Packages;
 
 void buildProfile(const Path & out, Packages && pkgs);
 
-void builtinBuildenv(const BasicDerivation & drv);
+void builtinBuildenv(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs);
 
 }
@@ -6,7 +6,10 @@
 
 namespace nix {
 
-void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
+void builtinFetchurl(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs,
+    const std::string & netrcData)
 {
     /* Make the host's netrc data available. Too bad curl requires
        this to be stored in a file. It would be nice if we could just
@@ -16,14 +19,15 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
         writeFile(settings.netrcFile, netrcData, 0600);
     }
 
-    auto getAttr = [&](const std::string & name) {
-        auto i = drv.env.find(name);
-        if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
-        return i->second;
-    };
+    auto out = get(drv.outputs, "out");
+    if (!out)
+        throw Error("'builtin:fetchurl' requires an 'out' output");
 
-    Path storePath = getAttr("out");
-    auto mainUrl = getAttr("url");
+    if (!(drv.type().isFixed() || drv.type().isImpure()))
+        throw Error("'builtin:fetchurl' must be a fixed-output or impure derivation");
+
+    auto storePath = outputs.at("out");
+    auto mainUrl = drv.env.at("url");
     bool unpack = getOr(drv.env, "unpack", "") == "1";
 
     /* Note: have to use a fresh fileTransfer here because we're in
@@ -59,13 +63,12 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
     };
 
     /* Try the hashed mirrors first. */
-    if (getAttr("outputHashMode") == "flat")
+    auto dof = std::get_if<DerivationOutput::CAFixed>(&out->raw);
+    if (dof && dof->ca.method.getFileIngestionMethod() == FileIngestionMethod::Flat)
         for (auto hashedMirror : settings.hashedMirrors.get())
             try {
                 if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
-                std::optional<HashAlgorithm> ht = parseHashAlgoOpt(getAttr("outputHashAlgo"));
-                Hash h = newHashAllowEmpty(getAttr("outputHash"), ht);
-                fetch(hashedMirror + printHashAlgo(h.algo) + "/" + h.to_string(HashFormat::Base16, false));
+                fetch(hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/" + dof->ca.hash.to_string(HashFormat::Base16, false));
                 return;
             } catch (Error & e) {
                 debug(e.what());
@@ -3,7 +3,9 @@
 
 namespace nix {
 
-void builtinUnpackChannel(const BasicDerivation & drv)
+void builtinUnpackChannel(
+    const BasicDerivation & drv,
+    const std::map<std::string, Path> & outputs)
 {
     auto getAttr = [&](const std::string & name) {
         auto i = drv.env.find(name);
@@ -11,7 +13,7 @@ void builtinUnpackChannel(const BasicDerivation & drv)
         return i->second;
     };
 
-    Path out = getAttr("out");
+    auto out = outputs.at("out");
     auto channelName = getAttr("channelName");
     auto src = getAttr("src");
 
@@ -4,22 +4,44 @@
 
 namespace nix {
 
-std::string makeFileIngestionPrefix(FileIngestionMethod m)
+std::string_view makeFileIngestionPrefix(FileIngestionMethod m)
 {
     switch (m) {
     case FileIngestionMethod::Flat:
         return "";
     case FileIngestionMethod::Recursive:
         return "r:";
+    case FileIngestionMethod::Git:
+        experimentalFeatureSettings.require(Xp::GitHashing);
+        return "git:";
     default:
         throw Error("impossible, caught both cases");
     }
 }
 
-std::string ContentAddressMethod::renderPrefix() const
+std::string_view ContentAddressMethod::render() const
 {
     return std::visit(overloaded {
-        [](TextIngestionMethod) -> std::string { return "text:"; },
+        [](TextIngestionMethod) -> std::string_view { return "text"; },
+        [](FileIngestionMethod m2) {
+            /* Not prefixed for back compat with things that couldn't produce text before. */
+            return renderFileIngestionMethod(m2);
+        },
+    }, raw);
+}
+
+ContentAddressMethod ContentAddressMethod::parse(std::string_view m)
+{
+    if (m == "text")
+        return TextIngestionMethod {};
+    else
+        return parseFileIngestionMethod(m);
+}
+
+std::string_view ContentAddressMethod::renderPrefix() const
+{
+    return std::visit(overloaded {
+        [](TextIngestionMethod) -> std::string_view { return "text:"; },
         [](FileIngestionMethod m2) {
             /* Not prefixed for back compat with things that couldn't produce text before. */
             return makeFileIngestionPrefix(m2);
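The new `render()`/`parse()` pair added here is documented (in the header hunk further down) as mutually inverse. A std-only sketch of that round-trip contract, using a simplified stand-in enum; the `"flat"`/`"nar"` spellings follow `renderFileIngestionMethod` as an assumption:

```cpp
#include <cassert>
#include <stdexcept>
#include <string_view>

enum class Method { Text, Flat, Recursive, Git };

std::string_view render(Method m)
{
    switch (m) {
    case Method::Text: return "text";
    case Method::Flat: return "flat";
    case Method::Recursive: return "nar";
    case Method::Git: return "git";
    }
    throw std::logic_error("unreachable");
}

Method parse(std::string_view s)
{
    if (s == "text") return Method::Text;
    if (s == "flat") return Method::Flat;
    if (s == "nar") return Method::Recursive;
    if (s == "git") return Method::Git;
    throw std::invalid_argument("unknown content address method");
}

int main()
{
    // parse is the inverse of render, as the new doc comments state.
    for (auto m : {Method::Text, Method::Flat, Method::Recursive, Method::Git})
        assert(parse(render(m)) == m);
}
```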
@@ -32,13 +54,17 @@ ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
     if (splitPrefix(m, "r:")) {
         return FileIngestionMethod::Recursive;
     }
+    else if (splitPrefix(m, "git:")) {
+        experimentalFeatureSettings.require(Xp::GitHashing);
+        return FileIngestionMethod::Git;
+    }
     else if (splitPrefix(m, "text:")) {
         return TextIngestionMethod {};
     }
     return FileIngestionMethod::Flat;
 }
 
-std::string ContentAddressMethod::render(HashAlgorithm ha) const
+std::string ContentAddressMethod::renderWithAlgo(HashAlgorithm ha) const
 {
     return std::visit(overloaded {
         [&](const TextIngestionMethod & th) {
@@ -92,10 +118,10 @@ static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodP
     }
 
     auto parseHashAlgorithm_ = [&](){
-        auto hashTypeRaw = splitPrefixTo(rest, ':');
-        if (!hashTypeRaw)
+        auto hashAlgoRaw = splitPrefixTo(rest, ':');
+        if (!hashAlgoRaw)
             throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", wholeInput);
-        HashAlgorithm hashAlgo = parseHashAlgo(*hashTypeRaw);
+        HashAlgorithm hashAlgo = parseHashAlgo(*hashAlgoRaw);
         return hashAlgo;
     };
 
@@ -112,6 +138,10 @@ static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodP
         auto method = FileIngestionMethod::Flat;
         if (splitPrefix(rest, "r:"))
             method = FileIngestionMethod::Recursive;
+        else if (splitPrefix(rest, "git:")) {
+            experimentalFeatureSettings.require(Xp::GitHashing);
+            method = FileIngestionMethod::Git;
+        }
         HashAlgorithm hashAlgo = parseHashAlgorithm_();
         return {
             std::move(method),
@@ -133,7 +163,7 @@ ContentAddress ContentAddress::parse(std::string_view rawCa)
     };
 }
 
-std::pair<ContentAddressMethod, HashAlgorithm> ContentAddressMethod::parse(std::string_view caMethod)
+std::pair<ContentAddressMethod, HashAlgorithm> ContentAddressMethod::parseWithAlgo(std::string_view caMethod)
 {
     std::string asPrefix = std::string{caMethod} + ":";
     // parseContentAddressMethodPrefix takes its argument by reference
@@ -155,7 +185,7 @@ std::string renderContentAddress(std::optional<ContentAddress> ca)
 
 std::string ContentAddress::printMethodAlgo() const
 {
-    return method.renderPrefix()
+    return std::string { method.renderPrefix() }
         + printHashAlgo(hash.algo);
 }
 
@@ -36,7 +36,7 @@ struct TextIngestionMethod : std::monostate { };
 * Compute the prefix to the hash algorithm which indicates how the
 * files were ingested.
 */
-std::string makeFileIngestionPrefix(FileIngestionMethod m);
+std::string_view makeFileIngestionPrefix(FileIngestionMethod m);
 
 /**
  * An enumeration of all the ways we can content-address store objects.
@@ -59,6 +59,20 @@ struct ContentAddressMethod
 
     MAKE_WRAPPER_CONSTRUCTOR(ContentAddressMethod);
 
+    /**
+     * Parse a content addressing method (name).
+     *
+     * The inverse of `render`.
+     */
+    static ContentAddressMethod parse(std::string_view rawCaMethod);
+
+    /**
+     * Render a content addressing method (name).
+     *
+     * The inverse of `parse`.
+     */
+    std::string_view render() const;
+
     /**
      * Parse the prefix tag which indicates how the files
      * were ingested, with the fixed output case not prefixed for back
@@ -74,20 +88,20 @@ struct ContentAddressMethod
      *
      * The rough inverse of `parsePrefix()`.
      */
-    std::string renderPrefix() const;
+    std::string_view renderPrefix() const;
 
     /**
-     * Parse a content addressing method and hash type.
+     * Parse a content addressing method and hash algorithm.
      */
-    static std::pair<ContentAddressMethod, HashAlgorithm> parse(std::string_view rawCaMethod);
+    static std::pair<ContentAddressMethod, HashAlgorithm> parseWithAlgo(std::string_view rawCaMethod);
 
     /**
-     * Render a content addressing method and hash type in a
+     * Render a content addressing method and hash algorithm in a
      * nicer way, prefixing both cases.
      *
     * The rough inverse of `parse()`.
     */
-    std::string render(HashAlgorithm ht) const;
+    std::string renderWithAlgo(HashAlgorithm ha) const;
 
     /**
      * Get the underlying way to content-address file system objects.
@@ -113,7 +127,7 @@ struct ContentAddressMethod
 * ‘text:sha256:<sha256 hash of file contents>’
 *
 * - `FixedIngestionMethod`:
- *   ‘fixed:<r?>:<hash type>:<hash of file contents>’
+ *   ‘fixed:<r?>:<hash algorithm>:<hash of file contents>’
 */
struct ContentAddress
{
@@ -13,6 +13,7 @@
 #include "archive.hh"
 #include "derivations.hh"
 #include "args.hh"
+#include "git.hh"
 
 namespace nix::daemon {
 
@@ -119,7 +120,7 @@ struct TunnelLogger : public Logger
             if (GET_PROTOCOL_MINOR(clientVersion) >= 26) {
                 to << STDERR_ERROR << *ex;
             } else {
-                to << STDERR_ERROR << ex->what() << ex->status;
+                to << STDERR_ERROR << ex->what() << ex->info().status;
             }
         }
     }
@@ -400,11 +401,23 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         logger->startWork();
         auto pathInfo = [&]() {
             // NB: FramedSource must be out of scope before logger->stopWork();
-            auto [contentAddressMethod, hashAlgo_] = ContentAddressMethod::parse(camStr);
-            auto hashAlgo = hashAlgo_; // work around clang bug
+            auto [contentAddressMethod, hashAlgo] = ContentAddressMethod::parseWithAlgo(camStr);
             FramedSource source(from);
+            FileSerialisationMethod dumpMethod;
+            switch (contentAddressMethod.getFileIngestionMethod()) {
+            case FileIngestionMethod::Flat:
+                dumpMethod = FileSerialisationMethod::Flat;
+                break;
+            case FileIngestionMethod::Recursive:
+                dumpMethod = FileSerialisationMethod::Recursive;
+                break;
+            case FileIngestionMethod::Git:
+                // Use NAR; Git is not a serialization method
+                dumpMethod = FileSerialisationMethod::Recursive;
+                break;
+            }
             // TODO these two steps are essentially RemoteStore::addCAToStore. Move it up to Store.
-            auto path = store->addToStoreFromDump(source, name, contentAddressMethod, hashAlgo, refs, repair);
+            auto path = store->addToStoreFromDump(source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair);
             return store->queryPathInfo(path);
         }();
         logger->stopWork();
@@ -430,30 +443,23 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         hashAlgo = parseHashAlgo(hashAlgoRaw);
     }
 
+    // Old protocol always sends NAR, regardless of hashing method
     auto dumpSource = sinkToSource([&](Sink & saved) {
-        if (method == FileIngestionMethod::Recursive) {
-            /* We parse the NAR dump through into `saved` unmodified,
-               so why all this extra work? We still parse the NAR so
-               that we aren't sending arbitrary data to `saved`
-               unwittingly`, and we know when the NAR ends so we don't
-               consume the rest of `from` and can't parse another
-               command. (We don't trust `addToStoreFromDump` to not
-               eagerly consume the entire stream it's given, past the
-               length of the Nar. */
-            TeeSource savedNARSource(from, saved);
-            NullFileSystemObjectSink sink; /* just parse the NAR */
-            parseDump(sink, savedNARSource);
-        } else {
-            /* Incrementally parse the NAR file, stripping the
-               metadata, and streaming the sole file we expect into
-               `saved`. */
-            RegularFileSink savedRegular { saved };
-            parseDump(savedRegular, from);
-            if (!savedRegular.regular) throw Error("regular file expected");
-        }
+        /* We parse the NAR dump through into `saved` unmodified,
+           so why all this extra work? We still parse the NAR so
+           that we aren't sending arbitrary data to `saved`
+           unwittingly`, and we know when the NAR ends so we don't
+           consume the rest of `from` and can't parse another
+           command. (We don't trust `addToStoreFromDump` to not
+           eagerly consume the entire stream it's given, past the
+           length of the Nar. */
+        TeeSource savedNARSource(from, saved);
+        NullFileSystemObjectSink sink; /* just parse the NAR */
+        parseDump(sink, savedNARSource);
     });
     logger->startWork();
-    auto path = store->addToStoreFromDump(*dumpSource, baseName, method, hashAlgo);
+    auto path = store->addToStoreFromDump(
+        *dumpSource, baseName, FileSerialisationMethod::Recursive, method, hashAlgo);
     logger->stopWork();
 
     to << store->printStorePath(path);
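The simplification above leans on the `TeeSource` trick described in the comment: the NAR is parsed (to validate it and find its end) while every byte read is simultaneously copied to `saved`. A std-only sketch of that tee pattern, with stand-in types for illustration:

```cpp
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string>

// Stand-in for nix::Source: something bytes can be read from.
struct StringReader {
    std::string data;
    size_t pos = 0;
    size_t read(char * buf, size_t len)
    {
        size_t n = std::min(len, data.size() - pos);
        std::memcpy(buf, data.data() + pos, n);
        pos += n;
        return n;
    }
};

// Tee: pass reads through while capturing a copy of everything seen.
struct TeeReader {
    StringReader & inner;
    std::string & saved;
    size_t read(char * buf, size_t len)
    {
        size_t n = inner.read(buf, len);
        saved.append(buf, n);
        return n;
    }
};

int main()
{
    StringReader from{"nar bytes..."};
    std::string saved;
    TeeReader tee{from, saved};
    char buf[4];
    while (tee.read(buf, sizeof buf) > 0) { /* "parse" the stream */ }
    assert(saved == "nar bytes...");
}
```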
@@ -485,7 +491,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     logger->startWork();
     auto path = ({
         StringSource source { s };
-        store->addToStoreFromDump(source, suffix, TextIngestionMethod {}, HashAlgorithm::SHA256, refs, NoRepair);
+        store->addToStoreFromDump(source, suffix, FileSerialisationMethod::Flat, TextIngestionMethod {}, HashAlgorithm::SHA256, refs, NoRepair);
     });
     logger->stopWork();
     to << store->printStorePath(path);
@@ -150,7 +150,7 @@ StorePath writeDerivation(Store & store,
         })
         : ({
             StringSource s { contents };
-            store.addToStoreFromDump(s, suffix, TextIngestionMethod {}, HashAlgorithm::SHA256, references, repair);
+            store.addToStoreFromDump(s, suffix, FileSerialisationMethod::Flat, TextIngestionMethod {}, HashAlgorithm::SHA256, references, repair);
         });
 }
 
@@ -601,7 +601,7 @@ std::string Derivation::unparse(const StoreDirConfig & store, bool maskOutputs,
         },
         [&](const DerivationOutput::CAFloating & dof) {
             s += ','; printUnquotedString(s, "");
-            s += ','; printUnquotedString(s, dof.method.renderPrefix() + printHashAlgo(dof.hashAlgo));
+            s += ','; printUnquotedString(s, std::string { dof.method.renderPrefix() } + printHashAlgo(dof.hashAlgo));
             s += ','; printUnquotedString(s, "");
         },
         [&](const DerivationOutput::Deferred &) {
@@ -612,7 +612,7 @@ std::string Derivation::unparse(const StoreDirConfig & store, bool maskOutputs,
         [&](const DerivationOutput::Impure & doi) {
             // FIXME
             s += ','; printUnquotedString(s, "");
-            s += ','; printUnquotedString(s, doi.method.renderPrefix() + printHashAlgo(doi.hashAlgo));
+            s += ','; printUnquotedString(s, std::string { doi.method.renderPrefix() } + printHashAlgo(doi.hashAlgo));
             s += ','; printUnquotedString(s, "impure");
         }
     }, i.second.raw);
@@ -701,7 +701,7 @@ DerivationType BasicDerivation::type() const
                 floatingHashAlgo = dof.hashAlgo;
             } else {
                 if (*floatingHashAlgo != dof.hashAlgo)
-                    throw Error("all floating outputs must use the same hash type");
+                    throw Error("all floating outputs must use the same hash algorithm");
             }
         },
         [&](const DerivationOutput::Deferred &) {
@@ -984,7 +984,7 @@ void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDeriva
         },
         [&](const DerivationOutput::CAFloating & dof) {
             out << ""
-                << (dof.method.renderPrefix() + printHashAlgo(dof.hashAlgo))
+                << (std::string { dof.method.renderPrefix() } + printHashAlgo(dof.hashAlgo))
                 << "";
         },
         [&](const DerivationOutput::Deferred &) {
@@ -994,7 +994,7 @@ void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDeriva
         },
         [&](const DerivationOutput::Impure & doi) {
             out << ""
-                << (doi.method.renderPrefix() + printHashAlgo(doi.hashAlgo))
+                << (std::string { doi.method.renderPrefix() } + printHashAlgo(doi.hashAlgo))
                 << "impure";
         },
     }, i.second.raw);
@@ -1221,11 +1221,11 @@ nlohmann::json DerivationOutput::toJSON(
         // FIXME print refs?
     },
     [&](const DerivationOutput::CAFloating & dof) {
-        res["hashAlgo"] = dof.method.renderPrefix() + printHashAlgo(dof.hashAlgo);
+        res["hashAlgo"] = std::string { dof.method.renderPrefix() } + printHashAlgo(dof.hashAlgo);
     },
     [&](const DerivationOutput::Deferred &) {},
     [&](const DerivationOutput::Impure & doi) {
-        res["hashAlgo"] = doi.method.renderPrefix() + printHashAlgo(doi.hashAlgo);
+        res["hashAlgo"] = std::string { doi.method.renderPrefix() } + printHashAlgo(doi.hashAlgo);
         res["impure"] = true;
     },
 }, raw);
@@ -61,7 +61,8 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
     virtual StorePath addToStoreFromDump(
         Source & dump,
         std::string_view name,
-        ContentAddressMethod method = FileIngestionMethod::Recursive,
+        FileSerialisationMethod dumpMethod = FileSerialisationMethod::Recursive,
+        ContentAddressMethod hashMethod = FileIngestionMethod::Recursive,
         HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
         const StorePathSet & references = StorePathSet(),
         RepairFlag repair = NoRepair) override
@@ -106,6 +106,8 @@ struct curlFileTransfer : public FileTransfer
             this->result.data.append(data);
         })
     {
+        result.urls.push_back(request.uri);
+
         requestHeaders = curl_slist_append(requestHeaders, "Accept-Encoding: zstd, br, gzip, deflate, bzip2, xz");
         if (!request.expectedETag.empty())
             requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
@@ -182,6 +184,14 @@ struct curlFileTransfer : public FileTransfer
         return ((TransferItem *) userp)->writeCallback(contents, size, nmemb);
     }
 
+    void appendCurrentUrl()
+    {
+        char * effectiveUriCStr = nullptr;
+        curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
+        if (effectiveUriCStr && *result.urls.rbegin() != effectiveUriCStr)
+            result.urls.push_back(effectiveUriCStr);
+    }
+
     size_t headerCallback(void * contents, size_t size, size_t nmemb)
     {
         size_t realSize = size * nmemb;
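`appendCurrentUrl` records each redirect hop exactly once by comparing against the last entry. The same guard in isolation (std-only sketch; note it also tolerates an empty list, whereas the code above can rely on the constructor having pushed `request.uri` first):

```cpp
#include <cassert>
#include <string>
#include <vector>

void appendUrl(std::vector<std::string> & urls, const char * effective)
{
    // curl may return null before any transfer; only record a change.
    if (effective && (urls.empty() || urls.back() != effective))
        urls.push_back(effective);
}

int main()
{
    std::vector<std::string> urls{"http://a.example"};
    appendUrl(urls, "http://a.example");  // duplicate: ignored
    appendUrl(urls, "http://b.example");  // redirect: recorded
    assert(urls.size() == 2);
}
```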
@@ -196,6 +206,7 @@ struct curlFileTransfer : public FileTransfer
                 statusMsg = trim(match.str(1));
                 acceptRanges = false;
                 encoding = "";
+                appendCurrentUrl();
             } else {
 
                 auto i = line.find(':');
@@ -360,14 +371,11 @@ struct curlFileTransfer : public FileTransfer
     {
         auto httpStatus = getHTTPStatus();
 
-        char * effectiveUriCStr = nullptr;
-        curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
-        if (effectiveUriCStr)
-            result.effectiveUri = effectiveUriCStr;
-
         debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
             request.verb(), request.uri, code, httpStatus, result.bodySize);
 
+        appendCurrentUrl();
+
         if (decompressionSink) {
             try {
                 decompressionSink->finish();
@@ -779,7 +787,10 @@ FileTransferResult FileTransfer::upload(const FileTransferRequest & request)
     return enqueueFileTransfer(request).get();
 }
 
-void FileTransfer::download(FileTransferRequest && request, Sink & sink)
+void FileTransfer::download(
+    FileTransferRequest && request,
+    Sink & sink,
+    std::function<void(FileTransferResult)> resultCallback)
 {
     /* Note: we can't call 'sink' via request.dataCallback, because
        that would cause the sink to execute on the fileTransfer
@@ -829,11 +840,13 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
     };
 
     enqueueFileTransfer(request,
-        {[_state](std::future<FileTransferResult> fut) {
+        {[_state, resultCallback{std::move(resultCallback)}](std::future<FileTransferResult> fut) {
             auto state(_state->lock());
             state->quit = true;
             try {
-                fut.get();
+                auto res = fut.get();
+                if (resultCallback)
+                    resultCallback(std::move(res));
             } catch (...) {
                 state->exc = std::current_exception();
             }
@@ -882,12 +895,12 @@ template<typename... Args>
 FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args)
     : Error(args...), error(error), response(response)
 {
-    const auto hf = hintfmt(args...);
+    const auto hf = HintFmt(args...);
     // FIXME: Due to https://github.com/NixOS/nix/issues/3841 we don't know how
     // to print different messages for different verbosity levels. For now
     // we add some heuristics for detecting when we want to show the response.
     if (response && (response->size() < 1024 || response->find("<html>") != std::string::npos))
-        err.msg = hintfmt("%1%\n\nresponse body:\n\n%2%", normaltxt(hf.str()), chomp(*response));
+        err.msg = HintFmt("%1%\n\nresponse body:\n\n%2%", Uncolored(hf.str()), chomp(*response));
     else
         err.msg = hf;
 }
@@ -75,14 +75,34 @@ struct FileTransferRequest
 
 struct FileTransferResult
 {
+    /**
+     * Whether this is a cache hit (i.e. the ETag supplied in the
+     * request is still valid). If so, `data` is empty.
+     */
     bool cached = false;
+
+    /**
+     * The ETag of the object.
+     */
     std::string etag;
-    std::string effectiveUri;
+
+    /**
+     * All URLs visited in the redirect chain.
+     */
+    std::vector<std::string> urls;
+
+    /**
+     * The response body.
+     */
     std::string data;
+
     uint64_t bodySize = 0;
-    /* An "immutable" URL for this resource (i.e. one whose contents
-       will never change), as returned by the `Link: <url>;
-       rel="immutable"` header. */
+
+    /**
+     * An "immutable" URL for this resource (i.e. one whose contents
+     * will never change), as returned by the `Link: <url>;
+     * rel="immutable"` header.
+     */
     std::optional<std::string> immutableUrl;
 };
 
@@ -116,7 +136,10 @@ struct FileTransfer
     * Download a file, writing its data to a sink. The sink will be
     * invoked on the thread of the caller.
     */
-    void download(FileTransferRequest && request, Sink & sink);
+    void download(
+        FileTransferRequest && request,
+        Sink & sink,
+        std::function<void(FileTransferResult)> resultCallback = {});
 
     enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
 };
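The extra `resultCallback` parameter lets a streaming download report its final metadata (such as the new `urls` redirect chain) once the transfer completes. A std-only sketch of the callback shape; a default-constructed `std::function` is falsy, so existing call sites compile unchanged:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Result { std::vector<std::string> urls; };

// download() analogue: the optional callback fires after completion.
void download(std::function<void(Result)> resultCallback = {})
{
    Result res{{"https://example.org/x", "https://mirror.example.org/x"}};
    if (resultCallback)
        resultCallback(std::move(res));
}

int main()
{
    download(); // old-style call: no callback, nothing extra happens
    download([](Result r) {
        for (auto & u : r.urls)
            std::cout << "visited " << u << '\n';
    });
}
```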
@@ -180,14 +180,21 @@ public:
         getDefaultCores(),
         "cores",
         R"(
-          Sets the value of the `NIX_BUILD_CORES` environment variable in the
-          invocation of builders. Builders can use this variable at their
-          discretion to control the maximum amount of parallelism. For
-          instance, in Nixpkgs, if the derivation attribute
-          `enableParallelBuilding` is set to `true`, the builder passes the
-          `-jN` flag to GNU Make. It can be overridden using the `--cores`
-          command line switch and defaults to `1`. The value `0` means that
-          the builder should use all available CPU cores in the system.
+          Sets the value of the `NIX_BUILD_CORES` environment variable in the [invocation of the `builder` executable](@docroot@/language/derivations.md#builder-execution) of a derivation.
+          The `builder` executable can use this variable to control its own maximum amount of parallelism.
+
+          <!--
+          FIXME(@fricklerhandwerk): I don't think this should even be mentioned here.
+          A very generic example using `derivation` and `xargs` may be more appropriate to explain the mechanism.
+          Using `mkDerivation` as an example requires being aware of that there are multiple independent layers that are completely opaque here.
+          -->
+          For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it will pass the `-j${NIX_BUILD_CORES}` flag to GNU Make.
+
+          The value `0` means that the `builder` should use all available CPU cores in the system.
+
+          > **Note**
+          >
+          > The number of parallel local Nix build jobs is independently controlled with the [`max-jobs`](#conf-max-jobs) setting.
         )",
         {"build-cores"},
         // Don't document the machine-specific default value
@@ -270,9 +277,121 @@ public:
     Setting<std::string> builders{
         this, "@" + nixConfDir + "/machines", "builders",
         R"(
-          A semicolon-separated list of build machines.
-          For the exact format and examples, see [the manual chapter on remote builds](../advanced-topics/distributed-builds.md)
-        )"};
+          A semicolon- or newline-separated list of build machines.
+
+          In addition to the [usual ways of setting configuration options](@docroot@/command-ref/conf-file.md), the value can be read from a file by prefixing its absolute path with `@`.
+
+          > **Example**
+          >
+          > This is the default setting:
+          >
+          > ```
+          > builders = @/etc/nix/machines
+          > ```
+
+          Each machine specification consists of the following elements, separated by spaces.
+          Only the first element is required.
+          To leave a field at its default, set it to `-`.
+
+          1. The URI of the remote store in the format `ssh://[username@]hostname`.
+
+             > **Example**
+             >
+             > `ssh://nix@mac`
+
+             For backward compatibility, `ssh://` may be omitted.
+             The hostname may be an alias defined in `~/.ssh/config`.
+
+          2. A comma-separated list of [Nix system types](@docroot@/contributing/hacking.md#system-type).
+             If omitted, this defaults to the local platform type.
+
+             > **Example**
+             >
+             > `aarch64-darwin`
+
+             It is possible for a machine to support multiple platform types.
+
+             > **Example**
+             >
+             > `i686-linux,x86_64-linux`
+
+          3. The SSH identity file to be used to log in to the remote machine.
+             If omitted, SSH will use its regular identities.
+
+             > **Example**
+             >
+             > `/home/user/.ssh/id_mac`
+
+          4. The maximum number of builds that Nix will execute in parallel on the machine.
+             Typically this should be equal to the number of CPU cores.
+
+          5. The “speed factor”, indicating the relative speed of the machine as a positive integer.
+             If there are multiple machines of the right type, Nix will prefer the fastest, taking load into account.
+
+          6. A comma-separated list of supported [system features](#conf-system-features).
+
+             A machine will only be used to build a derivation if all the features in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute are supported by that machine.
+
+          7. A comma-separated list of required [system features](#conf-system-features).
+
+             A machine will only be used to build a derivation if all of the machine’s required features appear in the derivation’s [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute.
+
+          8. The (base64-encoded) public host key of the remote machine.
+             If omitted, SSH will use its regular `known_hosts` file.
+
+             The value for this field can be obtained via `base64 -w0`.
+
+          > **Example**
+          >
+          > Multiple builders specified on the command line:
+          >
+          > ```console
+          > --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd'
+          > ```
+
+          > **Example**
+          >
+          > This specifies several machines that can perform `i686-linux` builds:
+          >
+          > ```
+          > nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 8 1 kvm
+          > nix@itchy.labs.cs.uu.nl    i686-linux /home/nix/.ssh/id_scratchy 8 2
+          > nix@poochie.labs.cs.uu.nl  i686-linux /home/nix/.ssh/id_scratchy 1 2 kvm benchmark
+          > ```
+          >
+          > However, `poochie` will only build derivations that have the attribute
+          >
+          > ```nix
+          > requiredSystemFeatures = [ "benchmark" ];
+          > ```
+          >
+          > or
+          >
+          > ```nix
+          > requiredSystemFeatures = [ "benchmark" "kvm" ];
+          > ```
+          >
+          > `itchy` cannot do builds that require `kvm`, but `scratchy` does support such builds.
+          > For regular builds, `itchy` will be preferred over `scratchy` because it has a higher speed factor.
+
+          For Nix to use substituters, the calling user must be in the [`trusted-users`](#conf-trusted-users) list.
+
+          > **Note**
+          >
+          > A build machine must be accessible via SSH and have Nix installed.
+          > `nix` must be available in `$PATH` for the user connecting over SSH.
+
+          > **Warning**
+          >
+          > If you are building via the Nix daemon (default), the Nix daemon user account on the local machine (that is, `root`) requires access to a user account on the remote machine (not necessarily `root`).
+          >
+          > If you can’t or don’t want to configure `root` to be able to access the remote machine, set [`store`](#conf-store) to any [local store](@docroot@/store/types/local-store.html), e.g. by passing `--store /tmp` to the command on the local machine.
+
+          To build only on remote machines and disable local builds, set [`max-jobs`](#conf-max-jobs) to 0.
+
+          If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substituters) to `true`.
+        )",
+        {}, false};
 
     Setting<bool> alwaysAllowSubstitutes{
         this, false, "always-allow-substitutes",
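The machine specification described above is a whitespace-separated record of up to eight fields, where `-` keeps a field at its default. A std-only sketch of splitting one such line (illustrative; Nix's real parser lives elsewhere and also handles quoting and defaults):

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    std::string spec = "ssh://nix@mac aarch64-darwin - 4 1 kvm";

    std::vector<std::string> fields;
    std::istringstream in(spec);
    for (std::string f; in >> f; )
        fields.push_back(f == "-" ? "" : f);  // "-" means "use the default"

    // 1: URI, 2: systems, 3: SSH key, 4: max jobs, 5: speed factor, ...
    for (size_t i = 0; i < fields.size(); ++i)
        std::cout << i + 1 << ": " << fields[i] << '\n';
}
```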
@@ -793,10 +912,17 @@ public:
     Setting<unsigned int> ttlNegativeNarInfoCache{
         this, 3600, "narinfo-cache-negative-ttl",
         R"(
-          The TTL in seconds for negative lookups. If a store path is queried
-          from a substituter but was not found, there will be a negative
-          lookup cached in the local disk cache database for the specified
-          duration.
+          The TTL in seconds for negative lookups.
+          If a store path is queried from a [substituter](#conf-substituters) but was not found, there will be a negative lookup cached in the local disk cache database for the specified duration.
+
+          Set to `0` to force updating the lookup cache.
+
+          To wipe the lookup cache completely:
+
+          ```shell-session
+          $ rm $HOME/.cache/nix/binary-cache-v*.sqlite*
+          # rm /root/.cache/nix/binary-cache-v*.sqlite*
+          ```
         )"};
 
     Setting<unsigned int> ttlPositiveNarInfoCache{
@@ -968,8 +1094,8 @@ public:
         this, {}, "hashed-mirrors",
         R"(
           A list of web servers used by `builtins.fetchurl` to obtain files by
-          hash. Given a hash type *ht* and a base-16 hash *h*, Nix will try to
-          download the file from *hashed-mirror*/*ht*/*h*. This allows files to
+          hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix will try to
+          download the file from *hashed-mirror*/*ha*/*h*. This allows files to
           be downloaded even if they have disappeared from their original URI.
           For example, given an example mirror `http://tarballs.nixos.org/`,
           when building the derivation
@@ -72,7 +72,8 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
     virtual StorePath addToStoreFromDump(
         Source & dump,
         std::string_view name,
-        ContentAddressMethod method = FileIngestionMethod::Recursive,
+        FileSerialisationMethod dumpMethod = FileSerialisationMethod::Recursive,
+        ContentAddressMethod hashMethod = FileIngestionMethod::Recursive,
         HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
         const StorePathSet & references = StorePathSet(),
         RepairFlag repair = NoRepair) override
@@ -28,7 +28,7 @@ struct LocalStoreAccessor : PosixSourceAccessor
         auto [storePath, rest] = store->toStorePath(path.abs());
         if (requireValidPath && !store->isValidPath(storePath))
             throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath));
-        return CanonPath(store->getRealStoreDir()) + storePath.to_string() + CanonPath(rest);
+        return CanonPath(store->getRealStoreDir()) / storePath.to_string() / CanonPath(rest);
     }
 
     std::optional<Stat> maybeLstat(const CanonPath & path) override
@@ -43,7 +43,7 @@ public:
     LocalFSStore(const Params & params);
 
     void narFromPath(const StorePath & path, Sink & sink) override;
-    ref<SourceAccessor> getFSAccessor(bool requireValidPath) override;
+    ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;
 
     /**
      * Creates symlink from the `gcRoot` to the `storePath` and
@@ -1,5 +1,6 @@
 #include "local-store.hh"
 #include "globals.hh"
+#include "git.hh"
 #include "archive.hh"
 #include "pathlocks.hh"
 #include "worker-protocol.hh"
@@ -1103,19 +1104,29 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
     if (info.ca) {
         auto & specified = *info.ca;
         auto actualHash = ({
-            HashModuloSink caSink {
-                specified.hash.algo,
-                std::string { info.path.hashPart() },
-            };
-            PosixSourceAccessor accessor;
-            dumpPath(
-                *getFSAccessor(false),
-                CanonPath { printStorePath(info.path) },
-                caSink,
-                specified.method.getFileIngestionMethod());
+            auto accessor = getFSAccessor(false);
+            CanonPath path { printStorePath(info.path) };
+            Hash h { HashAlgorithm::SHA256 }; // throwaway def to appease C++
+            auto fim = specified.method.getFileIngestionMethod();
+            switch (fim) {
+            case FileIngestionMethod::Flat:
+            case FileIngestionMethod::Recursive:
+            {
+                HashModuloSink caSink {
+                    specified.hash.algo,
+                    std::string { info.path.hashPart() },
+                };
+                dumpPath(*accessor, path, caSink, (FileSerialisationMethod) fim);
+                h = caSink.finish().first;
+                break;
+            }
+            case FileIngestionMethod::Git:
+                h = git::dumpHash(specified.hash.algo, *accessor, path).hash;
+                break;
+            }
             ContentAddress {
                 .method = specified.method,
-                .hash = caSink.finish().first,
+                .hash = std::move(h),
             };
         });
         if (specified.hash != actualHash.hash) {
@@ -1143,7 +1154,8 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
 StorePath LocalStore::addToStoreFromDump(
     Source & source0,
     std::string_view name,
-    ContentAddressMethod method,
+    FileSerialisationMethod dumpMethod,
+    ContentAddressMethod hashMethod,
     HashAlgorithm hashAlgo,
     const StorePathSet & references,
     RepairFlag repair)
@@ -1196,7 +1208,13 @@ StorePath LocalStore::addToStoreFromDump(
     Path tempDir;
     AutoCloseFD tempDirFd;
 
-    if (!inMemory) {
+    bool methodsMatch = ContentAddressMethod(FileIngestionMethod(dumpMethod)) == hashMethod;
+
+    /* If the methods don't match, our streaming hash of the dump is the
+       wrong sort, and we need to rehash. */
+    bool inMemoryAndDontNeedRestore = inMemory && methodsMatch;
+
+    if (!inMemoryAndDontNeedRestore) {
         /* Drain what we pulled so far, and then keep on pulling */
         StringSource dumpSource { dump };
         ChainSource bothSource { dumpSource, source };
@@ -1205,17 +1223,23 @@ StorePath LocalStore::addToStoreFromDump(
         delTempDir = std::make_unique<AutoDelete>(tempDir);
         tempPath = tempDir + "/x";
 
-        restorePath(tempPath, bothSource, method.getFileIngestionMethod());
+        restorePath(tempPath, bothSource, dumpMethod);
 
         dumpBuffer.reset();
         dump = {};
     }
 
-    auto [hash, size] = hashSink->finish();
+    auto [dumpHash, size] = hashSink->finish();
+
+    PosixSourceAccessor accessor;
 
     auto desc = ContentAddressWithReferences::fromParts(
-        method,
-        hash,
+        hashMethod,
+        methodsMatch
+            ? dumpHash
+            : hashPath(
+                accessor, CanonPath { tempPath },
+                hashMethod.getFileIngestionMethod(), hashAlgo),
         {
             .others = references,
             // caller is not capable of creating a self-reference, because this is content-addressed without modulus
@@ -1241,10 +1265,20 @@ StorePath LocalStore::addToStoreFromDump(
 
         autoGC();
 
-        if (inMemory) {
+        if (inMemoryAndDontNeedRestore) {
             StringSource dumpSource { dump };
             /* Restore from the buffer in memory. */
-            restorePath(realPath, dumpSource, method.getFileIngestionMethod());
+            auto fim = hashMethod.getFileIngestionMethod();
+            switch (fim) {
+            case FileIngestionMethod::Flat:
+            case FileIngestionMethod::Recursive:
+                restorePath(realPath, dumpSource, (FileSerialisationMethod) fim);
+                break;
+            case FileIngestionMethod::Git:
+                // doesn't correspond to serialization method, so
+                // this should be unreachable
+                assert(false);
+            }
         } else {
             /* Move the temporary path we restored above. */
             moveFile(tempPath, realPath);
@@ -1252,8 +1286,8 @@ StorePath LocalStore::addToStoreFromDump(
 
     /* For computing the nar hash. In recursive SHA-256 mode, this
        is the same as the store hash, so no need to do it again. */
-    auto narHash = std::pair { hash, size };
-    if (method != FileIngestionMethod::Recursive || hashAlgo != HashAlgorithm::SHA256) {
+    auto narHash = std::pair { dumpHash, size };
+    if (dumpMethod != FileSerialisationMethod::Recursive || hashAlgo != HashAlgorithm::SHA256) {
         HashSink narSink { HashAlgorithm::SHA256 };
         dumpPath(realPath, narSink);
         narHash = narSink.finish();
@@ -1345,7 +1379,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
                 PosixSourceAccessor accessor;
                 std::string hash = hashPath(
                     accessor, CanonPath { linkPath },
-                    FileIngestionMethod::Recursive, HashAlgorithm::SHA256).first.to_string(HashFormat::Nix32, false);
+                    FileIngestionMethod::Recursive, HashAlgorithm::SHA256).to_string(HashFormat::Nix32, false);
                 if (hash != link.name) {
                     printError("link '%s' was modified! expected hash '%s', got '%s'",
                         linkPath, link.name, hash);
@@ -180,7 +180,8 @@ public:
    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        ContentAddressMethod method,
        FileSerialisationMethod dumpMethod,
        ContentAddressMethod hashMethod,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        RepairFlag repair) override;
@@ -277,7 +277,7 @@ json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse)
    json &res2 = obj["entries"];
    for (const auto & [name, type] : accessor->readDirectory(path)) {
        if (recurse) {
            res2[name] = listNar(accessor, path + name, true);
            res2[name] = listNar(accessor, path / name, true);
        } else
            res2[name] = json::object();
    }
@@ -209,7 +209,7 @@ public:
    {
        auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
        assert(r.next());
        if (!r.next()) { abort(); }
        ret.id = (int) r.getInt(0);
    }
@@ -151,7 +151,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
        PosixSourceAccessor accessor;
        hashPath(
            accessor, CanonPath { path },
            FileIngestionMethod::Recursive, HashAlgorithm::SHA256).first;
            FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
    });
    debug("'%1%' has hash '%2%'", path, hash.to_string(HashFormat::Nix32, true));
@@ -166,7 +166,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
        PosixSourceAccessor accessor;
        hashPath(
            accessor, CanonPath { linkPath },
            FileIngestionMethod::Recursive, HashAlgorithm::SHA256).first;
            FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
    })))
    {
        // XXX: Consider overwriting linkPath with our valid version.
@@ -13,6 +13,7 @@
#include "derivations.hh"
#include "pool.hh"
#include "finally.hh"
#include "git.hh"
#include "logging.hh"
#include "callback.hh"
#include "filetransfer.hh"
@@ -435,7 +436,7 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
        conn->to
            << WorkerProto::Op::AddToStore
            << name
            << caMethod.render(hashAlgo);
            << caMethod.renderWithAlgo(hashAlgo);
        WorkerProto::write(*this, *conn, references);
        conn->to << repair;
@@ -508,12 +509,28 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
StorePath RemoteStore::addToStoreFromDump(
    Source & dump,
    std::string_view name,
    ContentAddressMethod method,
    FileSerialisationMethod dumpMethod,
    ContentAddressMethod hashMethod,
    HashAlgorithm hashAlgo,
    const StorePathSet & references,
    RepairFlag repair)
{
    return addCAToStore(dump, name, method, hashAlgo, references, repair)->path;
    FileSerialisationMethod fsm;
    switch (hashMethod.getFileIngestionMethod()) {
    case FileIngestionMethod::Flat:
        fsm = FileSerialisationMethod::Flat;
        break;
    case FileIngestionMethod::Recursive:
        fsm = FileSerialisationMethod::Recursive;
        break;
    case FileIngestionMethod::Git:
        // Use NAR; Git is not a serialization method
        fsm = FileSerialisationMethod::Recursive;
        break;
    }
    if (fsm != dumpMethod)
        unsupported("RemoteStore::addToStoreFromDump doesn't support this `dumpMethod` `hashMethod` combination");
    return addCAToStore(dump, name, hashMethod, hashAlgo, references, repair)->path;
}
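The ingestion-to-serialisation switch above reappears in `Store::addToStore` later in this commit; a shared helper along these lines (hypothetical, not in the patch) could express the mapping once:

    #include <cassert>

    // Hypothetical consolidation of the duplicated switch. Git has no
    // serialisation format of its own, so it falls back to NAR.
    static FileSerialisationMethod serialisationFor(ContentAddressMethod method)
    {
        switch (method.getFileIngestionMethod()) {
        case FileIngestionMethod::Flat:
            return FileSerialisationMethod::Flat;
        case FileIngestionMethod::Recursive:
        case FileIngestionMethod::Git: // use NAR; Git is not a serialisation method
            return FileSerialisationMethod::Recursive;
        }
        assert(false); // all enum values are handled above
    }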
@@ -87,7 +87,8 @@ public:
    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        ContentAddressMethod method = FileIngestionMethod::Recursive,
        FileSerialisationMethod dumpMethod = FileSerialisationMethod::Recursive,
        ContentAddressMethod hashMethod = FileIngestionMethod::Recursive,
        HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
        const StorePathSet & references = StorePathSet(),
        RepairFlag repair = NoRepair) override;
@@ -184,7 +185,7 @@ protected:

    friend struct ConnectionHandle;

    virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override;
    virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;

    virtual void narFromPath(const StorePath & path, Sink & sink) override;
@@ -10,19 +10,19 @@

namespace nix {

SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf)
SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, HintFmt && hf)
    : Error(""), path(path), errMsg(errMsg), errNo(errNo), extendedErrNo(extendedErrNo), offset(offset)
{
    auto offsetStr = (offset == -1) ? "" : "at offset " + std::to_string(offset) + ": ";
    err.msg = hintfmt("%s: %s%s, %s (in '%s')",
        normaltxt(hf.str()),
    err.msg = HintFmt("%s: %s%s, %s (in '%s')",
        Uncolored(hf.str()),
        offsetStr,
        sqlite3_errstr(extendedErrNo),
        errMsg,
        path ? path : "(in-memory)");
}

[[noreturn]] void SQLiteError::throw_(sqlite3 * db, hintformat && hf)
[[noreturn]] void SQLiteError::throw_(sqlite3 * db, HintFmt && hf)
{
    int err = sqlite3_errcode(db);
    int exterr = sqlite3_extended_errcode(db);
@@ -33,7 +33,7 @@ SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int ex
    if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
        auto exp = SQLiteBusy(path, errMsg, err, exterr, offset, std::move(hf));
        exp.err.msg = hintfmt(
        exp.err.msg = HintFmt(
            err == SQLITE_PROTOCOL
                ? "SQLite database '%s' is busy (SQLITE_PROTOCOL)"
                : "SQLite database '%s' is busy",
@@ -249,7 +249,7 @@ void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning)
    if (now > nextWarning) {
        nextWarning = now + 10;
        logWarning({
            .msg = hintfmt(e.what())
            .msg = HintFmt(e.what())
        });
    }
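`handleSQLiteBusy` only sleeps briefly and rate-limits the warning; callers are expected to retry in a loop. A minimal caller-side sketch of that pattern (the `doWork` callable is an assumption, not part of the patch):

    #include <ctime>

    template<typename F>
    auto retryWhileBusy(F doWork) -> decltype(doWork())
    {
        // handleSQLiteBusy advances nextWarning itself, so at most one
        // warning is logged every 10 seconds while we spin.
        time_t nextWarning = time(0) + 10;
        while (true) {
            try {
                return doWork();
            } catch (SQLiteBusy & e) {
                handleSQLiteBusy(e, nextWarning);
            }
        }
    }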
@@ -142,19 +142,19 @@ struct SQLiteError : Error

    template<typename... Args>
    [[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
        throw_(db, hintfmt(fs, args...));
        throw_(db, HintFmt(fs, args...));
    }

    SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf);
    SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, HintFmt && hf);

protected:

    template<typename... Args>
    SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args)
        : SQLiteError(path, errNo, extendedErrNo, offset, hintfmt(fs, args...))
        : SQLiteError(path, errMsg, errNo, extendedErrNo, offset, HintFmt(fs, args...))
    { }

    [[noreturn]] static void throw_(sqlite3 * db, hintformat && hf);
    [[noreturn]] static void throw_(sqlite3 * db, HintFmt && hf);

};
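For illustration, a hypothetical call site for the variadic `throw_` overload above; the format string and arguments are forwarded into `HintFmt`, and the error codes are read off the database handle:

    #include <sqlite3.h>

    // Hypothetical helper, not in the patch: turn a non-OK result code
    // into a typed SQLiteError (or SQLiteBusy) carrying the message.
    void checkSQLite(sqlite3 * db, int rc, const char * what)
    {
        if (rc != SQLITE_OK)
            SQLiteError::throw_(db, "while %s", what);
    }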
@@ -12,7 +12,9 @@
#include "references.hh"
#include "archive.hh"
#include "callback.hh"
#include "git.hh"
#include "remote-store.hh"
#include "posix-source-accessor.hh"
// FIXME this should not be here, see TODO below on
// `addMultipleToStore`.
#include "worker-protocol.hh"
@@ -65,85 +67,13 @@ StorePath Store::followLinksToStorePath(std::string_view path) const
}


/* Store paths have the following form:
/*
The exact specification of store paths is in `protocols/store-path.md`
in the Nix manual. These few functions implement that specification.

   <realized-path> = <store>/<h>-<name>

   where

   <store> = the location of the Nix store, usually /nix/store

   <name> = a human readable name for the path, typically obtained
     from the name attribute of the derivation, or the name of the
     source file from which the store path is created. For derivation
     outputs other than the default "out" output, the string "-<id>"
     is suffixed to <name>.

   <h> = base-32 representation of the first 160 bits of a SHA-256
     hash of <s>; the hash part of the store name

   <s> = the string "<type>:sha256:<h2>:<store>:<name>";
     note that it includes the location of the store as well as the
     name to make sure that changes to either of those are reflected
     in the hash (e.g. you won't get /nix/store/<h>-name1 and
     /nix/store/<h>-name2 with equal hash parts).

   <type> = one of:
     "text:<r1>:<r2>:...<rN>"
       for plain text files written to the store using
       addTextToStore(); <r1> ... <rN> are the store paths referenced
       by this path, in the form described by <realized-path>
     "source:<r1>:<r2>:...:<rN>:self"
       for paths copied to the store using addToStore() when recursive
       = true and hashAlgo = "sha256". Just like in the text case, we
       can have the store paths referenced by the path.
       Additionally, we can have an optional :self label to denote self
       reference.
     "output:<id>"
       for either the outputs created by derivations, OR paths copied
       to the store using addToStore() with recursive != true or
       hashAlgo != "sha256" (in that case "source" is used; it's
       silly, but it's done that way for compatibility). <id> is the
       name of the output (usually, "out").

   <h2> = base-16 representation of a SHA-256 hash of <s2>

   <s2> =
     if <type> = "text:...":
       the string written to the resulting store path
     if <type> = "source:...":
       the serialisation of the path from which this store path is
       copied, as returned by hashPath()
     if <type> = "output:<id>":
       for non-fixed derivation outputs:
         the derivation (see hashDerivationModulo() in
         primops.cc)
       for paths copied by addToStore() or produced by fixed-output
       derivations:
         the string "fixed:out:<rec><algo>:<hash>:", where
           <rec> = "r:" for recursive (path) hashes, or "" for flat
             (file) hashes
           <algo> = "md5", "sha1" or "sha256"
           <hash> = base-16 representation of the path or flat hash of
             the contents of the path (or expected contents of the
             path for fixed-output derivations)

   Note that since an output derivation has always type output, while
   something added by addToStore can have type output or source depending
   on the hash, this means that the same input can be hashed differently
   if added to the store via addToStore or via a derivation, in the sha256
   recursive case.

   It would have been nicer to handle fixed-output derivations under
   "source", e.g. have something like "source:<rec><algo>", but we're
   stuck with this for now...

   The main reason for this way of computing names is to prevent name
   collisions (for security). For instance, it shouldn't be feasible
   to come up with a derivation whose output path collides with the
   path for a copied source. The former would have a <s> starting with
   "output:out:", while the latter would have a <s> starting with
   "source:".
If changes to these functions go beyond mere implementation changes, i.e.
also update the user-visible behavior, please update the specification
to match.
*/
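Since this hunk moves the store-path rules out to `protocols/store-path.md`, a compact sketch of the scheme the removed comment described may help. It is illustrative only: `makeStorePathSketch` is not Nix code, OpenSSL stands in for Nix's hash machinery, and the final step is simplified (real Nix compresses the 32-byte digest to 20 bytes by XOR-folding rather than plain truncation), while the base-32 loop mirrors Nix's Nix32 digit order.

    #include <openssl/sha.h>
    #include <string>

    // Nix's base-32 alphabet (omits e, o, u, t); digits are emitted from
    // the high end of the bit string, as in Nix's Nix32 encoding.
    static const char base32Chars[] = "0123456789abcdfghijklmnpqrsvwxyz";

    static std::string toNixBase32(const unsigned char * hash, size_t hashSize)
    {
        size_t len = (hashSize * 8 - 1) / 5 + 1;
        std::string s;
        for (int n = (int) len - 1; n >= 0; n--) {
            unsigned int b = n * 5, i = b / 8, j = b % 8;
            unsigned char c = (hash[i] >> j)
                | (i >= hashSize - 1 ? 0 : hash[i + 1] << (8 - j));
            s.push_back(base32Chars[c & 0x1f]);
        }
        return s;
    }

    // Sketch of <realized-path> = <store>/<h>-<name> from the removed
    // comment: <s> = "<type>:sha256:<h2>:<store>:<name>", and <h> is the
    // base-32 rendering of 160 bits of SHA-256(<s>).
    std::string makeStorePathSketch(
        const std::string & type,      // e.g. "source" or "output:out"
        const std::string & h2,        // base-16 SHA-256 of <s2>
        const std::string & storeDir,  // usually "/nix/store"
        const std::string & name)
    {
        std::string s = type + ":sha256:" + h2 + ":" + storeDir + ":" + name;
        unsigned char digest[SHA256_DIGEST_LENGTH];
        SHA256(reinterpret_cast<const unsigned char *>(s.data()), s.size(), digest);
        // Simplification: take the first 20 bytes instead of XOR-folding.
        return storeDir + "/" + toNixBase32(digest, 20) + "-" + name;
    }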
@@ -191,6 +121,9 @@ static std::string makeType(

StorePath StoreDirConfig::makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const
{
    if (info.method == FileIngestionMethod::Git && info.hash.algo != HashAlgorithm::SHA1)
        throw Error("Git file ingestion must use SHA-1 hash");

    if (info.hash.algo == HashAlgorithm::SHA256 && info.method == FileIngestionMethod::Recursive) {
        return makeStorePath(makeType(*this, "source", info.references), info.hash, name);
    } else {
@@ -238,7 +171,7 @@ std::pair<StorePath, Hash> StoreDirConfig::computeStorePath(
    const StorePathSet & references,
    PathFilter & filter) const
{
    auto h = hashPath(accessor, path, method.getFileIngestionMethod(), hashAlgo, filter).first;
    auto h = hashPath(accessor, path, method.getFileIngestionMethod(), hashAlgo, filter);
    return {
        makeFixedOutputPathFromCA(
            name,
@@ -264,10 +197,23 @@ StorePath Store::addToStore(
    PathFilter & filter,
    RepairFlag repair)
{
    FileSerialisationMethod fsm;
    switch (method.getFileIngestionMethod()) {
    case FileIngestionMethod::Flat:
        fsm = FileSerialisationMethod::Flat;
        break;
    case FileIngestionMethod::Recursive:
        fsm = FileSerialisationMethod::Recursive;
        break;
    case FileIngestionMethod::Git:
        // Use NAR; Git is not a serialization method
        fsm = FileSerialisationMethod::Recursive;
        break;
    }
    auto source = sinkToSource([&](Sink & sink) {
        dumpPath(accessor, path, sink, method.getFileIngestionMethod(), filter);
        dumpPath(accessor, path, sink, fsm, filter);
    });
    return addToStoreFromDump(*source, name, method, hashAlgo, references, repair);
    return addToStoreFromDump(*source, name, fsm, method, hashAlgo, references, repair);
}

void Store::addMultipleToStore(
@@ -427,9 +373,7 @@ ValidPathInfo Store::addToStoreSlow(
    NullFileSystemObjectSink blank;
    auto & parseSink = method.getFileIngestionMethod() == FileIngestionMethod::Flat
        ? (FileSystemObjectSink &) fileSink
        : method.getFileIngestionMethod() == FileIngestionMethod::Recursive
            ? (FileSystemObjectSink &) blank
            : (abort(), (FileSystemObjectSink &)*(FileSystemObjectSink *)nullptr); // handled both cases
        : (FileSystemObjectSink &) blank; // for recursive or git we do recursive

    /* The information that flows from tapped (besides being replicated in
       narSink), is now put in parseSink. */
@@ -441,6 +385,8 @@ ValidPathInfo Store::addToStoreSlow(

    auto hash = method == FileIngestionMethod::Recursive && hashAlgo == HashAlgorithm::SHA256
        ? narHash
        : method == FileIngestionMethod::Git
            ? git::dumpHash(hashAlgo, accessor, srcPath).hash
            : caHashSink.finish().first;

    if (expectedCAHash && expectedCAHash != hash)
@@ -847,7 +793,7 @@ void Store::substitutePaths(const StorePathSet & paths)
    if (!willSubstitute.empty())
        try {
            std::vector<DerivedPath> subs;
            for (auto & p : willSubstitute) subs.push_back(DerivedPath::Opaque{p});
            for (auto & p : willSubstitute) subs.emplace_back(DerivedPath::Opaque{p});
            buildPaths(subs);
        } catch (Error & e) {
            logWarning(e.info());
@@ -466,14 +466,23 @@ public:
     * in `dump`, which is either a NAR serialisation (if recursive ==
     * true) or simply the contents of a regular file (if recursive ==
     * false).
     * `dump` may be drained
     *
     * \todo remove?
     * `dump` may be drained.
     *
     * @param dumpMethod What serialisation format `dump` is in, i.e. how
     * to deserialize it. Must either match `hashMethod` or be
     * `FileSerialisationMethod::Recursive`.
     *
     * @param hashMethod How to content-address the data. Need not be the
     * same as `dumpMethod`.
     *
     * @todo remove?
     */
    virtual StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        ContentAddressMethod method = FileIngestionMethod::Recursive,
        FileSerialisationMethod dumpMethod = FileSerialisationMethod::Recursive,
        ContentAddressMethod hashMethod = FileIngestionMethod::Recursive,
        HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
        const StorePathSet & references = StorePathSet(),
        RepairFlag repair = NoRepair) = 0;
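A hedged usage sketch of the widened interface (`store` and `contents` are assumptions, not part of the patch): with a flat dump and flat hashing the two method arguments agree, so an implementation can hash while streaming and skip the NAR round-trip described above.

    // Sketch: add a single file's contents, serialised and hashed flat.
    StringSource dump { contents };    // contents: the raw file bytes
    StorePath path = store.addToStoreFromDump(
        dump, "example-name",
        FileSerialisationMethod::Flat, // dumpMethod: how `dump` is encoded
        FileIngestionMethod::Flat,     // hashMethod: how to content-address it
        HashAlgorithm::SHA256);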
@@ -772,7 +781,7 @@ protected:
     * Helper for methods that are unsupported: this is used for
     * default definitions for virtual methods that are meant to be overridden.
     *
     * \todo Using this should be a last resort. It is better to make
     * @todo Using this should be a last resort. It is better to make
     * the method "virtual pure" and/or move it to a subclass.
     */
    [[noreturn]] void unsupported(const std::string & op)
@@ -35,7 +35,7 @@ public:
    static std::set<std::string> uriSchemes()
    { return {"unix"}; }

    ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
    ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override
    { return LocalFSStore::getFSAccessor(requireValidPath); }

    void narFromPath(const StorePath & path, Sink & sink) override