Mirror of https://github.com/NixOS/nix.git (synced 2025-11-14 14:32:42 +01:00)
Apply clang-format universally.
* It is tough to contribute to a project that doesn't use a formatter.
* It is extra hard to contribute to a project which has configured the formatter, but ignores it for some files.
* Code formatting makes it harder to hide obscure / weird bugs by accident or on purpose.

Let's rip the bandaid off?

Note that PRs currently in flight should be able to be merged relatively easily by applying `clang-format` to their tip prior to merge.
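To make the change concrete, here is a sketch of the kind of purely mechanical rewrite clang-format applies throughout the tree, modelled on the `BinaryCacheStore` constructor hunk further down. The wrapper function, the stand-in types, and the `readFile` declaration are illustrative scaffolding added so the snippet compiles on its own; only the before/after formatting is taken from the commit.

```cpp
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-ins for the real Nix types (not part of the commit).
struct SecretKey
{
    std::string key;
};

struct LocalSigner
{
    SecretKey key;

    explicit LocalSigner(SecretKey key) : key(std::move(key)) {}
};

std::string readFile(const std::string & path); // assumed helper, declared only

void addSigner(std::vector<std::unique_ptr<LocalSigner>> & signers, const std::string & secretKeyFile)
{
    // Before the commit, calls like this were wrapped by hand:
    //
    //     signers.push_back(std::make_unique<LocalSigner>(
    //         SecretKey { readFile(secretKeyFile) }));
    //
    // After clang-format, the call fits on one line and the braced
    // initializer loses its inner spaces:
    signers.push_back(std::make_unique<LocalSigner>(SecretKey{readFile(secretKeyFile)}));
}
```

As the hunks below show, nothing semantic changes; only line breaks, brace spacing, and comment alignment move around.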
This commit is contained in:
parent 41bf87ec70
commit e4f62e4608
587 changed files with 23258 additions and 23135 deletions
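Another reflow that recurs across many of those changed files is clang-format's handling of chained conditional operators: the `:` moves to the start of the line and the result column is aligned. The sketch below condenses the nar-file-extension chain from `BinaryCacheStore::addToStoreCommon` to three branches; the free function and its name are illustrative, not code from the commit.

```cpp
#include <string>

// Condensed from the nar URL extension logic in BinaryCacheStore::addToStoreCommon
// (three of the six compression branches kept, for brevity).
std::string narExtension(const std::string & compression)
{
    // The old layout ended each line with "? x :"; the new layout breaks
    // before the ':' and aligns the result column, as in the hunks below.
    return compression == "xz"      ? ".xz"
           : compression == "bzip2" ? ".bz2"
                                    : "";
}
```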
@@ -28,15 +28,13 @@ BinaryCacheStore::BinaryCacheStore(Config & config)
     : config{config}
 {
     if (config.secretKeyFile != "")
-        signers.push_back(std::make_unique<LocalSigner>(
-            SecretKey { readFile(config.secretKeyFile) }));
+        signers.push_back(std::make_unique<LocalSigner>(SecretKey{readFile(config.secretKeyFile)}));
 
     if (config.secretKeyFiles != "") {
         std::stringstream ss(config.secretKeyFiles);
         Path keyPath;
         while (std::getline(ss, keyPath, ',')) {
-            signers.push_back(std::make_unique<LocalSigner>(
-                SecretKey { readFile(keyPath) }));
+            signers.push_back(std::make_unique<LocalSigner>(SecretKey{readFile(keyPath)}));
         }
     }
 
@@ -53,13 +51,14 @@ void BinaryCacheStore::init()
     } else {
         for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
             size_t colon = line.find(':');
-            if (colon == std::string::npos) continue;
+            if (colon == std::string::npos)
+                continue;
             auto name = line.substr(0, colon);
             auto value = trim(line.substr(colon + 1, std::string::npos));
             if (name == "StoreDir") {
                 if (value != storeDir)
-                    throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'",
-                        getUri(), value, storeDir);
+                    throw Error(
+                        "binary cache '%s' is for Nix stores with prefix '%s', not '%s'", getUri(), value, storeDir);
             } else if (name == "WantMassQuery") {
                 config.wantMassQuery.setDefault(value == "1");
             } else if (name == "Priority") {
@ -74,32 +73,30 @@ std::optional<std::string> BinaryCacheStore::getNixCacheInfo()
|
|||
return getFile(cacheInfoFile);
|
||||
}
|
||||
|
||||
void BinaryCacheStore::upsertFile(const std::string & path,
|
||||
std::string && data,
|
||||
const std::string & mimeType)
|
||||
void BinaryCacheStore::upsertFile(const std::string & path, std::string && data, const std::string & mimeType)
|
||||
{
|
||||
upsertFile(path, std::make_shared<std::stringstream>(std::move(data)), mimeType);
|
||||
}
|
||||
|
||||
void BinaryCacheStore::getFile(const std::string & path,
|
||||
Callback<std::optional<std::string>> callback) noexcept
|
||||
void BinaryCacheStore::getFile(const std::string & path, Callback<std::optional<std::string>> callback) noexcept
|
||||
{
|
||||
try {
|
||||
callback(getFile(path));
|
||||
} catch (...) { callback.rethrow(); }
|
||||
} catch (...) {
|
||||
callback.rethrow();
|
||||
}
|
||||
}
|
||||
|
||||
void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
|
||||
{
|
||||
std::promise<std::optional<std::string>> promise;
|
||||
getFile(path,
|
||||
{[&](std::future<std::optional<std::string>> result) {
|
||||
try {
|
||||
promise.set_value(result.get());
|
||||
} catch (...) {
|
||||
promise.set_exception(std::current_exception());
|
||||
}
|
||||
}});
|
||||
getFile(path, {[&](std::future<std::optional<std::string>> result) {
|
||||
try {
|
||||
promise.set_value(result.get());
|
||||
} catch (...) {
|
||||
promise.set_exception(std::current_exception());
|
||||
}
|
||||
}});
|
||||
sink(*promise.get_future().get());
|
||||
}
|
||||
|
||||
|
|
@ -128,8 +125,7 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
|
|||
{
|
||||
auto state_(state.lock());
|
||||
state_->pathInfoCache.upsert(
|
||||
std::string(narInfo->path.to_string()),
|
||||
PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
|
||||
std::string(narInfo->path.to_string()), PathInfoCacheValue{.value = std::shared_ptr<NarInfo>(narInfo)});
|
||||
}
|
||||
|
||||
if (diskCache)
|
||||
|
|
@ -137,8 +133,7 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
|
|||
}
|
||||
|
||||
ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
||||
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
|
||||
std::function<ValidPathInfo(HashResult)> mkInfo)
|
||||
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs, std::function<ValidPathInfo(HashResult)> mkInfo)
|
||||
{
|
||||
auto [fdTemp, fnTemp] = createTempFile();
|
||||
|
||||
|
|
@ -149,22 +144,19 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
/* Read the NAR simultaneously into a CompressionSink+FileSink (to
|
||||
write the compressed NAR to disk), into a HashSink (to get the
|
||||
NAR hash), and into a NarAccessor (to get the NAR listing). */
|
||||
HashSink fileHashSink { HashAlgorithm::SHA256 };
|
||||
HashSink fileHashSink{HashAlgorithm::SHA256};
|
||||
std::shared_ptr<SourceAccessor> narAccessor;
|
||||
HashSink narHashSink { HashAlgorithm::SHA256 };
|
||||
HashSink narHashSink{HashAlgorithm::SHA256};
|
||||
{
|
||||
FdSink fileSink(fdTemp.get());
|
||||
TeeSink teeSinkCompressed { fileSink, fileHashSink };
|
||||
auto compressionSink = makeCompressionSink(
|
||||
config.compression,
|
||||
teeSinkCompressed,
|
||||
config.parallelCompression,
|
||||
config.compressionLevel);
|
||||
TeeSink teeSinkUncompressed { *compressionSink, narHashSink };
|
||||
TeeSource teeSource { narSource, teeSinkUncompressed };
|
||||
narAccessor = makeNarAccessor(teeSource);
|
||||
compressionSink->finish();
|
||||
fileSink.flush();
|
||||
FdSink fileSink(fdTemp.get());
|
||||
TeeSink teeSinkCompressed{fileSink, fileHashSink};
|
||||
auto compressionSink = makeCompressionSink(
|
||||
config.compression, teeSinkCompressed, config.parallelCompression, config.compressionLevel);
|
||||
TeeSink teeSinkUncompressed{*compressionSink, narHashSink};
|
||||
TeeSource teeSource{narSource, teeSinkUncompressed};
|
||||
narAccessor = makeNarAccessor(teeSource);
|
||||
compressionSink->finish();
|
||||
fileSink.flush();
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
|
@ -176,17 +168,20 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
narInfo->fileHash = fileHash;
|
||||
narInfo->fileSize = fileSize;
|
||||
narInfo->url = "nar/" + narInfo->fileHash->to_string(HashFormat::Nix32, false) + ".nar"
|
||||
+ (config.compression == "xz" ? ".xz" :
|
||||
config.compression == "bzip2" ? ".bz2" :
|
||||
config.compression == "zstd" ? ".zst" :
|
||||
config.compression == "lzip" ? ".lzip" :
|
||||
config.compression == "lz4" ? ".lz4" :
|
||||
config.compression == "br" ? ".br" :
|
||||
"");
|
||||
+ (config.compression == "xz" ? ".xz"
|
||||
: config.compression == "bzip2" ? ".bz2"
|
||||
: config.compression == "zstd" ? ".zst"
|
||||
: config.compression == "lzip" ? ".lzip"
|
||||
: config.compression == "lz4" ? ".lz4"
|
||||
: config.compression == "br" ? ".br"
|
||||
: "");
|
||||
|
||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache",
|
||||
printStorePath(narInfo->path), info.narSize,
|
||||
printMsg(
|
||||
lvlTalkative,
|
||||
"copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache",
|
||||
printStorePath(narInfo->path),
|
||||
info.narSize,
|
||||
((1.0 - (double) fileSize / info.narSize) * 100.0),
|
||||
duration);
|
||||
|
||||
|
|
@ -197,8 +192,10 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
if (ref != info.path)
|
||||
queryPathInfo(ref);
|
||||
} catch (InvalidPath &) {
|
||||
throw Error("cannot add '%s' to the binary cache because the reference '%s' is not valid",
|
||||
printStorePath(info.path), printStorePath(ref));
|
||||
throw Error(
|
||||
"cannot add '%s' to the binary cache because the reference '%s' is not valid",
|
||||
printStorePath(info.path),
|
||||
printStorePath(ref));
|
||||
}
|
||||
|
||||
/* Optionally write a JSON file containing a listing of the
|
||||
|
|
@ -232,7 +229,8 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
|
||||
// FIXME: or should we overwrite? The previous link may point
|
||||
// to a GC'ed file, so overwriting might be useful...
|
||||
if (fileExists(key)) return;
|
||||
if (fileExists(key))
|
||||
return;
|
||||
|
||||
printMsg(lvlTalkative, "creating debuginfo link from '%s' to '%s'", key, target);
|
||||
|
||||
|
|
@ -245,15 +243,13 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
for (auto & [s1, _type] : narAccessor->readDirectory(buildIdDir)) {
|
||||
auto dir = buildIdDir / s1;
|
||||
|
||||
if (narAccessor->lstat(dir).type != SourceAccessor::tDirectory
|
||||
|| !std::regex_match(s1, regex1))
|
||||
if (narAccessor->lstat(dir).type != SourceAccessor::tDirectory || !std::regex_match(s1, regex1))
|
||||
continue;
|
||||
|
||||
for (auto & [s2, _type] : narAccessor->readDirectory(dir)) {
|
||||
auto debugPath = dir / s2;
|
||||
|
||||
if (narAccessor->lstat(debugPath).type != SourceAccessor::tRegular
|
||||
|| !std::regex_match(s2, regex2))
|
||||
if (narAccessor->lstat(debugPath).type != SourceAccessor::tRegular || !std::regex_match(s2, regex2))
|
||||
continue;
|
||||
|
||||
auto buildId = s1 + s2;
|
||||
|
|
@ -272,7 +268,8 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
/* Atomically write the NAR file. */
|
||||
if (repair || !fileExists(narInfo->url)) {
|
||||
stats.narWrite++;
|
||||
upsertFile(narInfo->url,
|
||||
upsertFile(
|
||||
narInfo->url,
|
||||
std::make_shared<std::fstream>(fnTemp, std::ios_base::in | std::ios_base::binary),
|
||||
"application/x-nix-nar");
|
||||
} else
|
||||
|
|
@ -292,8 +289,8 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
|||
return narInfo;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs)
|
||||
void BinaryCacheStore::addToStore(
|
||||
const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs)
|
||||
{
|
||||
if (!repair && isValidPath(info.path)) {
|
||||
// FIXME: copyNAR -> null sink
|
||||
|
|
@ -302,12 +299,12 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
|
|||
}
|
||||
|
||||
addToStoreCommon(narSource, repair, checkSigs, {[&](HashResult nar) {
|
||||
/* FIXME reinstate these, once we can correctly do hash modulo sink as
|
||||
needed. We need to throw here in case we uploaded a corrupted store path. */
|
||||
// assert(info.narHash == nar.first);
|
||||
// assert(info.narSize == nar.second);
|
||||
return info;
|
||||
}});
|
||||
/* FIXME reinstate these, once we can correctly do hash modulo sink as
|
||||
needed. We need to throw here in case we uploaded a corrupted store path. */
|
||||
// assert(info.narHash == nar.first);
|
||||
// assert(info.narSize == nar.second);
|
||||
return info;
|
||||
}});
|
||||
}
|
||||
|
||||
StorePath BinaryCacheStore::addToStoreFromDump(
|
||||
|
|
@ -341,8 +338,7 @@ StorePath BinaryCacheStore::addToStoreFromDump(
|
|||
// The dump is already NAR in this case, just use it.
|
||||
nar = dump2.s;
|
||||
break;
|
||||
case FileSerialisationMethod::Flat:
|
||||
{
|
||||
case FileSerialisationMethod::Flat: {
|
||||
// The dump is Flat, so we need to convert it to NAR with a
|
||||
// single file.
|
||||
StringSink s;
|
||||
|
|
@ -357,30 +353,34 @@ StorePath BinaryCacheStore::addToStoreFromDump(
|
|||
if (dumpMethod != FileSerialisationMethod::NixArchive || hashAlgo != HashAlgorithm::SHA256)
|
||||
unsupported("addToStoreFromDump");
|
||||
}
|
||||
StringSource narDump { nar };
|
||||
StringSource narDump{nar};
|
||||
|
||||
// Use `narDump` if we wrote to `nar`.
|
||||
Source & narDump2 = nar.size() > 0
|
||||
? static_cast<Source &>(narDump)
|
||||
: dump;
|
||||
Source & narDump2 = nar.size() > 0 ? static_cast<Source &>(narDump) : dump;
|
||||
|
||||
return addToStoreCommon(narDump2, repair, CheckSigs, [&](HashResult nar) {
|
||||
ValidPathInfo info {
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
hashMethod,
|
||||
caHash ? *caHash : nar.first,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})->path;
|
||||
return addToStoreCommon(
|
||||
narDump2,
|
||||
repair,
|
||||
CheckSigs,
|
||||
[&](HashResult nar) {
|
||||
ValidPathInfo info{
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
hashMethod,
|
||||
caHash ? *caHash : nar.first,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed
|
||||
// without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})
|
||||
->path;
|
||||
}
|
||||
|
||||
bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
|
||||
|
|
@ -407,7 +407,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
|
|||
auto info = queryPathInfo(storePath).cast<const NarInfo>();
|
||||
|
||||
LengthSink narSize;
|
||||
TeeSink tee { sink, narSize };
|
||||
TeeSink tee{sink, narSize};
|
||||
|
||||
auto decompressor = makeDecompressionSink(info->compression, tee);
|
||||
|
||||
|
|
@ -420,40 +420,44 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
|
|||
decompressor->finish();
|
||||
|
||||
stats.narRead++;
|
||||
//stats.narReadCompressedBytes += nar->size(); // FIXME
|
||||
// stats.narReadCompressedBytes += nar->size(); // FIXME
|
||||
stats.narReadBytes += narSize.length;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
|
||||
void BinaryCacheStore::queryPathInfoUncached(
|
||||
const StorePath & storePath, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
|
||||
{
|
||||
auto uri = getUri();
|
||||
auto storePathS = printStorePath(storePath);
|
||||
auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
|
||||
fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri});
|
||||
auto act = std::make_shared<Activity>(
|
||||
*logger,
|
||||
lvlTalkative,
|
||||
actQueryPathInfo,
|
||||
fmt("querying info about '%s' on '%s'", storePathS, uri),
|
||||
Logger::Fields{storePathS, uri});
|
||||
PushActivity pact(act->id);
|
||||
|
||||
auto narInfoFile = narInfoFileFor(storePath);
|
||||
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
getFile(narInfoFile,
|
||||
{[=,this](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
getFile(narInfoFile, {[=, this](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
|
||||
if (!data) return (*callbackPtr)({});
|
||||
if (!data)
|
||||
return (*callbackPtr)({});
|
||||
|
||||
stats.narInfoRead++;
|
||||
stats.narInfoRead++;
|
||||
|
||||
(*callbackPtr)((std::shared_ptr<ValidPathInfo>)
|
||||
std::make_shared<NarInfo>(*this, *data, narInfoFile));
|
||||
(*callbackPtr)(
|
||||
(std::shared_ptr<ValidPathInfo>) std::make_shared<NarInfo>(*this, *data, narInfoFile));
|
||||
|
||||
(void) act; // force Activity into this lambda to ensure it stays alive
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
(void) act; // force Activity into this lambda to ensure it stays alive
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
}
|
||||
|
||||
StorePath BinaryCacheStore::addToStore(
|
||||
|
|
@ -471,54 +475,57 @@ StorePath BinaryCacheStore::addToStore(
|
|||
|
||||
auto h = hashPath(path, method.getFileIngestionMethod(), hashAlgo, filter).first;
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
path.dumpPath(sink, filter);
|
||||
});
|
||||
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
|
||||
ValidPathInfo info {
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
method,
|
||||
h,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})->path;
|
||||
auto source = sinkToSource([&](Sink & sink) { path.dumpPath(sink, filter); });
|
||||
return addToStoreCommon(
|
||||
*source,
|
||||
repair,
|
||||
CheckSigs,
|
||||
[&](HashResult nar) {
|
||||
ValidPathInfo info{
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
method,
|
||||
h,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed
|
||||
// without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})
|
||||
->path;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::queryRealisationUncached(const DrvOutput & id,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept
|
||||
void BinaryCacheStore::queryRealisationUncached(
|
||||
const DrvOutput & id, Callback<std::shared_ptr<const Realisation>> callback) noexcept
|
||||
{
|
||||
auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
|
||||
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
Callback<std::optional<std::string>> newCallback = {
|
||||
[=](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
if (!data) return (*callbackPtr)({});
|
||||
Callback<std::optional<std::string>> newCallback = {[=](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
if (!data)
|
||||
return (*callbackPtr)({});
|
||||
|
||||
auto realisation = Realisation::fromJSON(
|
||||
nlohmann::json::parse(*data), outputInfoFilePath);
|
||||
return (*callbackPtr)(std::make_shared<const Realisation>(realisation));
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
auto realisation = Realisation::fromJSON(nlohmann::json::parse(*data), outputInfoFilePath);
|
||||
return (*callbackPtr)(std::make_shared<const Realisation>(realisation));
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
};
|
||||
}};
|
||||
|
||||
getFile(outputInfoFilePath, std::move(newCallback));
|
||||
}
|
||||
|
||||
void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
|
||||
void BinaryCacheStore::registerDrvOutput(const Realisation & info)
|
||||
{
|
||||
if (diskCache)
|
||||
diskCache->upsertRealisation(getUri(), info);
|
||||
auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
|
@@ -563,4 +570,4 @@ void BinaryCacheStore::addBuildLog(const StorePath & drvPath, std::string_view l
         "text/plain; charset=utf-8");
 }
 
-}
+} // namespace nix
@@ -5,4 +5,4 @@ namespace nix {
 bool BuildResult::operator==(const BuildResult &) const noexcept = default;
 std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default;
 
-}
+} // namespace nix
File diff suppressed because it is too large
@ -11,7 +11,7 @@
|
|||
#include "nix/util/compression.hh"
|
||||
#include "nix/store/common-protocol.hh"
|
||||
#include "nix/store/common-protocol-impl.hh" // Don't remove is actually needed
|
||||
#include "nix/store/local-store.hh" // TODO remove, along with remaining downcasts
|
||||
#include "nix/store/local-store.hh" // TODO remove, along with remaining downcasts
|
||||
|
||||
#include <fstream>
|
||||
#include <sys/types.h>
|
||||
|
|
@ -24,8 +24,12 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
DerivationGoal::DerivationGoal(const StorePath & drvPath, const Derivation & drv,
|
||||
const OutputName & wantedOutput, Worker & worker, BuildMode buildMode)
|
||||
DerivationGoal::DerivationGoal(
|
||||
const StorePath & drvPath,
|
||||
const Derivation & drv,
|
||||
const OutputName & wantedOutput,
|
||||
Worker & worker,
|
||||
BuildMode buildMode)
|
||||
: Goal(worker, haveDerivation())
|
||||
, drvPath(drvPath)
|
||||
, wantedOutput(wantedOutput)
|
||||
|
|
@ -33,17 +37,15 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const Derivation & drv
|
|||
{
|
||||
this->drv = std::make_unique<Derivation>(drv);
|
||||
|
||||
name = fmt(
|
||||
"building of '%s' from in-memory derivation",
|
||||
DerivedPath::Built { makeConstantStorePathRef(drvPath), drv.outputNames() }.to_string(worker.store));
|
||||
name =
|
||||
fmt("building of '%s' from in-memory derivation",
|
||||
DerivedPath::Built{makeConstantStorePathRef(drvPath), drv.outputNames()}.to_string(worker.store));
|
||||
trace("created");
|
||||
|
||||
mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
|
||||
worker.updateProgress();
|
||||
|
||||
}
|
||||
|
||||
|
||||
std::string DerivationGoal::key()
|
||||
{
|
||||
/* Ensure that derivations get built in order of their name,
|
||||
|
|
@ -56,7 +58,6 @@ std::string DerivationGoal::key()
|
|||
}.to_string(worker.store);
|
||||
}
|
||||
|
||||
|
||||
Goal::Co DerivationGoal::haveDerivation()
|
||||
{
|
||||
trace("have derivation");
|
||||
|
|
@ -76,8 +77,7 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
|
||||
/* At least one of the output paths could not be
|
||||
produced using a substitute. So we have to build instead. */
|
||||
auto gaveUpOnSubstitution = [&]() -> Goal::Co
|
||||
{
|
||||
auto gaveUpOnSubstitution = [&]() -> Goal::Co {
|
||||
auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode);
|
||||
|
||||
/* We will finish with it ourselves, as if we were the derivational goal. */
|
||||
|
|
@ -103,7 +103,7 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
buildResult.builtOutputs = assertPathValidity();
|
||||
}
|
||||
|
||||
for (auto it = buildResult.builtOutputs.begin(); it != buildResult.builtOutputs.end(); ) {
|
||||
for (auto it = buildResult.builtOutputs.begin(); it != buildResult.builtOutputs.end();) {
|
||||
if (it->first != wantedOutput) {
|
||||
it = buildResult.builtOutputs.erase(it);
|
||||
} else {
|
||||
|
|
@ -124,20 +124,20 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
{
|
||||
bool impure = drv->type().isImpure();
|
||||
|
||||
if (impure) experimentalFeatureSettings.require(Xp::ImpureDerivations);
|
||||
if (impure)
|
||||
experimentalFeatureSettings.require(Xp::ImpureDerivations);
|
||||
|
||||
auto outputHashes = staticOutputHashes(worker.evalStore, *drv);
|
||||
for (auto & [outputName, outputHash] : outputHashes) {
|
||||
InitialOutput v{
|
||||
.wanted = true, // Will be refined later
|
||||
.outputHash = outputHash
|
||||
};
|
||||
.outputHash = outputHash};
|
||||
|
||||
/* TODO we might want to also allow randomizing the paths
|
||||
for regular CA derivations, e.g. for sake of checking
|
||||
determinism. */
|
||||
if (impure) {
|
||||
v.known = InitialOutputStatus {
|
||||
v.known = InitialOutputStatus{
|
||||
.path = StorePath::random(outputPathName(drv->name, outputName)),
|
||||
.status = PathStatus::Absent,
|
||||
};
|
||||
|
|
@ -173,22 +173,17 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
them. */
|
||||
if (settings.useSubstitutes && drvOptions.substitutesAllowed())
|
||||
for (auto & [outputName, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.wanted)
|
||||
continue;
|
||||
if (!status.known)
|
||||
waitees.insert(
|
||||
upcast_goal(
|
||||
worker.makeDrvOutputSubstitutionGoal(
|
||||
DrvOutput{status.outputHash, outputName},
|
||||
buildMode == bmRepair ? Repair : NoRepair
|
||||
)
|
||||
)
|
||||
);
|
||||
waitees.insert(upcast_goal(worker.makeDrvOutputSubstitutionGoal(
|
||||
DrvOutput{status.outputHash, outputName}, buildMode == bmRepair ? Repair : NoRepair)));
|
||||
else {
|
||||
auto * cap = getDerivationCA(*drv);
|
||||
waitees.insert(upcast_goal(worker.makePathSubstitutionGoal(
|
||||
status.known->path,
|
||||
buildMode == bmRepair ? Repair : NoRepair,
|
||||
cap ? std::optional { *cap } : std::nullopt)));
|
||||
cap ? std::optional{*cap} : std::nullopt)));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -199,8 +194,11 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
assert(!drv->type().isImpure());
|
||||
|
||||
if (nrFailed > 0 && nrFailed > nrNoSubstituters && !settings.tryFallback) {
|
||||
co_return done(BuildResult::TransientFailure, {},
|
||||
Error("some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ",
|
||||
co_return done(
|
||||
BuildResult::TransientFailure,
|
||||
{},
|
||||
Error(
|
||||
"some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ",
|
||||
worker.store.printStorePath(drvPath)));
|
||||
}
|
||||
|
||||
|
|
@ -215,26 +213,25 @@ Goal::Co DerivationGoal::haveDerivation()
|
|||
co_return repairClosure();
|
||||
}
|
||||
if (buildMode == bmCheck && !allValid)
|
||||
throw Error("some outputs of '%s' are not valid, so checking is not possible",
|
||||
worker.store.printStorePath(drvPath));
|
||||
throw Error(
|
||||
"some outputs of '%s' are not valid, so checking is not possible", worker.store.printStorePath(drvPath));
|
||||
|
||||
/* Nothing to wait for; tail call */
|
||||
co_return gaveUpOnSubstitution();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Used for `inputGoals` local variable below
|
||||
*/
|
||||
struct value_comparison
|
||||
{
|
||||
template <typename T>
|
||||
bool operator()(const ref<T> & lhs, const ref<T> & rhs) const {
|
||||
template<typename T>
|
||||
bool operator()(const ref<T> & lhs, const ref<T> & rhs) const
|
||||
{
|
||||
return *lhs < *rhs;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
Goal::Co DerivationGoal::repairClosure()
|
||||
{
|
||||
assert(!drv->type().isImpure());
|
||||
|
|
@ -278,18 +275,20 @@ Goal::Co DerivationGoal::repairClosure()
|
|||
|
||||
/* Check each path (slow!). */
|
||||
for (auto & i : outputClosure) {
|
||||
if (worker.pathContentsGood(i)) continue;
|
||||
if (worker.pathContentsGood(i))
|
||||
continue;
|
||||
printError(
|
||||
"found corrupted or missing path '%s' in the output closure of '%s'",
|
||||
worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
|
||||
worker.store.printStorePath(i),
|
||||
worker.store.printStorePath(drvPath));
|
||||
auto drvPath2 = outputsToDrv.find(i);
|
||||
if (drvPath2 == outputsToDrv.end())
|
||||
waitees.insert(upcast_goal(worker.makePathSubstitutionGoal(i, Repair)));
|
||||
else
|
||||
waitees.insert(worker.makeGoal(
|
||||
DerivedPath::Built {
|
||||
DerivedPath::Built{
|
||||
.drvPath = makeConstantStorePathRef(drvPath2->second),
|
||||
.outputs = OutputsSpec::All { },
|
||||
.outputs = OutputsSpec::All{},
|
||||
},
|
||||
bmRepair));
|
||||
}
|
||||
|
|
@ -299,18 +298,18 @@ Goal::Co DerivationGoal::repairClosure()
|
|||
if (!waitees.empty()) {
|
||||
trace("closure repaired");
|
||||
if (nrFailed > 0)
|
||||
throw Error("some paths in the output closure of derivation '%s' could not be repaired",
|
||||
throw Error(
|
||||
"some paths in the output closure of derivation '%s' could not be repaired",
|
||||
worker.store.printStorePath(drvPath));
|
||||
}
|
||||
co_return done(BuildResult::AlreadyValid, assertPathValidity());
|
||||
}
|
||||
|
||||
|
||||
std::map<std::string, std::optional<StorePath>> DerivationGoal::queryPartialDerivationOutputMap()
|
||||
{
|
||||
assert(!drv->type().isImpure());
|
||||
|
||||
for (auto * drvStore : { &worker.evalStore, &worker.store })
|
||||
for (auto * drvStore : {&worker.evalStore, &worker.store})
|
||||
if (drvStore->isValidPath(drvPath))
|
||||
return worker.store.queryPartialDerivationOutputMap(drvPath, drvStore);
|
||||
|
||||
|
|
@ -326,7 +325,7 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
|
|||
{
|
||||
assert(!drv->type().isImpure());
|
||||
|
||||
for (auto * drvStore : { &worker.evalStore, &worker.store })
|
||||
for (auto * drvStore : {&worker.evalStore, &worker.store})
|
||||
if (drvStore->isValidPath(drvPath))
|
||||
return worker.store.queryDerivationOutputMap(drvPath, drvStore);
|
||||
|
||||
|
|
@ -337,10 +336,10 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
|
|||
return res;
|
||||
}
|
||||
|
||||
|
||||
std::pair<bool, SingleDrvOutputs> DerivationGoal::checkPathValidity()
|
||||
{
|
||||
if (drv->type().isImpure()) return { false, {} };
|
||||
if (drv->type().isImpure())
|
||||
return {false, {}};
|
||||
|
||||
bool checkHash = buildMode == bmRepair;
|
||||
StringSet wantedOutputsLeft{wantedOutput};
|
||||
|
|
@ -359,11 +358,9 @@ std::pair<bool, SingleDrvOutputs> DerivationGoal::checkPathValidity()
|
|||
auto outputPath = *i.second;
|
||||
info.known = {
|
||||
.path = outputPath,
|
||||
.status = !worker.store.isValidPath(outputPath)
|
||||
? PathStatus::Absent
|
||||
: !checkHash || worker.pathContentsGood(outputPath)
|
||||
? PathStatus::Valid
|
||||
: PathStatus::Corrupt,
|
||||
.status = !worker.store.isValidPath(outputPath) ? PathStatus::Absent
|
||||
: !checkHash || worker.pathContentsGood(outputPath) ? PathStatus::Valid
|
||||
: PathStatus::Corrupt,
|
||||
};
|
||||
}
|
||||
auto drvOutput = DrvOutput{info.outputHash, i.first};
|
||||
|
|
@ -379,38 +376,38 @@ std::pair<bool, SingleDrvOutputs> DerivationGoal::checkPathValidity()
|
|||
// its realisation stored (probably because it has been built
|
||||
// without the `ca-derivations` experimental flag).
|
||||
worker.store.registerDrvOutput(
|
||||
Realisation {
|
||||
Realisation{
|
||||
drvOutput,
|
||||
info.known->path,
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
if (info.known && info.known->isValid())
|
||||
validOutputs.emplace(i.first, Realisation { drvOutput, info.known->path });
|
||||
validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path});
|
||||
}
|
||||
|
||||
// If we requested all the outputs, we are always fine.
|
||||
// If we requested specific elements, the loop above removes all the valid
|
||||
// ones, so any that are left must be invalid.
|
||||
if (!wantedOutputsLeft.empty())
|
||||
throw Error("derivation '%s' does not have wanted outputs %s",
|
||||
throw Error(
|
||||
"derivation '%s' does not have wanted outputs %s",
|
||||
worker.store.printStorePath(drvPath),
|
||||
concatStringsSep(", ", quoteStrings(wantedOutputsLeft)));
|
||||
|
||||
bool allValid = true;
|
||||
for (auto & [_, status] : initialOutputs) {
|
||||
if (!status.wanted) continue;
|
||||
if (!status.wanted)
|
||||
continue;
|
||||
if (!status.known || !status.known->isValid()) {
|
||||
allValid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return { allValid, validOutputs };
|
||||
return {allValid, validOutputs};
|
||||
}
|
||||
|
||||
|
||||
SingleDrvOutputs DerivationGoal::assertPathValidity()
|
||||
{
|
||||
auto [allValid, validOutputs] = checkPathValidity();
|
||||
|
|
@ -419,11 +416,7 @@ SingleDrvOutputs DerivationGoal::assertPathValidity()
|
|||
return validOutputs;
|
||||
}
|
||||
|
||||
|
||||
Goal::Done DerivationGoal::done(
|
||||
BuildResult::Status status,
|
||||
SingleDrvOutputs builtOutputs,
|
||||
std::optional<Error> ex)
|
||||
Goal::Done DerivationGoal::done(BuildResult::Status status, SingleDrvOutputs builtOutputs, std::optional<Error> ex)
|
||||
{
|
||||
buildResult.status = status;
|
||||
if (ex)
|
||||
@@ -458,4 +451,4 @@ Goal::Done DerivationGoal::done(
     return amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
 }
 
-}
+} // namespace nix
@@ -172,4 +172,4 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation
     co_return amDone(g->exitCode, g->ex);
 }
 
-}
+} // namespace nix
|
||||
|
|
|
|||
|
|
@ -8,10 +8,7 @@
|
|||
namespace nix {
|
||||
|
||||
DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
|
||||
const DrvOutput & id,
|
||||
Worker & worker,
|
||||
RepairFlag repair,
|
||||
std::optional<ContentAddress> ca)
|
||||
const DrvOutput & id, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
: Goal(worker, init())
|
||||
, id(id)
|
||||
{
|
||||
|
|
@ -19,7 +16,6 @@ DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
|
|||
trace("created");
|
||||
}
|
||||
|
||||
|
||||
Goal::Co DrvOutputSubstitutionGoal::init()
|
||||
{
|
||||
trace("init");
|
||||
|
|
@ -40,32 +36,35 @@ Goal::Co DrvOutputSubstitutionGoal::init()
|
|||
some other error occurs), so it must not touch `this`. So put
|
||||
the shared state in a separate refcounted object. */
|
||||
auto outPipe = std::make_shared<MuxablePipe>();
|
||||
#ifndef _WIN32
|
||||
#ifndef _WIN32
|
||||
outPipe->create();
|
||||
#else
|
||||
#else
|
||||
outPipe->createAsyncPipe(worker.ioport.get());
|
||||
#endif
|
||||
#endif
|
||||
|
||||
auto promise = std::make_shared<std::promise<std::shared_ptr<const Realisation>>>();
|
||||
|
||||
sub->queryRealisation(
|
||||
id,
|
||||
{ [outPipe(outPipe), promise(promise)](std::future<std::shared_ptr<const Realisation>> res) {
|
||||
id, {[outPipe(outPipe), promise(promise)](std::future<std::shared_ptr<const Realisation>> res) {
|
||||
try {
|
||||
Finally updateStats([&]() { outPipe->writeSide.close(); });
|
||||
promise->set_value(res.get());
|
||||
} catch (...) {
|
||||
promise->set_exception(std::current_exception());
|
||||
}
|
||||
} });
|
||||
}});
|
||||
|
||||
worker.childStarted(shared_from_this(), {
|
||||
#ifndef _WIN32
|
||||
outPipe->readSide.get()
|
||||
#else
|
||||
&*outPipe
|
||||
#endif
|
||||
}, true, false);
|
||||
worker.childStarted(
|
||||
shared_from_this(),
|
||||
{
|
||||
#ifndef _WIN32
|
||||
outPipe->readSide.get()
|
||||
#else
|
||||
&*outPipe
|
||||
#endif
|
||||
},
|
||||
true,
|
||||
false);
|
||||
|
||||
co_await Suspend{};
|
||||
|
||||
|
|
@ -84,7 +83,8 @@ Goal::Co DrvOutputSubstitutionGoal::init()
|
|||
substituterFailed = true;
|
||||
}
|
||||
|
||||
if (!outputInfo) continue;
|
||||
if (!outputInfo)
|
||||
continue;
|
||||
|
||||
bool failed = false;
|
||||
|
||||
|
|
@ -101,8 +101,7 @@ Goal::Co DrvOutputSubstitutionGoal::init()
|
|||
sub->getUri(),
|
||||
depId.to_string(),
|
||||
worker.store.printStorePath(localOutputInfo->outPath),
|
||||
worker.store.printStorePath(depPath)
|
||||
);
|
||||
worker.store.printStorePath(depPath));
|
||||
failed = true;
|
||||
break;
|
||||
}
|
||||
|
|
@ -110,7 +109,8 @@ Goal::Co DrvOutputSubstitutionGoal::init()
|
|||
}
|
||||
}
|
||||
|
||||
if (failed) continue;
|
||||
if (failed)
|
||||
continue;
|
||||
|
||||
co_return realisationFetched(std::move(waitees), outputInfo, sub);
|
||||
}
|
||||
|
|
@ -130,7 +130,9 @@ Goal::Co DrvOutputSubstitutionGoal::init()
|
|||
co_return amDone(substituterFailed ? ecFailed : ecNoSubstituters);
|
||||
}
|
||||
|
||||
Goal::Co DrvOutputSubstitutionGoal::realisationFetched(Goals waitees, std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub) {
|
||||
Goal::Co DrvOutputSubstitutionGoal::realisationFetched(
|
||||
Goals waitees, std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub)
|
||||
{
|
||||
waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath));
|
||||
|
||||
co_await await(std::move(waitees));
|
||||
@@ -160,5 +162,4 @@ void DrvOutputSubstitutionGoal::handleEOF(Descriptor fd)
     worker.wakeUp(shared_from_this());
 }
 
-
-}
+} // namespace nix
|
||||
|
|
|
|||
|
|
@ -38,15 +38,14 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
|
|||
ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*ex);
|
||||
} else if (!failed.empty()) {
|
||||
if (ex) logError(ex->info());
|
||||
if (ex)
|
||||
logError(ex->info());
|
||||
throw Error(worker.failingExitStatus(), "build of %s failed", concatStringsSep(", ", quoteStrings(failed)));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<KeyedBuildResult> Store::buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & reqs,
|
||||
BuildMode buildMode,
|
||||
std::shared_ptr<Store> evalStore)
|
||||
const std::vector<DerivedPath> & reqs, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
Worker worker(*this, evalStore ? *evalStore : *this);
|
||||
|
||||
|
|
@ -65,36 +64,36 @@ std::vector<KeyedBuildResult> Store::buildPathsWithResults(
|
|||
results.reserve(state.size());
|
||||
|
||||
for (auto & [req, goalPtr] : state)
|
||||
results.emplace_back(KeyedBuildResult {
|
||||
goalPtr->buildResult,
|
||||
/* .path = */ req,
|
||||
});
|
||||
results.emplace_back(
|
||||
KeyedBuildResult{
|
||||
goalPtr->buildResult,
|
||||
/* .path = */ req,
|
||||
});
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode)
|
||||
BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
auto goal = worker.makeDerivationTrampolineGoal(drvPath, OutputsSpec::All {}, drv, buildMode);
|
||||
auto goal = worker.makeDerivationTrampolineGoal(drvPath, OutputsSpec::All{}, drv, buildMode);
|
||||
|
||||
try {
|
||||
worker.run(Goals{goal});
|
||||
return goal->buildResult;
|
||||
} catch (Error & e) {
|
||||
return BuildResult {
|
||||
return BuildResult{
|
||||
.status = BuildResult::MiscFailure,
|
||||
.errorMsg = e.msg(),
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
void Store::ensurePath(const StorePath & path)
|
||||
{
|
||||
/* If the path is already valid, we're done. */
|
||||
if (isValidPath(path)) return;
|
||||
if (isValidPath(path))
|
||||
return;
|
||||
|
||||
Worker worker(*this, *this);
|
||||
GoalPtr goal = worker.makePathSubstitutionGoal(path);
|
||||
|
|
@ -107,11 +106,11 @@ void Store::ensurePath(const StorePath & path)
|
|||
goal->ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*goal->ex);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
|
||||
throw Error(
|
||||
worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Store::repairPath(const StorePath & path)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
|
|
@ -126,15 +125,17 @@ void Store::repairPath(const StorePath & path)
|
|||
auto info = queryPathInfo(path);
|
||||
if (info->deriver && isValidPath(*info->deriver)) {
|
||||
goals.clear();
|
||||
goals.insert(worker.makeGoal(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(*info->deriver),
|
||||
// FIXME: Should just build the specific output we need.
|
||||
.outputs = OutputsSpec::All { },
|
||||
}, bmRepair));
|
||||
goals.insert(worker.makeGoal(
|
||||
DerivedPath::Built{
|
||||
.drvPath = makeConstantStorePathRef(*info->deriver),
|
||||
// FIXME: Should just build the specific output we need.
|
||||
.outputs = OutputsSpec::All{},
|
||||
},
|
||||
bmRepair));
|
||||
worker.run(goals);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "cannot repair path '%s'", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -8,28 +8,35 @@ using promise_type = nix::Goal::promise_type;
|
|||
using handle_type = nix::Goal::handle_type;
|
||||
using Suspend = nix::Goal::Suspend;
|
||||
|
||||
Co::Co(Co&& rhs) {
|
||||
Co::Co(Co && rhs)
|
||||
{
|
||||
this->handle = rhs.handle;
|
||||
rhs.handle = nullptr;
|
||||
}
|
||||
void Co::operator=(Co&& rhs) {
|
||||
|
||||
void Co::operator=(Co && rhs)
|
||||
{
|
||||
this->handle = rhs.handle;
|
||||
rhs.handle = nullptr;
|
||||
}
|
||||
Co::~Co() {
|
||||
|
||||
Co::~Co()
|
||||
{
|
||||
if (handle) {
|
||||
handle.promise().alive = false;
|
||||
handle.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
Co promise_type::get_return_object() {
|
||||
Co promise_type::get_return_object()
|
||||
{
|
||||
auto handle = handle_type::from_promise(*this);
|
||||
return Co{handle};
|
||||
};
|
||||
|
||||
std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h) noexcept {
|
||||
auto& p = h.promise();
|
||||
std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h) noexcept
|
||||
{
|
||||
auto & p = h.promise();
|
||||
auto goal = p.goal;
|
||||
assert(goal);
|
||||
goal->trace("in final_awaiter");
|
||||
|
|
@ -39,9 +46,9 @@ std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h
|
|||
// We still have a continuation, i.e. work to do.
|
||||
// We assert that the goal is still busy.
|
||||
assert(goal->exitCode == ecBusy);
|
||||
assert(goal->top_co); // Goal must have an active coroutine.
|
||||
assert(goal->top_co); // Goal must have an active coroutine.
|
||||
assert(goal->top_co->handle == h); // The active coroutine must be us.
|
||||
assert(p.alive); // We must not have been destructed.
|
||||
assert(p.alive); // We must not have been destructed.
|
||||
|
||||
// we move continuation to the top,
|
||||
// note: previous top_co is actually h, so by moving into it,
|
||||
|
|
@ -68,7 +75,8 @@ std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h
|
|||
}
|
||||
}
|
||||
|
||||
void promise_type::return_value(Co&& next) {
|
||||
void promise_type::return_value(Co && next)
|
||||
{
|
||||
goal->trace("return_value(Co&&)");
|
||||
// Save old continuation.
|
||||
auto old_continuation = std::move(continuation);
|
||||
|
|
@ -82,20 +90,22 @@ void promise_type::return_value(Co&& next) {
|
|||
continuation->handle.promise().continuation = std::move(old_continuation);
|
||||
}
|
||||
|
||||
std::coroutine_handle<> nix::Goal::Co::await_suspend(handle_type caller) {
|
||||
std::coroutine_handle<> nix::Goal::Co::await_suspend(handle_type caller)
|
||||
{
|
||||
assert(handle); // we must be a valid coroutine
|
||||
auto& p = handle.promise();
|
||||
auto & p = handle.promise();
|
||||
assert(!p.continuation); // we must have no continuation
|
||||
assert(!p.goal); // we must not have a goal yet
|
||||
assert(!p.goal); // we must not have a goal yet
|
||||
auto goal = caller.promise().goal;
|
||||
assert(goal);
|
||||
p.goal = goal;
|
||||
p.continuation = std::move(goal->top_co); // we set our continuation to be top_co (i.e. caller)
|
||||
goal->top_co = std::move(*this); // we set top_co to ourselves, don't use this anymore after this!
|
||||
return p.goal->top_co->handle; // we execute ourselves
|
||||
goal->top_co = std::move(*this); // we set top_co to ourselves, don't use this anymore after this!
|
||||
return p.goal->top_co->handle; // we execute ourselves
|
||||
}
|
||||
|
||||
bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
|
||||
bool CompareGoalPtrs::operator()(const GoalPtr & a, const GoalPtr & b) const
|
||||
{
|
||||
std::string s1 = a->key();
|
||||
std::string s2 = b->key();
|
||||
return s1 < s2;
|
||||
|
|
@ -146,9 +156,11 @@ Goal::Done Goal::amDone(ExitCode result, std::optional<Error> ex)
|
|||
|
||||
goal->trace(fmt("waitee '%s' done; %d left", name, goal->waitees.size()));
|
||||
|
||||
if (result == ecFailed || result == ecNoSubstituters) ++goal->nrFailed;
|
||||
if (result == ecFailed || result == ecNoSubstituters)
|
||||
++goal->nrFailed;
|
||||
|
||||
if (result == ecNoSubstituters) ++goal->nrNoSubstituters;
|
||||
if (result == ecNoSubstituters)
|
||||
++goal->nrNoSubstituters;
|
||||
|
||||
if (goal->waitees.empty()) {
|
||||
worker.wakeUp(goal);
|
||||
|
|
@ -177,7 +189,6 @@ Goal::Done Goal::amDone(ExitCode result, std::optional<Error> ex)
|
|||
return Done{};
|
||||
}
|
||||
|
||||
|
||||
void Goal::trace(std::string_view s)
|
||||
{
|
||||
debug("%1%: %2%", name, s);
|
||||
|
|
@ -194,22 +205,25 @@ void Goal::work()
|
|||
assert(top_co || exitCode != ecBusy);
|
||||
}
|
||||
|
||||
Goal::Co Goal::yield() {
|
||||
Goal::Co Goal::yield()
|
||||
{
|
||||
worker.wakeUp(shared_from_this());
|
||||
co_await Suspend{};
|
||||
co_return Return{};
|
||||
}
|
||||
|
||||
Goal::Co Goal::waitForAWhile() {
|
||||
Goal::Co Goal::waitForAWhile()
|
||||
{
|
||||
worker.waitForAWhile(shared_from_this());
|
||||
co_await Suspend{};
|
||||
co_return Return{};
|
||||
}
|
||||
|
||||
Goal::Co Goal::waitForBuildSlot() {
|
||||
Goal::Co Goal::waitForBuildSlot()
|
||||
{
|
||||
worker.waitForBuildSlot(shared_from_this());
|
||||
co_await Suspend{};
|
||||
co_return Return{};
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -8,7 +8,8 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
PathSubstitutionGoal::PathSubstitutionGoal(
|
||||
const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
: Goal(worker, init())
|
||||
, storePath(storePath)
|
||||
, repair(repair)
|
||||
|
|
@ -19,17 +20,12 @@ PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker &
|
|||
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
|
||||
}
|
||||
|
||||
|
||||
PathSubstitutionGoal::~PathSubstitutionGoal()
|
||||
{
|
||||
cleanup();
|
||||
}
|
||||
|
||||
|
||||
Goal::Done PathSubstitutionGoal::done(
|
||||
ExitCode result,
|
||||
BuildResult::Status status,
|
||||
std::optional<std::string> errorMsg)
|
||||
Goal::Done PathSubstitutionGoal::done(ExitCode result, BuildResult::Status status, std::optional<std::string> errorMsg)
|
||||
{
|
||||
buildResult.status = status;
|
||||
if (errorMsg) {
|
||||
|
|
@ -39,7 +35,6 @@ Goal::Done PathSubstitutionGoal::done(
|
|||
return amDone(result);
|
||||
}
|
||||
|
||||
|
||||
Goal::Co PathSubstitutionGoal::init()
|
||||
{
|
||||
trace("init");
|
||||
|
|
@ -52,7 +47,8 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
}
|
||||
|
||||
if (settings.readOnlyMode)
|
||||
throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));
|
||||
throw Error(
|
||||
"cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));
|
||||
|
||||
auto subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
|
||||
|
||||
|
|
@ -72,8 +68,7 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
|
||||
if (ca) {
|
||||
subPath = sub->makeFixedOutputPathFromCA(
|
||||
std::string { storePath.name() },
|
||||
ContentAddressWithReferences::withoutRefs(*ca));
|
||||
std::string{storePath.name()}, ContentAddressWithReferences::withoutRefs(*ca));
|
||||
if (sub->storeDir == worker.store.storeDir)
|
||||
assert(subPath == storePath);
|
||||
} else if (sub->storeDir != worker.store.storeDir) {
|
||||
|
|
@ -86,13 +81,16 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
} catch (InvalidPath &) {
|
||||
continue;
|
||||
} catch (SubstituterDisabled & e) {
|
||||
if (settings.tryFallback) continue;
|
||||
else throw e;
|
||||
if (settings.tryFallback)
|
||||
continue;
|
||||
else
|
||||
throw e;
|
||||
} catch (Error & e) {
|
||||
if (settings.tryFallback) {
|
||||
logError(e.info());
|
||||
continue;
|
||||
} else throw e;
|
||||
} else
|
||||
throw e;
|
||||
}
|
||||
|
||||
if (info->path != storePath) {
|
||||
|
|
@ -101,8 +99,11 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
info2->path = storePath;
|
||||
info = info2;
|
||||
} else {
|
||||
printError("asked '%s' for '%s' but got '%s'",
|
||||
sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
|
||||
printError(
|
||||
"asked '%s' for '%s' but got '%s'",
|
||||
sub->getUri(),
|
||||
worker.store.printStorePath(storePath),
|
||||
sub->printStorePath(info->path));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
|
@ -114,18 +115,19 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
|
||||
maintainExpectedDownload =
|
||||
narInfo && narInfo->fileSize
|
||||
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
|
||||
: nullptr;
|
||||
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
|
||||
: nullptr;
|
||||
|
||||
worker.updateProgress();
|
||||
|
||||
/* Bail out early if this substituter lacks a valid
|
||||
signature. LocalStore::addToStore() also checks for this, but
|
||||
only after we've downloaded the path. */
|
||||
if (!sub->config.isTrusted && worker.store.pathInfoIsUntrusted(*info))
|
||||
{
|
||||
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
|
||||
worker.store.printStorePath(storePath), sub->getUri());
|
||||
if (!sub->config.isTrusted && worker.store.pathInfoIsUntrusted(*info)) {
|
||||
warn(
|
||||
"ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
|
||||
worker.store.printStorePath(storePath),
|
||||
sub->getUri());
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -159,11 +161,12 @@ Goal::Co PathSubstitutionGoal::init()
|
|||
co_return done(
|
||||
substituterFailed ? ecFailed : ecNoSubstituters,
|
||||
BuildResult::NoSubstituters,
|
||||
fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));
|
||||
fmt("path '%s' is required, but there is no substituter that can build it",
|
||||
worker.store.printStorePath(storePath)));
|
||||
}
|
||||
|
||||
|
||||
Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
|
||||
Goal::Co PathSubstitutionGoal::tryToRun(
|
||||
StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
|
||||
{
|
||||
trace("all references realised");
|
||||
|
||||
|
|
@ -175,11 +178,13 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
|
|||
}
|
||||
|
||||
for (auto & i : info->references)
|
||||
/* ignore self-references */
|
||||
/* ignore self-references */
|
||||
if (i != storePath) {
|
||||
if (!worker.store.isValidPath(i)) {
|
||||
throw Error("reference '%s' of path '%s' is not a valid path",
|
||||
worker.store.printStorePath(i), worker.store.printStorePath(storePath));
|
||||
throw Error(
|
||||
"reference '%s' of path '%s' is not a valid path",
|
||||
worker.store.printStorePath(i),
|
||||
worker.store.printStorePath(storePath));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -215,8 +220,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
|
|||
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
|
||||
PushActivity pact(act.id);
|
||||
|
||||
copyStorePath(*sub, worker.store,
|
||||
subPath, repair, sub->config.isTrusted ? NoCheckSigs : CheckSigs);
|
||||
copyStorePath(*sub, worker.store, subPath, repair, sub->config.isTrusted ? NoCheckSigs : CheckSigs);
|
||||
|
||||
promise.set_value();
|
||||
} catch (...) {
|
||||
|
|
@ -224,13 +228,17 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
|
|||
}
|
||||
});
|
||||
|
||||
worker.childStarted(shared_from_this(), {
|
||||
worker.childStarted(
|
||||
shared_from_this(),
|
||||
{
|
||||
#ifndef _WIN32
|
||||
outPipe.readSide.get()
|
||||
outPipe.readSide.get()
|
||||
#else
|
||||
&outPipe
|
||||
&outPipe
|
||||
#endif
|
||||
}, true, false);
|
||||
},
|
||||
true,
|
||||
false);
|
||||
|
||||
co_await Suspend{};
|
||||
|
||||
|
|
@ -282,13 +290,11 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub,
|
|||
co_return done(ecSuccess, BuildResult::Substituted);
|
||||
}
|
||||
|
||||
|
||||
void PathSubstitutionGoal::handleEOF(Descriptor fd)
|
||||
{
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
void PathSubstitutionGoal::cleanup()
|
||||
{
|
||||
try {
|
||||
|
|
@ -304,5 +310,4 @@ void PathSubstitutionGoal::cleanup()
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -29,7 +29,6 @@ Worker::Worker(Store & store, Store & evalStore)
|
|||
checkMismatch = false;
|
||||
}
|
||||
|
||||
|
||||
Worker::~Worker()
|
||||
{
|
||||
/* Explicitly get rid of all strong pointers now. After this all
|
||||
|
|
@ -44,9 +43,10 @@ Worker::~Worker()
|
|||
}
|
||||
|
||||
template<class G, typename... Args>
|
||||
std::shared_ptr<G> Worker::initGoalIfNeeded(std::weak_ptr<G> & goal_weak, Args && ...args)
|
||||
std::shared_ptr<G> Worker::initGoalIfNeeded(std::weak_ptr<G> & goal_weak, Args &&... args)
|
||||
{
|
||||
if (auto goal = goal_weak.lock()) return goal;
|
||||
if (auto goal = goal_weak.lock())
|
||||
return goal;
|
||||
|
||||
auto goal = std::make_shared<G>(args...);
|
||||
goal_weak = goal;
|
||||
|
|
@ -55,64 +55,60 @@ std::shared_ptr<G> Worker::initGoalIfNeeded(std::weak_ptr<G> & goal_weak, Args &
|
|||
}
|
||||
|
||||
std::shared_ptr<DerivationTrampolineGoal> Worker::makeDerivationTrampolineGoal(
|
||||
ref<const SingleDerivedPath> drvReq,
|
||||
const OutputsSpec & wantedOutputs,
|
||||
BuildMode buildMode)
|
||||
ref<const SingleDerivedPath> drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode)
|
||||
{
|
||||
return initGoalIfNeeded(
|
||||
derivationTrampolineGoals.ensureSlot(*drvReq).value[wantedOutputs],
|
||||
drvReq, wantedOutputs, *this, buildMode);
|
||||
derivationTrampolineGoals.ensureSlot(*drvReq).value[wantedOutputs], drvReq, wantedOutputs, *this, buildMode);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DerivationTrampolineGoal> Worker::makeDerivationTrampolineGoal(
|
||||
const StorePath & drvPath,
|
||||
const OutputsSpec & wantedOutputs,
|
||||
const Derivation & drv,
|
||||
BuildMode buildMode)
|
||||
const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode)
|
||||
{
|
||||
return initGoalIfNeeded(
|
||||
derivationTrampolineGoals.ensureSlot(DerivedPath::Opaque{drvPath}).value[wantedOutputs],
|
||||
drvPath, wantedOutputs, drv, *this, buildMode);
|
||||
drvPath,
|
||||
wantedOutputs,
|
||||
drv,
|
||||
*this,
|
||||
buildMode);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
|
||||
const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode)
|
||||
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(
|
||||
const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode)
|
||||
{
|
||||
return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DerivationBuildingGoal> Worker::makeDerivationBuildingGoal(const StorePath & drvPath,
|
||||
const Derivation & drv, BuildMode buildMode)
|
||||
std::shared_ptr<DerivationBuildingGoal>
|
||||
Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode)
|
||||
{
|
||||
return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
std::shared_ptr<PathSubstitutionGoal>
|
||||
Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
{
|
||||
return initGoalIfNeeded(substitutionGoals[path], path, *this, repair, ca);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
std::shared_ptr<DrvOutputSubstitutionGoal>
|
||||
Worker::makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
{
|
||||
return initGoalIfNeeded(drvOutputSubstitutionGoals[id], id, *this, repair, ca);
|
||||
}
|
||||
|
||||
|
||||
GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[&](const DerivedPath::Built & bfd) -> GoalPtr {
|
||||
return makeDerivationTrampolineGoal(bfd.drvPath, bfd.outputs, buildMode);
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const DerivedPath::Built & bfd) -> GoalPtr {
|
||||
return makeDerivationTrampolineGoal(bfd.drvPath, bfd.outputs, buildMode);
|
||||
},
|
||||
[&](const DerivedPath::Opaque & bo) -> GoalPtr {
|
||||
return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
|
||||
},
|
||||
},
|
||||
[&](const DerivedPath::Opaque & bo) -> GoalPtr {
|
||||
return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
|
||||
},
|
||||
}, req.raw());
|
||||
req.raw());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -149,12 +145,12 @@ static bool removeGoal(std::shared_ptr<G> goal, std::map<K, Inner> & goalMap)
|
|||
}
|
||||
|
||||
template<typename G>
|
||||
static bool removeGoal(std::shared_ptr<G> goal, typename DerivedPathMap<std::map<OutputsSpec, std::weak_ptr<G>>>::ChildNode & node)
|
||||
static bool
|
||||
removeGoal(std::shared_ptr<G> goal, typename DerivedPathMap<std::map<OutputsSpec, std::weak_ptr<G>>>::ChildNode & node)
|
||||
{
|
||||
return removeGoal(goal, node.value) || removeGoal(goal, node.childMap);
|
||||
}
|
||||
|
||||
|
||||
void Worker::removeGoal(GoalPtr goal)
|
||||
{
|
||||
if (auto drvGoal = std::dynamic_pointer_cast<DerivationTrampolineGoal>(goal))
|
||||
|
|
@ -181,34 +177,31 @@ void Worker::removeGoal(GoalPtr goal)
|
|||
/* Wake up goals waiting for any goal to finish. */
|
||||
for (auto & i : waitingForAnyGoal) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
if (goal)
|
||||
wakeUp(goal);
|
||||
}
|
||||
|
||||
waitingForAnyGoal.clear();
|
||||
}
|
||||
|
||||
|
||||
void Worker::wakeUp(GoalPtr goal)
|
||||
{
|
||||
goal->trace("woken up");
|
||||
addToWeakGoals(awake, goal);
|
||||
}
|
||||
|
||||
|
||||
size_t Worker::getNrLocalBuilds()
|
||||
{
|
||||
return nrLocalBuilds;
|
||||
}
|
||||
|
||||
|
||||
size_t Worker::getNrSubstitutions()
|
||||
{
|
||||
return nrSubstitutions;
|
||||
}
|
||||
|
||||
|
||||
void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
|
||||
bool inBuildSlot, bool respectTimeouts)
|
||||
void Worker::childStarted(
|
||||
GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels, bool inBuildSlot, bool respectTimeouts)
|
||||
{
|
||||
Child child;
|
||||
child.goal = goal;
|
||||
|
|
@ -235,12 +228,11 @@ void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::Com
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void Worker::childTerminated(Goal * goal, bool wakeSleepers)
|
||||
{
|
||||
auto i = std::find_if(children.begin(), children.end(),
|
||||
[&](const Child & child) { return child.goal2 == goal; });
|
||||
if (i == children.end()) return;
|
||||
auto i = std::find_if(children.begin(), children.end(), [&](const Child & child) { return child.goal2 == goal; });
|
||||
if (i == children.end())
|
||||
return;
|
||||
|
||||
if (i->inBuildSlot) {
|
||||
switch (goal->jobCategory()) {
|
||||
|
|
@ -267,40 +259,37 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
|
|||
/* Wake up goals waiting for a build slot. */
|
||||
for (auto & j : wantingToBuild) {
|
||||
GoalPtr goal = j.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
if (goal)
|
||||
wakeUp(goal);
|
||||
}
|
||||
|
||||
wantingToBuild.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForBuildSlot(GoalPtr goal)
|
||||
{
|
||||
goal->trace("wait for build slot");
|
||||
bool isSubstitutionGoal = goal->jobCategory() == JobCategory::Substitution;
|
||||
if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs) ||
|
||||
(isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
|
||||
if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs)
|
||||
|| (isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
|
||||
wakeUp(goal); /* we can do it right away */
|
||||
else
|
||||
addToWeakGoals(wantingToBuild, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForAnyGoal(GoalPtr goal)
|
||||
{
|
||||
debug("wait for any goal");
|
||||
addToWeakGoals(waitingForAnyGoal, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForAWhile(GoalPtr goal)
|
||||
{
|
||||
debug("wait for a while");
|
||||
addToWeakGoals(waitingForAWhile, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::run(const Goals & _topGoals)
|
||||
{
|
||||
std::vector<nix::DerivedPath> topPaths;
|
||||
|
|
@ -308,10 +297,11 @@ void Worker::run(const Goals & _topGoals)
|
|||
for (auto & i : _topGoals) {
|
||||
topGoals.insert(i);
|
||||
if (auto goal = dynamic_cast<DerivationTrampolineGoal *>(i.get())) {
|
||||
topPaths.push_back(DerivedPath::Built {
|
||||
.drvPath = goal->drvReq,
|
||||
.outputs = goal->wantedOutputs,
|
||||
});
|
||||
topPaths.push_back(
|
||||
DerivedPath::Built{
|
||||
.drvPath = goal->drvReq,
|
||||
.outputs = goal->wantedOutputs,
|
||||
});
|
||||
} else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
|
||||
topPaths.push_back(DerivedPath::Opaque{goal->storePath});
|
||||
}
|
||||
|
|
@ -336,33 +326,37 @@ void Worker::run(const Goals & _topGoals)
|
|||
Goals awake2;
|
||||
for (auto & i : awake) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) awake2.insert(goal);
|
||||
if (goal)
|
||||
awake2.insert(goal);
|
||||
}
|
||||
awake.clear();
|
||||
for (auto & goal : awake2) {
|
||||
checkInterrupt();
|
||||
goal->work();
|
||||
if (topGoals.empty()) break; // stuff may have been cancelled
|
||||
if (topGoals.empty())
|
||||
break; // stuff may have been cancelled
|
||||
}
|
||||
}
|
||||
|
||||
if (topGoals.empty()) break;
|
||||
if (topGoals.empty())
|
||||
break;
|
||||
|
||||
/* Wait for input. */
|
||||
if (!children.empty() || !waitingForAWhile.empty())
|
||||
waitForInput();
|
||||
else if (awake.empty() && 0U == settings.maxBuildJobs) {
|
||||
if (getMachines().empty())
|
||||
throw Error(
|
||||
"Unable to start any build; either increase '--max-jobs' or enable remote builds.\n"
|
||||
"\n"
|
||||
"For more information run 'man nix.conf' and search for '/machines'.");
|
||||
throw Error(
|
||||
"Unable to start any build; either increase '--max-jobs' or enable remote builds.\n"
|
||||
"\n"
|
||||
"For more information run 'man nix.conf' and search for '/machines'.");
|
||||
else
|
||||
throw Error(
|
||||
"Unable to start any build; remote machines may not have all required system features.\n"
|
||||
"\n"
|
||||
"For more information run 'man nix.conf' and search for '/machines'.");
|
||||
} else assert(!awake.empty());
|
||||
throw Error(
|
||||
"Unable to start any build; remote machines may not have all required system features.\n"
|
||||
"\n"
|
||||
"For more information run 'man nix.conf' and search for '/machines'.");
|
||||
} else
|
||||
assert(!awake.empty());
|
||||
}
|
||||
|
||||
/* If --keep-going is not set, it's possible that the main goal
|
||||
|
|
@ -395,7 +389,8 @@ void Worker::waitForInput()
|
|||
// Periodicallty wake up to see if we need to run the garbage collector.
|
||||
nearest = before + std::chrono::seconds(10);
|
||||
for (auto & i : children) {
|
||||
if (!i.respectTimeouts) continue;
|
||||
if (!i.respectTimeouts)
|
||||
continue;
|
||||
if (0 != settings.maxSilentTime)
|
||||
nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
|
||||
if (0 != settings.buildTimeout)
|
||||
|
|
@ -410,11 +405,15 @@ void Worker::waitForInput()
|
|||
up after a few seconds at most. */
|
||||
if (!waitingForAWhile.empty()) {
|
||||
useTimeout = true;
|
||||
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
|
||||
timeout = std::max(1L,
|
||||
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before)
|
||||
lastWokenUp = before;
|
||||
timeout = std::max(
|
||||
1L,
|
||||
(long) std::chrono::duration_cast<std::chrono::seconds>(
|
||||
lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
|
||||
} else lastWokenUp = steady_time_point::min();
|
||||
lastWokenUp + std::chrono::seconds(settings.pollInterval) - before)
|
||||
.count());
|
||||
} else
|
||||
lastWokenUp = steady_time_point::min();
|
||||
|
||||
if (useTimeout)
|
||||
vomit("sleeping %d seconds", timeout);
|
||||
|
|
@ -427,7 +426,7 @@ void Worker::waitForInput()
|
|||
includes EOF. */
|
||||
for (auto & i : children) {
|
||||
for (auto & j : i.channels) {
|
||||
state.pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
|
||||
state.pollStatus.push_back((struct pollfd) {.fd = j, .events = POLLIN});
|
||||
state.fdToPollStatus[j] = state.pollStatus.size() - 1;
|
||||
}
|
||||
}
|
||||
|
|
@ -437,7 +436,7 @@ void Worker::waitForInput()
|
|||
#ifdef _WIN32
|
||||
ioport.get(),
|
||||
#endif
|
||||
useTimeout ? (std::optional { timeout * 1000 }) : std::nullopt);
|
||||
useTimeout ? (std::optional{timeout * 1000}) : std::nullopt);
|
||||
|
||||
auto after = steady_time_point::clock::now();
|
||||
|
||||
|
|
@ -455,8 +454,7 @@ void Worker::waitForInput()
|
|||
state.iterate(
|
||||
j->channels,
|
||||
[&](Descriptor k, std::string_view data) {
|
||||
printMsg(lvlVomit, "%1%: read %2% bytes",
|
||||
goal->getName(), data.size());
|
||||
printMsg(lvlVomit, "%1%: read %2% bytes", goal->getName(), data.size());
|
||||
j->lastOutput = after;
|
||||
goal->handleChildOutput(k, data);
|
||||
},
|
||||
|
|
@ -465,24 +463,16 @@ void Worker::waitForInput()
|
|||
goal->handleEOF(k);
|
||||
});
|
||||
|
||||
if (goal->exitCode == Goal::ecBusy &&
|
||||
0 != settings.maxSilentTime &&
|
||||
j->respectTimeouts &&
|
||||
after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
|
||||
{
|
||||
goal->timedOut(Error(
|
||||
"%1% timed out after %2% seconds of silence",
|
||||
goal->getName(), settings.maxSilentTime));
|
||||
if (goal->exitCode == Goal::ecBusy && 0 != settings.maxSilentTime && j->respectTimeouts
|
||||
&& after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime)) {
|
||||
goal->timedOut(
|
||||
Error("%1% timed out after %2% seconds of silence", goal->getName(), settings.maxSilentTime));
|
||||
}
|
||||
|
||||
else if (goal->exitCode == Goal::ecBusy &&
|
||||
0 != settings.buildTimeout &&
|
||||
j->respectTimeouts &&
|
||||
after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
|
||||
{
|
||||
goal->timedOut(Error(
|
||||
"%1% timed out after %2% seconds",
|
||||
goal->getName(), settings.buildTimeout));
|
||||
else if (
|
||||
goal->exitCode == Goal::ecBusy && 0 != settings.buildTimeout && j->respectTimeouts
|
||||
&& after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout)) {
|
||||
goal->timedOut(Error("%1% timed out after %2% seconds", goal->getName(), settings.buildTimeout));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -490,26 +480,26 @@ void Worker::waitForInput()
|
|||
lastWokenUp = after;
|
||||
for (auto & i : waitingForAWhile) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
if (goal)
|
||||
wakeUp(goal);
|
||||
}
|
||||
waitingForAWhile.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned int Worker::failingExitStatus()
|
||||
{
|
||||
// See API docs in header for explanation
|
||||
unsigned int mask = 0;
|
||||
bool buildFailure = permanentFailure || timedOut || hashMismatch;
|
||||
if (buildFailure)
|
||||
mask |= 0x04; // 100
|
||||
mask |= 0x04; // 100
|
||||
if (timedOut)
|
||||
mask |= 0x01; // 101
|
||||
mask |= 0x01; // 101
|
||||
if (hashMismatch)
|
||||
mask |= 0x02; // 102
|
||||
mask |= 0x02; // 102
|
||||
if (checkMismatch) {
|
||||
mask |= 0x08; // 104
|
||||
mask |= 0x08; // 104
|
||||
}
|
||||
|
||||
if (mask)
|
||||
|
|
@ -517,11 +507,11 @@ unsigned int Worker::failingExitStatus()
|
|||
return mask ? mask : 1;
|
||||
}
|
||||
|
||||
|
||||
bool Worker::pathContentsGood(const StorePath & path)
|
||||
{
|
||||
auto i = pathContentsGoodCache.find(path);
|
||||
if (i != pathContentsGoodCache.end()) return i->second;
|
||||
if (i != pathContentsGoodCache.end())
|
||||
return i->second;
|
||||
printInfo("checking path '%s'...", store.printStorePath(path));
|
||||
auto info = store.queryPathInfo(path);
|
||||
bool res;
|
||||
|
|
@ -529,8 +519,10 @@ bool Worker::pathContentsGood(const StorePath & path)
|
|||
res = false;
|
||||
else {
|
||||
auto current = hashPath(
|
||||
{store.getFSAccessor(), CanonPath(path.to_string())},
|
||||
FileIngestionMethod::NixArchive, info->narHash.algo).first;
|
||||
{store.getFSAccessor(), CanonPath(path.to_string())},
|
||||
FileIngestionMethod::NixArchive,
|
||||
info->narHash.algo)
|
||||
.first;
|
||||
Hash nullHash(HashAlgorithm::SHA256);
|
||||
res = info->narHash == nullHash || info->narHash == current;
|
||||
}
|
||||
|
|
@ -540,13 +532,11 @@ bool Worker::pathContentsGood(const StorePath & path)
|
|||
return res;
|
||||
}
|
||||
|
||||
|
||||
void Worker::markContentsGood(const StorePath & path)
|
||||
{
|
||||
pathContentsGoodCache.insert_or_assign(path, true);
|
||||
}
|
||||
|
||||
|
||||
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal)
|
||||
{
|
||||
return subGoal;
|
||||
|
|
@ -562,4 +552,4 @@ GoalPtr upcast_goal(std::shared_ptr<DerivationGoal> subGoal)
|
|||
return subGoal;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -58,13 +58,9 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
|
|||
* Python package brings its own
|
||||
* `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
|
||||
*/
|
||||
if (hasSuffix(srcFile, "/propagated-build-inputs") ||
|
||||
hasSuffix(srcFile, "/nix-support") ||
|
||||
hasSuffix(srcFile, "/perllocal.pod") ||
|
||||
hasSuffix(srcFile, "/info/dir") ||
|
||||
hasSuffix(srcFile, "/log") ||
|
||||
hasSuffix(srcFile, "/manifest.nix") ||
|
||||
hasSuffix(srcFile, "/manifest.json"))
|
||||
if (hasSuffix(srcFile, "/propagated-build-inputs") || hasSuffix(srcFile, "/nix-support")
|
||||
|| hasSuffix(srcFile, "/perllocal.pod") || hasSuffix(srcFile, "/info/dir") || hasSuffix(srcFile, "/log")
|
||||
|| hasSuffix(srcFile, "/manifest.nix") || hasSuffix(srcFile, "/manifest.json"))
|
||||
continue;
|
||||
|
||||
else if (S_ISDIR(srcSt.st_mode)) {
|
||||
|
|
@ -80,11 +76,14 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
|
|||
throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target);
|
||||
if (unlink(dstFile.c_str()) == -1)
|
||||
throw SysError("unlinking '%1%'", dstFile);
|
||||
if (mkdir(dstFile.c_str()
|
||||
#ifndef _WIN32 // TODO abstract mkdir perms for Windows
|
||||
, 0755
|
||||
#endif
|
||||
) == -1)
|
||||
if (mkdir(
|
||||
dstFile.c_str()
|
||||
#ifndef _WIN32 // TODO abstract mkdir perms for Windows
|
||||
,
|
||||
0755
|
||||
#endif
|
||||
)
|
||||
== -1)
|
||||
throw SysError("creating directory '%1%'", dstFile);
|
||||
createLinks(state, target, dstFile, state.priorities[dstFile]);
|
||||
createLinks(state, srcFile, dstFile, priority);
|
||||
|
|
@ -100,11 +99,7 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
|
|||
if (S_ISLNK(dstSt.st_mode)) {
|
||||
auto prevPriority = state.priorities[dstFile];
|
||||
if (prevPriority == priority)
|
||||
throw BuildEnvFileConflictError(
|
||||
readLink(dstFile),
|
||||
srcFile,
|
||||
priority
|
||||
);
|
||||
throw BuildEnvFileConflictError(readLink(dstFile), srcFile, priority);
|
||||
if (prevPriority < priority)
|
||||
continue;
|
||||
if (unlink(dstFile.c_str()) == -1)
|
||||
|
|
@ -127,16 +122,18 @@ void buildProfile(const Path & out, Packages && pkgs)
|
|||
PathSet done, postponed;
|
||||
|
||||
auto addPkg = [&](const Path & pkgDir, int priority) {
|
||||
if (!done.insert(pkgDir).second) return;
|
||||
if (!done.insert(pkgDir).second)
|
||||
return;
|
||||
createLinks(state, pkgDir, out, priority);
|
||||
|
||||
try {
|
||||
for (const auto & p : tokenizeString<std::vector<std::string>>(
|
||||
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
|
||||
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
|
||||
if (!done.count(p))
|
||||
postponed.insert(p);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
|
||||
if (e.errNo != ENOENT && e.errNo != ENOTDIR)
|
||||
throw;
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -171,7 +168,8 @@ static void builtinBuildenv(const BuiltinBuilderContext & ctx)
|
|||
{
|
||||
auto getAttr = [&](const std::string & name) {
|
||||
auto i = ctx.drv.env.find(name);
|
||||
if (i == ctx.drv.env.end()) throw Error("attribute '%s' missing", name);
|
||||
if (i == ctx.drv.env.end())
|
||||
throw Error("attribute '%s' missing", name);
|
||||
return i->second;
|
||||
};
|
||||
|
||||
|
|
@ -191,7 +189,7 @@ static void builtinBuildenv(const BuiltinBuilderContext & ctx)
|
|||
const int priority = stoi(*itemIt++);
|
||||
const size_t outputs = stoul(*itemIt++);
|
||||
|
||||
for (size_t n {0}; n < outputs; n++) {
|
||||
for (size_t n{0}; n < outputs; n++) {
|
||||
pkgs.emplace_back(std::move(*itemIt++), active, priority);
|
||||
}
|
||||
}
|
||||
|
|
@ -204,4 +202,4 @@ static void builtinBuildenv(const BuiltinBuilderContext & ctx)
|
|||
|
||||
static RegisterBuiltinBuilder registerBuildenv("buildenv", builtinBuildenv);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -35,14 +35,11 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx)
|
|||
auto fileTransfer = makeFileTransfer();
|
||||
|
||||
auto fetch = [&](const std::string & url) {
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
|
||||
FileTransferRequest request(url);
|
||||
request.decompress = false;
|
||||
|
||||
auto decompressor = makeDecompressionSink(
|
||||
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
|
||||
auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
|
||||
fileTransfer->download(std::move(request), *decompressor);
|
||||
decompressor->finish();
|
||||
});
|
||||
|
|
@ -64,8 +61,11 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx)
|
|||
if (dof && dof->ca.method.getFileIngestionMethod() == FileIngestionMethod::Flat)
|
||||
for (auto hashedMirror : settings.hashedMirrors.get())
|
||||
try {
|
||||
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
|
||||
fetch(hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/" + dof->ca.hash.to_string(HashFormat::Base16, false));
|
||||
if (!hasSuffix(hashedMirror, "/"))
|
||||
hashedMirror += '/';
|
||||
fetch(
|
||||
hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/"
|
||||
+ dof->ca.hash.to_string(HashFormat::Base16, false));
|
||||
return;
|
||||
} catch (Error & e) {
|
||||
debug(e.what());
|
||||
|
|
@ -77,4 +77,4 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx)
|
|||
|
||||
static RegisterBuiltinBuilder registerFetchurl("fetchurl", builtinFetchurl);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -7,7 +7,8 @@ static void builtinUnpackChannel(const BuiltinBuilderContext & ctx)
|
|||
{
|
||||
auto getAttr = [&](const std::string & name) -> const std::string & {
|
||||
auto i = ctx.drv.env.find(name);
|
||||
if (i == ctx.drv.env.end()) throw Error("attribute '%s' missing", name);
|
||||
if (i == ctx.drv.env.end())
|
||||
throw Error("attribute '%s' missing", name);
|
||||
return i->second;
|
||||
};
|
||||
|
||||
|
|
@ -42,4 +43,4 @@ static void builtinUnpackChannel(const BuiltinBuilderContext & ctx)
|
|||
|
||||
static RegisterBuiltinBuilder registerUnpackChannel("unpack-channel", builtinUnpackChannel);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -18,80 +18,80 @@ std::string CommonProto::Serialise<std::string>::read(const StoreDirConfig & sto
|
|||
return readString(conn.from);
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::string>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::string & str)
|
||||
void CommonProto::Serialise<std::string>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const std::string & str)
|
||||
{
|
||||
conn.to << str;
|
||||
}
|
||||
|
||||
|
||||
StorePath CommonProto::Serialise<StorePath>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return store.parseStorePath(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<StorePath>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath)
|
||||
void CommonProto::Serialise<StorePath>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath)
|
||||
{
|
||||
conn.to << store.printStorePath(storePath);
|
||||
}
|
||||
|
||||
|
||||
ContentAddress CommonProto::Serialise<ContentAddress>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return ContentAddress::parse(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<ContentAddress>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const ContentAddress & ca)
|
||||
void CommonProto::Serialise<ContentAddress>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const ContentAddress & ca)
|
||||
{
|
||||
conn.to << renderContentAddress(ca);
|
||||
}
|
||||
|
||||
|
||||
Realisation CommonProto::Serialise<Realisation>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
std::string rawInput = readString(conn.from);
|
||||
return Realisation::fromJSON(
|
||||
nlohmann::json::parse(rawInput),
|
||||
"remote-protocol"
|
||||
);
|
||||
return Realisation::fromJSON(nlohmann::json::parse(rawInput), "remote-protocol");
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<Realisation>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const Realisation & realisation)
|
||||
void CommonProto::Serialise<Realisation>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const Realisation & realisation)
|
||||
{
|
||||
conn.to << realisation.toJSON().dump();
|
||||
}
|
||||
|
||||
|
||||
DrvOutput CommonProto::Serialise<DrvOutput>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return DrvOutput::parse(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<DrvOutput>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const DrvOutput & drvOutput)
|
||||
void CommonProto::Serialise<DrvOutput>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const DrvOutput & drvOutput)
|
||||
{
|
||||
conn.to << drvOutput.to_string();
|
||||
}
|
||||
|
||||
|
||||
std::optional<StorePath> CommonProto::Serialise<std::optional<StorePath>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
std::optional<StorePath>
|
||||
CommonProto::Serialise<std::optional<StorePath>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
auto s = readString(conn.from);
|
||||
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
|
||||
return s == "" ? std::optional<StorePath>{} : store.parseStorePath(s);
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::optional<StorePath>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
|
||||
void CommonProto::Serialise<std::optional<StorePath>>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
|
||||
{
|
||||
conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
|
||||
}
|
||||
|
||||
|
||||
std::optional<ContentAddress> CommonProto::Serialise<std::optional<ContentAddress>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
std::optional<ContentAddress>
|
||||
CommonProto::Serialise<std::optional<ContentAddress>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return ContentAddress::parseOpt(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::optional<ContentAddress>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
|
||||
void CommonProto::Serialise<std::optional<ContentAddress>>::write(
|
||||
const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
|
||||
{
|
||||
conn.to << (caOpt ? renderContentAddress(*caOpt) : "");
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -40,4 +40,4 @@ SSHMaster CommonSSHStoreConfig::createSSHMaster(bool useMaster, Descriptor logFD
|
|||
};
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -62,8 +62,7 @@ ContentAddressMethod ContentAddressMethod::parse(std::string_view m)
|
|||
if (m == "text")
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
else
|
||||
return fileIngestionMethodToContentAddressMethod(
|
||||
parseFileIngestionMethod(m));
|
||||
return fileIngestionMethodToContentAddressMethod(parseFileIngestionMethod(m));
|
||||
}
|
||||
|
||||
std::string_view ContentAddressMethod::renderPrefix() const
|
||||
|
|
@ -84,12 +83,10 @@ ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
|
|||
{
|
||||
if (splitPrefix(m, "r:")) {
|
||||
return ContentAddressMethod::Raw::NixArchive;
|
||||
}
|
||||
else if (splitPrefix(m, "git:")) {
|
||||
} else if (splitPrefix(m, "git:")) {
|
||||
experimentalFeatureSettings.require(Xp::GitHashing);
|
||||
return ContentAddressMethod::Raw::Git;
|
||||
}
|
||||
else if (splitPrefix(m, "text:")) {
|
||||
} else if (splitPrefix(m, "text:")) {
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
}
|
||||
return ContentAddressMethod::Raw::Flat;
|
||||
|
|
@ -145,7 +142,7 @@ std::string ContentAddress::render() const
|
|||
*/
|
||||
static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodPrefix(std::string_view & rest)
|
||||
{
|
||||
std::string_view wholeInput { rest };
|
||||
std::string_view wholeInput{rest};
|
||||
|
||||
std::string_view prefix;
|
||||
{
|
||||
|
|
@ -155,7 +152,7 @@ static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodP
|
|||
prefix = *optPrefix;
|
||||
}
|
||||
|
||||
auto parseHashAlgorithm_ = [&](){
|
||||
auto parseHashAlgorithm_ = [&]() {
|
||||
auto hashAlgoRaw = splitPrefixTo(rest, ':');
|
||||
if (!hashAlgoRaw)
|
||||
throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", wholeInput);
|
||||
|
|
@ -186,7 +183,8 @@ static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodP
|
|||
std::move(hashAlgo),
|
||||
};
|
||||
} else
|
||||
throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
|
||||
throw UsageError(
|
||||
"content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
|
||||
}
|
||||
|
||||
ContentAddress ContentAddress::parse(std::string_view rawCa)
|
||||
|
|
@ -195,7 +193,7 @@ ContentAddress ContentAddress::parse(std::string_view rawCa)
|
|||
|
||||
auto [caMethod, hashAlgo] = parseContentAddressMethodPrefix(rest);
|
||||
|
||||
return ContentAddress {
|
||||
return ContentAddress{
|
||||
.method = std::move(caMethod),
|
||||
.hash = Hash::parseNonSRIUnprefixed(rest, hashAlgo),
|
||||
};
|
||||
|
|
@ -211,9 +209,7 @@ std::pair<ContentAddressMethod, HashAlgorithm> ContentAddressMethod::parseWithAl
|
|||
|
||||
std::optional<ContentAddress> ContentAddress::parseOpt(std::string_view rawCaOpt)
|
||||
{
|
||||
return rawCaOpt == ""
|
||||
? std::nullopt
|
||||
: std::optional { ContentAddress::parse(rawCaOpt) };
|
||||
return rawCaOpt == "" ? std::nullopt : std::optional{ContentAddress::parse(rawCaOpt)};
|
||||
};
|
||||
|
||||
std::string renderContentAddress(std::optional<ContentAddress> ca)
|
||||
|
|
@ -223,8 +219,7 @@ std::string renderContentAddress(std::optional<ContentAddress> ca)
|
|||
|
||||
std::string ContentAddress::printMethodAlgo() const
|
||||
{
|
||||
return std::string { method.renderPrefix() }
|
||||
+ printHashAlgo(hash.algo);
|
||||
return std::string{method.renderPrefix()} + printHashAlgo(hash.algo);
|
||||
}
|
||||
|
||||
bool StoreReferences::empty() const
|
||||
|
|
@ -241,14 +236,14 @@ ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const Con
|
|||
{
|
||||
switch (ca.method.raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return TextInfo {
|
||||
return TextInfo{
|
||||
.hash = ca.hash,
|
||||
.references = {},
|
||||
};
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return FixedOutputInfo {
|
||||
return FixedOutputInfo{
|
||||
.method = ca.method.getFileIngestionMethod(),
|
||||
.hash = ca.hash,
|
||||
.references = {},
|
||||
|
|
@ -258,21 +253,21 @@ ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const Con
|
|||
}
|
||||
}
|
||||
|
||||
ContentAddressWithReferences ContentAddressWithReferences::fromParts(
|
||||
ContentAddressMethod method, Hash hash, StoreReferences refs)
|
||||
ContentAddressWithReferences
|
||||
ContentAddressWithReferences::fromParts(ContentAddressMethod method, Hash hash, StoreReferences refs)
|
||||
{
|
||||
switch (method.raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
if (refs.self)
|
||||
throw Error("self-reference not allowed with text hashing");
|
||||
return TextInfo {
|
||||
return TextInfo{
|
||||
.hash = std::move(hash),
|
||||
.references = std::move(refs.others),
|
||||
};
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return FixedOutputInfo {
|
||||
return FixedOutputInfo{
|
||||
.method = method.getFileIngestionMethod(),
|
||||
.hash = std::move(hash),
|
||||
.references = std::move(refs),
|
||||
|
|
@ -284,27 +279,24 @@ ContentAddressWithReferences ContentAddressWithReferences::fromParts(
|
|||
|
||||
ContentAddressMethod ContentAddressWithReferences::getMethod() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const TextInfo & th) -> ContentAddressMethod {
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[](const TextInfo & th) -> ContentAddressMethod { return ContentAddressMethod::Raw::Text; },
|
||||
[](const FixedOutputInfo & fsh) -> ContentAddressMethod {
|
||||
return fileIngestionMethodToContentAddressMethod(fsh.method);
|
||||
},
|
||||
},
|
||||
[](const FixedOutputInfo & fsh) -> ContentAddressMethod {
|
||||
return fileIngestionMethodToContentAddressMethod(
|
||||
fsh.method);
|
||||
},
|
||||
}, raw);
|
||||
raw);
|
||||
}
|
||||
|
||||
Hash ContentAddressWithReferences::getHash() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const TextInfo & th) {
|
||||
return th.hash;
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[](const TextInfo & th) { return th.hash; },
|
||||
[](const FixedOutputInfo & fsh) { return fsh.hash; },
|
||||
},
|
||||
[](const FixedOutputInfo & fsh) {
|
||||
return fsh.hash;
|
||||
},
|
||||
}, raw);
|
||||
raw);
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -18,14 +18,14 @@
|
|||
#include "nix/util/logging.hh"
|
||||
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
# include "nix/util/monitor-fd.hh"
|
||||
# include "nix/util/monitor-fd.hh"
|
||||
#endif
|
||||
|
||||
#include <sstream>
|
||||
|
||||
namespace nix::daemon {
|
||||
|
||||
Sink & operator << (Sink & sink, const Logger::Fields & fields)
|
||||
Sink & operator<<(Sink & sink, const Logger::Fields & fields)
|
||||
{
|
||||
sink << fields.size();
|
||||
for (auto & f : fields) {
|
||||
|
|
@ -34,7 +34,8 @@ Sink & operator << (Sink & sink, const Logger::Fields & fields)
|
|||
sink << f.i;
|
||||
else if (f.type == Logger::Field::tString)
|
||||
sink << f.s;
|
||||
else unreachable();
|
||||
else
|
||||
unreachable();
|
||||
}
|
||||
return sink;
|
||||
}
|
||||
|
|
@ -57,7 +58,10 @@ struct TunnelLogger : public Logger
|
|||
WorkerProto::Version clientVersion;
|
||||
|
||||
TunnelLogger(FdSink & to, WorkerProto::Version clientVersion)
|
||||
: to(to), clientVersion(clientVersion) { }
|
||||
: to(to)
|
||||
, clientVersion(clientVersion)
|
||||
{
|
||||
}
|
||||
|
||||
void enqueueMsg(const std::string & s)
|
||||
{
|
||||
|
|
@ -80,7 +84,8 @@ struct TunnelLogger : public Logger
|
|||
|
||||
void log(Verbosity lvl, std::string_view s) override
|
||||
{
|
||||
if (lvl > verbosity) return;
|
||||
if (lvl > verbosity)
|
||||
return;
|
||||
|
||||
StringSink buf;
|
||||
buf << STDERR_NEXT << (s + "\n");
|
||||
|
|
@ -89,7 +94,8 @@ struct TunnelLogger : public Logger
|
|||
|
||||
void logEI(const ErrorInfo & ei) override
|
||||
{
|
||||
if (ei.level > verbosity) return;
|
||||
if (ei.level > verbosity)
|
||||
return;
|
||||
|
||||
std::ostringstream oss;
|
||||
showErrorInfo(oss, ei, false);
|
||||
|
|
@ -133,8 +139,13 @@ struct TunnelLogger : public Logger
|
|||
}
|
||||
}
|
||||
|
||||
void startActivity(ActivityId act, Verbosity lvl, ActivityType type,
|
||||
const std::string & s, const Fields & fields, ActivityId parent) override
|
||||
void startActivity(
|
||||
ActivityId act,
|
||||
Verbosity lvl,
|
||||
ActivityType type,
|
||||
const std::string & s,
|
||||
const Fields & fields,
|
||||
ActivityId parent) override
|
||||
{
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) < 20) {
|
||||
if (!s.empty())
|
||||
|
|
@ -149,7 +160,8 @@ struct TunnelLogger : public Logger
|
|||
|
||||
void stopActivity(ActivityId act) override
|
||||
{
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) < 20)
|
||||
return;
|
||||
StringSink buf;
|
||||
buf << STDERR_STOP_ACTIVITY << act;
|
||||
enqueueMsg(buf.s);
|
||||
|
|
@ -157,7 +169,8 @@ struct TunnelLogger : public Logger
|
|||
|
||||
void result(ActivityId act, ResultType type, const Fields & fields) override
|
||||
{
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) < 20)
|
||||
return;
|
||||
StringSink buf;
|
||||
buf << STDERR_RESULT << act << type << fields;
|
||||
enqueueMsg(buf.s);
|
||||
|
|
@ -167,8 +180,13 @@ struct TunnelLogger : public Logger
|
|||
struct TunnelSink : Sink
|
||||
{
|
||||
Sink & to;
|
||||
TunnelSink(Sink & to) : to(to) { }
|
||||
void operator () (std::string_view data) override
|
||||
|
||||
TunnelSink(Sink & to)
|
||||
: to(to)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(std::string_view data) override
|
||||
{
|
||||
to << STDERR_WRITE;
|
||||
writeString(data, to);
|
||||
|
|
@ -179,13 +197,20 @@ struct TunnelSource : BufferedSource
|
|||
{
|
||||
Source & from;
|
||||
BufferedSink & to;
|
||||
TunnelSource(Source & from, BufferedSink & to) : from(from), to(to) { }
|
||||
|
||||
TunnelSource(Source & from, BufferedSink & to)
|
||||
: from(from)
|
||||
, to(to)
|
||||
{
|
||||
}
|
||||
|
||||
size_t readUnbuffered(char * data, size_t len) override
|
||||
{
|
||||
to << STDERR_READ << len;
|
||||
to.flush();
|
||||
size_t n = readString(data, len, from);
|
||||
if (n == 0) throw EndOfFile("unexpected end-of-file");
|
||||
if (n == 0)
|
||||
throw EndOfFile("unexpected end-of-file");
|
||||
return n;
|
||||
}
|
||||
};
|
||||
|
|
@ -233,8 +258,10 @@ struct ClientSettings
|
|||
else if (!hasSuffix(s, "/") && trusted.count(s + "/"))
|
||||
subs.push_back(s + "/");
|
||||
else
|
||||
warn("ignoring untrusted substituter '%s', you are not a trusted user.\n"
|
||||
"Run `man nix.conf` for more information on the `substituters` configuration option.", s);
|
||||
warn(
|
||||
"ignoring untrusted substituter '%s', you are not a trusted user.\n"
|
||||
"Run `man nix.conf` for more information on the `substituters` configuration option.",
|
||||
s);
|
||||
res = subs;
|
||||
return true;
|
||||
};
|
||||
|
|
@ -245,23 +272,24 @@ struct ClientSettings
|
|||
else if (name == experimentalFeatureSettings.experimentalFeatures.name) {
|
||||
// We don’t want to forward the experimental features to
|
||||
// the daemon, as that could cause some pretty weird stuff
|
||||
if (parseFeatures(tokenizeString<StringSet>(value)) != experimentalFeatureSettings.experimentalFeatures.get())
|
||||
if (parseFeatures(tokenizeString<StringSet>(value))
|
||||
!= experimentalFeatureSettings.experimentalFeatures.get())
|
||||
debug("Ignoring the client-specified experimental features");
|
||||
} else if (name == "plugin-files") {
|
||||
warn("Ignoring the client-specified plugin-files.\n"
|
||||
"The client specifying plugins to the daemon never made sense, and was removed in Nix >=2.14.");
|
||||
}
|
||||
else if (trusted
|
||||
|| name == settings.buildTimeout.name
|
||||
|| name == settings.maxSilentTime.name
|
||||
|| name == settings.pollInterval.name
|
||||
|| name == "connect-timeout"
|
||||
warn(
|
||||
"Ignoring the client-specified plugin-files.\n"
|
||||
"The client specifying plugins to the daemon never made sense, and was removed in Nix >=2.14.");
|
||||
} else if (
|
||||
trusted || name == settings.buildTimeout.name || name == settings.maxSilentTime.name
|
||||
|| name == settings.pollInterval.name || name == "connect-timeout"
|
||||
|| (name == "builders" && value == ""))
|
||||
settings.set(name, value);
|
||||
else if (setSubstituters(settings.substituters))
|
||||
;
|
||||
else
|
||||
warn("ignoring the client-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
|
||||
warn(
|
||||
"ignoring the client-specified setting '%s', because it is a restricted setting and you are not a trusted user",
|
||||
name);
|
||||
} catch (UsageError & e) {
|
||||
warn(e.what());
|
||||
}
|
||||
|
|
@ -269,8 +297,11 @@ struct ClientSettings
|
|||
}
|
||||
};
|
||||
|
||||
static void performOp(TunnelLogger * logger, ref<Store> store,
|
||||
TrustedFlag trusted, RecursiveFlag recursive,
|
||||
static void performOp(
|
||||
TunnelLogger * logger,
|
||||
ref<Store> store,
|
||||
TrustedFlag trusted,
|
||||
RecursiveFlag recursive,
|
||||
WorkerProto::BasicServerConnection & conn,
|
||||
WorkerProto::Op op)
|
||||
{
|
||||
|
|
@ -349,7 +380,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
store->queryReferrers(path, paths);
|
||||
else if (op == WorkerProto::Op::QueryValidDerivers)
|
||||
paths = store->queryValidDerivers(path);
|
||||
else paths = store->queryDerivationOutputs(path);
|
||||
else
|
||||
paths = store->queryDerivationOutputs(path);
|
||||
logger->stopWork();
|
||||
WorkerProto::write(*store, wconn, paths);
|
||||
break;
|
||||
|
|
@ -424,7 +456,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
assert(false);
|
||||
}
|
||||
// TODO these two steps are essentially RemoteStore::addCAToStore. Move it up to Store.
|
||||
auto path = store->addToStoreFromDump(source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair);
|
||||
auto path =
|
||||
store->addToStoreFromDump(source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair);
|
||||
return store->queryPathInfo(path);
|
||||
}();
|
||||
logger->stopWork();
|
||||
|
|
@ -440,10 +473,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
std::string hashAlgoRaw;
|
||||
conn.from >> baseName >> fixed /* obsolete */ >> recursive >> hashAlgoRaw;
|
||||
if (recursive > true)
|
||||
throw Error("unsupported FileIngestionMethod with value of %i; you may need to upgrade nix-daemon", recursive);
|
||||
method = recursive
|
||||
? ContentAddressMethod::Raw::NixArchive
|
||||
: ContentAddressMethod::Raw::Flat;
|
||||
throw Error(
|
||||
"unsupported FileIngestionMethod with value of %i; you may need to upgrade nix-daemon",
|
||||
recursive);
|
||||
method = recursive ? ContentAddressMethod::Raw::NixArchive : ContentAddressMethod::Raw::Flat;
|
||||
/* Compatibility hack. */
|
||||
if (!fixed) {
|
||||
hashAlgoRaw = "sha256";
|
||||
|
|
@ -467,8 +500,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
parseDump(sink, savedNARSource);
|
||||
});
|
||||
logger->startWork();
|
||||
auto path = store->addToStoreFromDump(
|
||||
*dumpSource, baseName, FileSerialisationMethod::NixArchive, method, hashAlgo);
|
||||
auto path =
|
||||
store->addToStoreFromDump(*dumpSource, baseName, FileSerialisationMethod::NixArchive, method, hashAlgo);
|
||||
logger->stopWork();
|
||||
|
||||
conn.to << store->printStorePath(path);
|
||||
|
|
@ -485,9 +518,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->startWork();
|
||||
{
|
||||
FramedSource source(conn.from);
|
||||
store->addMultipleToStore(source,
|
||||
RepairFlag{repair},
|
||||
dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
store->addMultipleToStore(source, RepairFlag{repair}, dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
}
|
||||
logger->stopWork();
|
||||
break;
|
||||
|
|
@ -499,8 +530,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
|
||||
logger->startWork();
|
||||
auto path = ({
|
||||
StringSource source { s };
|
||||
store->addToStoreFromDump(source, suffix, FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, refs, NoRepair);
|
||||
StringSource source{s};
|
||||
store->addToStoreFromDump(
|
||||
source,
|
||||
suffix,
|
||||
FileSerialisationMethod::Flat,
|
||||
ContentAddressMethod::Raw::Text,
|
||||
HashAlgorithm::SHA256,
|
||||
refs,
|
||||
NoRepair);
|
||||
});
|
||||
logger->stopWork();
|
||||
conn.to << store->printStorePath(path);
|
||||
|
|
@ -521,11 +559,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
case WorkerProto::Op::ImportPaths: {
|
||||
logger->startWork();
|
||||
TunnelSource source(conn.from, conn.to);
|
||||
auto paths = store->importPaths(source,
|
||||
trusted ? NoCheckSigs : CheckSigs);
|
||||
auto paths = store->importPaths(source, trusted ? NoCheckSigs : CheckSigs);
|
||||
logger->stopWork();
|
||||
Strings paths2;
|
||||
for (auto & i : paths) paths2.push_back(store->printStorePath(i));
|
||||
for (auto & i : paths)
|
||||
paths2.push_back(store->printStorePath(i));
|
||||
conn.to << paths2;
|
||||
break;
|
||||
}
|
||||
|
|
@ -644,7 +682,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
|
||||
Derivation drv2;
|
||||
static_cast<BasicDerivation &>(drv2) = drv;
|
||||
drvPath = writeDerivation(*store, Derivation { drv2 });
|
||||
drvPath = writeDerivation(*store, Derivation{drv2});
|
||||
}
|
||||
|
||||
auto res = store->buildDerivation(drvPath, drv, buildMode);
|
||||
|
|
@ -796,11 +834,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
if (i == infos.end())
|
||||
conn.to << 0;
|
||||
else {
|
||||
conn.to << 1
|
||||
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
|
||||
conn.to << 1 << (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
|
||||
WorkerProto::write(*store, wconn, i->second.references);
|
||||
conn.to << i->second.downloadSize
|
||||
<< i->second.narSize;
|
||||
conn.to << i->second.downloadSize << i->second.narSize;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
|
@ -842,7 +878,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
try {
|
||||
info = store->queryPathInfo(path);
|
||||
} catch (InvalidPath &) {
|
||||
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 17) throw;
|
||||
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 17)
|
||||
throw;
|
||||
}
|
||||
logger->stopWork();
|
||||
if (info) {
|
||||
|
|
@ -898,7 +935,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
auto path = store->parseStorePath(readString(conn.from));
|
||||
auto deriver = readString(conn.from);
|
||||
auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256);
|
||||
ValidPathInfo info { path, narHash };
|
||||
ValidPathInfo info{path, narHash};
|
||||
if (deriver != "")
|
||||
info.deriver = store->parseStorePath(deriver);
|
||||
info.references = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
|
||||
|
|
@ -915,8 +952,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->startWork();
|
||||
{
|
||||
FramedSource source(conn.from);
|
||||
store->addToStore(info, source, (RepairFlag) repair,
|
||||
dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
store->addToStore(info, source, (RepairFlag) repair, dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
}
|
||||
logger->stopWork();
|
||||
}
|
||||
|
|
@ -927,7 +963,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
if (GET_PROTOCOL_MINOR(conn.protoVersion) >= 21)
|
||||
source = std::make_unique<TunnelSource>(conn.from, conn.to);
|
||||
else {
|
||||
TeeSource tee { conn.from, saved };
|
||||
TeeSource tee{conn.from, saved};
|
||||
NullFileSystemObjectSink ether;
|
||||
parseDump(ether, tee);
|
||||
source = std::make_unique<StringSource>(saved.s);
|
||||
|
|
@ -936,8 +972,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->startWork();
|
||||
|
||||
// FIXME: race if addToStore doesn't read source?
|
||||
store->addToStore(info, *source, (RepairFlag) repair,
|
||||
dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
store->addToStore(info, *source, (RepairFlag) repair, dontCheckSigs ? NoCheckSigs : CheckSigs);
|
||||
|
||||
logger->stopWork();
|
||||
}
|
||||
|
|
@ -962,8 +997,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) {
|
||||
auto outputId = DrvOutput::parse(readString(conn.from));
|
||||
auto outputPath = StorePath(readString(conn.from));
|
||||
store->registerDrvOutput(Realisation{
|
||||
.id = outputId, .outPath = outputPath});
|
||||
store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath});
|
||||
} else {
|
||||
auto realisation = WorkerProto::Serialise<Realisation>::read(*store, rconn);
|
||||
store->registerDrvOutput(realisation);
|
||||
|
|
@ -979,11 +1013,13 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
logger->stopWork();
|
||||
if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) {
|
||||
std::set<StorePath> outPaths;
|
||||
if (info) outPaths.insert(info->outPath);
|
||||
if (info)
|
||||
outPaths.insert(info->outPath);
|
||||
WorkerProto::write(*store, wconn, outPaths);
|
||||
} else {
|
||||
std::set<Realisation> realisations;
|
||||
if (info) realisations.insert(*info);
|
||||
if (info)
|
||||
realisations.insert(*info);
|
||||
WorkerProto::write(*store, wconn, realisations);
|
||||
}
|
||||
break;
|
||||
|
|
@ -1015,12 +1051,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
|
|||
}
|
||||
}
|
||||
|
||||
void processConnection(
|
||||
ref<Store> store,
|
||||
FdSource && from,
|
||||
FdSink && to,
|
||||
TrustedFlag trusted,
|
||||
RecursiveFlag recursive)
|
||||
void processConnection(ref<Store> store, FdSource && from, FdSink && to, TrustedFlag trusted, RecursiveFlag recursive)
|
||||
{
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
auto monitor = !recursive ? std::make_unique<MonitorFdHup>(from.fd) : nullptr;
|
||||
|
|
@ -1029,8 +1060,7 @@ void processConnection(
|
|||
|
||||
/* Exchange the greeting. */
|
||||
auto [protoVersion, features] =
|
||||
WorkerProto::BasicServerConnection::handshake(
|
||||
to, from, PROTOCOL_VERSION, WorkerProto::allFeatures);
|
||||
WorkerProto::BasicServerConnection::handshake(to, from, PROTOCOL_VERSION, WorkerProto::allFeatures);
|
||||
|
||||
if (protoVersion < 0x10a)
|
||||
throw Error("the Nix client version is too old");
|
||||
|
|
@ -1059,14 +1089,14 @@ void processConnection(
|
|||
printMsgUsing(prevLogger, lvlDebug, "%d operations", opCount);
|
||||
});
|
||||
|
||||
conn.postHandshake(*store, {
|
||||
.daemonNixVersion = nixVersion,
|
||||
// We and the underlying store both need to trust the client for
|
||||
// it to be trusted.
|
||||
.remoteTrustsUs = trusted
|
||||
? store->isTrustedClient()
|
||||
: std::optional { NotTrusted },
|
||||
});
|
||||
conn.postHandshake(
|
||||
*store,
|
||||
{
|
||||
.daemonNixVersion = nixVersion,
|
||||
// We and the underlying store both need to trust the client for
|
||||
// it to be trusted.
|
||||
.remoteTrustsUs = trusted ? store->isTrustedClient() : std::optional{NotTrusted},
|
||||
});
|
||||
|
||||
/* Send startup error messages to the client. */
|
||||
tunnelLogger->startWork();
|
||||
|
|
@ -1103,7 +1133,8 @@ void processConnection(
|
|||
happens, just send the error message and exit. */
|
||||
bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr;
|
||||
tunnelLogger->stopWork(&e);
|
||||
if (!errorAllowed) throw;
|
||||
if (!errorAllowed)
|
||||
throw;
|
||||
} catch (std::bad_alloc & e) {
|
||||
auto ex = Error("Nix daemon out of memory");
|
||||
tunnelLogger->stopWork(&ex);
|
||||
|
|
@ -1127,4 +1158,4 @@ void processConnection(
|
|||
}
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix::daemon
|
||||
|
|
|
|||
|
|
@ -291,7 +291,7 @@ bool DerivationOptions::useUidRange(const BasicDerivation & drv) const
|
|||
return getRequiredSystemFeatures(drv).count("uid-range");
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
||||
namespace nlohmann {
|
||||
|
||||
|
|
@ -381,4 +381,4 @@ void adl_serializer<DerivationOptions::OutputChecks>::to_json(json & json, Deriv
|
|||
json["disallowedRequisites"] = c.disallowedRequisites;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nlohmann
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -6,18 +6,20 @@ namespace nix {
|
|||
template<typename V>
|
||||
typename DerivedPathMap<V>::ChildNode & DerivedPathMap<V>::ensureSlot(const SingleDerivedPath & k)
|
||||
{
|
||||
std::function<ChildNode &(const SingleDerivedPath & )> initIter;
|
||||
std::function<ChildNode &(const SingleDerivedPath &)> initIter;
|
||||
initIter = [&](const auto & k) -> auto & {
|
||||
return std::visit(overloaded {
|
||||
[&](const SingleDerivedPath::Opaque & bo) -> auto & {
|
||||
// will not overwrite if already there
|
||||
return map[bo.path];
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const SingleDerivedPath::Opaque & bo) -> auto & {
|
||||
// will not overwrite if already there
|
||||
return map[bo.path];
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & bfd) -> auto & {
|
||||
auto & n = initIter(*bfd.drvPath);
|
||||
return n.childMap[bfd.output];
|
||||
},
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & bfd) -> auto & {
|
||||
auto & n = initIter(*bfd.drvPath);
|
||||
return n.childMap[bfd.output];
|
||||
},
|
||||
}, k.raw());
|
||||
k.raw());
|
||||
};
|
||||
return initIter(k);
|
||||
}
|
||||
|
|
@ -25,39 +27,39 @@ typename DerivedPathMap<V>::ChildNode & DerivedPathMap<V>::ensureSlot(const Sing
|
|||
template<typename V>
|
||||
typename DerivedPathMap<V>::ChildNode * DerivedPathMap<V>::findSlot(const SingleDerivedPath & k)
|
||||
{
|
||||
std::function<ChildNode *(const SingleDerivedPath & )> initIter;
|
||||
std::function<ChildNode *(const SingleDerivedPath &)> initIter;
|
||||
initIter = [&](const auto & k) {
|
||||
return std::visit(overloaded {
|
||||
[&](const SingleDerivedPath::Opaque & bo) {
|
||||
auto it = map.find(bo.path);
|
||||
return it != map.end()
|
||||
? &it->second
|
||||
: nullptr;
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & bfd) {
|
||||
auto * n = initIter(*bfd.drvPath);
|
||||
if (!n) return (ChildNode *)nullptr;
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const SingleDerivedPath::Opaque & bo) {
|
||||
auto it = map.find(bo.path);
|
||||
return it != map.end() ? &it->second : nullptr;
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & bfd) {
|
||||
auto * n = initIter(*bfd.drvPath);
|
||||
if (!n)
|
||||
return (ChildNode *) nullptr;
|
||||
|
||||
auto it = n->childMap.find(bfd.output);
|
||||
return it != n->childMap.end()
|
||||
? &it->second
|
||||
: nullptr;
|
||||
auto it = n->childMap.find(bfd.output);
|
||||
return it != n->childMap.end() ? &it->second : nullptr;
|
||||
},
|
||||
},
|
||||
}, k.raw());
|
||||
k.raw());
|
||||
};
|
||||
return initIter(k);
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
||||
// instantiations
|
||||
|
||||
#include "nix/store/build/derivation-trampoline-goal.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
template<>
|
||||
bool DerivedPathMap<StringSet>::ChildNode::operator == (
|
||||
const DerivedPathMap<StringSet>::ChildNode &) const noexcept = default;
|
||||
bool DerivedPathMap<StringSet>::ChildNode::operator==(const DerivedPathMap<StringSet>::ChildNode &) const noexcept =
|
||||
default;
|
||||
|
||||
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
|
||||
#if 0
|
||||
|
|
@ -71,5 +73,4 @@ template struct DerivedPathMap<StringSet>;
|
|||
|
||||
template struct DerivedPathMap<std::map<OutputsSpec, std::weak_ptr<DerivationTrampolineGoal>>>;
|
||||
|
||||
|
||||
};
|
||||
}; // namespace nix
|
||||
|
|
|
|||
|
|
@ -10,38 +10,22 @@
|
|||
namespace nix {
|
||||
|
||||
// Custom implementation to avoid `ref` ptr equality
|
||||
GENERATE_CMP_EXT(
|
||||
,
|
||||
std::strong_ordering,
|
||||
SingleDerivedPathBuilt,
|
||||
*me->drvPath,
|
||||
me->output);
|
||||
GENERATE_CMP_EXT(, std::strong_ordering, SingleDerivedPathBuilt, *me->drvPath, me->output);
|
||||
|
||||
// Custom implementation to avoid `ref` ptr equality
|
||||
|
||||
// TODO no `GENERATE_CMP_EXT` because no `std::set::operator<=>` on
|
||||
// Darwin, per header.
|
||||
GENERATE_EQUAL(
|
||||
,
|
||||
DerivedPathBuilt ::,
|
||||
DerivedPathBuilt,
|
||||
*me->drvPath,
|
||||
me->outputs);
|
||||
GENERATE_ONE_CMP(
|
||||
,
|
||||
bool,
|
||||
DerivedPathBuilt ::,
|
||||
<,
|
||||
DerivedPathBuilt,
|
||||
*me->drvPath,
|
||||
me->outputs);
|
||||
GENERATE_EQUAL(, DerivedPathBuilt ::, DerivedPathBuilt, *me->drvPath, me->outputs);
|
||||
GENERATE_ONE_CMP(, bool, DerivedPathBuilt ::, <, DerivedPathBuilt, *me->drvPath, me->outputs);
|
||||
|
||||
nlohmann::json DerivedPath::Opaque::toJSON(const StoreDirConfig & store) const
|
||||
{
|
||||
return store.printStorePath(path);
|
||||
}
|
||||
|
||||
nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const {
|
||||
nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const
|
||||
{
|
||||
nlohmann::json res;
|
||||
res["drvPath"] = drvPath->toJSON(store);
|
||||
// Fallback for the input-addressed derivation case: We expect to always be
|
||||
|
|
@ -59,7 +43,8 @@ nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const {
|
|||
return res;
|
||||
}
|
||||
|
||||
nlohmann::json DerivedPath::Built::toJSON(Store & store) const {
|
||||
nlohmann::json DerivedPath::Built::toJSON(Store & store) const
|
||||
{
|
||||
nlohmann::json res;
|
||||
res["drvPath"] = drvPath->toJSON(store);
|
||||
// Fallback for the input-addressed derivation case: We expect to always be
|
||||
|
|
@ -67,7 +52,8 @@ nlohmann::json DerivedPath::Built::toJSON(Store & store) const {
|
|||
// FIXME try-resolve on drvPath
|
||||
const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath));
|
||||
for (const auto & [output, outputPathOpt] : outputMap) {
|
||||
if (!outputs.contains(output)) continue;
|
||||
if (!outputs.contains(output))
|
||||
continue;
|
||||
if (outputPathOpt)
|
||||
res["outputs"][output] = store.printStorePath(*outputPathOpt);
|
||||
else
|
||||
|
|
@ -78,16 +64,12 @@ nlohmann::json DerivedPath::Built::toJSON(Store & store) const {
|
|||
|
||||
nlohmann::json SingleDerivedPath::toJSON(Store & store) const
|
||||
{
|
||||
return std::visit([&](const auto & buildable) {
|
||||
return buildable.toJSON(store);
|
||||
}, raw());
|
||||
return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw());
|
||||
}
|
||||
|
||||
nlohmann::json DerivedPath::toJSON(Store & store) const
|
||||
{
|
||||
return std::visit([&](const auto & buildable) {
|
||||
return buildable.toJSON(store);
|
||||
}, raw());
|
||||
return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw());
|
||||
}
|
||||
|
||||
std::string DerivedPath::Opaque::to_string(const StoreDirConfig & store) const
|
||||
|
|
@ -107,82 +89,77 @@ std::string SingleDerivedPath::Built::to_string_legacy(const StoreDirConfig & st
|
|||
std::string DerivedPath::Built::to_string(const StoreDirConfig & store) const
{
return drvPath->to_string(store)
+ '^'
+ outputs.to_string();
return drvPath->to_string(store) + '^' + outputs.to_string();
}

std::string DerivedPath::Built::to_string_legacy(const StoreDirConfig & store) const
{
return drvPath->to_string_legacy(store)
+ "!"
+ outputs.to_string();
return drvPath->to_string_legacy(store) + "!" + outputs.to_string();
}

std::string SingleDerivedPath::to_string(const StoreDirConfig & store) const
{
return std::visit(
[&](const auto & req) { return req.to_string(store); },
raw());
return std::visit([&](const auto & req) { return req.to_string(store); }, raw());
}

std::string DerivedPath::to_string(const StoreDirConfig & store) const
{
return std::visit(
[&](const auto & req) { return req.to_string(store); },
raw());
return std::visit([&](const auto & req) { return req.to_string(store); }, raw());
}

std::string SingleDerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
return std::visit(overloaded {
[&](const SingleDerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const SingleDerivedPath::Opaque & req) { return req.to_string(store); },
}, this->raw());
return std::visit(
overloaded{
[&](const SingleDerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const SingleDerivedPath::Opaque & req) { return req.to_string(store); },
},
this->raw());
}

std::string DerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
return std::visit(overloaded {
[&](const DerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const DerivedPath::Opaque & req) { return req.to_string(store); },
}, this->raw());
return std::visit(
overloaded{
[&](const DerivedPath::Built & req) { return req.to_string_legacy(store); },
[&](const DerivedPath::Opaque & req) { return req.to_string(store); },
},
this->raw());
}

DerivedPath::Opaque DerivedPath::Opaque::parse(const StoreDirConfig & store, std::string_view s)
{
return {store.parseStorePath(s)};
}

void drvRequireExperiment(
const SingleDerivedPath & drv,
const ExperimentalFeatureSettings & xpSettings)
void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatureSettings & xpSettings)
{
std::visit(overloaded {
[&](const SingleDerivedPath::Opaque &) {
// plain drv path; no experimental features required.
std::visit(
overloaded{
[&](const SingleDerivedPath::Opaque &) {
// plain drv path; no experimental features required.
},
[&](const SingleDerivedPath::Built &) { xpSettings.require(Xp::DynamicDerivations); },
},
[&](const SingleDerivedPath::Built &) {
xpSettings.require(Xp::DynamicDerivations);
},
}, drv.raw());
drv.raw());
}

SingleDerivedPath::Built SingleDerivedPath::Built::parse(
const StoreDirConfig & store, ref<const SingleDerivedPath> drv,
const StoreDirConfig & store,
ref<const SingleDerivedPath> drv,
OutputNameView output,
const ExperimentalFeatureSettings & xpSettings)
{
drvRequireExperiment(*drv, xpSettings);
return {
.drvPath = drv,
.output = std::string { output },
.output = std::string{output},
};
}

DerivedPath::Built DerivedPath::Built::parse(
const StoreDirConfig & store, ref<const SingleDerivedPath> drv,
const StoreDirConfig & store,
ref<const SingleDerivedPath> drv,
OutputNameView outputsS,
const ExperimentalFeatureSettings & xpSettings)
{

@@ -194,117 +171,105 @@ DerivedPath::Built DerivedPath::Built::parse(
}

static SingleDerivedPath parseWithSingle(
const StoreDirConfig & store, std::string_view s, std::string_view separator,
const StoreDirConfig & store,
std::string_view s,
std::string_view separator,
const ExperimentalFeatureSettings & xpSettings)
{
size_t n = s.rfind(separator);
return n == s.npos
? (SingleDerivedPath) SingleDerivedPath::Opaque::parse(store, s)
: (SingleDerivedPath) SingleDerivedPath::Built::parse(store,
make_ref<const SingleDerivedPath>(parseWithSingle(
store,
s.substr(0, n),
separator,
xpSettings)),
s.substr(n + 1),
xpSettings);
? (SingleDerivedPath) SingleDerivedPath::Opaque::parse(store, s)
: (SingleDerivedPath) SingleDerivedPath::Built::parse(
store,
make_ref<const SingleDerivedPath>(parseWithSingle(store, s.substr(0, n), separator, xpSettings)),
s.substr(n + 1),
xpSettings);
}

SingleDerivedPath SingleDerivedPath::parse(
|
||||
const StoreDirConfig & store,
|
||||
std::string_view s,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
const StoreDirConfig & store, std::string_view s, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
return parseWithSingle(store, s, "^", xpSettings);
|
||||
}
|
||||
|
||||
SingleDerivedPath SingleDerivedPath::parseLegacy(
|
||||
const StoreDirConfig & store,
|
||||
std::string_view s,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
const StoreDirConfig & store, std::string_view s, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
return parseWithSingle(store, s, "!", xpSettings);
|
||||
}
|
||||
|
||||
static DerivedPath parseWith(
|
||||
const StoreDirConfig & store, std::string_view s, std::string_view separator,
|
||||
const StoreDirConfig & store,
|
||||
std::string_view s,
|
||||
std::string_view separator,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
size_t n = s.rfind(separator);
|
||||
return n == s.npos
|
||||
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
|
||||
: (DerivedPath) DerivedPath::Built::parse(store,
|
||||
make_ref<const SingleDerivedPath>(parseWithSingle(
|
||||
store,
|
||||
s.substr(0, n),
|
||||
separator,
|
||||
xpSettings)),
|
||||
s.substr(n + 1),
|
||||
xpSettings);
|
||||
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
|
||||
: (DerivedPath) DerivedPath::Built::parse(
|
||||
store,
|
||||
make_ref<const SingleDerivedPath>(parseWithSingle(store, s.substr(0, n), separator, xpSettings)),
|
||||
s.substr(n + 1),
|
||||
xpSettings);
|
||||
}
|
||||
|
||||
DerivedPath DerivedPath::parse(
|
||||
const StoreDirConfig & store,
|
||||
std::string_view s,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
DerivedPath
|
||||
DerivedPath::parse(const StoreDirConfig & store, std::string_view s, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
return parseWith(store, s, "^", xpSettings);
|
||||
}
|
||||
|
||||
DerivedPath DerivedPath::parseLegacy(
|
||||
const StoreDirConfig & store,
|
||||
std::string_view s,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
const StoreDirConfig & store, std::string_view s, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
return parseWith(store, s, "!", xpSettings);
|
||||
}
|
||||
|
||||
DerivedPath DerivedPath::fromSingle(const SingleDerivedPath & req)
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[&](const SingleDerivedPath::Opaque & o) -> DerivedPath {
|
||||
return o;
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const SingleDerivedPath::Opaque & o) -> DerivedPath { return o; },
|
||||
[&](const SingleDerivedPath::Built & b) -> DerivedPath {
|
||||
return DerivedPath::Built{
|
||||
.drvPath = b.drvPath,
|
||||
.outputs = OutputsSpec::Names{b.output},
|
||||
};
|
||||
},
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & b) -> DerivedPath {
|
||||
return DerivedPath::Built {
|
||||
.drvPath = b.drvPath,
|
||||
.outputs = OutputsSpec::Names { b.output },
|
||||
};
|
||||
},
|
||||
}, req.raw());
|
||||
req.raw());
|
||||
}
|
||||
|
||||
const StorePath & SingleDerivedPath::Built::getBaseStorePath() const
|
||||
{
|
||||
return drvPath->getBaseStorePath();
|
||||
return drvPath->getBaseStorePath();
|
||||
}
|
||||
|
||||
const StorePath & DerivedPath::Built::getBaseStorePath() const
|
||||
{
|
||||
return drvPath->getBaseStorePath();
|
||||
return drvPath->getBaseStorePath();
|
||||
}
|
||||
|
||||
template<typename DP>
|
||||
static inline const StorePath & getBaseStorePath_(const DP & derivedPath)
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[&](const typename DP::Built & bfd) -> auto & {
|
||||
return bfd.drvPath->getBaseStorePath();
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const typename DP::Built & bfd) -> auto & { return bfd.drvPath->getBaseStorePath(); },
|
||||
[&](const typename DP::Opaque & bo) -> auto & { return bo.path; },
|
||||
},
|
||||
[&](const typename DP::Opaque & bo) -> auto & {
|
||||
return bo.path;
|
||||
},
|
||||
}, derivedPath.raw());
|
||||
derivedPath.raw());
|
||||
}
|
||||
|
||||
const StorePath & SingleDerivedPath::getBaseStorePath() const
|
||||
{
|
||||
return getBaseStorePath_(*this);
|
||||
return getBaseStorePath_(*this);
|
||||
}
|
||||
|
||||
const StorePath & DerivedPath::getBaseStorePath() const
|
||||
{
|
||||
return getBaseStorePath_(*this);
|
||||
return getBaseStorePath_(*this);
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix

@@ -8,19 +8,15 @@ std::string DownstreamPlaceholder::render() const
return "/" + hash.to_string(HashFormat::Nix32, false);
|
||||
}
|
||||
|
||||
|
||||
DownstreamPlaceholder DownstreamPlaceholder::unknownCaOutput(
|
||||
const StorePath & drvPath,
|
||||
OutputNameView outputName,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
const StorePath & drvPath, OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
xpSettings.require(Xp::CaDerivations);
|
||||
auto drvNameWithExtension = drvPath.name();
|
||||
auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
|
||||
auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
|
||||
return DownstreamPlaceholder {
|
||||
hashString(HashAlgorithm::SHA256, clearText)
|
||||
};
|
||||
auto clearText =
|
||||
"nix-upstream-output:" + std::string{drvPath.hashPart()} + ":" + outputPathName(drvName, outputName);
|
||||
return DownstreamPlaceholder{hashString(HashAlgorithm::SHA256, clearText)};
|
||||
}
|
||||
|
||||
DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(

@@ -30,29 +26,25 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(
{
|
||||
xpSettings.require(Xp::DynamicDerivations);
|
||||
auto compressed = compressHash(placeholder.hash, 20);
|
||||
auto clearText = "nix-computed-output:"
|
||||
+ compressed.to_string(HashFormat::Nix32, false)
|
||||
+ ":" + std::string { outputName };
|
||||
return DownstreamPlaceholder {
|
||||
hashString(HashAlgorithm::SHA256, clearText)
|
||||
};
|
||||
auto clearText =
|
||||
"nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName};
|
||||
return DownstreamPlaceholder{hashString(HashAlgorithm::SHA256, clearText)};
|
||||
}
|
||||
|
||||
DownstreamPlaceholder DownstreamPlaceholder::fromSingleDerivedPathBuilt(
|
||||
const SingleDerivedPath::Built & b,
|
||||
const ExperimentalFeatureSettings & xpSettings)
|
||||
const SingleDerivedPath::Built & b, const ExperimentalFeatureSettings & xpSettings)
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[&](const SingleDerivedPath::Opaque & o) {
|
||||
return DownstreamPlaceholder::unknownCaOutput(o.path, b.output, xpSettings);
|
||||
return std::visit(
|
||||
overloaded{
|
||||
[&](const SingleDerivedPath::Opaque & o) {
|
||||
return DownstreamPlaceholder::unknownCaOutput(o.path, b.output, xpSettings);
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & b2) {
|
||||
return DownstreamPlaceholder::unknownDerivation(
|
||||
DownstreamPlaceholder::fromSingleDerivedPathBuilt(b2, xpSettings), b.output, xpSettings);
|
||||
},
|
||||
},
|
||||
[&](const SingleDerivedPath::Built & b2) {
|
||||
return DownstreamPlaceholder::unknownDerivation(
|
||||
DownstreamPlaceholder::fromSingleDerivedPathBuilt(b2, xpSettings),
|
||||
b.output,
|
||||
xpSettings);
|
||||
},
|
||||
}, b.drvPath->raw());
|
||||
b.drvPath->raw());
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix

@@ -3,7 +3,8 @@

namespace nix {
|
||||
|
||||
struct DummyStoreConfig : public std::enable_shared_from_this<DummyStoreConfig>, virtual StoreConfig {
|
||||
struct DummyStoreConfig : public std::enable_shared_from_this<DummyStoreConfig>, virtual StoreConfig
|
||||
{
|
||||
using StoreConfig::StoreConfig;
|
||||
|
||||
DummyStoreConfig(std::string_view scheme, std::string_view authority, const Params & params)
|
||||
|
|
@ -13,16 +14,20 @@ struct DummyStoreConfig : public std::enable_shared_from_this<DummyStoreConfig>,
|
|||
throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority);
|
||||
}
|
||||
|
||||
static const std::string name() { return "Dummy Store"; }
|
||||
static const std::string name()
|
||||
{
|
||||
return "Dummy Store";
|
||||
}
|
||||
|
||||
static std::string doc()
|
||||
{
|
||||
return
|
||||
#include "dummy-store.md"
|
||||
;
|
||||
#include "dummy-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
static StringSet uriSchemes() {
|
||||
static StringSet uriSchemes()
|
||||
{
|
||||
return {"dummy"};
|
||||
}
|
||||
|
||||
|
|
@ -38,15 +43,16 @@ struct DummyStore : virtual Store
|
|||
DummyStore(ref<const Config> config)
|
||||
: Store{*config}
|
||||
, config(config)
|
||||
{ }
|
||||
{
|
||||
}
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return *Config::uriSchemes().begin();
|
||||
}
|
||||
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
|
||||
void queryPathInfoUncached(
|
||||
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
|
||||
{
|
||||
callback(nullptr);
|
||||
}
|
||||
|
|
@ -60,11 +66,14 @@ struct DummyStore : virtual Store
|
|||
}
|
||||
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
|
||||
{ unsupported("queryPathFromHashPart"); }
|
||||
{
|
||||
unsupported("queryPathFromHashPart");
|
||||
}
|
||||
|
||||
void addToStore(const ValidPathInfo & info, Source & source,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs) override
|
||||
{ unsupported("addToStore"); }
|
||||
void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override
|
||||
{
|
||||
unsupported("addToStore");
|
||||
}
|
||||
|
||||
virtual StorePath addToStoreFromDump(
|
||||
Source & dump,
|
||||
|
|
@ -74,14 +83,20 @@ struct DummyStore : virtual Store
|
|||
HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
|
||||
const StorePathSet & references = StorePathSet(),
|
||||
RepairFlag repair = NoRepair) override
|
||||
{ unsupported("addToStore"); }
|
||||
{
|
||||
unsupported("addToStore");
|
||||
}
|
||||
|
||||
void narFromPath(const StorePath & path, Sink & sink) override
|
||||
{ unsupported("narFromPath"); }
|
||||
{
|
||||
unsupported("narFromPath");
|
||||
}
|
||||
|
||||
void queryRealisationUncached(const DrvOutput &,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
|
||||
{ callback(nullptr); }
|
||||
void
|
||||
queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override
|
||||
{
|
||||
callback(nullptr);
|
||||
}
|
||||
|
||||
virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
|
||||
{
|
||||
|
|
@ -96,4 +111,4 @@ ref<Store> DummyStore::Config::openStore() const
|
|||
|
||||
static RegisterStoreImplementation<DummyStore::Config> regDummyStore;
|
||||
|
||||
}
|
||||
} // namespace nix
@@ -35,18 +35,15 @@ void Store::exportPath(const StorePath & path, Sink & sink)
Don't complain if the stored hash is zero (unknown). */
|
||||
Hash hash = hashSink.currentHash().first;
|
||||
if (hash != info->narHash && info->narHash != Hash(info->narHash.algo))
|
||||
throw Error("hash of path '%s' has changed from '%s' to '%s'!",
|
||||
printStorePath(path), info->narHash.to_string(HashFormat::Nix32, true), hash.to_string(HashFormat::Nix32, true));
|
||||
throw Error(
|
||||
"hash of path '%s' has changed from '%s' to '%s'!",
|
||||
printStorePath(path),
|
||||
info->narHash.to_string(HashFormat::Nix32, true),
|
||||
hash.to_string(HashFormat::Nix32, true));
|
||||
|
||||
teeSink
|
||||
<< exportMagic
|
||||
<< printStorePath(path);
|
||||
CommonProto::write(*this,
|
||||
CommonProto::WriteConn { .to = teeSink },
|
||||
info->references);
|
||||
teeSink
|
||||
<< (info->deriver ? printStorePath(*info->deriver) : "")
|
||||
<< 0;
|
||||
teeSink << exportMagic << printStorePath(path);
|
||||
CommonProto::write(*this, CommonProto::WriteConn{.to = teeSink}, info->references);
|
||||
teeSink << (info->deriver ? printStorePath(*info->deriver) : "") << 0;
|
||||
}
|
||||
|
||||
StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
|
||||
|
|
@ -54,12 +51,14 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
|
|||
StorePaths res;
|
||||
while (true) {
|
||||
auto n = readNum<uint64_t>(source);
|
||||
if (n == 0) break;
|
||||
if (n != 1) throw Error("input doesn't look like something created by 'nix-store --export'");
|
||||
if (n == 0)
|
||||
break;
|
||||
if (n != 1)
|
||||
throw Error("input doesn't look like something created by 'nix-store --export'");
|
||||
|
||||
/* Extract the NAR from the source. */
|
||||
StringSink saved;
|
||||
TeeSource tee { source, saved };
|
||||
TeeSource tee{source, saved};
|
||||
NullFileSystemObjectSink ether;
|
||||
parseDump(ether, tee);
|
||||
|
||||
|
|
@ -69,14 +68,13 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
|
|||
|
||||
auto path = parseStorePath(readString(source));
|
||||
|
||||
//Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
|
||||
// Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
|
||||
|
||||
auto references = CommonProto::Serialise<StorePathSet>::read(*this,
|
||||
CommonProto::ReadConn { .from = source });
|
||||
auto references = CommonProto::Serialise<StorePathSet>::read(*this, CommonProto::ReadConn{.from = source});
|
||||
auto deriver = readString(source);
|
||||
auto narHash = hashString(HashAlgorithm::SHA256, saved.s);
|
||||
|
||||
ValidPathInfo info { path, narHash };
|
||||
ValidPathInfo info{path, narHash};
|
||||
if (deriver != "")
|
||||
info.deriver = parseStorePath(deriver);
|
||||
info.references = references;
|
||||
|
|
@ -96,4 +94,4 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
|
|||
return res;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix

@@ -10,11 +10,11 @@

#include "store-config-private.hh"
|
||||
#if NIX_WITH_S3_SUPPORT
|
||||
#include <aws/core/client/ClientConfiguration.h>
|
||||
# include <aws/core/client/ClientConfiguration.h>
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
# include "nix/util/linux-namespaces.hh"
|
||||
# include "nix/util/linux-namespaces.hh"
|
||||
#endif
|
||||
|
||||
#include <unistd.h>
|
||||
|
|
@ -77,7 +77,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();
|
||||
|
||||
inline static const std::set<long> successfulStatuses {200, 201, 204, 206, 304, 0 /* other protocol */};
|
||||
inline static const std::set<long> successfulStatuses{200, 201, 204, 206, 304, 0 /* other protocol */};
|
||||
|
||||
/* Get the HTTP status code, or 0 for other protocols. */
|
||||
long getHTTPStatus()
|
||||
|
|
@ -90,14 +90,18 @@ struct curlFileTransfer : public FileTransfer
|
|||
return httpStatus;
|
||||
}
|
||||
|
||||
TransferItem(curlFileTransfer & fileTransfer,
|
||||
TransferItem(
|
||||
curlFileTransfer & fileTransfer,
|
||||
const FileTransferRequest & request,
|
||||
Callback<FileTransferResult> && callback)
|
||||
: fileTransfer(fileTransfer)
|
||||
, request(request)
|
||||
, act(*logger, lvlTalkative, actFileTransfer,
|
||||
fmt("%sing '%s'", request.verb(), request.uri),
|
||||
{request.uri}, request.parentAct)
|
||||
, act(*logger,
|
||||
lvlTalkative,
|
||||
actFileTransfer,
|
||||
fmt("%sing '%s'", request.verb(), request.uri),
|
||||
{request.uri},
|
||||
request.parentAct)
|
||||
, callback(std::move(callback))
|
||||
, finalSink([this](std::string_view data) {
|
||||
if (errorSink) {
|
||||
|
|
@ -115,7 +119,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
}
|
||||
} else
|
||||
this->result.data.append(data);
|
||||
})
|
||||
})
|
||||
{
|
||||
result.urls.push_back(request.uri);
|
||||
|
||||
|
|
@ -124,7 +128,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
|
||||
if (!request.mimeType.empty())
|
||||
requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str());
|
||||
for (auto it = request.headers.begin(); it != request.headers.end(); ++it){
|
||||
for (auto it = request.headers.begin(); it != request.headers.end(); ++it) {
|
||||
requestHeaders = curl_slist_append(requestHeaders, fmt("%s: %s", it->first, it->second).c_str());
|
||||
}
|
||||
}
|
||||
|
|
@ -136,7 +140,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
curl_multi_remove_handle(fileTransfer.curlm, req);
|
||||
curl_easy_cleanup(req);
|
||||
}
|
||||
if (requestHeaders) curl_slist_free_all(requestHeaders);
|
||||
if (requestHeaders)
|
||||
curl_slist_free_all(requestHeaders);
|
||||
try {
|
||||
if (!done)
|
||||
fail(FileTransferError(Interrupted, {}, "download of '%s' was interrupted", request.uri));
|
||||
|
|
@ -172,12 +177,12 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
if (!decompressionSink) {
|
||||
decompressionSink = makeDecompressionSink(encoding, finalSink);
|
||||
if (! successfulStatuses.count(getHTTPStatus())) {
|
||||
if (!successfulStatuses.count(getHTTPStatus())) {
|
||||
// In this case we want to construct a TeeSink, to keep
|
||||
// the response around (which we figure won't be big
|
||||
// like an actual download should be) to improve error
|
||||
// messages.
|
||||
errorSink = StringSink { };
|
||||
errorSink = StringSink{};
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -247,7 +252,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
else if (name == "link" || name == "x-amz-meta-link") {
|
||||
auto value = trim(line.substr(i + 1));
|
||||
static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
|
||||
static std::regex linkRegex(
|
||||
"<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
|
||||
if (std::smatch match; std::regex_match(value, match, linkRegex))
|
||||
result.immutableUrl = match.str(1);
|
||||
else
|
||||
|
|
@ -273,7 +279,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
return getInterrupted();
|
||||
}
|
||||
|
||||
static int progressCallbackWrapper(void * userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)
|
||||
static int progressCallbackWrapper(
|
||||
void * userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)
|
||||
{
|
||||
auto & item = *static_cast<TransferItem *>(userp);
|
||||
auto isUpload = bool(item.request.data);
|
||||
|
|
@ -288,7 +295,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
}
|
||||
|
||||
size_t readOffset = 0;
|
||||
size_t readCallback(char *buffer, size_t size, size_t nitems)
|
||||
|
||||
size_t readCallback(char * buffer, size_t size, size_t nitems)
|
||||
{
|
||||
if (readOffset == request.data->length())
|
||||
return 0;
|
||||
|
|
@ -299,18 +307,19 @@ struct curlFileTransfer : public FileTransfer
|
|||
return count;
|
||||
}
|
||||
|
||||
static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
|
||||
static size_t readCallbackWrapper(char * buffer, size_t size, size_t nitems, void * userp)
|
||||
{
|
||||
return ((TransferItem *) userp)->readCallback(buffer, size, nitems);
|
||||
}
|
||||
|
||||
#if !defined(_WIN32) && LIBCURL_VERSION_NUM >= 0x071000
|
||||
static int cloexec_callback(void *, curl_socket_t curlfd, curlsocktype purpose) {
|
||||
#if !defined(_WIN32) && LIBCURL_VERSION_NUM >= 0x071000
|
||||
static int cloexec_callback(void *, curl_socket_t curlfd, curlsocktype purpose)
|
||||
{
|
||||
unix::closeOnExec(curlfd);
|
||||
vomit("cloexec set for fd %i", curlfd);
|
||||
return CURL_SOCKOPT_OK;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
size_t seekCallback(curl_off_t offset, int origin)
|
||||
{
|
||||
|
|
@ -324,14 +333,15 @@ struct curlFileTransfer : public FileTransfer
|
|||
return CURL_SEEKFUNC_OK;
|
||||
}
|
||||
|
||||
static size_t seekCallbackWrapper(void *clientp, curl_off_t offset, int origin)
|
||||
static size_t seekCallbackWrapper(void * clientp, curl_off_t offset, int origin)
|
||||
{
|
||||
return ((TransferItem *) clientp)->seekCallback(offset, origin);
|
||||
}
|
||||
|
||||
void init()
|
||||
{
|
||||
if (!req) req = curl_easy_init();
|
||||
if (!req)
|
||||
req = curl_easy_init();
|
||||
|
||||
curl_easy_reset(req);
|
||||
|
||||
|
|
@ -344,18 +354,21 @@ struct curlFileTransfer : public FileTransfer
|
|||
curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
|
||||
curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
|
||||
curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
|
||||
curl_easy_setopt(req, CURLOPT_USERAGENT,
|
||||
("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
|
||||
(fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : "")).c_str());
|
||||
#if LIBCURL_VERSION_NUM >= 0x072b00
|
||||
curl_easy_setopt(
|
||||
req,
|
||||
CURLOPT_USERAGENT,
|
||||
("curl/" LIBCURL_VERSION " Nix/" + nixVersion
|
||||
+ (fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : ""))
|
||||
.c_str());
|
||||
#if LIBCURL_VERSION_NUM >= 0x072b00
|
||||
curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
|
||||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x072f00
|
||||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x072f00
|
||||
if (fileTransferSettings.enableHttp2)
|
||||
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
|
||||
else
|
||||
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
|
||||
#endif
|
||||
#endif
|
||||
curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, TransferItem::writeCallbackWrapper);
|
||||
curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
|
||||
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper);
|
||||
|
|
@ -393,9 +406,9 @@ struct curlFileTransfer : public FileTransfer
|
|||
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
|
||||
}
|
||||
|
||||
#if !defined(_WIN32) && LIBCURL_VERSION_NUM >= 0x071000
|
||||
#if !defined(_WIN32) && LIBCURL_VERSION_NUM >= 0x071000
|
||||
curl_easy_setopt(req, CURLOPT_SOCKOPTFUNCTION, cloexec_callback);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, fileTransferSettings.connectTimeout.get());
|
||||
|
||||
|
|
@ -425,10 +438,14 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
auto httpStatus = getHTTPStatus();
|
||||
|
||||
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
|
||||
request.verb(), request.uri, code, httpStatus, result.bodySize,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f
|
||||
);
|
||||
debug(
|
||||
"finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
|
||||
request.verb(),
|
||||
request.uri,
|
||||
code,
|
||||
httpStatus,
|
||||
result.bodySize,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f);
|
||||
|
||||
appendCurrentUrl();
|
||||
|
||||
|
|
@ -448,8 +465,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
if (writeException)
|
||||
failEx(writeException);
|
||||
|
||||
else if (code == CURLE_OK && successfulStatuses.count(httpStatus))
|
||||
{
|
||||
else if (code == CURLE_OK && successfulStatuses.count(httpStatus)) {
|
||||
result.cached = httpStatus == 304;
|
||||
|
||||
// In 2021, GitHub responds to If-None-Match with 304,
|
||||
|
|
@ -487,32 +503,32 @@ struct curlFileTransfer : public FileTransfer
|
|||
// * 511 we're behind a captive portal
|
||||
err = Misc;
|
||||
} else {
|
||||
// Don't bother retrying on certain cURL errors either
|
||||
// Don't bother retrying on certain cURL errors either
|
||||
|
||||
// Allow selecting a subset of enum values
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wswitch-enum"
|
||||
// Allow selecting a subset of enum values
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wswitch-enum"
|
||||
switch (code) {
|
||||
case CURLE_FAILED_INIT:
|
||||
case CURLE_URL_MALFORMAT:
|
||||
case CURLE_NOT_BUILT_IN:
|
||||
case CURLE_REMOTE_ACCESS_DENIED:
|
||||
case CURLE_FILE_COULDNT_READ_FILE:
|
||||
case CURLE_FUNCTION_NOT_FOUND:
|
||||
case CURLE_ABORTED_BY_CALLBACK:
|
||||
case CURLE_BAD_FUNCTION_ARGUMENT:
|
||||
case CURLE_INTERFACE_FAILED:
|
||||
case CURLE_UNKNOWN_OPTION:
|
||||
case CURLE_SSL_CACERT_BADFILE:
|
||||
case CURLE_TOO_MANY_REDIRECTS:
|
||||
case CURLE_WRITE_ERROR:
|
||||
case CURLE_UNSUPPORTED_PROTOCOL:
|
||||
err = Misc;
|
||||
break;
|
||||
default: // Shut up warnings
|
||||
break;
|
||||
case CURLE_FAILED_INIT:
|
||||
case CURLE_URL_MALFORMAT:
|
||||
case CURLE_NOT_BUILT_IN:
|
||||
case CURLE_REMOTE_ACCESS_DENIED:
|
||||
case CURLE_FILE_COULDNT_READ_FILE:
|
||||
case CURLE_FUNCTION_NOT_FOUND:
|
||||
case CURLE_ABORTED_BY_CALLBACK:
|
||||
case CURLE_BAD_FUNCTION_ARGUMENT:
|
||||
case CURLE_INTERFACE_FAILED:
|
||||
case CURLE_UNKNOWN_OPTION:
|
||||
case CURLE_SSL_CACERT_BADFILE:
|
||||
case CURLE_TOO_MANY_REDIRECTS:
|
||||
case CURLE_WRITE_ERROR:
|
||||
case CURLE_UNSUPPORTED_PROTOCOL:
|
||||
err = Misc;
|
||||
break;
|
||||
default: // Shut up warnings
|
||||
break;
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
||||
#pragma GCC diagnostic pop
|
||||
}
|
||||
|
||||
attempt++;
|
||||
|
|
@ -520,31 +536,40 @@ struct curlFileTransfer : public FileTransfer
|
|||
std::optional<std::string> response;
|
||||
if (errorSink)
|
||||
response = std::move(errorSink->s);
|
||||
auto exc =
|
||||
code == CURLE_ABORTED_BY_CALLBACK && getInterrupted()
|
||||
? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", request.verb(), request.uri)
|
||||
: httpStatus != 0
|
||||
? FileTransferError(err,
|
||||
std::move(response),
|
||||
"unable to %s '%s': HTTP error %d%s",
|
||||
request.verb(), request.uri, httpStatus,
|
||||
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
|
||||
: FileTransferError(err,
|
||||
std::move(response),
|
||||
"unable to %s '%s': %s (%d) %s",
|
||||
request.verb(), request.uri, curl_easy_strerror(code), code, errbuf);
|
||||
auto exc = code == CURLE_ABORTED_BY_CALLBACK && getInterrupted() ? FileTransferError(
|
||||
Interrupted,
|
||||
std::move(response),
|
||||
"%s of '%s' was interrupted",
|
||||
request.verb(),
|
||||
request.uri)
|
||||
: httpStatus != 0
|
||||
? FileTransferError(
|
||||
err,
|
||||
std::move(response),
|
||||
"unable to %s '%s': HTTP error %d%s",
|
||||
request.verb(),
|
||||
request.uri,
|
||||
httpStatus,
|
||||
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
|
||||
: FileTransferError(
|
||||
err,
|
||||
std::move(response),
|
||||
"unable to %s '%s': %s (%d) %s",
|
||||
request.verb(),
|
||||
request.uri,
|
||||
curl_easy_strerror(code),
|
||||
code,
|
||||
errbuf);
|
||||
|
||||
/* If this is a transient error, then maybe retry the
|
||||
download after a while. If we're writing to a
|
||||
sink, we can only retry if the server supports
|
||||
ranged requests. */
|
||||
if (err == Transient
|
||||
&& attempt < request.tries
|
||||
&& (!this->request.dataCallback
|
||||
|| writtenToSink == 0
|
||||
|| (acceptRanges && encoding.empty())))
|
||||
{
|
||||
int ms = retryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(fileTransfer.mt19937));
|
||||
if (err == Transient && attempt < request.tries
|
||||
&& (!this->request.dataCallback || writtenToSink == 0 || (acceptRanges && encoding.empty()))) {
|
||||
int ms = retryTimeMs
|
||||
* std::pow(
|
||||
2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(fileTransfer.mt19937));
|
||||
if (writtenToSink)
|
||||
warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
|
||||
else
|
||||
|
|
@ -553,8 +578,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
errorSink.reset();
|
||||
embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
|
||||
fileTransfer.enqueueItem(shared_from_this());
|
||||
}
|
||||
else
|
||||
} else
|
||||
fail(std::move(exc));
|
||||
}
|
||||
}
|
||||
|
|
@ -562,23 +586,28 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
struct State
|
||||
{
|
||||
struct EmbargoComparator {
|
||||
bool operator() (const std::shared_ptr<TransferItem> & i1, const std::shared_ptr<TransferItem> & i2) {
|
||||
struct EmbargoComparator
|
||||
{
|
||||
bool operator()(const std::shared_ptr<TransferItem> & i1, const std::shared_ptr<TransferItem> & i2)
|
||||
{
|
||||
return i1->embargo > i2->embargo;
|
||||
}
|
||||
};
|
||||
|
||||
bool quit = false;
|
||||
std::priority_queue<std::shared_ptr<TransferItem>, std::vector<std::shared_ptr<TransferItem>>, EmbargoComparator> incoming;
|
||||
std::
|
||||
priority_queue<std::shared_ptr<TransferItem>, std::vector<std::shared_ptr<TransferItem>>, EmbargoComparator>
|
||||
incoming;
|
||||
};
|
||||
|
||||
Sync<State> state_;
|
||||
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
/* We can't use a std::condition_variable to wake up the curl
|
||||
thread, because it only monitors file descriptors. So use a
|
||||
pipe instead. */
|
||||
Pipe wakeupPipe;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
std::thread workerThread;
|
||||
|
||||
|
|
@ -590,18 +619,17 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
curlm = curl_multi_init();
|
||||
|
||||
#if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0
|
||||
#if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0
|
||||
curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
|
||||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
|
||||
curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
|
||||
fileTransferSettings.httpConnections.get());
|
||||
#endif
|
||||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
|
||||
curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, fileTransferSettings.httpConnections.get());
|
||||
#endif
|
||||
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
wakeupPipe.create();
|
||||
fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
workerThread = std::thread([&]() { workerThreadEntry(); });
|
||||
}
|
||||
|
|
@ -612,7 +640,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
workerThread.join();
|
||||
|
||||
if (curlm) curl_multi_cleanup(curlm);
|
||||
if (curlm)
|
||||
curl_multi_cleanup(curlm);
|
||||
}
|
||||
|
||||
void stopWorkerThread()
|
||||
|
|
@ -622,28 +651,26 @@ struct curlFileTransfer : public FileTransfer
|
|||
auto state(state_.lock());
|
||||
state->quit = true;
|
||||
}
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
writeFull(wakeupPipe.writeSide.get(), " ", false);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void workerThreadMain()
|
||||
{
|
||||
/* Cause this thread to be notified on SIGINT. */
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
auto callback = createInterruptCallback([&]() {
|
||||
stopWorkerThread();
|
||||
});
|
||||
#endif
|
||||
/* Cause this thread to be notified on SIGINT. */
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
auto callback = createInterruptCallback([&]() { stopWorkerThread(); });
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
#ifdef __linux__
|
||||
try {
|
||||
tryUnshareFilesystem();
|
||||
} catch (nix::Error & e) {
|
||||
e.addTrace({}, "in download thread");
|
||||
throw;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
std::map<CURL *, std::shared_ptr<TransferItem>> items;
|
||||
|
||||
|
|
@ -677,16 +704,19 @@ struct curlFileTransfer : public FileTransfer
|
|||
/* Wait for activity, including wakeup events. */
|
||||
int numfds = 0;
|
||||
struct curl_waitfd extraFDs[1];
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
extraFDs[0].fd = wakeupPipe.readSide.get();
|
||||
extraFDs[0].events = CURL_WAIT_POLLIN;
|
||||
extraFDs[0].revents = 0;
|
||||
#endif
|
||||
#endif
|
||||
long maxSleepTimeMs = items.empty() ? 10000 : 100;
|
||||
auto sleepTimeMs =
|
||||
nextWakeup != std::chrono::steady_clock::time_point()
|
||||
? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
|
||||
: maxSleepTimeMs;
|
||||
auto sleepTimeMs = nextWakeup != std::chrono::steady_clock::time_point()
|
||||
? std::max(
|
||||
0,
|
||||
(int) std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
nextWakeup - std::chrono::steady_clock::now())
|
||||
.count())
|
||||
: maxSleepTimeMs;
|
||||
vomit("download thread waiting for %d ms", sleepTimeMs);
|
||||
mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
|
||||
if (mc != CURLM_OK)
|
||||
|
|
@ -715,8 +745,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
incoming.push_back(item);
|
||||
state->incoming.pop();
|
||||
} else {
|
||||
if (nextWakeup == std::chrono::steady_clock::time_point()
|
||||
|| item->embargo < nextWakeup)
|
||||
if (nextWakeup == std::chrono::steady_clock::time_point() || item->embargo < nextWakeup)
|
||||
nextWakeup = item->embargo;
|
||||
break;
|
||||
}
|
||||
|
|
@ -747,16 +776,15 @@ struct curlFileTransfer : public FileTransfer
|
|||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
while (!state->incoming.empty()) state->incoming.pop();
|
||||
while (!state->incoming.empty())
|
||||
state->incoming.pop();
|
||||
state->quit = true;
|
||||
}
|
||||
}
|
||||
|
||||
void enqueueItem(std::shared_ptr<TransferItem> item)
|
||||
{
|
||||
if (item->request.data
|
||||
&& !hasPrefix(item->request.uri, "http://")
|
||||
&& !hasPrefix(item->request.uri, "https://"))
|
||||
if (item->request.data && !hasPrefix(item->request.uri, "http://") && !hasPrefix(item->request.uri, "https://"))
|
||||
throw nix::Error("uploading to '%s' is not supported", item->request.uri);
|
||||
|
||||
{
|
||||
|
|
@ -765,9 +793,9 @@ struct curlFileTransfer : public FileTransfer
|
|||
throw nix::Error("cannot enqueue download request because the download thread is shutting down");
|
||||
state->incoming.push(item);
|
||||
}
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
|
||||
writeFull(wakeupPipe.writeSide.get(), " ");
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
#if NIX_WITH_S3_SUPPORT
|
||||
|
|
@ -776,8 +804,8 @@ struct curlFileTransfer : public FileTransfer
|
|||
auto [path, params] = splitUriAndParams(uri);
|
||||
|
||||
auto slash = path.find('/', 5); // 5 is the length of "s3://" prefix
|
||||
if (slash == std::string::npos)
|
||||
throw nix::Error("bad S3 URI '%s'", path);
|
||||
if (slash == std::string::npos)
|
||||
throw nix::Error("bad S3 URI '%s'", path);
|
||||
|
||||
std::string bucketName(path, 5, slash - 5);
|
||||
std::string key(path, slash + 1);
|
||||
|
|
@ -786,8 +814,7 @@ struct curlFileTransfer : public FileTransfer
|
|||
}
|
||||
#endif
|
||||
|
||||
void enqueueFileTransfer(const FileTransferRequest & request,
|
||||
Callback<FileTransferResult> callback) override
|
||||
void enqueueFileTransfer(const FileTransferRequest & request, Callback<FileTransferResult> callback) override
|
||||
{
|
||||
/* Ugly hack to support s3:// URIs. */
|
||||
if (hasPrefix(request.uri, "s3://")) {
|
||||
|
|
@ -814,7 +841,9 @@ struct curlFileTransfer : public FileTransfer
|
|||
#else
|
||||
throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
|
||||
#endif
|
||||
} catch (...) { callback.rethrow(); }
|
||||
} catch (...) {
|
||||
callback.rethrow();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -845,14 +874,13 @@ ref<FileTransfer> makeFileTransfer()
|
|||
std::future<FileTransferResult> FileTransfer::enqueueFileTransfer(const FileTransferRequest & request)
|
||||
{
|
||||
auto promise = std::make_shared<std::promise<FileTransferResult>>();
|
||||
enqueueFileTransfer(request,
|
||||
{[promise](std::future<FileTransferResult> fut) {
|
||||
try {
|
||||
promise->set_value(fut.get());
|
||||
} catch (...) {
|
||||
promise->set_exception(std::current_exception());
|
||||
}
|
||||
}});
|
||||
enqueueFileTransfer(request, {[promise](std::future<FileTransferResult> fut) {
|
||||
try {
|
||||
promise->set_value(fut.get());
|
||||
} catch (...) {
|
||||
promise->set_exception(std::current_exception());
|
||||
}
|
||||
}});
|
||||
return promise->get_future();
|
||||
}
|
||||
|
||||
|
|
@ -868,9 +896,7 @@ FileTransferResult FileTransfer::upload(const FileTransferRequest & request)
|
|||
}
|
||||
|
||||
void FileTransfer::download(
|
||||
FileTransferRequest && request,
|
||||
Sink & sink,
|
||||
std::function<void(FileTransferResult)> resultCallback)
|
||||
FileTransferRequest && request, Sink & sink, std::function<void(FileTransferResult)> resultCallback)
|
||||
{
|
||||
/* Note: we can't call 'sink' via request.dataCallback, because
|
||||
that would cause the sink to execute on the fileTransfer
|
||||
|
|
@ -880,7 +906,8 @@ void FileTransfer::download(
|
|||
Therefore we use a buffer to communicate data between the
|
||||
download thread and the calling thread. */
|
||||
|
||||
struct State {
|
||||
struct State
|
||||
{
|
||||
bool quit = false;
|
||||
std::exception_ptr exc;
|
||||
std::string data;
|
||||
|
|
@ -898,10 +925,10 @@ void FileTransfer::download(
|
|||
});
|
||||
|
||||
request.dataCallback = [_state](std::string_view data) {
|
||||
|
||||
auto state(_state->lock());
|
||||
|
||||
if (state->quit) return;
|
||||
if (state->quit)
|
||||
return;
|
||||
|
||||
/* If the buffer is full, then go to sleep until the calling
|
||||
thread wakes us up (i.e. when it has removed data from the
|
||||
|
|
@ -921,8 +948,8 @@ void FileTransfer::download(
|
|||
state->avail.notify_one();
|
||||
};
|
||||
|
||||
enqueueFileTransfer(request,
|
||||
{[_state, resultCallback{std::move(resultCallback)}](std::future<FileTransferResult> fut) {
|
||||
enqueueFileTransfer(
|
||||
request, {[_state, resultCallback{std::move(resultCallback)}](std::future<FileTransferResult> fut) {
|
||||
auto state(_state->lock());
|
||||
state->quit = true;
|
||||
try {
|
||||
|
|
@ -949,13 +976,15 @@ void FileTransfer::download(
|
|||
if (state->data.empty()) {
|
||||
|
||||
if (state->quit) {
|
||||
if (state->exc) std::rethrow_exception(state->exc);
|
||||
if (state->exc)
|
||||
std::rethrow_exception(state->exc);
|
||||
return;
|
||||
}
|
||||
|
||||
state.wait(state->avail);
|
||||
|
||||
if (state->data.empty()) continue;
|
||||
if (state->data.empty())
|
||||
continue;
|
||||
}
|
||||
|
||||
chunk = std::move(state->data);
|
||||
|
|
@ -974,8 +1003,11 @@ void FileTransfer::download(
|
|||
}
|
||||
|
||||
template<typename... Args>
|
||||
FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args)
|
||||
: Error(args...), error(error), response(response)
|
||||
FileTransferError::FileTransferError(
|
||||
FileTransfer::Error error, std::optional<std::string> response, const Args &... args)
|
||||
: Error(args...)
|
||||
, error(error)
|
||||
, response(response)
|
||||
{
|
||||
const auto hf = HintFmt(args...);
|
||||
// FIXME: Due to https://github.com/NixOS/nix/issues/3841 we don't know how
|
||||
|
|
@ -987,4 +1019,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<st
|
|||
err.msg = hf;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace nix
@@ -40,7 +40,6 @@ namespace nix {
static std::string gcSocketPath = "/gc-socket/socket";
|
||||
static std::string gcRootsDir = "gcroots";
|
||||
|
||||
|
||||
void LocalStore::addIndirectRoot(const Path & path)
|
||||
{
|
||||
std::string hash = hashString(HashAlgorithm::SHA1, path).to_string(HashFormat::Nix32, false);
|
||||
|
|
@ -48,13 +47,13 @@ void LocalStore::addIndirectRoot(const Path & path)
|
|||
makeSymlink(realRoot, path);
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::createTempRootsFile()
|
||||
{
|
||||
auto fdTempRoots(_fdTempRoots.lock());
|
||||
|
||||
/* Create the temporary roots file for this process. */
|
||||
if (*fdTempRoots) return;
|
||||
if (*fdTempRoots)
|
||||
return;
|
||||
|
||||
while (1) {
|
||||
if (pathExists(fnTempRoots))
|
||||
|
|
@ -72,7 +71,8 @@ void LocalStore::createTempRootsFile()
|
|||
struct stat st;
|
||||
if (fstat(fromDescriptorReadOnly(fdTempRoots->get()), &st) == -1)
|
||||
throw SysError("statting '%1%'", fnTempRoots);
|
||||
if (st.st_size == 0) break;
|
||||
if (st.st_size == 0)
|
||||
break;
|
||||
|
||||
/* The garbage collector deleted this file before we could get
|
||||
a lock. (It won't delete the file after we get a lock.)
|
||||
|
|
@ -80,12 +80,12 @@ void LocalStore::createTempRootsFile()
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::addTempRoot(const StorePath & path)
|
||||
{
|
||||
if (config->readOnly) {
|
||||
debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
|
||||
return;
|
||||
debug(
|
||||
"Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
|
||||
return;
|
||||
}
|
||||
|
||||
createTempRootsFile();
|
||||
|
|
@ -97,7 +97,7 @@ void LocalStore::addTempRoot(const StorePath & path)
|
|||
*fdGCLock = openGCLock();
|
||||
}
|
||||
|
||||
restart:
|
||||
restart:
|
||||
/* Try to acquire a shared global GC lock (non-blocking). This
|
||||
only succeeds if the garbage collector is not currently
|
||||
running. */
|
||||
|
|
@ -157,10 +157,8 @@ void LocalStore::addTempRoot(const StorePath & path)
|
|||
writeFull(_fdTempRoots.lock()->get(), s);
|
||||
}
|
||||
|
||||
|
||||
static std::string censored = "{censored}";
|
||||
|
||||
|
||||
void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
|
||||
{
|
||||
/* Read the `temproots' directory for per-process temporary root
|
||||
|
|
@ -178,14 +176,17 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
|
|||
pid_t pid = std::stoi(name);
|
||||
|
||||
debug("reading temporary root file '%1%'", path);
|
||||
AutoCloseFD fd(toDescriptor(open(path.c_str(),
|
||||
AutoCloseFD fd(toDescriptor(open(
|
||||
path.c_str(),
|
||||
#ifndef _WIN32
|
||||
O_CLOEXEC |
|
||||
#endif
|
||||
O_RDWR, 0666)));
|
||||
O_RDWR,
|
||||
0666)));
|
||||
if (!fd) {
|
||||
/* It's okay if the file has disappeared. */
|
||||
if (errno == ENOENT) continue;
|
||||
if (errno == ENOENT)
|
||||
continue;
|
||||
throw SysError("opening temporary roots file '%1%'", path);
|
||||
}
|
||||
|
||||
|
|
@ -214,7 +215,6 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, Roots & roots)
|
||||
{
|
||||
auto foundRoot = [&](const Path & path, const Path & target) {
|
||||
|
|
@ -224,7 +224,8 @@ void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, R
|
|||
roots[std::move(storePath)].emplace(path);
|
||||
else
|
||||
printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
|
||||
} catch (BadStorePath &) { }
|
||||
} catch (BadStorePath &) {
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
|
|
@ -253,9 +254,11 @@ void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, R
|
|||
unlink(path.c_str());
|
||||
}
|
||||
} else {
|
||||
if (!std::filesystem::is_symlink(target)) return;
|
||||
if (!std::filesystem::is_symlink(target))
|
||||
return;
|
||||
Path target2 = readLink(target);
|
||||
if (isInStore(target2)) foundRoot(target, target2);
|
||||
if (isInStore(target2))
|
||||
foundRoot(target, target2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -270,7 +273,8 @@ void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, R
|
|||
|
||||
catch (std::filesystem::filesystem_error & e) {
|
||||
/* We only ignore permanent failures. */
|
||||
if (e.code() == std::errc::permission_denied || e.code() == std::errc::no_such_file_or_directory || e.code() == std::errc::not_a_directory)
|
||||
if (e.code() == std::errc::permission_denied || e.code() == std::errc::no_such_file_or_directory
|
||||
|| e.code() == std::errc::not_a_directory)
|
||||
printInfo("cannot read potential root '%1%'", path);
|
||||
else
|
||||
throw;
|
||||
|
|
@ -285,7 +289,6 @@ void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, R
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
|
||||
{
|
||||
/* Process direct roots in {gcroots,profiles}. */
|
||||
|
|
@ -298,7 +301,6 @@ void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
|
|||
findRuntimeRoots(roots, censor);
|
||||
}
|
||||
|
||||
|
||||
Roots LocalStore::findRoots(bool censor)
|
||||
{
|
||||
Roots roots;
|
||||
|
|
@ -320,9 +322,8 @@ static void readProcLink(const std::filesystem::path & file, UncheckedRoots & ro
|
|||
try {
|
||||
buf = std::filesystem::read_symlink(file);
|
||||
} catch (std::filesystem::filesystem_error & e) {
|
||||
if (e.code() == std::errc::no_such_file_or_directory
|
||||
|| e.code() == std::errc::permission_denied
|
||||
|| e.code() == std::errc::no_such_process)
|
||||
if (e.code() == std::errc::no_such_file_or_directory || e.code() == std::errc::permission_denied
|
||||
|| e.code() == std::errc::no_such_process)
|
||||
return;
|
||||
throw;
|
||||
}
|
||||
|
|
@ -362,7 +363,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
|
|||
checkInterrupt();
|
||||
if (boost::regex_match(ent->d_name, digitsRegex)) {
|
||||
try {
|
||||
readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
|
||||
readProcLink(fmt("/proc/%s/exe", ent->d_name), unchecked);
|
||||
readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);
|
||||
|
||||
auto fdStr = fmt("/proc/%s/fd", ent->d_name);
|
||||
|
|
@ -395,7 +396,9 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
|
|||
auto envFile = fmt("/proc/%s/environ", ent->d_name);
|
||||
auto envString = readFile(envFile);
|
||||
auto env_end = boost::sregex_iterator{};
|
||||
for (auto i = boost::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
|
||||
for (auto i = boost::sregex_iterator{envString.begin(), envString.end(), storePathRegex};
|
||||
i != env_end;
|
||||
++i)
|
||||
unchecked[i->str()].emplace(envFile);
|
||||
} catch (SystemError & e) {
|
||||
if (errno == ENOENT || errno == EACCES || errno == ESRCH)
|
||||
|
|
@ -416,7 +419,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
|
|||
try {
|
||||
boost::regex lsofRegex(R"(^n(/.*)$)");
|
||||
auto lsofLines =
|
||||
tokenizeString<std::vector<std::string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
|
||||
tokenizeString<std::vector<std::string>>(runProgram(LSOF, true, {"-n", "-w", "-F", "n"}), "\n");
|
||||
for (const auto & line : lsofLines) {
|
||||
boost::smatch match;
|
||||
if (boost::regex_match(line, match, lsofRegex))
|
||||
|
|
@ -435,22 +438,24 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
|
|||
#endif
|
||||
|
||||
for (auto & [target, links] : unchecked) {
|
||||
if (!isInStore(target)) continue;
|
||||
if (!isInStore(target))
|
||||
continue;
|
||||
try {
|
||||
auto path = toStorePath(target).first;
|
||||
if (!isValidPath(path)) continue;
|
||||
if (!isValidPath(path))
|
||||
continue;
|
||||
debug("got additional root '%1%'", printStorePath(path));
|
||||
if (censor)
|
||||
roots[path].insert(censored);
|
||||
else
|
||||
roots[path].insert(links.begin(), links.end());
|
||||
} catch (BadStorePath &) { }
|
||||
} catch (BadStorePath &) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
struct GCLimitReached { };
|
||||
|
||||
struct GCLimitReached
|
||||
{};
|
||||
|
||||
void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{
|
||||
|
|
@ -521,7 +526,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
fdServer.close();
|
||||
while (true) {
|
||||
auto item = remove_begin(*connections.lock());
|
||||
if (!item) break;
|
||||
if (!item)
|
||||
break;
|
||||
auto & [fd, thread] = *item;
|
||||
shutdown(fd, SHUT_RDWR);
|
||||
thread.join();
|
||||
|
|
@ -543,7 +549,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
/* Accept a new connection. */
|
||||
assert(fds[1].revents & POLLIN);
|
||||
AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
|
||||
if (!fdClient) continue;
|
||||
if (!fdClient)
|
||||
continue;
|
||||
|
||||
debug("GC roots server accepted new client");
|
||||
|
||||
|
|
@ -604,7 +611,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
Finally stopServer([&]() {
|
||||
writeFull(shutdownPipe.writeSide.get(), "x", false);
|
||||
wakeup.notify_all();
|
||||
if (serverThread.joinable()) serverThread.join();
|
||||
if (serverThread.joinable())
|
||||
serverThread.join();
|
||||
});
|
||||
|
||||
#endif
|
||||
|
|
@ -616,7 +624,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
if (!options.ignoreLiveness)
|
||||
findRootsNoTemp(rootMap, true);
|
||||
|
||||
for (auto & i : rootMap) roots.insert(i.first);
|
||||
for (auto & i : rootMap)
|
||||
roots.insert(i.first);
|
||||
|
||||
/* Read the temporary roots created before we acquired the global
|
||||
GC root. Any new roots will be sent to our socket. */
|
||||
|
|
@ -633,8 +642,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)

/* Helper function that deletes a path from the store and throws
GCLimitReached if we've deleted enough garbage. */
auto deleteFromStore = [&](std::string_view baseName)
{
auto deleteFromStore = [&](std::string_view baseName) {
Path path = storeDir + "/" + std::string(baseName);
Path realPath = config->realStoreDir + "/" + std::string(baseName);

@ -701,19 +709,24 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)

/* If we've previously deleted this path, we don't have to
handle it again. */
if (dead.count(*path)) continue;
if (dead.count(*path))
continue;

auto markAlive = [&]()
{
auto markAlive = [&]() {
alive.insert(*path);
alive.insert(start);
try {
StorePathSet closure;
computeFSClosure(*path, closure,
/* flipDirection */ false, gcKeepOutputs, gcKeepDerivations);
computeFSClosure(
*path,
closure,
/* flipDirection */ false,
gcKeepOutputs,
gcKeepDerivations);
for (auto & p : closure)
alive.insert(p);
} catch (InvalidPath &) { }
} catch (InvalidPath &) {
}
};

/* If this is a root, bail out. */

@ -722,8 +735,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
return markAlive();
}

if (options.action == GCOptions::gcDeleteSpecific
&& !options.pathsToDelete.count(*path))
if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path))
return;

{
@ -753,9 +765,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
derivation, then visit the derivation outputs. */
if (gcKeepDerivations && path->isDerivation()) {
for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
if (maybeOutPath &&
isValidPath(*maybeOutPath) &&
queryPathInfo(*maybeOutPath)->deriver == *path)
if (maybeOutPath && isValidPath(*maybeOutPath)
&& queryPathInfo(*maybeOutPath)->deriver == *path)
enqueue(*maybeOutPath);
}

@ -768,13 +779,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
}
}
for (auto & path : topoSortPaths(visited)) {
if (!dead.insert(path).second) continue;
if (!dead.insert(path).second)
continue;
if (shouldDelete) {
try {
invalidatePathChecked(path);
deleteFromStore(path.to_string());
referrersCache.erase(path);
} catch (PathInUse &e) {
} catch (PathInUse & e) {
// If we end up here, it's likely a new occurrence
// of https://github.com/NixOS/nix/issues/11923
printError("BUG: %s", e.what());

@ -806,7 +818,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)

try {
AutoCloseDir dir(opendir(config->realStoreDir.get().c_str()));
if (!dir) throw SysError("opening directory '%1%'", config->realStoreDir);
if (!dir)
throw SysError("opening directory '%1%'", config->realStoreDir);

/* Read the store and delete all paths that are invalid or
unreachable. We don't use readDirectory() here so that

@ -817,13 +830,13 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
std::string name = dirent->d_name;
if (name == "." || name == ".." || name == linksName) continue;
if (name == "." || name == ".." || name == linksName)
continue;

if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
deleteReferrersClosure(*storePath);
else
deleteFromStore(name);

}
} catch (GCLimitReached & e) {
}
@ -850,7 +863,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
printInfo("deleting unused links...");

AutoCloseDir dir(opendir(linksDir.c_str()));
if (!dir) throw SysError("opening directory '%1%'", linksDir);
if (!dir)
throw SysError("opening directory '%1%'", linksDir);

int64_t actualSize = 0, unsharedSize = 0;

@ -858,7 +872,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
std::string name = dirent->d_name;
if (name == "." || name == "..") continue;
if (name == "." || name == "..")
continue;
Path path = linksDir + "/" + name;

auto st = lstat(path);

@ -889,15 +904,15 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
#endif
;

printInfo("note: currently hard linking saves %.2f MiB",
printInfo(
"note: currently hard linking saves %.2f MiB",
((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
}

/* While we're at it, vacuum the database. */
//if (options.action == GCOptions::gcDeleteDead) vacuumDB();
// if (options.action == GCOptions::gcDeleteDead) vacuumDB();
}

void LocalStore::autoGC(bool sync)
{
#if HAVE_STATVFS
@ -927,15 +942,18 @@ void LocalStore::autoGC(bool sync)

auto now = std::chrono::steady_clock::now();

if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;
if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval))
return;

auto avail = getAvail();

state->lastGCCheck = now;

if (avail >= settings.minFree || avail >= settings.maxFree) return;
if (avail >= settings.minFree || avail >= settings.maxFree)
return;

if (avail > state->availAfterGC * 0.97) return;
if (avail > state->availAfterGC * 0.97)
return;

state->gcRunning = true;
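The checks above gate the automatic GC: it only runs when the last check is older than the min-free check interval, free space has dropped below min-free, and the previous GC actually made a noticeable difference (the 0.97 factor). A rough standalone sketch of that decision, with hypothetical settings values:

#include <cstdint>

// Sketch only: mirrors the threshold logic shown above with made-up numbers.
bool shouldRunAutoGCSketch(uint64_t avail, uint64_t availAfterLastGC)
{
    const uint64_t minFree = 1ULL << 30; // hypothetical: 1 GiB
    const uint64_t maxFree = 8ULL << 30; // hypothetical: 8 GiB
    if (avail >= minFree || avail >= maxFree)
        return false; // enough free space, nothing to do
    if (avail > availAfterLastGC * 0.97)
        return false; // last GC barely helped; avoid thrashing
    return true;
}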
@ -943,7 +961,6 @@ void LocalStore::autoGC(bool sync)
future = state->gcFuture = promise.get_future().share();

std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable {

try {

/* Wake up any threads waiting for the auto-GC to finish. */

@ -970,15 +987,14 @@ void LocalStore::autoGC(bool sync)
// future, but we don't really care. (what??)
ignoreExceptionInDestructor();
}

}).detach();
}

sync:
sync:
// Wait for the future outside of the state lock.
if (sync) future.get();
if (sync)
future.get();
#endif
}

}
} // namespace nix
@ -16,30 +16,29 @@
#include <nlohmann/json.hpp>

#ifndef _WIN32
# include <sys/utsname.h>
# include <sys/utsname.h>
#endif

#ifdef __GLIBC__
# include <gnu/lib-names.h>
# include <nss.h>
# include <dlfcn.h>
# include <gnu/lib-names.h>
# include <nss.h>
# include <dlfcn.h>
#endif

#ifdef __APPLE__
# include "nix/util/processes.hh"
# include "nix/util/processes.hh"
#endif

#include "nix/util/config-impl.hh"

#ifdef __APPLE__
#include <sys/sysctl.h>
# include <sys/sysctl.h>
#endif

#include "store-config-private.hh"

namespace nix {

/* The default location of the daemon socket, relative to nixStateDir.
The socket is in a directory to allow you to control access to the
Nix daemon by setting the mode/ownership of the directory
@ -55,17 +54,18 @@ Settings::Settings()
: nixPrefix(NIX_PREFIX)
, nixStore(
#ifndef _WIN32
// On Windows `/nix/store` is not a canonical path, but we dont'
// want to deal with that yet.
canonPath
// On Windows `/nix/store` is not a canonical path, but we dont'
// want to deal with that yet.
canonPath
#endif
(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR))))
(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR))))
, nixDataDir(canonPath(getEnvNonEmpty("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
, nixLogDir(canonPath(getEnvNonEmpty("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
, nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
, nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
, nixDaemonSocketFile(
canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
{
#ifndef _WIN32
buildUsersGroup = isRootUser() ? "nixbld" : "";

@ -91,7 +91,8 @@ Settings::Settings()

/* chroot-like behavior from Apple's sandbox */
#ifdef __APPLE__
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
sandboxPaths = tokenizeString<StringSet>(
"/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
}
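The nixStore initializer above resolves the store directory from the environment, preferring NIX_STORE_DIR, then the older NIX_STORE, then the compile-time default. A hedged, standard-library-only sketch of the same lookup chain (getEnvNonEmpty and canonPath are the real helpers; the fallback literal here stands in for the NIX_STORE_DIR macro):

#include <cstdlib>
#include <optional>
#include <string>

// Sketch only: stand-in for the lookup chain in the constructor above.
static std::optional<std::string> getEnvNonEmptySketch(const char * name)
{
    const char * v = std::getenv(name);
    if (v && *v)
        return std::string(v);
    return std::nullopt;
}

std::string resolveStoreDirSketch()
{
    return getEnvNonEmptySketch("NIX_STORE_DIR")
        .value_or(getEnvNonEmptySketch("NIX_STORE").value_or("/nix/store"));
}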
@ -102,7 +103,8 @@ void loadConfFile(AbstractConfig & config)
|
|||
try {
|
||||
std::string contents = readFile(path);
|
||||
config.applyConfig(contents, path);
|
||||
} catch (SystemError &) { }
|
||||
} catch (SystemError &) {
|
||||
}
|
||||
};
|
||||
|
||||
applyConfigFile(settings.nixConfDir + "/nix.conf");
|
||||
|
|
@ -120,7 +122,6 @@ void loadConfFile(AbstractConfig & config)
|
|||
if (nixConfEnv.has_value()) {
|
||||
config.applyConfig(nixConfEnv.value(), "NIX_CONFIG");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
std::vector<Path> getUserConfigFiles()
|
||||
|
|
@ -146,13 +147,14 @@ unsigned int Settings::getDefaultCores() const
|
|||
const unsigned int maxCPU = getMaxCPU();
|
||||
|
||||
if (maxCPU > 0)
|
||||
return maxCPU;
|
||||
return maxCPU;
|
||||
else
|
||||
return concurrency;
|
||||
return concurrency;
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
static bool hasVirt() {
|
||||
static bool hasVirt()
|
||||
{
|
||||
|
||||
int hasVMM;
|
||||
int hvSupport;
|
||||
|
|
@ -181,19 +183,19 @@ StringSet Settings::getDefaultSystemFeatures()
|
|||
actually require anything special on the machines. */
|
||||
StringSet features{"nixos-test", "benchmark", "big-parallel"};
|
||||
|
||||
#ifdef __linux__
|
||||
#ifdef __linux__
|
||||
features.insert("uid-range");
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
#ifdef __linux__
|
||||
if (access("/dev/kvm", R_OK | W_OK) == 0)
|
||||
features.insert("kvm");
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
#ifdef __APPLE__
|
||||
if (hasVirt())
|
||||
features.insert("apple-virt");
|
||||
#endif
|
||||
#endif
|
||||
|
||||
return features;
|
||||
}
|
||||
|
|
@ -214,8 +216,11 @@ StringSet Settings::getDefaultExtraPlatforms()
|
|||
// machines. Note that we can’t force processes from executing
|
||||
// x86_64 in aarch64 environments or vice versa since they can
|
||||
// always exec with their own binary preferences.
|
||||
if (std::string{NIX_LOCAL_SYSTEM} == "aarch64-darwin" &&
|
||||
runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0)
|
||||
if (std::string{NIX_LOCAL_SYSTEM} == "aarch64-darwin"
|
||||
&& runProgram(
|
||||
RunOptions{.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true})
|
||||
.first
|
||||
== 0)
|
||||
extraPlatforms.insert("x86_64-darwin");
|
||||
#endif
|
||||
|
||||
|
|
@ -237,41 +242,57 @@ bool Settings::isWSL1()

Path Settings::getDefaultSSLCertFile()
{
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathAccessible(fn)) return fn;
for (auto & fn :
{"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathAccessible(fn))
return fn;
return "";
}

std::string nixVersion = PACKAGE_VERSION;

NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
{SandboxMode::smEnabled, true},
{SandboxMode::smRelaxed, "relaxed"},
{SandboxMode::smDisabled, false},
});
NLOHMANN_JSON_SERIALIZE_ENUM(
SandboxMode,
{
{SandboxMode::smEnabled, true},
{SandboxMode::smRelaxed, "relaxed"},
{SandboxMode::smDisabled, false},
});
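The NLOHMANN_JSON_SERIALIZE_ENUM declaration above makes SandboxMode round-trip through JSON as the booleans true/false plus the string "relaxed". A small usage sketch, assuming it is compiled in the same namespace as the enum and with nlohmann/json.hpp already included as in this file:

#include <cassert>

// Sketch only: exercises the mapping declared above.
static void sandboxModeJsonExample()
{
    nlohmann::json j = SandboxMode::smRelaxed; // serializes to "relaxed"
    assert(j.get<SandboxMode>() == SandboxMode::smRelaxed);

    assert(nlohmann::json(SandboxMode::smEnabled) == nlohmann::json(true));
    assert(nlohmann::json(SandboxMode::smDisabled) == nlohmann::json(false));
}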
template<> SandboxMode BaseSetting<SandboxMode>::parse(const std::string & str) const
template<>
SandboxMode BaseSetting<SandboxMode>::parse(const std::string & str) const
{
if (str == "true") return smEnabled;
else if (str == "relaxed") return smRelaxed;
else if (str == "false") return smDisabled;
else throw UsageError("option '%s' has invalid value '%s'", name, str);
if (str == "true")
return smEnabled;
else if (str == "relaxed")
return smRelaxed;
else if (str == "false")
return smDisabled;
else
throw UsageError("option '%s' has invalid value '%s'", name, str);
}
||||
template<> struct BaseSetting<SandboxMode>::trait
|
||||
template<>
|
||||
struct BaseSetting<SandboxMode>::trait
|
||||
{
|
||||
static constexpr bool appendable = false;
|
||||
};
|
||||
|
||||
template<> std::string BaseSetting<SandboxMode>::to_string() const
|
||||
template<>
|
||||
std::string BaseSetting<SandboxMode>::to_string() const
|
||||
{
|
||||
if (value == smEnabled) return "true";
|
||||
else if (value == smRelaxed) return "relaxed";
|
||||
else if (value == smDisabled) return "false";
|
||||
else unreachable();
|
||||
if (value == smEnabled)
|
||||
return "true";
|
||||
else if (value == smRelaxed)
|
||||
return "relaxed";
|
||||
else if (value == smDisabled)
|
||||
return "false";
|
||||
else
|
||||
unreachable();
|
||||
}
|
||||
|
||||
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
|
||||
template<>
|
||||
void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
|
||||
{
|
||||
args.addFlag({
|
||||
.longName = name,
|
||||
|
|
@ -298,7 +319,8 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::s
|
|||
|
||||
unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
|
||||
{
|
||||
if (str == "auto") return std::max(1U, std::thread::hardware_concurrency());
|
||||
if (str == "auto")
|
||||
return std::max(1U, std::thread::hardware_concurrency());
|
||||
else {
|
||||
if (auto n = string2Int<decltype(value)>(str))
|
||||
return *n;
|
||||
|
|
@ -307,7 +329,6 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static void preloadNSS()
|
||||
{
|
||||
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
|
||||
|
|
@ -346,15 +367,18 @@ static void preloadNSS()
|
|||
|
||||
static bool initLibStoreDone = false;
|
||||
|
||||
void assertLibStoreInitialized() {
|
||||
void assertLibStoreInitialized()
|
||||
{
|
||||
if (!initLibStoreDone) {
|
||||
printError("The program must call nix::initNix() before calling any libstore library functions.");
|
||||
abort();
|
||||
};
|
||||
}
|
||||
|
||||
void initLibStore(bool loadConfig) {
|
||||
if (initLibStoreDone) return;
|
||||
void initLibStore(bool loadConfig)
|
||||
{
|
||||
if (initLibStoreDone)
|
||||
return;
|
||||
|
||||
initLibUtil();
|
||||
|
||||
|
|
@ -371,7 +395,8 @@ void initLibStore(bool loadConfig) {
|
|||
by calling curl_global_init here, which should mean curl will already
|
||||
have been initialized by the time we try to do so in a forked process.
|
||||
|
||||
[1] https://github.com/apple-oss-distributions/objc4/blob/01edf1705fbc3ff78a423cd21e03dfc21eb4d780/runtime/objc-initialize.mm#L614-L636
|
||||
[1]
|
||||
https://github.com/apple-oss-distributions/objc4/blob/01edf1705fbc3ff78a423cd21e03dfc21eb4d780/runtime/objc-initialize.mm#L614-L636
|
||||
*/
|
||||
curl_global_init(CURL_GLOBAL_ALL);
|
||||
#ifdef __APPLE__
|
||||
|
|
@ -385,5 +410,4 @@ void initLibStore(bool loadConfig) {
|
|||
initLibStoreDone = true;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
@ -9,7 +9,6 @@ namespace nix {
|
|||
|
||||
MakeError(UploadToHTTP, Error);
|
||||
|
||||
|
||||
StringSet HttpBinaryCacheStoreConfig::uriSchemes()
|
||||
{
|
||||
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
|
||||
|
|
@ -20,33 +19,26 @@ StringSet HttpBinaryCacheStoreConfig::uriSchemes()
}

HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig(
std::string_view scheme,
std::string_view _cacheUri,
const Params & params)
std::string_view scheme, std::string_view _cacheUri, const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, cacheUri(
std::string { scheme }
+ "://"
+ (!_cacheUri.empty()
? _cacheUri
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
std::string{scheme} + "://"
+ (!_cacheUri.empty() ? _cacheUri
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
{
while (!cacheUri.empty() && cacheUri.back() == '/')
cacheUri.pop_back();
}
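The constructor above rebuilds the cache URI from the scheme and authority, rejects an empty authority, and strips trailing slashes. A hedged sketch of the same normalization outside the class (std::invalid_argument stands in for UsageError, and the example URL is hypothetical):

#include <stdexcept>
#include <string>
#include <string_view>

// Sketch only: the normalization performed by the constructor above.
std::string normalizeCacheUriSketch(std::string_view scheme, std::string_view authority)
{
    if (authority.empty())
        throw std::invalid_argument("store URL needs a non-empty authority");
    std::string uri = std::string(scheme) + "://" + std::string(authority);
    while (!uri.empty() && uri.back() == '/')
        uri.pop_back();
    return uri;
}
// e.g. normalizeCacheUriSketch("https", "cache.example.org/") == "https://cache.example.org"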
||||
|
||||
std::string HttpBinaryCacheStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "http-binary-cache-store.md"
|
||||
;
|
||||
#include "http-binary-cache-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
class HttpBinaryCacheStore :
|
||||
public virtual BinaryCacheStore
|
||||
class HttpBinaryCacheStore : public virtual BinaryCacheStore
|
||||
{
|
||||
struct State
|
||||
{
|
||||
|
|
@ -63,8 +55,7 @@ public:
|
|||
ref<Config> config;
|
||||
|
||||
HttpBinaryCacheStore(ref<Config> config)
|
||||
: Store{*config}
|
||||
// TODO it will actually mutate the configuration
|
||||
: Store{*config} // TODO it will actually mutate the configuration
|
||||
, BinaryCacheStore{*config}
|
||||
, config{config}
|
||||
{
|
||||
|
|
@ -108,7 +99,8 @@ protected:
|
|||
void checkEnabled()
|
||||
{
|
||||
auto state(_state.lock());
|
||||
if (state->enabled) return;
|
||||
if (state->enabled)
|
||||
return;
|
||||
if (std::chrono::steady_clock::now() > state->disabledUntil) {
|
||||
state->enabled = true;
|
||||
debug("re-enabling binary cache '%s'", getUri());
|
||||
|
|
@ -136,7 +128,8 @@ protected:
|
|||
}
|
||||
}
|
||||
|
||||
void upsertFile(const std::string & path,
|
||||
void upsertFile(
|
||||
const std::string & path,
|
||||
std::shared_ptr<std::basic_iostream<char>> istream,
|
||||
const std::string & mimeType) override
|
||||
{
|
||||
|
|
@ -154,9 +147,8 @@ protected:
|
|||
{
|
||||
return FileTransferRequest(
|
||||
hasPrefix(path, "https://") || hasPrefix(path, "http://") || hasPrefix(path, "file://")
|
||||
? path
|
||||
: config->cacheUri + "/" + path);
|
||||
|
||||
? path
|
||||
: config->cacheUri + "/" + path);
|
||||
}
|
||||
|
||||
void getFile(const std::string & path, Sink & sink) override
|
||||
|
|
@ -173,8 +165,7 @@ protected:
|
|||
}
|
||||
}
|
||||
|
||||
void getFile(const std::string & path,
|
||||
Callback<std::optional<std::string>> callback) noexcept override
|
||||
void getFile(const std::string & path, Callback<std::optional<std::string>> callback) noexcept override
|
||||
{
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
|
|
@ -183,8 +174,8 @@ protected:
|
|||
|
||||
auto request(makeRequest(path));
|
||||
|
||||
getFileTransfer()->enqueueFileTransfer(request,
|
||||
{[callbackPtr, this](std::future<FileTransferResult> result) {
|
||||
getFileTransfer()->enqueueFileTransfer(
|
||||
request, {[callbackPtr, this](std::future<FileTransferResult> result) {
|
||||
try {
|
||||
(*callbackPtr)(std::move(result.get().data));
|
||||
} catch (FileTransferError & e) {
|
||||
|
|
@ -195,7 +186,7 @@ protected:
|
|||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
}});
|
||||
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
|
|
@ -232,12 +223,11 @@ protected:
|
|||
|
||||
ref<Store> HttpBinaryCacheStore::Config::openStore() const
|
||||
{
|
||||
return make_ref<HttpBinaryCacheStore>(ref{
|
||||
// FIXME we shouldn't actually need a mutable config
|
||||
std::const_pointer_cast<HttpBinaryCacheStore::Config>(shared_from_this())
|
||||
});
|
||||
return make_ref<HttpBinaryCacheStore>(
|
||||
ref{// FIXME we shouldn't actually need a mutable config
|
||||
std::const_pointer_cast<HttpBinaryCacheStore::Config>(shared_from_this())});
|
||||
}
|
||||
|
||||
static RegisterStoreImplementation<HttpBinaryCacheStore::Config> regHttpBinaryCacheStore;
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
@ -17,31 +17,42 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
|
|||
{
|
||||
using StoreConfig::StoreConfig;
|
||||
|
||||
const Setting<std::string> compression{this, "xz", "compression",
|
||||
"NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."};
|
||||
const Setting<std::string> compression{
|
||||
this, "xz", "compression", "NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."};
|
||||
|
||||
const Setting<bool> writeNARListing{this, false, "write-nar-listing",
|
||||
"Whether to write a JSON file that lists the files in each NAR."};
|
||||
const Setting<bool> writeNARListing{
|
||||
this, false, "write-nar-listing", "Whether to write a JSON file that lists the files in each NAR."};
|
||||
|
||||
const Setting<bool> writeDebugInfo{this, false, "index-debug-info",
|
||||
const Setting<bool> writeDebugInfo{
|
||||
this,
|
||||
false,
|
||||
"index-debug-info",
|
||||
R"(
|
||||
Whether to index DWARF debug info files by build ID. This allows [`dwarffs`](https://github.com/edolstra/dwarffs) to
|
||||
fetch debug info on demand
|
||||
)"};
|
||||
|
||||
const Setting<Path> secretKeyFile{this, "", "secret-key",
|
||||
"Path to the secret key used to sign the binary cache."};
|
||||
const Setting<Path> secretKeyFile{this, "", "secret-key", "Path to the secret key used to sign the binary cache."};
|
||||
|
||||
const Setting<std::string> secretKeyFiles{this, "", "secret-keys",
|
||||
"List of comma-separated paths to the secret keys used to sign the binary cache."};
|
||||
const Setting<std::string> secretKeyFiles{
|
||||
this, "", "secret-keys", "List of comma-separated paths to the secret keys used to sign the binary cache."};
|
||||
|
||||
const Setting<Path> localNarCache{this, "", "local-nar-cache",
|
||||
const Setting<Path> localNarCache{
|
||||
this,
|
||||
"",
|
||||
"local-nar-cache",
|
||||
"Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."};
|
||||
|
||||
const Setting<bool> parallelCompression{this, false, "parallel-compression",
|
||||
const Setting<bool> parallelCompression{
|
||||
this,
|
||||
false,
|
||||
"parallel-compression",
|
||||
"Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."};
|
||||
|
||||
const Setting<int> compressionLevel{this, -1, "compression-level",
|
||||
const Setting<int> compressionLevel{
|
||||
this,
|
||||
-1,
|
||||
"compression-level",
|
||||
R"(
|
||||
The *preset level* to be used when compressing NARs.
|
||||
The meaning and accepted values depend on the compression method selected.
|
||||
|
|
@ -49,14 +60,11 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
|
|||
)"};
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* @note subclasses must implement at least one of the two
|
||||
* virtual getFile() methods.
|
||||
*/
|
||||
struct BinaryCacheStore :
|
||||
virtual Store,
|
||||
virtual LogStore
|
||||
struct BinaryCacheStore : virtual Store, virtual LogStore
|
||||
{
|
||||
using Config = BinaryCacheStoreConfig;
|
||||
|
||||
|
|
@ -82,11 +90,11 @@ public:
|
|||
|
||||
virtual bool fileExists(const std::string & path) = 0;
|
||||
|
||||
virtual void upsertFile(const std::string & path,
|
||||
std::shared_ptr<std::basic_iostream<char>> istream,
|
||||
const std::string & mimeType) = 0;
|
||||
virtual void upsertFile(
|
||||
const std::string & path, std::shared_ptr<std::basic_iostream<char>> istream, const std::string & mimeType) = 0;
|
||||
|
||||
void upsertFile(const std::string & path,
|
||||
void upsertFile(
|
||||
const std::string & path,
|
||||
// FIXME: use std::string_view
|
||||
std::string && data,
|
||||
const std::string & mimeType);
|
||||
|
|
@ -106,9 +114,7 @@ public:
|
|||
* Fetch the specified file and call the specified callback with
|
||||
* the result. A subclass may implement this asynchronously.
|
||||
*/
|
||||
virtual void getFile(
|
||||
const std::string & path,
|
||||
Callback<std::optional<std::string>> callback) noexcept;
|
||||
virtual void getFile(const std::string & path, Callback<std::optional<std::string>> callback) noexcept;
|
||||
|
||||
std::optional<std::string> getFile(const std::string & path);
|
||||
|
||||
|
|
@ -125,20 +131,22 @@ private:
|
|||
void writeNarInfo(ref<NarInfo> narInfo);
|
||||
|
||||
ref<const ValidPathInfo> addToStoreCommon(
|
||||
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
|
||||
Source & narSource,
|
||||
RepairFlag repair,
|
||||
CheckSigsFlag checkSigs,
|
||||
std::function<ValidPathInfo(HashResult)> mkInfo);
|
||||
|
||||
public:
|
||||
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
void queryPathInfoUncached(
|
||||
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
|
||||
|
||||
void addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs) override;
|
||||
void
|
||||
addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs) override;
|
||||
|
||||
StorePath addToStoreFromDump(
|
||||
Source & dump,
|
||||
|
|
@ -160,8 +168,8 @@ public:
|
|||
|
||||
void registerDrvOutput(const Realisation & info) override;
|
||||
|
||||
void queryRealisationUncached(const DrvOutput &,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
void queryRealisationUncached(
|
||||
const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
|
||||
void narFromPath(const StorePath & path, Sink & sink) override;
|
||||
|
||||
|
|
@ -172,9 +180,8 @@ public:
|
|||
std::optional<std::string> getBuildLogExact(const StorePath & path) override;
|
||||
|
||||
void addBuildLog(const StorePath & drvPath, std::string_view log) override;
|
||||
|
||||
};
|
||||
|
||||
MakeError(NoSuchBinaryCacheFile, Error);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -46,25 +46,42 @@ struct BuildResult
|
|||
*/
|
||||
std::string errorMsg;
|
||||
|
||||
std::string toString() const {
|
||||
std::string toString() const
|
||||
{
|
||||
auto strStatus = [&]() {
|
||||
switch (status) {
|
||||
case Built: return "Built";
|
||||
case Substituted: return "Substituted";
|
||||
case AlreadyValid: return "AlreadyValid";
|
||||
case PermanentFailure: return "PermanentFailure";
|
||||
case InputRejected: return "InputRejected";
|
||||
case OutputRejected: return "OutputRejected";
|
||||
case TransientFailure: return "TransientFailure";
|
||||
case CachedFailure: return "CachedFailure";
|
||||
case TimedOut: return "TimedOut";
|
||||
case MiscFailure: return "MiscFailure";
|
||||
case DependencyFailed: return "DependencyFailed";
|
||||
case LogLimitExceeded: return "LogLimitExceeded";
|
||||
case NotDeterministic: return "NotDeterministic";
|
||||
case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
|
||||
case NoSubstituters: return "NoSubstituters";
|
||||
default: return "Unknown";
|
||||
case Built:
|
||||
return "Built";
|
||||
case Substituted:
|
||||
return "Substituted";
|
||||
case AlreadyValid:
|
||||
return "AlreadyValid";
|
||||
case PermanentFailure:
|
||||
return "PermanentFailure";
|
||||
case InputRejected:
|
||||
return "InputRejected";
|
||||
case OutputRejected:
|
||||
return "OutputRejected";
|
||||
case TransientFailure:
|
||||
return "TransientFailure";
|
||||
case CachedFailure:
|
||||
return "CachedFailure";
|
||||
case TimedOut:
|
||||
return "TimedOut";
|
||||
case MiscFailure:
|
||||
return "MiscFailure";
|
||||
case DependencyFailed:
|
||||
return "DependencyFailed";
|
||||
case LogLimitExceeded:
|
||||
return "LogLimitExceeded";
|
||||
case NotDeterministic:
|
||||
return "NotDeterministic";
|
||||
case ResolvesToAlreadyValid:
|
||||
return "ResolvesToAlreadyValid";
|
||||
case NoSubstituters:
|
||||
return "NoSubstituters";
|
||||
default:
|
||||
return "Unknown";
|
||||
};
|
||||
}();
|
||||
return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
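toString above renders the status name and, when an error message is present, appends it after a separating " : ". A self-contained sketch of that formatting rule, with hypothetical inputs:

#include <string>

// Sketch only: same formatting rule as the return statement above.
static std::string renderStatusSketch(const std::string & strStatus, const std::string & errorMsg)
{
    return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
}

// renderStatusSketch("TimedOut", "building took too long") == "TimedOut : building took too long"
// renderStatusSketch("Built", "") == "Built"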
|
||||
|
|
@ -100,8 +117,8 @@ struct BuildResult
|
|||
*/
|
||||
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
|
||||
|
||||
bool operator ==(const BuildResult &) const noexcept;
|
||||
std::strong_ordering operator <=>(const BuildResult &) const noexcept;
|
||||
bool operator==(const BuildResult &) const noexcept;
|
||||
std::strong_ordering operator<=>(const BuildResult &) const noexcept;
|
||||
|
||||
bool success()
|
||||
{
|
||||
|
|
@ -126,8 +143,10 @@ struct KeyedBuildResult : BuildResult
|
|||
|
||||
// Hack to work around a gcc "may be used uninitialized" warning.
|
||||
KeyedBuildResult(BuildResult res, DerivedPath path)
|
||||
: BuildResult(std::move(res)), path(std::move(path))
|
||||
{ }
|
||||
: BuildResult(std::move(res))
|
||||
, path(std::move(path))
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -19,14 +19,10 @@ struct HookInstance;
|
|||
struct DerivationBuilder;
|
||||
#endif
|
||||
|
||||
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
|
||||
typedef enum { rpAccept, rpDecline, rpPostpone } HookReply;
|
||||
|
||||
/** Used internally */
|
||||
void runPostBuildHook(
|
||||
Store & store,
|
||||
Logger & logger,
|
||||
const StorePath & drvPath,
|
||||
const StorePathSet & outputPaths);
|
||||
void runPostBuildHook(Store & store, Logger & logger, const StorePath & drvPath, const StorePathSet & outputPaths);
|
||||
|
||||
/**
|
||||
* A goal for building a derivation. Substitution, (or any other method of
|
||||
|
|
@ -111,9 +107,8 @@ struct DerivationBuildingGoal : public Goal
|
|||
*/
|
||||
std::string machineName;
|
||||
|
||||
DerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv,
|
||||
Worker & worker,
|
||||
BuildMode buildMode = bmNormal);
|
||||
DerivationBuildingGoal(
|
||||
const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal);
|
||||
~DerivationBuildingGoal();
|
||||
|
||||
void timedOut(Error && ex) override;
|
||||
|
|
@ -179,18 +174,16 @@ struct DerivationBuildingGoal : public Goal
|
|||
|
||||
void started();
|
||||
|
||||
Done done(
|
||||
BuildResult::Status status,
|
||||
SingleDrvOutputs builtOutputs = {},
|
||||
std::optional<Error> ex = {});
|
||||
Done done(BuildResult::Status status, SingleDrvOutputs builtOutputs = {}, std::optional<Error> ex = {});
|
||||
|
||||
void appendLogTailErrorMsg(std::string & msg);
|
||||
|
||||
StorePathSet exportReferences(const StorePathSet & storePaths);
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
JobCategory jobCategory() const override
|
||||
{
|
||||
return JobCategory::Build;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ struct InitialOutputStatus
|
|||
{
|
||||
StorePath path;
|
||||
PathStatus status;
|
||||
|
||||
/**
|
||||
* Valid in the store, and additionally non-corrupt if we are repairing
|
||||
*/
|
||||
|
|
@ -32,6 +33,7 @@ struct InitialOutputStatus
|
|||
{
|
||||
return status == PathStatus::Valid;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merely present, allowed to be corrupt
|
||||
*/
|
||||
|
|
@ -55,4 +57,4 @@ void runPostBuildHook(Store & store, Logger & logger, const StorePath & drvPath,
|
|||
*/
|
||||
std::string showKnownOutputs(Store & store, const Derivation & drv);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -15,11 +15,7 @@ namespace nix {
|
|||
using std::map;
|
||||
|
||||
/** Used internally */
|
||||
void runPostBuildHook(
|
||||
Store & store,
|
||||
Logger & logger,
|
||||
const StorePath & drvPath,
|
||||
const StorePathSet & outputPaths);
|
||||
void runPostBuildHook(Store & store, Logger & logger, const StorePath & drvPath, const StorePathSet & outputPaths);
|
||||
|
||||
/**
|
||||
* A goal for realising a single output of a derivation. Various sorts of
|
||||
|
|
@ -62,12 +58,18 @@ struct DerivationGoal : public Goal
|
|||
|
||||
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds;
|
||||
|
||||
DerivationGoal(const StorePath & drvPath, const Derivation & drv,
|
||||
const OutputName & wantedOutput, Worker & worker,
|
||||
DerivationGoal(
|
||||
const StorePath & drvPath,
|
||||
const Derivation & drv,
|
||||
const OutputName & wantedOutput,
|
||||
Worker & worker,
|
||||
BuildMode buildMode = bmNormal);
|
||||
~DerivationGoal() = default;
|
||||
|
||||
void timedOut(Error && ex) override { unreachable(); };
|
||||
void timedOut(Error && ex) override
|
||||
{
|
||||
unreachable();
|
||||
};
|
||||
|
||||
std::string key() override;
|
||||
|
||||
|
|
@ -100,14 +102,12 @@ struct DerivationGoal : public Goal
|
|||
|
||||
Co repairClosure();
|
||||
|
||||
Done done(
|
||||
BuildResult::Status status,
|
||||
SingleDrvOutputs builtOutputs = {},
|
||||
std::optional<Error> ex = {});
|
||||
Done done(BuildResult::Status status, SingleDrvOutputs builtOutputs = {}, std::optional<Error> ex = {});
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
JobCategory jobCategory() const override
|
||||
{
|
||||
return JobCategory::Administration;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -131,4 +131,4 @@ private:
|
|||
void commonInit();
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -20,7 +20,8 @@ class Worker;
|
|||
* 2. Substitute the corresponding output path
|
||||
* 3. Register the output info
|
||||
*/
|
||||
class DrvOutputSubstitutionGoal : public Goal {
|
||||
class DrvOutputSubstitutionGoal : public Goal
|
||||
{
|
||||
|
||||
/**
|
||||
* The drv output we're trying to substitute
|
||||
|
|
@ -28,7 +29,11 @@ class DrvOutputSubstitutionGoal : public Goal {
|
|||
DrvOutput id;
|
||||
|
||||
public:
|
||||
DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
DrvOutputSubstitutionGoal(
|
||||
const DrvOutput & id,
|
||||
Worker & worker,
|
||||
RepairFlag repair = NoRepair,
|
||||
std::optional<ContentAddress> ca = std::nullopt);
|
||||
|
||||
typedef void (DrvOutputSubstitutionGoal::*GoalState)();
|
||||
GoalState state;
|
||||
|
|
@ -36,15 +41,19 @@ public:
|
|||
Co init();
|
||||
Co realisationFetched(Goals waitees, std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub);
|
||||
|
||||
void timedOut(Error && ex) override { unreachable(); };
|
||||
void timedOut(Error && ex) override
|
||||
{
|
||||
unreachable();
|
||||
};
|
||||
|
||||
std::string key() override;
|
||||
|
||||
void handleEOF(Descriptor fd) override;
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
JobCategory jobCategory() const override
|
||||
{
|
||||
return JobCategory::Substitution;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -20,8 +20,9 @@ class Worker;
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;

struct CompareGoalPtrs {
bool operator() (const GoalPtr & a, const GoalPtr & b) const;
struct CompareGoalPtrs
{
bool operator()(const GoalPtr & a, const GoalPtr & b) const;
};
||||
/**
|
||||
|
|
@ -71,7 +72,7 @@ private:
|
|||
Goals waitees;
|
||||
|
||||
public:
|
||||
typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters} ExitCode;
|
||||
typedef enum { ecBusy, ecSuccess, ecFailed, ecNoSubstituters } ExitCode;
|
||||
|
||||
/**
|
||||
* Backlink to the worker.
|
||||
|
|
@ -114,22 +115,25 @@ public:
|
|||
* Suspend our goal and wait until we get `work`-ed again.
|
||||
* `co_await`-able by @ref Co.
|
||||
*/
|
||||
struct Suspend {};
|
||||
struct Suspend
|
||||
{};
|
||||
|
||||
/**
|
||||
* Return from the current coroutine and suspend our goal
|
||||
* if we're not busy anymore, or jump to the next coroutine
|
||||
* set to be executed/resumed.
|
||||
*/
|
||||
struct Return {};
|
||||
struct Return
|
||||
{};
|
||||
|
||||
/**
|
||||
* `co_return`-ing this will end the goal.
|
||||
* If you're not inside a coroutine, you can safely discard this.
|
||||
*/
|
||||
struct [[nodiscard]] Done {
|
||||
private:
|
||||
Done(){}
|
||||
struct [[nodiscard]] Done
|
||||
{
|
||||
private:
|
||||
Done() {}
|
||||
|
||||
friend Goal;
|
||||
};
|
||||
|
|
@ -183,18 +187,24 @@ public:
|
|||
*
|
||||
* @todo Support returning data natively
|
||||
*/
|
||||
struct [[nodiscard]] Co {
|
||||
struct [[nodiscard]] Co
|
||||
{
|
||||
/**
|
||||
* The underlying handle.
|
||||
*/
|
||||
handle_type handle;
|
||||
|
||||
explicit Co(handle_type handle) : handle(handle) {};
|
||||
void operator=(Co&&);
|
||||
Co(Co&& rhs);
|
||||
explicit Co(handle_type handle)
|
||||
: handle(handle) {};
|
||||
void operator=(Co &&);
|
||||
Co(Co && rhs);
|
||||
~Co();
|
||||
|
||||
bool await_ready() { return false; };
|
||||
bool await_ready()
|
||||
{
|
||||
return false;
|
||||
};
|
||||
|
||||
/**
|
||||
* When we `co_await` another `Co`-returning coroutine,
|
||||
* we tell the caller of `caller_coroutine.resume()` to switch to our coroutine (@ref handle).
|
||||
|
|
@ -215,21 +225,29 @@ public:
|
|||
* Used on initial suspend, does the same as `std::suspend_always`,
|
||||
* but asserts that everything has been set correctly.
|
||||
*/
|
||||
struct InitialSuspend {
|
||||
struct InitialSuspend
|
||||
{
|
||||
/**
|
||||
* Handle of coroutine that does the
|
||||
* initial suspend
|
||||
*/
|
||||
handle_type handle;
|
||||
|
||||
bool await_ready() { return false; };
|
||||
void await_suspend(handle_type handle_) {
|
||||
bool await_ready()
|
||||
{
|
||||
return false;
|
||||
};
|
||||
|
||||
void await_suspend(handle_type handle_)
|
||||
{
|
||||
handle = handle_;
|
||||
}
|
||||
void await_resume() {
|
||||
|
||||
void await_resume()
|
||||
{
|
||||
assert(handle);
|
||||
assert(handle.promise().goal); // goal must be set
|
||||
assert(handle.promise().goal->top_co); // top_co of goal must be set
|
||||
assert(handle.promise().goal); // goal must be set
|
||||
assert(handle.promise().goal->top_co); // top_co of goal must be set
|
||||
assert(handle.promise().goal->top_co->handle == handle); // top_co of goal must be us
|
||||
}
|
||||
};
|
||||
|
|
@ -238,7 +256,8 @@ public:
|
|||
* Promise type for coroutines defined using @ref Co.
|
||||
* Attached to coroutine handle.
|
||||
*/
|
||||
struct promise_type {
|
||||
struct promise_type
|
||||
{
|
||||
/**
|
||||
* Either this is who called us, or it is who we will tail-call.
|
||||
* It is what we "jump" to once we are done.
|
||||
|
|
@ -249,7 +268,7 @@ public:
|
|||
* The goal that we're a part of.
|
||||
* Set either in @ref Co::await_suspend or in constructor of @ref Goal.
|
||||
*/
|
||||
Goal* goal = nullptr;
|
||||
Goal * goal = nullptr;
|
||||
|
||||
/**
|
||||
* Is set to false when destructed to ensure we don't use a
|
||||
|
|
@ -260,8 +279,13 @@ public:
|
|||
/**
|
||||
* The awaiter used by @ref final_suspend.
|
||||
*/
|
||||
struct final_awaiter {
|
||||
bool await_ready() noexcept { return false; };
|
||||
struct final_awaiter
|
||||
{
|
||||
bool await_ready() noexcept
|
||||
{
|
||||
return false;
|
||||
};
|
||||
|
||||
/**
|
||||
* Here we execute our continuation, by passing it back to the caller.
|
||||
* C++ compiler will create code that takes that and executes it promptly.
|
||||
|
|
@ -269,7 +293,11 @@ public:
|
|||
* thus it must be destroyed.
|
||||
*/
|
||||
std::coroutine_handle<> await_suspend(handle_type h) noexcept;
|
||||
void await_resume() noexcept { assert(false); };
|
||||
|
||||
void await_resume() noexcept
|
||||
{
|
||||
assert(false);
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -283,13 +311,19 @@ public:
|
|||
* We use this opportunity to set the @ref goal field
|
||||
* and `top_co` field of @ref Goal.
|
||||
*/
|
||||
InitialSuspend initial_suspend() { return {}; };
|
||||
InitialSuspend initial_suspend()
|
||||
{
|
||||
return {};
|
||||
};
|
||||
|
||||
/**
|
||||
* Called on `co_return`. Creates @ref final_awaiter which
|
||||
* either jumps to continuation or suspends goal.
|
||||
*/
|
||||
final_awaiter final_suspend() noexcept { return {}; };
|
||||
final_awaiter final_suspend() noexcept
|
||||
{
|
||||
return {};
|
||||
};
|
||||
|
||||
/**
|
||||
* Does nothing, but provides an opportunity for
|
||||
|
|
@ -316,24 +350,33 @@ public:
|
|||
* the continuation of the new continuation. Thus, the continuation
|
||||
* passed to @ref return_value must not have a continuation set.
|
||||
*/
|
||||
void return_value(Co&&);
|
||||
void return_value(Co &&);
|
||||
|
||||
/**
|
||||
* If an exception is thrown inside a coroutine,
|
||||
* we re-throw it in the context of the "resumer" of the continuation.
|
||||
*/
|
||||
void unhandled_exception() { throw; };
|
||||
void unhandled_exception()
|
||||
{
|
||||
throw;
|
||||
};
|
||||
|
||||
/**
|
||||
* Allows awaiting a @ref Co.
|
||||
*/
|
||||
Co&& await_transform(Co&& co) { return static_cast<Co&&>(co); }
|
||||
Co && await_transform(Co && co)
|
||||
{
|
||||
return static_cast<Co &&>(co);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allows awaiting a @ref Suspend.
|
||||
* Always suspends.
|
||||
*/
|
||||
std::suspend_always await_transform(Suspend) { return {}; };
|
||||
std::suspend_always await_transform(Suspend)
|
||||
{
|
||||
return {};
|
||||
};
|
||||
};
|
||||
|
||||
protected:
|
||||
|
|
@ -354,7 +397,7 @@ protected:
|
|||
Done amDone(ExitCode result, std::optional<Error> ex = {});
|
||||
|
||||
public:
|
||||
virtual void cleanup() { }
|
||||
virtual void cleanup() {}
|
||||
|
||||
/**
|
||||
* Hack to say that this goal should not log `ex`, but instead keep
|
||||
|
|
@ -373,7 +416,8 @@ public:
|
|||
std::optional<Error> ex;
|
||||
|
||||
Goal(Worker & worker, Co init)
|
||||
: worker(worker), top_co(std::move(init))
|
||||
: worker(worker)
|
||||
, top_co(std::move(init))
|
||||
{
|
||||
// top_co shouldn't have a goal already, should be nullptr.
|
||||
assert(!top_co->handle.promise().goal);
|
||||
|
|
@ -430,9 +474,10 @@ protected:
|
|||
|
||||
void addToWeakGoals(WeakGoals & goals, GoalPtr p);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
||||
template<typename... ArgTypes>
|
||||
struct std::coroutine_traits<nix::Goal::Co, ArgTypes...> {
|
||||
struct std::coroutine_traits<nix::Goal::Co, ArgTypes...>
|
||||
{
|
||||
using promise_type = nix::Goal::promise_type;
|
||||
};
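The coroutine_traits specialization above is what lets a member function returning Goal::Co be written as a C++20 coroutine. Roughly, a goal body has the following shape; this is a sketch only, with hypothetical goal, flag, and helper names (MyGoal, failed, doMoreWork), while Suspend, amDone, ecFailed, ecSuccess, and the co_await/co_return behaviour come from the header shown in this diff:

// Sketch only: shape of a Co-returning goal body, not a real goal.
Goal::Co MyGoal::init()
{
    // ... kick off some work, register waitees ...
    co_await Suspend{};         // give up control until the worker wakes us again
    if (failed)
        co_return amDone(ecFailed);
    co_await doMoreWork();      // tail-chain into another Co-returning coroutine
    co_return amDone(ecSuccess);
}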
|
||||
|
|
|
|||
|
|
@ -33,24 +33,28 @@ struct PathSubstitutionGoal : public Goal
|
|||
*/
|
||||
std::thread thr;
|
||||
|
||||
std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
|
||||
maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;
|
||||
std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions, maintainRunningSubstitutions,
|
||||
maintainExpectedNar, maintainExpectedDownload;
|
||||
|
||||
/**
|
||||
* Content address for recomputing store path
|
||||
*/
|
||||
std::optional<ContentAddress> ca;
|
||||
|
||||
Done done(
|
||||
ExitCode result,
|
||||
BuildResult::Status status,
|
||||
std::optional<std::string> errorMsg = {});
|
||||
Done done(ExitCode result, BuildResult::Status status, std::optional<std::string> errorMsg = {});
|
||||
|
||||
public:
|
||||
PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
PathSubstitutionGoal(
|
||||
const StorePath & storePath,
|
||||
Worker & worker,
|
||||
RepairFlag repair = NoRepair,
|
||||
std::optional<ContentAddress> ca = std::nullopt);
|
||||
~PathSubstitutionGoal();
|
||||
|
||||
void timedOut(Error && ex) override { unreachable(); };
|
||||
void timedOut(Error && ex) override
|
||||
{
|
||||
unreachable();
|
||||
};
|
||||
|
||||
/**
|
||||
* We prepend "a$" to the key name to ensure substitution goals
|
||||
|
|
@ -66,7 +70,8 @@ public:
|
|||
*/
|
||||
Co init();
|
||||
Co gotInfo();
|
||||
Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
|
||||
Co tryToRun(
|
||||
StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
|
||||
Co finished();
|
||||
|
||||
/**
|
||||
|
|
@ -78,9 +83,10 @@ public:
|
|||
/* Called by destructor, can't be overridden */
|
||||
void cleanup() override final;
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
JobCategory jobCategory() const override
|
||||
{
|
||||
return JobCategory::Substitution;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -205,11 +205,10 @@ public:
|
|||
*/
|
||||
private:
|
||||
template<class G, typename... Args>
|
||||
std::shared_ptr<G> initGoalIfNeeded(std::weak_ptr<G> & goal_weak, Args && ...args);
|
||||
std::shared_ptr<G> initGoalIfNeeded(std::weak_ptr<G> & goal_weak, Args &&... args);
|
||||
|
||||
std::shared_ptr<DerivationTrampolineGoal> makeDerivationTrampolineGoal(
|
||||
ref<const SingleDerivedPath> drvReq,
|
||||
const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
|
||||
ref<const SingleDerivedPath> drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
|
||||
|
||||
public:
|
||||
std::shared_ptr<DerivationTrampolineGoal> makeDerivationTrampolineGoal(
|
||||
|
|
@ -219,21 +218,24 @@ public:
|
|||
BuildMode buildMode = bmNormal);
|
||||
|
||||
std::shared_ptr<DerivationGoal> makeDerivationGoal(
|
||||
const StorePath & drvPath, const Derivation & drv,
|
||||
const OutputName & wantedOutput, BuildMode buildMode = bmNormal);
|
||||
const StorePath & drvPath,
|
||||
const Derivation & drv,
|
||||
const OutputName & wantedOutput,
|
||||
BuildMode buildMode = bmNormal);
|
||||
|
||||
/**
|
||||
* @ref DerivationBuildingGoal "derivation goal"
|
||||
*/
|
||||
std::shared_ptr<DerivationBuildingGoal> makeDerivationBuildingGoal(
|
||||
const StorePath & drvPath, const Derivation & drv,
|
||||
BuildMode buildMode = bmNormal);
|
||||
std::shared_ptr<DerivationBuildingGoal>
|
||||
makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal);
|
||||
|
||||
/**
|
||||
* @ref PathSubstitutionGoal "substitution goal"
|
||||
*/
|
||||
std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(
|
||||
const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(
|
||||
const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
|
||||
/**
|
||||
* Make a goal corresponding to the `DerivedPath`.
|
||||
|
|
@ -268,8 +270,11 @@ public:
|
|||
* Registers a running child process. `inBuildSlot` means that
|
||||
* the process counts towards the jobs limit.
|
||||
*/
|
||||
void childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
|
||||
bool inBuildSlot, bool respectTimeouts);
|
||||
void childStarted(
|
||||
GoalPtr goal,
|
||||
const std::set<MuxablePipePollState::CommChannel> & channels,
|
||||
bool inBuildSlot,
|
||||
bool respectTimeouts);
|
||||
|
||||
/**
|
||||
* Unregisters a running child process. `wakeSleepers` should be
|
||||
|
|
@ -343,10 +348,11 @@ public:
|
|||
void updateProgress()
|
||||
{
|
||||
actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
|
||||
actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
|
||||
actSubstitutions.progress(
|
||||
doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
|
||||
act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
|
||||
act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -20,7 +20,8 @@ struct RegisterBuiltinBuilder
|
|||
{
|
||||
typedef std::map<std::string, BuiltinBuilder> BuiltinBuilders;
|
||||
|
||||
static BuiltinBuilders & builtinBuilders() {
|
||||
static BuiltinBuilders & builtinBuilders()
|
||||
{
|
||||
static BuiltinBuilders builders;
|
||||
return builders;
|
||||
}
|
||||
|
|
@ -31,4 +32,4 @@ struct RegisterBuiltinBuilder
|
|||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -8,11 +8,18 @@ namespace nix {
|
|||
/**
|
||||
* Think of this as a "store level package attrset", but stripped down to no more than the needs of buildenv.
|
||||
*/
|
||||
struct Package {
|
||||
struct Package
|
||||
{
|
||||
Path path;
|
||||
bool active;
|
||||
int priority;
|
||||
Package(const Path & path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
|
||||
|
||||
Package(const Path & path, bool active, int priority)
|
||||
: path{path}
|
||||
, active{active}
|
||||
, priority{priority}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
class BuildEnvFileConflictError : public Error
|
||||
|
|
@ -22,27 +29,23 @@ public:
|
|||
const Path fileB;
|
||||
int priority;
|
||||
|
||||
BuildEnvFileConflictError(
|
||||
const Path fileA,
|
||||
const Path fileB,
|
||||
int priority
|
||||
)
|
||||
BuildEnvFileConflictError(const Path fileA, const Path fileB, int priority)
|
||||
: Error(
|
||||
"Unable to build profile. There is a conflict for the following files:\n"
|
||||
"\n"
|
||||
" %1%\n"
|
||||
" %2%",
|
||||
fileA,
|
||||
fileB
|
||||
)
|
||||
"Unable to build profile. There is a conflict for the following files:\n"
|
||||
"\n"
|
||||
" %1%\n"
|
||||
" %2%",
|
||||
fileA,
|
||||
fileB)
|
||||
, fileA(fileA)
|
||||
, fileB(fileB)
|
||||
, priority(priority)
|
||||
{}
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::vector<Package> Packages;
|
||||
|
||||
void buildProfile(const Path & out, Packages && pkgs);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -15,14 +15,15 @@ namespace nix {

/* protocol-agnostic templates */

#define COMMON_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T CommonProto::Serialise< T >::read(const StoreDirConfig & store, CommonProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<CommonProto, T >::read(store, conn); \
} \
TEMPLATE void CommonProto::Serialise< T >::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<CommonProto, T >::write(store, conn, t); \
#define COMMON_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T CommonProto::Serialise<T>::read(const StoreDirConfig & store, CommonProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<CommonProto, T>::read(store, conn); \
} \
TEMPLATE void CommonProto::Serialise<T>::write( \
const StoreDirConfig & store, CommonProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<CommonProto, T>::write(store, conn, t); \
}

#define COMMA_ ,

@ -30,12 +31,9 @@ COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T COMMA_ typename Compare>, std::set<T COMMA_ Compare>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>)

COMMON_USE_LENGTH_PREFIX_SERIALISER(
template<typename K COMMA_ typename V>,
std::map<K COMMA_ V>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename K COMMA_ typename V>, std::map<K COMMA_ V>)
#undef COMMA_

/* protocol-specific templates */

}
} // namespace nix
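For reference, instantiating the macro above as COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>) expands to roughly the following pair of definitions (approximate expansion for illustration, whitespace aside):

// Approximate expansion of the macro above, for illustration only.
template<typename T>
std::vector<T> CommonProto::Serialise<std::vector<T>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
{
    return LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::read(store, conn);
}

template<typename T>
void CommonProto::Serialise<std::vector<T>>::write(
    const StoreDirConfig & store, CommonProto::WriteConn conn, const std::vector<T> & t)
{
    LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::write(store, conn, t);
}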
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@ struct ContentAddress;
|
|||
struct DrvOutput;
|
||||
struct Realisation;
|
||||
|
||||
|
||||
/**
|
||||
* Shared serializers between the worker protocol, serve protocol, and a
|
||||
* few others.
|
||||
|
|
@ -28,7 +27,8 @@ struct CommonProto
|
|||
* A unidirectional read connection, to be used by the read half of the
|
||||
* canonical serializers below.
|
||||
*/
|
||||
struct ReadConn {
|
||||
struct ReadConn
|
||||
{
|
||||
Source & from;
|
||||
};
|
||||
|
||||
|
|
@ -36,7 +36,8 @@ struct CommonProto
|
|||
* A unidirectional write connection, to be used by the write half of the
|
||||
* canonical serializers below.
|
||||
*/
|
||||
struct WriteConn {
|
||||
struct WriteConn
|
||||
{
|
||||
Sink & to;
|
||||
};
|
||||
|
||||
|
|
@ -54,10 +55,10 @@ struct CommonProto
|
|||
}
|
||||
};
|
||||
|
||||
#define DECLARE_COMMON_SERIALISER(T) \
|
||||
struct CommonProto::Serialise< T > \
|
||||
{ \
|
||||
static T read(const StoreDirConfig & store, CommonProto::ReadConn conn); \
|
||||
#define DECLARE_COMMON_SERIALISER(T) \
|
||||
struct CommonProto::Serialise<T> \
|
||||
{ \
|
||||
static T read(const StoreDirConfig & store, CommonProto::ReadConn conn); \
|
||||
static void write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & str); \
|
||||
}
|
||||
|
||||
|
|
@ -103,4 +104,4 @@ DECLARE_COMMON_SERIALISER(std::optional<StorePath>);
|
|||
template<>
|
||||
DECLARE_COMMON_SERIALISER(std::optional<ContentAddress>);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -13,16 +13,18 @@ struct CommonSSHStoreConfig : virtual StoreConfig
|
|||
|
||||
CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params);
|
||||
|
||||
const Setting<Path> sshKey{this, "", "ssh-key",
|
||||
"Path to the SSH private key used to authenticate to the remote machine."};
|
||||
const Setting<Path> sshKey{
|
||||
this, "", "ssh-key", "Path to the SSH private key used to authenticate to the remote machine."};
|
||||
|
||||
const Setting<std::string> sshPublicHostKey{this, "", "base64-ssh-public-host-key",
|
||||
"The public host key of the remote machine."};
|
||||
const Setting<std::string> sshPublicHostKey{
|
||||
this, "", "base64-ssh-public-host-key", "The public host key of the remote machine."};
|
||||
|
||||
const Setting<bool> compress{this, false, "compress",
|
||||
"Whether to enable SSH compression."};
|
||||
const Setting<bool> compress{this, false, "compress", "Whether to enable SSH compression."};
|
||||
|
||||
const Setting<std::string> remoteStore{this, "", "remote-store",
|
||||
const Setting<std::string> remoteStore{
|
||||
this,
|
||||
"",
|
||||
"remote-store",
|
||||
R"(
|
||||
[Store URL](@docroot@/store/types/index.md#store-url-format)
|
||||
to be used on the remote machine. The default is `auto`
|
||||
|
|
@ -54,9 +56,7 @@ struct CommonSSHStoreConfig : virtual StoreConfig
|
|||
*
|
||||
* See that constructor for details on the remaining two arguments.
|
||||
*/
|
||||
SSHMaster createSSHMaster(
|
||||
bool useMaster,
|
||||
Descriptor logFD = INVALID_DESCRIPTOR) const;
|
||||
SSHMaster createSSHMaster(bool useMaster, Descriptor logFD = INVALID_DESCRIPTOR) const;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@@ -73,8 +73,8 @@ struct ContentAddressMethod

Raw raw;

bool operator ==(const ContentAddressMethod &) const = default;
auto operator <=>(const ContentAddressMethod &) const = default;
bool operator==(const ContentAddressMethod &) const = default;
auto operator<=>(const ContentAddressMethod &) const = default;

MAKE_WRAPPER_CONSTRUCTOR(ContentAddressMethod);

@@ -132,7 +132,6 @@ struct ContentAddressMethod
FileIngestionMethod getFileIngestionMethod() const;
};

/*
* Mini content address
*/

@@ -161,8 +160,8 @@ struct ContentAddress
*/
Hash hash;

bool operator ==(const ContentAddress &) const = default;
auto operator <=>(const ContentAddress &) const = default;
bool operator==(const ContentAddress &) const = default;
auto operator<=>(const ContentAddress &) const = default;

/**
* Compute the content-addressability assertion

@@ -184,7 +183,6 @@ struct ContentAddress
*/
std::string renderContentAddress(std::optional<ContentAddress> ca);

/*
* Full content address
*

@@ -221,9 +219,9 @@ struct StoreReferences
*/
size_t size() const;

bool operator ==(const StoreReferences &) const = default;
bool operator==(const StoreReferences &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const StoreReferences &) const = default;
// auto operator <=>(const StoreReferences &) const = default;
};

// This matches the additional info that we need for makeTextPath

@@ -240,9 +238,9 @@ struct TextInfo
*/
StorePathSet references;

bool operator ==(const TextInfo &) const = default;
bool operator==(const TextInfo &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const TextInfo &) const = default;
// auto operator <=>(const TextInfo &) const = default;
};

struct FixedOutputInfo

@@ -262,9 +260,9 @@ struct FixedOutputInfo
*/
StoreReferences references;

bool operator ==(const FixedOutputInfo &) const = default;
bool operator==(const FixedOutputInfo &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const FixedOutputInfo &) const = default;
// auto operator <=>(const FixedOutputInfo &) const = default;
};

/**

@@ -274,16 +272,13 @@ struct FixedOutputInfo
*/
struct ContentAddressWithReferences
{
typedef std::variant<
TextInfo,
FixedOutputInfo
> Raw;
typedef std::variant<TextInfo, FixedOutputInfo> Raw;

Raw raw;

bool operator ==(const ContentAddressWithReferences &) const = default;
bool operator==(const ContentAddressWithReferences &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=>(const ContentAddressWithReferences &) const = default;
// auto operator <=>(const ContentAddressWithReferences &) const = default;

MAKE_WRAPPER_CONSTRUCTOR(ContentAddressWithReferences);

@@ -306,12 +301,11 @@ struct ContentAddressWithReferences
* *partial function* and exceptions will be thrown for invalid
* combinations.
*/
static ContentAddressWithReferences fromParts(
ContentAddressMethod method, Hash hash, StoreReferences refs);
static ContentAddressWithReferences fromParts(ContentAddressMethod method, Hash hash, StoreReferences refs);

ContentAddressMethod getMethod() const;

Hash getHash() const;
};

}
} // namespace nix
@@ -8,11 +8,6 @@ namespace nix::daemon {

enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };

void processConnection(
ref<Store> store,
FdSource && from,
FdSink && to,
TrustedFlag trusted,
RecursiveFlag recursive);
void processConnection(ref<Store> store, FdSource && from, FdSink && to, TrustedFlag trusted, RecursiveFlag recursive);

}
} // namespace nix::daemon
@@ -201,7 +201,7 @@ struct DerivationOptions
bool useUidRange(const BasicDerivation & drv) const;
};

};
}; // namespace nix

JSON_IMPL(DerivationOptions);
JSON_IMPL(DerivationOptions::OutputChecks)
@@ -31,8 +31,8 @@ struct DerivationOutput
{
StorePath path;

bool operator == (const InputAddressed &) const = default;
auto operator <=> (const InputAddressed &) const = default;
bool operator==(const InputAddressed &) const = default;
auto operator<=>(const InputAddressed &) const = default;
};

/**

@@ -56,8 +56,8 @@ struct DerivationOutput
*/
StorePath path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;

bool operator == (const CAFixed &) const = default;
auto operator <=> (const CAFixed &) const = default;
bool operator==(const CAFixed &) const = default;
auto operator<=>(const CAFixed &) const = default;
};

/**

@@ -77,17 +77,18 @@ struct DerivationOutput
*/
HashAlgorithm hashAlgo;

bool operator == (const CAFloating &) const = default;
auto operator <=> (const CAFloating &) const = default;
bool operator==(const CAFloating &) const = default;
auto operator<=>(const CAFloating &) const = default;
};

/**
* Input-addressed output which depends on a (CA) derivation whose hash
* isn't known yet.
*/
struct Deferred {
bool operator == (const Deferred &) const = default;
auto operator <=> (const Deferred &) const = default;
struct Deferred
{
bool operator==(const Deferred &) const = default;
auto operator<=>(const Deferred &) const = default;
};

/**

@@ -106,22 +107,16 @@ struct DerivationOutput
*/
HashAlgorithm hashAlgo;

bool operator == (const Impure &) const = default;
auto operator <=> (const Impure &) const = default;
bool operator==(const Impure &) const = default;
auto operator<=>(const Impure &) const = default;
};

typedef std::variant<
InputAddressed,
CAFixed,
CAFloating,
Deferred,
Impure
> Raw;
typedef std::variant<InputAddressed, CAFixed, CAFloating, Deferred, Impure> Raw;

Raw raw;

bool operator == (const DerivationOutput &) const = default;
auto operator <=> (const DerivationOutput &) const = default;
bool operator==(const DerivationOutput &) const = default;
auto operator<=>(const DerivationOutput &) const = default;

MAKE_WRAPPER_CONSTRUCTOR(DerivationOutput);

@@ -136,12 +131,10 @@ struct DerivationOutput
* the safer interface provided by
* BasicDerivation::outputsAndOptPaths
*/
std::optional<StorePath> path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;
std::optional<StorePath>
path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;

nlohmann::json toJSON(
const StoreDirConfig & store,
std::string_view drvName,
OutputNameView outputName) const;
nlohmann::json toJSON(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;
/**
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/

@@ -161,8 +154,7 @@ typedef std::map<std::string, DerivationOutput> DerivationOutputs;
* path in which it would be written. To calculate values of these
* types, see the corresponding functions in BasicDerivation.
*/
typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>>
DerivationOutputsAndOptPaths;
typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>> DerivationOutputsAndOptPaths;

/**
* For inputs that are sub-derivations, we specify exactly which

@@ -170,26 +162,29 @@ typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePat
*/
typedef std::map<StorePath, StringSet> DerivationInputs;

struct DerivationType {
struct DerivationType
{
/**
* Input-addressed derivation types
*/
struct InputAddressed {
struct InputAddressed
{
/**
* True iff the derivation type can't be determined statically,
* for instance because it (transitively) depends on a content-addressed
* derivation.
*/
*/
bool deferred;

bool operator == (const InputAddressed &) const = default;
auto operator <=> (const InputAddressed &) const = default;
bool operator==(const InputAddressed &) const = default;
auto operator<=>(const InputAddressed &) const = default;
};

/**
* Content-addressing derivation types
*/
struct ContentAddressed {
struct ContentAddressed
{
/**
* Whether the derivation should be built safely inside a sandbox.
*/

@@ -207,8 +202,8 @@ struct DerivationType {
*/
bool fixed;

bool operator == (const ContentAddressed &) const = default;
auto operator <=> (const ContentAddressed &) const = default;
bool operator==(const ContentAddressed &) const = default;
auto operator<=>(const ContentAddressed &) const = default;
};

/**

@@ -217,21 +212,18 @@ struct DerivationType {
* This is similar at build-time to the content addressed, not standboxed, not fixed
* type, but has some restrictions on its usage.
*/
struct Impure {
bool operator == (const Impure &) const = default;
auto operator <=> (const Impure &) const = default;
struct Impure
{
bool operator==(const Impure &) const = default;
auto operator<=>(const Impure &) const = default;
};

typedef std::variant<
InputAddressed,
ContentAddressed,
Impure
> Raw;
typedef std::variant<InputAddressed, ContentAddressed, Impure> Raw;

Raw raw;

bool operator == (const DerivationType &) const = default;
auto operator <=> (const DerivationType &) const = default;
bool operator==(const DerivationType &) const = default;
auto operator<=>(const DerivationType &) const = default;

MAKE_WRAPPER_CONSTRUCTOR(DerivationType);

@@ -300,9 +292,9 @@ struct BasicDerivation
BasicDerivation() = default;
BasicDerivation(BasicDerivation &&) = default;
BasicDerivation(const BasicDerivation &) = default;
BasicDerivation& operator=(BasicDerivation &&) = default;
BasicDerivation& operator=(const BasicDerivation &) = default;
virtual ~BasicDerivation() { };
BasicDerivation & operator=(BasicDerivation &&) = default;
BasicDerivation & operator=(const BasicDerivation &) = default;
virtual ~BasicDerivation() {};

bool isBuiltin() const;

@@ -331,9 +323,9 @@ struct BasicDerivation
*/
void applyRewrites(const StringMap & rewrites);

bool operator == (const BasicDerivation &) const = default;
bool operator==(const BasicDerivation &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=> (const BasicDerivation &) const = default;
// auto operator <=> (const BasicDerivation &) const = default;
};

class Store;

@@ -348,7 +340,9 @@ struct Derivation : BasicDerivation
/**
* Print a derivation.
*/
std::string unparse(const StoreDirConfig & store, bool maskOutputs,
std::string unparse(
const StoreDirConfig & store,
bool maskOutputs,
DerivedPathMap<StringSet>::ChildNode::Map * actualInputs = nullptr) const;

/**

@@ -369,7 +363,8 @@ struct Derivation : BasicDerivation
*/
std::optional<BasicDerivation> tryResolve(
Store & store,
std::function<std::optional<StorePath>(ref<const SingleDerivedPath> drvPath, const std::string & outputName)> queryResolutionChain) const;
std::function<std::optional<StorePath>(ref<const SingleDerivedPath> drvPath, const std::string & outputName)>
queryResolutionChain) const;

/**
* Check that the derivation is valid and does not present any

@@ -382,8 +377,16 @@ struct Derivation : BasicDerivation
void checkInvariants(Store & store, const StorePath & drvPath) const;

Derivation() = default;
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }

Derivation(const BasicDerivation & bd)
: BasicDerivation(bd)
{
}

Derivation(BasicDerivation && bd)
: BasicDerivation(std::move(bd))
{
}

nlohmann::json toJSON(const StoreDirConfig & store) const;
static Derivation fromJSON(

@@ -391,21 +394,17 @@ struct Derivation : BasicDerivation
const nlohmann::json & json,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);

bool operator == (const Derivation &) const = default;
bool operator==(const Derivation &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
//auto operator <=> (const Derivation &) const = default;
// auto operator <=> (const Derivation &) const = default;
};

class Store;

/**
* Write a derivation to the Nix store, and return its path.
*/
StorePath writeDerivation(Store & store,
const Derivation & drv,
RepairFlag repair = NoRepair,
bool readOnly = false);
StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false);

/**
* Read a derivation from a file.

@@ -432,7 +431,6 @@ bool isDerivation(std::string_view fileName);
*/
std::string outputPathName(std::string_view drvName, OutputNameView outputName);

/**
* The hashes modulo of a derivation.
*

@@ -440,7 +438,8 @@ std::string outputPathName(std::string_view drvName, OutputNameView outputName);
* derivations (fixed-output or not) will have a different hash for each
* output.
*/
struct DrvHash {
struct DrvHash
{
/**
* Map from output names to hashes
*/

@@ -466,7 +465,7 @@ struct DrvHash {
Kind kind;
};

void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;
void operator|=(DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;

/**
* Returns hashes with the details of fixed-output subderivations

@@ -526,4 +525,4 @@ void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDeriva
*/
std::string hashPlaceholder(const OutputNameView outputName);

}
} // namespace nix
@@ -28,11 +28,13 @@ namespace nix {
* "optional" types.
*/
template<typename V>
struct DerivedPathMap {
struct DerivedPathMap
{
/**
* A child node (non-root node).
*/
struct ChildNode {
struct ChildNode
{
/**
* Value of this child node.
*

@@ -50,7 +52,7 @@ struct DerivedPathMap {
*/
Map childMap;

bool operator == (const ChildNode &) const noexcept;
bool operator==(const ChildNode &) const noexcept;

// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
// decltype(std::declval<V>() <=> std::declval<V>())

@@ -67,7 +69,7 @@ struct DerivedPathMap {
*/
Map map;

bool operator == (const DerivedPathMap &) const = default;
bool operator==(const DerivedPathMap &) const = default;

// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
// auto operator <=> (const DerivedPathMap &) const noexcept;

@@ -94,8 +96,7 @@ struct DerivedPathMap {
};

template<>
bool DerivedPathMap<StringSet>::ChildNode::operator == (
const DerivedPathMap<StringSet>::ChildNode &) const noexcept;
bool DerivedPathMap<StringSet>::ChildNode::operator==(const DerivedPathMap<StringSet>::ChildNode &) const noexcept;

// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
#if 0

@@ -110,4 +111,4 @@ inline auto DerivedPathMap<StringSet>::operator <=> (const DerivedPathMap<String
extern template struct DerivedPathMap<StringSet>::ChildNode;
extern template struct DerivedPathMap<StringSet>;

}
} // namespace nix
@@ -24,15 +24,16 @@ class Store;
* cannot be simplified further. Since they are opaque, they cannot be
* built, but they can fetched.
*/
struct DerivedPathOpaque {
struct DerivedPathOpaque
{
StorePath path;

std::string to_string(const StoreDirConfig & store) const;
static DerivedPathOpaque parse(const StoreDirConfig & store, std::string_view);
nlohmann::json toJSON(const StoreDirConfig & store) const;

bool operator == (const DerivedPathOpaque &) const = default;
auto operator <=> (const DerivedPathOpaque &) const = default;
bool operator==(const DerivedPathOpaque &) const = default;
auto operator<=>(const DerivedPathOpaque &) const = default;
};

struct SingleDerivedPath;

@@ -44,7 +45,8 @@ struct SingleDerivedPath;
* evaluated by building the derivation, and then taking the resulting output
* path of the given output name.
*/
struct SingleDerivedPathBuilt {
struct SingleDerivedPathBuilt
{
ref<const SingleDerivedPath> drvPath;
OutputName output;

@@ -74,19 +76,17 @@ struct SingleDerivedPathBuilt {
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static SingleDerivedPathBuilt parse(
const StoreDirConfig & store, ref<const SingleDerivedPath> drvPath,
const StoreDirConfig & store,
ref<const SingleDerivedPath> drvPath,
OutputNameView outputs,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
nlohmann::json toJSON(Store & store) const;

bool operator == (const SingleDerivedPathBuilt &) const noexcept;
std::strong_ordering operator <=> (const SingleDerivedPathBuilt &) const noexcept;
bool operator==(const SingleDerivedPathBuilt &) const noexcept;
std::strong_ordering operator<=>(const SingleDerivedPathBuilt &) const noexcept;
};

using _SingleDerivedPathRaw = std::variant<
DerivedPathOpaque,
SingleDerivedPathBuilt
>;
using _SingleDerivedPathRaw = std::variant<DerivedPathOpaque, SingleDerivedPathBuilt>;

/**
* A "derived path" is a very simple sort of expression (not a Nix

@@ -99,19 +99,21 @@ using _SingleDerivedPathRaw = std::variant<
* - built, in which case it is a pair of a derivation path and an
* output name.
*/
struct SingleDerivedPath : _SingleDerivedPathRaw {
struct SingleDerivedPath : _SingleDerivedPathRaw
{
using Raw = _SingleDerivedPathRaw;
using Raw::Raw;

using Opaque = DerivedPathOpaque;
using Built = SingleDerivedPathBuilt;

inline const Raw & raw() const {
inline const Raw & raw() const
{
return static_cast<const Raw &>(*this);
}

bool operator == (const SingleDerivedPath &) const = default;
auto operator <=> (const SingleDerivedPath &) const = default;
bool operator==(const SingleDerivedPath &) const = default;
auto operator<=>(const SingleDerivedPath &) const = default;

/**
* Get the store path this is ultimately derived from (by realising

@@ -156,7 +158,7 @@ struct SingleDerivedPath : _SingleDerivedPathRaw {

static inline ref<SingleDerivedPath> makeConstantStorePathRef(StorePath drvPath)
{
return make_ref<SingleDerivedPath>(SingleDerivedPath::Opaque { drvPath });
return make_ref<SingleDerivedPath>(SingleDerivedPath::Opaque{drvPath});
}

/**

@@ -171,7 +173,8 @@ static inline ref<SingleDerivedPath> makeConstantStorePathRef(StorePath drvPath)
* evaluate to single values. Perhaps this should have just a single
* output name.
*/
struct DerivedPathBuilt {
struct DerivedPathBuilt
{
ref<const SingleDerivedPath> drvPath;
OutputsSpec outputs;

@@ -201,20 +204,18 @@ struct DerivedPathBuilt {
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DerivedPathBuilt parse(
const StoreDirConfig & store, ref<const SingleDerivedPath>,
const StoreDirConfig & store,
ref<const SingleDerivedPath>,
std::string_view,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
nlohmann::json toJSON(Store & store) const;

bool operator == (const DerivedPathBuilt &) const noexcept;
bool operator==(const DerivedPathBuilt &) const noexcept;
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
bool operator < (const DerivedPathBuilt &) const noexcept;
bool operator<(const DerivedPathBuilt &) const noexcept;
};

using _DerivedPathRaw = std::variant<
DerivedPathOpaque,
DerivedPathBuilt
>;
using _DerivedPathRaw = std::variant<DerivedPathOpaque, DerivedPathBuilt>;

/**
* A "derived path" is a very simple sort of expression that evaluates

@@ -226,20 +227,22 @@ using _DerivedPathRaw = std::variant<
* - built, in which case it is a pair of a derivation path and some
* output names.
*/
struct DerivedPath : _DerivedPathRaw {
struct DerivedPath : _DerivedPathRaw
{
using Raw = _DerivedPathRaw;
using Raw::Raw;

using Opaque = DerivedPathOpaque;
using Built = DerivedPathBuilt;

inline const Raw & raw() const {
inline const Raw & raw() const
{
return static_cast<const Raw &>(*this);
}

bool operator == (const DerivedPath &) const = default;
bool operator==(const DerivedPath &) const = default;
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
//auto operator <=> (const DerivedPath &) const = default;
// auto operator <=> (const DerivedPath &) const = default;

/**
* Get the store path this is ultimately derived from (by realising

@@ -300,6 +303,5 @@ typedef std::vector<DerivedPath> DerivedPaths;
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
void drvRequireExperiment(
const SingleDerivedPath & drv,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
}
const SingleDerivedPath & drv, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
} // namespace nix
@@ -38,7 +38,10 @@ class DownstreamPlaceholder
/**
* Newtype constructor
*/
DownstreamPlaceholder(Hash hash) : hash(hash) { }
DownstreamPlaceholder(Hash hash)
: hash(hash)
{
}

public:
/**

@@ -88,4 +91,4 @@ public:
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};

}
} // namespace nix
@@ -14,14 +14,15 @@ namespace nix {

struct FileTransferSettings : Config
{
Setting<bool> enableHttp2{this, true, "http2",
"Whether to enable HTTP/2 support."};
Setting<bool> enableHttp2{this, true, "http2", "Whether to enable HTTP/2 support."};

Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
"String appended to the user agent in HTTP requests."};
Setting<std::string> userAgentSuffix{
this, "", "user-agent-suffix", "String appended to the user agent in HTTP requests."};

Setting<size_t> httpConnections{
this, 25, "http-connections",
this,
25,
"http-connections",
R"(
The maximum number of parallel TCP connections used to fetch
files from binary caches and by other downloads. It defaults

@@ -30,7 +31,9 @@ struct FileTransferSettings : Config
{"binary-caches-parallel-connections"}};

Setting<unsigned long> connectTimeout{
this, 5, "connect-timeout",
this,
5,
"connect-timeout",
R"(
The timeout (in seconds) for establishing connections in the
binary cache substituter. It corresponds to `curl`’s

@@ -38,17 +41,22 @@ struct FileTransferSettings : Config
)"};

Setting<unsigned long> stalledDownloadTimeout{
this, 300, "stalled-download-timeout",
this,
300,
"stalled-download-timeout",
R"(
The timeout (in seconds) for receiving data from servers
during download. Nix cancels idle downloads after this
timeout's duration.
)"};

Setting<unsigned int> tries{this, 5, "download-attempts",
"The number of times Nix attempts to download a file before giving up."};
Setting<unsigned int> tries{
this, 5, "download-attempts", "The number of times Nix attempts to download a file before giving up."};

Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
Setting<size_t> downloadBufferSize{
this,
64 * 1024 * 1024,
"download-buffer-size",
R"(
The size of Nix's internal download buffer in bytes during `curl` transfers. If data is
not processed quickly enough to exceed the size of this buffer, downloads may stall.

@@ -77,7 +85,10 @@ struct FileTransferRequest
std::function<void(std::string_view data)> dataCallback;

FileTransferRequest(std::string_view uri)
: uri(uri), parentAct(getCurActivity()) { }
: uri(uri)
, parentAct(getCurActivity())
{
}

std::string verb() const
{

@@ -122,15 +133,14 @@ class Store;

struct FileTransfer
{
virtual ~FileTransfer() { }
virtual ~FileTransfer() {}

/**
* Enqueue a data transfer request, returning a future to the result of
* the download. The future may throw a FileTransferError
* exception.
*/
virtual void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) = 0;
virtual void enqueueFileTransfer(const FileTransferRequest & request, Callback<FileTransferResult> callback) = 0;

std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request);

@@ -148,10 +158,8 @@ struct FileTransfer
* Download a file, writing its data to a sink. The sink will be
* invoked on the thread of the caller.
*/
void download(
FileTransferRequest && request,
Sink & sink,
std::function<void(FileTransferResult)> resultCallback = {});
void
download(FileTransferRequest && request, Sink & sink, std::function<void(FileTransferResult)> resultCallback = {});

enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};

@@ -179,7 +187,7 @@ public:
std::optional<std::string> response;

template<typename... Args>
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args &... args);
};

}
} // namespace nix
@@ -7,10 +7,8 @@

namespace nix {

typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;

struct GCOptions
{
/**

@@ -55,7 +53,6 @@ struct GCOptions
uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
};

struct GCResults
{
/**

@@ -71,7 +68,6 @@ struct GCResults
uint64_t bytesFreed = 0;
};

/**
* Mix-in class for \ref Store "stores" which expose a notion of garbage
* collection.

@@ -117,4 +113,4 @@ struct GcStore : public virtual Store
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
};

}
} // namespace nix
@ -20,7 +20,8 @@ typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
|
|||
|
||||
struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
|
||||
{
|
||||
MaxBuildJobsSetting(Config * options,
|
||||
MaxBuildJobsSetting(
|
||||
Config * options,
|
||||
unsigned int def,
|
||||
const std::string & name,
|
||||
const std::string & description,
|
||||
|
|
@ -34,14 +35,15 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
|
|||
};
|
||||
|
||||
const uint32_t maxIdsPerBuild =
|
||||
#ifdef __linux__
|
||||
#ifdef __linux__
|
||||
1 << 16
|
||||
#else
|
||||
#else
|
||||
1
|
||||
#endif
|
||||
#endif
|
||||
;
|
||||
|
||||
class Settings : public Config {
|
||||
class Settings : public Config
|
||||
{
|
||||
|
||||
StringSet getDefaultSystemFeatures();
|
||||
|
||||
|
|
@ -91,7 +93,10 @@ public:
|
|||
*/
|
||||
Path nixDaemonSocketFile;
|
||||
|
||||
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store",
|
||||
Setting<std::string> storeUri{
|
||||
this,
|
||||
getEnv("NIX_REMOTE").value_or("auto"),
|
||||
"store",
|
||||
R"(
|
||||
The [URL of the Nix store](@docroot@/store/types/index.md#store-url-format)
|
||||
to use for most operations.
|
||||
|
|
@ -100,14 +105,15 @@ public:
|
|||
section of the manual for supported store types and settings.
|
||||
)"};
|
||||
|
||||
Setting<bool> keepFailed{this, false, "keep-failed",
|
||||
"Whether to keep temporary directories of failed builds."};
|
||||
Setting<bool> keepFailed{this, false, "keep-failed", "Whether to keep temporary directories of failed builds."};
|
||||
|
||||
Setting<bool> keepGoing{this, false, "keep-going",
|
||||
"Whether to keep building derivations when another build fails."};
|
||||
Setting<bool> keepGoing{
|
||||
this, false, "keep-going", "Whether to keep building derivations when another build fails."};
|
||||
|
||||
Setting<bool> tryFallback{
|
||||
this, false, "fallback",
|
||||
this,
|
||||
false,
|
||||
"fallback",
|
||||
R"(
|
||||
If set to `true`, Nix falls back to building from source if a
|
||||
binary substitute fails. This is equivalent to the `--fallback`
|
||||
|
|
@ -120,12 +126,17 @@ public:
|
|||
*/
|
||||
bool verboseBuild = true;
|
||||
|
||||
Setting<size_t> logLines{this, 25, "log-lines",
|
||||
Setting<size_t> logLines{
|
||||
this,
|
||||
25,
|
||||
"log-lines",
|
||||
"The number of lines of the tail of "
|
||||
"the log to show if a build fails."};
|
||||
|
||||
MaxBuildJobsSetting maxBuildJobs{
|
||||
this, 1, "max-jobs",
|
||||
this,
|
||||
1,
|
||||
"max-jobs",
|
||||
R"(
|
||||
Maximum number of jobs that Nix tries to build locally in parallel.
|
||||
|
||||
|
|
@ -143,7 +154,9 @@ public:
|
|||
{"build-max-jobs"}};
|
||||
|
||||
Setting<unsigned int> maxSubstitutionJobs{
|
||||
this, 16, "max-substitution-jobs",
|
||||
this,
|
||||
16,
|
||||
"max-substitution-jobs",
|
||||
R"(
|
||||
This option defines the maximum number of substitution jobs that Nix
|
||||
tries to run in parallel. The default is `16`. The minimum value
|
||||
|
|
@ -181,7 +194,9 @@ public:
|
|||
bool readOnlyMode = false;
|
||||
|
||||
Setting<std::string> thisSystem{
|
||||
this, NIX_LOCAL_SYSTEM, "system",
|
||||
this,
|
||||
NIX_LOCAL_SYSTEM,
|
||||
"system",
|
||||
R"(
|
||||
The system type of the current Nix installation.
|
||||
Nix only builds a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms).
|
||||
|
|
@ -208,7 +223,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<time_t> maxSilentTime{
|
||||
this, 0, "max-silent-time",
|
||||
this,
|
||||
0,
|
||||
"max-silent-time",
|
||||
R"(
|
||||
This option defines the maximum number of seconds that a builder can
|
||||
go without producing any data on standard output or standard error.
|
||||
|
|
@ -223,7 +240,9 @@ public:
|
|||
{"build-max-silent-time"}};
|
||||
|
||||
Setting<time_t> buildTimeout{
|
||||
this, 0, "timeout",
|
||||
this,
|
||||
0,
|
||||
"timeout",
|
||||
R"(
|
||||
This option defines the maximum number of seconds that a builder can
|
||||
run. This is useful (for instance in an automated build system) to
|
||||
|
|
@ -236,7 +255,10 @@ public:
|
|||
)",
|
||||
{"build-timeout"}};
|
||||
|
||||
Setting<Strings> buildHook{this, {"nix", "__build-remote"}, "build-hook",
|
||||
Setting<Strings> buildHook{
|
||||
this,
|
||||
{"nix", "__build-remote"},
|
||||
"build-hook",
|
||||
R"(
|
||||
The path to the helper program that executes remote builds.
|
||||
|
||||
|
|
@ -249,7 +271,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<std::string> builders{
|
||||
this, "@" + nixConfDir + "/machines", "builders",
|
||||
this,
|
||||
"@" + nixConfDir + "/machines",
|
||||
"builders",
|
||||
R"(
|
||||
A semicolon- or newline-separated list of build machines.
|
||||
|
||||
|
|
@ -365,16 +389,21 @@ public:
|
|||
|
||||
If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substitutes) to `true`.
|
||||
)",
|
||||
{}, false};
|
||||
{},
|
||||
false};
|
||||
|
||||
Setting<bool> alwaysAllowSubstitutes{
|
||||
this, false, "always-allow-substitutes",
|
||||
this,
|
||||
false,
|
||||
"always-allow-substitutes",
|
||||
R"(
|
||||
If set to `true`, Nix ignores the [`allowSubstitutes`](@docroot@/language/advanced-attributes.md) attribute in derivations and always attempt to use [available substituters](#conf-substituters).
|
||||
)"};
|
||||
|
||||
Setting<bool> buildersUseSubstitutes{
|
||||
this, false, "builders-use-substitutes",
|
||||
this,
|
||||
false,
|
||||
"builders-use-substitutes",
|
||||
R"(
|
||||
If set to `true`, Nix instructs [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available.
|
||||
|
||||
|
|
@ -382,11 +411,13 @@ public:
|
|||
This can drastically reduce build times if the network connection between the local machine and the remote build host is slow.
|
||||
)"};
|
||||
|
||||
Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
|
||||
"Amount of reserved disk space for the garbage collector."};
|
||||
Setting<off_t> reservedSize{
|
||||
this, 8 * 1024 * 1024, "gc-reserved-space", "Amount of reserved disk space for the garbage collector."};
|
||||
|
||||
Setting<bool> fsyncMetadata{
|
||||
this, true, "fsync-metadata",
|
||||
this,
|
||||
true,
|
||||
"fsync-metadata",
|
||||
R"(
|
||||
If set to `true`, changes to the Nix store metadata (in
|
||||
`/nix/var/nix/db`) are synchronously flushed to disk. This improves
|
||||
|
|
@ -394,24 +425,28 @@ public:
|
|||
default is `true`.
|
||||
)"};
|
||||
|
||||
Setting<bool> fsyncStorePaths{this, false, "fsync-store-paths",
|
||||
Setting<bool> fsyncStorePaths{
|
||||
this,
|
||||
false,
|
||||
"fsync-store-paths",
|
||||
R"(
|
||||
Whether to call `fsync()` on store paths before registering them, to
|
||||
flush them to disk. This improves robustness in case of system crashes,
|
||||
but reduces performance. The default is `false`.
|
||||
)"};
|
||||
|
||||
Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal",
|
||||
"Whether SQLite should use WAL mode."};
|
||||
Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal", "Whether SQLite should use WAL mode."};
|
||||
|
||||
#ifndef _WIN32
|
||||
// FIXME: remove this option, `fsync-store-paths` is faster.
|
||||
Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
|
||||
"Whether to call `sync()` before registering a path as valid."};
|
||||
Setting<bool> syncBeforeRegistering{
|
||||
this, false, "sync-before-registering", "Whether to call `sync()` before registering a path as valid."};
|
||||
#endif
|
||||
|
||||
Setting<bool> useSubstitutes{
|
||||
this, true, "substitute",
|
||||
this,
|
||||
true,
|
||||
"substitute",
|
||||
R"(
|
||||
If set to `true` (default), Nix uses binary substitutes if
|
||||
available. This option can be disabled to force building from
|
||||
|
|
@ -420,7 +455,9 @@ public:
|
|||
{"build-use-substitutes"}};
|
||||
|
||||
Setting<std::string> buildUsersGroup{
|
||||
this, "", "build-users-group",
|
||||
this,
|
||||
"",
|
||||
"build-users-group",
|
||||
R"(
|
||||
This options specifies the Unix group containing the Nix build user
|
||||
accounts. In multi-user Nix installations, builds should not be
|
||||
|
|
@ -454,37 +491,48 @@ public:
|
|||
|
||||
Defaults to `nixbld` when running as root, *empty* otherwise.
|
||||
)",
|
||||
{}, false};
|
||||
{},
|
||||
false};
|
||||
|
||||
Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
|
||||
Setting<bool> autoAllocateUids{
|
||||
this,
|
||||
false,
|
||||
"auto-allocate-uids",
|
||||
R"(
|
||||
Whether to select UIDs for builds automatically, instead of using the
|
||||
users in `build-users-group`.
|
||||
|
||||
UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
|
||||
)", {}, true, Xp::AutoAllocateUids};
|
||||
)",
|
||||
{},
|
||||
true,
|
||||
Xp::AutoAllocateUids};
|
||||
|
||||
Setting<uint32_t> startId{this,
|
||||
#ifdef __linux__
|
||||
Setting<uint32_t> startId{
|
||||
this,
|
||||
#ifdef __linux__
|
||||
0x34000000,
|
||||
#else
|
||||
#else
|
||||
56930,
|
||||
#endif
|
||||
#endif
|
||||
"start-id",
|
||||
"The first UID and GID to use for dynamic ID allocation."};
|
||||
|
||||
Setting<uint32_t> uidCount{this,
|
||||
#ifdef __linux__
|
||||
Setting<uint32_t> uidCount{
|
||||
this,
|
||||
#ifdef __linux__
|
||||
maxIdsPerBuild * 128,
|
||||
#else
|
||||
#else
|
||||
128,
|
||||
#endif
|
||||
#endif
|
||||
"id-count",
|
||||
"The number of UIDs/GIDs to use for dynamic ID allocation."};
|
||||
|
||||
#ifdef __linux__
|
||||
#ifdef __linux__
|
||||
Setting<bool> useCgroups{
|
||||
this, false, "use-cgroups",
|
||||
this,
|
||||
false,
|
||||
"use-cgroups",
|
||||
R"(
|
||||
Whether to execute builds inside cgroups.
|
||||
This is only supported on Linux.
|
||||
|
|
@ -492,14 +540,19 @@ public:
|
|||
Cgroups are required and enabled automatically for derivations
|
||||
that require the `uid-range` system feature.
|
||||
)"};
|
||||
#endif
|
||||
#endif
|
||||
|
||||
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
|
||||
Setting<bool> impersonateLinux26{
|
||||
this,
|
||||
false,
|
||||
"impersonate-linux-26",
|
||||
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
|
||||
{"build-impersonate-linux-26"}};
|
||||
|
||||
Setting<bool> keepLog{
|
||||
this, true, "keep-build-log",
|
||||
this,
|
||||
true,
|
||||
"keep-build-log",
|
||||
R"(
|
||||
If set to `true` (the default), Nix writes the build log of a
|
||||
derivation (i.e. the standard output and error of its builder) to
|
||||
|
|
@ -509,7 +562,9 @@ public:
|
|||
{"build-keep-log"}};
|
||||
|
||||
Setting<bool> compressLog{
|
||||
this, true, "compress-build-log",
|
||||
this,
|
||||
true,
|
||||
"compress-build-log",
|
||||
R"(
|
||||
If set to `true` (the default), build logs written to
|
||||
`/nix/var/log/nix/drvs` are compressed on the fly using bzip2.
|
||||
|
|
@ -518,7 +573,9 @@ public:
|
|||
{"build-compress-log"}};
|
||||
|
||||
Setting<unsigned long> maxLogSize{
|
||||
this, 0, "max-build-log-size",
|
||||
this,
|
||||
0,
|
||||
"max-build-log-size",
|
||||
R"(
|
||||
This option defines the maximum number of bytes that a builder can
|
||||
write to its stdout/stderr. If the builder exceeds this limit, it’s
|
||||
|
|
@ -526,11 +583,12 @@ public:
|
|||
)",
|
||||
{"build-max-log-size"}};
|
||||
|
||||
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
|
||||
"How often (in seconds) to poll for locks."};
|
||||
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval", "How often (in seconds) to poll for locks."};
|
||||
|
||||
Setting<bool> gcKeepOutputs{
|
||||
this, false, "keep-outputs",
|
||||
this,
|
||||
false,
|
||||
"keep-outputs",
|
||||
R"(
|
||||
If `true`, the garbage collector keeps the outputs of
|
||||
non-garbage derivations. If `false` (default), outputs are
|
||||
|
|
@ -546,7 +604,9 @@ public:
|
|||
{"gc-keep-outputs"}};
|
||||
|
||||
Setting<bool> gcKeepDerivations{
|
||||
this, true, "keep-derivations",
|
||||
this,
|
||||
true,
|
||||
"keep-derivations",
|
||||
R"(
|
||||
If `true` (default), the garbage collector keeps the derivations
|
||||
from which non-garbage store paths were built. If `false`, they are
|
||||
|
|
@ -562,7 +622,9 @@ public:
|
|||
{"gc-keep-derivations"}};
|
||||
|
||||
Setting<bool> autoOptimiseStore{
|
||||
this, false, "auto-optimise-store",
|
||||
this,
|
||||
false,
|
||||
"auto-optimise-store",
|
||||
R"(
|
||||
If set to `true`, Nix automatically detects files in the store
|
||||
that have identical contents, and replaces them with hard links to
|
||||
|
|
@ -572,7 +634,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<bool> envKeepDerivations{
|
||||
this, false, "keep-env-derivations",
|
||||
this,
|
||||
false,
|
||||
"keep-env-derivations",
|
||||
R"(
|
||||
If `false` (default), derivations are not stored in Nix user
|
||||
environments. That is, the derivations of any build-time-only
|
||||
|
|
@ -594,12 +658,13 @@ public:
|
|||
|
||||
Setting<SandboxMode> sandboxMode{
|
||||
this,
|
||||
#ifdef __linux__
|
||||
smEnabled
|
||||
#else
|
||||
smDisabled
|
||||
#endif
|
||||
, "sandbox",
|
||||
#ifdef __linux__
|
||||
smEnabled
|
||||
#else
|
||||
smDisabled
|
||||
#endif
|
||||
,
|
||||
"sandbox",
|
||||
R"(
|
||||
If set to `true`, builds are performed in a *sandboxed
|
||||
environment*, i.e., they’re isolated from the normal file system
|
||||
|
|
@ -628,7 +693,9 @@ public:
|
|||
{"build-use-chroot", "build-use-sandbox"}};
|
||||
|
||||
Setting<PathSet> sandboxPaths{
|
||||
this, {}, "sandbox-paths",
|
||||
this,
|
||||
{},
|
||||
"sandbox-paths",
|
||||
R"(
|
||||
A list of paths bind-mounted into Nix sandbox environments. You can
|
||||
use the syntax `target=source` to mount a path in a different
|
||||
|
|
@ -646,11 +713,14 @@ public:
|
|||
)",
|
||||
{"build-chroot-dirs", "build-sandbox-paths"}};
|
||||
|
||||
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
|
||||
"Whether to disable sandboxing when the kernel doesn't allow it."};
|
||||
Setting<bool> sandboxFallback{
|
||||
this, true, "sandbox-fallback", "Whether to disable sandboxing when the kernel doesn't allow it."};
|
||||
|
||||
#ifndef _WIN32
|
||||
Setting<bool> requireDropSupplementaryGroups{this, isRootUser(), "require-drop-supplementary-groups",
|
||||
Setting<bool> requireDropSupplementaryGroups{
|
||||
this,
|
||||
isRootUser(),
|
||||
"require-drop-supplementary-groups",
|
||||
R"(
|
||||
Following the principle of least privilege,
|
||||
Nix attempts to drop supplementary groups when building with sandboxing.
|
||||
|
|
@ -671,7 +741,9 @@ public:
|
|||
|
||||
#ifdef __linux__
|
||||
Setting<std::string> sandboxShmSize{
|
||||
this, "50%", "sandbox-dev-shm-size",
|
||||
this,
|
||||
"50%",
|
||||
"sandbox-dev-shm-size",
|
||||
R"(
|
||||
*Linux only*
|
||||
|
||||
|
|
@ -683,7 +755,10 @@ public:
|
|||
#endif
|
||||
|
||||
#if defined(__linux__) || defined(__FreeBSD__)
|
||||
Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
|
||||
Setting<Path> sandboxBuildDir{
|
||||
this,
|
||||
"/build",
|
||||
"sandbox-build-dir",
|
||||
R"(
|
||||
*Linux only*
|
||||
|
||||
|
|
@ -693,21 +768,32 @@ public:
|
|||
)"};
|
||||
#endif
|
||||
|
||||
Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir",
|
||||
Setting<std::optional<Path>> buildDir{
|
||||
this,
|
||||
std::nullopt,
|
||||
"build-dir",
|
||||
R"(
|
||||
Override the `build-dir` store setting for all stores that have this setting.
|
||||
)"};
|
||||
|
||||
Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
|
||||
Setting<PathSet> allowedImpureHostPrefixes{
|
||||
this,
|
||||
{},
|
||||
"allowed-impure-host-deps",
|
||||
"Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};
|
||||
|
||||
#ifdef __APPLE__
|
||||
Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations",
|
||||
Setting<bool> darwinLogSandboxViolations{
|
||||
this,
|
||||
false,
|
||||
"darwin-log-sandbox-violations",
|
||||
"Whether to log Darwin sandbox access violations to the system log."};
|
||||
#endif
|
||||
|
||||
Setting<bool> runDiffHook{
|
||||
this, false, "run-diff-hook",
|
||||
this,
|
||||
false,
|
||||
"run-diff-hook",
|
||||
R"(
|
||||
If true, enable the execution of the `diff-hook` program.
|
||||
|
||||
|
|
@ -717,7 +803,9 @@ public:
|
|||
)"};
|
||||
|
||||
OptionalPathSetting diffHook{
|
||||
this, std::nullopt, "diff-hook",
|
||||
this,
|
||||
std::nullopt,
|
||||
"diff-hook",
|
||||
R"(
|
||||
Absolute path to an executable capable of diffing build
|
||||
results. The hook is executed if `run-diff-hook` is true, and the
|
||||
|
|
@ -765,7 +853,9 @@ public:
|
|||
{"binary-cache-public-keys"}};
|
||||
|
||||
Setting<Strings> secretKeyFiles{
|
||||
this, {}, "secret-key-files",
|
||||
this,
|
||||
{},
|
||||
"secret-key-files",
|
||||
R"(
|
||||
A whitespace-separated list of files containing secret (private)
|
||||
keys. These are used to sign locally-built paths. They can be
|
||||
|
|
@ -775,7 +865,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<unsigned int> tarballTtl{
|
||||
this, 60 * 60, "tarball-ttl",
|
||||
this,
|
||||
60 * 60,
|
||||
"tarball-ttl",
|
||||
R"(
|
||||
The number of seconds a downloaded tarball is considered fresh. If
|
||||
the cached tarball is stale, Nix checks whether it is still up
|
||||
|
|
@ -792,7 +884,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<bool> requireSigs{
|
||||
this, true, "require-sigs",
|
||||
this,
|
||||
true,
|
||||
"require-sigs",
|
||||
R"(
|
||||
If set to `true` (the default), any non-content-addressed path added
|
||||
or copied to the Nix store (e.g. when substituting from a binary
|
||||
|
|
@ -901,7 +995,9 @@ public:
|
|||
{"binary-caches"}};
|
||||
|
||||
Setting<StringSet> trustedSubstituters{
|
||||
this, {}, "trusted-substituters",
|
||||
this,
|
||||
{},
|
||||
"trusted-substituters",
|
||||
R"(
|
||||
A list of [Nix store URLs](@docroot@/store/types/index.md#store-url-format), separated by whitespace.
|
||||
These are not used by default, but users of the Nix daemon can enable them by specifying [`substituters`](#conf-substituters).
|
||||
|
|
@ -911,7 +1007,9 @@ public:
|
|||
{"trusted-binary-caches"}};
|
||||
|
||||
Setting<unsigned int> ttlNegativeNarInfoCache{
|
||||
this, 3600, "narinfo-cache-negative-ttl",
|
||||
this,
|
||||
3600,
|
||||
"narinfo-cache-negative-ttl",
|
||||
R"(
|
||||
The TTL in seconds for negative lookups.
|
||||
If a store path is queried from a [substituter](#conf-substituters) but was not found, a negative lookup is cached in the local disk cache database for the specified duration.
|
||||
|
|
@ -927,7 +1025,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<unsigned int> ttlPositiveNarInfoCache{
|
||||
this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
|
||||
this,
|
||||
30 * 24 * 3600,
|
||||
"narinfo-cache-positive-ttl",
|
||||
R"(
|
||||
The TTL in seconds for positive lookups. If a store path is queried
|
||||
from a substituter, the result of the query is cached in the
|
||||
|
|
@ -939,11 +1039,13 @@ public:
|
|||
mismatch if the build isn't reproducible.
|
||||
)"};
|
||||
|
||||
Setting<bool> printMissing{this, true, "print-missing",
|
||||
"Whether to print what paths need to be built or downloaded."};
|
||||
Setting<bool> printMissing{
|
||||
this, true, "print-missing", "Whether to print what paths need to be built or downloaded."};
|
||||
|
||||
Setting<std::string> preBuildHook{
|
||||
this, "", "pre-build-hook",
|
||||
this,
|
||||
"",
|
||||
"pre-build-hook",
|
||||
R"(
|
||||
If set, the path to a program that can set extra derivation-specific
|
||||
settings for this system. This is used for settings that can't be
|
||||
|
|
@ -962,7 +1064,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<std::string> postBuildHook{
|
||||
this, "", "post-build-hook",
|
||||
this,
|
||||
"",
|
||||
"post-build-hook",
|
||||
R"(
|
||||
Optional. The path to a program to execute after each build.
|
||||
|
||||
|
|
@ -1006,15 +1110,19 @@ public:
|
|||
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
|
||||
)"};
|
||||
|
||||
Setting<unsigned int> downloadSpeed {
|
||||
this, 0, "download-speed",
|
||||
Setting<unsigned int> downloadSpeed{
|
||||
this,
|
||||
0,
|
||||
"download-speed",
|
||||
R"(
|
||||
Specify the maximum transfer rate in kilobytes per second you want
|
||||
Nix to use for downloads.
|
||||
)"};
|
||||
|
||||
Setting<std::string> netrcFile{
|
||||
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
||||
this,
|
||||
fmt("%s/%s", nixConfDir, "netrc"),
|
||||
"netrc-file",
|
||||
R"(
|
||||
If set to an absolute path to a `netrc` file, Nix uses the HTTP
|
||||
authentication credentials in this file when trying to download from
|
||||
|
|
@ -1039,7 +1147,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<Path> caFile{
|
||||
this, getDefaultSSLCertFile(), "ssl-cert-file",
|
||||
this,
|
||||
getDefaultSSLCertFile(),
|
||||
"ssl-cert-file",
|
||||
R"(
|
||||
The path of a file containing CA certificates used to
|
||||
authenticate `https://` downloads. Nix by default uses
|
||||
|
|
@ -1060,7 +1170,9 @@ public:
|
|||
|
||||
#ifdef __linux__
|
||||
Setting<bool> filterSyscalls{
|
||||
this, true, "filter-syscalls",
|
||||
this,
|
||||
true,
|
||||
"filter-syscalls",
|
||||
R"(
|
||||
Whether to prevent certain dangerous system calls, such as
|
||||
creation of setuid/setgid files or adding ACLs or extended
|
||||
|
|
@ -1069,7 +1181,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<bool> allowNewPrivileges{
|
||||
this, false, "allow-new-privileges",
|
||||
this,
|
||||
false,
|
||||
"allow-new-privileges",
|
||||
R"(
|
||||
(Linux-specific.) By default, builders on Linux cannot acquire new
|
||||
privileges by calling setuid/setgid programs or programs that have
|
||||
|
|
@ -1085,7 +1199,9 @@ public:
|
|||
|
||||
#if NIX_SUPPORT_ACL
|
||||
Setting<StringSet> ignoredAcls{
|
||||
this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls",
|
||||
this,
|
||||
{"security.selinux", "system.nfs4_acl", "security.csm"},
|
||||
"ignored-acls",
|
||||
R"(
|
||||
A list of ACLs that should be ignored, normally Nix attempts to
|
||||
remove all ACLs from files and directories in the Nix store, but
|
||||
|
|
@ -1095,7 +1211,9 @@ public:
|
|||
#endif
|
||||
|
||||
Setting<Strings> hashedMirrors{
|
||||
this, {}, "hashed-mirrors",
|
||||
this,
|
||||
{},
|
||||
"hashed-mirrors",
|
||||
R"(
|
||||
A list of web servers used by `builtins.fetchurl` to obtain files by
|
||||
hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix tries to
|
||||
|
|
@ -1117,7 +1235,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<uint64_t> minFree{
|
||||
this, 0, "min-free",
|
||||
this,
|
||||
0,
|
||||
"min-free",
|
||||
R"(
|
||||
When free disk space in `/nix/store` drops below `min-free` during a
|
||||
build, Nix performs a garbage-collection until `max-free` bytes are
|
||||
|
|
@ -1125,25 +1245,28 @@ public:
|
|||
disables this feature.
|
||||
)"};
|
||||
|
||||
Setting<uint64_t> maxFree{
|
||||
// n.b. this is deliberately int64 max rather than uint64 max because
|
||||
// this goes through the Nix language JSON parser and thus needs to be
|
||||
// representable in Nix language integers.
|
||||
this, std::numeric_limits<int64_t>::max(), "max-free",
|
||||
R"(
|
||||
Setting<uint64_t> maxFree{// n.b. this is deliberately int64 max rather than uint64 max because
|
||||
// this goes through the Nix language JSON parser and thus needs to be
|
||||
// representable in Nix language integers.
|
||||
this,
|
||||
std::numeric_limits<int64_t>::max(),
|
||||
"max-free",
|
||||
R"(
|
||||
When a garbage collection is triggered by the `min-free` option, it
|
||||
stops as soon as `max-free` bytes are available. The default is
|
||||
infinity (i.e. delete all garbage).
|
||||
)"};
|
||||
|
||||
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
|
||||
"Number of seconds between checking free disk space."};
|
||||
Setting<uint64_t> minFreeCheckInterval{
|
||||
this, 5, "min-free-check-interval", "Number of seconds between checking free disk space."};
|
||||
|
||||
Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
|
||||
"Maximum size of NARs before spilling them to disk."};
|
||||
Setting<size_t> narBufferSize{
|
||||
this, 32 * 1024 * 1024, "nar-buffer-size", "Maximum size of NARs before spilling them to disk."};
|
||||
|
||||
Setting<bool> allowSymlinkedStore{
|
||||
this, false, "allow-symlinked-store",
|
||||
this,
|
||||
false,
|
||||
"allow-symlinked-store",
|
||||
R"(
|
||||
If set to `true`, Nix stops complaining if the store directory
|
||||
(typically `/nix/store`) contains symlink components.
|
||||
|
|
@ -1156,7 +1279,9 @@ public:
|
|||
)"};
|
||||
|
||||
Setting<bool> useXDGBaseDirectories{
|
||||
this, false, "use-xdg-base-directories",
|
||||
this,
|
||||
false,
|
||||
"use-xdg-base-directories",
|
||||
R"(
|
||||
If set to `true`, Nix conforms to the [XDG Base Directory Specification] for files in `$HOME`.
|
||||
The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/command-ref/env-common.md).
|
||||
|
|
@ -1185,10 +1310,12 @@ public:
|
|||
mv $HOME/.nix-defexpr $nix_state_home/defexpr
|
||||
mv $HOME/.nix-channels $nix_state_home/channels
|
||||
```
|
||||
)"
|
||||
};
|
||||
)"};
|
||||
|
||||
Setting<StringMap> impureEnv {this, {}, "impure-env",
|
||||
Setting<StringMap> impureEnv{
|
||||
this,
|
||||
{},
|
||||
"impure-env",
|
||||
R"(
|
||||
A list of items, each in the format of:
|
||||
|
||||
|
|
@ -1202,10 +1329,9 @@ public:
|
|||
fixed-output derivations and in a multi-user Nix installation, or
|
||||
setting private access tokens when fetching a private repository.
|
||||
)",
|
||||
{}, // aliases
|
||||
{}, // aliases
|
||||
true, // document default
|
||||
Xp::ConfigurableImpureEnv
|
||||
};
|
||||
Xp::ConfigurableImpureEnv};
|
||||
|
||||
Setting<std::string> upgradeNixStorePathUrl{
|
||||
this,
|
||||
|
|
@ -1214,8 +1340,7 @@ public:
|
|||
R"(
|
||||
Used by `nix upgrade-nix`, the URL of the file that contains the
|
||||
store paths of the latest Nix release.
|
||||
)"
|
||||
};
|
||||
)"};
|
||||
|
||||
Setting<uint64_t> warnLargePathThreshold{
|
||||
this,
|
||||
|
|
@ -1226,11 +1351,9 @@ public:
|
|||
(as determined by its NAR serialisation).
|
||||
Default is 0, which disables the warning.
|
||||
Set it to 1 to warn on all paths.
|
||||
)"
|
||||
};
|
||||
)"};
|
||||
};
|
||||
|
||||
|
||||
// FIXME: don't use a global variable.
|
||||
extern Settings settings;
|
||||
|
||||
|
|
@ -1268,4 +1391,4 @@ void initLibStore(bool loadConfig = true);
|
|||
*/
|
||||
void assertLibStoreInitialized();
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@@ -25,4 +25,4 @@ struct HttpBinaryCacheStoreConfig : std::enable_shared_from_this<HttpBinaryCache
ref<Store> openStore() const override;
};

}
} // namespace nix
@@ -72,4 +72,4 @@ protected:
void makeSymlink(const Path & link, const Path & target);
};

}
} // namespace nix
@@ -14,10 +14,7 @@ struct LegacySSHStoreConfig : std::enable_shared_from_this<LegacySSHStoreConfig>
{
using CommonSSHStoreConfig::CommonSSHStoreConfig;

LegacySSHStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);
LegacySSHStoreConfig(std::string_view scheme, std::string_view authority, const Params & params);

#ifndef _WIN32
// Hack for getting remote build log output.

@@ -28,11 +25,10 @@ struct LegacySSHStoreConfig : std::enable_shared_from_this<LegacySSHStoreConfig>
Descriptor logFD = INVALID_DESCRIPTOR;
#endif

const Setting<Strings> remoteProgram{this, {"nix-store"}, "remote-program",
"Path to the `nix-store` executable on the remote machine."};
const Setting<Strings> remoteProgram{
this, {"nix-store"}, "remote-program", "Path to the `nix-store` executable on the remote machine."};

const Setting<int> maxConnections{this, 1, "max-connections",
"Maximum number of concurrent SSH connections."};
const Setting<int> maxConnections{this, 1, "max-connections", "Maximum number of concurrent SSH connections."};

/**
* Hack for hydra

@@ -44,9 +40,15 @@ struct LegacySSHStoreConfig : std::enable_shared_from_this<LegacySSHStoreConfig>
*/
std::optional<size_t> connPipeSize;

static const std::string name() { return "SSH Store"; }
static const std::string name()
{
return "SSH Store";
}

static StringSet uriSchemes() { return {"ssh"}; }
static StringSet uriSchemes()
{
return {"ssh"};
}

static std::string doc();

@@ -71,14 +73,12 @@ struct LegacySSHStore : public virtual Store

std::string getUri() override;

void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
void queryPathInfoUncached(
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

std::map<StorePath, UnkeyedValidPathInfo> queryPathInfosUncached(
const StorePathSet & paths);
std::map<StorePath, UnkeyedValidPathInfo> queryPathInfosUncached(const StorePathSet & paths);

void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override;
void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override;

void narFromPath(const StorePath & path, Sink & sink) override;

@@ -93,7 +93,9 @@ struct LegacySSHStore : public virtual Store
void narFromPath(const StorePath & path, std::function<void(Source &)> fun);

std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
{
unsupported("queryPathFromHashPart");
}

StorePath addToStore(
std::string_view name,

@@ -103,7 +105,9 @@ struct LegacySSHStore : public virtual Store
const StorePathSet & references,
PathFilter & filter,
RepairFlag repair) override
{ unsupported("addToStore"); }
{
unsupported("addToStore");
}

virtual StorePath addToStoreFromDump(
Source & dump,

@@ -113,12 +117,13 @@ struct LegacySSHStore : public virtual Store
HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
const StorePathSet & references = StorePathSet(),
RepairFlag repair = NoRepair) override
{ unsupported("addToStore"); }
{
unsupported("addToStore");
}

public:

BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override;

/**
* Note, the returned function must only be called once, or we'll

@@ -127,16 +132,20 @@ public:
* @todo Use C++23 `std::move_only_function`.
*/
std::function<BuildResult()> buildDerivationAsync(
const StorePath & drvPath, const BasicDerivation & drv,
const ServeProto::BuildOptions & options);
const StorePath & drvPath, const BasicDerivation & drv, const ServeProto::BuildOptions & options);

void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
void buildPaths(
const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;

void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }
{
unsupported("ensurePath");
}

virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
{ unsupported("getFSAccessor"); }
{
unsupported("getFSAccessor");
}

/**
* The default instance would schedule the work on the client side, but

@@ -147,14 +156,18 @@ public:
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }
{
unsupported("repairPath");
}

void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override;
void computeFSClosure(
const StorePathSet & paths,
StorePathSet & out,
bool flipDirection = false,
bool includeOutputs = false,
bool includeDerivers = false) override;

StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override;

/**
* Custom variation that atomically creates temp locks on the remote

@@ -164,9 +177,7 @@ public:
* garbage-collects paths that are already there. Optionally, ask
* the remote host to substitute missing paths.
*/
StorePathSet queryValidPaths(const StorePathSet & paths,
bool lock,
SubstituteFlag maybeSubstitute = NoSubstitute);
StorePathSet queryValidPaths(const StorePathSet & paths, bool lock, SubstituteFlag maybeSubstitute = NoSubstitute);

/**
* Just exists because this is exactly what Hydra was doing, and we

@@ -178,7 +189,8 @@ public:

unsigned int getProtocol() override;

struct ConnectionStats {
struct ConnectionStats
{
size_t bytesReceived, bytesSent;
};

@@ -192,10 +204,12 @@ public:
*/
std::optional<TrustedFlag> isTrustedClient() override;

void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
void
queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
{
unsupported("queryRealisation");
}
};

}
} // namespace nix

@@ -30,23 +30,24 @@ struct StoreDirConfig;
template<class Inner, typename T>
struct LengthPrefixedProtoHelper;

#define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \
struct LengthPrefixedProtoHelper< Inner, T > \
{ \
static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \
#define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \
struct LengthPrefixedProtoHelper<Inner, T> \
{ \
static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \
static void write(const StoreDirConfig & store, typename Inner::WriteConn conn, const T & str); \
private: \
/*! \
* Read this as simply `using S = Inner::Serialise;`. \
* \
* It would be nice to use that directly, but C++ doesn't seem to allow \
* it. The `typename` keyword needed to refer to `Inner` seems to greedy \
* (low precedence), and then C++ complains that `Serialise` is not a \
* type parameter but a real type. \
* \
* Making this `S` alias seems to be the only way to avoid these issues. \
*/ \
template<typename U> using S = typename Inner::template Serialise<U>; \
private: \
/*! \
* Read this as simply `using S = Inner::Serialise;`. \
* \
* It would be nice to use that directly, but C++ doesn't seem to allow \
* it. The `typename` keyword needed to refer to `Inner` seems to greedy \
* (low precedence), and then C++ complains that `Serialise` is not a \
* type parameter but a real type. \
* \
* Making this `S` alias seems to be the only way to avoid these issues. \
*/ \
template<typename U> \
using S = typename Inner::template Serialise<U>; \
}

template<class Inner, typename T>

@@ -66,8 +67,7 @@ LENGTH_PREFIXED_PROTO_HELPER(Inner, LENGTH_PREFIXED_PROTO_HELPER_X);

template<class Inner, typename T>
std::vector<T>
LengthPrefixedProtoHelper<Inner, std::vector<T>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
LengthPrefixedProtoHelper<Inner, std::vector<T>>::read(const StoreDirConfig & store, typename Inner::ReadConn conn)
{
std::vector<T> resSet;
auto size = readNum<size_t>(conn.from);

@@ -78,8 +78,7 @@ LengthPrefixedProtoHelper<Inner, std::vector<T>>::read(
}

template<class Inner, typename T>
void
LengthPrefixedProtoHelper<Inner, std::vector<T>>::write(
void LengthPrefixedProtoHelper<Inner, std::vector<T>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::vector<T> & resSet)
{
conn.to << resSet.size();

@@ -112,8 +111,7 @@ void LengthPrefixedProtoHelper<Inner, std::set<T, Compare>>::write(

template<class Inner, typename K, typename V>
std::map<K, V>
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read(const StoreDirConfig & store, typename Inner::ReadConn conn)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(conn.from);

@@ -126,8 +124,7 @@ LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read(
}

template<class Inner, typename K, typename V>
void
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write(
void LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::map<K, V> & resMap)
{
conn.to << resMap.size();

@@ -139,22 +136,18 @@ LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write(

template<class Inner, typename... Ts>
std::tuple<Ts...>
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::read(
const StoreDirConfig & store, typename Inner::ReadConn conn)
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::read(const StoreDirConfig & store, typename Inner::ReadConn conn)
{
return std::tuple<Ts...> {
return std::tuple<Ts...>{
S<Ts>::read(store, conn)...,
};
}

template<class Inner, typename... Ts>
void
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::write(
void LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::write(
const StoreDirConfig & store, typename Inner::WriteConn conn, const std::tuple<Ts...> & res)
{
std::apply([&]<typename... Us>(const Us &... args) {
(S<Us>::write(store, conn, args), ...);
}, res);
std::apply([&]<typename... Us>(const Us &... args) { (S<Us>::write(store, conn, args), ...); }, res);
}

}
} // namespace nix

@@ -28,4 +28,4 @@ struct LocalBinaryCacheStoreConfig : std::enable_shared_from_this<LocalBinaryCac
ref<Store> openStore() const override;
};

}
} // namespace nix

@@ -20,29 +20,25 @@ struct LocalFSStoreConfig : virtual StoreConfig
*/
LocalFSStoreConfig(PathView path, const Params & params);

OptionalPathSetting rootDir{this, std::nullopt,
"root",
"Directory prefixed to all other paths."};
OptionalPathSetting rootDir{this, std::nullopt, "root", "Directory prefixed to all other paths."};

PathSetting stateDir{this,
PathSetting stateDir{
this,
rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
"state",
"Directory where Nix stores state."};

PathSetting logDir{this,
PathSetting logDir{
this,
rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
"log",
"directory where Nix stores log files."};

PathSetting realStoreDir{this,
rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",
"Physical path of the Nix store."};
PathSetting realStoreDir{
this, rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real", "Physical path of the Nix store."};
};

struct LocalFSStore :
virtual Store,
virtual GcStore,
virtual LogStore
struct LocalFSStore : virtual Store, virtual GcStore, virtual LogStore
{
using Config = LocalFSStoreConfig;

@@ -73,7 +69,10 @@ struct LocalFSStore :
*/
virtual Path addPermRoot(const StorePath & storePath, const Path & gcRoot) = 0;

virtual Path getRealStoreDir() { return config.realStoreDir; }
virtual Path getRealStoreDir()
{
return config.realStoreDir;
}

Path toRealPath(const Path & storePath) override
{

@@ -82,7 +81,6 @@ struct LocalFSStore :
}

std::optional<std::string> getBuildLogExact(const StorePath & path) override;

};

}
} // namespace nix

@@ -9,7 +9,8 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
{
LocalOverlayStoreConfig(const StringMap & params)
: LocalOverlayStoreConfig("local-overlay", "", params)
{ }
{
}

LocalOverlayStoreConfig(std::string_view scheme, PathView path, const Params & params)
: StoreConfig(params)

@@ -18,7 +19,10 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
{
}

const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store",
const Setting<std::string> lowerStoreUri{
(StoreConfig *) this,
"",
"lower-store",
R"(
[Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
for the lower store. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly).

@@ -27,12 +31,18 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
Must be used as OverlayFS lower layer for this store's store dir.
)"};

const PathSetting upperLayer{(StoreConfig*) this, "", "upper-layer",
const PathSetting upperLayer{
(StoreConfig *) this,
"",
"upper-layer",
R"(
Directory containing the OverlayFS upper layer for this store's store dir.
)"};

Setting<bool> checkMount{(StoreConfig*) this, true, "check-mount",
Setting<bool> checkMount{
(StoreConfig *) this,
true,
"check-mount",
R"(
Check that the overlay filesystem is correctly mounted.

@@ -43,7 +53,10 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
default, but can be disabled if needed.
)"};

const PathSetting remountHook{(StoreConfig*) this, "", "remount-hook",
const PathSetting remountHook{
(StoreConfig *) this,
"",
"remount-hook",
R"(
Script or other executable to run when overlay filesystem needs remounting.

@@ -56,7 +69,10 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig
The store directory is passed as an argument to the invoked executable.
)"};

static const std::string name() { return "Experimental Local Overlay Store"; }
static const std::string name()
{
return "Experimental Local Overlay Store";
}

static std::optional<ExperimentalFeature> experimentalFeature()
{

@@ -65,7 +81,7 @@ struct LocalOverlayStoreConfig : virtual LocalStoreConfig

static StringSet uriSchemes()
{
return { "local-overlay" };
return {"local-overlay"};
}

static std::string doc();

@@ -124,8 +140,8 @@ private:
/**
* Check lower store if upper DB does not have.
*/
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
void queryPathInfoUncached(
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

/**
* Check lower store if upper DB does not have.

@@ -159,8 +175,8 @@ private:
/**
* Check lower store if upper DB does not have.
*/
void queryRealisationUncached(const DrvOutput&,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void queryRealisationUncached(
const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override;

/**
* Call `remountIfNecessary` after collecting garbage normally.

@@ -217,4 +233,4 @@ private:
std::atomic_bool _remountRequired = false;
};

}
} // namespace nix

@ -13,10 +13,8 @@
|
|||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/**
|
||||
* Nix store and database schema version.
|
||||
*
|
||||
|
|
@ -27,7 +25,6 @@ namespace nix {
|
|||
*/
|
||||
const int nixSchemaVersion = 10;
|
||||
|
||||
|
||||
struct OptimiseStats
|
||||
{
|
||||
unsigned long filesLinked = 0;
|
||||
|
|
@ -41,7 +38,10 @@ private:
|
|||
/**
|
||||
Input for computing the build directory. See `getBuildDir()`.
|
||||
*/
|
||||
Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir",
|
||||
Setting<std::optional<Path>> buildDir{
|
||||
this,
|
||||
std::nullopt,
|
||||
"build-dir",
|
||||
R"(
|
||||
The directory on the host, in which derivations' temporary build directories are created.
|
||||
|
||||
|
|
@ -66,21 +66,22 @@ public:
|
|||
Path getBuildDir() const;
|
||||
};
|
||||
|
||||
struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>, virtual LocalFSStoreConfig, virtual LocalBuildStoreConfig
|
||||
struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>,
|
||||
virtual LocalFSStoreConfig,
|
||||
virtual LocalBuildStoreConfig
|
||||
{
|
||||
using LocalFSStoreConfig::LocalFSStoreConfig;
|
||||
|
||||
LocalStoreConfig(
|
||||
std::string_view scheme,
|
||||
std::string_view authority,
|
||||
const Params & params);
|
||||
LocalStoreConfig(std::string_view scheme, std::string_view authority, const Params & params);
|
||||
|
||||
Setting<bool> requireSigs{this,
|
||||
Setting<bool> requireSigs{
|
||||
this,
|
||||
settings.requireSigs,
|
||||
"require-sigs",
|
||||
"Whether store paths copied into this store should have a trusted signature."};
|
||||
|
||||
Setting<bool> readOnly{this,
|
||||
Setting<bool> readOnly{
|
||||
this,
|
||||
false,
|
||||
"read-only",
|
||||
R"(
|
||||
|
|
@ -97,19 +98,22 @@ struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>, virtua
|
|||
> While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
|
||||
)"};
|
||||
|
||||
static const std::string name() { return "Local Store"; }
|
||||
static const std::string name()
|
||||
{
|
||||
return "Local Store";
|
||||
}
|
||||
|
||||
static StringSet uriSchemes()
|
||||
{ return {"local"}; }
|
||||
{
|
||||
return {"local"};
|
||||
}
|
||||
|
||||
static std::string doc();
|
||||
|
||||
ref<Store> openStore() const override;
|
||||
};
|
||||
|
||||
class LocalStore :
|
||||
public virtual IndirectRootStore,
|
||||
public virtual GcStore
|
||||
class LocalStore : public virtual IndirectRootStore, public virtual GcStore
|
||||
{
|
||||
public:
|
||||
|
||||
|
|
@ -196,29 +200,28 @@ public:
|
|||
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
StorePathSet queryValidPaths(const StorePathSet & paths,
|
||||
SubstituteFlag maybeSubstitute = NoSubstitute) override;
|
||||
StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override;
|
||||
|
||||
StorePathSet queryAllValidPaths() override;
|
||||
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
void queryPathInfoUncached(
|
||||
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
|
||||
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
StorePathSet queryValidDerivers(const StorePath & path) override;
|
||||
|
||||
std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;
|
||||
std::map<std::string, std::optional<StorePath>>
|
||||
queryStaticPartialDerivationOutputMap(const StorePath & path) override;
|
||||
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
|
||||
|
||||
StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;
|
||||
|
||||
bool pathInfoIsUntrusted(const ValidPathInfo &) override;
|
||||
bool realisationIsUntrusted(const Realisation & ) override;
|
||||
bool realisationIsUntrusted(const Realisation &) override;
|
||||
|
||||
void addToStore(const ValidPathInfo & info, Source & source,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs) override;
|
||||
void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override;
|
||||
|
||||
StorePath addToStoreFromDump(
|
||||
Source & dump,
|
||||
|
|
@ -312,7 +315,8 @@ protected:
|
|||
/**
|
||||
* Result of `verifyAllValidPaths`
|
||||
*/
|
||||
struct VerificationResult {
|
||||
struct VerificationResult
|
||||
{
|
||||
/**
|
||||
* Whether any errors were encountered
|
||||
*/
|
||||
|
|
@ -365,22 +369,24 @@ public:
|
|||
void registerDrvOutput(const Realisation & info) override;
|
||||
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
|
||||
void cacheDrvOutputMapping(
|
||||
State & state,
|
||||
const uint64_t deriver,
|
||||
const std::string & outputName,
|
||||
const StorePath & output);
|
||||
State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output);
|
||||
|
||||
std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
|
||||
std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
|
||||
void queryRealisationUncached(const DrvOutput&,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
void queryRealisationUncached(
|
||||
const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
|
||||
std::optional<std::string> getVersion() override;
|
||||
|
||||
protected:
|
||||
|
||||
void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
|
||||
StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);
|
||||
void verifyPath(
|
||||
const StorePath & path,
|
||||
std::function<bool(const StorePath &)> existsInStoreDir,
|
||||
StorePathSet & done,
|
||||
StorePathSet & validPaths,
|
||||
RepairFlag repair,
|
||||
bool & errors);
|
||||
|
||||
private:
|
||||
|
||||
|
|
@ -426,7 +432,8 @@ private:
|
|||
|
||||
InodeHash loadInodeHash();
|
||||
Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
|
||||
void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);
|
||||
void
|
||||
optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);
|
||||
|
||||
// Internal versions that are not wrapped in retry_sqlite.
|
||||
bool isValidPath_(State & state, const StorePath & path);
|
||||
|
|
@ -438,4 +445,4 @@ private:
|
|||
friend struct DerivationGoal;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@
|
|||
|
||||
#include "nix/store/store-api.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct LogStore : public virtual Store
|
||||
|
|
@ -23,4 +22,4 @@ struct LogStore : public virtual Store
|
|||
static LogStore & require(Store & store);
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -12,7 +12,8 @@ struct Machine;
|
|||
|
||||
typedef std::vector<Machine> Machines;
|
||||
|
||||
struct Machine {
|
||||
struct Machine
|
||||
{
|
||||
|
||||
const StoreReference storeUri;
|
||||
const StringSet systemTypes;
|
||||
|
|
@ -85,4 +86,4 @@ struct Machine {
|
|||
*/
|
||||
Machines getMachines();
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -7,18 +7,12 @@ namespace nix {
|
|||
|
||||
/** Rewrite a closure of store paths to be completely content addressed.
|
||||
*/
|
||||
std::map<StorePath, StorePath> makeContentAddressed(
|
||||
Store & srcStore,
|
||||
Store & dstStore,
|
||||
const StorePathSet & rootPaths);
|
||||
std::map<StorePath, StorePath> makeContentAddressed(Store & srcStore, Store & dstStore, const StorePathSet & rootPaths);
|
||||
|
||||
/** Rewrite a closure of a store path to be completely content addressed.
|
||||
*
|
||||
* This is a convenience function for the case where you only have one root path.
|
||||
*/
|
||||
StorePath makeContentAddressed(
|
||||
Store & srcStore,
|
||||
Store & dstStore,
|
||||
const StorePath & rootPath);
|
||||
StorePath makeContentAddressed(Store & srcStore, Store & dstStore, const StorePath & rootPath);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -28,9 +28,8 @@ private:
|
|||
|
||||
typedef std::list<DrvName> DrvNames;
|
||||
|
||||
std::string_view nextComponent(std::string_view::const_iterator & p,
|
||||
const std::string_view::const_iterator end);
|
||||
std::string_view nextComponent(std::string_view::const_iterator & p, const std::string_view::const_iterator end);
|
||||
std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2);
|
||||
DrvNames drvNamesFromArgs(const Strings & opArgs);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -27,9 +27,7 @@ ref<SourceAccessor> makeNarAccessor(Source & source);
|
|||
*/
|
||||
using GetNarBytes = std::function<std::string(uint64_t, uint64_t)>;
|
||||
|
||||
ref<SourceAccessor> makeLazyNarAccessor(
|
||||
const std::string & listing,
|
||||
GetNarBytes getNarBytes);
|
||||
ref<SourceAccessor> makeLazyNarAccessor(const std::string & listing, GetNarBytes getNarBytes);
|
||||
|
||||
/**
|
||||
* Write a JSON representation of the contents of a NAR (except file
|
||||
|
|
@ -37,4 +35,4 @@ ref<SourceAccessor> makeLazyNarAccessor(
|
|||
*/
|
||||
nlohmann::json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -12,10 +12,9 @@ class NarInfoDiskCache
|
|||
public:
|
||||
typedef enum { oValid, oInvalid, oUnknown } Outcome;
|
||||
|
||||
virtual ~NarInfoDiskCache() { }
|
||||
virtual ~NarInfoDiskCache() {}
|
||||
|
||||
virtual int createCache(const std::string & uri, const Path & storeDir,
|
||||
bool wantMassQuery, int priority) = 0;
|
||||
virtual int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) = 0;
|
||||
|
||||
struct CacheInfo
|
||||
{
|
||||
|
|
@ -26,21 +25,16 @@ public:
|
|||
|
||||
virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
|
||||
|
||||
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
|
||||
const std::string & uri, const std::string & hashPart) = 0;
|
||||
virtual std::pair<Outcome, std::shared_ptr<NarInfo>>
|
||||
lookupNarInfo(const std::string & uri, const std::string & hashPart) = 0;
|
||||
|
||||
virtual void upsertNarInfo(
|
||||
const std::string & uri, const std::string & hashPart,
|
||||
std::shared_ptr<const ValidPathInfo> info) = 0;
|
||||
virtual void
|
||||
upsertNarInfo(const std::string & uri, const std::string & hashPart, std::shared_ptr<const ValidPathInfo> info) = 0;
|
||||
|
||||
virtual void upsertRealisation(
|
||||
const std::string & uri,
|
||||
const Realisation & realisation) = 0;
|
||||
virtual void upsertAbsentRealisation(
|
||||
const std::string & uri,
|
||||
const DrvOutput & id) = 0;
|
||||
virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
|
||||
const std::string & uri, const DrvOutput & id) = 0;
|
||||
virtual void upsertRealisation(const std::string & uri, const Realisation & realisation) = 0;
|
||||
virtual void upsertAbsentRealisation(const std::string & uri, const DrvOutput & id) = 0;
|
||||
virtual std::pair<Outcome, std::shared_ptr<Realisation>>
|
||||
lookupRealisation(const std::string & uri, const DrvOutput & id) = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -51,4 +45,4 @@ ref<NarInfoDiskCache> getNarInfoDiskCache();
|
|||
|
||||
ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -17,27 +17,32 @@ struct NarInfo : ValidPathInfo
|
|||
uint64_t fileSize = 0;
|
||||
|
||||
NarInfo() = delete;
|
||||
|
||||
NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash)
|
||||
: ValidPathInfo(store, std::move(name), std::move(ca), narHash)
|
||||
{ }
|
||||
NarInfo(StorePath path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
|
||||
NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
|
||||
{
|
||||
}
|
||||
|
||||
NarInfo(StorePath path, Hash narHash)
|
||||
: ValidPathInfo(std::move(path), narHash)
|
||||
{
|
||||
}
|
||||
|
||||
NarInfo(const ValidPathInfo & info)
|
||||
: ValidPathInfo(info)
|
||||
{
|
||||
}
|
||||
|
||||
NarInfo(const Store & store, const std::string & s, const std::string & whence);
|
||||
|
||||
bool operator ==(const NarInfo &) const = default;
|
||||
bool operator==(const NarInfo &) const = default;
|
||||
// TODO libc++ 16 (used by darwin) missing `std::optional::operator <=>`, can't do yet
|
||||
//auto operator <=>(const NarInfo &) const = default;
|
||||
// auto operator <=>(const NarInfo &) const = default;
|
||||
|
||||
std::string to_string(const Store & store) const;
|
||||
|
||||
nlohmann::json toJSON(
|
||||
const Store & store,
|
||||
bool includeImpureInfo,
|
||||
HashFormat hashFormat) const override;
|
||||
static NarInfo fromJSON(
|
||||
const Store & store,
|
||||
const StorePath & path,
|
||||
const nlohmann::json & json);
|
||||
nlohmann::json toJSON(const Store & store, bool includeImpureInfo, HashFormat hashFormat) const override;
|
||||
static NarInfo fromJSON(const Store & store, const StorePath & path, const nlohmann::json & json);
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -23,11 +23,13 @@ typedef std::string OutputName;
|
|||
*/
|
||||
typedef std::string_view OutputNameView;
|
||||
|
||||
struct OutputsSpec {
|
||||
struct OutputsSpec
|
||||
{
|
||||
/**
|
||||
* A non-empty set of outputs, specified by name
|
||||
*/
|
||||
struct Names : std::set<OutputName, std::less<>> {
|
||||
struct Names : std::set<OutputName, std::less<>>
|
||||
{
|
||||
private:
|
||||
using BaseType = std::set<OutputName, std::less<>>;
|
||||
|
||||
|
|
@ -38,14 +40,18 @@ struct OutputsSpec {
|
|||
|
||||
Names(const BaseType & s)
|
||||
: BaseType(s)
|
||||
{ assert(!empty()); }
|
||||
{
|
||||
assert(!empty());
|
||||
}
|
||||
|
||||
/**
|
||||
* Needs to be "inherited manually"
|
||||
*/
|
||||
Names(BaseType && s)
|
||||
: BaseType(std::move(s))
|
||||
{ assert(!empty()); }
|
||||
{
|
||||
assert(!empty());
|
||||
}
|
||||
|
||||
/* This set should always be non-empty, so we delete this
|
||||
constructor in order make creating empty ones by mistake harder.
|
||||
|
|
@ -56,15 +62,18 @@ struct OutputsSpec {
|
|||
/**
|
||||
* The set of all outputs, without needing to name them explicitly
|
||||
*/
|
||||
struct All : std::monostate { };
|
||||
struct All : std::monostate
|
||||
{};
|
||||
|
||||
typedef std::variant<All, Names> Raw;
|
||||
|
||||
Raw raw;
|
||||
|
||||
bool operator == (const OutputsSpec &) const = default;
|
||||
bool operator==(const OutputsSpec &) const = default;
|
||||
|
||||
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
|
||||
bool operator < (const OutputsSpec & other) const {
|
||||
bool operator<(const OutputsSpec & other) const
|
||||
{
|
||||
return raw < other.raw;
|
||||
}
|
||||
|
||||
|
|
@ -97,17 +106,20 @@ struct OutputsSpec {
|
|||
std::string to_string() const;
|
||||
};
|
||||
|
||||
struct ExtendedOutputsSpec {
|
||||
struct Default : std::monostate { };
|
||||
struct ExtendedOutputsSpec
|
||||
{
|
||||
struct Default : std::monostate
|
||||
{};
|
||||
|
||||
using Explicit = OutputsSpec;
|
||||
|
||||
typedef std::variant<Default, Explicit> Raw;
|
||||
|
||||
Raw raw;
|
||||
|
||||
bool operator == (const ExtendedOutputsSpec &) const = default;
|
||||
bool operator==(const ExtendedOutputsSpec &) const = default;
|
||||
// TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
|
||||
bool operator < (const ExtendedOutputsSpec &) const;
|
||||
bool operator<(const ExtendedOutputsSpec &) const;
|
||||
|
||||
MAKE_WRAPPER_CONSTRUCTOR(ExtendedOutputsSpec);
|
||||
|
||||
|
|
@ -126,7 +138,7 @@ struct ExtendedOutputsSpec {
|
|||
std::string to_string() const;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
||||
JSON_IMPL(OutputsSpec)
|
||||
JSON_IMPL(ExtendedOutputsSpec)
|
||||
|
|
|
|||
|
|
@ -40,4 +40,4 @@ struct StructuredAttrs
|
|||
static std::string writeShell(const nlohmann::json & prepared);
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -11,10 +11,8 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
|
||||
class Store;
|
||||
|
||||
|
||||
struct SubstitutablePathInfo
|
||||
{
|
||||
std::optional<StorePath> deriver;
|
||||
|
|
@ -31,7 +29,6 @@ struct SubstitutablePathInfo
|
|||
|
||||
using SubstitutablePathInfos = std::map<StorePath, SubstitutablePathInfo>;
|
||||
|
||||
|
||||
/**
|
||||
* Information about a store object.
|
||||
*
|
||||
|
|
@ -103,35 +100,32 @@ struct UnkeyedValidPathInfo
|
|||
|
||||
UnkeyedValidPathInfo(const UnkeyedValidPathInfo & other) = default;
|
||||
|
||||
UnkeyedValidPathInfo(Hash narHash) : narHash(narHash) { };
|
||||
UnkeyedValidPathInfo(Hash narHash)
|
||||
: narHash(narHash) {};
|
||||
|
||||
bool operator == (const UnkeyedValidPathInfo &) const noexcept;
|
||||
bool operator==(const UnkeyedValidPathInfo &) const noexcept;
|
||||
|
||||
/**
|
||||
* @todo return `std::strong_ordering` once `id` is removed
|
||||
*/
|
||||
std::weak_ordering operator <=> (const UnkeyedValidPathInfo &) const noexcept;
|
||||
std::weak_ordering operator<=>(const UnkeyedValidPathInfo &) const noexcept;
|
||||
|
||||
virtual ~UnkeyedValidPathInfo() { }
|
||||
virtual ~UnkeyedValidPathInfo() {}
|
||||
|
||||
/**
|
||||
* @param includeImpureInfo If true, variable elements such as the
|
||||
* registration time are included.
|
||||
*/
|
||||
virtual nlohmann::json toJSON(
|
||||
const Store & store,
|
||||
bool includeImpureInfo,
|
||||
HashFormat hashFormat) const;
|
||||
static UnkeyedValidPathInfo fromJSON(
|
||||
const Store & store,
|
||||
const nlohmann::json & json);
|
||||
virtual nlohmann::json toJSON(const Store & store, bool includeImpureInfo, HashFormat hashFormat) const;
|
||||
static UnkeyedValidPathInfo fromJSON(const Store & store, const nlohmann::json & json);
|
||||
};
|
||||
|
||||
struct ValidPathInfo : UnkeyedValidPathInfo {
|
||||
struct ValidPathInfo : UnkeyedValidPathInfo
|
||||
{
|
||||
StorePath path;
|
||||
|
||||
bool operator == (const ValidPathInfo &) const = default;
|
||||
auto operator <=> (const ValidPathInfo &) const = default;
|
||||
bool operator==(const ValidPathInfo &) const = default;
|
||||
auto operator<=>(const ValidPathInfo &) const = default;
|
||||
|
||||
/**
|
||||
* Return a fingerprint of the store path to be used in binary
|
||||
|
|
@ -177,11 +171,14 @@ struct ValidPathInfo : UnkeyedValidPathInfo {
|
|||
*/
|
||||
Strings shortRefs() const;
|
||||
|
||||
ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(std::move(path)) { };
|
||||
ValidPathInfo(const StorePath & path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(path) { };
|
||||
ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info)
|
||||
: UnkeyedValidPathInfo(info)
|
||||
, path(std::move(path)) {};
|
||||
ValidPathInfo(const StorePath & path, UnkeyedValidPathInfo info)
|
||||
: UnkeyedValidPathInfo(info)
|
||||
, path(path) {};
|
||||
|
||||
ValidPathInfo(const Store & store,
|
||||
std::string_view name, ContentAddressWithReferences && ca, Hash narHash);
|
||||
ValidPathInfo(const Store & store, std::string_view name, ContentAddressWithReferences && ca, Hash narHash);
|
||||
};
|
||||
|
||||
static_assert(std::is_move_assignable_v<ValidPathInfo>);
|
||||
|
|
@ -191,4 +188,4 @@ static_assert(std::is_move_constructible_v<ValidPathInfo>);
|
|||
|
||||
using ValidPathInfos = std::map<StorePath, ValidPathInfo>;
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -23,4 +23,4 @@ public:
|
|||
StorePathSet getResultPaths();
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@
|
|||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static constexpr std::string_view nameRegexStr =
|
||||
// This uses a negative lookahead: (?!\.\.?(-|$))
|
||||
// - deny ".", "..", or those strings followed by '-'
|
||||
// - when it's not those, start again at the start of the input and apply the next regex, which is [0-9a-zA-Z\+\-\._\?=]+
|
||||
// - when it's not those, start again at the start of the input and apply the next regex, which is
|
||||
// [0-9a-zA-Z\+\-\._\?=]+
|
||||
R"((?!\.\.?(-|$))[0-9a-zA-Z\+\-\._\?=]+)";
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -45,4 +45,4 @@ class Store;
|
|||
|
||||
StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -12,7 +12,8 @@ struct Hash;
|
|||
/**
|
||||
* Check whether a name is a valid store path name.
|
||||
*
|
||||
* @throws BadStorePathName if the name is invalid. The message is of the format "name %s is not valid, for this specific reason".
|
||||
* @throws BadStorePathName if the name is invalid. The message is of the format "name %s is not valid, for this
|
||||
* specific reason".
|
||||
*/
|
||||
void checkName(std::string_view name);
|
||||
|
||||
|
|
@ -49,8 +50,8 @@ public:
|
|||
return baseName;
|
||||
}
|
||||
|
||||
bool operator == (const StorePath & other) const noexcept = default;
|
||||
auto operator <=> (const StorePath & other) const noexcept = default;
|
||||
bool operator==(const StorePath & other) const noexcept = default;
|
||||
auto operator<=>(const StorePath & other) const noexcept = default;
|
||||
|
||||
/**
|
||||
* Check whether a file name ends with the extension for derivations.
|
||||
|
|
@ -86,15 +87,17 @@ typedef std::vector<StorePath> StorePaths;
|
|||
*/
|
||||
constexpr std::string_view drvExtension = ".drv";
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
||||
namespace std {
|
||||
|
||||
template<> struct hash<nix::StorePath> {
|
||||
template<>
|
||||
struct hash<nix::StorePath>
|
||||
{
|
||||
std::size_t operator()(const nix::StorePath & path) const noexcept
|
||||
{
|
||||
return * (std::size_t *) path.to_string().data();
|
||||
return *(std::size_t *) path.to_string().data();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace std
|
||||
|
|
|
|||
|
|
@ -30,11 +30,8 @@ private:
|
|||
|
||||
public:
|
||||
PathLocks();
|
||||
PathLocks(const PathSet & paths,
|
||||
const std::string & waitMsg = "");
|
||||
bool lockPaths(const PathSet & _paths,
|
||||
const std::string & waitMsg = "",
|
||||
bool wait = true);
|
||||
PathLocks(const PathSet & paths, const std::string & waitMsg = "");
|
||||
bool lockPaths(const PathSet & _paths, const std::string & waitMsg = "", bool wait = true);
|
||||
~PathLocks();
|
||||
void unlock();
|
||||
void setDeletion(bool deletePaths);
|
||||
|
|
@ -54,4 +51,4 @@ struct FdLock
|
|||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ namespace nix {
|
|||
typedef std::pair<dev_t, ino_t> Inode;
|
||||
typedef std::set<Inode> InodesSeen;
|
||||
|
||||
|
||||
/**
|
||||
* "Fix", or canonicalise, the meta-data of the files in a store path
|
||||
* after it has been built. In particular:
|
||||
|
|
@ -40,12 +39,13 @@ void canonicalisePathMetaData(
|
|||
void canonicalisePathMetaData(
|
||||
const Path & path
|
||||
#ifndef _WIN32
|
||||
, std::optional<std::pair<uid_t, uid_t>> uidRange = std::nullopt
|
||||
,
|
||||
std::optional<std::pair<uid_t, uid_t>> uidRange = std::nullopt
|
||||
#endif
|
||||
);
|
||||
);
|
||||
|
||||
void canonicaliseTimestampAndPermissions(const Path & path);
|
||||
|
||||
MakeError(PathInUse, Error);
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -13,12 +13,10 @@
|
|||
#include <optional>
|
||||
#include <time.h>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
class StorePath;
|
||||
|
||||
|
||||
/**
|
||||
* A positive number identifying a generation for a given profile.
|
||||
*
|
||||
|
|
@ -66,7 +64,6 @@ struct Generation
|
|||
*/
|
||||
typedef std::list<Generation> Generations;
|
||||
|
||||
|
||||
/**
|
||||
* Find all generations for the given profile.
|
||||
*
|
||||
|
|
@ -119,7 +116,8 @@ void deleteGeneration(const Path & profile, GenerationNumber gen);
|
|||
/**
|
||||
* Delete the given set of generations.
|
||||
*
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to
|
||||
* delete.
|
||||
*
|
||||
* @param gensToDelete The generations to delete, specified by a set of
|
||||
* numbers.
|
||||
|
|
@ -135,7 +133,8 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
|
|||
/**
|
||||
* Delete generations older than `max` passed the current generation.
|
||||
*
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to
|
||||
* delete.
|
||||
*
|
||||
* @param max How many generations to keep up to the current one. Must
|
||||
* be at least 1 so we don't delete the current one.
|
||||
|
|
@ -148,7 +147,8 @@ void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bo
|
|||
/**
|
||||
* Delete all generations other than the current one
|
||||
*
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to
|
||||
* delete.
|
||||
*
|
||||
* @param dryRun Log what would be deleted instead of actually doing
|
||||
* so.
|
||||
|
|
@ -159,7 +159,8 @@ void deleteOldGenerations(const Path & profile, bool dryRun);
|
|||
* Delete generations older than `t`, except for the most recent one
|
||||
* older than `t`.
|
||||
*
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
|
||||
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to
|
||||
* delete.
|
||||
*
|
||||
* @param dryRun Log what would be deleted instead of actually doing
|
||||
* so.
|
||||
|
|
@ -185,10 +186,7 @@ void switchLink(Path link, Path target);
|
|||
* Roll back a profile to the specified generation, or to the most
|
||||
* recent one older than the current.
|
||||
*/
|
||||
void switchGeneration(
|
||||
const Path & profile,
|
||||
std::optional<GenerationNumber> dstGen,
|
||||
bool dryRun);
|
||||
void switchGeneration(const Path & profile, std::optional<GenerationNumber> dstGen, bool dryRun);
|
||||
|
||||
/**
|
||||
* Ensure exclusive access to a profile. Any command that modifies
|
||||
|
|
@ -237,4 +235,4 @@ Path rootChannelsDir();
|
|||
*/
|
||||
Path getDefaultProfile();
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -21,7 +21,8 @@ struct OutputsSpec;
|
|||
* This is similar to a `DerivedPath::Opaque`, but the derivation is
|
||||
* identified by its "hash modulo" instead of by its store path.
|
||||
*/
|
||||
struct DrvOutput {
|
||||
struct DrvOutput
|
||||
{
|
||||
/**
|
||||
* The hash modulo of the derivation.
|
||||
*
|
||||
|
|
@ -39,14 +40,17 @@ struct DrvOutput {
|
|||
std::string to_string() const;
|
||||
|
||||
std::string strHash() const
|
||||
{ return drvHash.to_string(HashFormat::Base16, true); }
|
||||
{
|
||||
return drvHash.to_string(HashFormat::Base16, true);
|
||||
}
|
||||
|
||||
static DrvOutput parse(const std::string &);
|
||||
|
||||
GENERATE_CMP(DrvOutput, me->drvHash, me->outputName);
|
||||
};
|
||||
|
||||
struct Realisation {
|
||||
struct Realisation
|
||||
{
|
||||
DrvOutput id;
|
||||
StorePath outPath;
|
||||
|
||||
|
|
@ -61,7 +65,7 @@ struct Realisation {
|
|||
std::map<DrvOutput, StorePath> dependentRealisations;
|
||||
|
||||
nlohmann::json toJSON() const;
|
||||
static Realisation fromJSON(const nlohmann::json& json, const std::string& whence);
|
||||
static Realisation fromJSON(const nlohmann::json & json, const std::string & whence);
|
||||
|
||||
std::string fingerprint() const;
|
||||
void sign(const Signer &);
|
||||
|
|
@ -73,7 +77,10 @@ struct Realisation {
|
|||
|
||||
bool isCompatibleWith(const Realisation & other) const;
|
||||
|
||||
StorePath getPath() const { return outPath; }
|
||||
StorePath getPath() const
|
||||
{
|
||||
return outPath;
|
||||
}
|
||||
|
||||
GENERATE_CMP(Realisation, me->id, me->outPath);
|
||||
};
|
||||
|
|
@ -100,22 +107,25 @@ typedef std::map<DrvOutput, Realisation> DrvOutputs;
|
|||
*
|
||||
* Moves the `outputs` input.
|
||||
*/
|
||||
SingleDrvOutputs filterDrvOutputs(const OutputsSpec&, SingleDrvOutputs&&);
|
||||
SingleDrvOutputs filterDrvOutputs(const OutputsSpec &, SingleDrvOutputs &&);
|
||||
|
||||
|
||||
struct OpaquePath {
|
||||
struct OpaquePath
|
||||
{
|
||||
StorePath path;
|
||||
|
||||
StorePath getPath() const { return path; }
|
||||
StorePath getPath() const
|
||||
{
|
||||
return path;
|
||||
}
|
||||
|
||||
GENERATE_CMP(OpaquePath, me->path);
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* A store path with all the history of how it went into the store
|
||||
*/
|
||||
struct RealisedPath {
|
||||
struct RealisedPath
|
||||
{
|
||||
/*
|
||||
* A path is either the result of the realisation of a derivation or
|
||||
* an opaque blob that has been directly added to the store
|
||||
|
|
@ -125,17 +135,24 @@ struct RealisedPath {
|
|||
|
||||
using Set = std::set<RealisedPath>;
|
||||
|
||||
RealisedPath(StorePath path) : raw(OpaquePath{path}) {}
|
||||
RealisedPath(Realisation r) : raw(r) {}
|
||||
RealisedPath(StorePath path)
|
||||
: raw(OpaquePath{path})
|
||||
{
|
||||
}
|
||||
|
||||
RealisedPath(Realisation r)
|
||||
: raw(r)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the raw store path associated to this
|
||||
*/
|
||||
StorePath path() const;
|
||||
|
||||
void closure(Store& store, Set& ret) const;
|
||||
static void closure(Store& store, const Set& startPaths, Set& ret);
|
||||
Set closure(Store& store) const;
|
||||
void closure(Store & store, Set & ret) const;
|
||||
static void closure(Store & store, const Set & startPaths, Set & ret);
|
||||
Set closure(Store & store) const;
|
||||
|
||||
GENERATE_CMP(RealisedPath, me->raw);
|
||||
};
|
||||
|
|
@ -145,13 +162,17 @@ class MissingRealisation : public Error
|
|||
public:
|
||||
MissingRealisation(DrvOutput & outputId)
|
||||
: MissingRealisation(outputId.outputName, outputId.strHash())
|
||||
{}
|
||||
{
|
||||
}
|
||||
|
||||
MissingRealisation(std::string_view drv, OutputName outputName)
|
||||
: Error( "cannot operate on output '%s' of the "
|
||||
"unbuilt derivation '%s'",
|
||||
outputName,
|
||||
drv)
|
||||
{}
|
||||
: Error(
|
||||
"cannot operate on output '%s' of the "
|
||||
"unbuilt derivation '%s'",
|
||||
outputName,
|
||||
drv)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -27,9 +27,8 @@ class RemoteFSAccessor : public SourceAccessor
|
|||
|
||||
public:
|
||||
|
||||
RemoteFSAccessor(ref<Store> store,
|
||||
bool requireValidPath = true,
|
||||
const /* FIXME: use std::optional */ Path & cacheDir = "");
|
||||
RemoteFSAccessor(
|
||||
ref<Store> store, bool requireValidPath = true, const /* FIXME: use std::optional */ Path & cacheDir = "");
|
||||
|
||||
std::optional<Stat> maybeLstat(const CanonPath & path) override;
|
||||
|
||||
|
|
@ -40,4 +39,4 @@ public:
|
|||
std::string readLink(const CanonPath & path) override;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -15,8 +15,7 @@ namespace nix {
|
|||
* Contains `Source` and `Sink` for actual communication, along with
|
||||
* other information learned when negotiating the connection.
|
||||
*/
|
||||
struct RemoteStore::Connection : WorkerProto::BasicClientConnection,
|
||||
WorkerProto::ClientHandshakeInfo
|
||||
struct RemoteStore::Connection : WorkerProto::BasicClientConnection, WorkerProto::ClientHandshakeInfo
|
||||
{
|
||||
/**
|
||||
* Time this connection was established.
|
||||
|
|
@ -38,20 +37,29 @@ struct RemoteStore::ConnectionHandle
|
|||
|
||||
ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
|
||||
: handle(std::move(handle))
|
||||
{ }
|
||||
{
|
||||
}
|
||||
|
||||
ConnectionHandle(ConnectionHandle && h) noexcept
|
||||
: handle(std::move(h.handle))
|
||||
{ }
|
||||
{
|
||||
}
|
||||
|
||||
~ConnectionHandle();
|
||||
|
||||
RemoteStore::Connection & operator * () { return *handle; }
|
||||
RemoteStore::Connection * operator -> () { return &*handle; }
|
||||
RemoteStore::Connection & operator*()
|
||||
{
|
||||
return *handle;
|
||||
}
|
||||
|
||||
RemoteStore::Connection * operator->()
|
||||
{
|
||||
return &*handle;
|
||||
}
|
||||
|
||||
void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true);
|
||||
|
||||
void withFramedSink(std::function<void(Sink & sink)> fun);
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace nix
|
||||
|
|
|
|||
|
|
@ -8,24 +8,24 @@
|
|||
#include "nix/store/gc-store.hh"
|
||||
#include "nix/store/log-store.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
class Pipe;
|
||||
class Pid;
|
||||
struct FdSink;
|
||||
struct FdSource;
|
||||
template<typename T> class Pool;
|
||||
template<typename T>
|
||||
class Pool;
|
||||
|
||||
struct RemoteStoreConfig : virtual StoreConfig
|
||||
{
|
||||
using StoreConfig::StoreConfig;
|
||||
|
||||
const Setting<int> maxConnections{this, 1, "max-connections",
|
||||
"Maximum number of concurrent connections to the Nix daemon."};
|
||||
const Setting<int> maxConnections{
|
||||
this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."};
|
||||
|
||||
const Setting<unsigned int> maxConnectionAge{this,
|
||||
const Setting<unsigned int> maxConnectionAge{
|
||||
this,
|
||||
std::numeric_limits<unsigned int>::max(),
|
||||
"max-connection-age",
|
||||
"Maximum age of a connection before it is closed."};
|
||||
|
|
@ -35,10 +35,7 @@ struct RemoteStoreConfig : virtual StoreConfig
|
|||
* \todo RemoteStore is a misnomer - should be something like
|
||||
* DaemonStore.
|
||||
*/
|
||||
struct RemoteStore :
|
||||
public virtual Store,
public virtual GcStore,
public virtual LogStore
struct RemoteStore : public virtual Store, public virtual GcStore, public virtual LogStore
{
using Config = RemoteStoreConfig;

@@ -50,13 +47,12 @@ struct RemoteStore :
bool isValidPathUncached(const StorePath & path) override;
StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override;
StorePathSet queryAllValidPaths() override;
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
void queryPathInfoUncached(
const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;

@@ -64,24 +60,24 @@ struct RemoteStore :
StorePathSet queryDerivationOutputs(const StorePath & path) override;
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path, Store * evalStore = nullptr) override;
std::map<std::string, std::optional<StorePath>>
queryPartialDerivationOutputMap(const StorePath & path, Store * evalStore = nullptr) override;
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;
void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) override;
void querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos) override;
/**
* Add a content-addressable store path. `dump` will be drained.
*/
ref<const ValidPathInfo> addCAToStore(
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair);
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair);
/**
* Add a content-addressable store path. `dump` will be drained.

@@ -95,34 +91,25 @@ struct RemoteStore :
const StorePathSet & references = StorePathSet(),
RepairFlag repair = NoRepair) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs) override;
void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs) override;
void addMultipleToStore(
Source & source,
RepairFlag repair,
CheckSigsFlag checkSigs) override;
void addMultipleToStore(Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override;
void addMultipleToStore(
PathsSource && pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs) override;
void
addMultipleToStore(PathsSource && pathsToCopy, Activity & act, RepairFlag repair, CheckSigsFlag checkSigs) override;
void registerDrvOutput(const Realisation & info) override;
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void queryRealisationUncached(
const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
void
buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
std::vector<KeyedBuildResult> buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode,
std::shared_ptr<Store> evalStore) override;
const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override;
void ensurePath(const StorePath & path) override;

@@ -145,7 +132,9 @@ struct RemoteStore :
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }
{
unsupported("repairPath");
}
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;

@@ -193,9 +182,7 @@ private:
std::atomic_bool failed{false};
void copyDrvsFromEvalStore(
const std::vector<DerivedPath> & paths,
std::shared_ptr<Store> evalStore);
void copyDrvsFromEvalStore(const std::vector<DerivedPath> & paths, std::shared_ptr<Store> evalStore);
};
}
} // namespace nix
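The declarations above only changed shape, not behaviour. As a quick orientation, here is a minimal sketch (editor's illustration, not text from the commit) of calling one of the reflowed signatures; the include path and the surrounding helper function are assumptions:

#include <cstddef>
#include "nix/store/store-api.hh" // assumed header location

// Count how many of the candidate paths are valid, without substituting.
std::size_t countValidPaths(nix::Store & store, const nix::StorePathSet & candidates)
{
    nix::StorePathSet valid = store.queryValidPaths(candidates, nix::NoSubstitute);
    return valid.size();
}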
@@ -57,4 +57,4 @@ struct RestrictionContext
*/
ref<Store> makeRestrictedStore(ref<LocalStore::Config> config, ref<LocalStore> next, RestrictionContext & context);
}
} // namespace nix

@@ -130,6 +130,6 @@ struct S3BinaryCacheStore : virtual BinaryCacheStore
virtual const Stats & getS3Stats() = 0;
};
}
} // namespace nix
#endif

@@ -3,13 +3,22 @@
#include "store-config-private.hh"
#if NIX_WITH_S3_SUPPORT
#include "nix/util/ref.hh"
# include "nix/util/ref.hh"
#include <optional>
#include <string>
# include <optional>
# include <string>
namespace Aws { namespace Client { struct ClientConfiguration; } }
namespace Aws { namespace S3 { class S3Client; } }
namespace Aws {
namespace Client {
struct ClientConfiguration;
}
} // namespace Aws
namespace Aws {
namespace S3 {
class S3Client;
}
} // namespace Aws
namespace nix {

@@ -18,9 +27,14 @@ struct S3Helper
ref<Aws::Client::ClientConfiguration> config;
ref<Aws::S3::S3Client> client;
S3Helper(const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint);
S3Helper(
const std::string & profile,
const std::string & region,
const std::string & scheme,
const std::string & endpoint);
ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint);
ref<Aws::Client::ClientConfiguration>
makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint);
struct FileTransferResult
{

@@ -28,10 +42,9 @@ struct S3Helper
unsigned int durationMs;
};
FileTransferResult getObject(
const std::string & bucketName, const std::string & key);
FileTransferResult getObject(const std::string & bucketName, const std::string & key);
};
}
} // namespace nix
#endif

@@ -105,4 +105,4 @@ struct ServeProto::BasicServerConnection
static ServeProto::Version handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion);
};
}
} // namespace nix
@@ -15,14 +15,15 @@ namespace nix {
/* protocol-agnostic templates */
#define SERVE_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T ServeProto::Serialise< T >::read(const StoreDirConfig & store, ServeProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<ServeProto, T >::read(store, conn); \
} \
TEMPLATE void ServeProto::Serialise< T >::write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<ServeProto, T >::write(store, conn, t); \
#define SERVE_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T ServeProto::Serialise<T>::read(const StoreDirConfig & store, ServeProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<ServeProto, T>::read(store, conn); \
} \
TEMPLATE void ServeProto::Serialise<T>::write( \
const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<ServeProto, T>::write(store, conn, t); \
}
SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)

@@ -44,17 +45,15 @@ struct ServeProto::Serialise
{
static T read(const StoreDirConfig & store, ServeProto::ReadConn conn)
{
return CommonProto::Serialise<T>::read(store,
CommonProto::ReadConn { .from = conn.from });
return CommonProto::Serialise<T>::read(store, CommonProto::ReadConn{.from = conn.from});
}
static void write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t)
{
CommonProto::Serialise<T>::write(store,
CommonProto::WriteConn { .to = conn.to },
t);
CommonProto::Serialise<T>::write(store, CommonProto::WriteConn{.to = conn.to}, t);
}
};
/* protocol-specific templates */
}
} // namespace nix
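To make the reflowed macro easier to follow, this is roughly what the SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>) instantiation above expands to (expanded by hand from the macro body; editor's illustration, not part of the commit):

template<typename T>
std::vector<T> ServeProto::Serialise<std::vector<T>>::read(const StoreDirConfig & store, ServeProto::ReadConn conn)
{
    // Delegates to the shared length-prefixed helper, exactly as the macro body does.
    return LengthPrefixedProtoHelper<ServeProto, std::vector<T>>::read(store, conn);
}

template<typename T>
void ServeProto::Serialise<std::vector<T>>::write(
    const StoreDirConfig & store, ServeProto::WriteConn conn, const std::vector<T> & t)
{
    LengthPrefixedProtoHelper<ServeProto, std::vector<T>>::write(store, conn, t);
}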
@@ -12,7 +12,6 @@ namespace nix {
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
struct StoreDirConfig;
struct Source;

@@ -20,7 +19,6 @@ struct Source;
struct BuildResult;
struct UnkeyedValidPathInfo;
/**
* The "serve protocol", used by ssh:// stores.
*

@@ -45,7 +43,8 @@ struct ServeProto
* A unidirectional read connection, to be used by the read half of the
* canonical serializers below.
*/
struct ReadConn {
struct ReadConn
{
Source & from;
Version version;
};

@@ -54,7 +53,8 @@ struct ServeProto
* A unidirectional write connection, to be used by the write half of the
* canonical serializers below.
*/
struct WriteConn {
struct WriteConn
{
Sink & to;
Version version;
};

@@ -104,8 +104,7 @@ struct ServeProto
struct BuildOptions;
};
enum struct ServeProto::Command : uint64_t
{
enum struct ServeProto::Command : uint64_t {
QueryValidPaths = 1,
QueryPathInfos = 2,
DumpStorePath = 3,

@@ -117,8 +116,8 @@ enum struct ServeProto::Command : uint64_t
AddToStoreNar = 9,
};
struct ServeProto::BuildOptions {
struct ServeProto::BuildOptions
{
/**
* Default value in this and every other field is so tests pass when
* testing older deserialisers which do not set all the fields.

@@ -130,7 +129,7 @@ struct ServeProto::BuildOptions {
bool enforceDeterminism = -1;
bool keepFailed = -1;
bool operator == (const ServeProto::BuildOptions &) const = default;
bool operator==(const ServeProto::BuildOptions &) const = default;
};
/**

@@ -139,7 +138,7 @@ struct ServeProto::BuildOptions {
* @todo Switch to using `ServeProto::Serialize` instead probably. But
* this was not done at this time so there would be less churn.
*/
inline Sink & operator << (Sink & sink, ServeProto::Command op)
inline Sink & operator<<(Sink & sink, ServeProto::Command op)
{
return sink << (uint64_t) op;
}

@@ -149,7 +148,7 @@ inline Sink & operator << (Sink & sink, ServeProto::Command op)
*
* @todo Perhaps render known opcodes more nicely.
*/
inline std::ostream & operator << (std::ostream & s, ServeProto::Command op)
inline std::ostream & operator<<(std::ostream & s, ServeProto::Command op)
{
return s << (uint64_t) op;
}

@@ -164,10 +163,10 @@ inline std::ostream & operator << (std::ostream & s, ServeProto::Command op)
* be legal specialization syntax. See below for what that looks like in
* practice.
*/
#define DECLARE_SERVE_SERIALISER(T) \
struct ServeProto::Serialise< T > \
{ \
static T read(const StoreDirConfig & store, ServeProto::ReadConn conn); \
#define DECLARE_SERVE_SERIALISER(T) \
struct ServeProto::Serialise<T> \
{ \
static T read(const StoreDirConfig & store, ServeProto::ReadConn conn); \
static void write(const StoreDirConfig & store, ServeProto::WriteConn conn, const T & t); \
};

@@ -190,4 +189,4 @@ template<typename K, typename V>
DECLARE_SERVE_SERIALISER(std::map<K COMMA_ V>);
#undef COMMA_
}
} // namespace nix
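The operator<< overloads above only lost whitespace around the operator name. As an illustration (editor's sketch, not from the commit) of how an opcode ends up on the wire with that operator:

// `conn` is assumed to be an open ServeProto::WriteConn; the Sink it wraps
// receives the opcode as a uint64_t, exactly as the operator above defines.
void requestValidPaths(nix::ServeProto::WriteConn conn)
{
    conn.to << nix::ServeProto::Command::QueryValidPaths;
}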
@@ -38,14 +38,27 @@ enum class SQLiteOpenMode {
struct SQLite
{
sqlite3 * db = 0;
SQLite() { }
SQLite() {}
SQLite(const Path & path, SQLiteOpenMode mode = SQLiteOpenMode::Normal);
SQLite(const SQLite & from) = delete;
SQLite& operator = (const SQLite & from) = delete;
SQLite & operator=(const SQLite & from) = delete;
// NOTE: This is noexcept since we are only copying and assigning raw pointers.
SQLite& operator = (SQLite && from) noexcept { db = from.db; from.db = 0; return *this; }
SQLite & operator=(SQLite && from) noexcept
{
db = from.db;
from.db = 0;
return *this;
}
~SQLite();
operator sqlite3 * () { return db; }
operator sqlite3 *()
{
return db;
}
/**
* Disable synchronous mode, set truncate journal mode.

@@ -65,11 +78,21 @@ struct SQLiteStmt
sqlite3 * db = 0;
sqlite3_stmt * stmt = 0;
std::string sql;
SQLiteStmt() { }
SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
SQLiteStmt() {}
SQLiteStmt(sqlite3 * db, const std::string & sql)
{
create(db, sql);
}
void create(sqlite3 * db, const std::string & s);
~SQLiteStmt();
operator sqlite3_stmt * () { return stmt; }
operator sqlite3_stmt *()
{
return stmt;
}
/**
* Helper for binding / executing statements.

@@ -89,9 +112,9 @@ struct SQLiteStmt
/**
* Bind the next parameter.
*/
Use & operator () (std::string_view value, bool notNull = true);
Use & operator () (const unsigned char * data, size_t len, bool notNull = true);
Use & operator () (int64_t value, bool notNull = true);
Use & operator()(std::string_view value, bool notNull = true);
Use & operator()(const unsigned char * data, size_t len, bool notNull = true);
Use & operator()(int64_t value, bool notNull = true);
Use & bind(); // null
int step();

@@ -134,7 +157,6 @@ struct SQLiteTxn
~SQLiteTxn();
};
struct SQLiteError : Error
{
std::string path;

@@ -142,21 +164,29 @@ struct SQLiteError : Error
int errNo, extendedErrNo, offset;
template<typename... Args>
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args &... args)
{
throw_(db, HintFmt(fs, args...));
}
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, HintFmt && hf);
SQLiteError(const char * path, const char * errMsg, int errNo, int extendedErrNo, int offset, HintFmt && hf);
protected:
template<typename... Args>
SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args)
: SQLiteError(path, errMsg, errNo, extendedErrNo, offset, HintFmt(fs, args...))
{ }
SQLiteError(
const char * path,
const char * errMsg,
int errNo,
int extendedErrNo,
int offset,
const std::string & fs,
const Args &... args)
: SQLiteError(path, errMsg, errNo, extendedErrNo, offset, HintFmt(fs, args...))
{
}
[[noreturn]] static void throw_(sqlite3 * db, HintFmt && hf);
};
MakeError(SQLiteBusy, SQLiteError);

@@ -181,4 +211,4 @@ T retrySQLite(F && fun)
}
}
}
} // namespace nix
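A brief usage sketch may help situate the reformatted pieces above (editor's illustration; the use() accessor, exec-style chaining and the table name are assumptions, since this excerpt shows SQLiteStmt and retrySQLite only in part):

// Prepare a statement, bind two parameters with the operator() overloads shown
// above, and wrap the execution in the retrySQLite helper from the same header.
void recordEntry(nix::SQLite & db, std::string_view key, int64_t value)
{
    nix::SQLiteStmt insert(db, "insert into Example (key, value) values (?, ?)");
    nix::retrySQLite<void>([&]() {
        auto use = insert.use(); // assumed accessor returning SQLiteStmt::Use
        use(key)(value);         // bind parameters in order
        use.step();              // execute the statement
    });
}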
@@ -60,4 +60,4 @@ struct MountedSSHStoreConfig : virtual SSHStoreConfig, virtual LocalFSStoreConfig
ref<Store> openStore() const override;
};
}
} // namespace nix

@@ -46,7 +46,9 @@ public:
std::string_view host,
std::string_view keyFile,
std::string_view sshPublicHostKey,
bool useMaster, bool compress, Descriptor logFD = INVALID_DESCRIPTOR);
bool useMaster,
bool compress,
Descriptor logFD = INVALID_DESCRIPTOR);
struct Connection
{

@@ -75,9 +77,7 @@ public:
* execute). Will not be used when "fake SSHing" to the local
* machine.
*/
std::unique_ptr<Connection> startCommand(
Strings && command,
Strings && extraSshArgs = {});
std::unique_ptr<Connection> startCommand(Strings && command, Strings && extraSshArgs = {});
};
}
} // namespace nix
@@ -23,7 +23,6 @@
#include <string>
#include <chrono>
namespace nix {
MakeError(SubstError, Error);

@@ -49,11 +48,10 @@ struct SourceAccessor;
class NarInfoDiskCache;
class Store;
typedef std::map<std::string, StorePath> OutputPathMap;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true };
/**

@@ -61,14 +59,13 @@ enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true };
*/
const uint32_t exportMagic = 0x4558494e;
enum BuildMode : uint8_t { bmNormal, bmRepair, bmCheck };
enum TrustedFlag : bool { NotTrusted = false, Trusted = true };
struct BuildResult;
struct KeyedBuildResult;
typedef std::map<StorePath, std::optional<ContentAddress>> StorePathCAMap;
/**

@@ -117,7 +114,7 @@ struct StoreConfig : public StoreDirConfig
StoreConfig() = delete;
virtual ~StoreConfig() { }
virtual ~StoreConfig() {}
static StringSet getDefaultSystemFeatures();

@@ -138,10 +135,13 @@ struct StoreConfig : public StoreDirConfig
return std::nullopt;
}
Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size",
"Size of the in-memory store path metadata cache."};
Setting<int> pathInfoCacheSize{
this, 65536, "path-info-cache-size", "Size of the in-memory store path metadata cache."};
Setting<bool> isTrusted{this, false, "trusted",
Setting<bool> isTrusted{
this,
false,
"trusted",
R"(
Whether paths from this store can be used as substitutes
even if they are not signed by a key listed in the

@@ -149,18 +149,26 @@ struct StoreConfig : public StoreDirConfig
setting.
)"};
Setting<int> priority{this, 0, "priority",
Setting<int> priority{
this,
0,
"priority",
R"(
Priority of this store when used as a [substituter](@docroot@/command-ref/conf-file.md#conf-substituters).
A lower value means a higher priority.
)"};
Setting<bool> wantMassQuery{this, false, "want-mass-query",
Setting<bool> wantMassQuery{
this,
false,
"want-mass-query",
R"(
Whether this store can be queried efficiently for path validity when used as a [substituter](@docroot@/command-ref/conf-file.md#conf-substituters).
)"};
Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
Setting<StringSet> systemFeatures{
this,
getDefaultSystemFeatures(),
"system-features",
R"(
Optional [system features](@docroot@/command-ref/conf-file.md#conf-system-features) available on the system this store uses to build derivations.

@@ -200,11 +208,15 @@ public:
/**
* @note Avoid churn, since we used to inherit from `Config`.
*/
operator const Config &() const { return config; }
operator const Config &() const
{
return config;
}
protected:
struct PathInfoCacheValue {
struct PathInfoCacheValue
{
/**
* Time of cache entry creation or update

@@ -226,8 +238,9 @@ protected:
* Past tense, because a path can only be assumed to exists when
* isKnownNow() && didExist()
*/
inline bool didExist() {
return value != nullptr;
inline bool didExist()
{
return value != nullptr;
}
};

@@ -249,7 +262,7 @@ public:
*/
virtual void init() {};
virtual ~Store() { }
virtual ~Store() {}
/**
* @todo move to `StoreConfig` one we store enough information in

@@ -290,8 +303,7 @@ public:
* Query which of the given paths is valid. Optionally, try to
* substitute missing paths.
*/
virtual StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute);
virtual StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute);
/**
* Query the set of all valid paths. Note that for some store

@@ -302,7 +314,9 @@ public:
* `std::variant<StorePath, HashPart>` to get rid of this hack.
*/
virtual StorePathSet queryAllValidPaths()
{ unsupported("queryAllValidPaths"); }
{
unsupported("queryAllValidPaths");
}
constexpr static const char * MissingName = "x";

@@ -315,8 +329,7 @@ public:
/**
* Asynchronous version of queryPathInfo().
*/
void queryPathInfo(const StorePath & path,
Callback<ref<const ValidPathInfo>> callback) noexcept;
void queryPathInfo(const StorePath & path, Callback<ref<const ValidPathInfo>> callback) noexcept;
/**
* Version of queryPathInfo() that only queries the local narinfo cache and not

@@ -336,9 +349,7 @@ public:
/**
* Asynchronous version of queryRealisation().
*/
void queryRealisation(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept;
void queryRealisation(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept;
/**
* Check whether the given valid path info is sufficiently attested, by

@@ -356,17 +367,17 @@ public:
return true;
}
virtual bool realisationIsUntrusted(const Realisation & )
virtual bool realisationIsUntrusted(const Realisation &)
{
return true;
}
protected:
virtual void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept = 0;
virtual void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept = 0;
virtual void
queryPathInfoUncached(const StorePath & path, Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept = 0;
virtual void
queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept = 0;
public:

@@ -375,7 +386,9 @@ public:
* The result is not cleared.
*/
virtual void queryReferrers(const StorePath & path, StorePathSet & referrers)
{ unsupported("queryReferrers"); }
{
unsupported("queryReferrers");
}
/**
* @return all currently valid derivations that have `path` as an

@@ -385,7 +398,10 @@ public:
* was actually used to produce `path`, which may not exist
* anymore.)
*/
virtual StorePathSet queryValidDerivers(const StorePath & path) { return {}; };
virtual StorePathSet queryValidDerivers(const StorePath & path)
{
return {};
};
/**
* Query the outputs of the derivation denoted by `path`.

@@ -397,9 +413,8 @@ public:
* derivation. All outputs are mentioned so ones missing the mapping
* are mapped to `std::nullopt`.
*/
virtual std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(
const StorePath & path,
Store * evalStore = nullptr);
virtual std::map<std::string, std::optional<StorePath>>
queryPartialDerivationOutputMap(const StorePath & path, Store * evalStore = nullptr);
/**
* Like `queryPartialDerivationOutputMap` but only considers

@@ -409,8 +424,8 @@ public:
* Just a helper function for implementing
* `queryPartialDerivationOutputMap`.
*/
virtual std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(
const StorePath & path);
virtual std::map<std::string, std::optional<StorePath>>
queryStaticPartialDerivationOutputMap(const StorePath & path);
/**
* Query the mapping outputName=>outputPath for the given derivation.

@@ -427,7 +442,10 @@ public:
/**
* Query which of the given paths have substitutes.
*/
virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths) { return {}; };
virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths)
{
return {};
};
/**
* Query substitute info (i.e. references, derivers and download

@@ -436,14 +454,16 @@ public:
* If a path does not have substitute info, it's omitted from the
* resulting ‘infos’ map.
*/
virtual void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos);
virtual void querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos);
/**
* Import a path into the store.
*/
virtual void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;
virtual void addToStore(
const ValidPathInfo & info,
Source & narSource,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs) = 0;
/**
* A list of paths infos along with a source providing the content

@@ -454,16 +474,10 @@ public:
/**
* Import multiple paths into the store.
*/
virtual void addMultipleToStore(
Source & source,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
virtual void addMultipleToStore(Source & source, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs);
virtual void addMultipleToStore(
PathsSource && pathsToCopy,
Activity & act,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
PathsSource && pathsToCopy, Activity & act, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs);
/**
* Copy the contents of a path to the store and register the

@@ -531,9 +545,14 @@ public:
* retrieve this information otherwise.
*/
virtual void registerDrvOutput(const Realisation & output)
{ unsupported("registerDrvOutput"); }
{
unsupported("registerDrvOutput");
}
virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs)
{ return registerDrvOutput(output); }
{
return registerDrvOutput(output);
}
/**
* Write a NAR dump of a store path.

@@ -601,8 +620,8 @@ public:
* up with multiple different versions of dependencies without
* explicitly choosing to allow it).
*/
virtual BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode = bmNormal);
virtual BuildResult
buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode = bmNormal);
/**
* Ensure that a path is valid. If it is not currently valid, it

@@ -616,28 +635,32 @@ public:
* The root disappears as soon as we exit.
*/
virtual void addTempRoot(const StorePath & path)
{ debug("not creating temporary root, store doesn't support GC"); }
{
debug("not creating temporary root, store doesn't support GC");
}
/**
* @return a string representing information about the path that
* can be loaded into the database using `nix-store --load-db` or
* `nix-store --register-validity`.
*/
std::string makeValidityRegistration(const StorePathSet & paths,
bool showDerivers, bool showHash);
std::string makeValidityRegistration(const StorePathSet & paths, bool showDerivers, bool showHash);
/**
* Optimise the disk space usage of the Nix store by hard-linking files
* with the same contents.
*/
virtual void optimiseStore() { };
virtual void optimiseStore() {};
/**
* Check the integrity of the Nix store.
*
* @return true if errors remain.
*/
virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair) { return false; };
virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair)
{
return false;
};
/**
* @return An object to access files in the Nix store.

@@ -655,7 +678,9 @@ public:
* not verified.
*/
virtual void addSignatures(const StorePath & storePath, const StringSet & sigs)
{ unsupported("addSignatures"); }
{
unsupported("addSignatures");
}
/**
* Add signatures to a ValidPathInfo or Realisation using the secret keys

@@ -693,13 +718,19 @@ public:
* `referrers` relation instead of the `references` relation is
* returned.
*/
virtual void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
virtual void computeFSClosure(
const StorePathSet & paths,
StorePathSet & out,
bool flipDirection = false,
bool includeOutputs = false,
bool includeDerivers = false);
void computeFSClosure(const StorePath & path,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
void computeFSClosure(
const StorePath & path,
StorePathSet & out,
bool flipDirection = false,
bool includeOutputs = false,
bool includeDerivers = false);
/**
* Given a set of paths that are to be built, return the set of

@@ -774,7 +805,7 @@ public:
* Establish a connection to the store, for store types that have
* a notion of connection. Otherwise this is a no-op.
*/
virtual void connect() { };
virtual void connect() {};
/**
* Get the protocol version of this store or it's connection.

@@ -794,7 +825,6 @@ public:
*/
virtual std::optional<TrustedFlag> isTrustedClient() = 0;
virtual Path toRealPath(const Path & storePath)
{
return storePath;

@@ -809,9 +839,12 @@ public:
* Synchronises the options of the client with those of the daemon
* (a no-op when there’s no daemon)
*/
virtual void setOptions() { }
virtual void setOptions() {}
virtual std::optional<std::string> getVersion() { return {}; }
virtual std::optional<std::string> getVersion()
{
return {};
}
protected:

@@ -828,10 +861,8 @@ protected:
{
throw Unsupported("operation '%s' is not supported by store '%s'", op, getUri());
}
};
/**
* Copy a path from one store to another.
*/

@@ -842,7 +873,6 @@ void copyStorePath(
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
/**
* Copy store paths from one store to another. The paths may be copied
* in parallel. They are copied in a topologically sorted order (i.e. if

@@ -852,14 +882,16 @@ void copyStorePath(
* @return a map of what each path was copied to the dstStore as.
*/
std::map<StorePath, StorePath> copyPaths(
Store & srcStore, Store & dstStore,
Store & srcStore,
Store & dstStore,
const std::set<RealisedPath> &,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
std::map<StorePath, StorePath> copyPaths(
Store & srcStore, Store & dstStore,
Store & srcStore,
Store & dstStore,
const StorePathSet & paths,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,

@@ -869,14 +901,16 @@ std::map<StorePath, StorePath> copyPaths(
* Copy the closure of `paths` from `srcStore` to `dstStore`.
*/
void copyClosure(
Store & srcStore, Store & dstStore,
Store & srcStore,
Store & dstStore,
const std::set<RealisedPath> & paths,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
void copyClosure(
Store & srcStore, Store & dstStore,
Store & srcStore,
Store & dstStore,
const StorePathSet & paths,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,

@@ -889,7 +923,6 @@ void copyClosure(
*/
void removeTempRoots();
/**
* Resolve the derived path completely, failing if any derivation output
* is unknown.

@@ -897,25 +930,18 @@ void removeTempRoots();
StorePath resolveDerivedPath(Store &, const SingleDerivedPath &, Store * evalStore = nullptr);
OutputPathMap resolveDerivedPath(Store &, const DerivedPath::Built &, Store * evalStore = nullptr);
/**
* Display a set of paths in human-readable form (i.e., between quotes
* and separated by commas).
*/
std::string showPaths(const PathSet & paths);
std::optional<ValidPathInfo> decodeValidPathInfo(
const Store & store,
std::istream & str,
std::optional<HashResult> hashGiven = std::nullopt);
std::optional<ValidPathInfo>
decodeValidPathInfo(const Store & store, std::istream & str, std::optional<HashResult> hashGiven = std::nullopt);
const ContentAddress * getDerivationCA(const BasicDerivation & drv);
std::map<DrvOutput, StorePath> drvOutputReferences(
Store & store,
const Derivation & drv,
const StorePath & outputPath,
Store * evalStore = nullptr);
std::map<DrvOutput, StorePath>
drvOutputReferences(Store & store, const Derivation & drv, const StorePath & outputPath, Store * evalStore = nullptr);
}
} // namespace nix
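As a final orientation for this header, a minimal sketch (editor's illustration, not part of the commit) of calling the reflowed copyClosure declaration above; the stores and root set are placeholders:

// Copy the closure of `roots` from one store to another, spelling out the
// default repair/signature/substitution flags for readability.
void mirrorClosure(nix::Store & src, nix::Store & dst, const nix::StorePathSet & roots)
{
    nix::copyClosure(src, dst, roots, nix::NoRepair, nix::CheckSigs, nix::NoSubstitute);
}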
@@ -21,4 +21,4 @@ T & require(Store & store)
return *castedStore;
}
}
} // namespace nix

@@ -10,7 +10,6 @@
#include <string>
#include <variant>
namespace nix {
struct SourcePath;

@@ -75,13 +74,10 @@ struct MixStoreDirMethods
/**
* Constructs a unique store path name.
*/
StorePath makeStorePath(std::string_view type,
std::string_view hash, std::string_view name) const;
StorePath makeStorePath(std::string_view type,
const Hash & hash, std::string_view name) const;
StorePath makeStorePath(std::string_view type, std::string_view hash, std::string_view name) const;
StorePath makeStorePath(std::string_view type, const Hash & hash, std::string_view name) const;
StorePath makeOutputPath(std::string_view id,
const Hash & hash, std::string_view name) const;
StorePath makeOutputPath(std::string_view id, const Hash & hash, std::string_view name) const;
StorePath makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const;

@@ -108,7 +104,9 @@ struct StoreDirConfigBase : Config
{
using Config::Config;
const PathSetting storeDir_{this, settings.nixStore,
const PathSetting storeDir_{
this,
settings.nixStore,
"store",
R"(
Logical location of the Nix store, usually

@@ -134,4 +132,4 @@ struct StoreDirConfig : StoreDirConfigBase, MixStoreDirMethods
virtual ~StoreDirConfig() = default;
};
}
} // namespace nix

@@ -40,4 +40,4 @@ ref<Store> openStore(
*/
std::list<ref<Store>> getDefaultSubstituters();
}
} // namespace nix

@@ -88,4 +88,4 @@ struct StoreReference
*/
std::pair<std::string, StoreReference::Params> splitUriAndParams(const std::string & uri);
}
} // namespace nix

@@ -85,4 +85,4 @@ struct RegisterStoreImplementation
}
};
}
} // namespace nix
@@ -7,10 +7,9 @@
namespace nix {
struct UDSRemoteStoreConfig :
std::enable_shared_from_this<UDSRemoteStoreConfig>,
virtual LocalFSStoreConfig,
virtual RemoteStoreConfig
struct UDSRemoteStoreConfig : std::enable_shared_from_this<UDSRemoteStoreConfig>,
virtual LocalFSStoreConfig,
virtual RemoteStoreConfig
{
// TODO(fzakaria): Delete this constructor once moved over to the factory pattern
// outlined in https://github.com/NixOS/nix/issues/10766

@@ -20,14 +19,14 @@ struct UDSRemoteStoreConfig :
/**
* @param authority is the socket path.
*/
UDSRemoteStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);
UDSRemoteStoreConfig(std::string_view scheme, std::string_view authority, const Params & params);
UDSRemoteStoreConfig(const Params & params);
static const std::string name() { return "Local Daemon Store"; }
static const std::string name()
{
return "Local Daemon Store";
}
static std::string doc();

@@ -40,14 +39,14 @@ struct UDSRemoteStoreConfig :
Path path;
static StringSet uriSchemes()
{ return {"unix"}; }
{
return {"unix"};
}
ref<Store> openStore() const override;
};
struct UDSRemoteStore :
virtual IndirectRootStore,
virtual RemoteStore
struct UDSRemoteStore : virtual IndirectRootStore, virtual RemoteStore
{
using Config = UDSRemoteStoreConfig;

@@ -58,10 +57,14 @@ struct UDSRemoteStore :
std::string getUri() override;
ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override
{ return LocalFSStore::getFSAccessor(requireValidPath); }
{
return LocalFSStore::getFSAccessor(requireValidPath);
}
void narFromPath(const StorePath & path, Sink & sink) override
{ LocalFSStore::narFromPath(path, sink); }
{
LocalFSStore::narFromPath(path, sink);
}
/**
* Implementation of `IndirectRootStore::addIndirectRoot()` which

@@ -84,4 +87,4 @@ private:
ref<RemoteStore::Connection> openConnection() override;
};
}
} // namespace nix
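The two small overrides above only gained braces; they still forward to LocalFSStore. A one-line caller sketch (editor's illustration, placeholders throughout) shows the unchanged behaviour:

// Stream the NAR serialisation of `path` into `sink` via the narFromPath
// override shown above, which delegates to LocalFSStore::narFromPath.
void dumpPath(nix::UDSRemoteStore & store, const nix::StorePath & path, nix::Sink & sink)
{
    store.narFromPath(path, sink);
}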
@@ -162,4 +162,4 @@ struct WorkerProto::BasicServerConnection : WorkerProto::BasicConnection
void postHandshake(const StoreDirConfig & store, const ClientHandshakeInfo & info);
};
}
} // namespace nix

@@ -15,14 +15,15 @@ namespace nix {
/* protocol-agnostic templates */
#define WORKER_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T WorkerProto::Serialise< T >::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<WorkerProto, T >::read(store, conn); \
} \
TEMPLATE void WorkerProto::Serialise< T >::write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<WorkerProto, T >::write(store, conn, t); \
#define WORKER_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
TEMPLATE T WorkerProto::Serialise<T>::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) \
{ \
return LengthPrefixedProtoHelper<WorkerProto, T>::read(store, conn); \
} \
TEMPLATE void WorkerProto::Serialise<T>::write( \
const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) \
{ \
LengthPrefixedProtoHelper<WorkerProto, T>::write(store, conn, t); \
}
WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)

@@ -44,17 +45,15 @@ struct WorkerProto::Serialise
{
static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn)
{
return CommonProto::Serialise<T>::read(store,
CommonProto::ReadConn { .from = conn.from });
return CommonProto::Serialise<T>::read(store, CommonProto::ReadConn{.from = conn.from});
}
static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t)
{
CommonProto::Serialise<T>::write(store,
CommonProto::WriteConn { .to = conn.to },
t);
CommonProto::Serialise<T>::write(store, CommonProto::WriteConn{.to = conn.to}, t);
}
};
/* protocol-specific templates */
}
} // namespace nix
Some files were not shown because too many files have changed in this diff.