
Merge remote-tracking branch 'origin/master' into best-effort-supplementary-groups

Ben Radford 2023-07-11 09:38:34 +01:00
commit 25b20b4ad2
No known key found for this signature in database
GPG key ID: 9DF5D4640AB888D5
236 changed files with 5449 additions and 2033 deletions

View file

@ -9,6 +9,7 @@
#include "archive.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "local-store.hh" // TODO remove, along with remaining downcasts
@ -1150,9 +1151,11 @@ HookReply DerivationGoal::tryBuildHook()
throw;
}
WorkerProto::WriteConn conn { hook->sink };
/* Tell the hook all the inputs that have to be copied to the
remote system. */
worker_proto::write(worker.store, hook->sink, inputPaths);
WorkerProto::write(worker.store, conn, inputPaths);
/* Tell the hooks the missing outputs that have to be copied back
from the remote system. */
@ -1163,7 +1166,7 @@ HookReply DerivationGoal::tryBuildHook()
if (buildMode != bmCheck && status.known && status.known->isValid()) continue;
missingOutputs.insert(outputName);
}
worker_proto::write(worker.store, hook->sink, missingOutputs);
WorkerProto::write(worker.store, conn, missingOutputs);
}
hook->sink = FdSink();
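
For orientation, here is a minimal sketch (not part of the diff) of the serialiser API this commit migrates to: typed values are written and read through explicit `WorkerProto::WriteConn`/`ReadConn` wrappers instead of the old `worker_proto::read`/`write` free functions. The helper function and its name are illustrative only.

```cpp
#include "store-api.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"

using namespace nix;

// Hypothetical helper: serialise a set of store paths to a sink and read one
// back from a source using the typed serialisers introduced here.
static void exchangePaths(Store & store, Sink & to, Source & from, const StorePathSet & paths)
{
    WorkerProto::WriteConn wconn { .to = to };
    WorkerProto::write(store, wconn, paths);

    WorkerProto::ReadConn rconn { .from = from };
    auto received = WorkerProto::Serialise<StorePathSet>::read(store, rconn);
    (void) received;
}
```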

View file

@ -31,11 +31,11 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
}
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
ex->status = worker.failingExitStatus();
throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
throw Error(worker.failingExitStatus(), "build of %s failed", showPaths(failed));
}
}
@ -102,15 +102,15 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
goal->ex->status = worker.failingExitStatus();
throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}
}
void LocalStore::repairPath(const StorePath & path)
void Store::repairPath(const StorePath & path)
{
Worker worker(*this, *this);
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
@ -128,7 +128,7 @@ void LocalStore::repairPath(const StorePath & path)
goals.insert(worker.makeDerivationGoal(*info->deriver, OutputsSpec::All { }, bmRepair));
worker.run(goals);
} else
throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path));
throw Error(worker.failingExitStatus(), "cannot repair path '%s'", printStorePath(path));
}
}

View file

@ -5,14 +5,14 @@ namespace nix {
HookInstance::HookInstance()
{
debug("starting build hook '%s'", settings.buildHook);
debug("starting build hook '%s'", concatStringsSep(" ", settings.buildHook.get()));
auto buildHookArgs = tokenizeString<std::list<std::string>>(settings.buildHook.get());
auto buildHookArgs = settings.buildHook.get();
if (buildHookArgs.empty())
throw Error("'build-hook' setting is empty");
auto buildHook = buildHookArgs.front();
auto buildHook = canonPath(buildHookArgs.front());
buildHookArgs.pop_front();
Strings args;

View file

@ -4,13 +4,12 @@
#include "worker.hh"
#include "builtins.hh"
#include "builtins/buildenv.hh"
#include "references.hh"
#include "path-references.hh"
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
@ -65,8 +64,9 @@ void handleDiffHook(
const Path & tryA, const Path & tryB,
const Path & drvPath, const Path & tmpDir)
{
auto diffHook = settings.diffHook;
if (diffHook != "" && settings.runDiffHook) {
auto & diffHookOpt = settings.diffHook.get();
if (diffHookOpt && settings.runDiffHook) {
auto & diffHook = *diffHookOpt;
try {
auto diffRes = runProgram(RunOptions {
.program = diffHook,
@ -357,7 +357,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
for (auto & [_, status] : initialOutputs) {
if (!status.known) continue;
if (buildMode != bmCheck && status.known->isValid()) continue;
auto p = worker.store.printStorePath(status.known->path);
auto p = worker.store.toRealPath(status.known->path);
if (pathExists(chrootRootDir + p))
renameFile((chrootRootDir + p), p);
}
@ -395,8 +395,9 @@ static void linkOrCopy(const Path & from, const Path & to)
bind-mount in this case?
It can also fail with EPERM in BeegFS v7 and earlier versions
or fail with EXDEV in OpenAFS
which don't allow hard-links to other directories */
if (errno != EMLINK && errno != EPERM)
if (errno != EMLINK && errno != EPERM && errno != EXDEV)
throw SysError("linking '%s' to '%s'", to, from);
copyPath(from, to);
}
@ -1423,7 +1424,8 @@ void LocalDerivationGoal::startDaemon()
Store::Params params;
params["path-info-cache-size"] = "0";
params["store"] = worker.store.storeDir;
params["root"] = getLocalStore().rootDir;
if (auto & optRoot = getLocalStore().rootDir.get())
params["root"] = *optRoot;
params["state"] = "/no-such-path";
params["log"] = "/no-such-path";
auto store = make_ref<RestrictedStore>(params,
@ -1452,7 +1454,7 @@ void LocalDerivationGoal::startDaemon()
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
if (!remote) {
if (errno == EINTR || errno == EAGAIN) continue;
if (errno == EINVAL) break;
if (errno == EINVAL || errno == ECONNABORTED) break;
throw SysError("accepting connection");
}
@ -1482,8 +1484,22 @@ void LocalDerivationGoal::startDaemon()
void LocalDerivationGoal::stopDaemon()
{
if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1)
throw SysError("shutting down daemon socket");
if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1) {
// According to the POSIX standard, the 'shutdown' function should
// return an ENOTCONN error when attempting to shut down a socket that
// hasn't been connected yet. This situation occurs when the 'accept'
// function is called on a socket without any accepted connections,
// leaving the socket unconnected. While Linux doesn't seem to produce
// an error for sockets that have only been accepted, more
// POSIX-compliant operating systems like OpenBSD, macOS, and others do
// return the ENOTCONN error. Therefore, we handle this error here to
// avoid raising an exception for compliant behaviour.
if (errno == ENOTCONN) {
daemonSocket.close();
} else {
throw SysError("shutting down daemon socket");
}
}
if (daemonThread.joinable())
daemonThread.join();
@ -1494,7 +1510,8 @@ void LocalDerivationGoal::stopDaemon()
thread.join();
daemonWorkerThreads.clear();
daemonSocket = -1;
// release the socket.
daemonSocket.close();
}
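
The ENOTCONN handling above boils down to the following standalone behaviour, shown here as a sketch outside the Nix codebase: on strictly POSIX-compliant systems, shutting down a listening socket that never accepted a connection fails with ENOTCONN, and that error can be treated as success.

```cpp
#include <sys/socket.h>
#include <cerrno>

// Returns true if the socket was shut down, or was never connected at all.
static bool shutdownQuietly(int fd)
{
    if (shutdown(fd, SHUT_RDWR) == -1)
        return errno == ENOTCONN; // expected for a never-connected socket
    return true;
}
```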
@ -1771,6 +1788,9 @@ void LocalDerivationGoal::runChild()
for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts" })
if (pathExists(path))
ss.push_back(path);
if (settings.caFile != "")
dirsInChroot.try_emplace("/etc/ssl/certs/ca-certificates.crt", settings.caFile, true);
}
for (auto & i : ss) dirsInChroot.emplace(i, i);
@ -2371,18 +2391,21 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
continue;
auto references = *referencesOpt;
auto rewriteOutput = [&]() {
auto rewriteOutput = [&](const StringMap & rewrites) {
/* Apply hash rewriting if necessary. */
if (!outputRewrites.empty()) {
if (!rewrites.empty()) {
debug("rewriting hashes in '%1%'; cross fingers", actualPath);
/* FIXME: this is in-memory. */
StringSink sink;
dumpPath(actualPath, sink);
/* FIXME: Is this actually streaming? */
auto source = sinkToSource([&](Sink & nextSink) {
RewritingSink rsink(rewrites, nextSink);
dumpPath(actualPath, rsink);
rsink.flush();
});
Path tmpPath = actualPath + ".tmp";
restorePath(tmpPath, *source);
deletePath(actualPath);
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
movePath(tmpPath, actualPath);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
@ -2431,7 +2454,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
"since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)",
actualPath);
}
rewriteOutput();
rewriteOutput(outputRewrites);
/* FIXME optimize and deduplicate with addToStore */
std::string oldHashPart { scratchPath->hashPart() };
HashModuloSink caSink { outputHash.hashType, oldHashPart };
@ -2469,16 +2492,14 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
Hash::dummy,
};
if (*scratchPath != newInfo0.path) {
// Also rewrite the output path
auto source = sinkToSource([&](Sink & nextSink) {
RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
dumpPath(actualPath, rsink2);
rsink2.flush();
});
Path tmpPath = actualPath + ".tmp";
restorePath(tmpPath, *source);
deletePath(actualPath);
movePath(tmpPath, actualPath);
// If the path has some self-references, we need to rewrite
// them.
// (note that this doesn't invalidate the ca hash we calculated
// above because it's computed *modulo the self-references*, so
// it already takes this rewrite into account).
rewriteOutput(
StringMap{{oldHashPart,
std::string(newInfo0.path.hashPart())}});
}
HashResult narHashAndSize = hashPath(htSHA256, actualPath);
@ -2500,7 +2521,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
outputRewrites.insert_or_assign(
std::string { scratchPath->hashPart() },
std::string { requiredFinalPath.hashPart() });
rewriteOutput();
rewriteOutput(outputRewrites);
auto narHashAndSize = hashPath(htSHA256, actualPath);
ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first };
newInfo0.narSize = narHashAndSize.second;

View file

@ -21,7 +21,8 @@ void setPersonality(std::string_view system)
&& (std::string_view(SYSTEM) == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| system == "armv7l-linux"
|| system == "armv6l-linux")
|| system == "armv6l-linux"
|| system == "armv5tel-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");

View file

@ -468,16 +468,9 @@ void Worker::waitForInput()
}
unsigned int Worker::exitStatus()
unsigned int Worker::failingExitStatus()
{
/*
* 1100100
* ^^^^
* |||`- timeout
* ||`-- output hash mismatch
* |`--- build failure
* `---- not deterministic
*/
// See API docs in header for explanation
unsigned int mask = 0;
bool buildFailure = permanentFailure || timedOut || hashMismatch;
if (buildFailure)

View file

@ -280,7 +280,28 @@ public:
*/
void waitForInput();
unsigned int exitStatus();
/**
* The exit status in case of failure.
*
* In the case of a build failure, the returned value follows this
* bitmask:
*
* ```
* 0b1100100
* ^^^^
* |||`- timeout
* ||`-- output hash mismatch
* |`--- build failure
* `---- not deterministic
* ```
*
* In other words, the failure code is at least 100 (0b1100100), but
* might also be greater.
*
* Otherwise (no build failure, but some other sort of failure by
* assumption), the returned value is 1.
*/
unsigned int failingExitStatus();
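
As an illustration of the bitmask documented above (not code from the diff), a caller could decode a failing exit status like this; the constants mirror the `0b1100100` layout in the comment.

```cpp
#include <cstdio>

// Interpret a failing exit status as described in the doc comment above.
static void describeFailure(unsigned int status)
{
    if (status < 0b1100100) return;                          // not a build-failure code
    std::printf("build failure (%u)\n", status);
    if (status & 0b0000001) std::printf("  - timed out\n");
    if (status & 0b0000010) std::printf("  - output hash mismatch\n");
    if (status & 0b0001000) std::printf("  - not deterministic\n");
}
```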
/**
* Check whether the given valid path exists and has the right

View file

@ -1,6 +1,7 @@
#include "daemon.hh"
#include "monitor-fd.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "store-cast.hh"
@ -259,13 +260,13 @@ struct ClientSettings
}
};
static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, Source & from)
static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, WorkerProto::ReadConn conn)
{
std::vector<DerivedPath> reqs;
if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
reqs = worker_proto::read(store, from, Phantom<std::vector<DerivedPath>> {});
reqs = WorkerProto::Serialise<std::vector<DerivedPath>>::read(store, conn);
} else {
for (auto & s : readStrings<Strings>(from))
for (auto & s : readStrings<Strings>(conn.from))
reqs.push_back(parsePathWithOutputs(store, s).toDerivedPath());
}
return reqs;
@ -273,11 +274,14 @@ static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int cli
static void performOp(TunnelLogger * logger, ref<Store> store,
TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
Source & from, BufferedSink & to, unsigned int op)
Source & from, BufferedSink & to, WorkerProto::Op op)
{
WorkerProto::ReadConn rconn { .from = from };
WorkerProto::WriteConn wconn { .to = to };
switch (op) {
case wopIsValidPath: {
case WorkerProto::Op::IsValidPath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
bool result = store->isValidPath(path);
@ -286,8 +290,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQueryValidPaths: {
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
case WorkerProto::Op::QueryValidPaths: {
auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
SubstituteFlag substitute = NoSubstitute;
if (GET_PROTOCOL_MINOR(clientVersion) >= 27) {
@ -300,11 +304,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
auto res = store->queryValidPaths(paths, substitute);
logger->stopWork();
worker_proto::write(*store, to, res);
WorkerProto::write(*store, wconn, res);
break;
}
case wopHasSubstitutes: {
case WorkerProto::Op::HasSubstitutes: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
StorePathSet paths; // FIXME
@ -315,16 +319,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQuerySubstitutablePaths: {
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
case WorkerProto::Op::QuerySubstitutablePaths: {
auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
logger->startWork();
auto res = store->querySubstitutablePaths(paths);
logger->stopWork();
worker_proto::write(*store, to, res);
WorkerProto::write(*store, wconn, res);
break;
}
case wopQueryPathHash: {
case WorkerProto::Op::QueryPathHash: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto hash = store->queryPathInfo(path)->narHash;
@ -333,27 +337,27 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQueryReferences:
case wopQueryReferrers:
case wopQueryValidDerivers:
case wopQueryDerivationOutputs: {
case WorkerProto::Op::QueryReferences:
case WorkerProto::Op::QueryReferrers:
case WorkerProto::Op::QueryValidDerivers:
case WorkerProto::Op::QueryDerivationOutputs: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
StorePathSet paths;
if (op == wopQueryReferences)
if (op == WorkerProto::Op::QueryReferences)
for (auto & i : store->queryPathInfo(path)->references)
paths.insert(i);
else if (op == wopQueryReferrers)
else if (op == WorkerProto::Op::QueryReferrers)
store->queryReferrers(path, paths);
else if (op == wopQueryValidDerivers)
else if (op == WorkerProto::Op::QueryValidDerivers)
paths = store->queryValidDerivers(path);
else paths = store->queryDerivationOutputs(path);
logger->stopWork();
worker_proto::write(*store, to, paths);
WorkerProto::write(*store, wconn, paths);
break;
}
case wopQueryDerivationOutputNames: {
case WorkerProto::Op::QueryDerivationOutputNames: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto names = store->readDerivation(path).outputNames();
@ -362,16 +366,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQueryDerivationOutputMap: {
case WorkerProto::Op::QueryDerivationOutputMap: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto outputs = store->queryPartialDerivationOutputMap(path);
logger->stopWork();
worker_proto::write(*store, to, outputs);
WorkerProto::write(*store, wconn, outputs);
break;
}
case wopQueryDeriver: {
case WorkerProto::Op::QueryDeriver: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto info = store->queryPathInfo(path);
@ -380,7 +384,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQueryPathFromHashPart: {
case WorkerProto::Op::QueryPathFromHashPart: {
auto hashPart = readString(from);
logger->startWork();
auto path = store->queryPathFromHashPart(hashPart);
@ -389,11 +393,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddToStore: {
case WorkerProto::Op::AddToStore: {
if (GET_PROTOCOL_MINOR(clientVersion) >= 25) {
auto name = readString(from);
auto camStr = readString(from);
auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
bool repairBool;
from >> repairBool;
auto repair = RepairFlag{repairBool};
@ -475,7 +479,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddMultipleToStore: {
case WorkerProto::Op::AddMultipleToStore: {
bool repair, dontCheckSigs;
from >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
@ -492,10 +496,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddTextToStore: {
case WorkerProto::Op::AddTextToStore: {
std::string suffix = readString(from);
std::string s = readString(from);
auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
logger->startWork();
auto path = store->addTextToStore(suffix, s, refs, NoRepair);
logger->stopWork();
@ -503,7 +507,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopExportPath: {
case WorkerProto::Op::ExportPath: {
auto path = store->parseStorePath(readString(from));
readInt(from); // obsolete
logger->startWork();
@ -514,7 +518,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopImportPaths: {
case WorkerProto::Op::ImportPaths: {
logger->startWork();
TunnelSource source(from, to);
auto paths = store->importPaths(source,
@ -526,8 +530,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopBuildPaths: {
auto drvs = readDerivedPaths(*store, clientVersion, from);
case WorkerProto::Op::BuildPaths: {
auto drvs = readDerivedPaths(*store, clientVersion, rconn);
BuildMode mode = bmNormal;
if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
mode = (BuildMode) readInt(from);
@ -551,8 +555,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopBuildPathsWithResults: {
auto drvs = readDerivedPaths(*store, clientVersion, from);
case WorkerProto::Op::BuildPathsWithResults: {
auto drvs = readDerivedPaths(*store, clientVersion, rconn);
BuildMode mode = bmNormal;
mode = (BuildMode) readInt(from);
@ -567,12 +571,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto results = store->buildPathsWithResults(drvs, mode);
logger->stopWork();
worker_proto::write(*store, to, results);
WorkerProto::write(*store, wconn, results);
break;
}
case wopBuildDerivation: {
case WorkerProto::Op::BuildDerivation: {
auto drvPath = store->parseStorePath(readString(from));
BasicDerivation drv;
readDerivation(from, *store, drv, Derivation::nameFromPath(drvPath));
@ -644,12 +648,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
DrvOutputs builtOutputs;
for (auto & [output, realisation] : res.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
worker_proto::write(*store, to, builtOutputs);
WorkerProto::write(*store, wconn, builtOutputs);
}
break;
}
case wopEnsurePath: {
case WorkerProto::Op::EnsurePath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
store->ensurePath(path);
@ -658,7 +662,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddTempRoot: {
case WorkerProto::Op::AddTempRoot: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
store->addTempRoot(path);
@ -667,7 +671,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddIndirectRoot: {
case WorkerProto::Op::AddIndirectRoot: {
Path path = absPath(readString(from));
logger->startWork();
@ -680,14 +684,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
// Obsolete.
case wopSyncWithGC: {
case WorkerProto::Op::SyncWithGC: {
logger->startWork();
logger->stopWork();
to << 1;
break;
}
case wopFindRoots: {
case WorkerProto::Op::FindRoots: {
logger->startWork();
auto & gcStore = require<GcStore>(*store);
Roots roots = gcStore.findRoots(!trusted);
@ -706,10 +710,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopCollectGarbage: {
case WorkerProto::Op::CollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
options.pathsToDelete = worker_proto::read(*store, from, Phantom<StorePathSet> {});
options.pathsToDelete = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
@ -730,7 +734,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopSetOptions: {
case WorkerProto::Op::SetOptions: {
ClientSettings clientSettings;
@ -767,7 +771,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQuerySubstitutablePathInfo: {
case WorkerProto::Op::QuerySubstitutablePathInfo: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
SubstitutablePathInfos infos;
@ -779,22 +783,22 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
to << 1
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
worker_proto::write(*store, to, i->second.references);
WorkerProto::write(*store, wconn, i->second.references);
to << i->second.downloadSize
<< i->second.narSize;
}
break;
}
case wopQuerySubstitutablePathInfos: {
case WorkerProto::Op::QuerySubstitutablePathInfos: {
SubstitutablePathInfos infos;
StorePathCAMap pathsMap = {};
if (GET_PROTOCOL_MINOR(clientVersion) < 22) {
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
for (auto & path : paths)
pathsMap.emplace(path, std::nullopt);
} else
pathsMap = worker_proto::read(*store, from, Phantom<StorePathCAMap> {});
pathsMap = WorkerProto::Serialise<StorePathCAMap>::read(*store, rconn);
logger->startWork();
store->querySubstitutablePathInfos(pathsMap, infos);
logger->stopWork();
@ -802,21 +806,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
for (auto & i : infos) {
to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
worker_proto::write(*store, to, i.second.references);
WorkerProto::write(*store, wconn, i.second.references);
to << i.second.downloadSize << i.second.narSize;
}
break;
}
case wopQueryAllValidPaths: {
case WorkerProto::Op::QueryAllValidPaths: {
logger->startWork();
auto paths = store->queryAllValidPaths();
logger->stopWork();
worker_proto::write(*store, to, paths);
WorkerProto::write(*store, wconn, paths);
break;
}
case wopQueryPathInfo: {
case WorkerProto::Op::QueryPathInfo: {
auto path = store->parseStorePath(readString(from));
std::shared_ptr<const ValidPathInfo> info;
logger->startWork();
@ -837,14 +841,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopOptimiseStore:
case WorkerProto::Op::OptimiseStore:
logger->startWork();
store->optimiseStore();
logger->stopWork();
to << 1;
break;
case wopVerifyStore: {
case WorkerProto::Op::VerifyStore: {
bool checkContents, repair;
from >> checkContents >> repair;
logger->startWork();
@ -856,19 +860,17 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddSignatures: {
case WorkerProto::Op::AddSignatures: {
auto path = store->parseStorePath(readString(from));
StringSet sigs = readStrings<StringSet>(from);
logger->startWork();
if (!trusted)
throw Error("you are not privileged to add signatures");
store->addSignatures(path, sigs);
logger->stopWork();
to << 1;
break;
}
case wopNarFromPath: {
case WorkerProto::Op::NarFromPath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
logger->stopWork();
@ -876,7 +878,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopAddToStoreNar: {
case WorkerProto::Op::AddToStoreNar: {
bool repair, dontCheckSigs;
auto path = store->parseStorePath(readString(from));
auto deriver = readString(from);
@ -884,7 +886,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
info.references = worker_proto::read(*store, from, Phantom<StorePathSet> {});
info.references = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
info.ca = ContentAddress::parseOpt(readString(from));
@ -928,21 +930,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case wopQueryMissing: {
auto targets = readDerivedPaths(*store, clientVersion, from);
case WorkerProto::Op::QueryMissing: {
auto targets = readDerivedPaths(*store, clientVersion, rconn);
logger->startWork();
StorePathSet willBuild, willSubstitute, unknown;
uint64_t downloadSize, narSize;
store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
logger->stopWork();
worker_proto::write(*store, to, willBuild);
worker_proto::write(*store, to, willSubstitute);
worker_proto::write(*store, to, unknown);
WorkerProto::write(*store, wconn, willBuild);
WorkerProto::write(*store, wconn, willSubstitute);
WorkerProto::write(*store, wconn, unknown);
to << downloadSize << narSize;
break;
}
case wopRegisterDrvOutput: {
case WorkerProto::Op::RegisterDrvOutput: {
logger->startWork();
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
auto outputId = DrvOutput::parse(readString(from));
@ -950,14 +952,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
store->registerDrvOutput(Realisation{
.id = outputId, .outPath = outputPath});
} else {
auto realisation = worker_proto::read(*store, from, Phantom<Realisation>());
auto realisation = WorkerProto::Serialise<Realisation>::read(*store, rconn);
store->registerDrvOutput(realisation);
}
logger->stopWork();
break;
}
case wopQueryRealisation: {
case WorkerProto::Op::QueryRealisation: {
logger->startWork();
auto outputId = DrvOutput::parse(readString(from));
auto info = store->queryRealisation(outputId);
@ -965,16 +967,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
std::set<StorePath> outPaths;
if (info) outPaths.insert(info->outPath);
worker_proto::write(*store, to, outPaths);
WorkerProto::write(*store, wconn, outPaths);
} else {
std::set<Realisation> realisations;
if (info) realisations.insert(*info);
worker_proto::write(*store, to, realisations);
WorkerProto::write(*store, wconn, realisations);
}
break;
}
case wopAddBuildLog: {
case WorkerProto::Op::AddBuildLog: {
StorePath path{readString(from)};
logger->startWork();
if (!trusted)
@ -991,6 +993,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
case WorkerProto::Op::QueryFailedPaths:
case WorkerProto::Op::ClearFailedPaths:
throw Error("Removed operation %1%", op);
default:
throw Error("invalid operation %1%", op);
}
@ -1045,7 +1051,8 @@ void processConnection(
auto temp = trusted
? store->isTrustedClient()
: std::optional { NotTrusted };
worker_proto::write(*store, to, temp);
WorkerProto::WriteConn wconn { .to = to };
WorkerProto::write(*store, wconn, temp);
}
/* Send startup error messages to the client. */
@ -1058,9 +1065,9 @@ void processConnection(
/* Process client requests. */
while (true) {
WorkerOp op;
WorkerProto::Op op;
try {
op = (WorkerOp) readInt(from);
op = (enum WorkerProto::Op) readInt(from);
} catch (Interrupted & e) {
break;
} catch (EndOfFile & e) {

View file

@ -1,9 +1,11 @@
#include "derivations.hh"
#include "downstream-placeholder.hh"
#include "store-api.hh"
#include "globals.hh"
#include "util.hh"
#include "split.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "fs-accessor.hh"
#include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
@ -748,7 +750,8 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv,
drv.outputs.emplace(std::move(name), std::move(output));
}
drv.inputSrcs = worker_proto::read(store, in, Phantom<StorePathSet> {});
drv.inputSrcs = WorkerProto::Serialise<StorePathSet>::read(store,
WorkerProto::ReadConn { .from = in });
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
@ -796,7 +799,9 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
},
}, i.second.raw());
}
worker_proto::write(store, out, drv.inputSrcs);
WorkerProto::write(store,
WorkerProto::WriteConn { .to = out },
drv.inputSrcs);
out << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto & i : drv.env)
@ -810,13 +815,7 @@ std::string hashPlaceholder(const std::string_view outputName)
return "/" + hashString(htSHA256, concatStrings("nix-output:", outputName)).to_string(Base32, false);
}
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName)
{
auto drvNameWithExtension = drvPath.name();
auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
return "/" + hashString(htSHA256, clearText).to_string(Base32, false);
}
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites)
@ -880,7 +879,7 @@ std::optional<BasicDerivation> Derivation::tryResolve(
for (auto & outputName : inputOutputs) {
if (auto actualPath = get(inputDrvOutputs, { inputDrv, outputName })) {
inputRewrites.emplace(
downstreamPlaceholder(store, inputDrv, outputName),
DownstreamPlaceholder::unknownCaOutput(inputDrv, outputName).render(),
store.printStorePath(*actualPath));
resolved.inputSrcs.insert(*actualPath);
} else {

View file

@ -6,6 +6,7 @@
#include "hash.hh"
#include "content-address.hh"
#include "repair-flag.hh"
#include "derived-path.hh"
#include "sync.hh"
#include "comparator.hh"
@ -495,17 +496,6 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
*/
std::string hashPlaceholder(const std::string_view outputName);
/**
* This creates an opaque and almost certainly unique string
* deterministically from a derivation path and output name.
*
* It is used as a placeholder to allow derivations to refer to
* content-addressed paths whose content --- and thus the path
* themselves --- isn't yet known. This occurs when a derivation has a
* dependency which is a CA derivation.
*/
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName);
extern const Hash impureOutputHash;
}

View file

@ -0,0 +1,39 @@
#include "downstream-placeholder.hh"
#include "derivations.hh"
namespace nix {
std::string DownstreamPlaceholder::render() const
{
return "/" + hash.to_string(Base32, false);
}
DownstreamPlaceholder DownstreamPlaceholder::unknownCaOutput(
const StorePath & drvPath,
std::string_view outputName)
{
auto drvNameWithExtension = drvPath.name();
auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
return DownstreamPlaceholder {
hashString(htSHA256, clearText)
};
}
DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(
const DownstreamPlaceholder & placeholder,
std::string_view outputName,
const ExperimentalFeatureSettings & xpSettings)
{
xpSettings.require(Xp::DynamicDerivations);
auto compressed = compressHash(placeholder.hash, 20);
auto clearText = "nix-computed-output:"
+ compressed.to_string(Base32, false)
+ ":" + std::string { outputName };
return DownstreamPlaceholder {
hashString(htSHA256, clearText)
};
}
}

View file

@ -0,0 +1,75 @@
#pragma once
///@file
#include "hash.hh"
#include "path.hh"
namespace nix {
/**
* Downstream Placeholders are opaque and almost certainly unique values
* used to allow derivations to refer to store objects which are yet to
* be built and for which we do not yet have store paths.
*
* They correspond to `DerivedPaths` that are not `DerivedPath::Opaque`,
* except for the cases involving input addressing or fixed outputs
* where we do know a store path for the derivation output in advance.
*
* Unlike `DerivedPath`, however, `DownstreamPlaceholder` is
* purposefully opaque and obfuscated. This is so they are hard to
* create by accident, and so substituting them (once we know what the
* path to store object is) is unlikely to capture other stuff it
* shouldn't.
*
* We use them with `Derivation`: the `render()` method is called to
* render an opaque string which can be used in the derivation, and the
* resolving logic can substitute those strings for store paths when
* resolving `Derivation.inputDrvs` to `BasicDerivation.inputSrcs`.
*/
class DownstreamPlaceholder
{
/**
* `DownstreamPlaceholder` is just a newtype of `Hash`.
* This is its only field.
*/
Hash hash;
/**
* Newtype constructor
*/
DownstreamPlaceholder(Hash hash) : hash(hash) { }
public:
/**
* This creates an opaque and almost certainly unique string
* deterministically from the placeholder.
*/
std::string render() const;
/**
* Create a placeholder for an unknown output of a content-addressed
* derivation.
*
* The derivation itself is known (we have a store path for it), but
* the output doesn't yet have a known store path.
*/
static DownstreamPlaceholder unknownCaOutput(
const StorePath & drvPath,
std::string_view outputName);
/**
* Create a placeholder for the output of an unknown derivation.
*
* The derivation is not yet known because it is a dynamic
* derivation --- it is itself an output of another derivation ---
* and we just have (another) placeholder for it.
*
* @param xpSettings Stop-gap to avoid globals during unit tests.
*/
static DownstreamPlaceholder unknownDerivation(
const DownstreamPlaceholder & drvPlaceholder,
std::string_view outputName,
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};
}
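
A hypothetical usage sketch of the new class (not part of the commit): render the placeholder string for the `out` output of a content-addressed derivation whose `.drv` store path is already known.

```cpp
#include "downstream-placeholder.hh"

// drvPath is assumed to name a ".drv" store path.
std::string placeholderForOut(const nix::StorePath & drvPath)
{
    return nix::DownstreamPlaceholder::unknownCaOutput(drvPath, "out").render();
}
```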

View file

@ -2,6 +2,7 @@
#include "store-api.hh"
#include "archive.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include <algorithm>
@ -45,7 +46,9 @@ void Store::exportPath(const StorePath & path, Sink & sink)
teeSink
<< exportMagic
<< printStorePath(path);
worker_proto::write(*this, teeSink, info->references);
WorkerProto::write(*this,
WorkerProto::WriteConn { .to = teeSink },
info->references);
teeSink
<< (info->deriver ? printStorePath(*info->deriver) : "")
<< 0;
@ -73,7 +76,8 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
//Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
auto references = worker_proto::read(*this, source, Phantom<StorePathSet> {});
auto references = WorkerProto::Serialise<StorePathSet>::read(*this,
WorkerProto::ReadConn { .from = source });
auto deriver = readString(source);
auto narHash = hashString(htSHA256, saved.s);

View file

@ -186,9 +186,9 @@ struct curlFileTransfer : public FileTransfer
size_t realSize = size * nmemb;
std::string line((char *) contents, realSize);
printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));
static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
std::smatch match;
if (std::regex_match(line, match, statusLine)) {
if (std::smatch match; std::regex_match(line, match, statusLine)) {
result.etag = "";
result.data.clear();
result.bodySize = 0;
@ -196,9 +196,11 @@ struct curlFileTransfer : public FileTransfer
acceptRanges = false;
encoding = "";
} else {
auto i = line.find(':');
if (i != std::string::npos) {
std::string name = toLower(trim(line.substr(0, i)));
if (name == "etag") {
result.etag = trim(line.substr(i + 1));
/* Hack to work around a GitHub bug: it sends
@ -212,10 +214,22 @@ struct curlFileTransfer : public FileTransfer
debug("shutting down on 200 HTTP response with expected ETag");
return 0;
}
} else if (name == "content-encoding")
}
else if (name == "content-encoding")
encoding = trim(line.substr(i + 1));
else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes")
acceptRanges = true;
else if (name == "link" || name == "x-amz-meta-link") {
auto value = trim(line.substr(i + 1));
static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
if (std::smatch match; std::regex_match(value, match, linkRegex))
result.immutableUrl = match.str(1);
else
debug("got invalid link header '%s'", value);
}
}
}
return realSize;
@ -345,7 +359,7 @@ struct curlFileTransfer : public FileTransfer
{
auto httpStatus = getHTTPStatus();
char * effectiveUriCStr;
char * effectiveUriCStr = nullptr;
curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
if (effectiveUriCStr)
result.effectiveUri = effectiveUriCStr;

View file

@ -80,6 +80,10 @@ struct FileTransferResult
std::string effectiveUri;
std::string data;
uint64_t bodySize = 0;
/* An "immutable" URL for this resource (i.e. one whose contents
will never change), as returned by the `Link: <url>;
rel="immutable"` header. */
std::optional<std::string> immutableUrl;
};
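
A hypothetical consumer of the new field (not from the diff): prefer the immutable URL advertised via `Link: <url>; rel="immutable"` when recording a pinned download, falling back to the effective URI otherwise.

```cpp
#include "filetransfer.hh"

std::string pinnableUrl(const nix::FileTransferResult & result)
{
    // Use the immutable URL if the server provided one, else the final URI.
    return result.immutableUrl.value_or(result.effectiveUri);
}
```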
class Store;

View file

@ -110,6 +110,11 @@ void LocalStore::createTempRootsFile()
void LocalStore::addTempRoot(const StorePath & path)
{
if (readOnly) {
debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
return;
}
createTempRootsFile();
/* Open/create the global GC lock file. */
@ -563,7 +568,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
/* On macOS, accepted sockets inherit the
non-blocking flag from the server socket, so
explicitly make it blocking. */
if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) & ~O_NONBLOCK) == -1)
if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
abort();
while (true) {

View file

@ -77,7 +77,33 @@ Settings::Settings()
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
buildHook = getSelfExe().value_or("nix") + " __build-remote";
/* Set the build hook location
For builds we perform a self-invocation, so Nix has to be self-aware.
That is, it has to know where it is installed. We don't think it's sentient.
Normally, nix is installed according to `nixBinDir`, which is set at compile time,
but can be overridden. This makes for a great default that works even if this
code is linked as a library into some other program whose main is not aware
that it might need to be a build remote hook.
However, it may not have been installed at all. For example, if it's a static build,
there's a good chance that it has been moved out of its installation directory.
That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the
current executable, using `getSelfExe()`.
As a last resort, we fall back to `PATH`. Hopefully we find a `nix` there that's compatible.
If you're porting Nix to a new platform, that might be good enough for a while, but
you'll want to improve `getSelfExe()` to work on your platform.
*/
std::string nixExePath = nixBinDir + "/nix";
if (!pathExists(nixExePath)) {
nixExePath = getSelfExe().value_or("nix");
}
buildHook = {
nixExePath,
"__build-remote",
};
}
void loadConfFile()
@ -183,7 +209,7 @@ bool Settings::isWSL1()
Path Settings::getDefaultSSLCertFile()
{
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathExists(fn)) return fn;
if (pathAccessible(fn)) return fn;
return "";
}

View file

@ -236,7 +236,7 @@ public:
)",
{"build-timeout"}};
PathSetting buildHook{this, true, "", "build-hook",
Setting<Strings> buildHook{this, {}, "build-hook",
R"(
The path to the helper program that executes remote builds.
@ -575,8 +575,8 @@ public:
line.
)"};
PathSetting diffHook{
this, true, "", "diff-hook",
OptionalPathSetting diffHook{
this, std::nullopt, "diff-hook",
R"(
Absolute path to an executable capable of diffing build
results. The hook is executed if `run-diff-hook` is true, and the
@ -710,32 +710,29 @@ public:
Strings{"https://cache.nixos.org/"},
"substituters",
R"(
A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
to be used as substituters, separated by whitespace.
Substituters are tried based on their Priority value, which each substituter can set
independently. Lower value means higher priority.
The default is `https://cache.nixos.org`, with a Priority of 40.
A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) to be used as substituters, separated by whitespace.
A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/glossary.md#gloss-store-object) instead of building them.
At least one of the following conditions must be met for Nix to use
a substituter:
Substituters are tried based on their priority value, which each substituter can set independently.
Lower value means higher priority.
The default is `https://cache.nixos.org`, which has a priority of 40.
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- the user calling Nix is in the [`trusted-users`](#conf-trusted-users) list
At least one of the following conditions must be met for Nix to use a substituter:
In addition, each store path should be trusted as described
in [`trusted-public-keys`](#conf-trusted-public-keys)
- The substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- The user calling Nix is in the [`trusted-users`](#conf-trusted-users) list
In addition, each store path should be trusted as described in [`trusted-public-keys`](#conf-trusted-public-keys)
)",
{"binary-caches"}};
Setting<StringSet> trustedSubstituters{
this, {}, "trusted-substituters",
R"(
A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format),
separated by whitespace. These are
not used by default, but can be enabled by users of the Nix daemon
by specifying `--option substituters urls` on the command
line. Unprivileged users are only allowed to pass a subset of the
URLs listed in `substituters` and `trusted-substituters`.
A list of [Nix store URLs](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format), separated by whitespace.
These are not used by default, but users of the Nix daemon can enable them by specifying [`substituters`](#conf-substituters).
Unprivileged users (those set in only [`allowed-users`](#conf-allowed-users) but not [`trusted-users`](#conf-trusted-users)) can pass as `substituters` only those URLs listed in `trusted-substituters`.
)",
{"trusted-binary-caches"}};
@ -915,12 +912,11 @@ public:
this, {}, "hashed-mirrors",
R"(
A list of web servers used by `builtins.fetchurl` to obtain files by
hash. The default is `http://tarballs.nixos.org/`. Given a hash type
*ht* and a base-16 hash *h*, Nix will try to download the file from
*hashed-mirror*/*ht*/*h*. This allows files to be downloaded even if
they have disappeared from their original URI. For example, given
the default mirror `http://tarballs.nixos.org/`, when building the
derivation
hash. Given a hash type *ht* and a base-16 hash *h*, Nix will try to
download the file from *hashed-mirror*/*ht*/*h*. This allows files to
be downloaded even if they have disappeared from their original URI.
For example, given an example mirror `http://tarballs.nixos.org/`,
when building the derivation
```nix
builtins.fetchurl {
@ -1014,6 +1010,18 @@ public:
| `~/.nix-profile` | `$XDG_STATE_HOME/nix/profile` |
| `~/.nix-defexpr` | `$XDG_STATE_HOME/nix/defexpr` |
| `~/.nix-channels` | `$XDG_STATE_HOME/nix/channels` |
If you already have Nix installed and are using [profiles](@docroot@/package-management/profiles.md) or [channels](@docroot@/package-management/channels.md), you should migrate manually when you enable this option.
If `$XDG_STATE_HOME` is not set, use `$HOME/.local/state/nix` instead of `$XDG_STATE_HOME/nix`.
This can be achieved with the following shell commands:
```sh
nix_state_home=${XDG_STATE_HOME-$HOME/.local/state}/nix
mkdir -p $nix_state_home
mv $HOME/.nix-profile $nix_state_home/profile
mv $HOME/.nix-defexpr $nix_state_home/defexpr
mv $HOME/.nix-channels $nix_state_home/channels
```
)"
};
};

View file

@ -7,6 +7,7 @@
#include "store-api.hh"
#include "path-with-outputs.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "ssh.hh"
#include "derivations.hh"
#include "callback.hh"
@ -47,6 +48,42 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
FdSource from;
int remoteVersion;
bool good = true;
/**
* Coercion to `WorkerProto::ReadConn`. This makes it easy to use the
* factored out worker protocol serializers with a
* `LegacySSHStore::Connection`.
*
* The worker protocol connection types are unidirectional, unlike
* this type.
*
* @todo Use server protocol serializers, not worker protocol
* serializers, once we have made that distinction.
*/
operator WorkerProto::ReadConn ()
{
return WorkerProto::ReadConn {
.from = from,
};
}
/**
* Coercion to `WorkerProto::WriteConn`. This makes it easy to use the
* factored out worker protocol serializers with a
* `LegacySSHStore::Connection`.
*
* The worker protocol connection types are unidirectional, unlike
* this type.
*
* @todo Use server protocol serializers, not worker protocol
* serializers, once we have made that distinction.
*/
operator WorkerProto::WriteConn ()
{
return WorkerProto::WriteConn {
.to = to,
};
}
};
std::string host;
@ -133,7 +170,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
conn->to << cmdQueryPathInfos << PathSet{printStorePath(path)};
conn->to << ServeProto::Command::QueryPathInfos << PathSet{printStorePath(path)};
conn->to.flush();
auto p = readString(conn->from);
@ -146,7 +183,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
auto deriver = readString(conn->from);
if (deriver != "")
info->deriver = parseStorePath(deriver);
info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
info->references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
@ -176,11 +213,11 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
conn->to
<< cmdAddToStoreNar
<< ServeProto::Command::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
worker_proto::write(*this, conn->to, info.references);
WorkerProto::write(*this, *conn, info.references);
conn->to
<< info.registrationTime
<< info.narSize
@ -198,7 +235,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
} else {
conn->to
<< cmdImportPaths
<< ServeProto::Command::ImportPaths
<< 1;
try {
copyNAR(source, conn->to);
@ -209,7 +246,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
conn->to
<< exportMagic
<< printStorePath(info.path);
worker_proto::write(*this, conn->to, info.references);
WorkerProto::write(*this, *conn, info.references);
conn->to
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
@ -226,7 +263,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
{
auto conn(connections->get());
conn->to << cmdDumpStorePath << printStorePath(path);
conn->to << ServeProto::Command::DumpStorePath << printStorePath(path);
conn->to.flush();
copyNAR(conn->from, sink);
}
@ -279,7 +316,7 @@ public:
auto conn(connections->get());
conn->to
<< cmdBuildDerivation
<< ServeProto::Command::BuildDerivation
<< printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
@ -294,7 +331,7 @@ public:
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(*this, *conn);
for (auto && [output, realisation] : builtOutputs)
status.builtOutputs.insert_or_assign(
std::move(output.outputName),
@ -310,7 +347,7 @@ public:
auto conn(connections->get());
conn->to << cmdBuildPaths;
conn->to << ServeProto::Command::BuildPaths;
Strings ss;
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
@ -344,6 +381,17 @@ public:
virtual ref<FSAccessor> getFSAccessor() override
{ unsupported("getFSAccessor"); }
/**
* The default instance would schedule the work on the client side, but
* for consistency with `buildPaths` and `buildDerivation` it should happen
* on the remote side.
*
* We make this fail for now so we can implement this properly later
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }
void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override
@ -356,12 +404,12 @@ public:
auto conn(connections->get());
conn->to
<< cmdQueryClosure
<< ServeProto::Command::QueryClosure
<< includeOutputs;
worker_proto::write(*this, conn->to, paths);
WorkerProto::write(*this, *conn, paths);
conn->to.flush();
for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
for (auto & i : WorkerProto::Serialise<StorePathSet>::read(*this, *conn))
out.insert(i);
}
@ -371,13 +419,13 @@ public:
auto conn(connections->get());
conn->to
<< cmdQueryValidPaths
<< ServeProto::Command::QueryValidPaths
<< false // lock
<< maybeSubstitute;
worker_proto::write(*this, conn->to, paths);
WorkerProto::write(*this, *conn, paths);
conn->to.flush();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
void connect() override

View file

@ -15,22 +15,22 @@ struct LocalFSStoreConfig : virtual StoreConfig
// it to omit the call to the Setting constructor. Clang works fine
// either way.
const PathSetting rootDir{(StoreConfig*) this, true, "",
const OptionalPathSetting rootDir{(StoreConfig*) this, std::nullopt,
"root",
"Directory prefixed to all other paths."};
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
const PathSetting stateDir{(StoreConfig*) this,
rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
"state",
"Directory where Nix will store state."};
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
const PathSetting logDir{(StoreConfig*) this,
rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
"log",
"directory where Nix will store log files."};
const PathSetting realStoreDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
const PathSetting realStoreDir{(StoreConfig*) this,
rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",
"Physical path of the Nix store."};
};

View file

@ -190,7 +190,11 @@ LocalStore::LocalStore(const Params & params)
/* Create missing state directories if they don't already exist. */
createDirs(realStoreDir);
makeStoreWritable();
if (readOnly) {
experimentalFeatureSettings.require(Xp::ReadOnlyLocalStore);
} else {
makeStoreWritable();
}
createDirs(linksDir);
Path profilesDir = stateDir + "/profiles";
createDirs(profilesDir);
@ -204,8 +208,10 @@ LocalStore::LocalStore(const Params & params)
for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) {
createDirs(perUserDir);
if (chmod(perUserDir.c_str(), 0755) == -1)
throw SysError("could not set permissions on '%s' to 755", perUserDir);
if (!readOnly) {
if (chmod(perUserDir.c_str(), 0755) == -1)
throw SysError("could not set permissions on '%s' to 755", perUserDir);
}
}
/* Optionally, create directories and set permissions for a
@ -269,10 +275,12 @@ LocalStore::LocalStore(const Params & params)
/* Acquire the big fat lock in shared mode to make sure that no
schema upgrade is in progress. */
Path globalLockPath = dbDir + "/big-lock";
globalLock = openLockFile(globalLockPath.c_str(), true);
if (!readOnly) {
Path globalLockPath = dbDir + "/big-lock";
globalLock = openLockFile(globalLockPath.c_str(), true);
}
if (!lockFile(globalLock.get(), ltRead, false)) {
if (!readOnly && !lockFile(globalLock.get(), ltRead, false)) {
printInfo("waiting for the big Nix store lock...");
lockFile(globalLock.get(), ltRead, true);
}
@ -280,6 +288,14 @@ LocalStore::LocalStore(const Params & params)
/* Check the current database schema and if necessary do an
upgrade. */
int curSchema = getSchema();
if (readOnly && curSchema < nixSchemaVersion) {
debug("current schema version: %d", curSchema);
debug("supported schema version: %d", nixSchemaVersion);
throw Error(curSchema == 0 ?
"database does not exist, and cannot be created in read-only mode" :
"database schema needs migrating, but this cannot be done in read-only mode");
}
if (curSchema > nixSchemaVersion)
throw Error("current Nix store schema is version %1%, but I only support %2%",
curSchema, nixSchemaVersion);
@ -344,7 +360,11 @@ LocalStore::LocalStore(const Params & params)
else openDB(*state, false);
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
if (!readOnly) {
migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
} else {
throw Error("need to migrate to content-addressed schema, but this cannot be done in read-only mode");
}
}
/* Prepare SQL statements. */
@ -475,13 +495,20 @@ int LocalStore::getSchema()
void LocalStore::openDB(State & state, bool create)
{
if (access(dbDir.c_str(), R_OK | W_OK))
if (create && readOnly) {
throw Error("cannot create database while in read-only mode");
}
if (access(dbDir.c_str(), R_OK | (readOnly ? 0 : W_OK)))
throw SysError("Nix database directory '%1%' is not writable", dbDir);
/* Open the Nix database. */
std::string dbPath = dbDir + "/db.sqlite";
auto & db(state.db);
state.db = SQLite(dbPath, create);
auto openMode = readOnly ? SQLiteOpenMode::Immutable
: create ? SQLiteOpenMode::Normal
: SQLiteOpenMode::NoCreate;
state.db = SQLite(dbPath, openMode);
#ifdef __CYGWIN__
/* The cygwin version of sqlite3 has a patch which calls

View file

@ -46,6 +46,23 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
"require-sigs",
"Whether store paths copied into this store should have a trusted signature."};
Setting<bool> readOnly{(StoreConfig*) this,
false,
"read-only",
R"(
Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
> **Warning**
> Do not use this unless the filesystem is read-only.
>
> Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
> While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
)"};
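
A sketch of how the new setting might be exercised from C++ (illustrative, not from the diff): open the local store with `read-only` passed as a store parameter. This assumes the corresponding experimental feature (`read-only-local-store`, matching the `Xp::ReadOnlyLocalStore` check elsewhere in this commit) is enabled.

```cpp
#include "store-api.hh"

// Open the local store without taking locks or requiring write access.
nix::ref<nix::Store> openLocalStoreReadOnly()
{
    return nix::openStore("local", {{"read-only", "true"}});
}
```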
const std::string name() override { return "Local Store"; }
std::string doc() override;
@ -240,8 +257,6 @@ public:
void vacuumDB();
void repairPath(const StorePath & path) override;
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
/**
@ -271,6 +286,10 @@ public:
private:
/**
* Retrieve the current version of the database schema.
* If the database does not exist yet, the version returned will be 0.
*/
int getSchema();
void openDB(State & state, bool create);

View file

@ -80,4 +80,15 @@ std::map<StorePath, StorePath> makeContentAddressed(
return remappings;
}
StorePath makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePath & fromPath)
{
auto remappings = makeContentAddressed(srcStore, dstStore, StorePathSet { fromPath });
auto i = remappings.find(fromPath);
assert(i != remappings.end());
return i->second;
}
}

View file

@ -5,9 +5,20 @@
namespace nix {
/** Rewrite a closure of store paths to be completely content addressed.
*/
std::map<StorePath, StorePath> makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePathSet & storePaths);
const StorePathSet & rootPaths);
/** Rewrite a closure of a store path to be completely content addressed.
*
* This is a convenience function for the case where you only have one root path.
*/
StorePath makeContentAddressed(
Store & srcStore,
Store & dstStore,
const StorePath & rootPath);
}
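
A hedged usage sketch for the new single-path overload (not from this changeset); the wrapper function name and the store references are placeholders supplied by the caller.

// Illustrative sketch: rewrite the closure of one path to be
// content-addressed and get back the remapped path for that root.
#include "make-content-addressed.hh"
#include "store-api.hh"

nix::StorePath rewriteOne(nix::Store & srcStore, nix::Store & dstStore, const nix::StorePath & p)
{
    return nix::makeContentAddressed(srcStore, dstStore, p);
}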

View file

@ -1,5 +1,7 @@
#include "path-info.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "store-api.hh"
namespace nix {
@ -131,7 +133,8 @@ ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned
auto narHash = Hash::parseAny(readString(source), htSHA256);
ValidPathInfo info(path, narHash);
if (deriver != "") info.deriver = store.parseStorePath(deriver);
info.references = worker_proto::read(store, source, Phantom<StorePathSet> {});
info.references = WorkerProto::Serialise<StorePathSet>::read(store,
WorkerProto::ReadConn { .from = source });
source >> info.registrationTime >> info.narSize;
if (format >= 16) {
source >> info.ultimate;
@ -152,7 +155,9 @@ void ValidPathInfo::write(
sink << store.printStorePath(path);
sink << (deriver ? store.printStorePath(*deriver) : "")
<< narHash.to_string(Base16, false);
worker_proto::write(store, sink, references);
WorkerProto::write(store,
WorkerProto::WriteConn { .to = sink },
references);
sink << registrationTime << narSize;
if (format >= 16) {
sink << ultimate

View file

@ -0,0 +1,73 @@
#include "path-references.hh"
#include "hash.hh"
#include "util.hh"
#include "archive.hh"
#include <map>
#include <cstdlib>
#include <mutex>
#include <algorithm>
namespace nix {
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path,
const StorePathSet & refs)
{
HashSink hashSink { htSHA256 };
auto found = scanForReferences(hashSink, path, refs);
auto hash = hashSink.finish();
return std::pair<StorePathSet, HashResult>(found, hash);
}
StorePathSet scanForReferences(
Sink & toTee,
const Path & path,
const StorePathSet & refs)
{
PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
TeeSink sink { refsSink, toTee };
/* Look for the hashes in the NAR dump of the path. */
dumpPath(path, sink);
return refsSink.getResultPaths();
}
}

View file

@ -0,0 +1,25 @@
#pragma once
#include "references.hh"
#include "path.hh"
namespace nix {
std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
}
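
A small illustrative sketch (editor's addition, not part of the commit) of the scanning entry points declared above; it assumes a local filesystem store whose printed store path is also an on-disk path, and the candidate set is a caller-supplied input.

// Illustrative sketch: scan a store path's NAR serialisation for occurrences
// of the hash parts of `candidates`, returning the references found together
// with the SHA-256 NAR hash computed on the side.
#include "path-references.hh"
#include "store-api.hh"

std::pair<nix::StorePathSet, nix::HashResult>
refsOf(nix::Store & store, const nix::StorePath & path, const nix::StorePathSet & candidates)
{
    return nix::scanForReferences(store.printStorePath(path), candidates);
}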

View file

@ -9,8 +9,8 @@ static void checkName(std::string_view path, std::string_view name)
if (name.empty())
throw BadStorePath("store path '%s' has an empty name", path);
if (name.size() > StorePath::MaxPathLen)
throw BadStorePath("store path '%s' has a name longer than '%d characters",
StorePath::MaxPathLen, path);
throw BadStorePath("store path '%s' has a name longer than %d characters",
path, StorePath::MaxPathLen);
// See nameRegexStr for the definition
for (auto c : name)
if (!((c >= '0' && c <= '9')

View file

@ -13,8 +13,10 @@
namespace nix {
/* Parse a generation name of the format
`<profilename>-<number>-link'. */
/**
* Parse a generation name of the format
* `<profilename>-<number>-link'.
*/
static std::optional<GenerationNumber> parseName(const std::string & profileName, const std::string & name)
{
if (name.substr(0, profileName.size() + 1) != profileName + "-") return {};
@ -28,7 +30,6 @@ static std::optional<GenerationNumber> parseName(const std::string & profileName
}
std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile)
{
Generations gens;
@ -61,15 +62,16 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
}
static void makeName(const Path & profile, GenerationNumber num,
Path & outLink)
/**
* Create a generation name that can be parsed by `parseName()`.
*/
static Path makeName(const Path & profile, GenerationNumber num)
{
Path prefix = fmt("%1%-%2%", profile, num);
outLink = prefix + "-link";
return fmt("%s-%s-link", profile, num);
}
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath)
{
/* The new generation number should be higher than the
previous ones. */
@ -79,7 +81,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
if (gens.size() > 0) {
Generation last = gens.back();
if (readLink(last.path) == store->printStorePath(outPath)) {
if (readLink(last.path) == store.printStorePath(outPath)) {
/* We only create a new generation symlink if it differs
from the last one.
@ -89,7 +91,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
return last.path;
}
num = gens.back().number;
num = last.number;
} else {
num = 0;
}
@ -100,9 +102,8 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
to the permanent roots (of which the GC would have a stale
view). If we didn't do it this way, the GC might remove the
user environment etc. we've just built. */
Path generation;
makeName(profile, num + 1, generation);
store->addPermRoot(outPath, generation);
Path generation = makeName(profile, num + 1);
store.addPermRoot(outPath, generation);
return generation;
}
@ -117,12 +118,19 @@ static void removeFile(const Path & path)
void deleteGeneration(const Path & profile, GenerationNumber gen)
{
Path generation;
makeName(profile, gen, generation);
Path generation = makeName(profile, gen);
removeFile(generation);
}
/**
* Delete a generation with dry-run mode.
*
* Like `deleteGeneration()` but:
*
* - We log what we are going to do.
*
* - We only actually delete if `dryRun` is false.
*/
static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
{
if (dryRun)
@ -150,27 +158,36 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
}
}
/**
* Advance the iterator until the given predicate `cond` returns `true`.
*/
static inline void iterDropUntil(Generations & gens, auto && i, auto && cond)
{
for (; i != gens.rend() && !cond(*i); ++i);
}
void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun)
{
if (max == 0)
throw Error("Must keep at least one generation, otherwise the current one would be deleted");
PathLocks lock;
lockProfile(lock, profile);
bool fromCurGen = false;
auto [gens, curGen] = findGenerations(profile);
for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
if (i->number == curGen) {
fromCurGen = true;
max--;
continue;
}
if (fromCurGen) {
if (max) {
max--;
continue;
}
deleteGeneration2(profile, i->number, dryRun);
}
}
auto [gens, _curGen] = findGenerations(profile);
auto curGen = _curGen;
auto i = gens.rbegin();
// Find the current generation
iterDropUntil(gens, i, [&](auto & g) { return g.number == curGen; });
// Skip over `max` generations, preserving them
for (auto keep = 0; i != gens.rend() && keep < max; ++i, ++keep);
// Delete the rest
for (; i != gens.rend(); ++i)
deleteGeneration2(profile, i->number, dryRun);
}
void deleteOldGenerations(const Path & profile, bool dryRun)
@ -193,23 +210,33 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
auto [gens, curGen] = findGenerations(profile);
bool canDelete = false;
for (auto i = gens.rbegin(); i != gens.rend(); ++i)
if (canDelete) {
assert(i->creationTime < t);
if (i->number != curGen)
deleteGeneration2(profile, i->number, dryRun);
} else if (i->creationTime < t) {
/* We may now start deleting generations, but we don't
delete this generation yet, because this generation was
still the one that was active at the requested point in
time. */
canDelete = true;
}
auto i = gens.rbegin();
// Predicate that the generation is older than the given time.
auto older = [&](auto & g) { return g.creationTime < t; };
// Find the first older generation, if one exists
iterDropUntil(gens, i, older);
/* Take the previous generation.
We don't want to delete this one yet because it
existed at the requested point in time, and
we want to be able to roll back to it. */
if (i != gens.rend()) ++i;
// Delete all previous generations (unless current).
for (; i != gens.rend(); ++i) {
/* Creation dates and generation numbers should be monotonic, so lower
numbered generations should also be older. */
assert(older(*i));
if (i->number != curGen)
deleteGeneration2(profile, i->number, dryRun);
}
}
void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun)
time_t parseOlderThanTimeSpec(std::string_view timeSpec)
{
if (timeSpec.empty() || timeSpec[timeSpec.size() - 1] != 'd')
throw UsageError("invalid number of days specifier '%1%', expected something like '14d'", timeSpec);
@ -221,9 +248,7 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec,
if (!days || *days < 1)
throw UsageError("invalid number of days specifier '%1%'", timeSpec);
time_t oldTime = curTime - *days * 24 * 3600;
deleteGenerationsOlderThan(profile, oldTime, dryRun);
return curTime - *days * 24 * 3600;
}
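
A brief sketch (not from this changeset) of how the split between parsing and deleting composes; the "14d" spec and the profile path are illustrative values.

// Illustrative sketch: what used to be one call is now two steps,
// parse the time spec first, then delete against the resulting cutoff.
#include "profiles.hh"

void deleteOlderThanTwoWeeks(const nix::Path & profile, bool dryRun)
{
    time_t cutoff = nix::parseOlderThanTimeSpec("14d");   // now - 14 * 24 * 3600
    nix::deleteGenerationsOlderThan(profile, cutoff, dryRun);
}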

View file

@ -1,7 +1,11 @@
#pragma once
///@file
/**
* @file Implementation of Profiles.
*
* See the manual for additional information.
*/
#include "types.hh"
#include "types.hh"
#include "pathlocks.hh"
#include <time.h>
@ -12,41 +16,166 @@ namespace nix {
class StorePath;
/**
* A positive number identifying a generation for a given profile.
*
* Generation numbers are assigned sequentially. Each new generation is
* assigned 1 + the current highest generation number.
*/
typedef uint64_t GenerationNumber;
/**
* A generation is a revision of a profile.
*
* Each generation is a mapping (key-value pair) from an identifier
* (`number`) to a store object (specified by `path`).
*/
struct Generation
{
/**
* The number of a generation is its unique identifier within the
* profile.
*/
GenerationNumber number;
/**
* The store path identifies the store object that is the contents
* of the generation.
*
* These store paths / objects are not unique to the generation
* within a profile. Nix tries to ensure successive generations have
* distinct contents to avoid bloat, but nothing stops two
* non-adjacent generations from having the same contents.
*
* @todo Use `StorePath` instead of `Path`?
*/
Path path;
/**
* When the generation was created. This is extra metadata about the
* generation used to make garbage collecting old generations more
* convenient.
*/
time_t creationTime;
};
/**
* All the generations of a profile
*/
typedef std::list<Generation> Generations;
/**
* Returns the list of currently present generations for the specified
* profile, sorted by generation number. Also returns the number of
* the current generation.
* Find all generations for the given profile.
*
* @param profile A profile specified by its name and location combined
* into a path. E.g. if "foo" is the name of the profile, and "/bar/baz"
* is the directory it is in, then the path "/bar/baz/foo" would be the
* argument for this parameter.
*
* @return The pair of:
*
* - The list of currently present generations for the specified profile,
* sorted by ascending generation number.
*
* - The number of the current/active generation.
*
* Note that the current/active generation need not be the latest one.
*/
std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile);
class LocalFSStore;
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath);
/**
* Create a new generation of the given profile
*
* If the previous generation (not the currently active one!) has a
* distinct store object, a fresh generation number is mapped to the
* given store object, referenced by path. Otherwise, the previous
* generation is reused and its path is returned.
*
* The behavior of reusing existing generations like this makes this
* procedure idempotent. It also avoids clutter.
*/
Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath);
/**
* Unconditionally delete a generation
*
* @param profile A profile specified by its name and location combined into a path.
*
* @param gen The generation number specifying exactly which generation
* to delete.
*
* Because there is no check of whether the generation to delete is
* active, this is somewhat unsafe.
*
* @todo Should we expose this at all?
*/
void deleteGeneration(const Path & profile, GenerationNumber gen);
/**
* Delete the given set of generations.
*
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
*
* @param gensToDelete The generations to delete, specified by a set of
* numbers.
*
* @param dryRun Log what would be deleted instead of actually doing
* so.
*
* Trying to delete the currently active generation will fail, and cause
* no generations to be deleted.
*/
void deleteGenerations(const Path & profile, const std::set<GenerationNumber> & gensToDelete, bool dryRun);
/**
* Delete all generations older than the `max` most recent ones (counting
* back from, and including, the current generation).
*
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
*
* @param max How many generations to keep up to the current one. Must
* be at least 1 so we don't delete the current one.
*
* @param dryRun Log what would be deleted instead of actually doing
* so.
*/
void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun);
/**
* Delete all generations other than the current one
*
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
*
* @param dryRun Log what would be deleted instead of actually doing
* so.
*/
void deleteOldGenerations(const Path & profile, bool dryRun);
/**
* Delete generations older than `t`, except for the most recent one
* older than `t`.
*
* @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
*
* @param dryRun Log what would be deleted instead of actually doing
* so.
*/
void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun);
/**
* Parse a time spec intended for `deleteGenerationsOlderThan()`.
*
* Throws an exception if `timeSpec` fails to parse.
*/
time_t parseOlderThanTimeSpec(std::string_view timeSpec);
/**
* Smaller wrapper around `replaceSymlink` for replacing the current
* generation of a profile. Does not enforce proper structure.
*
* @todo Always use `switchGeneration()` instead, and delete this.
*/
void switchLink(Path link, Path target);
/**

View file

@ -1,188 +0,0 @@
#include "references.hh"
#include "hash.hh"
#include "util.hh"
#include "archive.hh"
#include <map>
#include <cstdlib>
#include <mutex>
namespace nix {
static size_t refLength = 32; /* characters */
static void search(
std::string_view s,
StringSet & hashes,
StringSet & seen)
{
static std::once_flag initialised;
static bool isBase32[256];
std::call_once(initialised, [](){
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
for (unsigned int i = 0; i < base32Chars.size(); ++i)
isBase32[(unsigned char) base32Chars[i]] = true;
});
for (size_t i = 0; i + refLength <= s.size(); ) {
int j;
bool match = true;
for (j = refLength - 1; j >= 0; --j)
if (!isBase32[(unsigned char) s[i + j]]) {
i += j + 1;
match = false;
break;
}
if (!match) continue;
std::string ref(s.substr(i, refLength));
if (hashes.erase(ref)) {
debug("found reference to '%1%' at offset '%2%'", ref, i);
seen.insert(ref);
}
++i;
}
}
void RefScanSink::operator () (std::string_view data)
{
/* It's possible that a reference spans the previous and current
fragment, so search in the concatenation of the tail of the
previous fragment and the start of the current fragment. */
auto s = tail;
auto tailLen = std::min(data.size(), refLength);
s.append(data.data(), tailLen);
search(s, hashes, seen);
search(data, hashes, seen);
auto rest = refLength - tailLen;
if (rest < tail.size())
tail = tail.substr(tail.size() - rest);
tail.append(data.data() + data.size() - tailLen, tailLen);
}
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path,
const StorePathSet & refs)
{
HashSink hashSink { htSHA256 };
auto found = scanForReferences(hashSink, path, refs);
auto hash = hashSink.finish();
return std::pair<StorePathSet, HashResult>(found, hash);
}
StorePathSet scanForReferences(
Sink & toTee,
const Path & path,
const StorePathSet & refs)
{
PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
TeeSink sink { refsSink, toTee };
/* Look for the hashes in the NAR dump of the path. */
dumpPath(path, sink);
return refsSink.getResultPaths();
}
RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
: from(from), to(to), nextSink(nextSink)
{
assert(from.size() == to.size());
}
void RewritingSink::operator () (std::string_view data)
{
std::string s(prev);
s.append(data);
size_t j = 0;
while ((j = s.find(from, j)) != std::string::npos) {
matches.push_back(pos + j);
s.replace(j, from.size(), to);
}
prev = s.size() < from.size() ? s : std::string(s, s.size() - from.size() + 1, from.size() - 1);
auto consumed = s.size() - prev.size();
pos += consumed;
if (consumed) nextSink(s.substr(0, consumed));
}
void RewritingSink::flush()
{
if (prev.empty()) return;
pos += prev.size();
nextSink(prev);
prev.clear();
}
HashModuloSink::HashModuloSink(HashType ht, const std::string & modulus)
: hashSink(ht)
, rewritingSink(modulus, std::string(modulus.size(), 0), hashSink)
{
}
void HashModuloSink::operator () (std::string_view data)
{
rewritingSink(data);
}
HashResult HashModuloSink::finish()
{
rewritingSink.flush();
/* Hash the positions of the self-references. This ensures that a
NAR with self-references and a NAR with some of the
self-references already zeroed out do not produce a hash
collision. FIXME: proof. */
for (auto & pos : rewritingSink.matches)
hashSink(fmt("|%d", pos));
auto h = hashSink.finish();
return {h.first, rewritingSink.pos};
}
}

View file

@ -1,71 +0,0 @@
#pragma once
///@file
#include "hash.hh"
#include "path.hh"
namespace nix {
std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
class RefScanSink : public Sink
{
StringSet hashes;
StringSet seen;
std::string tail;
public:
RefScanSink(StringSet && hashes) : hashes(hashes)
{ }
StringSet & getResult()
{ return seen; }
void operator () (std::string_view data) override;
};
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
struct RewritingSink : Sink
{
std::string from, to, prev;
Sink & nextSink;
uint64_t pos = 0;
std::vector<uint64_t> matches;
RewritingSink(const std::string & from, const std::string & to, Sink & nextSink);
void operator () (std::string_view data) override;
void flush();
};
struct HashModuloSink : AbstractHashSink
{
HashSink hashSink;
RewritingSink rewritingSink;
HashModuloSink(HashType ht, const std::string & modulus);
void operator () (std::string_view data) override;
HashResult finish() override;
};
}

View file

@ -0,0 +1,97 @@
#include "remote-store.hh"
#include "worker-protocol.hh"
namespace nix {
/**
* Bidirectional connection (send and receive) used by the Remote Store
* implementation.
*
* Contains `Source` and `Sink` for actual communication, along with
* other information learned when negotiating the connection.
*/
struct RemoteStore::Connection
{
/**
* Send with this.
*/
FdSink to;
/**
* Receive with this.
*/
FdSource from;
/**
* Worker protocol version used for the connection.
*
* Despite its name, I think it is actually the maximum version both
* sides support. (If no such common version exists, we fail to
* establish a connection and never produce a value of this type.)
*/
unsigned int daemonVersion;
/**
* Whether the remote side trusts us or not.
*
* 3 values: "yes", "no", or `std::nullopt` for "unknown".
*
* Note that the "remote side" might not be just the end daemon, but
* also an intermediary forwarder that can make its own trusting
* decisions. This would be the intersection of all their trust
* decisions, since it takes only one link in the chain to start
* denying operations.
*/
std::optional<TrustedFlag> remoteTrustsUs;
/**
* The version of the Nix daemon that is processing our requests.
*
* Do note, it may or may not itself be communicating with another
* daemon, rather than being an "end" `LocalStore` or similar.
*/
std::optional<std::string> daemonNixVersion;
/**
* Time this connection was established.
*/
std::chrono::time_point<std::chrono::steady_clock> startTime;
/**
* Coercion to `WorkerProto::ReadConn`. This makes it easy to use the
* factored-out worker protocol serialisers with a
* `RemoteStore::Connection`.
*
* The worker protocol connection types are unidirectional, unlike
* this type.
*/
operator WorkerProto::ReadConn ()
{
return WorkerProto::ReadConn {
.from = from,
};
}
/**
* Coercion to `WorkerProto::WriteConn`. This makes it easy to use the
* factored-out worker protocol serialisers with a
* `RemoteStore::Connection`.
*
* The worker protocol connection types are unidirectional, unlike
* this type.
*/
operator WorkerProto::WriteConn ()
{
return WorkerProto::WriteConn {
.to = to,
};
}
virtual ~Connection();
virtual void closeWrite() = 0;
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
};
}
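
An illustrative sketch (not part of the commit) of what the two coercion operators buy: a bidirectional `RemoteStore::Connection` can be handed directly to the factored-out serialisers. The function and its arguments are placeholders.

// Illustrative sketch: the implicit conversions above let a Connection be
// passed wherever the unidirectional WorkerProto connection types are expected.
#include "remote-store-connection.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "store-api.hh"

void exchangePaths(const nix::Store & store,
    nix::RemoteStore::Connection & conn,
    const nix::StorePathSet & paths)
{
    // Converted via operator WorkerProto::WriteConn.
    nix::WorkerProto::write(store, conn, paths);
    // Converted via operator WorkerProto::ReadConn.
    auto reply = nix::WorkerProto::Serialise<nix::StorePathSet>::read(store, conn);
    (void) reply;
}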

View file

@ -5,7 +5,9 @@
#include "remote-fs-accessor.hh"
#include "build-result.hh"
#include "remote-store.hh"
#include "remote-store-connection.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "archive.hh"
#include "globals.hh"
#include "derivations.hh"
@ -18,189 +20,6 @@
namespace nix {
namespace worker_proto {
std::string read(const Store & store, Source & from, Phantom<std::string> _)
{
return readString(from);
}
void write(const Store & store, Sink & out, const std::string & str)
{
out << str;
}
StorePath read(const Store & store, Source & from, Phantom<StorePath> _)
{
return store.parseStorePath(readString(from));
}
void write(const Store & store, Sink & out, const StorePath & storePath)
{
out << store.printStorePath(storePath);
}
std::optional<TrustedFlag> read(const Store & store, Source & from, Phantom<std::optional<TrustedFlag>> _)
{
auto temp = readNum<uint8_t>(from);
switch (temp) {
case 0:
return std::nullopt;
case 1:
return { Trusted };
case 2:
return { NotTrusted };
default:
throw Error("Invalid trusted status from remote");
}
}
void write(const Store & store, Sink & out, const std::optional<TrustedFlag> & optTrusted)
{
if (!optTrusted)
out << (uint8_t)0;
else {
switch (*optTrusted) {
case Trusted:
out << (uint8_t)1;
break;
case NotTrusted:
out << (uint8_t)2;
break;
default:
assert(false);
};
}
}
ContentAddress read(const Store & store, Source & from, Phantom<ContentAddress> _)
{
return ContentAddress::parse(readString(from));
}
void write(const Store & store, Sink & out, const ContentAddress & ca)
{
out << renderContentAddress(ca);
}
DerivedPath read(const Store & store, Source & from, Phantom<DerivedPath> _)
{
auto s = readString(from);
return DerivedPath::parseLegacy(store, s);
}
void write(const Store & store, Sink & out, const DerivedPath & req)
{
out << req.to_string_legacy(store);
}
Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
{
std::string rawInput = readString(from);
return Realisation::fromJSON(
nlohmann::json::parse(rawInput),
"remote-protocol"
);
}
void write(const Store & store, Sink & out, const Realisation & realisation)
{
out << realisation.toJSON().dump();
}
DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
{
return DrvOutput::parse(readString(from));
}
void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
{
out << drvOutput.to_string();
}
KeyedBuildResult read(const Store & store, Source & from, Phantom<KeyedBuildResult> _)
{
auto path = worker_proto::read(store, from, Phantom<DerivedPath> {});
auto br = worker_proto::read(store, from, Phantom<BuildResult> {});
return KeyedBuildResult {
std::move(br),
/* .path = */ std::move(path),
};
}
void write(const Store & store, Sink & to, const KeyedBuildResult & res)
{
worker_proto::write(store, to, res.path);
worker_proto::write(store, to, static_cast<const BuildResult &>(res));
}
BuildResult read(const Store & store, Source & from, Phantom<BuildResult> _)
{
BuildResult res;
res.status = (BuildResult::Status) readInt(from);
from
>> res.errorMsg
>> res.timesBuilt
>> res.isNonDeterministic
>> res.startTime
>> res.stopTime;
auto builtOutputs = worker_proto::read(store, from, Phantom<DrvOutputs> {});
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
std::move(realisation));
return res;
}
void write(const Store & store, Sink & to, const BuildResult & res)
{
to
<< res.status
<< res.errorMsg
<< res.timesBuilt
<< res.isNonDeterministic
<< res.startTime
<< res.stopTime;
DrvOutputs builtOutputs;
for (auto & [output, realisation] : res.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
worker_proto::write(store, to, builtOutputs);
}
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{
auto s = readString(from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
void write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
{
out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
std::optional<ContentAddress> read(const Store & store, Source & from, Phantom<std::optional<ContentAddress>> _)
{
return ContentAddress::parseOpt(readString(from));
}
void write(const Store & store, Sink & out, const std::optional<ContentAddress> & caOpt)
{
out << (caOpt ? renderContentAddress(*caOpt) : "");
}
}
/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
: RemoteStoreConfig(params)
@ -283,7 +102,7 @@ void RemoteStore::initConnection(Connection & conn)
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
conn.remoteTrustsUs = worker_proto::read(*this, conn.from, Phantom<std::optional<TrustedFlag>> {});
conn.remoteTrustsUs = WorkerProto::Serialise<std::optional<TrustedFlag>>::read(*this, conn);
} else {
// We don't know the answer; protocol too old.
conn.remoteTrustsUs = std::nullopt;
@ -302,7 +121,7 @@ void RemoteStore::initConnection(Connection & conn)
void RemoteStore::setOptions(Connection & conn)
{
conn.to << wopSetOptions
conn.to << WorkerProto::Op::SetOptions
<< settings.keepFailed
<< settings.keepGoing
<< settings.tryFallback
@ -367,6 +186,7 @@ struct ConnectionHandle
}
RemoteStore::Connection * operator -> () { return &*handle; }
RemoteStore::Connection & operator * () { return *handle; }
void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
{
@ -394,7 +214,7 @@ void RemoteStore::setOptions()
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopIsValidPath << printStorePath(path);
conn->to << WorkerProto::Op::IsValidPath << printStorePath(path);
conn.processStderr();
return readInt(conn->from);
}
@ -409,13 +229,13 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
if (isValidPath(i)) res.insert(i);
return res;
} else {
conn->to << wopQueryValidPaths;
worker_proto::write(*this, conn->to, paths);
conn->to << WorkerProto::Op::QueryValidPaths;
WorkerProto::write(*this, *conn, paths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
conn->to << (settings.buildersUseSubstitutes ? 1 : 0);
}
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
@ -423,9 +243,9 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
StorePathSet RemoteStore::queryAllValidPaths()
{
auto conn(getConnection());
conn->to << wopQueryAllValidPaths;
conn->to << WorkerProto::Op::QueryAllValidPaths;
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@ -435,16 +255,16 @@ StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths) {
conn->to << wopHasSubstitutes << printStorePath(i);
conn->to << WorkerProto::Op::HasSubstitutes << printStorePath(i);
conn.processStderr();
if (readInt(conn->from)) res.insert(i);
}
return res;
} else {
conn->to << wopQuerySubstitutablePaths;
worker_proto::write(*this, conn->to, paths);
conn->to << WorkerProto::Op::QuerySubstitutablePaths;
WorkerProto::write(*this, *conn, paths);
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
@ -459,14 +279,14 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
for (auto & i : pathsMap) {
SubstitutablePathInfo info;
conn->to << wopQuerySubstitutablePathInfo << printStorePath(i.first);
conn->to << WorkerProto::Op::QuerySubstitutablePathInfo << printStorePath(i.first);
conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
@ -474,14 +294,14 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
} else {
conn->to << wopQuerySubstitutablePathInfos;
conn->to << WorkerProto::Op::QuerySubstitutablePathInfos;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
worker_proto::write(*this, conn->to, paths);
WorkerProto::write(*this, *conn, paths);
} else
worker_proto::write(*this, conn->to, pathsMap);
WorkerProto::write(*this, *conn, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
@ -489,7 +309,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
@ -505,7 +325,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
std::shared_ptr<const ValidPathInfo> info;
{
auto conn(getConnection());
conn->to << wopQueryPathInfo << printStorePath(path);
conn->to << WorkerProto::Op::QueryPathInfo << printStorePath(path);
try {
conn.processStderr();
} catch (Error & e) {
@ -530,9 +350,9 @@ void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet & referrers)
{
auto conn(getConnection());
conn->to << wopQueryReferrers << printStorePath(path);
conn->to << WorkerProto::Op::QueryReferrers << printStorePath(path);
conn.processStderr();
for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
for (auto & i : WorkerProto::Serialise<StorePathSet>::read(*this, *conn))
referrers.insert(i);
}
@ -540,9 +360,9 @@ void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopQueryValidDerivers << printStorePath(path);
conn->to << WorkerProto::Op::QueryValidDerivers << printStorePath(path);
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@ -552,9 +372,9 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
return Store::queryDerivationOutputs(path);
}
auto conn(getConnection());
conn->to << wopQueryDerivationOutputs << printStorePath(path);
conn->to << WorkerProto::Op::QueryDerivationOutputs << printStorePath(path);
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@ -562,9 +382,9 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
auto conn(getConnection());
conn->to << wopQueryDerivationOutputMap << printStorePath(path);
conn->to << WorkerProto::Op::QueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<std::map<std::string, std::optional<StorePath>>> {});
return WorkerProto::Serialise<std::map<std::string, std::optional<StorePath>>>::read(*this, *conn);
} else {
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
@ -585,7 +405,7 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
{
auto conn(getConnection());
conn->to << wopQueryPathFromHashPart << hashPart;
conn->to << WorkerProto::Op::QueryPathFromHashPart << hashPart;
conn.processStderr();
Path path = readString(conn->from);
if (path.empty()) return {};
@ -607,10 +427,10 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
conn->to
<< wopAddToStore
<< WorkerProto::Op::AddToStore
<< name
<< caMethod.render(hashType);
worker_proto::write(*this, conn->to, references);
WorkerProto::write(*this, *conn, references);
conn->to << repair;
// The dump source may invoke the store, so we need to make some room.
@ -634,13 +454,13 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
name, printHashType(hashType));
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
worker_proto::write(*this, conn->to, references);
conn->to << WorkerProto::Op::AddTextToStore << name << s;
WorkerProto::write(*this, *conn, references);
conn.processStderr();
},
[&](const FileIngestionMethod & fim) -> void {
conn->to
<< wopAddToStore
<< WorkerProto::Op::AddToStore
<< name
<< ((hashType == htSHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (fim == FileIngestionMethod::Recursive ? 1 : 0)
@ -692,7 +512,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
conn->to << wopImportPaths;
conn->to << WorkerProto::Op::ImportPaths;
auto source2 = sinkToSource([&](Sink & sink) {
sink << 1 // == path follows
@ -701,7 +521,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
sink
<< exportMagic
<< printStorePath(info.path);
worker_proto::write(*this, sink, info.references);
WorkerProto::write(*this, *conn, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
@ -711,16 +531,16 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
conn.processStderr(0, source2.get());
auto importedPaths = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
auto importedPaths = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
assert(importedPaths.size() <= 1);
}
else {
conn->to << wopAddToStoreNar
conn->to << WorkerProto::Op::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
worker_proto::write(*this, conn->to, info.references);
WorkerProto::write(*this, *conn, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
@ -764,7 +584,7 @@ void RemoteStore::addMultipleToStore(
if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
auto conn(getConnection());
conn->to
<< wopAddMultipleToStore
<< WorkerProto::Op::AddMultipleToStore
<< repair
<< !checkSigs;
conn.withFramedSink([&](Sink & sink) {
@ -788,12 +608,12 @@ StorePath RemoteStore::addTextToStore(
void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
conn->to << wopRegisterDrvOutput;
conn->to << WorkerProto::Op::RegisterDrvOutput;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
worker_proto::write(*this, conn->to, info);
WorkerProto::write(*this, *conn, info);
}
conn.processStderr();
}
@ -809,20 +629,20 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
return callback(nullptr);
}
conn->to << wopQueryRealisation;
conn->to << WorkerProto::Op::QueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
auto outPaths = worker_proto::read(
*this, conn->from, Phantom<std::set<StorePath>> {});
auto outPaths = WorkerProto::Serialise<std::set<StorePath>>::read(
*this, *conn);
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
auto realisations = worker_proto::read(
*this, conn->from, Phantom<std::set<Realisation>> {});
auto realisations = WorkerProto::Serialise<std::set<Realisation>>::read(
*this, *conn);
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
@ -833,10 +653,10 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
} catch (...) { return callback.rethrow(); }
}
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
static void writeDerivedPaths(RemoteStore & store, RemoteStore::Connection & conn, const std::vector<DerivedPath> & reqs)
{
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
worker_proto::write(store, conn->to, reqs);
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 30) {
WorkerProto::write(store, conn, reqs);
} else {
Strings ss;
for (auto & p : reqs) {
@ -848,12 +668,12 @@ static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, cons
[&](const StorePath & drvPath) {
throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
store.printStorePath(drvPath),
GET_PROTOCOL_MAJOR(conn->daemonVersion),
GET_PROTOCOL_MINOR(conn->daemonVersion));
GET_PROTOCOL_MAJOR(conn.daemonVersion),
GET_PROTOCOL_MINOR(conn.daemonVersion));
},
}, sOrDrvPath);
}
conn->to << ss;
conn.to << ss;
}
}
@ -877,9 +697,9 @@ void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMod
copyDrvsFromEvalStore(drvPaths, evalStore);
auto conn(getConnection());
conn->to << wopBuildPaths;
conn->to << WorkerProto::Op::BuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
writeDerivedPaths(*this, conn, drvPaths);
writeDerivedPaths(*this, *conn, drvPaths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
conn->to << buildMode;
else
@ -902,11 +722,11 @@ std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
conn->to << wopBuildPathsWithResults;
writeDerivedPaths(*this, conn, paths);
conn->to << WorkerProto::Op::BuildPathsWithResults;
writeDerivedPaths(*this, *conn, paths);
conn->to << buildMode;
conn.processStderr();
return worker_proto::read(*this, conn->from, Phantom<std::vector<KeyedBuildResult>> {});
return WorkerProto::Serialise<std::vector<KeyedBuildResult>>::read(*this, *conn);
} else {
// Avoid deadlock.
conn_.reset();
@ -978,7 +798,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
BuildMode buildMode)
{
auto conn(getConnection());
conn->to << wopBuildDerivation << printStorePath(drvPath);
conn->to << WorkerProto::Op::BuildDerivation << printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
conn->to << buildMode;
conn.processStderr();
@ -989,7 +809,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(*this, *conn);
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
@ -1002,7 +822,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
void RemoteStore::ensurePath(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopEnsurePath << printStorePath(path);
conn->to << WorkerProto::Op::EnsurePath << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
@ -1011,7 +831,7 @@ void RemoteStore::ensurePath(const StorePath & path)
void RemoteStore::addTempRoot(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopAddTempRoot << printStorePath(path);
conn->to << WorkerProto::Op::AddTempRoot << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
@ -1020,7 +840,7 @@ void RemoteStore::addTempRoot(const StorePath & path)
void RemoteStore::addIndirectRoot(const Path & path)
{
auto conn(getConnection());
conn->to << wopAddIndirectRoot << path;
conn->to << WorkerProto::Op::AddIndirectRoot << path;
conn.processStderr();
readInt(conn->from);
}
@ -1029,7 +849,7 @@ void RemoteStore::addIndirectRoot(const Path & path)
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
conn->to << wopFindRoots;
conn->to << WorkerProto::Op::FindRoots;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
Roots result;
@ -1047,8 +867,8 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
auto conn(getConnection());
conn->to
<< wopCollectGarbage << options.action;
worker_proto::write(*this, conn->to, options.pathsToDelete);
<< WorkerProto::Op::CollectGarbage << options.action;
WorkerProto::write(*this, *conn, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
@ -1070,7 +890,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
void RemoteStore::optimiseStore()
{
auto conn(getConnection());
conn->to << wopOptimiseStore;
conn->to << WorkerProto::Op::OptimiseStore;
conn.processStderr();
readInt(conn->from);
}
@ -1079,7 +899,7 @@ void RemoteStore::optimiseStore()
bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
auto conn(getConnection());
conn->to << wopVerifyStore << checkContents << repair;
conn->to << WorkerProto::Op::VerifyStore << checkContents << repair;
conn.processStderr();
return readInt(conn->from);
}
@ -1088,7 +908,7 @@ bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
{
auto conn(getConnection());
conn->to << wopAddSignatures << printStorePath(storePath) << sigs;
conn->to << WorkerProto::Op::AddSignatures << printStorePath(storePath) << sigs;
conn.processStderr();
readInt(conn->from);
}
@ -1104,12 +924,12 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
conn->to << wopQueryMissing;
writeDerivedPaths(*this, conn, targets);
conn->to << WorkerProto::Op::QueryMissing;
writeDerivedPaths(*this, *conn, targets);
conn.processStderr();
willBuild = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
willSubstitute = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
unknown = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
willBuild = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
willSubstitute = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
unknown = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
conn->from >> downloadSize >> narSize;
return;
}
@ -1123,7 +943,7 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
{
auto conn(getConnection());
conn->to << wopAddBuildLog << drvPath.to_string();
conn->to << WorkerProto::Op::AddBuildLog << drvPath.to_string();
StringSource source(log);
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
@ -1175,7 +995,7 @@ RemoteStore::Connection::~Connection()
void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->to << wopNarFromPath << printStorePath(path);
conn->to << WorkerProto::Op::NarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}

View file

@ -137,6 +137,17 @@ public:
bool verifyStore(bool checkContents, RepairFlag repair) override;
/**
* The default implementation would schedule the work on the client side, but
* for consistency with `buildPaths` and `buildDerivation` it should happen
* on the remote side.
*
* We make this fail for now so we can implement it properly later
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
void queryMissing(const std::vector<DerivedPath> & targets,
@ -155,21 +166,7 @@ public:
void flushBadConnections();
struct Connection
{
FdSink to;
FdSource from;
unsigned int daemonVersion;
std::optional<TrustedFlag> remoteTrustsUs;
std::optional<std::string> daemonNixVersion;
std::chrono::time_point<std::chrono::steady_clock> startTime;
virtual ~Connection();
virtual void closeWrite() = 0;
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
};
struct Connection;
ref<Connection> openConnectionWrapper();

View file

@ -10,16 +10,52 @@ namespace nix {
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
typedef enum {
cmdQueryValidPaths = 1,
cmdQueryPathInfos = 2,
cmdDumpStorePath = 3,
cmdImportPaths = 4,
cmdExportPaths = 5,
cmdBuildPaths = 6,
cmdQueryClosure = 7,
cmdBuildDerivation = 8,
cmdAddToStoreNar = 9,
} ServeCommand;
/**
* The "serve protocol", used by ssh:// stores.
*
* This `struct` is basically just a `namespace`; we use a type rather
* than a namespace just so we can use it as a template argument.
*/
struct ServeProto
{
/**
* Enumeration of all the request types for the protocol.
*/
enum struct Command : uint64_t;
};
enum struct ServeProto::Command : uint64_t
{
QueryValidPaths = 1,
QueryPathInfos = 2,
DumpStorePath = 3,
ImportPaths = 4,
ExportPaths = 5,
BuildPaths = 6,
QueryClosure = 7,
BuildDerivation = 8,
AddToStoreNar = 9,
};
/**
* Convenience for sending operation codes.
*
* @todo Probably switch to using `ServeProto::Serialize` instead. This
* was not done at this time to keep the churn down.
*/
inline Sink & operator << (Sink & sink, ServeProto::Command op)
{
return sink << (uint64_t) op;
}
/**
* Convenience for debugging.
*
* @todo Perhaps render known opcodes more nicely.
*/
inline std::ostream & operator << (std::ostream & s, ServeProto::Command op)
{
return s << (uint64_t) op;
}
}
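
An editor's sketch of the two helpers in use (not from this changeset); the sink is a placeholder provided by the caller.

// Illustrative sketch: sending and logging an opcode with the helpers above.
#include "serve-protocol.hh"
#include "serialise.hh"

#include <iostream>

void requestValidPaths(nix::Sink & to)
{
    to << nix::ServeProto::Command::QueryValidPaths;                  // wire format: uint64_t 1
    std::cerr << nix::ServeProto::Command::QueryValidPaths << "\n";   // debug output: "1"
}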

View file

@ -1,6 +1,7 @@
#include "sqlite.hh"
#include "globals.hh"
#include "util.hh"
#include "url.hh"
#include <sqlite3.h>
@ -50,15 +51,17 @@ static void traceSQL(void * x, const char * sql)
notice("SQL<[%1%]>", sql);
};
SQLite::SQLite(const Path & path, bool create)
SQLite::SQLite(const Path & path, SQLiteOpenMode mode)
{
// useSQLiteWAL also indicates what virtual file system we need. Using
// `unix-dotfile` is needed on NFS file systems and on Windows' Subsystem
// for Linux (WSL) where useSQLiteWAL should be false by default.
const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
int flags = SQLITE_OPEN_READWRITE;
if (create) flags |= SQLITE_OPEN_CREATE;
int ret = sqlite3_open_v2(path.c_str(), &db, flags, vfs);
bool immutable = mode == SQLiteOpenMode::Immutable;
int flags = immutable ? SQLITE_OPEN_READONLY : SQLITE_OPEN_READWRITE;
if (mode == SQLiteOpenMode::Normal) flags |= SQLITE_OPEN_CREATE;
auto uri = "file:" + percentEncode(path) + "?immutable=" + (immutable ? "1" : "0");
int ret = sqlite3_open_v2(uri.c_str(), &db, SQLITE_OPEN_URI | flags, vfs);
if (ret != SQLITE_OK) {
const char * err = sqlite3_errstr(ret);
throw Error("cannot open SQLite database '%s': %s", path, err);

View file

@ -11,6 +11,27 @@ struct sqlite3_stmt;
namespace nix {
enum class SQLiteOpenMode {
/**
* Open the database in read-write mode.
* If the database does not exist, it will be created.
*/
Normal,
/**
* Open the database in read-write mode.
* Fails with an error if the database does not exist.
*/
NoCreate,
/**
* Open the database in immutable mode.
* In addition to the database being read-only,
* no wal or journal files will be created by sqlite.
* Use this mode if the database is on a read-only filesystem.
* Fails with an error if the database does not exist.
*/
Immutable,
};
/**
* RAII wrapper to close a SQLite database automatically.
*/
@ -18,7 +39,7 @@ struct SQLite
{
sqlite3 * db = 0;
SQLite() { }
SQLite(const Path & path, bool create = true);
SQLite(const Path & path, SQLiteOpenMode mode = SQLiteOpenMode::Normal);
SQLite(const SQLite & from) = delete;
SQLite& operator = (const SQLite & from) = delete;
SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
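
An illustrative sketch (not part of the commit) of picking an open mode per the enum semantics above; the database path shown is only an example.

// Illustrative sketch: open an existing Nix database immutably, so SQLite
// never creates WAL/journal files; this fails if the file does not exist.
#include "sqlite.hh"

void openNixDbImmutable()
{
    nix::SQLite db("/nix/var/nix/db/db.sqlite", nix::SQLiteOpenMode::Immutable);
    // `db` is usable for read queries only; the handle is closed on destruction.
}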

View file

@ -1,6 +1,7 @@
#include "ssh-store-config.hh"
#include "store-api.hh"
#include "remote-store.hh"
#include "remote-store-connection.hh"
#include "remote-fs-accessor.hh"
#include "archive.hh"
#include "worker-protocol.hh"

View file

@ -41,6 +41,11 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
args.push_back("-oLocalCommand=echo started");
}
bool SSHMaster::isMasterRunning() {
auto res = runProgram(RunOptions {.program = "ssh", .args = {"-O", "check", host}, .mergeStderrToStdout = true});
return res.first == 0;
}
std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
{
Path socketPath = startMaster();
@ -97,7 +102,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
// Wait for the SSH connection to be established,
// So that we don't overwrite the password prompt with our progress bar.
if (!fakeSSH && !useMaster) {
if (!fakeSSH && !useMaster && !isMasterRunning()) {
std::string reply;
try {
reply = readLine(out.readSide.get());
@ -133,6 +138,8 @@ Path SSHMaster::startMaster()
logger->pause();
Finally cleanup = [&]() { logger->resume(); };
bool wasMasterRunning = isMasterRunning();
state->sshMaster = startProcess([&]() {
restoreProcessContext();
@ -152,13 +159,15 @@ Path SSHMaster::startMaster()
out.writeSide = -1;
std::string reply;
try {
reply = readLine(out.readSide.get());
} catch (EndOfFile & e) { }
if (!wasMasterRunning) {
std::string reply;
try {
reply = readLine(out.readSide.get());
} catch (EndOfFile & e) { }
if (reply != "started")
throw Error("failed to start SSH master connection to '%s'", host);
if (reply != "started")
throw Error("failed to start SSH master connection to '%s'", host);
}
return state->socketPath;
}

View file

@ -28,6 +28,7 @@ private:
Sync<State> state_;
void addCommonSSHOpts(Strings & args);
bool isMasterRunning();
public:

View file

@ -114,7 +114,7 @@ struct StoreConfig : public Config
return "";
}
const PathSetting storeDir_{this, false, settings.nixStore,
const PathSetting storeDir_{this, settings.nixStore,
"store",
R"(
Logical location of the Nix store, usually
@ -679,8 +679,7 @@ public:
* Repair the contents of the given path by redownloading it using
* a substituter (if available).
*/
virtual void repairPath(const StorePath & path)
{ unsupported("repairPath"); }
virtual void repairPath(const StorePath & path);
/**
* Add signatures to the specified store path. The signatures are

View file

@ -0,0 +1,33 @@
#include <gtest/gtest.h>
#include "downstream-placeholder.hh"
namespace nix {
TEST(DownstreamPlaceholder, unknownCaOutput) {
ASSERT_EQ(
DownstreamPlaceholder::unknownCaOutput(
StorePath { "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv" },
"out").render(),
"/0c6rn30q4frawknapgwq386zq358m8r6msvywcvc89n6m5p2dgbz");
}
TEST(DownstreamPlaceholder, unknownDerivation) {
/**
* We set these in tests rather than the regular globals so we don't have
* to worry about race conditions if the tests run concurrently.
*/
ExperimentalFeatureSettings mockXpSettings;
mockXpSettings.set("experimental-features", "dynamic-derivations ca-derivations");
ASSERT_EQ(
DownstreamPlaceholder::unknownDerivation(
DownstreamPlaceholder::unknownCaOutput(
StorePath { "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv.drv" },
"out"),
"out",
mockXpSettings).render(),
"/0gn6agqxjyyalf0dpihgyf49xq5hqxgw100f0wydnj6yqrhqsb3w");
}
}

View file

@ -13,6 +13,14 @@
namespace nix {
std::string UDSRemoteStoreConfig::doc()
{
return
#include "uds-remote-store.md"
;
}
UDSRemoteStore::UDSRemoteStore(const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)

View file

@ -2,6 +2,7 @@
///@file
#include "remote-store.hh"
#include "remote-store-connection.hh"
#include "local-fs-store.hh"
namespace nix {
@ -17,12 +18,7 @@ struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreCon
const std::string name() override { return "Local Daemon Store"; }
std::string doc() override
{
return
#include "uds-remote-store.md"
;
}
std::string doc() override;
};
class UDSRemoteStore : public virtual UDSRemoteStoreConfig, public virtual LocalFSStore, public virtual RemoteStore

View file

@ -0,0 +1,78 @@
#pragma once
/**
* @file
*
* Template implementations (as opposed to mere declarations).
*
* This file is an example of the "impl.hh" pattern. See the
* contributing guide.
*/
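// Illustrative note (an assumption about intended usage, not new code in
// this change): under the "impl.hh" pattern, declarations stay in
// "worker-protocol.hh" while the template bodies live here, so a
// translation unit that actually instantiates the templated serialisers
// includes both headers:
//
//     #include "worker-protocol.hh"
//     #include "worker-protocol-impl.hh"
//
// whereas most consumers only need the declaration header.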
#include "worker-protocol.hh"
namespace nix {
template<typename T>
std::vector<T> WorkerProto::Serialise<std::vector<T>>::read(const Store & store, WorkerProto::ReadConn conn)
{
std::vector<T> resSet;
auto size = readNum<size_t>(conn.from);
while (size--) {
resSet.push_back(WorkerProto::Serialise<T>::read(store, conn));
}
return resSet;
}
template<typename T>
void WorkerProto::Serialise<std::vector<T>>::write(const Store & store, WorkerProto::WriteConn conn, const std::vector<T> & resSet)
{
conn.to << resSet.size();
for (auto & key : resSet) {
WorkerProto::Serialise<T>::write(store, conn, key);
}
}
template<typename T>
std::set<T> WorkerProto::Serialise<std::set<T>>::read(const Store & store, WorkerProto::ReadConn conn)
{
std::set<T> resSet;
auto size = readNum<size_t>(conn.from);
while (size--) {
resSet.insert(WorkerProto::Serialise<T>::read(store, conn));
}
return resSet;
}
template<typename T>
void WorkerProto::Serialise<std::set<T>>::write(const Store & store, WorkerProto::WriteConn conn, const std::set<T> & resSet)
{
conn.to << resSet.size();
for (auto & key : resSet) {
WorkerProto::Serialise<T>::write(store, conn, key);
}
}
template<typename K, typename V>
std::map<K, V> WorkerProto::Serialise<std::map<K, V>>::read(const Store & store, WorkerProto::ReadConn conn)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(conn.from);
while (size--) {
auto k = WorkerProto::Serialise<K>::read(store, conn);
auto v = WorkerProto::Serialise<V>::read(store, conn);
resMap.insert_or_assign(std::move(k), std::move(v));
}
return resMap;
}
template<typename K, typename V>
void WorkerProto::Serialise<std::map<K, V>>::write(const Store & store, WorkerProto::WriteConn conn, const std::map<K, V> & resMap)
{
conn.to << resMap.size();
for (auto & i : resMap) {
WorkerProto::Serialise<K>::write(store, conn, i.first);
WorkerProto::Serialise<V>::write(store, conn, i.second);
}
}
}

View file

@ -0,0 +1,193 @@
#include "serialise.hh"
#include "util.hh"
#include "path-with-outputs.hh"
#include "store-api.hh"
#include "build-result.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "archive.hh"
#include "derivations.hh"
#include <nlohmann/json.hpp>
namespace nix {
std::string WorkerProto::Serialise<std::string>::read(const Store & store, WorkerProto::ReadConn conn)
{
return readString(conn.from);
}
void WorkerProto::Serialise<std::string>::write(const Store & store, WorkerProto::WriteConn conn, const std::string & str)
{
conn.to << str;
}
StorePath WorkerProto::Serialise<StorePath>::read(const Store & store, WorkerProto::ReadConn conn)
{
return store.parseStorePath(readString(conn.from));
}
void WorkerProto::Serialise<StorePath>::write(const Store & store, WorkerProto::WriteConn conn, const StorePath & storePath)
{
conn.to << store.printStorePath(storePath);
}
std::optional<TrustedFlag> WorkerProto::Serialise<std::optional<TrustedFlag>>::read(const Store & store, WorkerProto::ReadConn conn)
{
auto temp = readNum<uint8_t>(conn.from);
switch (temp) {
case 0:
return std::nullopt;
case 1:
return { Trusted };
case 2:
return { NotTrusted };
default:
throw Error("Invalid trusted status from remote");
}
}
void WorkerProto::Serialise<std::optional<TrustedFlag>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<TrustedFlag> & optTrusted)
{
if (!optTrusted)
conn.to << (uint8_t)0;
else {
switch (*optTrusted) {
case Trusted:
conn.to << (uint8_t)1;
break;
case NotTrusted:
conn.to << (uint8_t)2;
break;
default:
assert(false);
};
}
}
ContentAddress WorkerProto::Serialise<ContentAddress>::read(const Store & store, WorkerProto::ReadConn conn)
{
return ContentAddress::parse(readString(conn.from));
}
void WorkerProto::Serialise<ContentAddress>::write(const Store & store, WorkerProto::WriteConn conn, const ContentAddress & ca)
{
conn.to << renderContentAddress(ca);
}
DerivedPath WorkerProto::Serialise<DerivedPath>::read(const Store & store, WorkerProto::ReadConn conn)
{
auto s = readString(conn.from);
return DerivedPath::parseLegacy(store, s);
}
void WorkerProto::Serialise<DerivedPath>::write(const Store & store, WorkerProto::WriteConn conn, const DerivedPath & req)
{
conn.to << req.to_string_legacy(store);
}
Realisation WorkerProto::Serialise<Realisation>::read(const Store & store, WorkerProto::ReadConn conn)
{
std::string rawInput = readString(conn.from);
return Realisation::fromJSON(
nlohmann::json::parse(rawInput),
"remote-protocol"
);
}
void WorkerProto::Serialise<Realisation>::write(const Store & store, WorkerProto::WriteConn conn, const Realisation & realisation)
{
conn.to << realisation.toJSON().dump();
}
DrvOutput WorkerProto::Serialise<DrvOutput>::read(const Store & store, WorkerProto::ReadConn conn)
{
return DrvOutput::parse(readString(conn.from));
}
void WorkerProto::Serialise<DrvOutput>::write(const Store & store, WorkerProto::WriteConn conn, const DrvOutput & drvOutput)
{
conn.to << drvOutput.to_string();
}
KeyedBuildResult WorkerProto::Serialise<KeyedBuildResult>::read(const Store & store, WorkerProto::ReadConn conn)
{
auto path = WorkerProto::Serialise<DerivedPath>::read(store, conn);
auto br = WorkerProto::Serialise<BuildResult>::read(store, conn);
return KeyedBuildResult {
std::move(br),
/* .path = */ std::move(path),
};
}
void WorkerProto::Serialise<KeyedBuildResult>::write(const Store & store, WorkerProto::WriteConn conn, const KeyedBuildResult & res)
{
WorkerProto::write(store, conn, res.path);
WorkerProto::write(store, conn, static_cast<const BuildResult &>(res));
}
BuildResult WorkerProto::Serialise<BuildResult>::read(const Store & store, WorkerProto::ReadConn conn)
{
BuildResult res;
res.status = (BuildResult::Status) readInt(conn.from);
conn.from
>> res.errorMsg
>> res.timesBuilt
>> res.isNonDeterministic
>> res.startTime
>> res.stopTime;
auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(store, conn);
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
std::move(realisation));
return res;
}
void WorkerProto::Serialise<BuildResult>::write(const Store & store, WorkerProto::WriteConn conn, const BuildResult & res)
{
conn.to
<< res.status
<< res.errorMsg
<< res.timesBuilt
<< res.isNonDeterministic
<< res.startTime
<< res.stopTime;
DrvOutputs builtOutputs;
for (auto & [output, realisation] : res.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
WorkerProto::write(store, conn, builtOutputs);
}
std::optional<StorePath> WorkerProto::Serialise<std::optional<StorePath>>::read(const Store & store, WorkerProto::ReadConn conn)
{
auto s = readString(conn.from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
void WorkerProto::Serialise<std::optional<StorePath>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
{
conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
std::optional<ContentAddress> WorkerProto::Serialise<std::optional<ContentAddress>>::read(const Store & store, WorkerProto::ReadConn conn)
{
return ContentAddress::parseOpt(readString(conn.from));
}
void WorkerProto::Serialise<std::optional<ContentAddress>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
{
conn.to << (caOpt ? renderContentAddress(*caOpt) : "");
}
}

View file

@ -1,7 +1,6 @@
#pragma once
///@file
#include "store-api.hh"
#include "serialise.hh"
namespace nix {
@ -15,57 +14,6 @@ namespace nix {
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
/**
* Enumeration of all the request types for the "worker protocol", used
* by unix:// and ssh-ng:// stores.
*/
typedef enum {
wopIsValidPath = 1,
wopHasSubstitutes = 3,
wopQueryPathHash = 4, // obsolete
wopQueryReferences = 5, // obsolete
wopQueryReferrers = 6,
wopAddToStore = 7,
wopAddTextToStore = 8, // obsolete since 1.25, Nix 3.0. Use wopAddToStore
wopBuildPaths = 9,
wopEnsurePath = 10,
wopAddTempRoot = 11,
wopAddIndirectRoot = 12,
wopSyncWithGC = 13,
wopFindRoots = 14,
wopExportPath = 16, // obsolete
wopQueryDeriver = 18, // obsolete
wopSetOptions = 19,
wopCollectGarbage = 20,
wopQuerySubstitutablePathInfo = 21,
wopQueryDerivationOutputs = 22, // obsolete
wopQueryAllValidPaths = 23,
wopQueryFailedPaths = 24,
wopClearFailedPaths = 25,
wopQueryPathInfo = 26,
wopImportPaths = 27, // obsolete
wopQueryDerivationOutputNames = 28, // obsolete
wopQueryPathFromHashPart = 29,
wopQuerySubstitutablePathInfos = 30,
wopQueryValidPaths = 31,
wopQuerySubstitutablePaths = 32,
wopQueryValidDerivers = 33,
wopOptimiseStore = 34,
wopVerifyStore = 35,
wopBuildDerivation = 36,
wopAddSignatures = 37,
wopNarFromPath = 38,
wopAddToStoreNar = 39,
wopQueryMissing = 40,
wopQueryDerivationOutputMap = 41,
wopRegisterDrvOutput = 42,
wopQueryRealisation = 43,
wopAddMultipleToStore = 44,
wopAddBuildLog = 45,
wopBuildPathsWithResults = 46,
} WorkerOp;
#define STDERR_NEXT 0x6f6c6d67
#define STDERR_READ 0x64617461 // data needed from source
#define STDERR_WRITE 0x64617416 // data for sink
@ -79,45 +27,208 @@ typedef enum {
class Store;
struct Source;
// items being serialised
struct DerivedPath;
struct DrvOutput;
struct Realisation;
struct BuildResult;
struct KeyedBuildResult;
enum TrustedFlag : bool;
/**
* Used to guide overloading
* The "worker protocol", used by unix:// and ssh-ng:// stores.
*
* See https://en.cppreference.com/w/cpp/language/adl for the broader
* concept of what is going on here.
* This `struct` is basically just a `namespace`; we use a type rather
* than a namespace just so we can use it as a template argument.
*/
struct WorkerProto
{
/**
* Enumeration of all the request types for the protocol.
*/
enum struct Op : uint64_t;
/**
* A unidirectional read connection, to be used by the read half of the
* canonical serializers below.
*
* This currently is just a `Source &`, but more fields will be added
* later.
*/
struct ReadConn {
Source & from;
};
/**
* A unidirectional write connection, to be used by the write half of the
* canonical serializers below.
*
* This currently is just a `Sink &`, but more fields will be added
* later.
*/
struct WriteConn {
Sink & to;
};
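// Illustrative sketch (hypothetical names, not part of this change): both
// wrappers are constructed directly around an existing stream, e.g.
//
//     WorkerProto::ReadConn rconn { someSource }; // someSource is a Source &
//     WorkerProto::WriteConn wconn { someSink };  // someSink is a Sink &
//
// so that extra connection state can be threaded through later without
// touching every serialiser signature.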
/**
* Data type for canonical pairs of serialisers for the worker protocol.
*
* See https://en.cppreference.com/w/cpp/language/adl for the broader
* concept of what is going on here.
*/
template<typename T>
struct Serialise;
// This is the definition of `Serialise` we *want* to put here, but
// do not do so.
//
// The problem is that if we do so, C++ will think we have
// serialisers for *all* types. We don't, of course, but that won't
// cause an error until link time. That makes for long debug cycles
// when there is a missing serialiser.
//
// By not defining it globally, and instead letting individual
// serialisers specialise the type, we get back the compile-time
// errors we would like. When no serialiser exists, C++ sees an
// abstract "incomplete" type with no definition, and any attempt to
// use its `read` or `write` static methods is a compile-time error because
// they don't exist on an incomplete type.
//
// This makes for a quicker debug cycle, as desired.
#if 0
{
static T read(const Store & store, ReadConn conn);
static void write(const Store & store, WriteConn conn, const T & t);
};
#endif
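// Illustrative consequence of leaving the primary template undefined
// (the type below is hypothetical):
//
//     struct NotSerialisable {};
//     WorkerProto::Serialise<NotSerialisable>::read(store, conn);
//         // error: incomplete type 'WorkerProto::Serialise<NotSerialisable>'
//
// With the #if 0 definition above enabled instead, this call would
// compile and only fail at link time with an undefined reference.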
/**
* Wrapper function around `WorkerProto::Serialise<T>::write` that allows us to
* infer the type instead of having to write it down explicitly.
*/
template<typename T>
static void write(const Store & store, WriteConn conn, const T & t)
{
WorkerProto::Serialise<T>::write(store, conn, t);
}
};
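// Illustrative usage sketch (store, someSink and paths are hypothetical):
//
//     WorkerProto::WriteConn conn { someSink };
//     StorePathSet paths = ...;
//     WorkerProto::write(store, conn, paths); // deduces Serialise<std::set<StorePath>>
//
// The explicit spelling WorkerProto::Serialise<StorePathSet>::write(store, conn, paths)
// is equivalent; the translation unit must include worker-protocol-impl.hh
// for the templated set serialiser's body.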
enum struct WorkerProto::Op : uint64_t
{
IsValidPath = 1,
HasSubstitutes = 3,
QueryPathHash = 4, // obsolete
QueryReferences = 5, // obsolete
QueryReferrers = 6,
AddToStore = 7,
AddTextToStore = 8, // obsolete since 1.25, Nix 3.0. Use WorkerProto::Op::AddToStore
BuildPaths = 9,
EnsurePath = 10,
AddTempRoot = 11,
AddIndirectRoot = 12,
SyncWithGC = 13,
FindRoots = 14,
ExportPath = 16, // obsolete
QueryDeriver = 18, // obsolete
SetOptions = 19,
CollectGarbage = 20,
QuerySubstitutablePathInfo = 21,
QueryDerivationOutputs = 22, // obsolete
QueryAllValidPaths = 23,
QueryFailedPaths = 24,
ClearFailedPaths = 25,
QueryPathInfo = 26,
ImportPaths = 27, // obsolete
QueryDerivationOutputNames = 28, // obsolete
QueryPathFromHashPart = 29,
QuerySubstitutablePathInfos = 30,
QueryValidPaths = 31,
QuerySubstitutablePaths = 32,
QueryValidDerivers = 33,
OptimiseStore = 34,
VerifyStore = 35,
BuildDerivation = 36,
AddSignatures = 37,
NarFromPath = 38,
AddToStoreNar = 39,
QueryMissing = 40,
QueryDerivationOutputMap = 41,
RegisterDrvOutput = 42,
QueryRealisation = 43,
AddMultipleToStore = 44,
AddBuildLog = 45,
BuildPathsWithResults = 46,
};
/**
* Convenience for sending operation codes.
*
* @todo Probably switch to using `WorkerProto::Serialise` instead. That
* was not done at this time, to keep the churn of this change down.
*/
inline Sink & operator << (Sink & sink, WorkerProto::Op op)
{
return sink << (uint64_t) op;
}
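// Illustrative call site (hypothetical `conn`): an opcode goes over the
// wire as a plain 64-bit integer, e.g.
//
//     conn.to << WorkerProto::Op::QueryPathInfo;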
/**
* Convenience for debugging.
*
* @todo Perhaps render known opcodes more nicely.
*/
inline std::ostream & operator << (std::ostream & s, WorkerProto::Op op)
{
return s << (uint64_t) op;
}
/**
* Declare a canonical serialiser pair for the worker protocol.
*
* We specialise the struct merely to indicate that we are implementing
* the function for the given type.
*
* Some sort of `template<...>` must be written at the use site for this to
* be legal specialization syntax. See below for what that looks like in
* practice.
*/
#define MAKE_WORKER_PROTO(T) \
struct WorkerProto::Serialise< T > { \
static T read(const Store & store, WorkerProto::ReadConn conn); \
static void write(const Store & store, WorkerProto::WriteConn conn, const T & t); \
};
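// Illustrative expansion (not additional code): a declaration such as
//
//     template<>
//     MAKE_WORKER_PROTO(StorePath);
//
// expands to the explicit specialisation
//
//     template<>
//     struct WorkerProto::Serialise< StorePath > {
//         static StorePath read(const Store & store, WorkerProto::ReadConn conn);
//         static void write(const Store & store, WorkerProto::WriteConn conn, const StorePath & t);
//     };
//
// whose member definitions live in worker-protocol.cc.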
template<>
MAKE_WORKER_PROTO(std::string);
template<>
MAKE_WORKER_PROTO(StorePath);
template<>
MAKE_WORKER_PROTO(ContentAddress);
template<>
MAKE_WORKER_PROTO(DerivedPath);
template<>
MAKE_WORKER_PROTO(Realisation);
template<>
MAKE_WORKER_PROTO(DrvOutput);
template<>
MAKE_WORKER_PROTO(BuildResult);
template<>
MAKE_WORKER_PROTO(KeyedBuildResult);
template<>
MAKE_WORKER_PROTO(std::optional<TrustedFlag>);
template<typename T>
struct Phantom {};
MAKE_WORKER_PROTO(std::vector<T>);
template<typename T>
MAKE_WORKER_PROTO(std::set<T>);
namespace worker_proto {
/* FIXME maybe move more stuff inside here */
#define MAKE_WORKER_PROTO(TEMPLATE, T) \
TEMPLATE T read(const Store & store, Source & from, Phantom< T > _); \
TEMPLATE void write(const Store & store, Sink & out, const T & str)
MAKE_WORKER_PROTO(, std::string);
MAKE_WORKER_PROTO(, StorePath);
MAKE_WORKER_PROTO(, ContentAddress);
MAKE_WORKER_PROTO(, DerivedPath);
MAKE_WORKER_PROTO(, Realisation);
MAKE_WORKER_PROTO(, DrvOutput);
MAKE_WORKER_PROTO(, BuildResult);
MAKE_WORKER_PROTO(, KeyedBuildResult);
MAKE_WORKER_PROTO(, std::optional<TrustedFlag>);
MAKE_WORKER_PROTO(template<typename T>, std::vector<T>);
MAKE_WORKER_PROTO(template<typename T>, std::set<T>);
#define X_ template<typename K, typename V>
#define Y_ std::map<K, V>
MAKE_WORKER_PROTO(X_, Y_);
template<typename K, typename V>
#define X_ std::map<K, V>
MAKE_WORKER_PROTO(X_);
#undef X_
#undef Y_
/**
* These use the empty string for the null case, relying on the fact
* that the underlying types never serialize to the empty string.
* that the underlying types never serialise to the empty string.
*
* We do this instead of a generic std::optional<T> instance because
* ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
@ -129,72 +240,9 @@ MAKE_WORKER_PROTO(X_, Y_);
* worker protocol harder to implement in other languages where such
* specializations may not be allowed.
*/
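// Illustrative wire behaviour (store, conn and somePath are hypothetical;
// see the definitions in worker-protocol.cc):
//
//     std::optional<StorePath> p;
//     WorkerProto::write(store, conn, p); // sends ""
//     p = somePath;
//     WorkerProto::write(store, conn, p); // sends store.printStorePath(somePath)
//
// A 0/1 tag prefix would avoid relying on the "never the empty string"
// invariant, but, as noted above, ordinal tags are the bigger
// compatibility hazard for this protocol.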
MAKE_WORKER_PROTO(, std::optional<StorePath>);
MAKE_WORKER_PROTO(, std::optional<ContentAddress>);
template<typename T>
std::vector<T> read(const Store & store, Source & from, Phantom<std::vector<T>> _)
{
std::vector<T> resSet;
auto size = readNum<size_t>(from);
while (size--) {
resSet.push_back(read(store, from, Phantom<T> {}));
}
return resSet;
}
template<typename T>
void write(const Store & store, Sink & out, const std::vector<T> & resSet)
{
out << resSet.size();
for (auto & key : resSet) {
write(store, out, key);
}
}
template<typename T>
std::set<T> read(const Store & store, Source & from, Phantom<std::set<T>> _)
{
std::set<T> resSet;
auto size = readNum<size_t>(from);
while (size--) {
resSet.insert(read(store, from, Phantom<T> {}));
}
return resSet;
}
template<typename T>
void write(const Store & store, Sink & out, const std::set<T> & resSet)
{
out << resSet.size();
for (auto & key : resSet) {
write(store, out, key);
}
}
template<typename K, typename V>
std::map<K, V> read(const Store & store, Source & from, Phantom<std::map<K, V>> _)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(from);
while (size--) {
auto k = read(store, from, Phantom<K> {});
auto v = read(store, from, Phantom<V> {});
resMap.insert_or_assign(std::move(k), std::move(v));
}
return resMap;
}
template<typename K, typename V>
void write(const Store & store, Sink & out, const std::map<K, V> & resMap)
{
out << resMap.size();
for (auto & i : resMap) {
write(store, out, i.first);
write(store, out, i.second);
}
}
}
template<>
MAKE_WORKER_PROTO(std::optional<StorePath>);
template<>
MAKE_WORKER_PROTO(std::optional<ContentAddress>);
}