Tagging release 2.26.2
-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEtUHVUwEnDgvPFcpdgXC0cm1xmN4FAmetA5oTHGVkb2xzdHJh
QGdtYWlsLmNvbQAKCRCBcLRybXGY3g2pB/9JAFyjmaXuccbMTO/6x9qwsWuuXNLk
OQWzfbdUekvsihZZSFZg1r7KqqXHCi64f0nxLPsJ/0oeDWZktJ5KnbV630nuUlDj
ulLCpKdvhWFa8dVx9LiziGwQw4KLx8PjOfwThtQ4DqCWxWEmu6lKkijag9cE+ai4
3mw9YtUjBRxlXyhYLzWz3whLbv37c/m+R8iGS8xm8W260pmei6D0beOIPdfXYBQF
PzPlPORyI08A06uqyA3z7bTxzmSMnzvu0QInCPCKSHzFUnTZPHUYuYStFl28NrZS
fXKK59L0G7QEfdTRAmqQkdHdtPj2RlYFiMN0kQiNLflvKfGGWdi/kvdx
=rRix
-----END PGP SIGNATURE-----

Merge tag '2.26.2' into sync-2.26.2

Tagging release 2.26.2
commit 4055239936
1395 changed files with 24694 additions and 16040 deletions
@@ -39,15 +39,13 @@ BinaryCacheStore::BinaryCacheStore(const Params & params)
 void BinaryCacheStore::init()
 {
-    std::string cacheInfoFile = "nix-cache-info";
-
-    auto cacheInfo = getFile(cacheInfoFile);
+    auto cacheInfo = getNixCacheInfo();
     if (!cacheInfo) {
         upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
     } else {
         for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
-            size_t colon= line.find(':');
-            if (colon ==std::string::npos) continue;
+            size_t colon = line.find(':');
+            if (colon == std::string::npos) continue;
             auto name = line.substr(0, colon);
             auto value = trim(line.substr(colon + 1, std::string::npos));
             if (name == "StoreDir") {

@@ -63,6 +61,11 @@ void BinaryCacheStore::init()
     }
 }

+std::optional<std::string> BinaryCacheStore::getNixCacheInfo()
+{
+    return getFile(cacheInfoFile);
+}
+
 void BinaryCacheStore::upsertFile(const std::string & path,
     std::string && data,
     const std::string & mimeType)

@@ -64,6 +64,8 @@ protected:
     // The prefix under which realisation infos will be stored
     const std::string realisationsPrefix = "realisations";

+    const std::string cacheInfoFile = "nix-cache-info";
+
     BinaryCacheStore(const Params & params);

 public:

@@ -84,6 +86,12 @@ public:
     */
     virtual void getFile(const std::string & path, Sink & sink);

+    /**
+     * Get the contents of /nix-cache-info. Return std::nullopt if it
+     * doesn't exist.
+     */
+    virtual std::optional<std::string> getNixCacheInfo();
+
     /**
      * Fetch the specified file and call the specified callback with
      * the result. A subclass may implement this asynchronously.
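The new virtual getNixCacheInfo() gives each transport a dedicated hook for fetching /nix-cache-info; the default simply delegates to getFile(), and the HTTP store overrides it further down in this diff. A sketch of what a custom subclass's override could look like (MyCacheStore is hypothetical; only the override pattern matters):

    #include "binary-cache-store.hh"  // Nix's BinaryCacheStore declaration

    // Hypothetical subclass, shown only to illustrate the override pattern.
    struct MyCacheStore : nix::BinaryCacheStore
    {
        std::optional<std::string> getNixCacheInfo() override
        {
            // A transport with a cheap metadata call could answer directly
            // here instead of issuing a generic file fetch, or fall back to
            // the inherited behaviour:
            return nix::BinaryCacheStore::getNixCacheInfo();
        }
    };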
@@ -1 +0,0 @@
-../../build-utils-meson
@@ -90,7 +90,7 @@ DerivationGoal::~DerivationGoal()
 {
     /* Careful: we should never ever throw an exception from a
        destructor. */
-    try { closeLogFile(); } catch (...) { ignoreException(); }
+    try { closeLogFile(); } catch (...) { ignoreExceptionInDestructor(); }
 }

@@ -701,6 +701,7 @@ Goal::Co DerivationGoal::tryToBuild()
     if (buildMode != bmCheck && allValid) {
         debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath));
         outputLocks.setDeletion(true);
         outputLocks.unlock();
+        co_return done(BuildResult::AlreadyValid, std::move(validOutputs));
     }

@@ -814,7 +815,7 @@ void replaceValidPath(const Path & storePath, const Path & tmpPath)
             // attempt to recover
             movePath(oldPath, storePath);
         } catch (...) {
-            ignoreException();
+            ignoreExceptionExceptInterrupt();
         }
         throw;
     }

@@ -989,7 +990,10 @@ Goal::Co DerivationGoal::buildDone()
             msg += "\n";
         }
         auto nixLogCommand = "nix log";
-        msg += fmt("For full logs, run '" ANSI_BOLD "%s %s" ANSI_NORMAL "'.",
+        // The command is on a separate line for easy copying, such as with triple click.
+        // This message will be indented elsewhere, so removing the indentation before the
+        // command will not put it at the start of the line unfortunately.
+        msg += fmt("For full logs, run:\n  " ANSI_BOLD "%s %s" ANSI_NORMAL,
             nixLogCommand,
             worker.store.printStorePath(drvPath));
     }

@@ -1156,7 +1160,7 @@ HookReply DerivationGoal::tryBuildHook()
             throw;
         }
     }();
-    if (handleJSONLogMessage(s, worker.act, worker.hook->activities, true))
+    if (handleJSONLogMessage(s, worker.act, worker.hook->activities, "the build hook", true))
         ;
     else if (s.substr(0, 2) == "# ") {
         reply = s.substr(2);

@@ -1224,7 +1228,7 @@ HookReply DerivationGoal::tryBuildHook()
     hook->toHook.writeSide.close();

     /* Create the log file and pipe. */
-    Path logFile = openLogFile();
+    [[maybe_unused]] Path logFile = openLogFile();

     std::set<MuxablePipePollState::CommChannel> fds;
     fds.insert(hook->fromHook.readSide.get());

@@ -1341,9 +1345,9 @@ void DerivationGoal::handleChildOutput(Descriptor fd, std::string_view data)
     if (hook && fd == hook->fromHook.readSide.get()) {
         for (auto c : data)
             if (c == '\n') {
-                auto json = parseJSONMessage(currentHookLine);
+                auto json = parseJSONMessage(currentHookLine, "the derivation builder");
                 if (json) {
-                    auto s = handleJSONLogMessage(*json, worker.act, hook->activities, true);
+                    auto s = handleJSONLogMessage(*json, worker.act, hook->activities, "the derivation builder", true);
                     // ensure that logs from a builder using `ssh-ng://` as protocol
                     // are also available to `nix log`.
                     if (s && !isWrittenToLog && logSink) {

@@ -1385,7 +1389,7 @@ void DerivationGoal::handleEOF(Descriptor fd)

 void DerivationGoal::flushLine()
 {
-    if (handleJSONLogMessage(currentLogLine, *act, builderActivities, false))
+    if (handleJSONLogMessage(currentLogLine, *act, builderActivities, "the derivation builder", false))
         ;

     else {
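Several hunks in this file replace the old catch-all ignoreException() with more specific helpers. The semantics the names suggest: in a destructor every exception must be swallowed, since letting one escape during stack unwinding calls std::terminate(), while in an ordinary error path Ctrl-C (Interrupted) should keep propagating. A rough sketch of that split, under the assumption that Interrupted is the exception type libutil throws on SIGINT:

    #include <exception>

    struct Interrupted : std::exception {};

    // Called from inside a catch (...) block in a destructor: swallow
    // everything, because rethrowing there would terminate the process.
    void ignoreExceptionInDestructor() noexcept
    {
        try { throw; } catch (...) { /* log at most */ }
    }

    // Called from inside a catch (...) block in ordinary cleanup code:
    // swallow errors, but let the user's interrupt keep propagating.
    void ignoreExceptionExceptInterrupt()
    {
        try { throw; }
        catch (const Interrupted &) { throw; }
        catch (...) { /* log at most */ }
    }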
@@ -32,7 +32,7 @@ Goal::Co DrvOutputSubstitutionGoal::init()

     bool substituterFailed = false;

-    for (auto sub : subs) {
+    for (const auto & sub : subs) {
         trace("trying next substituter");

         /* The callback of the curl download below can outlive `this` (if

@@ -66,6 +66,8 @@ std::vector<KeyedBuildResult> Store::buildPathsWithResults(
     worker.run(goals);

     std::vector<KeyedBuildResult> results;
+    results.reserve(state.size());

     for (auto & [req, goalPtr] : state)
         results.emplace_back(KeyedBuildResult {
@@ -107,7 +107,7 @@ protected:
 public:

     /**
-     * Suspend our goal and wait until we get @ref work()-ed again.
+     * Suspend our goal and wait until we get `work`-ed again.
      * `co_await`-able by @ref Co.
      */
     struct Suspend {};

@@ -192,7 +192,7 @@ public:

         bool await_ready() { return false; };
         /**
-         * When we `co_await` another @ref Co-returning coroutine,
+         * When we `co_await` another `Co`-returning coroutine,
          * we tell the caller of `caller_coroutine.resume()` to switch to our coroutine (@ref handle).
          * To make sure we return to the original coroutine, we set it as the continuation of our
          * coroutine. In @ref promise_type::final_awaiter we check if it's set and if so we return to it.

@@ -208,7 +208,7 @@ public:
         };

         /**
-         * Used on initial suspend, does the same as @ref std::suspend_always,
+         * Used on initial suspend, does the same as `std::suspend_always`,
          * but asserts that everything has been set correctly.
          */
         struct InitialSuspend {

@@ -269,8 +269,8 @@ public:
         };

         /**
-         * Called by compiler generated code to construct the @ref Co
-         * that is returned from a @ref Co-returning coroutine.
+         * Called by compiler generated code to construct the `Co`
+         * that is returned from a `Co`-returning coroutine.
          */
         Co get_return_object();
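For orientation, the Co type documented above is Nix's goal coroutine: a Co-returning function can co_await other goals and is driven externally by the worker. A standalone sketch of the same machinery using plain C++20 coroutines (illustrative only, not Nix's actual implementation; assumes a C++20 compiler):

    #include <coroutine>
    #include <iostream>

    struct Co {
        struct promise_type {
            Co get_return_object()
            {
                return {std::coroutine_handle<promise_type>::from_promise(*this)};
            }
            // Like Nix's Co: start suspended and wait to be driven.
            std::suspend_always initial_suspend() { return {}; }
            std::suspend_never final_suspend() noexcept { return {}; }
            void return_void() {}
            void unhandled_exception() {}
        };
        std::coroutine_handle<promise_type> handle;
    };

    Co greet() { std::cout << "step\n"; co_return; }

    int main()
    {
        auto co = greet();   // suspended at initial_suspend()
        co.handle.resume();  // analogous to the worker work()-ing the goal
    }

Nix's real promise_type additionally records a continuation handle, which is what the final_awaiter comment above refers to: on completion, control returns to the coroutine that co_awaited this one.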
@@ -57,7 +57,7 @@ Goal::Co PathSubstitutionGoal::init()

     bool substituterFailed = false;

-    for (auto sub : subs) {
+    for (const auto & sub : subs) {
         trace("trying next substituter");

         cleanup();

@@ -294,7 +294,7 @@ void PathSubstitutionGoal::cleanup()

             outPipe.close();
         } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
         }
     }

@@ -208,7 +208,7 @@ public:
         const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);

     /**
-     * @ref SubstitutionGoal "substitution goal"
+     * @ref PathSubstitutionGoal "substitution goal"
      */
     std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
     std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

@@ -20,7 +20,7 @@ void builtinUnpackChannel(
     auto & src = getAttr("src");

     if (fs::path{channelName}.filename().string() != channelName) {
-        throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName);
+        throw Error("channelName is not allowed to contain filesystem separators, got %1%", channelName);
     }

     try {
@@ -97,8 +97,9 @@ struct ContentAddressMethod
      * were ingested, with the fixed output case not prefixed for back
      * compat.
      *
-     * @param [in] m A string that should begin with the prefix.
-     * @param [out] m The remainder of the string after the prefix.
+     * @param m A string that should begin with the
+     * prefix. On return, the remainder of the string after the
+     * prefix.
      */
     static ContentAddressMethod parsePrefix(std::string_view & m);

@@ -139,14 +140,14 @@ struct ContentAddressMethod
 /**
  * We've accumulated several types of content-addressed paths over the
  * years; fixed-output derivations support multiple hash algorithms and
- * serialisation methods (flat file vs NAR). Thus, ‘ca’ has one of the
+ * serialisation methods (flat file vs NAR). Thus, `ca` has one of the
  * following forms:
  *
  * - `TextIngestionMethod`:
- *   ‘text:sha256:<sha256 hash of file contents>’
+ *   `text:sha256:<sha256 hash of file contents>`
  *
  * - `FixedIngestionMethod`:
- *   ‘fixed:<r?>:<hash algorithm>:<hash of file contents>’
+ *   `fixed:<r?>:<hash algorithm>:<hash of file contents>`
  */
 struct ContentAddress
 {
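The reworked parsePrefix comment describes an in-out parameter: the same string_view is the input and, once the prefix is parsed, is left holding the remainder. A self-contained sketch of that calling convention (parseFixedPrefix is a stand-in helper, not the Nix implementation):

    #include <cassert>
    #include <optional>
    #include <string_view>

    // Stand-in for ContentAddressMethod::parsePrefix: on success the
    // prefix is consumed from `m`, which then holds the remainder, so the
    // in and out roles share one parameter.
    std::optional<bool> parseFixedPrefix(std::string_view & m)
    {
        constexpr std::string_view prefix = "fixed:";
        if (!m.starts_with(prefix)) return std::nullopt;
        m.remove_prefix(prefix.size());
        return true;
    }

    int main()
    {
        std::string_view s = "fixed:sha256:abcd";
        assert(parseFixedPrefix(s));
        assert(s == "sha256:abcd"); // `m` now holds the remainder
    }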
@@ -90,11 +90,11 @@ struct TunnelLogger : public Logger
     {
         if (ei.level > verbosity) return;

-        std::stringstream oss;
+        std::ostringstream oss;
         showErrorInfo(oss, ei, false);

         StringSink buf;
-        buf << STDERR_NEXT << oss.str();
+        buf << STDERR_NEXT << toView(oss);
         enqueueMsg(buf.s);
     }

@@ -402,6 +402,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         logger->startWork();
         auto pathInfo = [&]() {
             // NB: FramedSource must be out of scope before logger->stopWork();
+            // FIXME: this means that if there is an error
+            // half-way through, the client will keep sending
+            // data, since we haven't sent it the error yet.
             auto [contentAddressMethod, hashAlgo] = ContentAddressMethod::parseWithAlgo(camStr);
             FramedSource source(conn.from);
             FileSerialisationMethod dumpMethod;

@@ -7,11 +7,12 @@
 #include "split.hh"
 #include "common-protocol.hh"
 #include "common-protocol-impl.hh"
+#include "strings-inline.hh"
 #include "json-utils.hh"

 #include <boost/container/small_vector.hpp>
 #include <nlohmann/json.hpp>

-#include "strings-inline.hh"
-
 namespace nix {

 std::optional<StorePath> DerivationOutput::path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const
@@ -1016,29 +1017,31 @@ std::string hashPlaceholder(const OutputNameView outputName)
     return "/" + hashString(HashAlgorithm::SHA256, concatStrings("nix-output:", outputName)).to_string(HashFormat::Nix32, false);
 }

-static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites)
+void BasicDerivation::applyRewrites(const StringMap & rewrites)
 {
-    debug("Rewriting the derivation");
-    for (auto & rewrite : rewrites) {
+    if (rewrites.empty()) return;
+
+    debug("rewriting the derivation");
+
+    for (auto & rewrite : rewrites)
         debug("rewriting %s as %s", rewrite.first, rewrite.second);
-    }

-    drv.builder = rewriteStrings(drv.builder, rewrites);
-    for (auto & arg : drv.args) {
+    builder = rewriteStrings(builder, rewrites);
+    for (auto & arg : args)
         arg = rewriteStrings(arg, rewrites);
-    }

     StringPairs newEnv;
-    for (auto & envVar : drv.env) {
+    for (auto & envVar : env) {
         auto envName = rewriteStrings(envVar.first, rewrites);
         auto envValue = rewriteStrings(envVar.second, rewrites);
         newEnv.emplace(envName, envValue);
     }
-    drv.env = newEnv;
+    env = std::move(newEnv);
 }

+static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites)
+{
+    drv.applyRewrites(rewrites);
+
     auto hashModulo = hashDerivationModulo(store, Derivation(drv), true);
     for (auto & [outputName, output] : drv.outputs) {

@@ -298,6 +298,10 @@ struct BasicDerivation
     std::string name;

     BasicDerivation() = default;
+    BasicDerivation(BasicDerivation &&) = default;
+    BasicDerivation(const BasicDerivation &) = default;
+    BasicDerivation& operator=(BasicDerivation &&) = default;
+    BasicDerivation& operator=(const BasicDerivation &) = default;
     virtual ~BasicDerivation() { };

     bool isBuiltin() const;

@@ -321,6 +325,12 @@ struct BasicDerivation

     static std::string_view nameFromPath(const StorePath & storePath);

+    /**
+     * Apply string rewrites to the `env`, `args` and `builder`
+     * fields.
+     */
+    void applyRewrites(const StringMap & rewrites);
+
     bool operator == (const BasicDerivation &) const = default;
     // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
     //auto operator <=> (const BasicDerivation &) const = default;
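With the rewrite logic moved from a file-local helper onto BasicDerivation itself, callers can rewrite placeholder strings in a derivation without going through rewriteDerivation. A hedged usage sketch (the placeholder and store-path values below are invented):

    #include "derivations.hh"  // BasicDerivation, StringMap

    void example(nix::BasicDerivation & drv)
    {
        // Replace a hash placeholder with a concrete store path in the
        // builder, args and env fields in one pass.
        nix::StringMap rewrites = {
            // hypothetical placeholder -> hypothetical output path
            {"/1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9",
             "/nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-out"},
        };
        drv.applyRewrites(rewrites);
    }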
@@ -13,14 +13,9 @@ void Store::exportPaths(const StorePathSet & paths, Sink & sink)
     auto sorted = topoSortPaths(paths);
     std::reverse(sorted.begin(), sorted.end());

-    std::string doneLabel("paths exported");
-    //logger->incExpected(doneLabel, sorted.size());
-
     for (auto & path : sorted) {
-        //Activity act(*logger, lvlInfo, "exporting path '%s'", path);
         sink << 1;
         exportPath(path, sink);
-        //logger->incProgress(doneLabel);
     }

     sink << 0;

@@ -54,6 +54,8 @@ struct curlFileTransfer : public FileTransfer
         bool done = false; // whether either the success or failure function has been called
         Callback<FileTransferResult> callback;
         CURL * req = 0;
+        // buffer to accompany the `req` above
+        char errbuf[CURL_ERROR_SIZE];
         bool active = false; // whether the handle has been added to the multi object
         std::string statusMsg;

@@ -137,7 +139,7 @@ struct curlFileTransfer : public FileTransfer
             if (!done)
                 fail(FileTransferError(Interrupted, {}, "download of '%s' was interrupted", request.uri));
         } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
         }
     }

@@ -151,7 +153,7 @@ struct curlFileTransfer : public FileTransfer
     template<class T>
     void fail(T && e)
     {
-        failEx(std::make_exception_ptr(std::move(e)));
+        failEx(std::make_exception_ptr(std::forward<T>(e)));
     }

     LambdaSink finalSink;

@@ -370,6 +372,9 @@ struct curlFileTransfer : public FileTransfer
         if (writtenToSink)
             curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);

+        curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);
+        errbuf[0] = 0;
+
         result.data.clear();
         result.bodySize = 0;
     }

@@ -484,8 +489,8 @@ struct curlFileTransfer : public FileTransfer
                 code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
             : FileTransferError(err,
                 std::move(response),
-                "unable to %s '%s': %s (%d)",
-                request.verb(), request.uri, curl_easy_strerror(code), code);
+                "unable to %s '%s': %s (%d) %s",
+                request.verb(), request.uri, curl_easy_strerror(code), code, errbuf);

         /* If this is a transient error, then maybe retry the
            download after a while. If we're writing to a

@@ -762,7 +767,7 @@ struct curlFileTransfer : public FileTransfer
         auto s3Res = s3Helper.getObject(bucketName, key);
         FileTransferResult res;
         if (!s3Res.data)
-            throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
+            throw FileTransferError(NotFound, {}, "S3 object '%s' does not exist", request.uri);
         res.data = std::move(*s3Res.data);
         res.urls.push_back(request.uri);
         callback(std::move(res));
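The CURLOPT_ERRORBUFFER change above lets error messages include the detailed text curl records for the failing transfer, not just the generic strerror string. This is standard libcurl usage; a minimal standalone sketch:

    #include <curl/curl.h>
    #include <cstdio>

    int main()
    {
        curl_global_init(CURL_GLOBAL_ALL);
        CURL * req = curl_easy_init();
        char errbuf[CURL_ERROR_SIZE];
        errbuf[0] = 0;  // curl only writes on error, so start empty
        curl_easy_setopt(req, CURLOPT_URL, "https://example.invalid/");
        curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);
        CURLcode code = curl_easy_perform(req);
        if (code != CURLE_OK)
            // errbuf often carries more detail than curl_easy_strerror()
            std::fprintf(stderr, "%s (%d) %s\n",
                curl_easy_strerror(code), code, errbuf);
        curl_easy_cleanup(req);
        curl_global_cleanup();
    }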
@@ -50,8 +50,9 @@ struct FileTransferSettings : Config

     Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
         R"(
-          The size of Nix's internal download buffer during `curl` transfers. If data is
+          The size of Nix's internal download buffer in bytes during `curl` transfers. If data is
           not processed quickly enough to exceed the size of this buffer, downloads may stall.
+          The default is 67108864 (64 MiB).
         )"};
 };

@@ -4,6 +4,7 @@
 #include "finally.hh"
 #include "unix-domain-socket.hh"
 #include "signals.hh"
+#include "posix-fs-canonicalise.hh"

 #if !defined(__linux__)
 // For shelling out to lsof

@@ -333,7 +334,7 @@ static std::string quoteRegexChars(const std::string & raw)
 }

 #if __linux__
-static void readFileRoots(const char * path, UncheckedRoots & roots)
+static void readFileRoots(const std::filesystem::path & path, UncheckedRoots & roots)
 {
     try {
         roots[readFile(path)].emplace(path);

@@ -454,7 +455,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
     bool gcKeepOutputs = settings.gcKeepOutputs;
     bool gcKeepDerivations = settings.gcKeepDerivations;

-    StorePathSet roots, dead, alive;
+    std::unordered_set<StorePath> roots, dead, alive;

     struct Shared
     {

@@ -660,7 +661,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
         }
     };

-    std::map<StorePath, StorePathSet> referrersCache;
+    std::unordered_map<StorePath, StorePathSet> referrersCache;

     /* Helper function that visits all paths reachable from `start`
        via the referrers edges and optionally derivers and derivation

@@ -763,13 +764,18 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
             }
         }
     }

     for (auto & path : topoSortPaths(visited)) {
         if (!dead.insert(path).second) continue;
         if (shouldDelete) {
-            invalidatePathChecked(path);
-            deleteFromStore(path.to_string());
-            referrersCache.erase(path);
+            try {
+                invalidatePathChecked(path);
+                deleteFromStore(path.to_string());
+                referrersCache.erase(path);
+            } catch (PathInUse &e) {
+                // If we end up here, it's likely a new occurence
+                // of https://github.com/NixOS/nix/issues/11923
+                printError("BUG: %s", e.what());
+            }
         }
     }
 };

@@ -958,8 +964,8 @@ void LocalStore::autoGC(bool sync)

     } catch (...) {
         // FIXME: we could propagate the exception to the
-        // future, but we don't really care.
-        ignoreException();
+        // future, but we don't really care. (what??)
+        ignoreExceptionInDestructor();
     }

 }).detach();
@@ -12,6 +12,7 @@
 #include <mutex>
 #include <thread>

+#include <curl/curl.h>
 #include <nlohmann/json.hpp>

 #ifndef _WIN32

@@ -64,7 +65,6 @@ Settings::Settings()
     , nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
     , nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
     , nixUserConfFiles(getUserConfigFiles())
-    , nixBinDir(canonPath(getEnvNonEmpty("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
     , nixManDir(canonPath(NIX_MAN_DIR))
     , nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
 {

@@ -95,34 +95,6 @@ Settings::Settings()
     sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
     allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
 #endif
-
-    /* Set the build hook location
-
-       For builds we perform a self-invocation, so Nix has to be self-aware.
-       That is, it has to know where it is installed. We don't think it's sentient.
-
-       Normally, nix is installed according to `nixBinDir`, which is set at compile time,
-       but can be overridden. This makes for a great default that works even if this
-       code is linked as a library into some other program whose main is not aware
-       that it might need to be a build remote hook.
-
-       However, it may not have been installed at all. For example, if it's a static build,
-       there's a good chance that it has been moved out of its installation directory.
-       That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the
-       current executable, using `getSelfExe()`.
-
-       As a last resort, we resort to `PATH`. Hopefully we find a `nix` there that's compatible.
-       If you're porting Nix to a new platform, that might be good enough for a while, but
-       you'll want to improve `getSelfExe()` to work on your platform.
-    */
-    std::string nixExePath = nixBinDir + "/nix";
-    if (!pathExists(nixExePath)) {
-        nixExePath = getSelfExe().value_or("nix");
-    }
-    buildHook = {
-        nixExePath,
-        "__build-remote",
-    };
 }

 void loadConfFile(AbstractConfig & config)

@@ -164,7 +136,7 @@ std::vector<Path> getUserConfigFiles()
     std::vector<Path> files;
     auto dirs = getConfigDirs();
     for (auto & dir : dirs) {
-        files.insert(files.end(), dir + "/nix/nix.conf");
+        files.insert(files.end(), dir + "/nix.conf");
     }
     return files;
 }

@@ -394,10 +366,21 @@ void initLibStore(bool loadConfig) {

     preloadNSS();

+    /* Because of an objc quirk[1], calling curl_global_init for the first time
+       after fork() will always result in a crash.
+       Up until now the solution has been to set OBJC_DISABLE_INITIALIZE_FORK_SAFETY
+       for every nix process to ignore that error.
+       Instead of working around that error we address it at the core -
+       by calling curl_global_init here, which should mean curl will already
+       have been initialized by the time we try to do so in a forked process.
+
+       [1] https://github.com/apple-oss-distributions/objc4/blob/01edf1705fbc3ff78a423cd21e03dfc21eb4d780/runtime/objc-initialize.mm#L614-L636
+    */
+    curl_global_init(CURL_GLOBAL_ALL);
+#if __APPLE__
     /* On macOS, don't use the per-session TMPDIR (as set e.g. by
        sshd). This breaks build users because they don't have access
        to the TMPDIR, in particular in ‘nix-store --serve’. */
-#if __APPLE__
     if (hasPrefix(defaultTempDir(), "/var/folders/"))
         unsetenv("TMPDIR");
 #endif
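The fork-safety fix relies purely on ordering: initialize libcurl once in the parent so a forked child never runs curl's first-time initialization under the Objective-C runtime. A sketch of that ordering, assuming ordinary POSIX fork():

    #include <curl/curl.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main()
    {
        // Initialize libcurl once in the parent, before any fork(); on
        // macOS, first-time initialization inside a forked child trips the
        // Objective-C runtime's fork-safety check and aborts.
        curl_global_init(CURL_GLOBAL_ALL);

        pid_t pid = fork();
        if (pid == 0) {
            // Child: curl is already initialized, so using it here no
            // longer requires OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES.
            _exit(0);
        }
        waitpid(pid, nullptr, 0);
        curl_global_cleanup();
    }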
@@ -84,11 +84,6 @@ public:
     */
     std::vector<Path> nixUserConfFiles;

-    /**
-     * The directory where the main programs are stored.
-     */
-    Path nixBinDir;
-
     /**
      * The directory where the man pages are stored.
      */

@@ -246,7 +241,7 @@ public:
     )",
     {"build-timeout"}};

-    Setting<Strings> buildHook{this, {}, "build-hook",
+    Setting<Strings> buildHook{this, {"nix", "__build-remote"}, "build-hook",
         R"(
         The path to the helper program that executes remote builds.

@@ -404,10 +399,18 @@ public:
         default is `true`.
         )"};

+    Setting<bool> fsyncStorePaths{this, false, "fsync-store-paths",
+        R"(
+          Whether to call `fsync()` on store paths before registering them, to
+          flush them to disk. This improves robustness in case of system crashes,
+          but reduces performance. The default is `false`.
+        )"};
+
     Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal",
         "Whether SQLite should use WAL mode."};

 #ifndef _WIN32
+    // FIXME: remove this option, `fsync-store-paths` is faster.
     Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
         "Whether to call `sync()` before registering a path as valid."};
 #endif

@@ -1131,7 +1134,10 @@ public:
         )"};

     Setting<uint64_t> maxFree{
-        this, std::numeric_limits<uint64_t>::max(), "max-free",
+        // n.b. this is deliberately int64 max rather than uint64 max because
+        // this goes through the Nix language JSON parser and thus needs to be
+        // representable in Nix language integers.
+        this, std::numeric_limits<int64_t>::max(), "max-free",
         R"(
         When a garbage collection is triggered by the `min-free` option, it
         stops as soon as `max-free` bytes are available. The default is

@@ -1198,7 +1204,7 @@ public:

         If the user is trusted (see `trusted-users` option), when building
         a fixed-output derivation, environment variables set in this option
-        will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md##adv-attr-impureEnvVars).
+        will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars).

         This option is useful for, e.g., setting `https_proxy` for
         fixed-output derivations and in a multi-user Nix installation, or

@@ -1221,11 +1227,13 @@ public:

     Setting<uint64_t> warnLargePathThreshold{
         this,
-        std::numeric_limits<uint64_t>::max(),
+        0,
         "warn-large-path-threshold",
         R"(
           Warn when copying a path larger than this number of bytes to the Nix store
           (as determined by its NAR serialisation).
+          Default is 0, which disables the warning.
+          Set it to 1 to warn on all paths.
         )"
     };
 };
@@ -194,6 +194,19 @@ protected:
         }
     }

+    std::optional<std::string> getNixCacheInfo() override
+    {
+        try {
+            auto result = getFileTransfer()->download(makeRequest(cacheInfoFile));
+            return result.data;
+        } catch (FileTransferError & e) {
+            if (e.error == FileTransfer::NotFound)
+                return std::nullopt;
+            maybeDisable();
+            throw;
+        }
+    }
+
     /**
      * This isn't actually necessary read only. We support "upsert" now, so we
      * have a notion of authentication via HTTP POST/PUT.

@@ -10,12 +10,12 @@ PublicKeys getDefaultPublicKeys()

     // FIXME: filter duplicates

-    for (auto s : settings.trustedPublicKeys.get()) {
+    for (const auto & s : settings.trustedPublicKeys.get()) {
         PublicKey key(s);
         publicKeys.emplace(key.name, key);
     }

-    for (auto secretKeyFile : settings.secretKeyFiles.get()) {
+    for (const auto & secretKeyFile : settings.secretKeyFiles.get()) {
         try {
             SecretKey secretKey(readFile(secretKeyFile));
             publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
@@ -1,6 +1,8 @@
 #pragma once
 /**
- * @file Reusable serialisers for serialization container types in a
+ * @file
+ *
+ * Reusable serialisers for serialization container types in a
  * length-prefixed manner.
  *
  * Used by both the Worker and Serve protocols.

@@ -28,25 +30,22 @@ struct StoreDirConfig;
 template<class Inner, typename T>
 struct LengthPrefixedProtoHelper;

-/*!
- * \typedef LengthPrefixedProtoHelper::S
- *
- * Read this as simply `using S = Inner::Serialise;`.
- *
- * It would be nice to use that directly, but C++ doesn't seem to allow
- * it. The `typename` keyword needed to refer to `Inner` seems to greedy
- * (low precedence), and then C++ complains that `Serialise` is not a
- * type parameter but a real type.
- *
- * Making this `S` alias seems to be the only way to avoid these issues.
- */
-
 #define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \
     struct LengthPrefixedProtoHelper< Inner, T > \
     { \
         static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \
         static void write(const StoreDirConfig & store, typename Inner::WriteConn conn, const T & str); \
     private: \
+        /*! \
+         * Read this as simply `using S = Inner::Serialise;`. \
+         * \
+         * It would be nice to use that directly, but C++ doesn't seem to allow \
+         * it. The `typename` keyword needed to refer to `Inner` seems to greedy \
+         * (low precedence), and then C++ complains that `Serialise` is not a \
+         * type parameter but a real type. \
+         * \
+         * Making this `S` alias seems to be the only way to avoid these issues. \
+         */ \
         template<typename U> using S = typename Inner::template Serialise<U>; \
     }

@@ -60,9 +59,8 @@ template<class Inner, typename... Ts>
 LENGTH_PREFIXED_PROTO_HELPER(Inner, std::tuple<Ts...>);

 template<class Inner, typename K, typename V>
-#define _X std::map<K, V>
-LENGTH_PREFIXED_PROTO_HELPER(Inner, _X);
-#undef _X
+#define LENGTH_PREFIXED_PROTO_HELPER_X std::map<K, V>
+LENGTH_PREFIXED_PROTO_HELPER(Inner, LENGTH_PREFIXED_PROTO_HELPER_X);

 template<class Inner, typename T>
 std::vector<T>
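The _X rename works around two preprocessor facts at once: std::map<K, V> contains a comma, which would split it into two macro arguments if passed directly, so the type is first bound to a single-token name; and the new name avoids _X, which (underscore followed by an uppercase letter) is reserved to the implementation. A standalone illustration of the comma-hiding trick:

    #include <cstddef>
    #include <map>
    #include <string>

    // A macro that expects exactly one type argument.
    #define DECLARE_SIZE_FN(T) \
        std::size_t sizeOf(const T & v) { return sizeof(v); }

    // DECLARE_SIZE_FN(std::map<int, std::string>) would fail: the comma in
    // the template argument list splits it into two macro arguments. Binding
    // the type to one token first keeps the comma hidden during argument
    // collection; the alias is expanded only afterwards.
    #define MAP_ALIAS std::map<int, std::string>
    DECLARE_SIZE_FN(MAP_ALIAS)
    #undef MAP_ALIAS

    int main()
    {
        std::map<int, std::string> m;
        return sizeOf(m) > 0 ? 0 : 1;
    }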
@@ -31,7 +31,7 @@ LocalOverlayStore::LocalOverlayStore(std::string_view scheme, PathView path, con
     if (checkMount.get()) {
         std::smatch match;
         std::string mountInfo;
-        auto mounts = readFile("/proc/self/mounts");
+        auto mounts = readFile(std::filesystem::path{"/proc/self/mounts"});
         auto regex = std::regex(R"((^|\n)overlay )" + realStoreDir.get() + R"( .*(\n|$))");

         // Mount points can be stacked, so there might be multiple matching entries.

@@ -156,7 +156,7 @@ void LocalOverlayStore::queryGCReferrers(const StorePath & path, StorePathSet &
 StorePathSet LocalOverlayStore::queryValidDerivers(const StorePath & path)
 {
     auto res = LocalStore::queryValidDerivers(path);
-    for (auto p : lowerStore->queryValidDerivers(path))
+    for (const auto & p : lowerStore->queryValidDerivers(path))
         res.insert(p);
     return res;
 }

@@ -4,7 +4,7 @@ R"(

 This store type is a variation of the [local store] designed to leverage Linux's [Overlay Filesystem](https://docs.kernel.org/filesystems/overlayfs.html) (OverlayFS for short).
 Just as OverlayFS combines a lower and upper filesystem by treating the upper one as a patch against the lower, the local overlay store combines a lower store with an upper almost-[local store].
-("almost" because while the upper fileystems for OverlayFS is valid on its own, the upper almost-store is not a valid local store on its own because some references will dangle.)
+("almost" because while the upper filesystems for OverlayFS is valid on its own, the upper almost-store is not a valid local store on its own because some references will dangle.)
 To use this store, you will first need to configure an OverlayFS mountpoint [appropriately](#example-filesystem-layout) as Nix will not do this for you (though it will verify the mountpoint is configured correctly).

 ### Conceptual parts of a local overlay store

@@ -77,13 +77,13 @@ The parts of a local overlay store are as follows:

   The lower store directory and upper layer directory are combined via OverlayFS to create this directory.
   Nix doesn't do this itself, because it typically wouldn't have the permissions to do so, so it is the responsibility of the user to set this up first.
-  Nix can, however, optionally check that that the OverlayFS mount settings appear as expected, matching Nix's own settings.
+  Nix can, however, optionally check that the OverlayFS mount settings appear as expected, matching Nix's own settings.

 - **Upper SQLite database**:

   > Not directly specified.
   > The location of the database instead depends on the [`state`](#store-experimental-local-overlay-store-state) setting.
-  > It is is always `${state}/db`.
+  > It is always `${state}/db`.

   This contains the metadata of all of the upper layer [store objects][store object] (everything beyond their file system objects), and also duplicate copies of some lower layer store object's metadta.
   The duplication is so the metadata for the [closure](@docroot@/glossary.md#gloss-closure) of upper layer [store objects][store object] can be found entirely within the upper layer.
@@ -42,7 +42,6 @@
 # include <sched.h>
 # include <sys/statvfs.h>
 # include <sys/mount.h>
-# include <sys/ioctl.h>
 #endif

 #ifdef __CYGWIN__

@@ -51,6 +50,8 @@

 #include <sqlite3.h>

+#include <nlohmann/json.hpp>
+
 #include "strings.hh"


@@ -94,104 +95,6 @@ struct LocalStore::State::Stmts {
     SQLiteStmt AddRealisationReference;
 };

-int getSchema(Path schemaPath)
-{
-    int curSchema = 0;
-    if (pathExists(schemaPath)) {
-        auto s = readFile(schemaPath);
-        auto n = string2Int<int>(s);
-        if (!n)
-            throw Error("'%1%' is corrupt", schemaPath);
-        curSchema = *n;
-    }
-    return curSchema;
-}
-
-void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
-{
-    const int nixCASchemaVersion = 4;
-    int curCASchema = getSchema(schemaPath);
-    if (curCASchema != nixCASchemaVersion) {
-        if (curCASchema > nixCASchemaVersion) {
-            throw Error("current Nix store ca-schema is version %1%, but I only support %2%",
-                curCASchema, nixCASchemaVersion);
-        }
-
-        if (!lockFile(lockFd.get(), ltWrite, false)) {
-            printInfo("waiting for exclusive access to the Nix store for ca drvs...");
-            lockFile(lockFd.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
-            lockFile(lockFd.get(), ltWrite, true);
-        }
-
-        if (curCASchema == 0) {
-            static const char schema[] =
-#include "ca-specific-schema.sql.gen.hh"
-                ;
-            db.exec(schema);
-            curCASchema = nixCASchemaVersion;
-        }
-
-        if (curCASchema < 2) {
-            SQLiteTxn txn(db);
-            // Ugly little sql dance to add a new `id` column and make it the primary key
-            db.exec(R"(
-                create table Realisations2 (
-                    id integer primary key autoincrement not null,
-                    drvPath text not null,
-                    outputName text not null, -- symbolic output id, usually "out"
-                    outputPath integer not null,
-                    signatures text, -- space-separated list
-                    foreign key (outputPath) references ValidPaths(id) on delete cascade
-                );
-                insert into Realisations2 (drvPath, outputName, outputPath, signatures)
-                    select drvPath, outputName, outputPath, signatures from Realisations;
-                drop table Realisations;
-                alter table Realisations2 rename to Realisations;
-            )");
-            db.exec(R"(
-                create index if not exists IndexRealisations on Realisations(drvPath, outputName);
-
-                create table if not exists RealisationsRefs (
-                    referrer integer not null,
-                    realisationReference integer,
-                    foreign key (referrer) references Realisations(id) on delete cascade,
-                    foreign key (realisationReference) references Realisations(id) on delete restrict
-                );
-            )");
-            txn.commit();
-        }
-
-        if (curCASchema < 3) {
-            SQLiteTxn txn(db);
-            // Apply new indices added in this schema update.
-            db.exec(R"(
-                -- used by QueryRealisationReferences
-                create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
-                -- used by cascade deletion when ValidPaths is deleted
-                create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);
-            )");
-            txn.commit();
-        }
-        if (curCASchema < 4) {
-            SQLiteTxn txn(db);
-            db.exec(R"(
-                create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
-                begin
-                    delete from RealisationsRefs where realisationReference in (
-                        select id from Realisations where outputPath = old.id
-                    );
-                end;
-                -- used by deletion trigger
-                create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);
-            )");
-            txn.commit();
-        }
-
-        writeFile(schemaPath, fmt("%d", nixCASchemaVersion), 0666, true);
-        lockFile(lockFd.get(), ltRead, true);
-    }
-}
-
 LocalStore::LocalStore(
     std::string_view scheme,
     PathView path,

@@ -366,10 +269,12 @@ LocalStore::LocalStore(
        have performed the upgrade already. */
     curSchema = getSchema();

-    if (curSchema < 7) { upgradeStore7(); }
-
     openDB(*state, false);

+    /* Legacy database schema migrations. Don't bump 'schema' for
+       new migrations; instead, add a migration to
+       upgradeDBSchema(). */
+
     if (curSchema < 8) {
         SQLiteTxn txn(state->db);
         state->db.exec("alter table ValidPaths add column ultimate integer");

@@ -396,13 +301,7 @@ LocalStore::LocalStore(

     else openDB(*state, false);

-    if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
-        if (!readOnly) {
-            migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
-        } else {
-            throw Error("need to migrate to content-addressed schema, but this cannot be done in read-only mode");
-        }
-    }
+    upgradeDBSchema(*state);

     /* Prepare SQL statements. */
     state->stmts->RegisterValidPath.create(state->db,

@@ -525,7 +424,7 @@ LocalStore::~LocalStore()
         unlink(fnTempRoots.c_str());
     }
     } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
     }
 }

@@ -537,7 +436,17 @@ std::string LocalStore::getUri()


 int LocalStore::getSchema()
-{ return nix::getSchema(schemaPath); }
+{
+    int curSchema = 0;
+    if (pathExists(schemaPath)) {
+        auto s = readFile(schemaPath);
+        auto n = string2Int<int>(s);
+        if (!n)
+            throw Error("'%1%' is corrupt", schemaPath);
+        curSchema = *n;
+    }
+    return curSchema;
+}

 void LocalStore::openDB(State & state, bool create)
 {

@@ -620,6 +529,42 @@ void LocalStore::openDB(State & state, bool create)
 }


+void LocalStore::upgradeDBSchema(State & state)
+{
+    state.db.exec("create table if not exists SchemaMigrations (migration text primary key not null);");
+
+    std::set<std::string> schemaMigrations;
+
+    {
+        SQLiteStmt querySchemaMigrations;
+        querySchemaMigrations.create(state.db, "select migration from SchemaMigrations;");
+        auto useQuerySchemaMigrations(querySchemaMigrations.use());
+        while (useQuerySchemaMigrations.next())
+            schemaMigrations.insert(useQuerySchemaMigrations.getStr(0));
+    }
+
+    auto doUpgrade = [&](const std::string & migrationName, const std::string & stmt)
+    {
+        if (schemaMigrations.contains(migrationName))
+            return;
+
+        debug("executing Nix database schema migration '%s'...", migrationName);
+
+        SQLiteTxn txn(state.db);
+        state.db.exec(stmt + fmt(";\ninsert into SchemaMigrations values('%s')", migrationName));
+        txn.commit();
+
+        schemaMigrations.insert(migrationName);
+    };
+
+    if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
+        doUpgrade(
+            "20220326-ca-derivations",
+#include "ca-specific-schema.sql.gen.hh"
+            );
+}
+
+
 /* To improve purity, users may want to make the Nix store a read-only
    bind mount. So make the Nix store writable for this process. */
 void LocalStore::makeStoreWritable()
@@ -1099,103 +1044,114 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
     if (checkSigs && pathInfoIsUntrusted(info))
         throw Error("cannot add path '%s' because it lacks a signature by a trusted key", printStorePath(info.path));

-    /* In case we are not interested in reading the NAR: discard it. */
-    bool narRead = false;
-    Finally cleanup = [&]() {
-        if (!narRead) {
-            NullFileSystemObjectSink sink;
-            try {
-                parseDump(sink, source);
-            } catch (...) {
-                ignoreException();
-            }
-        }
-    };
-
-    addTempRoot(info.path);
-
-    if (repair || !isValidPath(info.path)) {
-
-        PathLocks outputLock;
-
-        auto realPath = Store::toRealPath(info.path);
-
-        /* Lock the output path. But don't lock if we're being called
-           from a build hook (whose parent process already acquired a
-           lock on this path). */
-        if (!locksHeld.count(printStorePath(info.path)))
-            outputLock.lockPaths({realPath});
-
-        if (repair || !isValidPath(info.path)) {
-
-            deletePath(realPath);
-
-            /* While restoring the path from the NAR, compute the hash
-               of the NAR. */
-            HashSink hashSink(HashAlgorithm::SHA256);
-
-            TeeSource wrapperSource { source, hashSink };
-
-            narRead = true;
-            restorePath(realPath, wrapperSource);
-
-            auto hashResult = hashSink.finish();
-
-            if (hashResult.first != info.narHash)
-                throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s",
-                    printStorePath(info.path), info.narHash.to_string(HashFormat::Nix32, true), hashResult.first.to_string(HashFormat::Nix32, true));
-
-            if (hashResult.second != info.narSize)
-                throw Error("size mismatch importing path '%s';\n specified: %s\n got: %s",
-                    printStorePath(info.path), info.narSize, hashResult.second);
-
-            if (info.ca) {
-                auto & specified = *info.ca;
-                auto actualHash = ({
-                    auto accessor = getFSAccessor(false);
-                    CanonPath path { printStorePath(info.path) };
-                    Hash h { HashAlgorithm::SHA256 }; // throwaway def to appease C++
-                    auto fim = specified.method.getFileIngestionMethod();
-                    switch (fim) {
-                    case FileIngestionMethod::Flat:
-                    case FileIngestionMethod::NixArchive:
-                    {
-                        HashModuloSink caSink {
-                            specified.hash.algo,
-                            std::string { info.path.hashPart() },
-                        };
-                        dumpPath({accessor, path}, caSink, (FileSerialisationMethod) fim);
-                        h = caSink.finish().first;
-                        break;
-                    }
-                    case FileIngestionMethod::Git:
-                        h = git::dumpHash(specified.hash.algo, {accessor, path}).hash;
-                        break;
-                    }
-                    ContentAddress {
-                        .method = specified.method,
-                        .hash = std::move(h),
-                    };
-                });
-                if (specified.hash != actualHash.hash) {
-                    throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
-                        printStorePath(info.path),
-                        specified.hash.to_string(HashFormat::Nix32, true),
-                        actualHash.hash.to_string(HashFormat::Nix32, true));
-                }
-            }
-
-            autoGC();
-
-            canonicalisePathMetaData(realPath);
-
-            optimisePath(realPath, repair); // FIXME: combine with hashPath()
-
-            registerValidPath(info);
-        }
-
-        outputLock.setDeletion(true);
-    }
+    {
+        /* In case we are not interested in reading the NAR: discard it. */
+        bool narRead = false;
+        Finally cleanup = [&]() {
+            if (!narRead) {
+                NullFileSystemObjectSink sink;
+                try {
+                    parseDump(sink, source);
+                } catch (...) {
+                    // TODO: should Interrupted be handled here?
+                    ignoreExceptionInDestructor();
+                }
+            }
+        };
+
+        addTempRoot(info.path);
+
+        if (repair || !isValidPath(info.path)) {
+
+            PathLocks outputLock;
+
+            auto realPath = Store::toRealPath(info.path);
+
+            /* Lock the output path. But don't lock if we're being called
+               from a build hook (whose parent process already acquired a
+               lock on this path). */
+            if (!locksHeld.count(printStorePath(info.path)))
+                outputLock.lockPaths({realPath});
+
+            if (repair || !isValidPath(info.path)) {
+
+                deletePath(realPath);
+
+                /* While restoring the path from the NAR, compute the hash
+                   of the NAR. */
+                HashSink hashSink(HashAlgorithm::SHA256);
+
+                TeeSource wrapperSource { source, hashSink };
+
+                narRead = true;
+                restorePath(realPath, wrapperSource, settings.fsyncStorePaths);
+
+                auto hashResult = hashSink.finish();
+
+                if (hashResult.first != info.narHash)
+                    throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s",
+                        printStorePath(info.path), info.narHash.to_string(HashFormat::Nix32, true), hashResult.first.to_string(HashFormat::Nix32, true));
+
+                if (hashResult.second != info.narSize)
+                    throw Error("size mismatch importing path '%s';\n specified: %s\n got: %s",
+                        printStorePath(info.path), info.narSize, hashResult.second);
+
+                if (info.ca) {
+                    auto & specified = *info.ca;
+                    auto actualHash = ({
+                        auto accessor = getFSAccessor(false);
+                        CanonPath path { printStorePath(info.path) };
+                        Hash h { HashAlgorithm::SHA256 }; // throwaway def to appease C++
+                        auto fim = specified.method.getFileIngestionMethod();
+                        switch (fim) {
+                        case FileIngestionMethod::Flat:
+                        case FileIngestionMethod::NixArchive:
+                        {
+                            HashModuloSink caSink {
+                                specified.hash.algo,
+                                std::string { info.path.hashPart() },
+                            };
+                            dumpPath({accessor, path}, caSink, (FileSerialisationMethod) fim);
+                            h = caSink.finish().first;
+                            break;
+                        }
+                        case FileIngestionMethod::Git:
+                            h = git::dumpHash(specified.hash.algo, {accessor, path}).hash;
+                            break;
+                        }
+                        ContentAddress {
+                            .method = specified.method,
+                            .hash = std::move(h),
+                        };
+                    });
+                    if (specified.hash != actualHash.hash) {
+                        throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
+                            printStorePath(info.path),
+                            specified.hash.to_string(HashFormat::Nix32, true),
+                            actualHash.hash.to_string(HashFormat::Nix32, true));
+                    }
+                }
+
+                autoGC();
+
+                canonicalisePathMetaData(realPath);
+
+                optimisePath(realPath, repair); // FIXME: combine with hashPath()
+
+                if (settings.fsyncStorePaths) {
+                    recursiveSync(realPath);
+                    syncParent(realPath);
+                }
+
+                registerValidPath(info);
+            }
+
+            outputLock.setDeletion(true);
+        }
+    }
+
+    // In case `cleanup` ignored an `Interrupted` exception
+    checkInterrupt();
 }
@@ -1271,7 +1227,7 @@ StorePath LocalStore::addToStoreFromDump(
     delTempDir = std::make_unique<AutoDelete>(tempDir);
     tempPath = tempDir / "x";

-    restorePath(tempPath.string(), bothSource, dumpMethod);
+    restorePath(tempPath.string(), bothSource, dumpMethod, settings.fsyncStorePaths);

     dumpBuffer.reset();
     dump = {};

@@ -1318,7 +1274,7 @@ StorePath LocalStore::addToStoreFromDump(
     switch (fim) {
     case FileIngestionMethod::Flat:
     case FileIngestionMethod::NixArchive:
-        restorePath(realPath, dumpSource, (FileSerialisationMethod) fim);
+        restorePath(realPath, dumpSource, (FileSerialisationMethod) fim, settings.fsyncStorePaths);
         break;
     case FileIngestionMethod::Git:
         // doesn't correspond to serialization method, so

@@ -1343,6 +1299,11 @@ StorePath LocalStore::addToStoreFromDump(

     optimisePath(realPath, repair);

+    if (settings.fsyncStorePaths) {
+        recursiveSync(realPath);
+        syncParent(realPath);
+    }
+
     ValidPathInfo info {
         *this,
         name,

@@ -1596,62 +1557,6 @@ std::optional<TrustedFlag> LocalStore::isTrustedClient()
 }

-#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
-
-static void makeMutable(const Path & path)
-{
-    checkInterrupt();
-
-    auto st = lstat(path);
-
-    if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;
-
-    if (S_ISDIR(st.st_mode)) {
-        for (auto & i : readDirectory(path))
-            makeMutable(path + "/" + i.name);
-    }
-
-    /* The O_NOFOLLOW is important to prevent us from changing the
-       mutable bit on the target of a symlink (which would be a
-       security hole). */
-    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW
-#ifndef _WIN32
-        | O_CLOEXEC
-#endif
-        );
-    if (fd == INVALID_DESCRIPTOR) {
-        if (errno == ELOOP) return; // it's a symlink
-        throw SysError("opening file '%1%'", path);
-    }
-
-    unsigned int flags = 0, old;
-
-    /* Silently ignore errors getting/setting the immutable flag so
-       that we work correctly on filesystems that don't support it. */
-    if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
-    old = flags;
-    flags &= ~FS_IMMUTABLE_FL;
-    if (old == flags) return;
-    if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
-}
-
-/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
-void LocalStore::upgradeStore7()
-{
-    if (!isRootUser()) return;
-    printInfo("removing immutable bits from the Nix store (this may take a while)...");
-    makeMutable(realStoreDir);
-}
-
-#else
-
-void LocalStore::upgradeStore7()
-{
-}
-
-#endif
-
 void LocalStore::vacuumDB()
 {
     auto state(_state.lock());

@@ -356,6 +356,8 @@ private:

     void openDB(State & state, bool create);

+    void upgradeDBSchema(State & state);
+
     void makeStoreWritable();

     uint64_t queryValidPathId(State & state, const StorePath & path);

@@ -373,8 +375,6 @@ private:

     void updatePathInfo(State & state, const ValidPathInfo & info);

-    void upgradeStore6();
-    void upgradeStore7();
     PathSet queryValidPathsOld();
     ValidPathInfo queryPathInfoOld(const Path & path);
@@ -1,105 +0,0 @@
-libraries += libstore
-
-libstore_NAME = libnixstore
-
-libstore_DIR := $(d)
-
-libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
-ifdef HOST_UNIX
-  libstore_SOURCES += $(wildcard $(d)/unix/*.cc $(d)/unix/build/*.cc)
-endif
-ifdef HOST_LINUX
-  libstore_SOURCES += $(wildcard $(d)/linux/*.cc)
-endif
-ifdef HOST_WINDOWS
-  libstore_SOURCES += $(wildcard $(d)/windows/*.cc)
-endif
-
-libstore_LIBS = libutil
-
-libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(THREAD_LDFLAGS)
-ifdef HOST_LINUX
-  libstore_LDFLAGS += -ldl
-endif
-ifdef HOST_WINDOWS
-  libstore_LDFLAGS += -lws2_32
-endif
-
-$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
-
-ifeq ($(ENABLE_S3), 1)
-  libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
-endif
-
-ifdef HOST_SOLARIS
-  libstore_LDFLAGS += -lsocket
-endif
-
-ifeq ($(HAVE_SECCOMP), 1)
-  libstore_LDFLAGS += $(LIBSECCOMP_LIBS)
-endif
-
-# Not just for this library itself, but also for downstream libraries using this library
-
-INCLUDE_libstore := -I $(d) -I $(d)/build
-ifdef HOST_UNIX
-  INCLUDE_libstore += -I $(d)/unix -I $(d)/unix/build
-endif
-ifdef HOST_LINUX
-  INCLUDE_libstore += -I $(d)/linux
-endif
-ifdef HOST_WINDOWS
-  INCLUDE_libstore += -I $(d)/windows
-endif
-
-ifdef HOST_WINDOWS
-NIX_ROOT = N:\\\\
-else
-NIX_ROOT =
-endif
-
-# Prefix all but `NIX_STORE_DIR`, since we aren't doing a local store
-# yet so a "logical" store dir that is the same as unix is prefered.
-#
-# Also, it keeps the unit tests working.
-
-libstore_CXXFLAGS += \
- $(INCLUDE_libutil) $(INCLUDE_libstore) $(INCLUDE_libstore) \
- -DNIX_PREFIX=\"$(NIX_ROOT)$(prefix)\" \
- -DNIX_STORE_DIR=\"$(storedir)\" \
- -DNIX_DATA_DIR=\"$(NIX_ROOT)$(datadir)\" \
- -DNIX_STATE_DIR=\"$(NIX_ROOT)$(localstatedir)/nix\" \
- -DNIX_LOG_DIR=\"$(NIX_ROOT)$(localstatedir)/log/nix\" \
- -DNIX_CONF_DIR=\"$(NIX_ROOT)$(sysconfdir)/nix\" \
- -DNIX_BIN_DIR=\"$(NIX_ROOT)$(bindir)\" \
- -DNIX_MAN_DIR=\"$(NIX_ROOT)$(mandir)\" \
- -DLSOF=\"$(NIX_ROOT)$(lsof)\" \
- -DDETERMINATE_NIX_VERSION=\""$(shell cat ./.version-determinate)"\"
-
-ifeq ($(embedded_sandbox_shell),yes)
-libstore_CXXFLAGS += -DSANDBOX_SHELL=\"__embedded_sandbox_shell__\"
-
-$(d)/unix/build/local-derivation-goal.cc: $(d)/unix/embedded-sandbox-shell.gen.hh
-
-$(d)/unix/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
-	$(trace-gen) hexdump -v -e '1/1 "0x%x," "\n"' < $< > $@.tmp
-	@mv $@.tmp $@
-else
-ifneq ($(sandbox_shell),)
-libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
-endif
-endif
-
-$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
-
-$(d)/unix/build.cc:
-
-clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
-
-$(eval $(call install-file-in, $(buildprefix)$(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
-
-$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
-  $(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))
-
-$(foreach i, $(wildcard src/libstore/build/*.hh), \
-  $(eval $(call install-file-in, $(i), $(includedir)/nix/build, 0644)))
@@ -33,7 +33,7 @@ Machine::Machine(
    systemTypes(systemTypes),
    sshKey(sshKey),
    maxJobs(maxJobs),
-    speedFactor(speedFactor == 0.0f ? 1.0f : std::move(speedFactor)),
+    speedFactor(speedFactor == 0.0f ? 1.0f : speedFactor),
    supportedFeatures(supportedFeatures),
    mandatoryFeatures(mandatoryFeatures),
    sshPublicHostKey(sshPublicHostKey)
@@ -32,12 +32,12 @@ struct Machine {

    /**
     * @return Whether `features` is a subset of the union of `supportedFeatures` and
-     * `mandatoryFeatures`
+     * `mandatoryFeatures`.
     */
    bool allSupported(const std::set<std::string> & features) const;

    /**
-     * @return @Whether `mandatoryFeatures` is a subset of `features`
+     * @return Whether `mandatoryFeatures` is a subset of `features`.
     */
    bool mandatoryMet(const std::set<std::string> & features) const;
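The subset relations documented above map directly onto std::includes over sorted containers. A minimal sketch, not the actual Nix implementation (isSubsetOf is a hypothetical helper):

#include <algorithm>
#include <set>
#include <string>

// Hypothetical helper: std::set iterates in sorted order, so
// std::includes can test the subset relation directly.
static bool isSubsetOf(const std::set<std::string> & needles,
                       const std::set<std::string> & haystack)
{
    return std::includes(haystack.begin(), haystack.end(),
                         needles.begin(), needles.end());
}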
@@ -4,9 +4,8 @@ project('nix-store', 'cpp',
    'cpp_std=c++2a',
    # TODO(Qyriad): increase the warning level
    'warning_level=1',
-    'debug=true',
-    'optimization=2',
    'errorlogs=true', # Please print logs for tests that fail
    'localstatedir=/nix/var',
  ],
  meson_version : '>= 1.1',
  license : 'LGPL-2.1-or-later',
@@ -16,7 +15,7 @@ fs = import('fs')

cxx = meson.get_compiler('cpp')

-subdir('build-utils-meson/deps-lists')
+subdir('nix-meson-build-support/deps-lists')

configdata = configuration_data()
@@ -25,18 +24,20 @@ configdata.set_quoted('PACKAGE_VERSION', meson.project_version())

configdata.set_quoted('DETERMINATE_NIX_VERSION', fs.read('.version-determinate').strip())

-configdata.set_quoted('SYSTEM', host_machine.system())
+configdata.set_quoted('SYSTEM', host_machine.cpu_family() + '-' + host_machine.system())

deps_private_maybe_subproject = [
]
deps_public_maybe_subproject = [
  dependency('nix-util'),
]
-subdir('build-utils-meson/subprojects')
+subdir('nix-meson-build-support/subprojects')

run_command('ln', '-s',
  meson.project_build_root() / '__nothing_link_target',
  meson.project_build_root() / '__nothing_symlink',
  # native doesn't allow dangling symlinks, which the tests require
  env : { 'MSYS' : 'winsymlinks:lnk' },
  check : true,
)
can_link_symlink = run_command('ln',
@@ -77,7 +78,12 @@ if host_machine.system() == 'darwin'
  deps_other += [sandbox]
endif

-subdir('build-utils-meson/threads')
+if host_machine.system() == 'windows'
+  wsock32 = cxx.find_library('wsock32')
+  deps_other += [wsock32]
+endif
+
+subdir('nix-meson-build-support/libatomic')

boost = dependency(
  'boost',
@@ -127,7 +133,7 @@ if aws_s3.found()
endif
deps_other += aws_s3

-subdir('build-utils-meson/generate-header')
+subdir('nix-meson-build-support/generate-header')

generated_headers = []
foreach header : [
@@ -175,7 +181,7 @@ add_project_arguments(
  language : 'cpp',
)

-subdir('build-utils-meson/diagnostics')
+subdir('nix-meson-build-support/common')

sources = files(
  'binary-cache-store.cc',
@@ -331,24 +337,23 @@ endif

prefix = get_option('prefix')
# For each of these paths, assume that it is relative to the prefix unless
-# it is already an absolute path (which is the default for store-dir, state-dir, and log-dir).
+# it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir).
path_opts = [
  # Meson built-ins.
  'datadir',
  'bindir',
  'mandir',
  'libdir',
  'includedir',
  'libexecdir',
  # Homecooked Nix directories.
  'store-dir',
-  'state-dir',
+  'localstatedir',
  'log-dir',
]
# For your grepping pleasure, this loop sets the following variables that aren't mentioned
# literally above:
#   store_dir
-#   state_dir
+#   localstatedir
#   log_dir
#   profile_dir
foreach optname : path_opts
@@ -372,15 +377,14 @@ lsof = find_program('lsof', required : false)

# Aside from prefix itself, each of these was made into an absolute path
# by joining it with prefix, unless it was already an absolute path
-# (which is the default for store-dir, state-dir, and log-dir).
+# (which is the default for store-dir, localstatedir, and log-dir).
cpp_str_defines = {
  'NIX_PREFIX': prefix,
  'NIX_STORE_DIR': store_dir,
  'NIX_DATA_DIR': datadir,
-  'NIX_STATE_DIR': state_dir / 'nix',
+  'NIX_STATE_DIR': localstatedir / 'nix',
  'NIX_LOG_DIR': log_dir,
  'NIX_CONF_DIR': sysconfdir / 'nix',
  'NIX_BIN_DIR': bindir,
  'NIX_MAN_DIR': mandir,
}
@@ -412,7 +416,8 @@ foreach name, value : cpp_str_defines
  ]
endforeach

-subdir('build-utils-meson/export-all-symbols')
+subdir('nix-meson-build-support/export-all-symbols')
+subdir('nix-meson-build-support/windows-version')

this_library = library(
  'nixstore',
@@ -430,4 +435,15 @@ install_headers(headers, subdir : 'nix', preserve_path : true)

libraries_private = []

-subdir('build-utils-meson/export')
+extra_pkg_config_variables = {
+  'storedir' : get_option('store-dir'),
+}
+
+# Working around https://github.com/mesonbuild/meson/issues/13584
+if host_machine.system() != 'darwin'
+  extra_pkg_config_variables += {
+    'localstatedir' : get_option('localstatedir'),
+  }
+endif
+
+subdir('nix-meson-build-support/export')
@@ -16,10 +16,6 @@ option('store-dir', type : 'string', value : '/nix/store',
  description : 'path of the Nix store',
)

-option('state-dir', type : 'string', value : '/nix/var',
-  description : 'path to store state in for Nix',
-)
-
option('log-dir', type : 'string', value : '/nix/var/log/nix',
  description : 'path to store logs in for Nix',
)
@@ -94,7 +94,7 @@ static bool componentsLT(const std::string_view c1, const std::string_view c2)
}


-int compareVersions(const std::string_view v1, const std::string_view v2)
+std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2)
{
    auto p1 = v1.begin();
    auto p2 = v2.begin();
@@ -102,11 +102,11 @@ int compareVersions(const std::string_view v1, const std::string_view v2)
    while (p1 != v1.end() || p2 != v2.end()) {
        auto c1 = nextComponent(p1, v1.end());
        auto c2 = nextComponent(p2, v2.end());
-        if (componentsLT(c1, c2)) return -1;
-        else if (componentsLT(c2, c1)) return 1;
+        if (componentsLT(c1, c2)) return std::strong_ordering::less;
+        else if (componentsLT(c2, c1)) return std::strong_ordering::greater;
    }

-    return 0;
+    return std::strong_ordering::equal;
}

@@ -30,7 +30,7 @@ typedef std::list<DrvName> DrvNames;

std::string_view nextComponent(std::string_view::const_iterator & p,
    const std::string_view::const_iterator end);
-int compareVersions(const std::string_view v1, const std::string_view v2);
+std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2);
DrvNames drvNamesFromArgs(const Strings & opArgs);

}
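Switching compareVersions from int to std::strong_ordering (C++20 <compare>) lets call sites compare the result against 0 directly instead of remembering the -1/0/1 convention. A minimal sketch of a call site, assuming cmp came from compareVersions(v1, v2):

#include <compare>
#include <iostream>

// Illustrative call site only.
void describe(std::strong_ordering cmp)
{
    if (cmp < 0)
        std::cout << "v1 is older\n";
    else if (cmp > 0)
        std::cout << "v1 is newer\n";
    else
        std::cout << "versions are equal\n";
}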
@@ -291,7 +291,11 @@ json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse)
        obj["type"] = "symlink";
        obj["target"] = accessor->readLink(path);
        break;
-    case SourceAccessor::Type::tMisc:
+    case SourceAccessor::Type::tBlock:
+    case SourceAccessor::Type::tChar:
+    case SourceAccessor::Type::tSocket:
+    case SourceAccessor::Type::tFifo:
+    case SourceAccessor::Type::tUnknown:
        assert(false); // cannot happen for NARs
    }
    return obj;

@@ -87,7 +87,7 @@ public:

    Sync<State> _state;

-    NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
+    NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/binary-cache-v6.sqlite")
    {
        auto state(_state.lock());

@@ -2,6 +2,7 @@
#include "nar-info.hh"
#include "store-api.hh"
+#include "strings.hh"
#include "json-utils.hh"

namespace nix {

@@ -117,7 +118,7 @@ std::string NarInfo::to_string(const Store & store) const
    if (deriver)
        res += "Deriver: " + std::string(deriver->to_string()) + "\n";

-    for (auto sig : sigs)
+    for (const auto & sig : sigs)
        res += "Sig: " + sig + "\n";

    if (ca)

src/libstore/nix-meson-build-support (symbolic link)
@@ -0,0 +1 @@
+../../nix-meson-build-support
@@ -1,10 +0,0 @@
-prefix=@prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: Nix
-Description: Nix Package Manager
-Version: @PACKAGE_VERSION@
-Requires: nix-util
-Libs: -L${libdir} -lnixstore
-Cflags: -I${includedir}/nix -std=c++2a

@@ -35,7 +35,7 @@ struct MakeReadOnly
            /* This will make the path read-only. */
            if (path != "") canonicaliseTimestampAndPermissions(path);
        } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
        }
    }
};
@@ -30,16 +30,15 @@ std::optional<OutputsSpec> OutputsSpec::parseOpt(std::string_view s)
{
    static std::regex regex(std::string { outputSpecRegexStr });

-    std::smatch match;
-    std::string s2 { s }; // until some improves std::regex
-    if (!std::regex_match(s2, match, regex))
+    std::cmatch match;
+    if (!std::regex_match(s.cbegin(), s.cend(), match, regex))
        return std::nullopt;

    if (match[1].matched)
        return { OutputsSpec::All {} };

    if (match[2].matched)
-        return OutputsSpec::Names { tokenizeString<StringSet>(match[2].str(), ",") };
+        return OutputsSpec::Names { tokenizeString<StringSet>({match[2].first, match[2].second}, ",") };

    assert(false);
}
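The std::cmatch change avoids copying the std::string_view into a std::string just to satisfy std::regex_match; matching over a character range works because std::cmatch stores const char * sub-matches that point into the original buffer. A standalone sketch of the technique (the pattern here is illustrative):

#include <regex>
#include <string_view>

bool matchesDigits(std::string_view s)
{
    static const std::regex re("[0-9]+");
    std::cmatch m;
    // data()/size() give a guaranteed const char * range for the view.
    return std::regex_match(s.data(), s.data() + s.size(), m, re);
}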
@@ -154,7 +153,10 @@ namespace nlohmann {

using namespace nix;

-OutputsSpec adl_serializer<OutputsSpec>::from_json(const json & json) {
+#ifndef DOXYGEN_SKIP
+
+OutputsSpec adl_serializer<OutputsSpec>::from_json(const json & json)
+{
    auto names = json.get<StringSet>();
    if (names == StringSet({"*"}))
        return OutputsSpec::All {};
@@ -162,7 +164,8 @@ OutputsSpec adl_serializer<OutputsSpec>::from_json(const json & json) {
        return OutputsSpec::Names { std::move(names) };
}

-void adl_serializer<OutputsSpec>::to_json(json & json, OutputsSpec t) {
+void adl_serializer<OutputsSpec>::to_json(json & json, OutputsSpec t)
+{
    std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            json = std::vector<std::string>({"*"});
@@ -173,8 +176,8 @@ void adl_serializer<OutputsSpec>::to_json(json & json, OutputsSpec t) {
    }, t.raw);
}

-ExtendedOutputsSpec adl_serializer<ExtendedOutputsSpec>::from_json(const json & json) {
+ExtendedOutputsSpec adl_serializer<ExtendedOutputsSpec>::from_json(const json & json)
+{
    if (json.is_null())
        return ExtendedOutputsSpec::Default {};
    else {
@@ -182,7 +185,8 @@ ExtendedOutputsSpec adl_serializer<ExtendedOutputsSpec>::from_json(const json &
    }
}

-void adl_serializer<ExtendedOutputsSpec>::to_json(json & json, ExtendedOutputsSpec t) {
+void adl_serializer<ExtendedOutputsSpec>::to_json(json & json, ExtendedOutputsSpec t)
+{
    std::visit(overloaded {
        [&](const ExtendedOutputsSpec::Default &) {
            json = nullptr;
@@ -193,4 +197,6 @@ void adl_serializer<ExtendedOutputsSpec>::to_json(json & json, ExtendedOutputsSp
    }, t.raw);
}

+#endif
+
}
@@ -1,43 +1,40 @@
-{ lib
-, stdenv
-, mkMesonDerivation
-, releaseTools
+{
+  lib,
+  stdenv,
+  mkMesonLibrary,

-, meson
-, ninja
-, pkg-config
-, unixtools
-, darwin
+  unixtools,
+  darwin,

-, nix-util
-, boost
-, curl
-, aws-sdk-cpp
-, libseccomp
-, nlohmann_json
-, sqlite
+  nix-util,
+  boost,
+  curl,
+  aws-sdk-cpp,
+  libseccomp,
+  nlohmann_json,
+  sqlite,

-, busybox-sandbox-shell ? null
+  busybox-sandbox-shell ? null,

-# Configuration Options
+  # Configuration Options

-, version
+  version,

-, embeddedSandboxShell ? stdenv.hostPlatform.isStatic
+  embeddedSandboxShell ? stdenv.hostPlatform.isStatic,
}:

let
  inherit (lib) fileset;
in

-mkMesonDerivation (finalAttrs: {
+mkMesonLibrary (finalAttrs: {
  pname = "nix-store";
  inherit version;

  workDir = ./.;
  fileset = fileset.unions [
-    ../../build-utils-meson
-    ./build-utils-meson
+    ../../nix-meson-build-support
+    ./nix-meson-build-support
    # FIXME: get rid of these symlinks.
    ../../.version
    ./.version
@@ -55,24 +52,20 @@ mkMesonDerivation (finalAttrs: {
    (fileset.fileFilter (file: file.hasExt "sql") ./.)
  ];

-  outputs = [ "out" "dev" ];
-
-  nativeBuildInputs = [
-    meson
-    ninja
-    pkg-config
-  ] ++ lib.optional embeddedSandboxShell unixtools.hexdump;
+  nativeBuildInputs = lib.optional embeddedSandboxShell unixtools.hexdump;

-  buildInputs = [
-    boost
-    curl
-    sqlite
-  ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp
+  buildInputs =
+    [
+      boost
+      curl
+      sqlite
+    ]
+    ++ lib.optional stdenv.hostPlatform.isLinux libseccomp
    # There have been issues building these dependencies
    ++ lib.optional stdenv.hostPlatform.isDarwin darwin.apple_sdk.libs.sandbox
-    ++ lib.optional (stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin))
-      aws-sdk-cpp
-  ;
+    ++ lib.optional (
+      stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin)
+    ) aws-sdk-cpp;

  propagatedBuildInputs = [
    nix-util
@@ -87,26 +80,22 @@ mkMesonDerivation (finalAttrs: {
    echo ${version} > ../../.version
  '';

-  mesonFlags = [
-    (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux)
-    (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell)
-  ] ++ lib.optionals stdenv.hostPlatform.isLinux [
-    (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox")
-  ];
+  mesonFlags =
+    [
+      (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux)
+      (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell)
+    ]
+    ++ lib.optionals stdenv.hostPlatform.isLinux [
+      (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox")
+    ];

  env = {
    # Needed for Meson to find Boost.
    # https://github.com/NixOS/nixpkgs/issues/86131.
    BOOST_INCLUDEDIR = "${lib.getDev boost}/include";
    BOOST_LIBRARYDIR = "${lib.getLib boost}/lib";
  } // lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
    LDFLAGS = "-fuse-ld=gold";
  };

  separateDebugInfo = !stdenv.hostPlatform.isStatic;

  hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";

  meta = {
    platforms = lib.platforms.unix ++ lib.platforms.windows;
  };
@@ -176,17 +176,18 @@ struct ValidPathInfo : UnkeyedValidPathInfo {
     */
    Strings shortRefs() const;

+    ValidPathInfo(const ValidPathInfo & other) = default;
+
    ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(std::move(path)) { };
    ValidPathInfo(const StorePath & path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(path) { };

    ValidPathInfo(const Store & store,
        std::string_view name, ContentAddressWithReferences && ca, Hash narHash);

    virtual ~ValidPathInfo() { }
};

static_assert(std::is_move_assignable_v<ValidPathInfo>);
static_assert(std::is_copy_assignable_v<ValidPathInfo>);
static_assert(std::is_copy_constructible_v<ValidPathInfo>);
static_assert(std::is_move_constructible_v<ValidPathInfo>);

using ValidPathInfos = std::map<StorePath, ValidPathInfo>;

}
@@ -1,6 +1,8 @@
#pragma once
///@file

+#include <string_view>
+
namespace nix {

@@ -37,6 +37,7 @@ DerivedPath StorePathWithOutputs::toDerivedPath() const
std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs> ss)
{
    std::vector<DerivedPath> reqs;
+    reqs.reserve(ss.size());
    for (auto & s : ss) reqs.push_back(s.toDerivedPath());
    return reqs;
}

@@ -81,7 +81,7 @@ typedef std::set<StorePath> StorePathSet;
typedef std::vector<StorePath> StorePaths;

/**
- * The file extension of \ref Derivation derivations when serialized
+ * The file extension of \ref nix::Derivation derivations when serialized
 * into store objects.
 */
constexpr std::string_view drvExtension = ".drv";

@@ -27,7 +27,7 @@ PathLocks::~PathLocks()
    try {
        unlock();
    } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
    }
}

@@ -1,6 +1,8 @@
#pragma once
/**
- * @file Implementation of Profiles.
+ * @file
+ *
+ * Implementation of Profiles.
 *
 * See the manual for additional information.
 */

@@ -30,7 +30,7 @@ ref<SourceAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std:
        /* FIXME: do this asynchronously. */
        writeFile(makeCacheFile(hashPart, "nar"), nar);
    } catch (...) {
-        ignoreException();
+        ignoreExceptionExceptInterrupt();
    }
}

@@ -42,7 +42,7 @@ ref<SourceAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std:
        nlohmann::json j = listNar(narAccessor, CanonPath::root, true);
        writeFile(makeCacheFile(hashPart, "ls"), j.dump());
    } catch (...) {
-        ignoreException();
+        ignoreExceptionExceptInterrupt();
    }
}
@@ -40,7 +40,7 @@ struct RemoteStore::ConnectionHandle
        : handle(std::move(handle))
    { }

-    ConnectionHandle(ConnectionHandle && h)
+    ConnectionHandle(ConnectionHandle && h) noexcept
        : handle(std::move(h.handle))
    { }

@@ -49,7 +49,7 @@ struct RemoteStore::ConnectionHandle
    RemoteStore::Connection & operator * () { return *handle; }
    RemoteStore::Connection * operator -> () { return &*handle; }

-    void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
+    void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true);

    void withFramedSink(std::function<void(Sink & sink)> fun);
};

@@ -153,9 +153,9 @@ RemoteStore::ConnectionHandle::~ConnectionHandle()
    }
}

-void RemoteStore::ConnectionHandle::processStderr(Sink * sink, Source * source, bool flush)
+void RemoteStore::ConnectionHandle::processStderr(Sink * sink, Source * source, bool flush, bool block)
{
-    handle->processStderr(&daemonException, sink, source, flush);
+    handle->processStderr(&daemonException, sink, source, flush, block);
}
@@ -534,14 +534,27 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,


void RemoteStore::addMultipleToStore(
-    PathsSource & pathsToCopy,
+    PathsSource && pathsToCopy,
    Activity & act,
    RepairFlag repair,
    CheckSigsFlag checkSigs)
{
+    // `addMultipleToStore` is single threaded
+    size_t bytesExpected = 0;
+    for (auto & [pathInfo, _] : pathsToCopy) {
+        bytesExpected += pathInfo.narSize;
+    }
+    act.setExpected(actCopyPath, bytesExpected);
+
    auto source = sinkToSource([&](Sink & sink) {
-        sink << pathsToCopy.size();
-        for (auto & [pathInfo, pathSource] : pathsToCopy) {
+        size_t nrTotal = pathsToCopy.size();
+        sink << nrTotal;
+        // Reverse, so we can release memory at the original start
+        std::reverse(pathsToCopy.begin(), pathsToCopy.end());
+        while (!pathsToCopy.empty()) {
+            act.progress(nrTotal - pathsToCopy.size(), nrTotal, size_t(1), size_t(0));
+
+            auto & [pathInfo, pathSource] = pathsToCopy.back();
            WorkerProto::Serialise<ValidPathInfo>::write(*this,
                WorkerProto::WriteConn {
                    .to = sink,
@@ -549,6 +562,7 @@ void RemoteStore::addMultipleToStore(
                },
                pathInfo);
            pathSource->drainInto(sink);
+            pathsToCopy.pop_back();
        }
    });
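The reverse-then-pop_back loop above is a memory-release idiom: consuming from the back of a vector lets each element be destroyed as soon as it has been written to the sink, so peak memory falls as the copy progresses. A minimal sketch of the idiom (process is a hypothetical consumer):

#include <algorithm>
#include <string>
#include <vector>

static void process(const std::string &) { /* hypothetical consumer */ }

void consumeAll(std::vector<std::string> items)
{
    std::reverse(items.begin(), items.end());
    while (!items.empty()) {
        process(items.back());
        items.pop_back(); // frees this element's storage immediately
    }
}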
@@ -926,43 +940,17 @@ void RemoteStore::ConnectionHandle::withFramedSink(std::function<void(Sink & sin
{
    (*this)->to.flush();

-    std::exception_ptr ex;
-
-    /* Handle log messages / exceptions from the remote on a separate
-       thread. */
-    std::thread stderrThread([&]()
-    {
-        try {
-            ReceiveInterrupts receiveInterrupts;
-            processStderr(nullptr, nullptr, false);
-        } catch (...) {
-            ex = std::current_exception();
-        }
-    });
-
-    Finally joinStderrThread([&]()
-    {
-        if (stderrThread.joinable()) {
-            stderrThread.join();
-            if (ex) {
-                try {
-                    std::rethrow_exception(ex);
-                } catch (...) {
-                    ignoreException();
-                }
-            }
-        }
-    });
-
    {
-        FramedSink sink((*this)->to, ex);
+        FramedSink sink((*this)->to, [&]() {
+            /* Periodically process stderr messages and exceptions
+               from the daemon. */
+            processStderr(nullptr, nullptr, false, false);
+        });
        fun(sink);
        sink.flush();
    }

-    stderrThread.join();
-    if (ex)
-        std::rethrow_exception(ex);
+    processStderr(nullptr, nullptr, false);
}
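The rework above replaces a dedicated stderr-reader thread with a callback that the sink invokes while writing, so pending daemon messages are pumped on the writer's own thread. A minimal sketch of the pattern (CallbackSink is hypothetical, not the real FramedSink):

#include <functional>
#include <string_view>

struct CallbackSink
{
    std::function<void()> onWrite;                   // e.g. a non-blocking stderr pump
    std::function<void(std::string_view)> downstream;

    void write(std::string_view data)
    {
        onWrite();        // drain pending messages before blocking on I/O
        downstream(data); // then forward the payload
    }
};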
@@ -102,7 +102,7 @@ public:
        CheckSigsFlag checkSigs) override;

    void addMultipleToStore(
-        PathsSource & pathsToCopy,
+        PathsSource && pathsToCopy,
        Activity & act,
        RepairFlag repair,
        CheckSigsFlag checkSigs) override;

@@ -48,7 +48,11 @@ R && checkAws(std::string_view s, Aws::Utils::Outcome<R, E> && outcome)
    if (!outcome.IsSuccess())
        throw S3Error(
            outcome.GetError().GetErrorType(),
-            s + ": " + outcome.GetError().GetMessage());
+            fmt(
+                "%s: %s (request id: %s)",
+                s,
+                outcome.GetError().GetMessage(),
+                outcome.GetError().GetRequestId()));
    return outcome.GetResultWithOwnership();
}

@@ -121,9 +125,10 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
        checkInterrupt();
        auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
        if (retry)
-            printError("AWS error '%s' (%s), will retry in %d ms",
+            printError("AWS error '%s' (%s; request id: %s), will retry in %d ms",
                error.GetExceptionName(),
                error.GetMessage(),
+                error.GetRequestId(),
                CalculateDelayBeforeNextRetry(error, attemptedRetries));
        return retry;
    }

@@ -454,7 +459,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
            debug("got %d keys, next marker '%s'",
                contents.size(), res.GetNextMarker());

-            for (auto object : contents) {
+            for (const auto & object : contents) {
                auto & key = object.GetKey();
                if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
                paths.insert(parseStorePath(storeDir + "/" + key.substr(0, key.size() - 8) + "-" + MissingName));

@@ -3,7 +3,7 @@ R"(
**Store URL format**: `s3://`*bucket-name*

This store allows reading and writing a binary cache stored in an AWS S3 (or S3-compatible service) bucket.
-This store shares many idioms with the [HTTP Binary Cache Store](#http-binary-cache-store).
+This store shares many idioms with the [HTTP Binary Cache Store](@docroot@/store/types/http-binary-cache-store.md).

For AWS S3, the binary cache URL for a bucket named `example-nix-cache` will be exactly <s3://example-nix-cache>.
For S3 compatible binary caches, consult that cache's documentation.
@@ -29,11 +29,10 @@ SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)
SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>)
SERVE_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>)

-#define COMMA_ ,
+#define SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA ,
SERVE_USE_LENGTH_PREFIX_SERIALISER(
-    template<typename K COMMA_ typename V>,
-    std::map<K COMMA_ V>)
-#undef COMMA_
+    template<typename K SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA typename V>,
+    std::map<K SERVE_USE_LENGTH_PREFIX_SERIALISER_COMMA V>)

/**
 * Use `CommonProto` where possible.
@@ -86,7 +86,7 @@ SQLite::~SQLite()
        if (db && sqlite3_close(db) != SQLITE_OK)
            SQLiteError::throw_(db, "closing database");
    } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
    }
}

@@ -125,7 +125,7 @@ SQLiteStmt::~SQLiteStmt()
        if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
            SQLiteError::throw_(db, "finalizing statement '%s'", sql);
    } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
    }
}

@@ -240,7 +240,7 @@ SQLiteTxn::~SQLiteTxn()
        if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
            SQLiteError::throw_(db, "aborting transaction");
    } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
    }
}

@@ -42,7 +42,8 @@ struct SQLite
    SQLite(const Path & path, SQLiteOpenMode mode = SQLiteOpenMode::Normal);
    SQLite(const SQLite & from) = delete;
    SQLite& operator = (const SQLite & from) = delete;
-    SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
+    // NOTE: This is noexcept since we are only copying and assigning raw pointers.
+    SQLite& operator = (SQLite && from) noexcept { db = from.db; from.db = 0; return *this; }
    ~SQLite();
    operator sqlite3 * () { return db; }
@@ -3,6 +3,7 @@
#include "current-process.hh"
#include "environment-variables.hh"
#include "util.hh"
+#include "exec.hh"

namespace nix {

@@ -40,8 +41,17 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
{
    auto state(state_.lock());

-    for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS").value_or("")))
-        args.push_back(i);
+    std::string sshOpts = getEnv("NIX_SSHOPTS").value_or("");
+
+    try {
+        std::list<std::string> opts = shellSplitString(sshOpts);
+        for (auto & i : opts)
+            args.push_back(i);
+    } catch (Error & e) {
+        e.addTrace({}, "while splitting NIX_SSHOPTS '%s'", sshOpts);
+        throw;
+    }

    if (!keyFile.empty())
        args.insert(args.end(), {"-i", keyFile});
    if (!sshPublicHostKey.empty()) {
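The switch from tokenizeString to shellSplitString means NIX_SSHOPTS is now split with shell-style quoting rather than on raw whitespace. A toy illustration of the difference, handling only single quotes (this is not the actual shellSplitString):

#include <list>
#include <stdexcept>
#include <string>

// With NIX_SSHOPTS="-o 'ProxyCommand=ssh -W %h:%p jump'", this yields two
// words, where plain whitespace tokenization would yield six.
std::list<std::string> toyShellSplit(const std::string & s)
{
    std::list<std::string> words;
    std::string cur;
    bool inWord = false, inQuote = false;
    for (char c : s) {
        if (inQuote) {
            if (c == '\'') inQuote = false;
            else cur += c;
        } else if (c == '\'') {
            inQuote = true; inWord = true;
        } else if (c == ' ' || c == '\t') {
            if (inWord) { words.push_back(cur); cur.clear(); inWord = false; }
        } else {
            cur += c; inWord = true;
        }
    }
    if (inQuote) throw std::runtime_error("unterminated quote");
    if (inWord) words.push_back(cur);
    return words;
}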
@@ -54,6 +64,10 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
    if (compress)
        args.push_back("-C");

+    // We use this to make ssh signal back to us that the connection is established.
+    // It really does run locally; see createSSHEnv which sets up SHELL to make
+    // it launch more reliably. The local command runs synchronously, so presumably
+    // the remote session won't be garbled if the local command is slow.
    args.push_back("-oPermitLocalCommand=yes");
    args.push_back("-oLocalCommand=echo started");
}

@@ -66,6 +80,27 @@ bool SSHMaster::isMasterRunning() {
    return res.first == 0;
}

+Strings createSSHEnv()
+{
+    // Copy the environment and set SHELL=/bin/sh
+    std::map<std::string, std::string> env = getEnv();
+
+    // SSH will invoke the "user" shell for -oLocalCommand, but that means
+    // $SHELL. To keep things simple and avoid potential issues with other
+    // shells, we set it to /bin/sh.
+    // Technically, we don't need that, and we could reinvoke ourselves to print
+    // "started". Self-reinvocation is tricky with library consumers, but mostly
+    // solved; refer to the development history of nixExePath in libstore/globals.cc.
+    env.insert_or_assign("SHELL", "/bin/sh");
+
+    Strings r;
+    for (auto & [k, v] : env) {
+        r.push_back(k + "=" + v);
+    }
+
+    return r;
+}
+
std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(
    Strings && command, Strings && extraSshArgs)
{
@@ -114,8 +149,8 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(
        }

        args.splice(args.end(), std::move(command));

-        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+        auto env = createSSHEnv();
+        nix::execvpe(args.begin()->c_str(), stringsToCharPtrs(args).data(), stringsToCharPtrs(env).data());

        // could not exec ssh/bash
        throw SysError("unable to execute '%s'", args.front());
@@ -182,7 +217,8 @@ Path SSHMaster::startMaster()
        if (verbosity >= lvlChatty)
            args.push_back("-v");
        addCommonSSHOpts(args);
-        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+        auto env = createSSHEnv();
+        nix::execvpe(args.begin()->c_str(), stringsToCharPtrs(args).data(), stringsToCharPtrs(env).data());

        throw SysError("unable to execute '%s'", args.front());
    }, options);
|
|||
/**
|
||||
* @param command The command (arg vector) to execute.
|
||||
*
|
||||
* @param extraSShArgs Extra args to pass to SSH (not the command to
|
||||
* @param extraSshArgs Extra arguments to pass to SSH (not the command to
|
||||
* execute). Will not be used when "fake SSHing" to the local
|
||||
* machine.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -171,7 +171,7 @@ std::pair<StorePath, Hash> StoreDirConfig::computeStorePath(
|
|||
PathFilter & filter) const
|
||||
{
|
||||
auto [h, size] = hashPath(path, method.getFileIngestionMethod(), hashAlgo, filter);
|
||||
if (size && *size >= settings.warnLargePathThreshold)
|
||||
if (settings.warnLargePathThreshold && size && *size >= settings.warnLargePathThreshold)
|
||||
warn("hashed large path '%s' (%s)", path, renderSize(*size));
|
||||
return {
|
||||
makeFixedOutputPathFromCA(
|
||||
|
|
@ -214,7 +214,7 @@ StorePath Store::addToStore(
|
|||
auto sink = sourceToSink([&](Source & source) {
|
||||
LengthSource lengthSource(source);
|
||||
storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
|
||||
if (lengthSource.total >= settings.warnLargePathThreshold)
|
||||
if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold)
|
||||
warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
|
||||
});
|
||||
dumpPath(path, *sink, fsm, filter);
|
||||
|
|
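Both guards above encode the same rule: a warn-large-path-threshold of 0 means the warning is disabled, so the threshold must be tested before the size comparison. As a sketch:

#include <cstdint>

// threshold == 0 disables the warning entirely; without the first test,
// every path would satisfy size >= 0 and warn.
bool shouldWarnLargePath(uint64_t threshold, uint64_t size)
{
    return threshold != 0 && size >= threshold;
}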
@@ -223,7 +223,7 @@ StorePath Store::addToStore(
}

void Store::addMultipleToStore(
-    PathsSource & pathsToCopy,
+    PathsSource && pathsToCopy,
    Activity & act,
    RepairFlag repair,
    CheckSigsFlag checkSigs)
@@ -242,13 +242,11 @@ void Store::addMultipleToStore(
        storePathsToAdd.insert(thingToAdd.first.path);
    }

-    auto showProgress = [&]() {
-        act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed);
+    auto showProgress = [&, nrTotal = pathsToCopy.size()]() {
+        act.progress(nrDone, nrTotal, nrRunning, nrFailed);
    };

-    ThreadPool pool;
-
-    processGraph<StorePath>(pool,
+    processGraph<StorePath>(
        storePathsToAdd,

        [&](const StorePath & path) {
@@ -822,14 +820,25 @@ StorePathSet Store::queryValidPaths(const StorePathSet & paths, SubstituteFlag m
    auto doQuery = [&](const StorePath & path) {
        checkInterrupt();
        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<const ValidPathInfo>> fut) {
-            auto state(state_.lock());
+            bool exists = false;
+            std::exception_ptr newExc{};
+
            try {
                auto info = fut.get();
-                state->valid.insert(path);
+                exists = true;
            } catch (InvalidPath &) {
            } catch (...) {
-                state->exc = std::current_exception();
+                newExc = std::current_exception();
            }
+
+            auto state(state_.lock());
+
+            if (exists)
+                state->valid.insert(path);
+
+            if (newExc)
+                state->exc = newExc;
+
            assert(state->left);
            if (!--state->left)
                wakeup.notify_one();
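The restructuring above moves fut.get(), which may block or throw, out of the region where the shared state is locked. A minimal standalone sketch of the same pattern:

#include <exception>
#include <future>
#include <mutex>
#include <set>
#include <string>

std::mutex stateMutex;
std::set<std::string> valid;

void onResult(const std::string & path, std::future<bool> fut)
{
    bool exists = false;
    std::exception_ptr exc;
    try {
        exists = fut.get(); // may block or throw; the lock is not held here
    } catch (...) {
        exc = std::current_exception();
    }

    std::lock_guard<std::mutex> lock(stateMutex);
    if (exists)
        valid.insert(path);
    // exc would be recorded into shared state here, as in the code above
}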
@@ -1017,12 +1026,10 @@ std::map<StorePath, StorePath> copyPaths(
    }
    auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);

-    ThreadPool pool;
-
    try {
        // Copy the realisation closure
        processGraph<Realisation>(
-            pool, Realisation::closure(srcStore, toplevelRealisations),
+            Realisation::closure(srcStore, toplevelRealisations),
            [&](const Realisation & current) -> std::set<Realisation> {
                std::set<Realisation> children;
                for (const auto & [drvOutput, _] : current.dependentRealisations) {
@@ -1044,7 +1051,7 @@ std::map<StorePath, StorePath> copyPaths(
        // not be within our control to change that, and we might still want
        // to at least copy the output paths.
        if (e.missingFeature == Xp::CaDerivations)
-            ignoreException();
+            ignoreExceptionExceptInterrupt();
        else
            throw;
    }
@@ -1097,9 +1104,6 @@ std::map<StorePath, StorePath> copyPaths(
        return storePathForDst;
    };

-    // total is accessed by each copy, which are each handled in separate threads
-    std::atomic<uint64_t> total = 0;
-
    for (auto & missingPath : sortedMissing) {
        auto info = srcStore.queryPathInfo(missingPath);
@@ -1109,9 +1113,10 @@ std::map<StorePath, StorePath> copyPaths(
        ValidPathInfo infoForDst = *info;
        infoForDst.path = storePathForDst;

-        auto source = sinkToSource([&](Sink & sink) {
+        auto source = sinkToSource([&, narSize = info->narSize](Sink & sink) {
            // We can reasonably assume that the copy will happen whenever we
            // read the path, so log something about that at that point
+            uint64_t total = 0;
            auto srcUri = srcStore.getUri();
            auto dstUri = dstStore.getUri();
            auto storePathS = srcStore.printStorePath(missingPath);
@@ -1122,16 +1127,16 @@ std::map<StorePath, StorePath> copyPaths(

            LambdaSink progressSink([&](std::string_view data) {
                total += data.size();
-                act.progress(total, info->narSize);
+                act.progress(total, narSize);
            });
            TeeSink tee { sink, progressSink };

            srcStore.narFromPath(missingPath, tee);
        });
-        pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
+        pathsToCopy.emplace_back(std::move(infoForDst), std::move(source));
    }

-    dstStore.addMultipleToStore(pathsToCopy, act, repair, checkSigs);
+    dstStore.addMultipleToStore(std::move(pathsToCopy), act, repair, checkSigs);

    return pathsMap;
}
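Capturing narSize by value and keeping the byte counter local to each copy is what lets the shared std::atomic counter be deleted in the earlier hunk. A sketch of the per-copy progress pattern (ProgressCounter is hypothetical; in Nix the same role is played by a LambdaSink):

#include <cstdint>
#include <functional>
#include <string_view>

struct ProgressCounter
{
    uint64_t total = 0; // local to one copy; no atomics needed
    uint64_t expected;
    std::function<void(uint64_t, uint64_t)> report;

    void operator()(std::string_view chunk)
    {
        total += chunk.size();
        report(total, expected); // e.g. act.progress(total, narSize)
    }
};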
@@ -1304,7 +1309,7 @@ ref<Store> openStore(StoreReference && storeURI)
            /* If /nix doesn't exist, there is no daemon socket, and
               we're not root, then automatically set up a chroot
               store in ~/.local/share/nix/root. */
-            auto chrootStore = getDataDir() + "/nix/root";
+            auto chrootStore = getDataDir() + "/root";
            if (!pathExists(chrootStore)) {
                try {
                    createDirs(chrootStore);
@@ -1321,7 +1326,7 @@ ref<Store> openStore(StoreReference && storeURI)
            return std::make_shared<LocalStore>(params);
        },
        [&](const StoreReference::Specified & g) {
-            for (auto implem : *Implementations::registered)
+            for (const auto & implem : *Implementations::registered)
                if (implem.uriSchemes.count(g.scheme))
                    return implem.create(g.scheme, g.authority, params);
@@ -1352,7 +1357,7 @@ std::list<ref<Store>> getDefaultSubstituters()
        }
    };

-    for (auto uri : settings.substituters.get())
+    for (const auto & uri : settings.substituters.get())
        addStore(uri);

    stores.sort([](ref<Store> & a, ref<Store> & b) {
@@ -260,11 +260,11 @@ public:

    /**
     * Query the set of all valid paths. Note that for some store
-     * backends, the name part of store paths may be replaced by 'x'
-     * (i.e. you'll get /nix/store/<hash>-x rather than
-     * /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
+     * backends, the name part of store paths may be replaced by `x`
+     * (i.e. you'll get `/nix/store/<hash>-x` rather than
+     * `/nix/store/<hash>-<name>`). Use queryPathInfo() to obtain the
     * full store path. FIXME: should return a set of
-     * std::variant<StorePath, HashPart> to get rid of this hack.
+     * `std::variant<StorePath, HashPart>` to get rid of this hack.
     */
    virtual StorePathSet queryAllValidPaths()
    { unsupported("queryAllValidPaths"); }

@@ -425,7 +425,7 @@ public:
        CheckSigsFlag checkSigs = CheckSigs);

    virtual void addMultipleToStore(
-        PathsSource & pathsToCopy,
+        PathsSource && pathsToCopy,
        Activity & act,
        RepairFlag repair = NoRepair,
        CheckSigsFlag checkSigs = CheckSigs);
@@ -59,20 +59,20 @@ struct StoreDirConfig : public Config
    std::string showPaths(const StorePathSet & paths);

    /**
-     * @return true if ‘path’ is in the Nix store (but not the Nix
+     * @return true if *path* is in the Nix store (but not the Nix
     * store itself).
     */
    bool isInStore(PathView path) const;

    /**
-     * @return true if ‘path’ is a store path, i.e. a direct child of the
+     * @return true if *path* is a store path, i.e. a direct child of the
     * Nix store.
     */
    bool isStorePath(std::string_view path) const;

    /**
-     * Split a path like /nix/store/<hash>-<name>/<bla> into
-     * /nix/store/<hash>-<name> and /<bla>.
+     * Split a path like `/nix/store/<hash>-<name>/<bla>` into
+     * `/nix/store/<hash>-<name>` and `/<bla>`.
     */
    std::pair<StorePath, Path> toStorePath(PathView path) const;
@@ -13,31 +13,31 @@ namespace nix {
 *
 * Supported values are:
 *
- * - ‘local’: The Nix store in /nix/store and database in
+ * - `local`: The Nix store in /nix/store and database in
 *   /nix/var/nix/db, accessed directly.
 *
- * - ‘daemon’: The Nix store accessed via a Unix domain socket
+ * - `daemon`: The Nix store accessed via a Unix domain socket
 *   connection to nix-daemon.
 *
- * - ‘unix://<path>’: The Nix store accessed via a Unix domain socket
- *   connection to nix-daemon, with the socket located at <path>.
+ * - `unix://<path>`: The Nix store accessed via a Unix domain socket
+ *   connection to nix-daemon, with the socket located at `<path>`.
 *
- * - ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on
+ * - `auto` or ``: Equivalent to `local` or `daemon` depending on
 *   whether the user has write access to the local Nix
 *   store/database.
 *
- * - ‘file://<path>’: A binary cache stored in <path>.
+ * - `file://<path>`: A binary cache stored in `<path>`.
 *
- * - ‘https://<path>’: A binary cache accessed via HTTP.
+ * - `https://<path>`: A binary cache accessed via HTTP.
 *
- * - ‘s3://<path>’: A writable binary cache stored on Amazon's Simple
+ * - `s3://<path>`: A writable binary cache stored on Amazon's Simple
 *   Storage Service.
 *
- * - ‘ssh://[user@]<host>’: A remote Nix store accessed by running
- *   ‘nix-store --serve’ via SSH.
+ * - `ssh://[user@]<host>`: A remote Nix store accessed by running
+ *   `nix-store --serve` via SSH.
 *
 * You can pass parameters to the store type by appending
- * ‘?key=value&key=value&...’ to the URI.
+ * `?key=value&key=value&...` to the URI.
 */
struct StoreReference
{
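For context, the URI forms listed above are what callers hand to openStore. A hypothetical usage sketch, assuming the nix store headers and library are available (exact overloads and accepted parameters may differ):

#include "store-api.hh"

void sketch()
{
    auto viaDaemon = nix::openStore("daemon");
    auto fileCache = nix::openStore("file:///tmp/binary-cache");
    auto overSsh   = nix::openStore("ssh://builder.example.org");
}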
@@ -4,6 +4,7 @@
#include "file-system.hh"
#include "child.hh"
#include "strings.hh"
+#include "executable-path.hh"

namespace nix {

@@ -16,11 +17,18 @@ HookInstance::HookInstance()
    if (buildHookArgs.empty())
        throw Error("'build-hook' setting is empty");

-    auto buildHook = canonPath(buildHookArgs.front());
+    std::filesystem::path buildHook = buildHookArgs.front();
    buildHookArgs.pop_front();

+    try {
+        buildHook = ExecutablePath::load().findPath(buildHook);
+    } catch (ExecutableLookupError & e) {
+        e.addTrace(nullptr, "while resolving the 'build-hook' setting");
+        throw;
+    }
+
    Strings args;
-    args.push_back(std::string(baseNameOf(buildHook)));
+    args.push_back(buildHook.filename().string());

    for (auto & arg : buildHookArgs)
        args.push_back(arg);

@@ -59,7 +67,7 @@ HookInstance::HookInstance()
        if (dup2(builderOut.readSide.get(), 5) == -1)
            throw SysError("dupping builder's stdout/stderr");

-        execv(buildHook.c_str(), stringsToCharPtrs(args).data());
+        execv(buildHook.native().c_str(), stringsToCharPtrs(args).data());

        throw SysError("executing '%s'", buildHook);
    });
@@ -83,7 +91,7 @@ HookInstance::~HookInstance()
        toHook.writeSide = -1;
        if (pid != -1) pid.kill();
    } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
    }
}

@@ -69,6 +69,7 @@ extern "C" int sandbox_init_with_parameters(const char *profile, uint64_t flags,
#include <iostream>

#include "strings.hh"
+#include "signals.hh"

namespace nix {

@@ -113,9 +114,9 @@ LocalDerivationGoal::~LocalDerivationGoal()
{
    /* Careful: we should never ever throw an exception from a
       destructor. */
-    try { deleteTmpDir(false); } catch (...) { ignoreException(); }
-    try { killChild(); } catch (...) { ignoreException(); }
-    try { stopDaemon(); } catch (...) { ignoreException(); }
+    try { deleteTmpDir(false); } catch (...) { ignoreExceptionInDestructor(); }
+    try { killChild(); } catch (...) { ignoreExceptionInDestructor(); }
+    try { stopDaemon(); } catch (...) { ignoreExceptionInDestructor(); }
}
@@ -437,6 +438,41 @@ static void doBind(const Path & source, const Path & target, bool optional = fal
};
#endif

+/**
+ * Rethrow the current exception as a subclass of `Error`.
+ */
+static void rethrowExceptionAsError()
+{
+    try {
+        throw;
+    } catch (Error &) {
+        throw;
+    } catch (std::exception & e) {
+        throw Error(e.what());
+    } catch (...) {
+        throw Error("unknown exception");
+    }
+}
+
+/**
+ * Send the current exception to the parent in the format expected by
+ * `LocalDerivationGoal::processSandboxSetupMessages()`.
+ */
+static void handleChildException(bool sendException)
+{
+    try {
+        rethrowExceptionAsError();
+    } catch (Error & e) {
+        if (sendException) {
+            writeFull(STDERR_FILENO, "\1\n");
+            FdSink sink(STDERR_FILENO);
+            sink << e;
+            sink.flush();
+        } else
+            std::cerr << e.msg();
+    }
+}
+
void LocalDerivationGoal::startBuilder()
{
    if ((buildUser && buildUser->getUIDCount() != 1)
@@ -448,25 +484,22 @@ void LocalDerivationGoal::startBuilder()
#if __linux__
        experimentalFeatureSettings.require(Xp::Cgroups);

+        /* If we're running from the daemon, then this will return the
+           root cgroup of the service. Otherwise, it will return the
+           current cgroup. */
+        auto rootCgroup = getRootCgroup();
        auto cgroupFS = getCgroupFS();
        if (!cgroupFS)
            throw Error("cannot determine the cgroups file system");

-        auto ourCgroups = getCgroups("/proc/self/cgroup");
-        auto ourCgroup = ourCgroups[""];
-        if (ourCgroup == "")
-            throw Error("cannot determine cgroup name from /proc/self/cgroup");
-
-        auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
-
-        if (!pathExists(ourCgroupPath))
-            throw Error("expected cgroup directory '%s'", ourCgroupPath);
+        auto rootCgroupPath = canonPath(*cgroupFS + "/" + rootCgroup);
+        if (!pathExists(rootCgroupPath))
+            throw Error("expected cgroup directory '%s'", rootCgroupPath);

        static std::atomic<unsigned int> counter{0};

        cgroup = buildUser
-            ? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
-            : fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
+            ? fmt("%s/nix-build-uid-%d", rootCgroupPath, buildUser->getUID())
+            : fmt("%s/nix-build-pid-%d-%d", rootCgroupPath, getpid(), counter++);

        debug("using cgroup '%s'", *cgroup);
@@ -850,7 +883,7 @@ void LocalDerivationGoal::startBuilder()
        printMsg(lvlVomit, "setting builder env variable '%1%'='%2%'", i.first, i.second);

    /* Create the log file. */
-    Path logFile = openLogFile();
+    [[maybe_unused]] Path logFile = openLogFile();

    /* Create a pseudoterminal to get the output of the builder. */
    builderOut = posix_openpt(O_RDWR | O_NOCTTY);
@@ -956,32 +989,40 @@ void LocalDerivationGoal::startBuilder()
           root. */
        openSlave();

-        /* Drop additional groups here because we can't do it
-           after we've created the new user namespace. */
-        if (setgroups(0, 0) == -1) {
-            if (errno != EPERM)
-                throw SysError("setgroups failed");
-            if (settings.requireDropSupplementaryGroups)
-                throw Error("setgroups failed. Set the require-drop-supplementary-groups option to false to skip this step.");
-        }
-
-        ProcessOptions options;
-        options.cloneFlags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
-        if (privateNetwork)
-            options.cloneFlags |= CLONE_NEWNET;
-        if (usingUserNamespace)
-            options.cloneFlags |= CLONE_NEWUSER;
-
-        pid_t child = startProcess([&]() { runChild(); }, options);
-
-        writeFull(sendPid.writeSide.get(), fmt("%d\n", child));
-        _exit(0);
+        try {
+            /* Drop additional groups here because we can't do it
+               after we've created the new user namespace. */
+            if (setgroups(0, 0) == -1) {
+                if (errno != EPERM)
+                    throw SysError("setgroups failed");
+                if (settings.requireDropSupplementaryGroups)
+                    throw Error("setgroups failed. Set the require-drop-supplementary-groups option to false to skip this step.");
+            }
+
+            ProcessOptions options;
+            options.cloneFlags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
+            if (privateNetwork)
+                options.cloneFlags |= CLONE_NEWNET;
+            if (usingUserNamespace)
+                options.cloneFlags |= CLONE_NEWUSER;
+
+            pid_t child = startProcess([&]() { runChild(); }, options);
+
+            writeFull(sendPid.writeSide.get(), fmt("%d\n", child));
+            _exit(0);
+        } catch (...) {
+            handleChildException(true);
+            _exit(1);
+        }
    });

    sendPid.writeSide.close();

-    if (helper.wait() != 0)
+    if (helper.wait() != 0) {
+        processSandboxSetupMessages();
+        // Only reached if the child process didn't send an exception.
        throw Error("unable to start build process");
+    }

    userNamespaceSync.readSide = -1;
@@ -1057,7 +1098,12 @@ void LocalDerivationGoal::startBuilder()
    pid.setSeparatePG(true);
    worker.childStarted(shared_from_this(), {builderOut.get()}, true, true);

+    /* Check if setting up the build environment failed. */
+    processSandboxSetupMessages();
+}
+
+
+void LocalDerivationGoal::processSandboxSetupMessages()
+{
    std::vector<std::string> msgs;
    while (true) {
        std::string msg = [&]() {
@@ -1085,7 +1131,8 @@ void LocalDerivationGoal::startBuilder()
}


-void LocalDerivationGoal::initTmpDir() {
+void LocalDerivationGoal::initTmpDir()
+{
    /* In a sandbox, for determinism, always use the same temporary
       directory. */
#if __linux__
@@ -1537,8 +1584,10 @@ void LocalDerivationGoal::startDaemon()
                FdSink(remote.get()),
                NotTrusted, daemon::Recursive);
            debug("terminated daemon connection");
+        } catch (const Interrupted &) {
+            debug("interrupted daemon connection");
        } catch (SystemError &) {
-            ignoreException();
+            ignoreExceptionExceptInterrupt();
        }
    });
@@ -1966,7 +2015,7 @@ void LocalDerivationGoal::runChild()
            if (chdir(chrootRootDir.c_str()) == -1)
                throw SysError("cannot change directory to '%1%'", chrootRootDir);

-            if (mkdir("real-root", 0) == -1)
+            if (mkdir("real-root", 0500) == -1)
                throw SysError("cannot create real-root directory");

            if (pivot_root(".", "real-root") == -1)
@@ -1997,7 +2046,7 @@ void LocalDerivationGoal::runChild()
            throw SysError("changing into '%1%'", tmpDir);

        /* Close all other file descriptors. */
-        unix::closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO});
+        unix::closeExtraFDs();

#if __linux__
        linux::setPersonality(drv->platform);
@@ -2228,14 +2277,8 @@ void LocalDerivationGoal::runChild()

        throw SysError("executing '%1%'", drv->builder);

-    } catch (Error & e) {
-        if (sendException) {
-            writeFull(STDERR_FILENO, "\1\n");
-            FdSink sink(STDERR_FILENO);
-            sink << e;
-            sink.flush();
-        } else
-            std::cerr << e.msg();
+    } catch (...) {
+        handleChildException(sendException);
        _exit(1);
    }
}
@@ -2614,10 +2657,14 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
                        wanted.to_string(HashFormat::SRI, true),
                        got.to_string(HashFormat::SRI, true)));
                }
-                if (!newInfo0.references.empty())
+                if (!newInfo0.references.empty()) {
+                    auto numViolations = newInfo.references.size();
                    delayedException = std::make_exception_ptr(
-                        BuildError("illegal path references in fixed-output derivation '%s'",
-                            worker.store.printStorePath(drvPath)));
+                        BuildError("fixed-output derivations must not reference store paths: '%s' references %d distinct paths, e.g. '%s'",
+                            worker.store.printStorePath(drvPath),
+                            numViolations,
+                            worker.store.printStorePath(*newInfo.references.begin())));
+                }

                return newInfo0;
            },
|||
StorePath LocalDerivationGoal::makeFallbackPath(OutputNameView outputName)
|
||||
{
|
||||
// This is a bogus path type, constructed this way to ensure that it doesn't collide with any other store path
|
||||
// See doc/manual/src/protocols/store-path.md for details
|
||||
// See doc/manual/source/protocols/store-path.md for details
|
||||
// TODO: We may want to separate the responsibilities of constructing the path fingerprint and of actually doing the hashing
|
||||
auto pathType = "rewrite:" + std::string(drvPath.to_string()) + ":name:" + std::string(outputName);
|
||||
return worker.store.makeStorePath(
|
||||
|
|
@ -3029,7 +3076,7 @@ StorePath LocalDerivationGoal::makeFallbackPath(OutputNameView outputName)
|
|||
StorePath LocalDerivationGoal::makeFallbackPath(const StorePath & path)
|
||||
{
|
||||
// This is a bogus path type, constructed this way to ensure that it doesn't collide with any other store path
|
||||
// See doc/manual/src/protocols/store-path.md for details
|
||||
// See doc/manual/source/protocols/store-path.md for details
|
||||
auto pathType = "rewrite:" + std::string(drvPath.to_string()) + ":" + std::string(path.to_string());
|
||||
return worker.store.makeStorePath(
|
||||
pathType,
|
||||
|
|
|
|||
|
|
@ -210,6 +210,11 @@ struct LocalDerivationGoal : public DerivationGoal
|
|||
*/
|
||||
void initEnv();
|
||||
|
||||
/**
|
||||
* Process messages send by the sandbox initialization.
|
||||
*/
|
||||
void processSandboxSetupMessages();
|
||||
|
||||
/**
|
||||
* Setup tmp dir location.
|
||||
*/
|
||||
|
|
@ -220,8 +225,15 @@ struct LocalDerivationGoal : public DerivationGoal
|
|||
*/
|
||||
void writeStructuredAttrs();
|
||||
|
||||
/**
|
||||
* Start an in-process nix daemon thread for recursive-nix.
|
||||
*/
|
||||
void startDaemon();
|
||||
|
||||
/**
|
||||
* Stop the in-process nix daemon thread.
|
||||
* @see startDaemon
|
||||
*/
|
||||
void stopDaemon();
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@@ -2,10 +2,12 @@
#include "pathlocks.hh"
#include "signals.hh"
#include "util.hh"
-#include <errhandlingapi.h>
-#include <fileapi.h>
-#include <windows.h>
-#include "windows-error.hh"
+
+#ifdef _WIN32
+# include <errhandlingapi.h>
+# include <fileapi.h>
+# include <windows.h>
+# include "windows-error.hh"

namespace nix {

@@ -154,3 +156,4 @@ FdLock::FdLock(Descriptor desc, LockType lockType, bool wait, std::string_view w
}

}
+#endif
@ -12,7 +12,7 @@ WorkerProto::BasicClientConnection::~BasicClientConnection()
|
|||
try {
|
||||
to.flush();
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
ignoreExceptionInDestructor();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -32,7 +32,8 @@ static Logger::Fields readFields(Source & from)
|
|||
return fields;
|
||||
}
|
||||
|
||||
std::exception_ptr WorkerProto::BasicClientConnection::processStderrReturn(Sink * sink, Source * source, bool flush)
|
||||
std::exception_ptr
|
||||
WorkerProto::BasicClientConnection::processStderrReturn(Sink * sink, Source * source, bool flush, bool block)
|
||||
{
|
||||
if (flush)
|
||||
to.flush();
|
||||
|
|
@ -41,6 +42,9 @@ std::exception_ptr WorkerProto::BasicClientConnection::processStderrReturn(Sink
|
|||
|
||||
while (true) {
|
||||
|
||||
if (!block && !from.hasData())
|
||||
break;
|
||||
|
||||
auto msg = readNum<uint64_t>(from);
|
||||
|
||||
if (msg == STDERR_WRITE) {
|
||||
|
|
@@ -95,8 +99,10 @@ std::exception_ptr WorkerProto::BasicClientConnection::processStderrReturn(Sink
             logger->result(act, type, fields);
         }
 
-        else if (msg == STDERR_LAST)
+        else if (msg == STDERR_LAST) {
+            assert(block);
             break;
+        }
 
         else
             throw Error("got unknown message type %x from Nix daemon", msg);
@@ -130,9 +136,10 @@ std::exception_ptr WorkerProto::BasicClientConnection::processStderrReturn(Sink
     }
 }
 
-void WorkerProto::BasicClientConnection::processStderr(bool * daemonException, Sink * sink, Source * source, bool flush)
+void WorkerProto::BasicClientConnection::processStderr(
+    bool * daemonException, Sink * sink, Source * source, bool flush, bool block)
 {
-    auto ex = processStderrReturn(sink, source, flush);
+    auto ex = processStderrReturn(sink, source, flush, block);
     if (ex) {
         *daemonException = true;
         std::rethrow_exception(ex);
@@ -70,14 +70,15 @@ struct WorkerProto::BasicClientConnection : WorkerProto::BasicConnection
 
     virtual void closeWrite() = 0;
 
-    std::exception_ptr processStderrReturn(Sink * sink = 0, Source * source = 0, bool flush = true);
+    std::exception_ptr processStderrReturn(Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true);
 
-    void processStderr(bool * daemonException, Sink * sink = 0, Source * source = 0, bool flush = true);
+    void
+    processStderr(bool * daemonException, Sink * sink = 0, Source * source = 0, bool flush = true, bool block = true);
 
     /**
      * Establishes connection, negotiating version.
      *
-     * @return the minimum version supported by both sides and the set
+     * @return The minimum version supported by both sides and the set
      * of protocol features supported by both sides.
      *
     * @param to Taken by reference to allow for various error handling
@@ -86,9 +87,9 @@ struct WorkerProto::BasicClientConnection : WorkerProto::BasicConnection
      * @param from Taken by reference to allow for various error
      * handling mechanisms.
      *
-     * @param localVersion Our version which is sent over
+     * @param localVersion Our version which is sent over.
      *
-     * @param features The protocol features that we support
+     * @param supportedFeatures The protocol features that we support.
      */
     // FIXME: this should probably be a constructor.
     static std::tuple<Version, std::set<Feature>> handshake(
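The documented contract, the minimum of the two versions plus the features both sides advertise, is compact enough to state in code. A simplified sketch under those assumptions (the type aliases here stand in for the WorkerProto ones):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <set>
    #include <string>
    #include <tuple>

    using Version = std::uint64_t;
    using Feature = std::string;

    // Sketch of the negotiation contract documented above: both sides settle
    // on the lowest version either supports, plus the features both advertise.
    std::tuple<Version, std::set<Feature>> negotiate(
        Version localVersion, const std::set<Feature> & supportedFeatures,
        Version remoteVersion, const std::set<Feature> & remoteFeatures)
    {
        std::set<Feature> common;
        std::set_intersection(
            supportedFeatures.begin(), supportedFeatures.end(),
            remoteFeatures.begin(), remoteFeatures.end(),
            std::inserter(common, common.begin()));
        return {std::min(localVersion, remoteVersion), common};
    }

    int main()
    {
        auto [version, features] = negotiate(38, {"foo", "bar"}, 37, {"bar"});
        std::cout << "version " << version << ", " << features.size() << " common feature(s)\n";
    }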
@@ -140,7 +141,7 @@ struct WorkerProto::BasicServerConnection : WorkerProto::BasicConnection
     /**
      * Establishes connection, negotiating version.
      *
-     * @return the version provided by the other side of the
+     * @return The version provided by the other side of the
      * connection.
      *
      * @param to Taken by reference to allow for various error handling
@@ -149,9 +150,9 @@
      * @param from Taken by reference to allow for various error
      * handling mechanisms.
      *
-     * @param localVersion Our version which is sent over
+     * @param localVersion Our version which is sent over.
      *
-     * @param features The protocol features that we support
+     * @param supportedFeatures The protocol features that we support.
      */
     // FIXME: this should probably be a constructor.
     static std::tuple<Version, std::set<Feature>> handshake(
@@ -29,11 +29,10 @@ WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)
 WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>)
 WORKER_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>)
 
-#define COMMA_ ,
+#define WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA ,
 WORKER_USE_LENGTH_PREFIX_SERIALISER(
-    template<typename K COMMA_ typename V>,
-    std::map<K COMMA_ V>)
-#undef COMMA_
+    template<typename K WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA typename V>,
+    std::map<K WORKER_USE_LENGTH_PREFIX_SERIALISER_COMMA V>)
 
 /**
  * Use `CommonProto` where possible.
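The rename matters because an unprefixed helper like COMMA_ could collide with a macro in any file that includes this header. The trick itself, smuggling a comma through a macro argument behind an object-like macro, is shown in this standalone example (all names here are invented):

    #include <cstddef>
    #include <map>
    #include <string>

    // A comma in a macro argument would be parsed as an argument separator,
    // so hide it behind an object-like macro; a long prefixed name avoids
    // colliding with any other COMMA-style macro in including code.
    #define EXAMPLE_COMMA ,

    #define DECLARE_SIZE_FN(templateDecl, type) \
        templateDecl std::size_t sizeOf(const type & c) { return c.size(); }

    // Without EXAMPLE_COMMA, "std::map<K, V>" would be split in two at the
    // comma: angle brackets do not protect macro arguments.
    DECLARE_SIZE_FN(template<typename K EXAMPLE_COMMA typename V>, std::map<K EXAMPLE_COMMA V>)

    int main()
    {
        std::map<int, std::string> m{{1, "one"}};
        return sizeOf(m) == 1 ? 0 : 1;
    }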