Mirror of https://github.com/NixOS/nix.git

Merge remote-tracking branch 'upstream/master' into templated-daemon-protocol

Commit b92d3b2edd by John Ericson, 2020-09-22 00:45:55 +00:00
360 changed files with 14613 additions and 19071 deletions

View file

@ -11,6 +11,7 @@
#include "nar-accessor.hh"
#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
#include <chrono>
#include <future>
@ -22,7 +23,8 @@
namespace nix {
BinaryCacheStore::BinaryCacheStore(const Params & params)
: Store(params)
: BinaryCacheStoreConfig(params)
, Store(params)
{
if (secretKeyFile != "")
secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));

View file

@ -11,17 +11,21 @@ namespace nix {
struct NarInfo;
class BinaryCacheStore : public Store
struct BinaryCacheStoreConfig : virtual StoreConfig
{
public:
using StoreConfig::StoreConfig;
const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<bool> writeDebugInfo{this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{this, false, "parallel-compression",
const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{(StoreConfig*) this, false, "parallel-compression",
"enable multi-threading compression, available for xz only currently"};
};
class BinaryCacheStore : public Store, public virtual BinaryCacheStoreConfig
{
private:
@ -58,7 +62,7 @@ public:
public:
virtual void init();
virtual void init() override;
private:
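
For orientation: the per-store settings now live in a `*Config` struct that the store class virtually inherits, and each `Setting` is constructed with a `(StoreConfig*) this` cast so that it registers with the shared `StoreConfig` base rather than with the concrete store. A minimal sketch of the pattern, assuming the Nix internal headers; `MyStoreConfig`, `MyStore` and `example-setting` are hypothetical names, not part of this diff:

```cpp
#include "store-api.hh"   // StoreConfig, Store, Setting

namespace nix {

// Hypothetical config mixin: the cast registers the setting against the
// shared StoreConfig base rather than the concrete store type.
struct MyStoreConfig : virtual StoreConfig
{
    using StoreConfig::StoreConfig;

    const Setting<std::string> exampleSetting{
        (StoreConfig*) this, "default-value", "example-setting",
        "an illustrative setting, not part of the diff"};
};

// The store inherits its config virtually, so a store assembled from several
// config mixins still contains exactly one StoreConfig subobject. The pure
// virtual Store methods are omitted here; see the DummyStore added later in
// this commit for a complete example.
struct MyStore : public Store, public virtual MyStoreConfig
{
    MyStore(const Params & params)
        : StoreConfig(params)
        , Store(params)
    { }
};

}
```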

File diff suppressed because it is too large.

View file

@ -232,7 +232,7 @@ struct ClientSettings
else if (setSubstituters(settings.extraSubstituters))
;
else
warn("ignoring the user-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
debug("ignoring the client-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
} catch (UsageError & e) {
warn(e.what());
}

View file

@ -7,7 +7,7 @@
namespace nix {
std::optional<StorePath> DerivationOutput::pathOpt(const Store & store, std::string_view drvName) const
std::optional<StorePath> DerivationOutput::path(const Store & store, std::string_view drvName, std::string_view outputName) const
{
return std::visit(overloaded {
[](DerivationOutputInputAddressed doi) -> std::optional<StorePath> {
@ -15,7 +15,7 @@ std::optional<StorePath> DerivationOutput::pathOpt(const Store & store, std::str
},
[&](DerivationOutputCAFixed dof) -> std::optional<StorePath> {
return {
store.makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName)
dof.path(store, drvName, outputName)
};
},
[](DerivationOutputCAFloating dof) -> std::optional<StorePath> {
@ -25,6 +25,13 @@ std::optional<StorePath> DerivationOutput::pathOpt(const Store & store, std::str
}
StorePath DerivationOutputCAFixed::path(const Store & store, std::string_view drvName, std::string_view outputName) const {
return store.makeFixedOutputPath(
hash.method, hash.hash,
outputPathName(drvName, outputName));
}
bool derivationIsCA(DerivationType dt) {
switch (dt) {
case DerivationType::InputAddressed: return false;
@ -61,7 +68,7 @@ bool BasicDerivation::isBuiltin() const
}
StorePath writeDerivation(ref<Store> store,
StorePath writeDerivation(Store & store,
const Derivation & drv, RepairFlag repair)
{
auto references = drv.inputSrcs;
@ -71,10 +78,10 @@ StorePath writeDerivation(ref<Store> store,
(that can be missing (of course) and should not necessarily be
held during a garbage collection). */
auto suffix = std::string(drv.name) + drvExtension;
auto contents = drv.unparse(*store, false);
auto contents = drv.unparse(store, false);
return settings.readOnlyMode
? store->computeStorePathForText(suffix, contents, references)
: store->addTextToStore(suffix, contents, references, repair);
? store.computeStorePathForText(suffix, contents, references)
: store.addTextToStore(suffix, contents, references, repair);
}
@ -106,12 +113,15 @@ static string parseString(std::istream & str)
return res;
}
static void validatePath(std::string_view s) {
if (s.size() == 0 || s[0] != '/')
throw FormatError("bad path '%1%' in derivation", s);
}
static Path parsePath(std::istream & str)
{
string s = parseString(str);
if (s.size() == 0 || s[0] != '/')
throw FormatError("bad path '%1%' in derivation", s);
auto s = parseString(str);
validatePath(s);
return s;
}
@ -140,7 +150,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
static DerivationOutput parseDerivationOutput(const Store & store,
StorePath path, std::string_view hashAlgo, std::string_view hash)
std::string_view pathS, std::string_view hashAlgo, std::string_view hash)
{
if (hashAlgo != "") {
auto method = FileIngestionMethod::Flat;
@ -148,44 +158,49 @@ static DerivationOutput parseDerivationOutput(const Store & store,
method = FileIngestionMethod::Recursive;
hashAlgo = hashAlgo.substr(2);
}
const HashType hashType = parseHashType(hashAlgo);
return hash != ""
? DerivationOutput {
.output = DerivationOutputCAFixed {
.hash = FixedOutputHash {
.method = std::move(method),
.hash = Hash::parseNonSRIUnprefixed(hash, hashType),
},
}
}
: (settings.requireExperimentalFeature("ca-derivations"),
DerivationOutput {
.output = DerivationOutputCAFloating {
.method = std::move(method),
.hashType = std::move(hashType),
},
});
} else
const auto hashType = parseHashType(hashAlgo);
if (hash != "") {
validatePath(pathS);
return DerivationOutput {
.output = DerivationOutputCAFixed {
.hash = FixedOutputHash {
.method = std::move(method),
.hash = Hash::parseNonSRIUnprefixed(hash, hashType),
},
},
};
} else {
settings.requireExperimentalFeature("ca-derivations");
assert(pathS == "");
return DerivationOutput {
.output = DerivationOutputCAFloating {
.method = std::move(method),
.hashType = std::move(hashType),
},
};
}
} else {
validatePath(pathS);
return DerivationOutput {
.output = DerivationOutputInputAddressed {
.path = std::move(path),
.path = store.parseStorePath(pathS),
}
};
}
}
static DerivationOutput parseDerivationOutput(const Store & store, std::istringstream & str)
{
expect(str, ","); auto path = store.parseStorePath(parsePath(str));
expect(str, ","); const auto pathS = parseString(str);
expect(str, ","); const auto hashAlgo = parseString(str);
expect(str, ","); const auto hash = parseString(str);
expect(str, ")");
return parseDerivationOutput(store, std::move(path), hashAlgo, hash);
return parseDerivationOutput(store, pathS, hashAlgo, hash);
}
static Derivation parseDerivation(const Store & store, std::string && s, std::string_view name)
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name)
{
Derivation drv;
drv.name = name;
@ -233,34 +248,6 @@ static Derivation parseDerivation(const Store & store, std::string && s, std::st
}
Derivation readDerivation(const Store & store, const Path & drvPath, std::string_view name)
{
try {
return parseDerivation(store, readFile(drvPath), name);
} catch (FormatError & e) {
throw Error("error parsing derivation '%1%': %2%", drvPath, e.msg());
}
}
Derivation Store::derivationFromPath(const StorePath & drvPath)
{
ensurePath(drvPath);
return readDerivation(drvPath);
}
Derivation Store::readDerivation(const StorePath & drvPath)
{
auto accessor = getFSAccessor();
try {
return parseDerivation(*this, accessor->readFile(printStorePath(drvPath)), Derivation::nameFromPath(drvPath));
} catch (FormatError & e) {
throw Error("error parsing derivation '%s': %s", printStorePath(drvPath), e.msg());
}
}
static void printString(string & res, std::string_view s)
{
char buf[s.size() * 2 + 2];
@ -322,17 +309,19 @@ string Derivation::unparse(const Store & store, bool maskOutputs,
for (auto & i : outputs) {
if (first) first = false; else s += ',';
s += '('; printUnquotedString(s, i.first);
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(i.second.path(store, name)));
std::visit(overloaded {
[&](DerivationOutputInputAddressed doi) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(doi.path));
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
},
[&](DerivationOutputCAFixed dof) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(dof.path(store, name, i.first)));
s += ','; printUnquotedString(s, dof.hash.printMethodAlgo());
s += ','; printUnquotedString(s, dof.hash.hash.to_string(Base16, false));
},
[&](DerivationOutputCAFloating dof) {
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
s += ','; printUnquotedString(s, "");
},
@ -388,6 +377,16 @@ bool isDerivation(const string & fileName)
}
std::string outputPathName(std::string_view drvName, std::string_view outputName) {
std::string res { drvName };
if (outputName != "out") {
res += "-";
res += outputName;
}
return res;
}
DerivationType BasicDerivation::type() const
{
std::set<std::string_view> inputAddressedOutputs, fixedCAOutputs, floatingCAOutputs;
@ -480,12 +479,12 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
throw Error("Regular input-addressed derivations are not yet allowed to depend on CA derivations");
case DerivationType::CAFixed: {
std::map<std::string, Hash> outputHashes;
for (const auto & i : drv.outputsAndPaths(store)) {
auto & dof = std::get<DerivationOutputCAFixed>(i.second.first.output);
for (const auto & i : drv.outputs) {
auto & dof = std::get<DerivationOutputCAFixed>(i.second.output);
auto hash = hashString(htSHA256, "fixed:out:"
+ dof.hash.printMethodAlgo() + ":"
+ dof.hash.hash.to_string(Base16, false) + ":"
+ store.printStorePath(i.second.second));
+ store.printStorePath(dof.path(store, drv.name, i.first)));
outputHashes.insert_or_assign(i.first, std::move(hash));
}
return outputHashes;
@ -536,21 +535,13 @@ bool wantOutput(const string & output, const std::set<string> & wanted)
}
StorePathSet BasicDerivation::outputPaths(const Store & store) const
{
StorePathSet paths;
for (auto & i : outputsAndPaths(store))
paths.insert(i.second.second);
return paths;
}
static DerivationOutput readDerivationOutput(Source & in, const Store & store)
{
auto path = store.parseStorePath(readString(in));
const auto pathS = readString(in);
const auto hashAlgo = readString(in);
const auto hash = readString(in);
return parseDerivationOutput(store, std::move(path), hashAlgo, hash);
return parseDerivationOutput(store, pathS, hashAlgo, hash);
}
StringSet BasicDerivation::outputNames() const
@ -561,23 +552,12 @@ StringSet BasicDerivation::outputNames() const
return names;
}
DerivationOutputsAndPaths BasicDerivation::outputsAndPaths(const Store & store) const {
DerivationOutputsAndPaths outsAndPaths;
for (auto output : outputs)
outsAndPaths.insert(std::make_pair(
output.first,
std::make_pair(output.second, output.second.path(store, name))
)
);
return outsAndPaths;
}
DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const Store & store) const {
DerivationOutputsAndOptPaths outsAndOptPaths;
for (auto output : outputs)
outsAndOptPaths.insert(std::make_pair(
output.first,
std::make_pair(output.second, output.second.pathOpt(store, output.first))
std::make_pair(output.second, output.second.path(store, name, output.first))
)
);
return outsAndOptPaths;
@ -622,22 +602,25 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv,
void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv)
{
out << drv.outputs.size();
for (auto & i : drv.outputsAndPaths(store)) {
out << i.first
<< store.printStorePath(i.second.second);
for (auto & i : drv.outputs) {
out << i.first;
std::visit(overloaded {
[&](DerivationOutputInputAddressed doi) {
out << "" << "";
out << store.printStorePath(doi.path)
<< ""
<< "";
},
[&](DerivationOutputCAFixed dof) {
out << dof.hash.printMethodAlgo()
out << store.printStorePath(dof.path(store, drv.name, i.first))
<< dof.hash.printMethodAlgo()
<< dof.hash.hash.to_string(Base16, false);
},
[&](DerivationOutputCAFloating dof) {
out << (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
out << ""
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
<< "";
},
}, i.second.first.output);
}, i.second.output);
}
WorkerProto<StorePathSet>::write(store, out, drv.inputSrcs);
out << drv.platform << drv.builder << drv.args;
@ -653,5 +636,12 @@ std::string hashPlaceholder(const std::string & outputName)
return "/" + hashString(htSHA256, "nix-output:" + outputName).to_string(Base32, false);
}
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName)
{
auto drvNameWithExtension = drvPath.name();
auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
return "/" + hashString(htSHA256, clearText).to_string(Base32, false);
}
}

View file

@ -27,6 +27,7 @@ struct DerivationOutputInputAddressed
struct DerivationOutputCAFixed
{
FixedOutputHash hash; /* hash used for expected hash computation */
StorePath path(const Store & store, std::string_view drvName, std::string_view outputName) const;
};
/* Floating-output derivations, whose output paths are content addressed, but
@ -49,14 +50,8 @@ struct DerivationOutput
std::optional<HashType> hashAlgoOpt(const Store & store) const;
/* Note, when you use this function you should make sure that you're passing
the right derivation name. When in doubt, you should use the safer
interface provided by BasicDerivation::outputsAndPaths */
std::optional<StorePath> pathOpt(const Store & store, std::string_view drvName) const;
/* DEPRECATED: Remove after CA drvs are fully implemented */
StorePath path(const Store & store, std::string_view drvName) const {
auto p = pathOpt(store, drvName);
if (!p) throw UnimplementedError("floating content-addressed derivations are not yet implemented");
return *p;
}
interface provided by BasicDerivation::outputsAndOptPaths */
std::optional<StorePath> path(const Store & store, std::string_view drvName, std::string_view outputName) const;
};
typedef std::map<string, DerivationOutput> DerivationOutputs;
@ -113,17 +108,12 @@ struct BasicDerivation
/* Return true iff this is a fixed-output derivation. */
DerivationType type() const;
/* Return the output paths of a derivation. */
StorePathSet outputPaths(const Store & store) const;
/* Return the output names of a derivation. */
StringSet outputNames() const;
/* Calculates the maps that contains all the DerivationOutputs, but
augmented with knowledge of the Store paths they would be written into.
The first one of these functions will be removed when the CA work is
completed */
DerivationOutputsAndPaths outputsAndPaths(const Store & store) const;
augmented with knowledge of the Store paths they would be written
into. */
DerivationOutputsAndOptPaths outputsAndOptPaths(const Store & store) const;
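
As a hedged illustration of how a caller can consume this map (a sketch only; `printOutputs` is a hypothetical helper, not part of the diff), note that floating content-addressed outputs carry an empty optional because their store path is only fixed once the build has run:

```cpp
#include <iostream>
#include "derivations.hh"   // BasicDerivation, DerivationOutputsAndOptPaths
#include "store-api.hh"

// Hypothetical helper: print each output name and, when already known, the
// store path it maps to.
void printOutputs(const nix::Store & store, const nix::BasicDerivation & drv)
{
    for (auto & [outputName, outputAndOptPath] : drv.outputsAndOptPaths(store)) {
        auto & optPath = outputAndOptPath.second;
        if (optPath)
            std::cout << outputName << " -> " << store.printStorePath(*optPath) << "\n";
        else
            std::cout << outputName << " -> (path not known until built)\n";
    }
}
```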
static std::string_view nameFromPath(const StorePath & storePath);
@ -146,15 +136,22 @@ class Store;
enum RepairFlag : bool { NoRepair = false, Repair = true };
/* Write a derivation to the Nix store, and return its path. */
StorePath writeDerivation(ref<Store> store,
StorePath writeDerivation(Store & store,
const Derivation & drv, RepairFlag repair = NoRepair);
/* Read a derivation from a file. */
Derivation readDerivation(const Store & store, const Path & drvPath, std::string_view name);
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove
bool isDerivation(const string & fileName);
/* Calculate the name that will be used for the store path for this
output.
This is usually <drv-name>-<output-name>, but is just <drv-name> when
the output name is "out". */
std::string outputPathName(std::string_view drvName, std::string_view outputName);
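
For example (an illustrative check, not part of the diff; `hello-2.10` is a made-up derivation name):

```cpp
#include <cassert>

// "out" is special-cased; every other output name is appended after a dash.
assert(outputPathName("hello-2.10", "out") == "hello-2.10");
assert(outputPathName("hello-2.10", "dev") == "hello-2.10-dev");
```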
// known CA drv's output hashes, currently just for fixed-output derivations
// whose output hashes are always known since they are fixed up-front.
typedef std::map<std::string, Hash> CaOutputHashes;
@ -202,6 +199,21 @@ struct Sink;
Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv, std::string_view name);
void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv);
/* This creates an opaque and almost certainly unique string
deterministically from the output name.
It is used as a placeholder to allow derivations to refer to their
own outputs without needing to use the hash of a derivation in
itself, making the hash near-impossible to calculate. */
std::string hashPlaceholder(const std::string & outputName);
/* This creates an opaque and almost certainly unique string
deterministically from a derivation path and output name.
It is used as a placeholder to allow derivations to refer to
content-addressed paths whose content --- and thus the paths
themselves --- isn't yet known. This occurs when a derivation has a
dependency which is a CA derivation. */
std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName);
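
To make the contrast with `hashPlaceholder` concrete, a hypothetical usage sketch (`store` and `drvPath` are assumed to already exist; both functions return opaque hash strings beginning with `/`):

```cpp
// Placeholder for one of *this* derivation's own outputs; only the output
// name is needed ("/" + base-32 SHA-256 of "nix-output:<name>").
std::string self = hashPlaceholder("out");

// Placeholder for an output of a *dependency* that is a CA derivation; the
// dependency's .drv path is also needed, because the real output path cannot
// be computed before that dependency has been built.
std::string dep = downstreamPlaceholder(store, drvPath, "out");
```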
}

View file

@ -0,0 +1,68 @@
#include "store-api.hh"
#include "callback.hh"
namespace nix {
struct DummyStoreConfig : virtual StoreConfig {
using StoreConfig::StoreConfig;
const std::string name() override { return "Dummy Store"; }
};
struct DummyStore : public Store, public virtual DummyStoreConfig
{
DummyStore(const std::string scheme, const std::string uri, const Params & params)
: DummyStore(params)
{ }
DummyStore(const Params & params)
: StoreConfig(params)
, Store(params)
{
}
string getUri() override
{
return *uriSchemes().begin();
}
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
{
callback(nullptr);
}
static std::set<std::string> uriSchemes() {
return {"dummy"};
}
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override
{ unsupported("addToStore"); }
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override
{ unsupported("addToStore"); }
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override
{ unsupported("addTextToStore"); }
void narFromPath(const StorePath & path, Sink & sink) override
{ unsupported("narFromPath"); }
void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override
{ unsupported("buildDerivation"); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regStore;
}
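
The registration above is what makes the `dummy` URI scheme resolvable at run time. A hypothetical usage sketch (assuming the usual `openStore` entry point from `store-api.hh` and that normal Nix library initialisation has already been done):

```cpp
#include <iostream>
#include "store-api.hh"

int main()
{
    // "dummy://" is matched against DummyStore::uriSchemes() through the
    // implementations registered with RegisterStoreImplementation.
    auto store = nix::openStore("dummy://");
    std::cout << store->getUri() << std::endl;  // prints "dummy"
    return 0;
}
```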

View file

@ -5,6 +5,7 @@
#include "s3.hh"
#include "compression.hh"
#include "finally.hh"
#include "callback.hh"
#ifdef ENABLE_S3
#include <aws/core/client/ClientConfiguration.h>

View file

@ -17,15 +17,30 @@ struct FileTransferSettings : Config
Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
"String appended to the user agent in HTTP requests."};
Setting<size_t> httpConnections{this, 25, "http-connections",
"Number of parallel HTTP connections.",
Setting<size_t> httpConnections{
this, 25, "http-connections",
R"(
The maximum number of parallel TCP connections used to fetch
files from binary caches and by other downloads. It defaults
to 25. 0 means no limit.
)",
{"binary-caches-parallel-connections"}};
Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
"Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
Setting<unsigned long> connectTimeout{
this, 0, "connect-timeout",
R"(
The timeout (in seconds) for establishing connections in the
binary cache substituter. It corresponds to `curl`'s
`--connect-timeout` option.
)"};
Setting<unsigned long> stalledDownloadTimeout{this, 300, "stalled-download-timeout",
"Timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration."};
Setting<unsigned long> stalledDownloadTimeout{
this, 300, "stalled-download-timeout",
R"(
The timeout (in seconds) for receiving data from servers
during download. Nix cancels idle downloads after this
timeout's duration.
)"};
Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};

View file

@ -85,8 +85,7 @@ void LocalStore::addIndirectRoot(const Path & path)
}
Path LocalFSStore::addPermRoot(const StorePath & storePath,
const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
{
Path gcRoot(canonPath(_gcRoot));
@ -95,47 +94,12 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath,
"creating a garbage collector root (%1%) in the Nix store is forbidden "
"(are you running nix-build inside the store?)", gcRoot);
if (indirect) {
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
throw Error("cannot create symlink '%1%'; already exists", gcRoot);
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
}
else {
if (!allowOutsideRootsDir) {
Path rootsDir = canonPath((format("%1%/%2%") % stateDir % gcRootsDir).str());
if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
throw Error(
"path '%1%' is not a valid garbage collector root; "
"it's not in the directory '%2%'",
gcRoot, rootsDir);
}
if (baseNameOf(gcRoot) == std::string(storePath.to_string()))
writeFile(gcRoot, "");
else
makeSymlink(gcRoot, printStorePath(storePath));
}
/* Check that the root can be found by the garbage collector.
!!! This can be very slow on machines that have many roots.
Instead of reading all the roots, it would be more efficient to
check if the root is in a directory in or linked from the
gcroots directory. */
if (settings.checkRootReachability) {
auto roots = findRoots(false);
if (roots[storePath].count(gcRoot) == 0)
logWarning({
.name = "GC root",
.hint = hintfmt("warning: '%1%' is not in a directory where the garbage collector looks for roots; "
"therefore, '%2%' might be removed by the garbage collector",
gcRoot, printStorePath(storePath))
});
}
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
throw Error("cannot create symlink '%1%'; already exists", gcRoot);
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
/* Grab the global GC root, causing us to block while a GC is in
progress. This prevents the set of permanent roots from
@ -610,9 +574,12 @@ bool LocalStore::canReachRoot(GCState & state, StorePathSet & visited, const Sto
/* If keep-derivations is set and this is a derivation, then
don't delete the derivation if any of the outputs are alive. */
if (state.gcKeepDerivations && path.isDerivation()) {
for (auto & i : queryDerivationOutputs(path))
if (isValidPath(i) && queryPathInfo(i)->deriver == path)
incoming.insert(i);
for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(path))
if (maybeOutPath &&
isValidPath(*maybeOutPath) &&
queryPathInfo(*maybeOutPath)->deriver == path
)
incoming.insert(*maybeOutPath);
}
/* If keep-outputs is set, then don't delete this path if there

View file

@ -2,6 +2,7 @@
#include "util.hh"
#include "archive.hh"
#include "args.hh"
#include "abstract-setting-to-json.hh"
#include <algorithm>
#include <map>
@ -9,6 +10,8 @@
#include <dlfcn.h>
#include <sys/utsname.h>
#include <nlohmann/json.hpp>
namespace nix {
@ -160,11 +163,6 @@ template<> std::string BaseSetting<SandboxMode>::to_string() const
else abort();
}
template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
{
AbstractSetting::toJSON(out);
}
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
args.addFlag({

View file

@ -80,89 +80,209 @@ public:
Setting<bool> keepGoing{this, false, "keep-going",
"Whether to keep building derivations when another build fails."};
Setting<bool> tryFallback{this, false, "fallback",
"Whether to fall back to building when substitution fails.",
Setting<bool> tryFallback{
this, false, "fallback",
R"(
If set to `true`, Nix will fall back to building from source if a
binary substitute fails. This is equivalent to the `--fallback`
flag. The default is `false`.
)",
{"build-fallback"}};
/* Whether to show build log output in real time. */
bool verboseBuild = true;
Setting<size_t> logLines{this, 10, "log-lines",
"If verbose-build is false, the number of lines of the tail of "
"If `verbose-build` is false, the number of lines of the tail of "
"the log to show if a build fails."};
MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
"Maximum number of parallel build jobs. \"auto\" means use number of cores.",
MaxBuildJobsSetting maxBuildJobs{
this, 1, "max-jobs",
R"(
This option defines the maximum number of jobs that Nix will try to
build in parallel. The default is `1`. The special value `auto`
causes Nix to use the number of CPUs in your system. `0` is useful
when using remote builders to prevent any local builds (except for
`preferLocalBuild` derivation attribute which executes locally
regardless). It can be overridden using the `--max-jobs` (`-j`)
command line switch.
)",
{"build-max-jobs"}};
Setting<unsigned int> buildCores{this, getDefaultCores(), "cores",
"Number of CPU cores to utilize in parallel within a build, "
"i.e. by passing this number to Make via '-j'. 0 means that the "
"number of actual CPU cores on the local host ought to be "
"auto-detected.", {"build-cores"}};
Setting<unsigned int> buildCores{
this, getDefaultCores(), "cores",
R"(
Sets the value of the `NIX_BUILD_CORES` environment variable in the
invocation of builders. Builders can use this variable at their
discretion to control the maximum amount of parallelism. For
instance, in Nixpkgs, if the derivation attribute
`enableParallelBuilding` is set to `true`, the builder passes the
`-jN` flag to GNU Make. It can be overridden using the `--cores`
command line switch and defaults to `1`. The value `0` means that
the builder should use all available CPU cores in the system.
)",
{"build-cores"}};
/* Read-only mode. Don't copy stuff to the store, don't change
the database. */
bool readOnlyMode = false;
Setting<std::string> thisSystem{this, SYSTEM, "system",
"The canonical Nix system name."};
Setting<std::string> thisSystem{
this, SYSTEM, "system",
R"(
This option specifies the canonical Nix system name of the current
installation, such as `i686-linux` or `x86_64-darwin`. Nix can only
build derivations whose `system` attribute equals the value
specified here. In general, it never makes sense to modify this
value from its default, since you can use it to lie about the
platform you are building on (e.g., perform a Mac OS build on a
Linux machine; the result would obviously be wrong). It only makes
sense if the Nix binaries can run on multiple platforms, e.g.,
universal binaries that run on `x86_64-linux` and `i686-linux`.
Setting<time_t> maxSilentTime{this, 0, "max-silent-time",
"The maximum time in seconds that a builer can go without "
"producing any output on stdout/stderr before it is killed. "
"0 means infinity.",
It defaults to the canonical Nix system name detected by `configure`
at build time.
)"};
Setting<time_t> maxSilentTime{
this, 0, "max-silent-time",
R"(
This option defines the maximum number of seconds that a builder can
go without producing any data on standard output or standard error.
This is useful (for instance in an automated build system) to catch
builds that are stuck in an infinite loop, or to catch remote builds
that are hanging due to network problems. It can be overridden using
the `--max-silent-time` command line switch.
The value `0` means that there is no timeout. This is also the
default.
)",
{"build-max-silent-time"}};
Setting<time_t> buildTimeout{this, 0, "timeout",
"The maximum duration in seconds that a builder can run. "
"0 means infinity.", {"build-timeout"}};
Setting<time_t> buildTimeout{
this, 0, "timeout",
R"(
This option defines the maximum number of seconds that a builder can
run. This is useful (for instance in an automated build system) to
catch builds that are stuck in an infinite loop but keep writing to
their standard output or standard error. It can be overridden using
the `--timeout` command line switch.
The value `0` means that there is no timeout. This is also the
default.
)",
{"build-timeout"}};
PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
"The path of the helper program that executes builds to remote machines."};
Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders",
"A semicolon-separated list of build machines, in the format of nix.machines."};
Setting<std::string> builders{
this, "@" + nixConfDir + "/machines", "builders",
"A semicolon-separated list of build machines, in the format of `nix.machines`."};
Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes",
"Whether build machines should use their own substitutes for obtaining "
"build dependencies if possible, rather than waiting for this host to "
"upload them."};
Setting<bool> buildersUseSubstitutes{
this, false, "builders-use-substitutes",
R"(
If set to `true`, Nix will instruct remote build machines to use
their own binary substitutes if available. In practical terms, this
means that remote hosts will fetch as many build dependencies as
possible from their own substitutes (e.g., from `cache.nixos.org`),
instead of waiting for this host to upload them all. This can
drastically reduce build times if the network connection between
this computer and the remote build host is slow.
)"};
Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
"Amount of reserved disk space for the garbage collector."};
Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
"Whether SQLite should use fsync()."};
Setting<bool> fsyncMetadata{
this, true, "fsync-metadata",
R"(
If set to `true`, changes to the Nix store metadata (in
`/nix/var/nix/db`) are synchronously flushed to disk. This improves
robustness in case of system crashes, but reduces performance. The
default is `true`.
)"};
Setting<bool> useSQLiteWAL{this, !isWSL1(), "use-sqlite-wal",
"Whether SQLite should use WAL mode."};
Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
"Whether to call sync() before registering a path as valid."};
"Whether to call `sync()` before registering a path as valid."};
Setting<bool> useSubstitutes{this, true, "substitute",
"Whether to use substitutes.",
Setting<bool> useSubstitutes{
this, true, "substitute",
R"(
If set to `true` (default), Nix will use binary substitutes if
available. This option can be disabled to force building from
source.
)",
{"build-use-substitutes"}};
Setting<std::string> buildUsersGroup{this, "", "build-users-group",
"The Unix group that contains the build users."};
Setting<std::string> buildUsersGroup{
this, "", "build-users-group",
R"(
This option specifies the Unix group containing the Nix build user
accounts. In multi-user Nix installations, builds should not be
performed by the Nix account since that would allow users to
arbitrarily modify the Nix store and database by supplying specially
crafted builders; and they cannot be performed by the calling user
since that would allow him/her to influence the build result.
Therefore, if this option is non-empty and specifies a valid group,
builds will be performed under the user accounts that are a member
of the group specified here (as listed in `/etc/group`). Those user
accounts should not be used for any other purpose!
Nix will never run two builds under the same user account at the
same time. This is to prevent an obvious security hole: a malicious
user writing a Nix expression that modifies the build result of a
legitimate Nix expression being built by another user. Therefore it
is good to have as many Nix build user accounts as you can spare.
(Remember: uids are cheap.)
The build users should have permission to create files in the Nix
store, but not delete them. Therefore, `/nix/store` should be owned
by the Nix account, its group should be the group specified here,
and its mode should be `1775`.
If the build users group is empty, builds will be performed under
the uid of the Nix process (that is, the uid of the caller if
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
)"};
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
Setting<bool> keepLog{this, true, "keep-build-log",
"Whether to store build logs.",
Setting<bool> keepLog{
this, true, "keep-build-log",
R"(
If set to `true` (the default), Nix will write the build log of a
derivation (i.e. the standard output and error of its builder) to
the directory `/nix/var/log/nix/drvs`. The build log can be
retrieved using the command `nix-store -l path`.
)",
{"build-keep-log"}};
Setting<bool> compressLog{this, true, "compress-build-log",
"Whether to compress logs.",
Setting<bool> compressLog{
this, true, "compress-build-log",
R"(
If set to `true` (the default), build logs written to
`/nix/var/log/nix/drvs` will be compressed on the fly using bzip2.
Otherwise, they will not be compressed.
)",
{"build-compress-log"}};
Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
"Maximum number of bytes a builder can write to stdout/stderr "
"before being killed (0 means no limit).",
Setting<unsigned long> maxLogSize{
this, 0, "max-build-log-size",
R"(
This option defines the maximum number of bytes that a builder can
write to its stdout/stderr. If the builder exceeds this limit, it's
killed. A value of `0` (the default) means that there is no limit.
)",
{"build-max-log-size"}};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
@ -173,57 +293,156 @@ public:
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
"Whether to check if new GC roots can in fact be found by the "
"garbage collector."};
Setting<bool> gcKeepOutputs{
this, false, "keep-outputs",
R"(
If `true`, the garbage collector will keep the outputs of
non-garbage derivations. If `false` (default), outputs will be
deleted unless they are GC roots themselves (or reachable from other
roots).
Setting<bool> gcKeepOutputs{this, false, "keep-outputs",
"Whether the garbage collector should keep outputs of live derivations.",
In general, outputs must be registered as roots separately. However,
even if the output of a derivation is registered as a root, the
collector will still delete store paths that are used only at build
time (e.g., the C compiler, or source tarballs downloaded from the
network). To prevent it from doing so, set this option to `true`.
)",
{"gc-keep-outputs"}};
Setting<bool> gcKeepDerivations{this, true, "keep-derivations",
"Whether the garbage collector should keep derivers of live paths.",
Setting<bool> gcKeepDerivations{
this, true, "keep-derivations",
R"(
If `true` (default), the garbage collector will keep the derivations
from which non-garbage store paths were built. If `false`, they will
be deleted unless explicitly registered as a root (or reachable from
other roots).
Keeping derivations around is useful for querying and traceability
(e.g., it allows you to ask with what dependencies or options a
store path was built), so by default this option is on. Turn it off
to save a bit of disk space (or a lot if `keep-outputs` is also
turned on).
)",
{"gc-keep-derivations"}};
Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
"Whether to automatically replace files with identical contents with hard links."};
Setting<bool> autoOptimiseStore{
this, false, "auto-optimise-store",
R"(
If set to `true`, Nix automatically detects files in the store
that have identical contents, and replaces them with hard links to
a single copy. This saves disk space. If set to `false` (the
default), you can still run `nix-store --optimise` to get rid of
duplicate files.
)"};
Setting<bool> envKeepDerivations{this, false, "keep-env-derivations",
"Whether to add derivations as a dependency of user environments "
"(to prevent them from being GCed).",
Setting<bool> envKeepDerivations{
this, false, "keep-env-derivations",
R"(
If `false` (default), derivations are not stored in Nix user
environments. That is, the derivations of any build-time-only
dependencies may be garbage-collected.
If `true`, when you add a Nix derivation to a user environment, the
path of the derivation is stored in the user environment. Thus, the
derivation will not be garbage-collected until the user environment
generation is deleted (`nix-env --delete-generations`). To prevent
build-time-only dependencies from being collected, you should also
turn on `keep-outputs`.
The difference between this option and `keep-derivations` is that
this one is sticky: it applies to any user environment created
while this option was enabled, while `keep-derivations` only applies
at the moment the garbage collector is run.
)",
{"env-keep-derivations"}};
/* Whether to lock the Nix client and worker to the same CPU. */
bool lockCPU;
Setting<SandboxMode> sandboxMode{this,
Setting<SandboxMode> sandboxMode{
this,
#if __linux__
smEnabled
#else
smDisabled
#endif
, "sandbox",
"Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
R"(
If set to `true`, builds will be performed in a *sandboxed
environment*, i.e., they're isolated from the normal file system
hierarchy and will only see their dependencies in the Nix store,
the temporary build directory, private versions of `/proc`,
`/dev`, `/dev/shm` and `/dev/pts` (on Linux), and the paths
configured with the `sandbox-paths` option. This is useful to
prevent undeclared dependencies on files in directories such as
`/usr/bin`. In addition, on Linux, builds run in private PID,
mount, network, IPC and UTS namespaces to isolate them from other
processes in the system (except that fixed-output derivations do
not run in private network namespace to ensure they can access the
network).
Currently, sandboxing only works on Linux and macOS. The use of a
sandbox requires that Nix is run as root (so you should use the
build users feature to perform the actual builds under different
users than root).
If this option is set to `relaxed`, then fixed-output derivations
and derivations that have the `__noChroot` attribute set to `true`
do not run in sandboxes.
The default is `true` on Linux and `false` on all other platforms.
)",
{"build-use-chroot", "build-use-sandbox"}};
Setting<PathSet> sandboxPaths{this, {}, "sandbox-paths",
"The paths to make available inside the build sandbox.",
Setting<PathSet> sandboxPaths{
this, {}, "sandbox-paths",
R"(
A list of paths bind-mounted into Nix sandbox environments. You can
use the syntax `target=source` to mount a path in a different
location in the sandbox; for instance, `/bin=/nix-bin` will mount
the path `/nix-bin` as `/bin` inside the sandbox. If *source* is
followed by `?`, then it is not an error if *source* does not exist;
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
only be mounted in the sandbox if it exists in the host filesystem.
Depending on how Nix was built, the default value for this option
may be empty or provide `/bin/sh` as a bind-mount of `bash`.
)",
{"build-chroot-dirs", "build-sandbox-paths"}};
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
"Additional paths to make available inside the build sandbox.",
Setting<PathSet> extraSandboxPaths{
this, {}, "extra-sandbox-paths",
R"(
A list of additional paths appended to `sandbox-paths`. Useful if
you want to extend its default value.
)",
{"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
Setting<size_t> buildRepeat{this, 0, "repeat",
"The number of times to repeat a build in order to verify determinism.",
Setting<size_t> buildRepeat{
this, 0, "repeat",
R"(
How many times to repeat builds to check whether they are
deterministic. The default value is 0. If the value is non-zero,
every build is repeated the specified number of times. If the
contents of any of the runs differs from the previous ones and
`enforce-determinism` is true, the build is rejected and the
resulting store paths are not registered as valid in Nix's
database.
)",
{"build-repeat"}};
#if __linux__
Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
"The size of /dev/shm in the build sandbox."};
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
R"(
This option determines the maximum size of the `tmpfs` filesystem
mounted on `/dev/shm` in Linux sandboxes. For the format, see the
description of the `size` option of `tmpfs` in `mount(8)`. The default
is `50%`.
)"};
Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
"The build directory inside the sandbox."};
@ -237,121 +456,411 @@ public:
"Whether to log Darwin sandbox access violations to the system log."};
#endif
Setting<bool> runDiffHook{this, false, "run-diff-hook",
"Whether to run the program specified by the diff-hook setting "
"repeated builds produce a different result. Typically used to "
"plug in diffoscope."};
Setting<bool> runDiffHook{
this, false, "run-diff-hook",
R"(
If true, enable the execution of the `diff-hook` program.
PathSetting diffHook{this, true, "", "diff-hook",
"A program that prints out the differences between the two paths "
"specified on its command line."};
When using the Nix daemon, `run-diff-hook` must be set in the
`nix.conf` configuration file, and cannot be passed at the command
line.
)"};
Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output."};
PathSetting diffHook{
this, true, "", "diff-hook",
R"(
Absolute path to an executable capable of diffing build
results. The hook is executed if `run-diff-hook` is true, and the
output of a build is known to not be the same. This program is not
executed to determine if two results are the same.
Setting<Strings> trustedPublicKeys{this,
The diff hook is executed by the same user and group who ran the
build. However, the diff hook does not have write access to the
store path just built.
The diff hook program receives the following parameters:
1. A path to the previous build's results
2. A path to the current build's results
3. The path to the build's derivation
4. The path to the build's scratch directory. This directory will
exist only if the build was run with `--keep-failed`.
The stderr and stdout output from the diff hook will not be
displayed to the user. Instead, it will print to the nix-daemon's
log.
When using the Nix daemon, `diff-hook` must be set in the `nix.conf`
configuration file, and cannot be passed at the command line.
)"};
Setting<bool> enforceDeterminism{
this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output. See `repeat`."};
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
"trusted-public-keys",
"Trusted public keys for secure substitution.",
R"(
A whitespace-separated list of public keys. When paths are copied
from another Nix store (such as a binary cache), they must be
signed with one of these keys. For example:
`cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=`.
)",
{"binary-cache-public-keys"}};
Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
"Secret keys with which to sign local builds."};
Setting<Strings> secretKeyFiles{
this, {}, "secret-key-files",
R"(
A whitespace-separated list of files containing secret (private)
keys. These are used to sign locally-built paths. They can be
generated using `nix-store --generate-binary-cache-key`. The
corresponding public key can be distributed to other users, who
can add it to `trusted-public-keys` in their `nix.conf`.
)"};
Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
"How long downloaded files are considered up-to-date."};
Setting<unsigned int> tarballTtl{
this, 60 * 60, "tarball-ttl",
R"(
The number of seconds a downloaded tarball is considered fresh. If
the cached tarball is stale, Nix will check whether it is still up
to date using the ETag header. Nix will download a new version if
the ETag header is unsupported, or the cached ETag doesn't match.
Setting<bool> requireSigs{this, true, "require-sigs",
"Whether to check that any non-content-addressed path added to the "
"Nix store has a valid signature (that is, one signed using a key "
"listed in 'trusted-public-keys'."};
Setting the TTL to `0` forces Nix to always check if the tarball is
up to date.
Setting<StringSet> extraPlatforms{this,
Nix caches tarballs in `$XDG_CACHE_HOME/nix/tarballs`.
Files fetched via `NIX_PATH`, `fetchGit`, `fetchMercurial`,
`fetchTarball`, and `fetchurl` respect this TTL.
)"};
Setting<bool> requireSigs{
this, true, "require-sigs",
R"(
If set to `true` (the default), any non-content-addressed path added
or copied to the Nix store (e.g. when substituting from a binary
cache) must have a valid signature, that is, be signed using one of
the keys listed in `trusted-public-keys` or `secret-key-files`. Set
to `false` to disable signature checking.
)"};
Setting<StringSet> extraPlatforms{
this,
std::string{SYSTEM} == "x86_64-linux" && !isWSL1() ? StringSet{"i686-linux"} : StringSet{},
"extra-platforms",
"Additional platforms that can be built on the local system. "
"These may be supported natively (e.g. armv7 on some aarch64 CPUs "
"or using hacks like qemu-user."};
R"(
Platforms other than the native one which this machine is capable of
building for. This can be useful for supporting additional
architectures on compatible machines: i686-linux can be built on
x86_64-linux machines (and the default for this setting reflects
this); armv7 is backwards-compatible with armv6 and armv5tel; some
aarch64 machines can also natively run 32-bit ARM code; and
qemu-user may be used to support non-native platforms (though this
may be slow and buggy). Most values for this are not enabled by
default because build systems will often misdetect the target
platform and generate incompatible code, so you may wish to
cross-check the results of using this option against proper
natively-built versions of your derivations.
)"};
Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
Setting<StringSet> systemFeatures{
this, getDefaultSystemFeatures(),
"system-features",
"Optional features that this system implements (like \"kvm\")."};
R"(
A set of system features supported by this machine, e.g. `kvm`.
Derivations can express a dependency on such features through the
derivation attribute `requiredSystemFeatures`. For example, the
attribute
Setting<Strings> substituters{this,
requiredSystemFeatures = [ "kvm" ];
ensures that the derivation can only be built on a machine with the
`kvm` feature.
This setting by default includes `kvm` if `/dev/kvm` is accessible,
and the pseudo-features `nixos-test`, `benchmark` and `big-parallel`
that are used in Nixpkgs to route builds to specific machines.
)"};
Setting<Strings> substituters{
this,
nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
"substituters",
"The URIs of substituters (such as https://cache.nixos.org/).",
R"(
A list of URLs of substituters, separated by whitespace. The default
is `https://cache.nixos.org`.
)",
{"binary-caches"}};
// FIXME: provide a way to add to option values.
Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
"Additional URIs of substituters.",
Setting<Strings> extraSubstituters{
this, {}, "extra-substituters",
R"(
Additional binary caches appended to those specified in
`substituters`. When used by unprivileged users, untrusted
substituters (i.e. those not listed in `trusted-substituters`) are
silently ignored.
)",
{"extra-binary-caches"}};
Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
"Disabled substituters that may be enabled via the substituters option by untrusted users.",
Setting<StringSet> trustedSubstituters{
this, {}, "trusted-substituters",
R"(
A list of URLs of substituters, separated by whitespace. These are
not used by default, but can be enabled by users of the Nix daemon
by specifying `--option substituters urls` on the command
line. Unprivileged users are only allowed to pass a subset of the
URLs listed in `substituters` and `trusted-substituters`.
)",
{"trusted-binary-caches"}};
Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
"Which users or groups are trusted to ask the daemon to do unsafe things."};
Setting<Strings> trustedUsers{
this, {"root"}, "trusted-users",
R"(
A list of names of users (separated by whitespace) that have
additional rights when connecting to the Nix daemon, such as the
ability to specify additional binary caches, or to import unsigned
NARs. You can also specify groups by prefixing them with `@`; for
instance, `@wheel` means all users in the `wheel` group. The default
is `root`.
Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl",
"The TTL in seconds for negative lookups in the disk cache i.e binary cache lookups that "
"return an invalid path result"};
> **Warning**
>
> Adding a user to `trusted-users` is essentially equivalent to
> giving that user root access to the system. For example, the user
> can set `sandbox-paths` and thereby obtain read access to
> directories that are otherwise inaccessible to them.
)"};
Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
"The TTL in seconds for positive lookups in the disk cache i.e binary cache lookups that "
"return a valid path result."};
Setting<unsigned int> ttlNegativeNarInfoCache{
this, 3600, "narinfo-cache-negative-ttl",
R"(
The TTL in seconds for negative lookups. If a store path is queried
from a substituter but was not found, there will be a negative
lookup cached in the local disk cache database for the specified
duration.
)"};
Setting<unsigned int> ttlPositiveNarInfoCache{
this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
R"(
The TTL in seconds for positive lookups. If a store path is queried
from a substituter, the result of the query will be cached in the
local disk cache database including some of the NAR metadata. The
default TTL is a month; setting a shorter TTL for positive lookups
can be useful for binary caches that have frequent garbage
collection, in which case having a more frequent cache invalidation
would prevent trying to pull the path again and failing with a hash
mismatch if the build isn't reproducible.
)"};
/* Who we trust to use the daemon in safe ways */
Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
"Which users or groups are allowed to connect to the daemon."};
Setting<Strings> allowedUsers{
this, {"*"}, "allowed-users",
R"(
A list of names of users (separated by whitespace) that are allowed
to connect to the Nix daemon. As with the `trusted-users` option,
you can specify groups by prefixing them with `@`. Also, you can
allow all users by specifying `*`. The default is `*`.
Note that trusted users are always allowed to connect.
)"};
Setting<bool> printMissing{this, true, "print-missing",
"Whether to print what paths need to be built or downloaded."};
Setting<std::string> preBuildHook{this, "",
"pre-build-hook",
"A program to run just before a build to set derivation-specific build settings."};
Setting<std::string> preBuildHook{
this, "", "pre-build-hook",
R"(
If set, the path to a program that can set extra derivation-specific
settings for this system. This is used for settings that can't be
captured by the derivation model itself and are too variable between
different versions of the same system to be hard-coded into nix.
Setting<std::string> postBuildHook{this, "", "post-build-hook",
"A program to run just after each successful build."};
The hook is passed the derivation path and, if sandboxes are
enabled, the sandbox directory. It can then modify the sandbox and
send a series of commands to modify various settings to stdout. The
currently recognized commands are:
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
"Path to the netrc file used to obtain usernames/passwords for downloads."};
- `extra-sandbox-paths`
Pass a list of files and directories to be included in the
sandbox for this build. One entry per line, terminated by an
empty line. Entries have the same format as `sandbox-paths`.
)"};
Setting<std::string> postBuildHook{
this, "", "post-build-hook",
R"(
Optional. The path to a program to execute after each build.
This option is only settable in the global `nix.conf`, or on the
command line by trusted users.
When using the nix-daemon, the daemon executes the hook as `root`.
If the nix-daemon is not involved, the hook runs as the user
executing the nix-build.
- The hook executes after an evaluation-time build.
- The hook does not execute on substituted paths.
- The hook's output always goes to the user's terminal.
- If the hook fails, the build succeeds but no further builds
execute.
- The hook executes synchronously, and blocks other builds from
progressing while it runs.
The program executes with no arguments. The program's environment
contains the following environment variables:
- `DRV_PATH`
The derivation for the built paths.
Example:
`/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv`
- `OUT_PATHS`
Output paths of the built derivation, separated by a space
character.
Example:
`/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev
/nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc
/nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info
/nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`.
)"};
Setting<std::string> netrcFile{
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
R"(
If set to an absolute path to a `netrc` file, Nix will use the HTTP
authentication credentials in this file when trying to download from
a remote host through HTTP or HTTPS. Defaults to
`$NIX_CONF_DIR/netrc`.
The `netrc` file consists of a list of accounts in the following
format:
machine my-machine
login my-username
password my-password
For the exact syntax, see [the `curl`
documentation](https://ec.haxx.se/usingcurl-netrc.html).
> **Note**
>
> This must be an absolute path, and `~` is not resolved. For
> example, `~/.netrc` won't resolve to your home directory's
> `.netrc`.
)"};
/* Path to the SSL CA file used */
Path caFile;
#if __linux__
Setting<bool> filterSyscalls{this, true, "filter-syscalls",
"Whether to prevent certain dangerous system calls, such as "
"creation of setuid/setgid files or adding ACLs or extended "
"attributes. Only disable this if you're aware of the "
"security implications."};
Setting<bool> filterSyscalls{
this, true, "filter-syscalls",
R"(
Whether to prevent certain dangerous system calls, such as
creation of setuid/setgid files or adding ACLs or extended
attributes. Only disable this if you're aware of the
security implications.
)"};
Setting<bool> allowNewPrivileges{this, false, "allow-new-privileges",
"Whether builders can acquire new privileges by calling programs with "
"setuid/setgid bits or with file capabilities."};
Setting<bool> allowNewPrivileges{
this, false, "allow-new-privileges",
R"(
(Linux-specific.) By default, builders on Linux cannot acquire new
privileges by calling setuid/setgid programs or programs that have
file capabilities. For example, programs such as `sudo` or `ping`
will fail. (Note that in sandbox builds, no such programs are
available unless you bind-mount them into the sandbox via the
`sandbox-paths` option.) You can allow the use of such programs by
enabling this option. This is impure and usually undesirable, but
may be useful in certain scenarios (e.g. to spin up containers or
set up userspace network interfaces in tests).
)"};
#endif
Setting<Strings> hashedMirrors{this, {}, "hashed-mirrors",
"A list of servers used by builtins.fetchurl to fetch files by hash."};
Setting<Strings> hashedMirrors{
this, {}, "hashed-mirrors",
R"(
A list of web servers used by `builtins.fetchurl` to obtain files by
hash. The default is `http://tarballs.nixos.org/`. Given a hash type
*ht* and a base-16 hash *h*, Nix will try to download the file from
*hashed-mirror*/*ht*/*h*. This allows files to be downloaded even if
they have disappeared from their original URI. For example, given
the default mirror `http://tarballs.nixos.org/`, when building the
derivation
```nix
builtins.fetchurl {
  url = "https://example.org/foo-1.2.3.tar.xz";
  sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae";
}
```
Nix will attempt to download this file from
`http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae`
first. If it is not available there, it will try the original URI.
)"};
Setting<uint64_t> minFree{this, 0, "min-free",
"Automatically run the garbage collector when free disk space drops below the specified amount."};
Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
"Stop deleting garbage when free disk space is above the specified amount."};
Setting<uint64_t> minFree{
this, 0, "min-free",
R"(
When free disk space in `/nix/store` drops below `min-free` during a
build, Nix performs a garbage-collection until `max-free` bytes are
available or there is no more garbage. A value of `0` (the default)
disables this feature.
)"};
Setting<uint64_t> maxFree{
this, std::numeric_limits<uint64_t>::max(), "max-free",
R"(
When a garbage collection is triggered by the `min-free` option, it
stops as soon as `max-free` bytes are available. The default is
infinity (i.e. delete all garbage).
)"};
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
"Number of seconds between checking free disk space."};
Setting<Paths> pluginFiles{this, {}, "plugin-files",
"Plugins to dynamically load at nix initialization time."};
Setting<Paths> pluginFiles{
this, {}, "plugin-files",
R"(
A list of plugin files to be loaded by Nix. Each of these files will
be dlopened by Nix, allowing them to affect execution through static
initialization. In particular, these plugins may construct static
instances of RegisterPrimOp to add new primops or constants to the
expression language, RegisterStoreImplementation to add new store
implementations, RegisterCommand to add new subcommands to the `nix`
command, and RegisterSetting to add new nix config settings. See the
constructors for those types for more details.
Since these files are loaded into the same address space as Nix
itself, they must be DSOs compatible with the instance of Nix
running at the time (i.e. compiled against the same headers, not
linked to any incompatible libraries). They should not be linked to
any Nix libs directly, as those will be available already at load
time.
If an entry in the list is a directory, all files in the directory
are loaded as plugins (non-recursively).
)"};
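To make the description above more concrete, here is a rough, hypothetical sketch of such a plugin. The `RegisterPrimOp` helper and the primop signature are assumptions based on the types named in this documentation and the Nix headers of this era; exact names and signatures may differ between versions.

```cpp
// Hypothetical plugin sketch (not part of this commit). Built as a DSO
// against the running Nix's headers and listed in `plugin-files`.
#include "primops.hh"

using namespace nix;

// A trivial primop that always evaluates to null.
static void prim_pluginNull(EvalState & state, const Pos & pos, Value ** args, Value & v)
{
    mkNull(v);
}

// Static initialization runs when the DSO is dlopened, registering the
// primop exactly as described for `plugin-files` above.
static RegisterPrimOp rp("pluginNull", 0, prim_pluginNull);
```

After pointing `plugin-files` at the resulting shared object, the primop should be reachable as `builtins.pluginNull` during evaluation, assuming the signatures above match the running Nix.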
Setting<std::string> githubAccessToken{this, "", "github-access-token",
"GitHub access token to get access to GitHub data through the GitHub API for github:<..> flakes."};
"GitHub access token to get access to GitHub data through the GitHub API for `github:<..>` flakes."};
Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
"Experimental Nix features to enable."};

View file

@ -2,12 +2,20 @@
#include "filetransfer.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"
#include "callback.hh"
namespace nix {
MakeError(UploadToHTTP, Error);
class HttpBinaryCacheStore : public BinaryCacheStore
struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
const std::string name() override { return "Http Binary Cache Store"; }
};
class HttpBinaryCacheStore : public BinaryCacheStore, public HttpBinaryCacheStoreConfig
{
private:
@ -24,9 +32,12 @@ private:
public:
HttpBinaryCacheStore(
const Params & params, const Path & _cacheUri)
: BinaryCacheStore(params)
, cacheUri(_cacheUri)
const std::string & scheme,
const Path & _cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStore(params)
, cacheUri(scheme + "://" + _cacheUri)
{
if (cacheUri.back() == '/')
cacheUri.pop_back();
@ -55,6 +66,13 @@ public:
}
}
static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp) ret.insert("file");
return ret;
}
protected:
void maybeDisable()
@ -85,7 +103,7 @@ protected:
checkEnabled();
try {
FileTransferRequest request(cacheUri + "/" + path);
FileTransferRequest request(makeRequest(path));
request.head = true;
getFileTransfer()->download(request);
return true;
@ -103,7 +121,7 @@ protected:
std::shared_ptr<std::basic_iostream<char>> istream,
const std::string & mimeType) override
{
auto req = FileTransferRequest(cacheUri + "/" + path);
auto req = makeRequest(path);
req.data = std::make_shared<string>(StreamToSourceAdapter(istream).drain());
req.mimeType = mimeType;
try {
@ -115,8 +133,11 @@ protected:
FileTransferRequest makeRequest(const std::string & path)
{
FileTransferRequest request(cacheUri + "/" + path);
return request;
return FileTransferRequest(
hasPrefix(path, "https://") || hasPrefix(path, "http://") || hasPrefix(path, "file://")
? path
: cacheUri + "/" + path);
}
void getFile(const std::string & path, Sink & sink) override
@ -159,18 +180,6 @@ protected:
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
if (std::string(uri, 0, 7) != "http://" &&
std::string(uri, 0, 8) != "https://" &&
(!forceHttp || std::string(uri, 0, 7) != "file://"))
return 0;
auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
store->init();
return store;
});
static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regStore;
}

View file

@ -6,21 +6,28 @@
#include "worker-protocol.hh"
#include "ssh.hh"
#include "derivations.hh"
#include "callback.hh"
namespace nix {
static std::string uriScheme = "ssh://";
struct LegacySSHStore : public Store
struct LegacySSHStoreConfig : virtual StoreConfig
{
const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{this, "", "remote-store", "URI of the store on the remote system"};
using StoreConfig::StoreConfig;
const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
const std::string name() override { return "Legacy SSH Store"; }
};
struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig
{
// Hack for getting remote build log output.
const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
// Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in
// the documentation
const Setting<int> logFD{(StoreConfig*) this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
struct Connection
{
@ -37,8 +44,11 @@ struct LegacySSHStore : public Store
SSHMaster master;
LegacySSHStore(const string & host, const Params & params)
: Store(params)
static std::set<std::string> uriSchemes() { return {"ssh"}; }
LegacySSHStore(const string & scheme, const string & host, const Params & params)
: StoreConfig(params)
, Store(params)
, host(host)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
@ -84,7 +94,7 @@ struct LegacySSHStore : public Store
string getUri() override
{
return uriScheme + host;
return *uriSchemes().begin() + "://" + host;
}
void queryPathInfoUncached(const StorePath & path,
@ -325,12 +335,6 @@ public:
}
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
});
static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regStore;
}

View file

@ -4,7 +4,14 @@
namespace nix {
class LocalBinaryCacheStore : public BinaryCacheStore
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
const std::string name() override { return "Local Binary Cache Store"; }
};
class LocalBinaryCacheStore : public BinaryCacheStore, public virtual LocalBinaryCacheStoreConfig
{
private:
@ -13,8 +20,11 @@ private:
public:
LocalBinaryCacheStore(
const Params & params, const Path & binaryCacheDir)
: BinaryCacheStore(params)
const std::string scheme,
const Path & binaryCacheDir,
const Params & params)
: StoreConfig(params)
, BinaryCacheStore(params)
, binaryCacheDir(binaryCacheDir)
{
}
@ -26,6 +36,8 @@ public:
return "file://" + binaryCacheDir;
}
static std::set<std::string> uriSchemes();
protected:
bool fileExists(const std::string & path) override;
@ -85,16 +97,14 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
return pathExists(binaryCacheDir + "/" + path);
}
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
std::set<std::string> LocalBinaryCacheStore::uriSchemes()
{
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
std::string(uri, 0, 7) != "file://")
return 0;
auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
store->init();
return store;
});
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1")
return {};
else
return {"file"};
}
static RegisterStoreImplementation<LocalBinaryCacheStore, LocalBinaryCacheStoreConfig> regStore;
}

View file

@ -6,6 +6,7 @@
#include "derivations.hh"
#include "nar-info.hh"
#include "references.hh"
#include "callback.hh"
#include <iostream>
#include <algorithm>
@ -42,7 +43,8 @@ namespace nix {
LocalStore::LocalStore(const Params & params)
: Store(params)
: StoreConfig(params)
, Store(params)
, LocalFSStore(params)
, realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
"physical path to the Nix store"}
@ -578,13 +580,32 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
envHasRightPath(path, i.first);
},
[&](DerivationOutputCAFloating _) {
throw UnimplementedError("floating CA output derivations are not yet implemented");
/* Nothing to check */
},
}, i.second.output);
}
}
void LocalStore::linkDeriverToPath(const StorePath & deriver, const string & outputName, const StorePath & output)
{
auto state(_state.lock());
return linkDeriverToPath(*state, queryValidPathId(*state, deriver), outputName, output);
}
void LocalStore::linkDeriverToPath(State & state, uint64_t deriver, const string & outputName, const StorePath & output)
{
retrySQLite<void>([&]() {
state.stmtAddDerivationOutput.use()
(deriver)
(outputName)
(printStorePath(output))
.exec();
});
}
uint64_t LocalStore::addValidPath(State & state,
const ValidPathInfo & info, bool checkOutputs)
{
@ -618,12 +639,11 @@ uint64_t LocalStore::addValidPath(State & state,
registration above is undone. */
if (checkOutputs) checkDerivationOutputs(info.path, drv);
for (auto & i : drv.outputsAndPaths(*this)) {
state.stmtAddDerivationOutput.use()
(id)
(i.first)
(printStorePath(i.second.second))
.exec();
for (auto & i : drv.outputsAndOptPaths(*this)) {
/* Floating CA derivations have indeterminate output paths until
they are built, so don't register anything in that case */
if (i.second.second)
linkDeriverToPath(state, id, i.first, *i.second.second);
}
}

View file

@ -23,9 +23,6 @@ namespace nix {
const int nixSchemaVersion = 10;
struct Derivation;
struct OptimiseStats
{
unsigned long filesLinked = 0;
@ -33,8 +30,19 @@ struct OptimiseStats
uint64_t blocksFreed = 0;
};
struct LocalStoreConfig : virtual LocalFSStoreConfig
{
using LocalFSStoreConfig::LocalFSStoreConfig;
class LocalStore : public LocalFSStore
Setting<bool> requireSigs{(StoreConfig*) this,
settings.requireSigs,
"require-sigs", "whether store paths should have a trusted signature on import"};
const std::string name() override { return "Local Store"; }
};
class LocalStore : public LocalFSStore, public virtual LocalStoreConfig
{
private:
@ -98,10 +106,6 @@ public:
private:
Setting<bool> requireSigs{(Store*) this,
settings.requireSigs,
"require-sigs", "whether store paths should have a trusted signature on import"};
const PublicKeys & getPublicKeys();
public:
@ -282,6 +286,11 @@ private:
specified by the secret-key-files option. */
void signPathInfo(ValidPathInfo & info);
/* Register the store path 'output' as the output named 'outputName' of
derivation 'deriver'. */
void linkDeriverToPath(const StorePath & deriver, const string & outputName, const StorePath & output);
void linkDeriverToPath(State & state, uint64_t deriver, const string & outputName, const StorePath & output);
Path getRealStoreDir() override { return realStoreDir; }
void createUser(const std::string & userName, uid_t userId) override;

View file

@ -5,7 +5,7 @@
#include "store-api.hh"
#include "thread-pool.hh"
#include "topo-sort.hh"
#include "callback.hh"
namespace nix {
@ -203,17 +203,24 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
return;
}
PathSet invalid;
/* true for regular derivations, and CA derivations for which we
have a trust mapping for all wanted outputs. */
auto knownOutputPaths = true;
for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(path.path)) {
if (!pathOpt) {
knownOutputPaths = false;
break;
}
if (wantOutput(outputName, path.outputs) && !isValidPath(*pathOpt))
invalid.insert(printStorePath(*pathOpt));
}
if (knownOutputPaths && invalid.empty()) return;
auto drv = make_ref<Derivation>(derivationFromPath(path.path));
ParsedDerivation parsedDrv(StorePath(path.path), *drv);
PathSet invalid;
for (auto & j : drv->outputsAndPaths(*this))
if (wantOutput(j.first, path.outputs)
&& !isValidPath(j.second.second))
invalid.insert(printStorePath(j.second.second));
if (invalid.empty()) return;
if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto & output : invalid)
pool.enqueue(std::bind(checkOutput, printStorePath(path.path), drv, output, drvState));

View file

@ -1,10 +1,18 @@
#include "names.hh"
#include "util.hh"
#include <regex>
namespace nix {
struct Regex
{
std::regex regex;
};
DrvName::DrvName()
{
name = "";
@ -30,11 +38,18 @@ DrvName::DrvName(std::string_view s) : hits(0)
}
DrvName::~DrvName()
{ }
bool DrvName::matches(DrvName & n)
{
if (name != "*") {
if (!regex) regex = std::unique_ptr<std::regex>(new std::regex(name, std::regex::extended));
if (!std::regex_match(n.name, *regex)) return false;
if (!regex) {
regex = std::make_unique<Regex>();
regex->regex = std::regex(name, std::regex::extended);
}
if (!std::regex_match(n.name, regex->regex)) return false;
}
if (version != "" && version != n.version) return false;
return true;
@ -99,7 +114,7 @@ DrvNames drvNamesFromArgs(const Strings & opArgs)
{
DrvNames result;
for (auto & i : opArgs)
result.push_back(DrvName(i));
result.emplace_back(i);
return result;
}

View file

@ -3,10 +3,11 @@
#include <memory>
#include "types.hh"
#include <regex>
namespace nix {
struct Regex;
struct DrvName
{
string fullName;
@ -16,10 +17,12 @@ struct DrvName
DrvName();
DrvName(std::string_view s);
~DrvName();
bool matches(DrvName & n);
private:
std::unique_ptr<std::regex> regex;
std::unique_ptr<Regex> regex;
};
typedef list<DrvName> DrvNames;

View file

@ -49,7 +49,8 @@ struct NarAccessor : public FSAccessor
: acc(acc), source(source)
{ }
void createMember(const Path & path, NarMember member) {
void createMember(const Path & path, NarMember member)
{
size_t level = std::count(path.begin(), path.end(), '/');
while (parents.size() > level) parents.pop();

View file

@ -72,7 +72,7 @@ static void makeName(const Path & profile, GenerationNumber num,
}
Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
{
/* The new generation number should be higher than the
previous ones. */
@ -82,7 +82,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
if (gens.size() > 0) {
Generation last = gens.back();
if (readLink(last.path) == outPath) {
if (readLink(last.path) == store->printStorePath(outPath)) {
/* We only create a new generation symlink if it differs
from the last one.
@ -105,7 +105,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
user environment etc. we've just built. */
Path generation;
makeName(profile, num + 1, generation);
store->addPermRoot(store->parseStorePath(outPath), generation, false, true);
store->addPermRoot(outPath, generation);
return generation;
}

View file

@ -8,6 +8,8 @@
namespace nix {
class StorePath;
typedef unsigned int GenerationNumber;
@ -28,7 +30,7 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
class LocalFSStore;
Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);
Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath);
void deleteGeneration(const Path & profile, GenerationNumber gen);

View file

@ -79,9 +79,17 @@ void RefScanSink::operator () (const unsigned char * data, size_t len)
std::pair<PathSet, HashResult> scanForReferences(const string & path,
const PathSet & refs)
{
RefScanSink refsSink;
HashSink hashSink { htSHA256 };
TeeSink sink { refsSink, hashSink };
auto found = scanForReferences(hashSink, path, refs);
auto hash = hashSink.finish();
return std::pair<PathSet, HashResult>(found, hash);
}
PathSet scanForReferences(Sink & toTee,
const string & path, const PathSet & refs)
{
RefScanSink refsSink;
TeeSink sink { refsSink, toTee };
std::map<string, Path> backMap;
/* For efficiency (and a higher hit rate), just search for the
@ -111,9 +119,7 @@ std::pair<PathSet, HashResult> scanForReferences(const string & path,
found.insert(j->second);
}
auto hash = hashSink.finish();
return std::pair<PathSet, HashResult>(found, hash);
return found;
}

View file

@ -7,6 +7,8 @@ namespace nix {
std::pair<PathSet, HashResult> scanForReferences(const Path & path, const PathSet & refs);
PathSet scanForReferences(Sink & toTee, const Path & path, const PathSet & refs);
struct RewritingSink : Sink
{
std::string from, to, prev;

View file

@ -1,5 +1,6 @@
#include "serialise.hh"
#include "util.hh"
#include "remote-fs-accessor.hh"
#include "remote-store.hh"
#include "worker-protocol.hh"
#include "archive.hh"
@ -9,6 +10,7 @@
#include "pool.hh"
#include "finally.hh"
#include "logging.hh"
#include "callback.hh"
#include <sys/types.h>
#include <sys/stat.h>
@ -57,7 +59,7 @@ void WorkerProto<ContentAddress>::write(const Store & store, Sink & out, const C
std::optional<StorePath> WorkerProto<std::optional<StorePath>>::read(const Store & store, Source & from)
{
auto s = readString(from);
auto s = readString(from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
@ -81,9 +83,19 @@ void WorkerProto<std::optional<ContentAddress>>::write(const Store & store, Sink
/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
: Store(params)
, RemoteStoreConfig(params)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() { return openConnectionWrapper(); },
[this]() {
auto conn = openConnectionWrapper();
try {
initConnection(*conn);
} catch (...) {
failed = true;
throw;
}
return conn;
},
[this](const ref<Connection> & r) {
return
r->to.good()
@ -110,19 +122,21 @@ ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
UDSRemoteStore::UDSRemoteStore(const Params & params)
: Store(params)
: StoreConfig(params)
, Store(params)
, LocalFSStore(params)
, RemoteStore(params)
{
}
UDSRemoteStore::UDSRemoteStore(std::string socket_path, const Params & params)
: Store(params)
, LocalFSStore(params)
, RemoteStore(params)
, path(socket_path)
UDSRemoteStore::UDSRemoteStore(
const std::string scheme,
std::string socket_path,
const Params & params)
: UDSRemoteStore(params)
{
path.emplace(socket_path);
}
@ -166,8 +180,6 @@ ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
conn->startTime = std::chrono::steady_clock::now();
initConnection(*conn);
return conn;
}
@ -275,9 +287,9 @@ struct ConnectionHandle
RemoteStore::Connection * operator -> () { return &*handle; }
void processStderr(Sink * sink = 0, Source * source = 0)
void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
{
auto ex = handle->processStderr(sink, source);
auto ex = handle->processStderr(sink, source, flush);
if (ex) {
daemonException = true;
std::rethrow_exception(ex);
@ -467,10 +479,26 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivationOutputMap(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopQueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
auto conn(getConnection());
conn->to << wopQueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
} else {
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
// under-approximation as it only returns the paths that can be inferred
// from the derivation itself (and not the ones that are known because
// they have been built), but as old stores don't handle floating-CA
// derivations this shouldn't matter
auto derivation = readDerivation(path);
auto outputsWithOptPaths = derivation.outputsAndOptPaths(*this);
std::map<std::string, std::optional<StorePath>> ret;
for (auto & [outputName, outputAndPath] : outputsWithOptPaths) {
ret.emplace(outputName, outputAndPath.second);
}
return ret;
}
}
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
@ -525,6 +553,8 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
conn->to.flush();
std::exception_ptr ex;
struct FramedSink : BufferedSink
@ -564,7 +594,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
std::thread stderrThread([&]()
{
try {
conn.processStderr();
conn.processStderr(nullptr, nullptr, false);
} catch (...) {
ex = std::current_exception();
}
@ -856,6 +886,18 @@ RemoteStore::Connection::~Connection()
}
}
void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->to << wopNarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}
ref<FSAccessor> RemoteStore::getFSAccessor()
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
static Logger::Fields readFields(Source & from)
{
@ -874,9 +916,10 @@ static Logger::Fields readFields(Source & from)
}
std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source)
std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush)
{
to.flush();
if (flush)
to.flush();
while (true) {
@ -937,14 +980,6 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source *
return nullptr;
}
static std::string uriScheme = "unix://";
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<UDSRemoteStore>(std::string(uri, uriScheme.size()), params);
});
static RegisterStoreImplementation<UDSRemoteStore, UDSRemoteStoreConfig> regStore;
}

View file

@ -16,19 +16,23 @@ struct FdSource;
template<typename T> class Pool;
struct ConnectionHandle;
struct RemoteStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
const Setting<int> maxConnections{(StoreConfig*) this, 1,
"max-connections", "maximum number of concurrent connections to the Nix daemon"};
const Setting<unsigned int> maxConnectionAge{(StoreConfig*) this, std::numeric_limits<unsigned int>::max(),
"max-connection-age", "number of seconds to reuse a connection"};
};
/* FIXME: RemoteStore is a misnomer - should be something like
DaemonStore. */
class RemoteStore : public virtual Store
class RemoteStore : public virtual Store, public virtual RemoteStoreConfig
{
public:
const Setting<int> maxConnections{(Store*) this, 1,
"max-connections", "maximum number of concurrent connections to the Nix daemon"};
const Setting<unsigned int> maxConnectionAge{(Store*) this, std::numeric_limits<unsigned int>::max(),
"max-connection-age", "number of seconds to reuse a connection"};
virtual bool sameMachine() = 0;
RemoteStore(const Params & params);
@ -102,8 +106,6 @@ public:
void flushBadConnections();
protected:
struct Connection
{
AutoCloseFD fd;
@ -114,11 +116,13 @@ protected:
virtual ~Connection();
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
};
ref<Connection> openConnectionWrapper();
protected:
virtual ref<Connection> openConnection() = 0;
void initConnection(Connection & conn);
@ -131,24 +135,54 @@ protected:
friend struct ConnectionHandle;
virtual ref<FSAccessor> getFSAccessor() override;
virtual void narFromPath(const StorePath & path, Sink & sink) override;
private:
std::atomic_bool failed{false};
};
class UDSRemoteStore : public LocalFSStore, public RemoteStore
struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig
{
UDSRemoteStoreConfig(const Store::Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, RemoteStoreConfig(params)
{
}
UDSRemoteStoreConfig()
: UDSRemoteStoreConfig(Store::Params({}))
{
}
const std::string name() override { return "Local Daemon Store"; }
};
class UDSRemoteStore : public LocalFSStore, public RemoteStore, public virtual UDSRemoteStoreConfig
{
public:
UDSRemoteStore(const Params & params);
UDSRemoteStore(std::string path, const Params & params);
UDSRemoteStore(const std::string scheme, std::string path, const Params & params);
std::string getUri() override;
static std::set<std::string> uriSchemes()
{ return {"unix"}; }
bool sameMachine() override
{ return true; }
ref<FSAccessor> getFSAccessor() override
{ return LocalFSStore::getFSAccessor(); }
void narFromPath(const StorePath & path, Sink & sink) override
{ LocalFSStore::narFromPath(path, sink); }
private:
ref<RemoteStore::Connection> openConnection() override;

View file

@ -172,20 +172,26 @@ S3Helper::FileTransferResult S3Helper::getObject(
return res;
}
struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
const Setting<std::string> scheme{this, "", "scheme", "The scheme to use for S3 requests, https by default."};
const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
const Setting<std::string> profile{(StoreConfig*) this, "", "profile", "The name of the AWS configuration profile to use."};
const Setting<std::string> region{(StoreConfig*) this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
const Setting<std::string> scheme{(StoreConfig*) this, "", "scheme", "The scheme to use for S3 requests, https by default."};
const Setting<std::string> endpoint{(StoreConfig*) this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
const Setting<std::string> narinfoCompression{(StoreConfig*) this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{(StoreConfig*) this, "", "ls-compression", "compression method for .ls files"};
const Setting<std::string> logCompression{(StoreConfig*) this, "", "log-compression", "compression method for log/* files"};
const Setting<bool> multipartUpload{
this, false, "multipart-upload", "whether to use multi-part uploads"};
(StoreConfig*) this, false, "multipart-upload", "whether to use multi-part uploads"};
const Setting<uint64_t> bufferSize{
this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
(StoreConfig*) this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
const std::string name() override { return "S3 Binary Cache Store"; }
};
struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore, virtual S3BinaryCacheStoreConfig
{
std::string bucketName;
Stats stats;
@ -193,8 +199,11 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
S3Helper s3Helper;
S3BinaryCacheStoreImpl(
const Params & params, const std::string & bucketName)
: S3BinaryCacheStore(params)
const std::string & scheme,
const std::string & bucketName,
const Params & params)
: StoreConfig(params)
, S3BinaryCacheStore(params)
, bucketName(bucketName)
, s3Helper(profile, region, scheme, endpoint)
{
@ -426,17 +435,11 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
return paths;
}
static std::set<std::string> uriSchemes() { return {"s3"}; }
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, 5) != "s3://") return 0;
auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
store->init();
return store;
});
static RegisterStoreImplementation<S3BinaryCacheStoreImpl, S3BinaryCacheStoreConfig> regStore;
}

View file

@ -8,19 +8,25 @@
namespace nix {
static std::string uriScheme = "ssh-ng://";
struct SSHStoreConfig : virtual RemoteStoreConfig
{
using RemoteStoreConfig::RemoteStoreConfig;
class SSHStore : public RemoteStore
const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"};
const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
const std::string name() override { return "SSH Store"; }
};
class SSHStore : public virtual RemoteStore, public virtual SSHStoreConfig
{
public:
const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(Store*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"};
const Setting<std::string> remoteStore{(Store*) this, "", "remote-store", "URI of the store on the remote system"};
SSHStore(const std::string & host, const Params & params)
: Store(params)
SSHStore(const std::string & scheme, const std::string & host, const Params & params)
: StoreConfig(params)
, Store(params)
, RemoteStore(params)
, host(host)
, master(
@ -32,18 +38,16 @@ public:
{
}
static std::set<std::string> uriSchemes() { return {"ssh-ng"}; }
std::string getUri() override
{
return uriScheme + host;
return *uriSchemes().begin() + "://" + host;
}
bool sameMachine() override
{ return false; }
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
private:
struct Connection : RemoteStore::Connection
@ -68,19 +72,6 @@ private:
};
};
void SSHStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->to << wopNarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}
ref<FSAccessor> SSHStore::getFSAccessor()
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
ref<RemoteStore::Connection> SSHStore::openConnection()
{
auto conn = make_ref<Connection>();
@ -89,16 +80,9 @@ ref<RemoteStore::Connection> SSHStore::openConnection()
+ (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
initConnection(*conn);
return conn;
}
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
});
static RegisterStoreImplementation<SSHStore, SSHStoreConfig> regStore;
}

View file

@ -1,16 +1,14 @@
#include "crypto.hh"
#include "fs-accessor.hh"
#include "globals.hh"
#include "store-api.hh"
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "json.hh"
#include "derivations.hh"
#include "url.hh"
#include "archive.hh"
#include <future>
#include "callback.hh"
namespace nix {
@ -140,21 +138,28 @@ StorePathWithOutputs Store::followLinksToStorePathWithOutputs(std::string_view p
*/
StorePath Store::makeStorePath(const string & type,
const Hash & hash, std::string_view name) const
StorePath Store::makeStorePath(std::string_view type,
std::string_view hash, std::string_view name) const
{
/* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
string s = type + ":" + hash.to_string(Base16, true) + ":" + storeDir + ":" + std::string(name);
string s = std::string { type } + ":" + std::string { hash }
+ ":" + storeDir + ":" + std::string { name };
auto h = compressHash(hashString(htSHA256, s), 20);
return StorePath(h, name);
}
StorePath Store::makeOutputPath(const string & id,
StorePath Store::makeStorePath(std::string_view type,
const Hash & hash, std::string_view name) const
{
return makeStorePath("output:" + id, hash,
std::string(name) + (id == "out" ? "" : "-" + id));
return makeStorePath(type, hash.to_string(Base16, true), name);
}
StorePath Store::makeOutputPath(std::string_view id,
const Hash & hash, std::string_view name) const
{
return makeStorePath("output:" + std::string { id }, hash, outputPathName(name, id));
}
@ -339,7 +344,7 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
Store::Store(const Params & params)
: Config(params)
: StoreConfig(params)
, state({(size_t) pathInfoCacheSize})
{
}
@ -983,6 +988,25 @@ Strings ValidPathInfo::shortRefs() const
}
Derivation Store::derivationFromPath(const StorePath & drvPath)
{
ensurePath(drvPath);
return readDerivation(drvPath);
}
Derivation Store::readDerivation(const StorePath & drvPath)
{
auto accessor = getFSAccessor();
try {
return parseDerivation(*this,
accessor->readFile(printStorePath(drvPath)),
Derivation::nameFromPath(drvPath));
} catch (FormatError & e) {
throw Error("error parsing derivation '%s': %s", printStorePath(drvPath), e.msg());
}
}
}
@ -992,9 +1016,6 @@ Strings ValidPathInfo::shortRefs() const
namespace nix {
RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0;
/* Split URI into protocol+hierarchy part and its parameter set. */
std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri_)
{
@ -1008,24 +1029,6 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri_
return {uri, params};
}
ref<Store> openStore(const std::string & uri_,
const Store::Params & extraParams)
{
auto [uri, uriParams] = splitUriAndParams(uri_);
auto params = extraParams;
params.insert(uriParams.begin(), uriParams.end());
for (auto fun : *RegisterStoreImplementation::implementations) {
auto store = fun(uri, params);
if (store) {
store->warnUnknownSettings();
return ref<Store>(store);
}
}
throw Error("don't know how to open Nix store '%s'", uri);
}
static bool isNonUriPath(const std::string & spec) {
return
// is not a URL
@ -1035,44 +1038,62 @@ static bool isNonUriPath(const std::string & spec) {
&& spec.find("/") != std::string::npos;
}
StoreType getStoreType(const std::string & uri, const std::string & stateDir)
std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Params & params)
{
if (uri == "daemon") {
return tDaemon;
} else if (uri == "local" || isNonUriPath(uri)) {
return tLocal;
} else if (uri == "" || uri == "auto") {
if (uri == "" || uri == "auto") {
auto stateDir = get(params, "state").value_or(settings.nixStateDir);
if (access(stateDir.c_str(), R_OK | W_OK) == 0)
return tLocal;
return std::make_shared<LocalStore>(params);
else if (pathExists(settings.nixDaemonSocketFile))
return tDaemon;
return std::make_shared<UDSRemoteStore>(params);
else
return tLocal;
return std::make_shared<LocalStore>(params);
} else if (uri == "daemon") {
return std::make_shared<UDSRemoteStore>(params);
} else if (uri == "local") {
return std::make_shared<LocalStore>(params);
} else if (isNonUriPath(uri)) {
Store::Params params2 = params;
params2["root"] = absPath(uri);
return std::make_shared<LocalStore>(params2);
} else {
return tOther;
return nullptr;
}
}
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
ref<Store> openStore(const std::string & uri_,
const Store::Params & extraParams)
{
switch (getStoreType(uri, get(params, "state").value_or(settings.nixStateDir))) {
case tDaemon:
return std::shared_ptr<Store>(std::make_shared<UDSRemoteStore>(params));
case tLocal: {
Store::Params params2 = params;
if (isNonUriPath(uri)) {
params2["root"] = absPath(uri);
}
return std::shared_ptr<Store>(std::make_shared<LocalStore>(params2));
}
default:
return nullptr;
}
});
auto params = extraParams;
try {
auto parsedUri = parseURL(uri_);
params.insert(parsedUri.query.begin(), parsedUri.query.end());
auto baseURI = parsedUri.authority.value_or("") + parsedUri.path;
for (auto implem : *Implementations::registered) {
if (implem.uriSchemes.count(parsedUri.scheme)) {
auto store = implem.create(parsedUri.scheme, baseURI, params);
if (store) {
store->init();
store->warnUnknownSettings();
return ref<Store>(store);
}
}
}
}
catch (BadURL &) {
auto [uri, uriParams] = splitUriAndParams(uri_);
params.insert(uriParams.begin(), uriParams.end());
if (auto store = openFromNonUri(uri, params)) {
store->warnUnknownSettings();
return ref<Store>(store);
}
}
throw Error("don't know how to open Nix store '%s'", uri_);
}
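As a usage sketch (not part of this commit), the new dispatch can be exercised roughly as follows; the URIs, host names, and parameter values are illustrative only.

```cpp
#include "store-api.hh"

using namespace nix;

void exampleOpenStores()
{
    // A URI with a registered scheme ("ssh-ng" here) is routed to the
    // matching StoreFactory; query parameters become store settings.
    auto remote = openStore("ssh-ng://builder.example.org?compress=true");

    // Non-URI specs ("auto", "daemon", "local", or a plain path) fall
    // back to openFromNonUri().
    auto local = openStore("auto");

    // Extra parameters can also be supplied programmatically.
    Store::Params params = {{"max-connections", "4"}};
    auto daemon = openStore("daemon", params);
}
```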
std::list<ref<Store>> getDefaultSubstituters()
{
@ -1106,5 +1127,6 @@ std::list<ref<Store>> getDefaultSubstituters()
return stores;
}
std::vector<StoreFactory> * Implementations::registered = 0;
}

View file

@ -24,6 +24,31 @@
namespace nix {
/**
* About the class hierarchy of the store implementations:
*
* Each store type `Foo` consists of two classes:
*
* 1. A class `FooConfig : virtual StoreConfig` that contains the configuration
* for the store
*
* It should only contain members of type `const Setting<T>` (or subclasses
* of it) and inherit the constructors of `StoreConfig`
* (`using StoreConfig::StoreConfig`).
*
* 2. A class `Foo : virtual Store, virtual FooConfig` that contains the
* implementation of the store.
*
* This class is expected to have a constructor `Foo(const Params & params)`
* that calls `StoreConfig(params)` (otherwise you will hit an assertion
* failure when trying to instantiate it).
*
* You can then register the new store using:
*
* ```cpp
* static RegisterStoreImplementation<Foo, FooConfig> regStore;
* ```
*/
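A hypothetical illustration of this pattern (not part of this commit): `DummyStore`, its `dummy` scheme, and its single setting are invented for the example, and the pure-virtual `Store` methods that a real implementation must override are elided.

```cpp
struct DummyStoreConfig : virtual StoreConfig
{
    using StoreConfig::StoreConfig;

    const Setting<std::string> greeting{(StoreConfig*) this, "hello", "greeting",
        "an example store-specific setting"};

    const std::string name() override { return "Dummy Store"; }
};

struct DummyStore : virtual Store, virtual DummyStoreConfig
{
    DummyStore(const std::string & scheme, const std::string & uri, const Params & params)
        : StoreConfig(params)   // required; the StoreConfig default constructor asserts
        , Store(params)
    { }

    static std::set<std::string> uriSchemes() { return {"dummy"}; }

    std::string getUri() override { return "dummy://"; }

    // ... the remaining pure-virtual Store methods would be overridden here ...
};

// Registration, as done by the store implementations touched in this commit:
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
```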
MakeError(SubstError, Error);
MakeError(BuildError, Error); // denotes a permanent build failure
@ -33,6 +58,7 @@ MakeError(SubstituteGone, Error);
MakeError(SubstituterDisabled, Error);
MakeError(BadStorePath, Error);
MakeError(InvalidStoreURI, Error);
class FSAccessor;
class NarInfoDiskCache;
@ -144,12 +170,31 @@ struct BuildResult
}
};
class Store : public std::enable_shared_from_this<Store>, public Config
struct StoreConfig : public Config
{
public:
using Config::Config;
typedef std::map<std::string, std::string> Params;
/**
* When constructing a store implementation, we pass in a map `params` of
* parameters that's supposed to initialize the associated config.
* To do that, we must use the `StoreConfig(StringMap & params)`
* constructor, so we'd like to `delete` its default constructor to enforce
* it.
*
* However, actually deleting it means that all the subclasses of
* `StoreConfig` will have their default constructor deleted (because it's
* supposed to call the deleted default constructor of `StoreConfig`). But
* because we're always using virtual inheritance, the constructors of
* child classes will never implicitly call this one, so deleting it will
* be more painful than anything else.
*
* So we `assert(false)` here to ensure at runtime that the right
* constructor is always called without having to redefine a custom
* constructor for each `*Config` class.
*/
StoreConfig() { assert(false); }
virtual const std::string name() = 0;
const PathSetting storeDir_{this, false, settings.nixStore,
"store", "path to the Nix store"};
@ -167,6 +212,14 @@ public:
"system-features",
"Optional features that the system this store builds on implements (like \"kvm\")."};
};
class Store : public std::enable_shared_from_this<Store>, public virtual StoreConfig
{
public:
typedef std::map<std::string, std::string> Params;
protected:
struct PathInfoCacheValue {
@ -200,6 +253,11 @@ protected:
Store(const Params & params);
public:
/**
* Perform any necessary effectful operation to make the store up and
* running
*/
virtual void init() {};
virtual ~Store() { }
@ -247,10 +305,12 @@ public:
StorePathWithOutputs followLinksToStorePathWithOutputs(std::string_view path) const;
/* Constructs a unique store path name. */
StorePath makeStorePath(const string & type,
StorePath makeStorePath(std::string_view type,
std::string_view hash, std::string_view name) const;
StorePath makeStorePath(std::string_view type,
const Hash & hash, std::string_view name) const;
StorePath makeOutputPath(const string & id,
StorePath makeOutputPath(std::string_view id,
const Hash & hash, std::string_view name) const;
StorePath makeFixedOutputPath(FileIngestionMethod method,
@ -624,22 +684,25 @@ protected:
};
class LocalFSStore : public virtual Store
struct LocalFSStoreConfig : virtual StoreConfig
{
public:
// FIXME: the (Store*) cast works around a bug in gcc that causes
using StoreConfig::StoreConfig;
// FIXME: the (StoreConfig*) cast works around a bug in gcc that causes
// it to omit the call to the Setting constructor. Clang works fine
// either way.
const PathSetting rootDir{(Store*) this, true, "",
const PathSetting rootDir{(StoreConfig*) this, true, "",
"root", "directory prefixed to all other paths"};
const PathSetting stateDir{(Store*) this, false,
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
"state", "directory where Nix will store state"};
const PathSetting logDir{(Store*) this, false,
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
"log", "directory where Nix will store state"};
};
class LocalFSStore : public virtual Store, public virtual LocalFSStoreConfig
{
public:
const static string drvsLogDir;
@ -649,8 +712,7 @@ public:
ref<FSAccessor> getFSAccessor() override;
/* Register a permanent GC root. */
Path addPermRoot(const StorePath & storePath,
const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
virtual Path getRealStoreDir() { return storeDir; }
@ -729,37 +791,47 @@ ref<Store> openStore(const std::string & uri = settings.storeUri.get(),
const Store::Params & extraParams = Store::Params());
enum StoreType {
tDaemon,
tLocal,
tOther
};
StoreType getStoreType(const std::string & uri = settings.storeUri.get(),
const std::string & stateDir = settings.nixStateDir);
/* Return the default substituter stores, defined by the
substituters option and various legacy options. */
std::list<ref<Store>> getDefaultSubstituters();
/* Store implementation registration. */
typedef std::function<std::shared_ptr<Store>(
const std::string & uri, const Store::Params & params)> OpenStore;
struct RegisterStoreImplementation
struct StoreFactory
{
typedef std::vector<OpenStore> Implementations;
static Implementations * implementations;
std::set<std::string> uriSchemes;
std::function<std::shared_ptr<Store> (const std::string & scheme, const std::string & uri, const Store::Params & params)> create;
std::function<std::shared_ptr<StoreConfig> ()> getConfig;
};
struct Implementations
{
static std::vector<StoreFactory> * registered;
RegisterStoreImplementation(OpenStore fun)
template<typename T, typename TConfig>
static void add()
{
if (!implementations) implementations = new Implementations;
implementations->push_back(fun);
if (!registered) registered = new std::vector<StoreFactory>();
StoreFactory factory{
.uriSchemes = T::uriSchemes(),
.create =
([](const std::string & scheme, const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{ return std::make_shared<T>(scheme, uri, params); }),
.getConfig =
([]()
-> std::shared_ptr<StoreConfig>
{ return std::make_shared<TConfig>(StringMap({})); })
};
registered->push_back(factory);
}
};
template<typename T, typename TConfig>
struct RegisterStoreImplementation
{
RegisterStoreImplementation()
{
Implementations::add<T, TConfig>();
}
};
/* Display a set of paths in human-readable form (i.e., between quotes