Mirror of https://github.com/NixOS/nix.git (synced 2025-12-21 16:31:07 +01:00)

Commit 84e2963f8e (parent: 4db9487823)

Move /src to /subprojects

This will facilitate breaking up Nix into multiple packages for each component with Meson.

737 changed files with 504 additions and 505 deletions
1  subprojects/libstore/.version  (symbolic link, new)
@@ -0,0 +1 @@
../../.version
551  subprojects/libstore/binary-cache-store.cc  (new file)
@@ -0,0 +1,551 @@
#include "archive.hh"
|
||||
#include "binary-cache-store.hh"
|
||||
#include "compression.hh"
|
||||
#include "derivations.hh"
|
||||
#include "source-accessor.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info.hh"
|
||||
#include "sync.hh"
|
||||
#include "remote-fs-accessor.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
#include "nar-accessor.hh"
|
||||
#include "thread-pool.hh"
|
||||
#include "callback.hh"
|
||||
#include "signals.hh"
|
||||
#include "archive.hh"
|
||||
|
||||
#include <chrono>
|
||||
#include <future>
|
||||
#include <regex>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
BinaryCacheStore::BinaryCacheStore(const Params & params)
|
||||
: BinaryCacheStoreConfig(params)
|
||||
, Store(params)
|
||||
{
|
||||
if (secretKeyFile != "")
|
||||
signer = std::make_unique<LocalSigner>(
|
||||
SecretKey { readFile(secretKeyFile) });
|
||||
|
||||
StringSink sink;
|
||||
sink << narVersionMagic1;
|
||||
narMagic = sink.s;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::init()
|
||||
{
|
||||
std::string cacheInfoFile = "nix-cache-info";
|
||||
|
||||
auto cacheInfo = getFile(cacheInfoFile);
|
||||
if (!cacheInfo) {
|
||||
upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
|
||||
} else {
|
||||
for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
|
||||
size_t colon= line.find(':');
|
||||
if (colon ==std::string::npos) continue;
|
||||
auto name = line.substr(0, colon);
|
||||
auto value = trim(line.substr(colon + 1, std::string::npos));
|
||||
if (name == "StoreDir") {
|
||||
if (value != storeDir)
|
||||
throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'",
|
||||
getUri(), value, storeDir);
|
||||
} else if (name == "WantMassQuery") {
|
||||
wantMassQuery.setDefault(value == "1");
|
||||
} else if (name == "Priority") {
|
||||
priority.setDefault(std::stoi(value));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void BinaryCacheStore::upsertFile(const std::string & path,
|
||||
std::string && data,
|
||||
const std::string & mimeType)
|
||||
{
|
||||
upsertFile(path, std::make_shared<std::stringstream>(std::move(data)), mimeType);
|
||||
}
|
||||
|
||||
void BinaryCacheStore::getFile(const std::string & path,
|
||||
Callback<std::optional<std::string>> callback) noexcept
|
||||
{
|
||||
try {
|
||||
callback(getFile(path));
|
||||
} catch (...) { callback.rethrow(); }
|
||||
}
|
||||
|
||||
void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
|
||||
{
|
||||
std::promise<std::optional<std::string>> promise;
|
||||
getFile(path,
|
||||
{[&](std::future<std::optional<std::string>> result) {
|
||||
try {
|
||||
promise.set_value(result.get());
|
||||
} catch (...) {
|
||||
promise.set_exception(std::current_exception());
|
||||
}
|
||||
}});
|
||||
sink(*promise.get_future().get());
|
||||
}
|
||||
|
||||
std::optional<std::string> BinaryCacheStore::getFile(const std::string & path)
|
||||
{
|
||||
StringSink sink;
|
||||
try {
|
||||
getFile(path, sink);
|
||||
} catch (NoSuchBinaryCacheFile &) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return std::move(sink.s);
|
||||
}
|
||||
|
||||
std::string BinaryCacheStore::narInfoFileFor(const StorePath & storePath)
|
||||
{
|
||||
return std::string(storePath.hashPart()) + ".narinfo";
|
||||
}
|
||||
|
||||
void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
|
||||
{
|
||||
auto narInfoFile = narInfoFileFor(narInfo->path);
|
||||
|
||||
upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo");
|
||||
|
||||
{
|
||||
auto state_(state.lock());
|
||||
state_->pathInfoCache.upsert(
|
||||
std::string(narInfo->path.to_string()),
|
||||
PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
|
||||
}
|
||||
|
||||
if (diskCache)
|
||||
diskCache->upsertNarInfo(getUri(), std::string(narInfo->path.hashPart()), std::shared_ptr<NarInfo>(narInfo));
|
||||
}
|
||||
|
||||
ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
|
||||
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
|
||||
std::function<ValidPathInfo(HashResult)> mkInfo)
|
||||
{
|
||||
auto [fdTemp, fnTemp] = createTempFile();
|
||||
|
||||
AutoDelete autoDelete(fnTemp);
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
/* Read the NAR simultaneously into a CompressionSink+FileSink (to
|
||||
write the compressed NAR to disk), into a HashSink (to get the
|
||||
NAR hash), and into a NarAccessor (to get the NAR listing). */
|
||||
HashSink fileHashSink { HashAlgorithm::SHA256 };
|
||||
std::shared_ptr<SourceAccessor> narAccessor;
|
||||
HashSink narHashSink { HashAlgorithm::SHA256 };
|
||||
{
|
||||
FdSink fileSink(fdTemp.get());
|
||||
TeeSink teeSinkCompressed { fileSink, fileHashSink };
|
||||
auto compressionSink = makeCompressionSink(compression, teeSinkCompressed, parallelCompression, compressionLevel);
|
||||
TeeSink teeSinkUncompressed { *compressionSink, narHashSink };
|
||||
TeeSource teeSource { narSource, teeSinkUncompressed };
|
||||
narAccessor = makeNarAccessor(teeSource);
|
||||
compressionSink->finish();
|
||||
fileSink.flush();
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
auto info = mkInfo(narHashSink.finish());
|
||||
auto narInfo = make_ref<NarInfo>(info);
|
||||
narInfo->compression = compression;
|
||||
auto [fileHash, fileSize] = fileHashSink.finish();
|
||||
narInfo->fileHash = fileHash;
|
||||
narInfo->fileSize = fileSize;
|
||||
narInfo->url = "nar/" + narInfo->fileHash->to_string(HashFormat::Nix32, false) + ".nar"
|
||||
+ (compression == "xz" ? ".xz" :
|
||||
compression == "bzip2" ? ".bz2" :
|
||||
compression == "zstd" ? ".zst" :
|
||||
compression == "lzip" ? ".lzip" :
|
||||
compression == "lz4" ? ".lz4" :
|
||||
compression == "br" ? ".br" :
|
||||
"");
|
||||
|
||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache",
|
||||
printStorePath(narInfo->path), info.narSize,
|
||||
((1.0 - (double) fileSize / info.narSize) * 100.0),
|
||||
duration);
|
||||
|
||||
/* Verify that all references are valid. This may do some .narinfo
|
||||
reads, but typically they'll already be cached. */
|
||||
for (auto & ref : info.references)
|
||||
try {
|
||||
if (ref != info.path)
|
||||
queryPathInfo(ref);
|
||||
} catch (InvalidPath &) {
|
||||
throw Error("cannot add '%s' to the binary cache because the reference '%s' is not valid",
|
||||
printStorePath(info.path), printStorePath(ref));
|
||||
}
|
||||
|
||||
/* Optionally write a JSON file containing a listing of the
|
||||
contents of the NAR. */
|
||||
if (writeNARListing) {
|
||||
nlohmann::json j = {
|
||||
{"version", 1},
|
||||
{"root", listNar(ref<SourceAccessor>(narAccessor), CanonPath::root, true)},
|
||||
};
|
||||
|
||||
upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
|
||||
}
|
||||
|
||||
/* Optionally maintain an index of DWARF debug info files
|
||||
consisting of JSON files named 'debuginfo/<build-id>' that
|
||||
specify the NAR file and member containing the debug info. */
|
||||
if (writeDebugInfo) {
|
||||
|
||||
CanonPath buildIdDir("lib/debug/.build-id");
|
||||
|
||||
if (auto st = narAccessor->maybeLstat(buildIdDir); st && st->type == SourceAccessor::tDirectory) {
|
||||
|
||||
ThreadPool threadPool(25);
|
||||
|
||||
auto doFile = [&](std::string member, std::string key, std::string target) {
|
||||
checkInterrupt();
|
||||
|
||||
nlohmann::json json;
|
||||
json["archive"] = target;
|
||||
json["member"] = member;
|
||||
|
||||
// FIXME: or should we overwrite? The previous link may point
|
||||
// to a GC'ed file, so overwriting might be useful...
|
||||
if (fileExists(key)) return;
|
||||
|
||||
printMsg(lvlTalkative, "creating debuginfo link from '%s' to '%s'", key, target);
|
||||
|
||||
upsertFile(key, json.dump(), "application/json");
|
||||
};
|
||||
|
||||
std::regex regex1("^[0-9a-f]{2}$");
|
||||
std::regex regex2("^[0-9a-f]{38}\\.debug$");
|
||||
|
||||
for (auto & [s1, _type] : narAccessor->readDirectory(buildIdDir)) {
|
||||
auto dir = buildIdDir / s1;
|
||||
|
||||
if (narAccessor->lstat(dir).type != SourceAccessor::tDirectory
|
||||
|| !std::regex_match(s1, regex1))
|
||||
continue;
|
||||
|
||||
for (auto & [s2, _type] : narAccessor->readDirectory(dir)) {
|
||||
auto debugPath = dir / s2;
|
||||
|
||||
if (narAccessor->lstat(debugPath).type != SourceAccessor::tRegular
|
||||
|| !std::regex_match(s2, regex2))
|
||||
continue;
|
||||
|
||||
auto buildId = s1 + s2;
|
||||
|
||||
std::string key = "debuginfo/" + buildId;
|
||||
std::string target = "../" + narInfo->url;
|
||||
|
||||
threadPool.enqueue(std::bind(doFile, std::string(debugPath.rel()), key, target));
|
||||
}
|
||||
}
|
||||
|
||||
threadPool.process();
|
||||
}
|
||||
}
|
||||
|
||||
/* Atomically write the NAR file. */
|
||||
if (repair || !fileExists(narInfo->url)) {
|
||||
stats.narWrite++;
|
||||
upsertFile(narInfo->url,
|
||||
std::make_shared<std::fstream>(fnTemp, std::ios_base::in | std::ios_base::binary),
|
||||
"application/x-nix-nar");
|
||||
} else
|
||||
stats.narWriteAverted++;
|
||||
|
||||
stats.narWriteBytes += info.narSize;
|
||||
stats.narWriteCompressedBytes += fileSize;
|
||||
stats.narWriteCompressionTimeMs += duration;
|
||||
|
||||
/* Atomically write the NAR info file.*/
|
||||
if (signer) narInfo->sign(*this, *signer);
|
||||
|
||||
writeNarInfo(narInfo);
|
||||
|
||||
stats.narInfoWrite++;
|
||||
|
||||
return narInfo;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs)
|
||||
{
|
||||
if (!repair && isValidPath(info.path)) {
|
||||
// FIXME: copyNAR -> null sink
|
||||
narSource.drain();
|
||||
return;
|
||||
}
|
||||
|
||||
addToStoreCommon(narSource, repair, checkSigs, {[&](HashResult nar) {
|
||||
/* FIXME reinstate these, once we can correctly do hash modulo sink as
|
||||
needed. We need to throw here in case we uploaded a corrupted store path. */
|
||||
// assert(info.narHash == nar.first);
|
||||
// assert(info.narSize == nar.second);
|
||||
return info;
|
||||
}});
|
||||
}
|
||||
|
||||
StorePath BinaryCacheStore::addToStoreFromDump(
|
||||
Source & dump,
|
||||
std::string_view name,
|
||||
FileSerialisationMethod dumpMethod,
|
||||
ContentAddressMethod hashMethod,
|
||||
HashAlgorithm hashAlgo,
|
||||
const StorePathSet & references,
|
||||
RepairFlag repair)
|
||||
{
|
||||
std::optional<Hash> caHash;
|
||||
std::string nar;
|
||||
|
||||
// Calculating Git hash from NAR stream not yet implemented. May not
|
||||
// be possible to implement in single-pass if the NAR is in an
|
||||
// inconvenient order. Could fetch after uploading, however.
|
||||
if (hashMethod.getFileIngestionMethod() == FileIngestionMethod::Git)
|
||||
unsupported("addToStoreFromDump");
|
||||
|
||||
if (auto * dump2p = dynamic_cast<StringSource *>(&dump)) {
|
||||
auto & dump2 = *dump2p;
|
||||
// Hack, this gives us a "replayable" source so we can compute
|
||||
// multiple hashes more easily.
|
||||
//
|
||||
// Only calculate if the dump is in the right format, however.
|
||||
if (static_cast<FileIngestionMethod>(dumpMethod) == hashMethod.getFileIngestionMethod())
|
||||
caHash = hashString(HashAlgorithm::SHA256, dump2.s);
|
||||
switch (dumpMethod) {
|
||||
case FileSerialisationMethod::NixArchive:
|
||||
// The dump is already NAR in this case, just use it.
|
||||
nar = dump2.s;
|
||||
break;
|
||||
case FileSerialisationMethod::Flat:
|
||||
{
|
||||
// The dump is Flat, so we need to convert it to NAR with a
|
||||
// single file.
|
||||
StringSink s;
|
||||
dumpString(dump2.s, s);
|
||||
nar = std::move(s.s);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Otherwise, we have to do th same hashing as NAR so our single
|
||||
// hash will suffice for both purposes.
|
||||
if (dumpMethod != FileSerialisationMethod::NixArchive || hashAlgo != HashAlgorithm::SHA256)
|
||||
unsupported("addToStoreFromDump");
|
||||
}
|
||||
StringSource narDump { nar };
|
||||
|
||||
// Use `narDump` if we wrote to `nar`.
|
||||
Source & narDump2 = nar.size() > 0
|
||||
? static_cast<Source &>(narDump)
|
||||
: dump;
|
||||
|
||||
return addToStoreCommon(narDump2, repair, CheckSigs, [&](HashResult nar) {
|
||||
ValidPathInfo info {
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
hashMethod,
|
||||
caHash ? *caHash : nar.first,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})->path;
|
||||
}
|
||||
|
||||
bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
|
||||
{
|
||||
// FIXME: this only checks whether a .narinfo with a matching hash
|
||||
// part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even
|
||||
// though they shouldn't. Not easily fixed.
|
||||
return fileExists(narInfoFileFor(storePath));
|
||||
}
|
||||
|
||||
std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
|
||||
{
|
||||
auto pseudoPath = StorePath(hashPart + "-" + MissingName);
|
||||
try {
|
||||
auto info = queryPathInfo(pseudoPath);
|
||||
return info->path;
|
||||
} catch (InvalidPath &) {
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
|
||||
{
|
||||
auto info = queryPathInfo(storePath).cast<const NarInfo>();
|
||||
|
||||
LengthSink narSize;
|
||||
TeeSink tee { sink, narSize };
|
||||
|
||||
auto decompressor = makeDecompressionSink(info->compression, tee);
|
||||
|
||||
try {
|
||||
getFile(info->url, *decompressor);
|
||||
} catch (NoSuchBinaryCacheFile & e) {
|
||||
throw SubstituteGone(std::move(e.info()));
|
||||
}
|
||||
|
||||
decompressor->finish();
|
||||
|
||||
stats.narRead++;
|
||||
//stats.narReadCompressedBytes += nar->size(); // FIXME
|
||||
stats.narReadBytes += narSize.length;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
|
||||
{
|
||||
auto uri = getUri();
|
||||
auto storePathS = printStorePath(storePath);
|
||||
auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
|
||||
fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri});
|
||||
PushActivity pact(act->id);
|
||||
|
||||
auto narInfoFile = narInfoFileFor(storePath);
|
||||
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
getFile(narInfoFile,
|
||||
{[=,this](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
|
||||
if (!data) return (*callbackPtr)({});
|
||||
|
||||
stats.narInfoRead++;
|
||||
|
||||
(*callbackPtr)((std::shared_ptr<ValidPathInfo>)
|
||||
std::make_shared<NarInfo>(*this, *data, narInfoFile));
|
||||
|
||||
(void) act; // force Activity into this lambda to ensure it stays alive
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
}
|
||||
|
||||
StorePath BinaryCacheStore::addToStore(
|
||||
std::string_view name,
|
||||
const SourcePath & path,
|
||||
ContentAddressMethod method,
|
||||
HashAlgorithm hashAlgo,
|
||||
const StorePathSet & references,
|
||||
PathFilter & filter,
|
||||
RepairFlag repair)
|
||||
{
|
||||
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
|
||||
non-recursive+sha256 so we can just use the default
|
||||
implementation of this method in terms of addToStoreFromDump. */
|
||||
|
||||
auto h = hashPath(path, method.getFileIngestionMethod(), hashAlgo, filter).first;
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
path.dumpPath(sink, filter);
|
||||
});
|
||||
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
|
||||
ValidPathInfo info {
|
||||
*this,
|
||||
name,
|
||||
ContentAddressWithReferences::fromParts(
|
||||
method,
|
||||
h,
|
||||
{
|
||||
.others = references,
|
||||
// caller is not capable of creating a self-reference, because this is content-addressed without modulus
|
||||
.self = false,
|
||||
}),
|
||||
nar.first,
|
||||
};
|
||||
info.narSize = nar.second;
|
||||
return info;
|
||||
})->path;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::queryRealisationUncached(const DrvOutput & id,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept
|
||||
{
|
||||
auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
|
||||
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
Callback<std::optional<std::string>> newCallback = {
|
||||
[=](std::future<std::optional<std::string>> fut) {
|
||||
try {
|
||||
auto data = fut.get();
|
||||
if (!data) return (*callbackPtr)({});
|
||||
|
||||
auto realisation = Realisation::fromJSON(
|
||||
nlohmann::json::parse(*data), outputInfoFilePath);
|
||||
return (*callbackPtr)(std::make_shared<const Realisation>(realisation));
|
||||
} catch (...) {
|
||||
callbackPtr->rethrow();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
getFile(outputInfoFilePath, std::move(newCallback));
|
||||
}
|
||||
|
||||
void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
|
||||
if (diskCache)
|
||||
diskCache->upsertRealisation(getUri(), info);
|
||||
auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
|
||||
upsertFile(filePath, info.toJSON().dump(), "application/json");
|
||||
}
|
||||
|
||||
ref<SourceAccessor> BinaryCacheStore::getFSAccessor(bool requireValidPath)
|
||||
{
|
||||
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), requireValidPath, localNarCache);
|
||||
}
|
||||
|
||||
void BinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
|
||||
{
|
||||
/* Note: this is inherently racy since there is no locking on
|
||||
binary caches. In particular, with S3 this unreliable, even
|
||||
when addSignatures() is called sequentially on a path, because
|
||||
S3 might return an outdated cached version. */
|
||||
|
||||
auto narInfo = make_ref<NarInfo>((NarInfo &) *queryPathInfo(storePath));
|
||||
|
||||
narInfo->sigs.insert(sigs.begin(), sigs.end());
|
||||
|
||||
writeNarInfo(narInfo);
|
||||
}
|
||||
|
||||
std::optional<std::string> BinaryCacheStore::getBuildLogExact(const StorePath & path)
|
||||
{
|
||||
auto logPath = "log/" + std::string(baseNameOf(printStorePath(path)));
|
||||
|
||||
debug("fetching build log from binary cache '%s/%s'", getUri(), logPath);
|
||||
|
||||
return getFile(logPath);
|
||||
}
|
||||
|
||||
void BinaryCacheStore::addBuildLog(const StorePath & drvPath, std::string_view log)
|
||||
{
|
||||
assert(drvPath.isDerivation());
|
||||
|
||||
upsertFile(
|
||||
"log/" + std::string(drvPath.to_string()),
|
||||
(std::string) log, // FIXME: don't copy
|
||||
"text/plain; charset=utf-8");
|
||||
}
|
||||
|
||||
}
|
||||
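
For reference, `init()` above round-trips the cache's top-level metadata file. A typical `nix-cache-info` file looks like the sketch below; the three field names are exactly the ones the parser recognises, while the concrete values are illustrative:

    StoreDir: /nix/store
    WantMassQuery: 1
    Priority: 40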
162  subprojects/libstore/binary-cache-store.hh  (new file)
@@ -0,0 +1,162 @@
#pragma once
///@file

#include "signature/local-keys.hh"
#include "store-api.hh"
#include "log-store.hh"

#include "pool.hh"

#include <atomic>

namespace nix {

struct NarInfo;

struct BinaryCacheStoreConfig : virtual StoreConfig
{
    using StoreConfig::StoreConfig;

    const Setting<std::string> compression{this, "xz", "compression",
        "NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."};

    const Setting<bool> writeNARListing{this, false, "write-nar-listing",
        "Whether to write a JSON file that lists the files in each NAR."};

    const Setting<bool> writeDebugInfo{this, false, "index-debug-info",
        R"(
          Whether to index DWARF debug info files by build ID. This allows
          [`dwarffs`](https://github.com/edolstra/dwarffs) to fetch debug
          info on demand.
        )"};

    const Setting<Path> secretKeyFile{this, "", "secret-key",
        "Path to the secret key used to sign the binary cache."};

    const Setting<Path> localNarCache{this, "", "local-nar-cache",
        "Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."};

    const Setting<bool> parallelCompression{this, false, "parallel-compression",
        "Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."};

    const Setting<int> compressionLevel{this, -1, "compression-level",
        R"(
          The *preset level* to be used when compressing NARs.
          The meaning and accepted values depend on the compression method selected.
          `-1` specifies that the default compression level should be used.
        )"};
};


/**
 * @note subclasses must implement at least one of the two
 * virtual getFile() methods.
 */
class BinaryCacheStore : public virtual BinaryCacheStoreConfig,
    public virtual Store,
    public virtual LogStore
{

private:
    std::unique_ptr<Signer> signer;

protected:

    // The prefix under which realisation infos will be stored
    const std::string realisationsPrefix = "realisations";

    BinaryCacheStore(const Params & params);

public:

    virtual bool fileExists(const std::string & path) = 0;

    virtual void upsertFile(const std::string & path,
        std::shared_ptr<std::basic_iostream<char>> istream,
        const std::string & mimeType) = 0;

    void upsertFile(const std::string & path,
        // FIXME: use std::string_view
        std::string && data,
        const std::string & mimeType);

    /**
     * Dump the contents of the specified file to a sink.
     */
    virtual void getFile(const std::string & path, Sink & sink);

    /**
     * Fetch the specified file and call the specified callback with
     * the result. A subclass may implement this asynchronously.
     */
    virtual void getFile(
        const std::string & path,
        Callback<std::optional<std::string>> callback) noexcept;

    std::optional<std::string> getFile(const std::string & path);

public:

    virtual void init() override;

private:

    std::string narMagic;

    std::string narInfoFileFor(const StorePath & storePath);

    void writeNarInfo(ref<NarInfo> narInfo);

    ref<const ValidPathInfo> addToStoreCommon(
        Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
        std::function<ValidPathInfo(HashResult)> mkInfo);

public:

    bool isValidPathUncached(const StorePath & path) override;

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    void addToStore(const ValidPathInfo & info, Source & narSource,
        RepairFlag repair, CheckSigsFlag checkSigs) override;

    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        FileSerialisationMethod dumpMethod,
        ContentAddressMethod hashMethod,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        RepairFlag repair) override;

    StorePath addToStore(
        std::string_view name,
        const SourcePath & path,
        ContentAddressMethod method,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        PathFilter & filter,
        RepairFlag repair) override;

    void registerDrvOutput(const Realisation & info) override;

    void queryRealisationUncached(const DrvOutput &,
        Callback<std::shared_ptr<const Realisation>> callback) noexcept override;

    void narFromPath(const StorePath & path, Sink & sink) override;

    ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;

    void addSignatures(const StorePath & storePath, const StringSet & sigs) override;

    std::optional<std::string> getBuildLogExact(const StorePath & path) override;

    void addBuildLog(const StorePath & drvPath, std::string_view log) override;

};

MakeError(NoSuchBinaryCacheFile, Error);

}
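
Since a backend only needs the small file-level interface above, one can be sketched in a few lines. Below is a hypothetical, partial in-memory example, not part of this commit: constructor/config wiring and the remaining `Store` virtuals that a real subclass must also provide are omitted.

    // Hypothetical sketch; assumes the context of binary-cache-store.hh.
    struct MemoryBinaryCacheStore : virtual BinaryCacheStore
    {
        std::map<std::string, std::string> files; // path -> file contents

        bool fileExists(const std::string & path) override
        {
            return files.count(path) != 0;
        }

        void upsertFile(const std::string & path,
            std::shared_ptr<std::basic_iostream<char>> istream,
            const std::string & mimeType) override
        {
            std::ostringstream out;
            out << istream->rdbuf();
            files[path] = out.str();
        }

        // Implements the callback-style getFile(); the base class derives
        // the synchronous variants from it.
        void getFile(const std::string & path,
            Callback<std::optional<std::string>> callback) noexcept override
        {
            try {
                std::optional<std::string> res;
                if (auto i = files.find(path); i != files.end()) res = i->second;
                callback(std::move(res));
            } catch (...) { callback.rethrow(); }
        }
    };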
8  subprojects/libstore/build-result.cc  (new file)
@@ -0,0 +1,8 @@
#include "build-result.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
bool BuildResult::operator==(const BuildResult &) const noexcept = default;
|
||||
std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default;
|
||||
|
||||
}
|
||||
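
These two lines rely on C++20 defaulted comparisons: `= default` generates member-wise equality and ordering over all fields. A standalone miniature of the same pattern (not Nix code):

    #include <cassert>
    #include <compare>

    struct Version {
        int major = 0, minor = 0;
        bool operator==(const Version &) const noexcept = default;
        std::strong_ordering operator<=>(const Version &) const noexcept = default;
    };

    int main() {
        assert((Version{1, 2} < Version{1, 3}));   // compares member-wise
        assert((Version{1, 2} == Version{1, 2}));
    }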
133  subprojects/libstore/build-result.hh  (new file)
@@ -0,0 +1,133 @@
#pragma once
///@file

#include "realisation.hh"
#include "derived-path.hh"

#include <string>
#include <chrono>
#include <optional>

namespace nix {

struct BuildResult
{
    /**
     * @note This is directly used in the nix-store --serve protocol.
     * That means we need to worry about compatibility across versions.
     * Therefore, don't remove status codes, and only add new status
     * codes at the end of the list.
     */
    enum Status {
        Built = 0,
        Substituted,
        AlreadyValid,
        PermanentFailure,
        InputRejected,
        OutputRejected,
        /// possibly transient
        TransientFailure,
        /// no longer used
        CachedFailure,
        TimedOut,
        MiscFailure,
        DependencyFailed,
        LogLimitExceeded,
        NotDeterministic,
        ResolvesToAlreadyValid,
        NoSubstituters,
    } status = MiscFailure;

    /**
     * Information about the error if the build failed.
     *
     * @todo This should be an entire ErrorInfo object, not just a
     * string, for richer information.
     */
    std::string errorMsg;

    std::string toString() const {
        auto strStatus = [&]() {
            switch (status) {
                case Built: return "Built";
                case Substituted: return "Substituted";
                case AlreadyValid: return "AlreadyValid";
                case PermanentFailure: return "PermanentFailure";
                case InputRejected: return "InputRejected";
                case OutputRejected: return "OutputRejected";
                case TransientFailure: return "TransientFailure";
                case CachedFailure: return "CachedFailure";
                case TimedOut: return "TimedOut";
                case MiscFailure: return "MiscFailure";
                case DependencyFailed: return "DependencyFailed";
                case LogLimitExceeded: return "LogLimitExceeded";
                case NotDeterministic: return "NotDeterministic";
                case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
                case NoSubstituters: return "NoSubstituters";
                default: return "Unknown";
            }
        }();
        return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
    }

    /**
     * How many times this build was performed.
     */
    unsigned int timesBuilt = 0;

    /**
     * If timesBuilt > 1, whether some builds did not produce the same
     * result. (Note that 'isNonDeterministic = false' does not mean
     * the build is deterministic, just that we don't have evidence of
     * non-determinism.)
     */
    bool isNonDeterministic = false;

    /**
     * For derivations, a mapping from the names of the wanted outputs
     * to actual paths.
     */
    SingleDrvOutputs builtOutputs;

    /**
     * The start/stop times of the build (or one of the rounds, if it
     * was repeated).
     */
    time_t startTime = 0, stopTime = 0;

    /**
     * User and system CPU time the build took.
     */
    std::optional<std::chrono::microseconds> cpuUser, cpuSystem;

    bool operator ==(const BuildResult &) const noexcept;
    std::strong_ordering operator <=>(const BuildResult &) const noexcept;

    bool success()
    {
        return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
    }

    void rethrow()
    {
        throw Error("%s", errorMsg);
    }
};

/**
 * A `BuildResult` together with its "primary key".
 */
struct KeyedBuildResult : BuildResult
{
    /**
     * The derivation we built or the store path we substituted.
     */
    DerivedPath path;

    // Hack to work around a gcc "may be used uninitialized" warning.
    KeyedBuildResult(BuildResult res, DerivedPath path)
        : BuildResult(std::move(res)), path(std::move(path))
    { }
};

}
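
A consumer typically branches on `success()` and falls back to `rethrow()`. A hedged usage sketch, where `res` is a stand-in for a value returned by, e.g., `Store::buildDerivation()` (defined in entry-points.cc later in this commit); requires `<iostream>`:

    // Illustrative only; 'handleResult' is a hypothetical helper.
    void handleResult(nix::BuildResult res)
    {
        if (!res.success())
            res.rethrow();                   // throws Error("%s", errorMsg)
        std::cout << res.toString() << "\n"; // e.g. "Built" or "TimedOut : ..."
    }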
1  subprojects/libstore/build-utils-meson  (symbolic link, new)
@@ -0,0 +1 @@
../../build-utils-meson
1597  subprojects/libstore/build/derivation-goal.cc  (new file)
(File diff suppressed because it is too large.)
348  subprojects/libstore/build/derivation-goal.hh  (new file)
@@ -0,0 +1,348 @@
#pragma once
///@file

#include "parsed-derivations.hh"
#ifndef _WIN32
# include "user-lock.hh"
#endif
#include "outputs-spec.hh"
#include "store-api.hh"
#include "pathlocks.hh"
#include "goal.hh"

namespace nix {

using std::map;

#ifndef _WIN32 // TODO enable build hook on Windows
struct HookInstance;
#endif

typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;

/**
 * Unless we are repairing, we don't bother to test validity and just assume it,
 * so the choices are `Absent` or `Valid`.
 */
enum struct PathStatus {
    Corrupt,
    Absent,
    Valid,
};

struct InitialOutputStatus {
    StorePath path;
    PathStatus status;
    /**
     * Valid in the store, and additionally non-corrupt if we are repairing
     */
    bool isValid() const {
        return status == PathStatus::Valid;
    }
    /**
     * Merely present, allowed to be corrupt
     */
    bool isPresent() const {
        return status == PathStatus::Corrupt
            || status == PathStatus::Valid;
    }
};

struct InitialOutput {
    bool wanted;
    Hash outputHash;
    std::optional<InitialOutputStatus> known;
};

/**
 * A goal for building some or all of the outputs of a derivation.
 */
struct DerivationGoal : public Goal
{
    /**
     * Whether to use an on-disk .drv file.
     */
    bool useDerivation;

    /** The path of the derivation. */
    StorePath drvPath;

    /**
     * The goal for the corresponding resolved derivation
     */
    std::shared_ptr<DerivationGoal> resolvedDrvGoal;

    /**
     * The specific outputs that we need to build.
     */
    OutputsSpec wantedOutputs;

    /**
     * Mapping from input derivations + output names to actual store
     * paths. This is filled in by waiteeDone() as each dependency
     * finishes, before inputsRealised() is reached.
     */
    std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;

    /**
     * See `needRestart`; just for that field.
     */
    enum struct NeedRestartForMoreOutputs {
        /**
         * The goal state machine is progressing based on the current value of
         * `wantedOutputs`. No actions are needed.
         */
        OutputsUnmodifedDontNeed,
        /**
         * `wantedOutputs` has been extended, but the state machine is
         * proceeding according to its old value, so we need to restart.
         */
        OutputsAddedDoNeed,
        /**
         * The goal state machine has progressed to the point of doing a build,
         * in which case all outputs will be produced, so extensions to
         * `wantedOutputs` no longer require a restart.
         */
        BuildInProgressWillNotNeed,
    };

    /**
     * Whether additional wanted outputs have been added.
     */
    NeedRestartForMoreOutputs needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;

    /**
     * See `retrySubstitution`; just for that field.
     */
    enum RetrySubstitution {
        /**
         * No issues have yet arisen, no need to restart.
         */
        NoNeed,
        /**
         * Something failed and there is an incomplete closure. Let's retry
         * substituting.
         */
        YesNeed,
        /**
         * We are currently retrying or have already retried substitution,
         * and whether or not something goes wrong we will not retry again.
         */
        AlreadyRetried,
    };

    /**
     * Whether to retry substituting the outputs after building the
     * inputs. This is done in case of an incomplete closure.
     */
    RetrySubstitution retrySubstitution = RetrySubstitution::NoNeed;

    /**
     * The derivation stored at drvPath.
     */
    std::unique_ptr<Derivation> drv;

    std::unique_ptr<ParsedDerivation> parsedDrv;

    /**
     * The remainder is state held during the build.
     */

    /**
     * Locks on (fixed) output paths.
     */
    PathLocks outputLocks;

    /**
     * All input paths (that is, the union of FS closures of the
     * immediate input paths).
     */
    StorePathSet inputPaths;

    std::map<std::string, InitialOutput> initialOutputs;

    /**
     * File descriptor for the log file.
     */
    AutoCloseFD fdLogFile;
    std::shared_ptr<BufferedSink> logFileSink, logSink;

    /**
     * Number of bytes received from the builder's stdout/stderr.
     */
    unsigned long logSize;

    /**
     * The most recent log lines.
     */
    std::list<std::string> logTail;

    std::string currentLogLine;
    size_t currentLogLinePos = 0; // to handle carriage return

    std::string currentHookLine;

#ifndef _WIN32 // TODO enable build hook on Windows
    /**
     * The build hook.
     */
    std::unique_ptr<HookInstance> hook;
#endif

    /**
     * The sort of derivation we are building.
     */
    std::optional<DerivationType> derivationType;

    BuildMode buildMode;

    std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;

    std::unique_ptr<Activity> act;

    /**
     * Activity that denotes waiting for a lock.
     */
    std::unique_ptr<Activity> actLock;

    std::map<ActivityId, Activity> builderActivities;

    /**
     * The remote machine on which we're building.
     */
    std::string machineName;

    DerivationGoal(const StorePath & drvPath,
        const OutputsSpec & wantedOutputs, Worker & worker,
        BuildMode buildMode = bmNormal);
    DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
        const OutputsSpec & wantedOutputs, Worker & worker,
        BuildMode buildMode = bmNormal);
    virtual ~DerivationGoal();

    void timedOut(Error && ex) override;

    std::string key() override;

    /**
     * Add wanted outputs to an already existing derivation goal.
     */
    void addWantedOutputs(const OutputsSpec & outputs);

    /**
     * The states.
     */
    Co init() override;
    Co getDerivation();
    Co loadDerivation();
    Co haveDerivation();
    Co outputsSubstitutionTried();
    Co gaveUpOnSubstitution();
    Co closureRepaired();
    Co inputsRealised();
    Co tryToBuild();
    virtual Co tryLocalBuild();
    Co buildDone();

    Co resolvedFinished();

    /**
     * Is the build hook willing to perform the build?
     */
    HookReply tryBuildHook();

    virtual int getChildStatus();

    /**
     * Check that the derivation outputs all exist and register them
     * as valid.
     */
    virtual SingleDrvOutputs registerOutputs();

    /**
     * Open a log file and a pipe to it.
     */
    Path openLogFile();

    /**
     * Sign the newly built realisation if the store allows it
     */
    virtual void signRealisation(Realisation &) {}

    /**
     * Close the log file.
     */
    void closeLogFile();

    /**
     * Close the read side of the logger pipe.
     */
    virtual void closeReadPipes();

    /**
     * Cleanup hooks for buildDone()
     */
    virtual void cleanupHookFinally();
    virtual void cleanupPreChildKill();
    virtual void cleanupPostChildKill();
    virtual bool cleanupDecideWhetherDiskFull();
    virtual void cleanupPostOutputsRegisteredModeCheck();
    virtual void cleanupPostOutputsRegisteredModeNonCheck();

    virtual bool isReadDesc(Descriptor fd);

    /**
     * Callback used by the worker to write to the log.
     */
    void handleChildOutput(Descriptor fd, std::string_view data) override;
    void handleEOF(Descriptor fd) override;
    void flushLine();

    /**
     * Wrappers around the corresponding Store methods that first consult the
     * derivation. This is currently needed because when there is no drv file
     * there also is no DB entry.
     */
    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
    OutputPathMap queryDerivationOutputMap();

    /**
     * Update 'initialOutputs' to determine the current status of the
     * outputs of the derivation. Also returns a Boolean denoting
     * whether all outputs are valid and non-corrupt, and a
     * 'SingleDrvOutputs' structure containing the valid outputs.
     */
    std::pair<bool, SingleDrvOutputs> checkPathValidity();

    /**
     * Aborts if any output is not valid or corrupt, and otherwise
     * returns a 'SingleDrvOutputs' structure containing all outputs.
     */
    SingleDrvOutputs assertPathValidity();

    /**
     * Forcibly kill the child process, if any.
     */
    virtual void killChild();

    Co repairClosure();

    void started();

    Done done(
        BuildResult::Status status,
        SingleDrvOutputs builtOutputs = {},
        std::optional<Error> ex = {});

    void waiteeDone(GoalPtr waitee, ExitCode result) override;

    StorePathSet exportReferences(const StorePathSet & storePaths);

    JobCategory jobCategory() const override {
        return JobCategory::Build;
    };
};

MakeError(NotDeterministic, BuildError);

}
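
The `isValid()` / `isPresent()` split above is what makes repair work: a corrupt output is present on disk, and hence a candidate for repair, but it is not valid. A tiny illustrative use (the status value is hypothetical; `StorePath::dummy` is the placeholder path used elsewhere in this commit):

    // Illustrative only: a corrupt output is present but not valid.
    InitialOutputStatus st { StorePath::dummy, PathStatus::Corrupt };
    assert(st.isPresent());
    assert(!st.isValid());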
161  subprojects/libstore/build/drv-output-substitution-goal.cc  (new file)
@@ -0,0 +1,161 @@
#include "drv-output-substitution-goal.hh"
|
||||
#include "finally.hh"
|
||||
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "callback.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
|
||||
const DrvOutput & id,
|
||||
Worker & worker,
|
||||
RepairFlag repair,
|
||||
std::optional<ContentAddress> ca)
|
||||
: Goal(worker, DerivedPath::Opaque { StorePath::dummy })
|
||||
, id(id)
|
||||
{
|
||||
name = fmt("substitution of '%s'", id.to_string());
|
||||
trace("created");
|
||||
}
|
||||
|
||||
|
||||
Goal::Co DrvOutputSubstitutionGoal::init()
|
||||
{
|
||||
trace("init");
|
||||
|
||||
/* If the derivation already exists, we’re done */
|
||||
if (worker.store.queryRealisation(id)) {
|
||||
co_return amDone(ecSuccess);
|
||||
}
|
||||
|
||||
auto subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
|
||||
|
||||
bool substituterFailed = false;
|
||||
|
||||
for (auto sub : subs) {
|
||||
trace("trying next substituter");
|
||||
|
||||
/* The callback of the curl download below can outlive `this` (if
|
||||
some other error occurs), so it must not touch `this`. So put
|
||||
the shared state in a separate refcounted object. */
|
||||
auto outPipe = std::make_shared<MuxablePipe>();
|
||||
#ifndef _WIN32
|
||||
outPipe->create();
|
||||
#else
|
||||
outPipe->createAsyncPipe(worker.ioport.get());
|
||||
#endif
|
||||
|
||||
auto promise = std::make_shared<std::promise<std::shared_ptr<const Realisation>>>();
|
||||
|
||||
sub->queryRealisation(
|
||||
id,
|
||||
{ [outPipe(outPipe), promise(promise)](std::future<std::shared_ptr<const Realisation>> res) {
|
||||
try {
|
||||
Finally updateStats([&]() { outPipe->writeSide.close(); });
|
||||
promise->set_value(res.get());
|
||||
} catch (...) {
|
||||
promise->set_exception(std::current_exception());
|
||||
}
|
||||
} });
|
||||
|
||||
worker.childStarted(shared_from_this(), {
|
||||
#ifndef _WIN32
|
||||
outPipe->readSide.get()
|
||||
#else
|
||||
&*outPipe
|
||||
#endif
|
||||
}, true, false);
|
||||
|
||||
co_await Suspend{};
|
||||
|
||||
worker.childTerminated(this);
|
||||
|
||||
/*
|
||||
* The realisation corresponding to the given output id.
|
||||
* Will be filled once we can get it.
|
||||
*/
|
||||
std::shared_ptr<const Realisation> outputInfo;
|
||||
|
||||
try {
|
||||
outputInfo = promise->get_future().get();
|
||||
} catch (std::exception & e) {
|
||||
printError(e.what());
|
||||
substituterFailed = true;
|
||||
}
|
||||
|
||||
if (!outputInfo) continue;
|
||||
|
||||
bool failed = false;
|
||||
|
||||
for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
|
||||
if (depId != id) {
|
||||
if (auto localOutputInfo = worker.store.queryRealisation(depId);
|
||||
localOutputInfo && localOutputInfo->outPath != depPath) {
|
||||
warn(
|
||||
"substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
|
||||
"Local: %s\n"
|
||||
"Remote: %s",
|
||||
sub->getUri(),
|
||||
depId.to_string(),
|
||||
worker.store.printStorePath(localOutputInfo->outPath),
|
||||
worker.store.printStorePath(depPath)
|
||||
);
|
||||
failed = true;
|
||||
break;
|
||||
}
|
||||
addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
|
||||
}
|
||||
}
|
||||
|
||||
if (failed) continue;
|
||||
|
||||
co_return realisationFetched(outputInfo, sub);
|
||||
}
|
||||
|
||||
/* None left. Terminate this goal and let someone else deal
|
||||
with it. */
|
||||
debug("derivation output '%s' is required, but there is no substituter that can provide it", id.to_string());
|
||||
|
||||
if (substituterFailed) {
|
||||
worker.failedSubstitutions++;
|
||||
worker.updateProgress();
|
||||
}
|
||||
|
||||
/* Hack: don't indicate failure if there were no substituters.
|
||||
In that case the calling derivation should just do a
|
||||
build. */
|
||||
co_return amDone(substituterFailed ? ecFailed : ecNoSubstituters);
|
||||
}
|
||||
|
||||
Goal::Co DrvOutputSubstitutionGoal::realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub) {
|
||||
addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
|
||||
|
||||
if (!waitees.empty()) co_await Suspend{};
|
||||
|
||||
trace("output path substituted");
|
||||
|
||||
if (nrFailed > 0) {
|
||||
debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
|
||||
co_return amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
|
||||
}
|
||||
|
||||
worker.store.registerDrvOutput(*outputInfo);
|
||||
|
||||
trace("finished");
|
||||
co_return amDone(ecSuccess);
|
||||
}
|
||||
|
||||
std::string DrvOutputSubstitutionGoal::key()
|
||||
{
|
||||
/* "a$" ensures substitution goals happen before derivation
|
||||
goals. */
|
||||
return "a$" + std::string(id.to_string());
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::handleEOF(Descriptor fd)
|
||||
{
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
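
The pattern in `init()` is worth isolating: the download callback may outlive the goal, so all shared state lives behind `shared_ptr`s, and closing the pipe's write side is what wakes the worker's poll loop. Below is a standalone POSIX miniature of the same idea (not Nix code; uses only `pipe()`, `read()`, and `std::promise`):

    #include <cassert>
    #include <future>
    #include <memory>
    #include <unistd.h>

    int main()
    {
        int fds[2];
        assert(pipe(fds) == 0);

        // Shared state is refcounted so the callback can outlive its creator.
        auto promise = std::make_shared<std::promise<int>>();
        int writeFd = fds[1];

        auto callback = [promise, writeFd](int result) {
            promise->set_value(result);
            close(writeFd); // EOF on the read side wakes the event loop
        };

        callback(42); // stand-in for the asynchronous completion

        char buf;
        assert(read(fds[0], &buf, 1) == 0); // 0 bytes == EOF: work is done
        assert(promise->get_future().get() == 42);
        close(fds[0]);
    }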
50  subprojects/libstore/build/drv-output-substitution-goal.hh  (new file)
@@ -0,0 +1,50 @@
#pragma once
///@file

#include <thread>
#include <future>

#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
#include "muxable-pipe.hh"

namespace nix {

class Worker;

/**
 * Substitution of a derivation output.
 * This is done in three steps:
 * 1. Fetch the output info from a substituter
 * 2. Substitute the corresponding output path
 * 3. Register the output info
 */
class DrvOutputSubstitutionGoal : public Goal {

    /**
     * The drv output we're trying to substitute
     */
    DrvOutput id;

public:
    DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

    typedef void (DrvOutputSubstitutionGoal::*GoalState)();
    GoalState state;

    Co init() override;
    Co realisationFetched(std::shared_ptr<const Realisation> outputInfo, nix::ref<nix::Store> sub);

    void timedOut(Error && ex) override { unreachable(); };

    std::string key() override;

    void handleEOF(Descriptor fd) override;

    JobCategory jobCategory() const override {
        return JobCategory::Substitution;
    };
};

}
151  subprojects/libstore/build/entry-points.cc  (new file)
@@ -0,0 +1,151 @@
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
# include "derivation-goal.hh"
|
||||
#endif
|
||||
#include "local-store.hh"
|
||||
#include "strings.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
Worker worker(*this, evalStore ? *evalStore : *this);
|
||||
|
||||
Goals goals;
|
||||
for (auto & br : reqs)
|
||||
goals.insert(worker.makeGoal(br, buildMode));
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
StringSet failed;
|
||||
std::optional<Error> ex;
|
||||
for (auto & i : goals) {
|
||||
if (i->ex) {
|
||||
if (ex)
|
||||
logError(i->ex->info());
|
||||
else
|
||||
ex = std::move(i->ex);
|
||||
}
|
||||
if (i->exitCode != Goal::ecSuccess) {
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get()))
|
||||
failed.insert(printStorePath(i2->drvPath));
|
||||
else
|
||||
#endif
|
||||
if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get()))
|
||||
failed.insert(printStorePath(i2->storePath));
|
||||
}
|
||||
}
|
||||
|
||||
if (failed.size() == 1 && ex) {
|
||||
ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*ex);
|
||||
} else if (!failed.empty()) {
|
||||
if (ex) logError(ex->info());
|
||||
throw Error(worker.failingExitStatus(), "build of %s failed", concatStringsSep(", ", quoteStrings(failed)));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<KeyedBuildResult> Store::buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & reqs,
|
||||
BuildMode buildMode,
|
||||
std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
Worker worker(*this, evalStore ? *evalStore : *this);
|
||||
|
||||
Goals goals;
|
||||
std::vector<std::pair<const DerivedPath &, GoalPtr>> state;
|
||||
|
||||
for (const auto & req : reqs) {
|
||||
auto goal = worker.makeGoal(req, buildMode);
|
||||
goals.insert(goal);
|
||||
state.push_back({req, goal});
|
||||
}
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
std::vector<KeyedBuildResult> results;
|
||||
|
||||
for (auto & [req, goalPtr] : state)
|
||||
results.emplace_back(KeyedBuildResult {
|
||||
goalPtr->getBuildResult(req),
|
||||
/* .path = */ req,
|
||||
});
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, OutputsSpec::All {}, buildMode);
|
||||
#else
|
||||
std::shared_ptr<Goal> goal;
|
||||
throw UnimplementedError("Building derivations not yet implemented on windows.");
|
||||
#endif
|
||||
|
||||
try {
|
||||
worker.run(Goals{goal});
|
||||
return goal->getBuildResult(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(drvPath),
|
||||
.outputs = OutputsSpec::All {},
|
||||
});
|
||||
} catch (Error & e) {
|
||||
return BuildResult {
|
||||
.status = BuildResult::MiscFailure,
|
||||
.errorMsg = e.msg(),
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
void Store::ensurePath(const StorePath & path)
|
||||
{
|
||||
/* If the path is already valid, we're done. */
|
||||
if (isValidPath(path)) return;
|
||||
|
||||
Worker worker(*this, *this);
|
||||
GoalPtr goal = worker.makePathSubstitutionGoal(path);
|
||||
Goals goals = {goal};
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
if (goal->exitCode != Goal::ecSuccess) {
|
||||
if (goal->ex) {
|
||||
goal->ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*goal->ex);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Store::repairPath(const StorePath & path)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
|
||||
Goals goals = {goal};
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
if (goal->exitCode != Goal::ecSuccess) {
|
||||
/* Since substituting the path didn't work, if we have a valid
|
||||
deriver, then rebuild the deriver. */
|
||||
auto info = queryPathInfo(path);
|
||||
if (info->deriver && isValidPath(*info->deriver)) {
|
||||
goals.clear();
|
||||
goals.insert(worker.makeGoal(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(*info->deriver),
|
||||
// FIXME: Should just build the specific output we need.
|
||||
.outputs = OutputsSpec::All { },
|
||||
}, bmRepair));
|
||||
worker.run(goals);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "cannot repair path '%s'", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
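
A hedged usage sketch for these entry points, assuming an already-opened store; `path` and `drvPath` are placeholder `StorePath` values:

    // Illustrative only; uses the signatures defined above.
    ref<Store> store = openStore();
    store->ensurePath(path);              // substitute the path if missing
    store->buildPaths({ DerivedPath::Built {
        .drvPath = makeConstantStorePathRef(drvPath),
        .outputs = OutputsSpec::All {},
    } }, bmNormal);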
219  subprojects/libstore/build/goal.cc  (new file)
@@ -0,0 +1,219 @@
#include "goal.hh"
|
||||
#include "worker.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
using Co = nix::Goal::Co;
|
||||
using promise_type = nix::Goal::promise_type;
|
||||
using handle_type = nix::Goal::handle_type;
|
||||
using Suspend = nix::Goal::Suspend;
|
||||
|
||||
Co::Co(Co&& rhs) {
|
||||
this->handle = rhs.handle;
|
||||
rhs.handle = nullptr;
|
||||
}
|
||||
void Co::operator=(Co&& rhs) {
|
||||
this->handle = rhs.handle;
|
||||
rhs.handle = nullptr;
|
||||
}
|
||||
Co::~Co() {
|
||||
if (handle) {
|
||||
handle.promise().alive = false;
|
||||
handle.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
Co promise_type::get_return_object() {
|
||||
auto handle = handle_type::from_promise(*this);
|
||||
return Co{handle};
|
||||
};
|
||||
|
||||
std::coroutine_handle<> promise_type::final_awaiter::await_suspend(handle_type h) noexcept {
|
||||
auto& p = h.promise();
|
||||
auto goal = p.goal;
|
||||
assert(goal);
|
||||
goal->trace("in final_awaiter");
|
||||
auto c = std::move(p.continuation);
|
||||
|
||||
if (c) {
|
||||
// We still have a continuation, i.e. work to do.
|
||||
// We assert that the goal is still busy.
|
||||
assert(goal->exitCode == ecBusy);
|
||||
assert(goal->top_co); // Goal must have an active coroutine.
|
||||
assert(goal->top_co->handle == h); // The active coroutine must be us.
|
||||
assert(p.alive); // We must not have been destructed.
|
||||
|
||||
// we move continuation to the top,
|
||||
// note: previous top_co is actually h, so by moving into it,
|
||||
// we're calling the destructor on h, DON'T use h and p after this!
|
||||
|
||||
// We move our continuation into `top_co`, i.e. the marker for the active continuation.
|
||||
// By doing this we destruct the old `top_co`, i.e. us, so `h` can't be used anymore.
|
||||
// Be careful not to access freed memory!
|
||||
goal->top_co = std::move(c);
|
||||
|
||||
// We resume `top_co`.
|
||||
return goal->top_co->handle;
|
||||
} else {
|
||||
// We have no continuation, i.e. no more work to do,
|
||||
// so the goal must not be busy anymore.
|
||||
assert(goal->exitCode != ecBusy);
|
||||
|
||||
// We reset `top_co` for good measure.
|
||||
p.goal->top_co = {};
|
||||
|
||||
// We jump to the noop coroutine, which doesn't do anything and immediately suspends.
|
||||
// This passes control back to the caller of goal.work().
|
||||
return std::noop_coroutine();
|
||||
}
|
||||
}
|
||||
|
||||
void promise_type::return_value(Co&& next) {
|
||||
goal->trace("return_value(Co&&)");
|
||||
// Save old continuation.
|
||||
auto old_continuation = std::move(continuation);
|
||||
// We set next as our continuation.
|
||||
continuation = std::move(next);
|
||||
// We set next's goal, and thus it must not have one already.
|
||||
assert(!continuation->handle.promise().goal);
|
||||
continuation->handle.promise().goal = goal;
|
||||
// Nor can next have a continuation, as we set it to our old one.
|
||||
assert(!continuation->handle.promise().continuation);
|
||||
continuation->handle.promise().continuation = std::move(old_continuation);
|
||||
}
|
||||
|
||||
std::coroutine_handle<> nix::Goal::Co::await_suspend(handle_type caller) {
|
||||
assert(handle); // we must be a valid coroutine
|
||||
auto& p = handle.promise();
|
||||
assert(!p.continuation); // we must have no continuation
|
||||
assert(!p.goal); // we must not have a goal yet
|
||||
auto goal = caller.promise().goal;
|
||||
assert(goal);
|
||||
p.goal = goal;
|
||||
p.continuation = std::move(goal->top_co); // we set our continuation to be top_co (i.e. caller)
|
||||
goal->top_co = std::move(*this); // we set top_co to ourselves, don't use this anymore after this!
|
||||
return p.goal->top_co->handle; // we execute ourselves
|
||||
}
|
||||
|
||||
bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
|
||||
std::string s1 = a->key();
|
||||
std::string s2 = b->key();
|
||||
return s1 < s2;
|
||||
}
|
||||
|
||||
|
||||
BuildResult Goal::getBuildResult(const DerivedPath & req) const {
|
||||
BuildResult res { buildResult };
|
||||
|
||||
if (auto pbp = std::get_if<DerivedPath::Built>(&req)) {
|
||||
auto & bp = *pbp;
|
||||
|
||||
/* Because goals are in general shared between derived paths
|
||||
that share the same derivation, we need to filter their
|
||||
results to get back just the results we care about.
|
||||
*/
|
||||
|
||||
for (auto it = res.builtOutputs.begin(); it != res.builtOutputs.end();) {
|
||||
if (bp.outputs.contains(it->first))
|
||||
++it;
|
||||
else
|
||||
it = res.builtOutputs.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
|
||||
{
|
||||
if (goals.find(p) != goals.end())
|
||||
return;
|
||||
goals.insert(p);
|
||||
}
|
||||
|
||||
|
||||
void Goal::addWaitee(GoalPtr waitee)
|
||||
{
|
||||
waitees.insert(waitee);
|
||||
addToWeakGoals(waitee->waiters, shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
|
||||
{
|
||||
assert(waitees.count(waitee));
|
||||
waitees.erase(waitee);
|
||||
|
||||
trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size()));
|
||||
|
||||
if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
|
||||
|
||||
if (result == ecNoSubstituters) ++nrNoSubstituters;
|
||||
|
||||
if (result == ecIncompleteClosure) ++nrIncompleteClosure;
|
||||
|
||||
if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
|
||||
|
||||
/* If we failed and keepGoing is not set, we remove all
|
||||
remaining waitees. */
|
||||
for (auto & goal : waitees) {
|
||||
goal->waiters.extract(shared_from_this());
|
||||
}
|
||||
waitees.clear();
|
||||
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
}
|
||||
|
||||
Goal::Done Goal::amDone(ExitCode result, std::optional<Error> ex)
|
||||
{
|
||||
trace("done");
|
||||
assert(top_co);
|
||||
assert(exitCode == ecBusy);
|
||||
assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
|
||||
exitCode = result;
|
||||
|
||||
if (ex) {
|
||||
if (!waiters.empty())
|
||||
logError(ex->info());
|
||||
else
|
||||
this->ex = std::move(*ex);
|
||||
}
|
||||
|
||||
for (auto & i : waiters) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) goal->waiteeDone(shared_from_this(), result);
|
||||
}
|
||||
waiters.clear();
|
||||
worker.removeGoal(shared_from_this());
|
||||
|
||||
cleanup();
|
||||
|
||||
// We drop the continuation.
|
||||
// In `final_awaiter` this will signal that there is no more work to be done.
|
||||
top_co->handle.promise().continuation = {};
|
||||
|
||||
// won't return to caller because of logic in final_awaiter
|
||||
return Done{};
|
||||
}
|
||||
|
||||
|
||||
void Goal::trace(std::string_view s)
|
||||
{
|
||||
debug("%1%: %2%", name, s);
|
||||
}
|
||||
|
||||
void Goal::work()
|
||||
{
|
||||
assert(top_co);
|
||||
assert(top_co->handle);
|
||||
assert(top_co->handle.promise().alive);
|
||||
top_co->handle.resume();
|
||||
// We either should be in a state where we can be work()-ed again,
|
||||
// or we should be done.
|
||||
assert(top_co || exitCode != ecBusy);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
445
subprojects/libstore/build/goal.hh
Normal file
@@ -0,0 +1,445 @@
#pragma once
///@file

#include "store-api.hh"
#include "build-result.hh"

#include <coroutine>

namespace nix {

/**
 * Forward definition.
 */
struct Goal;
class Worker;

/**
 * A pointer to a goal.
 */
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;

struct CompareGoalPtrs {
    bool operator() (const GoalPtr & a, const GoalPtr & b) const;
};

/**
 * Set of goals.
 */
typedef std::set<GoalPtr, CompareGoalPtrs> Goals;
typedef std::set<WeakGoalPtr, std::owner_less<WeakGoalPtr>> WeakGoals;

/**
 * A map of paths to goals (and the other way around).
 */
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;

/**
 * Used as a hint to the worker on how to schedule a particular goal. For example,
 * builds are typically CPU- and memory-bound, while substitutions are I/O bound.
 * Using this information, the worker might decide to schedule more or fewer goals
 * of each category in parallel.
 */
enum struct JobCategory {
    /**
     * A build of a derivation; it will use CPU and disk resources.
     */
    Build,
    /**
     * A substitution of an arbitrary store object; it will use network resources.
     */
    Substitution,
};

struct Goal : public std::enable_shared_from_this<Goal>
{
    typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;

    /**
     * Backlink to the worker.
     */
    Worker & worker;

    /**
     * Goals that this goal is waiting for.
     */
    Goals waitees;

    /**
     * Goals waiting for this one to finish. Must use weak pointers
     * here to prevent cycles.
     */
    WeakGoals waiters;

    /**
     * Number of goals we are/were waiting for that have failed.
     */
    size_t nrFailed = 0;

    /**
     * Number of substitution goals we are/were waiting for that
     * failed because there are no substituters.
     */
    size_t nrNoSubstituters = 0;

    /**
     * Number of substitution goals we are/were waiting for that
     * failed because they had unsubstitutable references.
     */
    size_t nrIncompleteClosure = 0;

    /**
     * Name of this goal for debugging purposes.
     */
    std::string name;

    /**
     * Whether the goal is finished.
     */
    ExitCode exitCode = ecBusy;

protected:
    /**
     * Build result.
     */
    BuildResult buildResult;
public:

    /**
     * Suspend our goal and wait until we get @ref work()-ed again.
     * `co_await`-able by @ref Co.
     */
    struct Suspend {};

    /**
     * Return from the current coroutine and suspend our goal
     * if we're not busy anymore, or jump to the next coroutine
     * set to be executed/resumed.
     */
    struct Return {};

    /**
     * `co_return`-ing this will end the goal.
     * If you're not inside a coroutine, you can safely discard this.
     */
    struct [[nodiscard]] Done {
    private:
        Done(){}

        friend Goal;
    };

    // forward declaration of promise_type, see below
    struct promise_type;

    /**
     * Handle to a coroutine using @ref Co and @ref promise_type.
     */
    using handle_type = std::coroutine_handle<promise_type>;

    /**
     * C++20 coroutine wrapper for use in goal logic.
     * Coroutines are functions that use `co_await`/`co_return` (and `co_yield`,
     * which @ref Co does not support).
     *
     * @ref Co is meant to be used by methods of subclasses of @ref Goal.
     * The main functionality provided by `Co` is:
     * - `co_await Suspend{}`: suspends the goal.
     * - `co_await f()`: waits until `f()` finishes.
     * - `co_return f()`: tail-calls `f()`.
     * - `co_return Return{}`: ends the coroutine.
     *
     * The idea is that you implement the goal logic using coroutines,
     * and perform the core thing a goal can do, suspension, when you have
     * children you're waiting for.
     * Coroutines allow you to resume the work cleanly.
     *
     * @note Brief explanation of C++20 coroutines:
     * When you call `Co f()`, a `std::coroutine_handle<promise_type>` is created,
     * alongside its @ref promise_type.
     * There are suspension points at the beginning of the coroutine,
     * at every `co_await`, and at the final (possibly implicit) `co_return`.
     * Once suspended, you can resume the `std::coroutine_handle` by calling `coroutine_handle.resume()`.
     * Suspension points are implemented by passing a struct to the compiler
     * that implements `await_suspend`.
     * `await_suspend` can either say "cancel suspension", in which case execution resumes;
     * "suspend", in which case control is passed back to the caller of `coroutine_handle.resume()`,
     * or, in the case of the initial suspension, to the place where the coroutine function
     * is initially executed; or it can specify another coroutine to jump to, which is
     * how tail calls are implemented.
     *
     * @note Resources:
     * - https://lewissbaker.github.io/
     * - https://www.chiark.greenend.org.uk/~sgtatham/quasiblog/coroutines-c++20/
     * - https://www.scs.stanford.edu/~dm/blog/c++-coroutines.html
     *
     * @todo Allocate explicitly on the stack, since the HALO optimization doesn't really work here;
     * specifically, there's no way to uphold its requirements when trying to do
     * tail calls without using a trampoline, AFAICT.
     *
     * @todo Support returning data natively.
     */
    struct [[nodiscard]] Co {
        /**
         * The underlying handle.
         */
        handle_type handle;

        explicit Co(handle_type handle) : handle(handle) {};
        void operator=(Co&&);
        Co(Co&& rhs);
        ~Co();

        bool await_ready() { return false; };
        /**
         * When we `co_await` another @ref Co-returning coroutine,
         * we tell the caller of `caller_coroutine.resume()` to switch to our coroutine (@ref handle).
         * To make sure we return to the original coroutine, we set it as the continuation of our
         * coroutine. In @ref promise_type::final_awaiter we check whether it's set and, if so, return to it.
         *
         * To explain in more understandable terms:
         * when we `co_await Co_returning_function()`, this function is called on the resultant @ref Co of
         * the _called_ function, and C++ automatically passes the caller in.
         *
         * The `goal` field of @ref promise_type is also set here, by copying it from the caller.
         */
        std::coroutine_handle<> await_suspend(handle_type handle);
        void await_resume() {};
    };
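    /*
     * Illustrative sketch (`MyGoal` and `somePath` are hypothetical names,
     * following the pattern of `PathSubstitutionGoal::init()` below):
     *
     *     Goal::Co MyGoal::init()
     *     {
     *         addWaitee(worker.makePathSubstitutionGoal(somePath));
     *         if (!waitees.empty()) co_await Suspend{}; // resumed via waiteeDone()/wakeUp()
     *         co_return amDone(ecSuccess);
     *     }
     */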

    /**
     * Used on initial suspend; does the same as @ref std::suspend_always,
     * but asserts that everything has been set correctly.
     */
    struct InitialSuspend {
        /**
         * Handle of the coroutine that does the initial suspend.
         */
        handle_type handle;

        bool await_ready() { return false; };
        void await_suspend(handle_type handle_) {
            handle = handle_;
        }
        void await_resume() {
            assert(handle);
            assert(handle.promise().goal); // goal must be set
            assert(handle.promise().goal->top_co); // top_co of goal must be set
            assert(handle.promise().goal->top_co->handle == handle); // top_co of goal must be us
        }
    };

    /**
     * Promise type for coroutines defined using @ref Co.
     * Attached to the coroutine handle.
     */
    struct promise_type {
        /**
         * Either this is who called us, or it is who we will tail-call.
         * It is what we "jump" to once we are done.
         */
        std::optional<Co> continuation;

        /**
         * The goal that we're a part of.
         * Set either in @ref Co::await_suspend or in the constructor of @ref Goal.
         */
        Goal* goal = nullptr;

        /**
         * Set to false when destructed, to ensure we don't use a
         * destructed coroutine by accident.
         */
        bool alive = true;

        /**
         * The awaiter used by @ref final_suspend.
         */
        struct final_awaiter {
            bool await_ready() noexcept { return false; };
            /**
             * Here we execute our continuation, by passing it back to the caller.
             * The C++ compiler will create code that takes it and executes it promptly.
             * `h` is the handle for the coroutine that is finishing execution,
             * thus it must be destroyed.
             */
            std::coroutine_handle<> await_suspend(handle_type h) noexcept;
            void await_resume() noexcept { assert(false); };
        };

        /**
         * Called by compiler-generated code to construct the @ref Co
         * that is returned from a @ref Co-returning coroutine.
         */
        Co get_return_object();

        /**
         * Called by compiler-generated code before the body of the coroutine.
         * We use this opportunity to set the @ref goal field
         * and the `top_co` field of @ref Goal.
         */
        InitialSuspend initial_suspend() { return {}; };

        /**
         * Called on `co_return`. Creates @ref final_awaiter, which
         * either jumps to the continuation or suspends the goal.
         */
        final_awaiter final_suspend() noexcept { return {}; };

        /**
         * Does nothing, but provides an opportunity for
         * @ref final_suspend to happen.
         */
        void return_value(Return) {}

        /**
         * Does nothing, but provides an opportunity for
         * @ref final_suspend to happen.
         */
        void return_value(Done) {}

        /**
         * When "returning" another coroutine, what happens is that
         * we set it as our own continuation, so that once the final suspend
         * happens, we transfer control to it.
         * The original continuation we had is set as the continuation
         * of the coroutine passed in.
         * @ref final_suspend is called after this, and @ref final_awaiter will
         * pass control off to @ref continuation.
         *
         * If we already have a continuation, that continuation is set as
         * the continuation of the new continuation. Thus, the continuation
         * passed to @ref return_value must not have a continuation set.
         */
        void return_value(Co&&);
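        // Illustrative sketch (hypothetical; the goals in this commit use
        // `co_await` rather than tail calls): `co_return anotherCoroutine();`
        // would splice our current continuation onto `anotherCoroutine`'s, and
        // final_awaiter would then jump straight to it.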

        /**
         * If an exception is thrown inside a coroutine,
         * we re-throw it in the context of the "resumer" of the continuation.
         */
        void unhandled_exception() { throw; };

        /**
         * Allows awaiting a @ref Co.
         */
        Co&& await_transform(Co&& co) { return static_cast<Co&&>(co); }

        /**
         * Allows awaiting a @ref Suspend.
         * Always suspends.
         */
        std::suspend_always await_transform(Suspend) { return {}; };
    };

    /**
     * The coroutine currently being executed.
     * MUST be updated when switching the coroutine being executed.
     * This is used both for memory management and to resume the last
     * coroutine executed.
     * Destroying this should destroy all coroutines created for this goal.
     */
    std::optional<Co> top_co;

    /**
     * The entry point for the goal.
     */
    virtual Co init() = 0;

    /**
     * Wrapper around @ref init, since virtual functions
     * can't be used in constructors.
     */
    inline Co init_wrapper();

    /**
     * Signals that the goal is done.
     * `co_return` the result. If you're not inside a coroutine, you can ignore
     * the return value safely.
     */
    Done amDone(ExitCode result, std::optional<Error> ex = {});

    virtual void cleanup() { }

    /**
     * Project a `BuildResult` with just the information that pertains
     * to the given request.
     *
     * In general, goals may be aliased between multiple requests, and
     * the stored `BuildResult` has information for the union of all
     * requests. We don't want to leak what the other requests are for,
     * for the sake of both privacy and determinism, and this "safe accessor"
     * ensures we don't.
     */
    BuildResult getBuildResult(const DerivedPath &) const;

    /**
     * Exception containing an error message, if any.
     */
    std::optional<Error> ex;

    Goal(Worker & worker, DerivedPath path)
        : worker(worker), top_co(init_wrapper())
    {
        // top_co shouldn't have a goal already; it should be nullptr.
        assert(!top_co->handle.promise().goal);
        // We set it such that top_co can pass it down to its subcoroutines.
        top_co->handle.promise().goal = this;
    }

    virtual ~Goal()
    {
        trace("goal destroyed");
    }

    void work();

    void addWaitee(GoalPtr waitee);

    virtual void waiteeDone(GoalPtr waitee, ExitCode result);

    virtual void handleChildOutput(Descriptor fd, std::string_view data)
    {
        unreachable();
    }

    virtual void handleEOF(Descriptor fd)
    {
        unreachable();
    }

    void trace(std::string_view s);

    std::string getName() const
    {
        return name;
    }

    /**
     * Callback in case of a timeout. It should wake up its waiters,
     * get rid of any running child processes that are being monitored
     * by the worker (important!), etc.
     */
    virtual void timedOut(Error && ex) = 0;

    virtual std::string key() = 0;

    /**
     * @brief Hint for the scheduler, which concurrency limit applies.
     * @see JobCategory
     */
    virtual JobCategory jobCategory() const = 0;
};

void addToWeakGoals(WeakGoals & goals, GoalPtr p);

}

template<typename... ArgTypes>
struct std::coroutine_traits<nix::Goal::Co, ArgTypes...> {
    using promise_type = nix::Goal::promise_type;
};

nix::Goal::Co nix::Goal::init_wrapper() {
    co_return init();
}
302
subprojects/libstore/build/substitution-goal.cc
Normal file
@@ -0,0 +1,302 @@
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "nar-info.hh"
|
||||
#include "finally.hh"
|
||||
#include "signals.hh"
|
||||
#include <coroutine>
|
||||
|
||||
namespace nix {
|
||||
|
||||
PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
: Goal(worker, DerivedPath::Opaque { storePath })
|
||||
, storePath(storePath)
|
||||
, repair(repair)
|
||||
, ca(ca)
|
||||
{
|
||||
name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
|
||||
trace("created");
|
||||
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
|
||||
}
|
||||
|
||||
|
||||
PathSubstitutionGoal::~PathSubstitutionGoal()
|
||||
{
|
||||
cleanup();
|
||||
}
|
||||
|
||||
|
||||
Goal::Done PathSubstitutionGoal::done(
|
||||
ExitCode result,
|
||||
BuildResult::Status status,
|
||||
std::optional<std::string> errorMsg)
|
||||
{
|
||||
buildResult.status = status;
|
||||
if (errorMsg) {
|
||||
debug(*errorMsg);
|
||||
buildResult.errorMsg = *errorMsg;
|
||||
}
|
||||
return amDone(result);
|
||||
}
|
||||
|
||||
|
||||
Goal::Co PathSubstitutionGoal::init()
|
||||
{
|
||||
trace("init");
|
||||
|
||||
worker.store.addTempRoot(storePath);
|
||||
|
||||
/* If the path already exists we're done. */
|
||||
if (!repair && worker.store.isValidPath(storePath)) {
|
||||
co_return done(ecSuccess, BuildResult::AlreadyValid);
|
||||
}
|
||||
|
||||
if (settings.readOnlyMode)
|
||||
throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));
|
||||
|
||||
auto subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
|
||||
|
||||
bool substituterFailed = false;
|
||||
|
||||
for (auto sub : subs) {
|
||||
trace("trying next substituter");
|
||||
|
||||
cleanup();
|
||||
|
||||
/* The path the substituter refers to the path as. This will be
|
||||
* different when the stores have different names. */
|
||||
std::optional<StorePath> subPath;
|
||||
|
||||
/* Path info returned by the substituter's query info operation. */
|
||||
std::shared_ptr<const ValidPathInfo> info;
|
||||
|
||||
if (ca) {
|
||||
subPath = sub->makeFixedOutputPathFromCA(
|
||||
std::string { storePath.name() },
|
||||
ContentAddressWithReferences::withoutRefs(*ca));
|
||||
if (sub->storeDir == worker.store.storeDir)
|
||||
assert(subPath == storePath);
|
||||
} else if (sub->storeDir != worker.store.storeDir) {
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
// FIXME: make async
|
||||
info = sub->queryPathInfo(subPath ? *subPath : storePath);
|
||||
} catch (InvalidPath &) {
|
||||
continue;
|
||||
} catch (SubstituterDisabled & e) {
|
||||
if (settings.tryFallback) continue;
|
||||
else throw e;
|
||||
} catch (Error & e) {
|
||||
if (settings.tryFallback) {
|
||||
logError(e.info());
|
||||
continue;
|
||||
} else throw e;
|
||||
}
|
||||
|
||||
if (info->path != storePath) {
|
||||
if (info->isContentAddressed(*sub) && info->references.empty()) {
|
||||
auto info2 = std::make_shared<ValidPathInfo>(*info);
|
||||
info2->path = storePath;
|
||||
info = info2;
|
||||
} else {
|
||||
printError("asked '%s' for '%s' but got '%s'",
|
||||
sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
/* Update the total expected download size. */
|
||||
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
|
||||
|
||||
maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize);
|
||||
|
||||
maintainExpectedDownload =
|
||||
narInfo && narInfo->fileSize
|
||||
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
|
||||
: nullptr;
|
||||
|
||||
worker.updateProgress();
|
||||
|
||||
/* Bail out early if this substituter lacks a valid
|
||||
signature. LocalStore::addToStore() also checks for this, but
|
||||
only after we've downloaded the path. */
|
||||
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
|
||||
{
|
||||
warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
|
||||
worker.store.printStorePath(storePath), sub->getUri());
|
||||
continue;
|
||||
}
|
||||
|
||||
/* To maintain the closure invariant, we first have to realise the
|
||||
paths referenced by this one. */
|
||||
for (auto & i : info->references)
|
||||
if (i != storePath) /* ignore self-references */
|
||||
addWaitee(worker.makePathSubstitutionGoal(i));
|
||||
|
||||
if (!waitees.empty()) co_await Suspend{};
|
||||
|
||||
// FIXME: consider returning boolean instead of passing in reference
|
||||
bool out = false; // is mutated by tryToRun
|
||||
co_await tryToRun(subPath ? *subPath : storePath, sub, info, out);
|
||||
substituterFailed = substituterFailed || out;
|
||||
}
|
||||
|
||||
/* None left. Terminate this goal and let someone else deal
|
||||
with it. */
|
||||
|
||||
if (substituterFailed) {
|
||||
worker.failedSubstitutions++;
|
||||
worker.updateProgress();
|
||||
}
|
||||
|
||||
/* Hack: don't indicate failure if there were no substituters.
|
||||
In that case the calling derivation should just do a
|
||||
build. */
|
||||
co_return done(
|
||||
substituterFailed ? ecFailed : ecNoSubstituters,
|
||||
BuildResult::NoSubstituters,
|
||||
fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));
|
||||
}
|
||||
|
||||
|
||||
Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
|
||||
{
|
||||
trace("all references realised");
|
||||
|
||||
if (nrFailed > 0) {
|
||||
co_return done(
|
||||
nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed,
|
||||
BuildResult::DependencyFailed,
|
||||
fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)));
|
||||
}
|
||||
|
||||
for (auto & i : info->references)
|
||||
if (i != storePath) /* ignore self-references */
|
||||
assert(worker.store.isValidPath(i));
|
||||
|
||||
worker.wakeUp(shared_from_this());
|
||||
co_await Suspend{};
|
||||
|
||||
trace("trying to run");
|
||||
|
||||
/* Make sure that we are allowed to start a substitution. Note that even
|
||||
if maxSubstitutionJobs == 0, we still allow a substituter to run. This
|
||||
prevents infinite waiting. */
|
||||
while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
|
||||
worker.waitForBuildSlot(shared_from_this());
|
||||
co_await Suspend{};
|
||||
}
|
||||
|
||||
auto maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
|
||||
worker.updateProgress();
|
||||
|
||||
#ifndef _WIN32
|
||||
outPipe.create();
|
||||
#else
|
||||
outPipe.createAsyncPipe(worker.ioport.get());
|
||||
#endif
|
||||
|
||||
auto promise = std::promise<void>();
|
||||
|
||||
thr = std::thread([this, &promise, &subPath, &sub]() {
|
||||
try {
|
||||
ReceiveInterrupts receiveInterrupts;
|
||||
|
||||
/* Wake up the worker loop when we're done. */
|
||||
Finally updateStats([this]() { outPipe.writeSide.close(); });
|
||||
|
||||
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
|
||||
PushActivity pact(act.id);
|
||||
|
||||
copyStorePath(*sub, worker.store,
|
||||
subPath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);
|
||||
|
||||
promise.set_value();
|
||||
} catch (...) {
|
||||
promise.set_exception(std::current_exception());
|
||||
}
|
||||
});
|
||||
|
||||
worker.childStarted(shared_from_this(), {
|
||||
#ifndef _WIN32
|
||||
outPipe.readSide.get()
|
||||
#else
|
||||
&outPipe
|
||||
#endif
|
||||
}, true, false);
|
||||
|
||||
co_await Suspend{};
|
||||
|
||||
trace("substitute finished");
|
||||
|
||||
thr.join();
|
||||
worker.childTerminated(this);
|
||||
|
||||
try {
|
||||
promise.get_future().get();
|
||||
} catch (std::exception & e) {
|
||||
printError(e.what());
|
||||
|
||||
/* Cause the parent build to fail unless --fallback is given,
|
||||
or the substitute has disappeared. The latter case behaves
|
||||
the same as the substitute never having existed in the
|
||||
first place. */
|
||||
try {
|
||||
throw;
|
||||
} catch (SubstituteGone &) {
|
||||
} catch (...) {
|
||||
substituterFailed = true;
|
||||
}
|
||||
|
||||
co_return Return{};
|
||||
}
|
||||
|
||||
worker.markContentsGood(storePath);
|
||||
|
||||
printMsg(lvlChatty, "substitution of path '%s' succeeded", worker.store.printStorePath(storePath));
|
||||
|
||||
maintainRunningSubstitutions.reset();
|
||||
|
||||
maintainExpectedSubstitutions.reset();
|
||||
worker.doneSubstitutions++;
|
||||
|
||||
if (maintainExpectedDownload) {
|
||||
auto fileSize = maintainExpectedDownload->delta;
|
||||
maintainExpectedDownload.reset();
|
||||
worker.doneDownloadSize += fileSize;
|
||||
}
|
||||
|
||||
assert(maintainExpectedNar);
|
||||
worker.doneNarSize += maintainExpectedNar->delta;
|
||||
maintainExpectedNar.reset();
|
||||
|
||||
worker.updateProgress();
|
||||
|
||||
co_return done(ecSuccess, BuildResult::Substituted);
|
||||
}
|
||||
|
||||
|
||||
void PathSubstitutionGoal::handleEOF(Descriptor fd)
|
||||
{
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
void PathSubstitutionGoal::cleanup()
|
||||
{
|
||||
try {
|
||||
if (thr.joinable()) {
|
||||
// FIXME: signal worker thread to quit.
|
||||
thr.join();
|
||||
worker.childTerminated(this);
|
||||
}
|
||||
|
||||
outPipe.close();
|
||||
} catch (...) {
|
||||
ignoreExceptionInDestructor();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
86
subprojects/libstore/build/substitution-goal.hh
Normal file
@@ -0,0 +1,86 @@
#pragma once
///@file

#include "worker.hh"
#include "store-api.hh"
#include "goal.hh"
#include "muxable-pipe.hh"
#include <coroutine>
#include <future>
#include <source_location>

namespace nix {

struct PathSubstitutionGoal : public Goal
{
    /**
     * The store path that should be realised through a substitute.
     */
    StorePath storePath;

    /**
     * Whether to try to repair a valid path.
     */
    RepairFlag repair;

    /**
     * Pipe for the substituter's standard output.
     */
    MuxablePipe outPipe;

    /**
     * The substituter thread.
     */
    std::thread thr;

    std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
        maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;

    /**
     * Content address for recomputing store path
     */
    std::optional<ContentAddress> ca;

    Done done(
        ExitCode result,
        BuildResult::Status status,
        std::optional<std::string> errorMsg = {});

public:
    PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
    ~PathSubstitutionGoal();

    void timedOut(Error && ex) override { unreachable(); };

    /**
     * We prepend "a$" to the key name to ensure substitution goals
     * happen before derivation goals.
     */
    std::string key() override
    {
        return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath);
    }
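    // Illustrative example (hypothetical path): for <store>/<hash>-hello this
    // yields "a$hello$<store>/<hash>-hello"; CompareGoalPtrs orders goals by
    // this string, so substitution keys sort ahead of derivation goal keys.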

    /**
     * The states.
     */
    Co init() override;
    Co gotInfo();
    Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
    Co finished();

    /**
     * Callback used by the worker to write to the log.
     */
    void handleChildOutput(Descriptor fd, std::string_view data) override {};
    void handleEOF(Descriptor fd) override;

    /* Called by destructor, can't be overridden */
    void cleanup() override final;

    JobCategory jobCategory() const override {
        return JobCategory::Substitution;
    };
};

}
555
subprojects/libstore/build/worker.cc
Normal file
@@ -0,0 +1,555 @@
#include "local-store.hh"
|
||||
#include "machines.hh"
|
||||
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "drv-output-substitution-goal.hh"
|
||||
#include "derivation-goal.hh"
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
# include "local-derivation-goal.hh"
|
||||
# include "hook-instance.hh"
|
||||
#endif
|
||||
#include "signals.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
Worker::Worker(Store & store, Store & evalStore)
|
||||
: act(*logger, actRealise)
|
||||
, actDerivations(*logger, actBuilds)
|
||||
, actSubstitutions(*logger, actCopyPaths)
|
||||
, store(store)
|
||||
, evalStore(evalStore)
|
||||
{
|
||||
nrLocalBuilds = 0;
|
||||
nrSubstitutions = 0;
|
||||
lastWokenUp = steady_time_point::min();
|
||||
permanentFailure = false;
|
||||
timedOut = false;
|
||||
hashMismatch = false;
|
||||
checkMismatch = false;
|
||||
}
|
||||
|
||||
|
||||
Worker::~Worker()
|
||||
{
|
||||
/* Explicitly get rid of all strong pointers now. After this all
|
||||
goals that refer to this worker should be gone. (Otherwise we
|
||||
are in trouble, since goals may call childTerminated() etc. in
|
||||
their destructors). */
|
||||
topGoals.clear();
|
||||
|
||||
assert(expectedSubstitutions == 0);
|
||||
assert(expectedDownloadSize == 0);
|
||||
assert(expectedNarSize == 0);
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
|
||||
const StorePath & drvPath,
|
||||
const OutputsSpec & wantedOutputs,
|
||||
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
|
||||
{
|
||||
std::weak_ptr<DerivationGoal> & goal_weak = derivationGoals[drvPath];
|
||||
std::shared_ptr<DerivationGoal> goal = goal_weak.lock();
|
||||
if (!goal) {
|
||||
goal = mkDrvGoal();
|
||||
goal_weak = goal;
|
||||
wakeUp(goal);
|
||||
} else {
|
||||
goal->addWantedOutputs(wantedOutputs);
|
||||
}
|
||||
return goal;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
|
||||
const OutputsSpec & wantedOutputs, BuildMode buildMode)
|
||||
{
|
||||
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
|
||||
return
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
dynamic_cast<LocalStore *>(&store)
|
||||
? std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
|
||||
:
|
||||
#endif
|
||||
std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
|
||||
});
|
||||
}
|
||||
|
||||
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
|
||||
const BasicDerivation & drv, const OutputsSpec & wantedOutputs, BuildMode buildMode)
|
||||
{
|
||||
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
|
||||
return
|
||||
#ifndef _WIN32 // TODO Enable building on Windows
|
||||
dynamic_cast<LocalStore *>(&store)
|
||||
? std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
|
||||
:
|
||||
#endif
|
||||
std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
{
|
||||
std::weak_ptr<PathSubstitutionGoal> & goal_weak = substitutionGoals[path];
|
||||
auto goal = goal_weak.lock(); // FIXME
|
||||
if (!goal) {
|
||||
goal = std::make_shared<PathSubstitutionGoal>(path, *this, repair, ca);
|
||||
goal_weak = goal;
|
||||
wakeUp(goal);
|
||||
}
|
||||
return goal;
|
||||
}
|
||||
|
||||
|
||||
std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
{
|
||||
std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
|
||||
auto goal = goal_weak.lock(); // FIXME
|
||||
if (!goal) {
|
||||
goal = std::make_shared<DrvOutputSubstitutionGoal>(id, *this, repair, ca);
|
||||
goal_weak = goal;
|
||||
wakeUp(goal);
|
||||
}
|
||||
return goal;
|
||||
}
|
||||
|
||||
|
||||
GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[&](const DerivedPath::Built & bfd) -> GoalPtr {
|
||||
if (auto bop = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath))
|
||||
return makeDerivationGoal(bop->path, bfd.outputs, buildMode);
|
||||
else
|
||||
throw UnimplementedError("Building dynamic derivations in one shot is not yet implemented.");
|
||||
},
|
||||
[&](const DerivedPath::Opaque & bo) -> GoalPtr {
|
||||
return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
|
||||
},
|
||||
}, req.raw());
|
||||
}
|
||||
|
||||
|
||||
template<typename K, typename G>
|
||||
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
|
||||
{
|
||||
/* !!! inefficient */
|
||||
for (auto i = goalMap.begin();
|
||||
i != goalMap.end(); )
|
||||
if (i->second.lock() == goal) {
|
||||
auto j = i; ++j;
|
||||
goalMap.erase(i);
|
||||
i = j;
|
||||
}
|
||||
else ++i;
|
||||
}
|
||||
|
||||
|
||||
void Worker::removeGoal(GoalPtr goal)
|
||||
{
|
||||
if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
|
||||
nix::removeGoal(drvGoal, derivationGoals);
|
||||
else
|
||||
if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
|
||||
nix::removeGoal(subGoal, substitutionGoals);
|
||||
else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
|
||||
nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
|
||||
else
|
||||
assert(false);
|
||||
|
||||
if (topGoals.find(goal) != topGoals.end()) {
|
||||
topGoals.erase(goal);
|
||||
/* If a top-level goal failed, then kill all other goals
|
||||
(unless keepGoing was set). */
|
||||
if (goal->exitCode == Goal::ecFailed && !settings.keepGoing)
|
||||
topGoals.clear();
|
||||
}
|
||||
|
||||
/* Wake up goals waiting for any goal to finish. */
|
||||
for (auto & i : waitingForAnyGoal) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
}
|
||||
|
||||
waitingForAnyGoal.clear();
|
||||
}
|
||||
|
||||
|
||||
void Worker::wakeUp(GoalPtr goal)
|
||||
{
|
||||
goal->trace("woken up");
|
||||
addToWeakGoals(awake, goal);
|
||||
}
|
||||
|
||||
|
||||
size_t Worker::getNrLocalBuilds()
|
||||
{
|
||||
return nrLocalBuilds;
|
||||
}
|
||||
|
||||
|
||||
size_t Worker::getNrSubstitutions()
|
||||
{
|
||||
return nrSubstitutions;
|
||||
}
|
||||
|
||||
|
||||
void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
|
||||
bool inBuildSlot, bool respectTimeouts)
|
||||
{
|
||||
Child child;
|
||||
child.goal = goal;
|
||||
child.goal2 = goal.get();
|
||||
child.channels = channels;
|
||||
child.timeStarted = child.lastOutput = steady_time_point::clock::now();
|
||||
child.inBuildSlot = inBuildSlot;
|
||||
child.respectTimeouts = respectTimeouts;
|
||||
children.emplace_back(child);
|
||||
if (inBuildSlot) {
|
||||
switch (goal->jobCategory()) {
|
||||
case JobCategory::Substitution:
|
||||
nrSubstitutions++;
|
||||
break;
|
||||
case JobCategory::Build:
|
||||
nrLocalBuilds++;
|
||||
break;
|
||||
default:
|
||||
unreachable();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Worker::childTerminated(Goal * goal, bool wakeSleepers)
|
||||
{
|
||||
auto i = std::find_if(children.begin(), children.end(),
|
||||
[&](const Child & child) { return child.goal2 == goal; });
|
||||
if (i == children.end()) return;
|
||||
|
||||
if (i->inBuildSlot) {
|
||||
switch (goal->jobCategory()) {
|
||||
case JobCategory::Substitution:
|
||||
assert(nrSubstitutions > 0);
|
||||
nrSubstitutions--;
|
||||
break;
|
||||
case JobCategory::Build:
|
||||
assert(nrLocalBuilds > 0);
|
||||
nrLocalBuilds--;
|
||||
break;
|
||||
default:
|
||||
unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
children.erase(i);
|
||||
|
||||
if (wakeSleepers) {
|
||||
|
||||
/* Wake up goals waiting for a build slot. */
|
||||
for (auto & j : wantingToBuild) {
|
||||
GoalPtr goal = j.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
}
|
||||
|
||||
wantingToBuild.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForBuildSlot(GoalPtr goal)
|
||||
{
|
||||
goal->trace("wait for build slot");
|
||||
bool isSubstitutionGoal = goal->jobCategory() == JobCategory::Substitution;
|
||||
if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs) ||
|
||||
(isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
|
||||
wakeUp(goal); /* we can do it right away */
|
||||
else
|
||||
addToWeakGoals(wantingToBuild, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForAnyGoal(GoalPtr goal)
|
||||
{
|
||||
debug("wait for any goal");
|
||||
addToWeakGoals(waitingForAnyGoal, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::waitForAWhile(GoalPtr goal)
|
||||
{
|
||||
debug("wait for a while");
|
||||
addToWeakGoals(waitingForAWhile, goal);
|
||||
}
|
||||
|
||||
|
||||
void Worker::run(const Goals & _topGoals)
|
||||
{
|
||||
std::vector<nix::DerivedPath> topPaths;
|
||||
|
||||
for (auto & i : _topGoals) {
|
||||
topGoals.insert(i);
|
||||
if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
|
||||
topPaths.push_back(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(goal->drvPath),
|
||||
.outputs = goal->wantedOutputs,
|
||||
});
|
||||
} else
|
||||
if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
|
||||
topPaths.push_back(DerivedPath::Opaque{goal->storePath});
|
||||
}
|
||||
}
|
||||
|
||||
/* Call queryMissing() to efficiently query substitutes. */
|
||||
StorePathSet willBuild, willSubstitute, unknown;
|
||||
uint64_t downloadSize, narSize;
|
||||
store.queryMissing(topPaths, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||
|
||||
debug("entered goal loop");
|
||||
|
||||
while (1) {
|
||||
|
||||
checkInterrupt();
|
||||
|
||||
// TODO GC interface?
|
||||
if (auto localStore = dynamic_cast<LocalStore *>(&store))
|
||||
localStore->autoGC(false);
|
||||
|
||||
/* Call every wake goal (in the ordering established by
|
||||
CompareGoalPtrs). */
|
||||
while (!awake.empty() && !topGoals.empty()) {
|
||||
Goals awake2;
|
||||
for (auto & i : awake) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) awake2.insert(goal);
|
||||
}
|
||||
awake.clear();
|
||||
for (auto & goal : awake2) {
|
||||
checkInterrupt();
|
||||
goal->work();
|
||||
if (topGoals.empty()) break; // stuff may have been cancelled
|
||||
}
|
||||
}
|
||||
|
||||
if (topGoals.empty()) break;
|
||||
|
||||
/* Wait for input. */
|
||||
if (!children.empty() || !waitingForAWhile.empty())
|
||||
waitForInput();
|
||||
else if (awake.empty() && 0U == settings.maxBuildJobs) {
|
||||
if (getMachines().empty())
|
||||
throw Error(
|
||||
R"(
|
||||
Unable to start any build;
|
||||
either increase '--max-jobs' or enable remote builds.
|
||||
|
||||
For more information run 'man nix.conf' and search for '/machines'.
|
||||
)"
|
||||
);
|
||||
else
|
||||
throw Error(
|
||||
R"(
|
||||
Unable to start any build;
|
||||
remote machines may not have all required system features.
|
||||
|
||||
For more information run 'man nix.conf' and search for '/machines'.
|
||||
)"
|
||||
);
|
||||
|
||||
} else assert(!awake.empty());
|
||||
}
|
||||
|
||||
/* If --keep-going is not set, it's possible that the main goal
|
||||
exited while some of its subgoals were still active. But if
|
||||
--keep-going *is* set, then they must all be finished now. */
|
||||
assert(!settings.keepGoing || awake.empty());
|
||||
assert(!settings.keepGoing || wantingToBuild.empty());
|
||||
assert(!settings.keepGoing || children.empty());
|
||||
}
|
||||
|
||||
void Worker::waitForInput()
|
||||
{
|
||||
printMsg(lvlVomit, "waiting for children");
|
||||
|
||||
/* Process output from the file descriptors attached to the
|
||||
children, namely log output and output path creation commands.
|
||||
We also use this to detect child termination: if we get EOF on
|
||||
the logger pipe of a build, we assume that the builder has
|
||||
terminated. */
|
||||
|
||||
bool useTimeout = false;
|
||||
long timeout = 0;
|
||||
auto before = steady_time_point::clock::now();
|
||||
|
||||
/* If we're monitoring for silence on stdout/stderr, or if there
|
||||
is a build timeout, then wait for input until the first
|
||||
deadline for any child. */
|
||||
auto nearest = steady_time_point::max(); // nearest deadline
|
||||
if (settings.minFree.get() != 0)
|
||||
// Periodicallty wake up to see if we need to run the garbage collector.
|
||||
nearest = before + std::chrono::seconds(10);
|
||||
for (auto & i : children) {
|
||||
if (!i.respectTimeouts) continue;
|
||||
if (0 != settings.maxSilentTime)
|
||||
nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
|
||||
if (0 != settings.buildTimeout)
|
||||
nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
|
||||
}
|
||||
if (nearest != steady_time_point::max()) {
|
||||
timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
|
||||
useTimeout = true;
|
||||
}
|
||||
|
||||
/* If we are polling goals that are waiting for a lock, then wake
|
||||
up after a few seconds at most. */
|
||||
if (!waitingForAWhile.empty()) {
|
||||
useTimeout = true;
|
||||
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
|
||||
timeout = std::max(1L,
|
||||
(long) std::chrono::duration_cast<std::chrono::seconds>(
|
||||
lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
|
||||
} else lastWokenUp = steady_time_point::min();
|
||||
|
||||
if (useTimeout)
|
||||
vomit("sleeping %d seconds", timeout);
|
||||
|
||||
MuxablePipePollState state;
|
||||
|
||||
#ifndef _WIN32
|
||||
/* Use select() to wait for the input side of any logger pipe to
|
||||
become `available'. Note that `available' (i.e., non-blocking)
|
||||
includes EOF. */
|
||||
for (auto & i : children) {
|
||||
for (auto & j : i.channels) {
|
||||
state.pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
|
||||
state.fdToPollStatus[j] = state.pollStatus.size() - 1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
state.poll(
|
||||
#ifdef _WIN32
|
||||
ioport.get(),
|
||||
#endif
|
||||
useTimeout ? (std::optional { timeout * 1000 }) : std::nullopt);
|
||||
|
||||
auto after = steady_time_point::clock::now();
|
||||
|
||||
/* Process all available file descriptors. FIXME: this is
|
||||
O(children * fds). */
|
||||
decltype(children)::iterator i;
|
||||
for (auto j = children.begin(); j != children.end(); j = i) {
|
||||
i = std::next(j);
|
||||
|
||||
checkInterrupt();
|
||||
|
||||
GoalPtr goal = j->goal.lock();
|
||||
assert(goal);
|
||||
|
||||
state.iterate(
|
||||
j->channels,
|
||||
[&](Descriptor k, std::string_view data) {
|
||||
printMsg(lvlVomit, "%1%: read %2% bytes",
|
||||
goal->getName(), data.size());
|
||||
j->lastOutput = after;
|
||||
goal->handleChildOutput(k, data);
|
||||
},
|
||||
[&](Descriptor k) {
|
||||
debug("%1%: got EOF", goal->getName());
|
||||
goal->handleEOF(k);
|
||||
});
|
||||
|
||||
if (goal->exitCode == Goal::ecBusy &&
|
||||
0 != settings.maxSilentTime &&
|
||||
j->respectTimeouts &&
|
||||
after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
|
||||
{
|
||||
goal->timedOut(Error(
|
||||
"%1% timed out after %2% seconds of silence",
|
||||
goal->getName(), settings.maxSilentTime));
|
||||
}
|
||||
|
||||
else if (goal->exitCode == Goal::ecBusy &&
|
||||
0 != settings.buildTimeout &&
|
||||
j->respectTimeouts &&
|
||||
after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
|
||||
{
|
||||
goal->timedOut(Error(
|
||||
"%1% timed out after %2% seconds",
|
||||
goal->getName(), settings.buildTimeout));
|
||||
}
|
||||
}
|
||||
|
||||
if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
|
||||
lastWokenUp = after;
|
||||
for (auto & i : waitingForAWhile) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) wakeUp(goal);
|
||||
}
|
||||
waitingForAWhile.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned int Worker::failingExitStatus()
|
||||
{
|
||||
// See API docs in header for explanation
|
||||
unsigned int mask = 0;
|
||||
bool buildFailure = permanentFailure || timedOut || hashMismatch;
|
||||
if (buildFailure)
|
||||
mask |= 0x04; // 100
|
||||
if (timedOut)
|
||||
mask |= 0x01; // 101
|
||||
if (hashMismatch)
|
||||
mask |= 0x02; // 102
|
||||
if (checkMismatch) {
|
||||
mask |= 0x08; // 104
|
||||
}
|
||||
|
||||
if (mask)
|
||||
mask |= 0x60;
|
||||
return mask ? mask : 1;
|
||||
}
|
||||
|
||||
|
||||
bool Worker::pathContentsGood(const StorePath & path)
|
||||
{
|
||||
auto i = pathContentsGoodCache.find(path);
|
||||
if (i != pathContentsGoodCache.end()) return i->second;
|
||||
printInfo("checking path '%s'...", store.printStorePath(path));
|
||||
auto info = store.queryPathInfo(path);
|
||||
bool res;
|
||||
if (!pathExists(store.printStorePath(path)))
|
||||
res = false;
|
||||
else {
|
||||
auto current = hashPath(
|
||||
{store.getFSAccessor(), CanonPath(store.printStorePath(path))},
|
||||
FileIngestionMethod::NixArchive, info->narHash.algo).first;
|
||||
Hash nullHash(HashAlgorithm::SHA256);
|
||||
res = info->narHash == nullHash || info->narHash == current;
|
||||
}
|
||||
pathContentsGoodCache.insert_or_assign(path, res);
|
||||
if (!res)
|
||||
printError("path '%s' is corrupted or missing!", store.printStorePath(path));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
void Worker::markContentsGood(const StorePath & path)
|
||||
{
|
||||
pathContentsGoodCache.insert_or_assign(path, true);
|
||||
}
|
||||
|
||||
|
||||
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal)
|
||||
{
|
||||
return subGoal;
|
||||
}
|
||||
|
||||
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal)
|
||||
{
|
||||
return subGoal;
|
||||
}
|
||||
|
||||
}
|
||||
330
subprojects/libstore/build/worker.hh
Normal file
@@ -0,0 +1,330 @@
#pragma once
///@file

#include "types.hh"
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
#include "muxable-pipe.hh"

#include <future>
#include <thread>

namespace nix {

/* Forward definition. */
struct DerivationGoal;
struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;

/**
 * Workaround for not being able to declare something like
 *
 * ```c++
 * class PathSubstitutionGoal : public Goal;
 * ```
 *
 * even when Goal is a complete type.
 *
 * This is still a static cast. The purpose of exporting it is to define it in
 * a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
 * is opaque.
 */
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
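// Illustrative use (hypothetical call site): code that sees `Goal` only as an
// opaque type can still register a substitution goal as a waitee, e.g.
//
//     addWaitee(upcast_goal(worker.makePathSubstitutionGoal(path)));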

typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;

/**
 * A mapping that remembers, for each child process, the goal it
 * belongs to, and the comm channels for receiving log data and output
 * path creation commands.
 */
struct Child
{
    WeakGoalPtr goal;
    Goal * goal2; // ugly hackery
    std::set<MuxablePipePollState::CommChannel> channels;
    bool respectTimeouts;
    bool inBuildSlot;
    /**
     * Time we last got output on stdout/stderr.
     */
    steady_time_point lastOutput;
    steady_time_point timeStarted;
};

#ifndef _WIN32 // TODO Enable building on Windows
/* Forward definition. */
struct HookInstance;
#endif

/**
 * Coordinates one or more realisations and their interdependencies.
 */
class Worker
{
private:

    /* Note: the worker should only have strong pointers to the
       top-level goals. */

    /**
     * The top-level goals of the worker.
     */
    Goals topGoals;

    /**
     * Goals that are ready to do some work.
     */
    WeakGoals awake;

    /**
     * Goals waiting for a build slot.
     */
    WeakGoals wantingToBuild;

    /**
     * Child processes currently running.
     */
    std::list<Child> children;

    /**
     * Number of build slots occupied. This includes local builds but does not
     * include substitutions or remote builds via the build hook.
     */
    size_t nrLocalBuilds;

    /**
     * Number of substitution slots occupied.
     */
    size_t nrSubstitutions;

    /**
     * Maps used to prevent multiple instantiations of a goal for the
     * same derivation / path.
     */
    std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
    std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
    std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;

    /**
     * Goals waiting for busy paths to be unlocked.
     */
    WeakGoals waitingForAnyGoal;

    /**
     * Goals sleeping for a few seconds (polling a lock).
     */
    WeakGoals waitingForAWhile;

    /**
     * Last time the goals in `waitingForAWhile` were woken up.
     */
    steady_time_point lastWokenUp;

    /**
     * Cache for pathContentsGood().
     */
    std::map<StorePath, bool> pathContentsGoodCache;

public:

    const Activity act;
    const Activity actDerivations;
    const Activity actSubstitutions;

    /**
     * Set if at least one derivation had a BuildError (i.e. permanent
     * failure).
     */
    bool permanentFailure;

    /**
     * Set if at least one derivation had a timeout.
     */
    bool timedOut;

    /**
     * Set if at least one derivation fails with a hash mismatch.
     */
    bool hashMismatch;

    /**
     * Set if at least one derivation is not deterministic in check mode.
     */
    bool checkMismatch;

#ifdef _WIN32
    AutoCloseFD ioport;
#endif

    Store & store;
    Store & evalStore;

#ifndef _WIN32 // TODO Enable building on Windows
    std::unique_ptr<HookInstance> hook;
#endif

    uint64_t expectedBuilds = 0;
    uint64_t doneBuilds = 0;
    uint64_t failedBuilds = 0;
    uint64_t runningBuilds = 0;

    uint64_t expectedSubstitutions = 0;
    uint64_t doneSubstitutions = 0;
    uint64_t failedSubstitutions = 0;
    uint64_t runningSubstitutions = 0;
    uint64_t expectedDownloadSize = 0;
    uint64_t doneDownloadSize = 0;
    uint64_t expectedNarSize = 0;
    uint64_t doneNarSize = 0;

    /**
     * Whether to ask the build hook if it can build a derivation. If
     * it answers with "decline-permanently", we don't try again.
     */
    bool tryBuildHook = true;

    Worker(Store & store, Store & evalStore);
    ~Worker();

    /**
     * Make a goal (with caching).
     */

    /**
     * @ref DerivationGoal "derivation goal"
     */
private:
    std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
        const StorePath & drvPath, const OutputsSpec & wantedOutputs,
        std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
    std::shared_ptr<DerivationGoal> makeDerivationGoal(
        const StorePath & drvPath,
        const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
    std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
        const StorePath & drvPath, const BasicDerivation & drv,
        const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);

    /**
     * @ref SubstitutionGoal "substitution goal"
     */
    std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
    std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

    /**
     * Make a goal corresponding to the `DerivedPath`.
     *
     * It will be a `DerivationGoal` for a `DerivedPath::Built` or
     * a `SubstitutionGoal` for a `DerivedPath::Opaque`.
     */
    GoalPtr makeGoal(const DerivedPath & req, BuildMode buildMode = bmNormal);

    /**
     * Remove a dead goal.
     */
    void removeGoal(GoalPtr goal);

    /**
     * Wake up a goal (i.e., there is something for it to do).
     */
    void wakeUp(GoalPtr goal);

    /**
     * Return the number of local build processes currently running (but not
     * remote builds via the build hook).
     */
    size_t getNrLocalBuilds();

    /**
     * Return the number of substitution processes currently running.
     */
    size_t getNrSubstitutions();

    /**
     * Registers a running child process. `inBuildSlot` means that
     * the process counts towards the jobs limit.
     */
    void childStarted(GoalPtr goal, const std::set<MuxablePipePollState::CommChannel> & channels,
        bool inBuildSlot, bool respectTimeouts);

    /**
     * Unregisters a running child process. `wakeSleepers` should be
     * false if there is no sense in waking up goals that are sleeping
     * because they can't run yet (e.g., there is no free build slot,
     * or the hook would still say `postpone`).
     */
    void childTerminated(Goal * goal, bool wakeSleepers = true);

    /**
     * Put `goal` to sleep until a build slot becomes available (which
     * might be right away).
     */
    void waitForBuildSlot(GoalPtr goal);

    /**
     * Wait for any goal to finish. Pretty indiscriminate way to
     * wait for some resource that some other goal is holding.
     */
    void waitForAnyGoal(GoalPtr goal);

    /**
     * Wait for a few seconds and then retry this goal. Used when
     * waiting for a lock held by another process. This kind of
     * polling is inefficient, but POSIX doesn't really provide a way
     * to wait for multiple locks in the main select() loop.
     */
    void waitForAWhile(GoalPtr goal);

    /**
     * Loop until the specified top-level goals have finished.
     */
    void run(const Goals & topGoals);

    /**
     * Wait for input to become available.
     */
    void waitForInput();

    /**
     * The exit status in case of failure.
     *
     * In the case of a build failure, the returned value follows this
     * bitmask:
     *
     * ```
     * 0b1100100
     *      ^^^^
     *      |||`- timeout
     *      ||`-- output hash mismatch
     *      |`--- build failure
     *      `---- not deterministic
     * ```
     *
     * In other words, the failure code is at least 100 (0b1100100), but
     * might also be greater.
     *
     * Otherwise (no build failure, but some other sort of failure by
     * assumption), this returned value is 1.
     */
    unsigned int failingExitStatus();
|
||||
/**
|
||||
* Check whether the given valid path exists and has the right
|
||||
* contents.
|
||||
*/
|
||||
bool pathContentsGood(const StorePath & path);
|
||||
|
||||
void markContentsGood(const StorePath & path);
|
||||
|
||||
void updateProgress()
|
||||
{
|
||||
actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
|
||||
actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
|
||||
act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
|
||||
act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
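The bitmask documented for failingExitStatus() is easiest to check in code. Below is a small illustrative decoder; the function name is made up and not part of this diff, and the bit constants simply mirror the comment above.

#include <cstdio>

// Hypothetical decoder for the failure bitmask documented above.
void explainExitStatus(unsigned int status)
{
    if (status == 1) { std::printf("non-build failure\n"); return; }
    if (status < 100) { std::printf("no failure bits set (status %u)\n", status); return; }
    if (status & 0x01) std::printf("at least one build timed out\n");
    if (status & 0x02) std::printf("at least one output hash mismatch\n");
    if (status & 0x04) std::printf("at least one build failure\n");
    if (status & 0x08) std::printf("at least one non-deterministic build\n");
}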
19
subprojects/libstore/builtins.hh
Normal file
@@ -0,0 +1,19 @@
#pragma once
///@file

#include "derivations.hh"

namespace nix {

// TODO: make pluggable.
void builtinFetchurl(
    const BasicDerivation & drv,
    const std::map<std::string, Path> & outputs,
    const std::string & netrcData,
    const std::string & caFileData);

void builtinUnpackChannel(
    const BasicDerivation & drv,
    const std::map<std::string, Path> & outputs);

}
206
subprojects/libstore/builtins/buildenv.cc
Normal file
@@ -0,0 +1,206 @@
#include "buildenv.hh"
|
||||
#include "derivations.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <fcntl.h>
|
||||
#include <algorithm>
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct State
|
||||
{
|
||||
std::map<Path, int> priorities;
|
||||
unsigned long symlinks = 0;
|
||||
};
|
||||
|
||||
/* For each activated package, create symlinks */
|
||||
static void createLinks(State & state, const Path & srcDir, const Path & dstDir, int priority)
|
||||
{
|
||||
std::filesystem::directory_iterator srcFiles;
|
||||
|
||||
try {
|
||||
srcFiles = std::filesystem::directory_iterator{srcDir};
|
||||
} catch (std::filesystem::filesystem_error & e) {
|
||||
if (e.code() == std::errc::not_a_directory) {
|
||||
warn("not including '%s' in the user environment because it's not a directory", srcDir);
|
||||
return;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
for (const auto & ent : srcFiles) {
|
||||
checkInterrupt();
|
||||
auto name = ent.path().filename();
|
||||
if (name.string()[0] == '.')
|
||||
/* not matched by glob */
|
||||
continue;
|
||||
auto srcFile = (std::filesystem::path{srcDir} / name).string();
|
||||
auto dstFile = (std::filesystem::path{dstDir} / name).string();
|
||||
|
||||
struct stat srcSt;
|
||||
try {
|
||||
if (stat(srcFile.c_str(), &srcSt) == -1)
|
||||
throw SysError("getting status of '%1%'", srcFile);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOENT || e.errNo == ENOTDIR) {
|
||||
warn("skipping dangling symlink '%s'", dstFile);
|
||||
continue;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
/* The files below are special-cased to that they don't show
|
||||
* up in user profiles, either because they are useless, or
|
||||
* because they would cause pointless collisions (e.g., each
|
||||
* Python package brings its own
|
||||
* `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
|
||||
*/
|
||||
if (hasSuffix(srcFile, "/propagated-build-inputs") ||
|
||||
hasSuffix(srcFile, "/nix-support") ||
|
||||
hasSuffix(srcFile, "/perllocal.pod") ||
|
||||
hasSuffix(srcFile, "/info/dir") ||
|
||||
hasSuffix(srcFile, "/log") ||
|
||||
hasSuffix(srcFile, "/manifest.nix") ||
|
||||
hasSuffix(srcFile, "/manifest.json"))
|
||||
continue;
|
||||
|
||||
else if (S_ISDIR(srcSt.st_mode)) {
|
||||
auto dstStOpt = maybeLstat(dstFile.c_str());
|
||||
if (dstStOpt) {
|
||||
auto & dstSt = *dstStOpt;
|
||||
if (S_ISDIR(dstSt.st_mode)) {
|
||||
createLinks(state, srcFile, dstFile, priority);
|
||||
continue;
|
||||
} else if (S_ISLNK(dstSt.st_mode)) {
|
||||
auto target = canonPath(dstFile, true);
|
||||
if (!S_ISDIR(lstat(target).st_mode))
|
||||
throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target);
|
||||
if (unlink(dstFile.c_str()) == -1)
|
||||
throw SysError("unlinking '%1%'", dstFile);
|
||||
if (mkdir(dstFile.c_str()
|
||||
#ifndef _WIN32 // TODO abstract mkdir perms for Windows
|
||||
, 0755
|
||||
#endif
|
||||
) == -1)
|
||||
throw SysError("creating directory '%1%'", dstFile);
|
||||
createLinks(state, target, dstFile, state.priorities[dstFile]);
|
||||
createLinks(state, srcFile, dstFile, priority);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
else {
|
||||
auto dstStOpt = maybeLstat(dstFile.c_str());
|
||||
if (dstStOpt) {
|
||||
auto & dstSt = *dstStOpt;
|
||||
if (S_ISLNK(dstSt.st_mode)) {
|
||||
auto prevPriority = state.priorities[dstFile];
|
||||
if (prevPriority == priority)
|
||||
throw BuildEnvFileConflictError(
|
||||
readLink(dstFile),
|
||||
srcFile,
|
||||
priority
|
||||
);
|
||||
if (prevPriority < priority)
|
||||
continue;
|
||||
if (unlink(dstFile.c_str()) == -1)
|
||||
throw SysError("unlinking '%1%'", dstFile);
|
||||
} else if (S_ISDIR(dstSt.st_mode))
|
||||
throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile);
|
||||
}
|
||||
}
|
||||
|
||||
createSymlink(srcFile, dstFile);
|
||||
state.priorities[dstFile] = priority;
|
||||
state.symlinks++;
|
||||
}
|
||||
}
|
||||
|
||||
void buildProfile(const Path & out, Packages && pkgs)
|
||||
{
|
||||
State state;
|
||||
|
||||
std::set<Path> done, postponed;
|
||||
|
||||
auto addPkg = [&](const Path & pkgDir, int priority) {
|
||||
if (!done.insert(pkgDir).second) return;
|
||||
createLinks(state, pkgDir, out, priority);
|
||||
|
||||
try {
|
||||
for (const auto & p : tokenizeString<std::vector<std::string>>(
|
||||
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
|
||||
if (!done.count(p))
|
||||
postponed.insert(p);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
|
||||
}
|
||||
};
|
||||
|
||||
/* Symlink to the packages that have been installed explicitly by the
|
||||
* user. Process in priority order to reduce unnecessary
|
||||
* symlink/unlink steps.
|
||||
*/
|
||||
std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) {
|
||||
return a.priority < b.priority || (a.priority == b.priority && a.path < b.path);
|
||||
});
|
||||
for (const auto & pkg : pkgs)
|
||||
if (pkg.active)
|
||||
addPkg(pkg.path, pkg.priority);
|
||||
|
||||
/* Symlink to the packages that have been "propagated" by packages
|
||||
* installed by the user (i.e., package X declares that it wants Y
|
||||
* installed as well). We do these later because they have a lower
|
||||
* priority in case of collisions.
|
||||
*/
|
||||
auto priorityCounter = 1000;
|
||||
while (!postponed.empty()) {
|
||||
std::set<Path> pkgDirs;
|
||||
postponed.swap(pkgDirs);
|
||||
for (const auto & pkgDir : pkgDirs)
|
||||
addPkg(pkgDir, priorityCounter++);
|
||||
}
|
||||
|
||||
debug("created %d symlinks in user environment", state.symlinks);
|
||||
}
|
||||
|
||||
void builtinBuildenv(
|
||||
const BasicDerivation & drv,
|
||||
const std::map<std::string, Path> & outputs)
|
||||
{
|
||||
auto getAttr = [&](const std::string & name) {
|
||||
auto i = drv.env.find(name);
|
||||
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
|
||||
return i->second;
|
||||
};
|
||||
|
||||
auto out = outputs.at("out");
|
||||
createDirs(out);
|
||||
|
||||
/* Convert the stuff we get from the environment back into a
|
||||
* coherent data type. */
|
||||
Packages pkgs;
|
||||
{
|
||||
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
|
||||
|
||||
auto itemIt = derivations.begin();
|
||||
while (itemIt != derivations.end()) {
|
||||
/* !!! We're trusting the caller to structure derivations env var correctly */
|
||||
const bool active = "false" != *itemIt++;
|
||||
const int priority = stoi(*itemIt++);
|
||||
const size_t outputs = stoul(*itemIt++);
|
||||
|
||||
for (size_t n {0}; n < outputs; n++) {
|
||||
pkgs.emplace_back(std::move(*itemIt++), active, priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buildProfile(out, std::move(pkgs));
|
||||
|
||||
createSymlink(getAttr("manifest"), out + "/manifest.nix");
|
||||
}
|
||||
|
||||
}
|
||||
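The flat token stream parsed by builtinBuildenv above is easiest to follow with a worked example. The store paths below are made up; the format is groups of (active flag, priority, output count, then that many paths):

#include "buildenv.hh"

// Illustrative only: the env attribute
//   derivations = "true 5 1 /nix/store/aaaa-hello false 9 2 /nix/store/bbbb-foo-bin /nix/store/cccc-foo-man"
// parses into the Packages value below, which buildProfile() then links
// into $out in ascending priority order (a lower number wins collisions).
static nix::Packages examplePackages()
{
    nix::Packages pkgs;
    pkgs.emplace_back("/nix/store/aaaa-hello", true, 5);
    pkgs.emplace_back("/nix/store/bbbb-foo-bin", false, 9);
    pkgs.emplace_back("/nix/store/cccc-foo-man", false, 9);
    return pkgs;
}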
52
subprojects/libstore/builtins/buildenv.hh
Normal file
@@ -0,0 +1,52 @@
#pragma once
///@file

#include "store-api.hh"

namespace nix {

/**
 * Think of this as a "store level package attrset", but stripped down to no more than the needs of buildenv.
 */
struct Package {
    Path path;
    bool active;
    int priority;
    Package(const Path & path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
};

class BuildEnvFileConflictError : public Error
{
public:
    const Path fileA;
    const Path fileB;
    int priority;

    BuildEnvFileConflictError(
        const Path fileA,
        const Path fileB,
        int priority
    )
        : Error(
            "Unable to build profile. There is a conflict for the following files:\n"
            "\n"
            "  %1%\n"
            "  %2%",
            fileA,
            fileB
        )
        , fileA(fileA)
        , fileB(fileB)
        , priority(priority)
    {}
};

typedef std::vector<Package> Packages;

void buildProfile(const Path & out, Packages && pkgs);

void builtinBuildenv(
    const BasicDerivation & drv,
    const std::map<std::string, Path> & outputs);

}
82
subprojects/libstore/builtins/fetchurl.cc
Normal file
@@ -0,0 +1,82 @@
#include "builtins.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "store-api.hh"
|
||||
#include "archive.hh"
|
||||
#include "compression.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
void builtinFetchurl(
|
||||
const BasicDerivation & drv,
|
||||
const std::map<std::string, Path> & outputs,
|
||||
const std::string & netrcData,
|
||||
const std::string & caFileData)
|
||||
{
|
||||
/* Make the host's netrc data available. Too bad curl requires
|
||||
this to be stored in a file. It would be nice if we could just
|
||||
pass a pointer to the data. */
|
||||
if (netrcData != "") {
|
||||
settings.netrcFile = "netrc";
|
||||
writeFile(settings.netrcFile, netrcData, 0600);
|
||||
}
|
||||
|
||||
settings.caFile = "ca-certificates.crt";
|
||||
writeFile(settings.caFile, caFileData, 0600);
|
||||
|
||||
auto out = get(drv.outputs, "out");
|
||||
if (!out)
|
||||
throw Error("'builtin:fetchurl' requires an 'out' output");
|
||||
|
||||
if (!(drv.type().isFixed() || drv.type().isImpure()))
|
||||
throw Error("'builtin:fetchurl' must be a fixed-output or impure derivation");
|
||||
|
||||
auto storePath = outputs.at("out");
|
||||
auto mainUrl = drv.env.at("url");
|
||||
bool unpack = getOr(drv.env, "unpack", "") == "1";
|
||||
|
||||
/* Note: have to use a fresh fileTransfer here because we're in
|
||||
a forked process. */
|
||||
auto fileTransfer = makeFileTransfer();
|
||||
|
||||
auto fetch = [&](const std::string & url) {
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
|
||||
FileTransferRequest request(url);
|
||||
request.decompress = false;
|
||||
|
||||
auto decompressor = makeDecompressionSink(
|
||||
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
|
||||
fileTransfer->download(std::move(request), *decompressor);
|
||||
decompressor->finish();
|
||||
});
|
||||
|
||||
if (unpack)
|
||||
restorePath(storePath, *source);
|
||||
else
|
||||
writeFile(storePath, *source);
|
||||
|
||||
auto executable = drv.env.find("executable");
|
||||
if (executable != drv.env.end() && executable->second == "1") {
|
||||
if (chmod(storePath.c_str(), 0755) == -1)
|
||||
throw SysError("making '%1%' executable", storePath);
|
||||
}
|
||||
};
|
||||
|
||||
/* Try the hashed mirrors first. */
|
||||
auto dof = std::get_if<DerivationOutput::CAFixed>(&out->raw);
|
||||
if (dof && dof->ca.method.getFileIngestionMethod() == FileIngestionMethod::Flat)
|
||||
for (auto hashedMirror : settings.hashedMirrors.get())
|
||||
try {
|
||||
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
|
||||
fetch(hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/" + dof->ca.hash.to_string(HashFormat::Base16, false));
|
||||
return;
|
||||
} catch (Error & e) {
|
||||
debug(e.what());
|
||||
}
|
||||
|
||||
/* Otherwise try the specified URL. */
|
||||
fetch(mainUrl);
|
||||
}
|
||||
|
||||
}
|
||||
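For the hashed-mirror fast path above, the constructed URL has the shape <mirror>/<algo>/<base16-hash>. A sketch with made-up values (the real mirror list comes from the hashed-mirrors setting):

#include <string>

// Illustrative only: mirrors the URL construction in the loop above.
// The mirror host and hash value are hypothetical.
std::string exampleHashedMirrorUrl()
{
    std::string mirror = "https://mirror.example.org"; // hypothetical settings.hashedMirrors entry
    std::string algo = "sha256";                       // printHashAlgo(dof->ca.hash.algo)
    std::string hash = "52f1f0c4bd84...";              // to_string(HashFormat::Base16, false), truncated
    if (mirror.back() != '/') mirror += '/';
    return mirror + algo + "/" + hash;                 // https://mirror.example.org/sha256/52f1...
}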
55
subprojects/libstore/builtins/unpack-channel.cc
Normal file
@@ -0,0 +1,55 @@
#include "builtins.hh"
|
||||
#include "tarfile.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
namespace fs { using namespace std::filesystem; }
|
||||
|
||||
void builtinUnpackChannel(
|
||||
const BasicDerivation & drv,
|
||||
const std::map<std::string, Path> & outputs)
|
||||
{
|
||||
auto getAttr = [&](const std::string & name) -> const std::string & {
|
||||
auto i = drv.env.find(name);
|
||||
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
|
||||
return i->second;
|
||||
};
|
||||
|
||||
fs::path out{outputs.at("out")};
|
||||
auto & channelName = getAttr("channelName");
|
||||
auto & src = getAttr("src");
|
||||
|
||||
if (fs::path{channelName}.filename().string() != channelName) {
|
||||
throw Error("channelName is not allowed to contain filesystem separators, got %1%", channelName);
|
||||
}
|
||||
|
||||
try {
|
||||
fs::create_directories(out);
|
||||
} catch (fs::filesystem_error &) {
|
||||
throw SysError("creating directory '%1%'", out.string());
|
||||
}
|
||||
|
||||
unpackTarfile(src, out);
|
||||
|
||||
size_t fileCount;
|
||||
std::string fileName;
|
||||
try {
|
||||
auto entries = fs::directory_iterator{out};
|
||||
fileName = entries->path().string();
|
||||
fileCount = std::distance(fs::begin(entries), fs::end(entries));
|
||||
} catch (fs::filesystem_error &) {
|
||||
throw SysError("failed to read directory %1%", out.string());
|
||||
}
|
||||
|
||||
if (fileCount != 1)
|
||||
throw Error("channel tarball '%s' contains more than one file", src);
|
||||
|
||||
auto target = out / channelName;
|
||||
try {
|
||||
fs::rename(fileName, target);
|
||||
} catch (fs::filesystem_error &) {
|
||||
throw SysError("failed to rename %1% to %2%", fileName, target.string());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
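The post-unpack invariant above (exactly one top-level entry, which is then renamed to the channel name) is shown concretely below; all names are hypothetical:

#include <filesystem>

// Illustrative only: after unpackTarfile(), $out contains a single entry
// (say "nixexprs"); builtinUnpackChannel renames it to $out/<channelName>.
void exampleChannelLayout(const std::filesystem::path & out)
{
    auto entry = *std::filesystem::directory_iterator{out};      // the single entry
    std::filesystem::rename(entry.path(), out / "nixos-24.05");  // hypothetical channelName
}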
41
subprojects/libstore/ca-specific-schema.sql
Normal file
@@ -0,0 +1,41 @@
-- Extension of the sql schema for content-addressed derivations.
-- Won't be loaded unless the experimental feature `ca-derivations`
-- is enabled

create table if not exists Realisations (
    id integer primary key autoincrement not null,
    drvPath text not null,
    outputName text not null, -- symbolic output id, usually "out"
    outputPath integer not null,
    signatures text, -- space-separated list
    foreign key (outputPath) references ValidPaths(id) on delete cascade
);

create index if not exists IndexRealisations on Realisations(drvPath, outputName);

-- We can end up in a weird edge-case where a path depends on itself because
-- it’s an output of a CA derivation, that happens to be the same as one of its
-- dependencies.
-- In that case we have a dependency loop (path -> realisation1 -> realisation2
-- -> path) that we need to break by removing the dependencies between the
-- realisations
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
begin
    delete from RealisationsRefs where realisationReference in (
        select id from Realisations where outputPath = old.id
    );
end;

create table if not exists RealisationsRefs (
    referrer integer not null,
    realisationReference integer,
    foreign key (referrer) references Realisations(id) on delete cascade,
    foreign key (realisationReference) references Realisations(id) on delete restrict
);
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);

-- used by QueryRealisationReferences
create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
-- used by cascade deletion when ValidPaths is deleted
create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);
41
subprojects/libstore/common-protocol-impl.hh
Normal file
@@ -0,0 +1,41 @@
#pragma once
/**
 * @file
 *
 * Template implementations (as opposed to mere declarations).
 *
 * This file is an example of the "impl.hh" pattern. See the
 * contributing guide.
 */

#include "common-protocol.hh"
#include "length-prefixed-protocol-helper.hh"

namespace nix {

/* protocol-agnostic templates */

#define COMMON_USE_LENGTH_PREFIX_SERIALISER(TEMPLATE, T) \
    TEMPLATE T CommonProto::Serialise< T >::read(const StoreDirConfig & store, CommonProto::ReadConn conn) \
    { \
        return LengthPrefixedProtoHelper<CommonProto, T >::read(store, conn); \
    } \
    TEMPLATE void CommonProto::Serialise< T >::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & t) \
    { \
        LengthPrefixedProtoHelper<CommonProto, T >::write(store, conn, t); \
    }

COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::vector<T>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename T>, std::set<T>)
COMMON_USE_LENGTH_PREFIX_SERIALISER(template<typename... Ts>, std::tuple<Ts...>)

#define COMMA_ ,
COMMON_USE_LENGTH_PREFIX_SERIALISER(
    template<typename K COMMA_ typename V>,
    std::map<K COMMA_ V>)
#undef COMMA_


/* protocol-specific templates */

}
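For readers new to this macro, the std::vector<T> instantiation above expands to roughly the following (reformatted; nothing here beyond the literal macro substitution):

// Approximate expansion of COMMON_USE_LENGTH_PREFIX_SERIALISER for
// std::vector<T>; shown for illustration, not part of this diff.
template<typename T>
std::vector<T> CommonProto::Serialise<std::vector<T>>::read(
    const StoreDirConfig & store, CommonProto::ReadConn conn)
{
    return LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::read(store, conn);
}

template<typename T>
void CommonProto::Serialise<std::vector<T>>::write(
    const StoreDirConfig & store, CommonProto::WriteConn conn, const std::vector<T> & t)
{
    LengthPrefixedProtoHelper<CommonProto, std::vector<T>>::write(store, conn, t);
}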
97
subprojects/libstore/common-protocol.cc
Normal file
@@ -0,0 +1,97 @@
#include "serialise.hh"
|
||||
#include "path-with-outputs.hh"
|
||||
#include "store-api.hh"
|
||||
#include "build-result.hh"
|
||||
#include "common-protocol.hh"
|
||||
#include "common-protocol-impl.hh"
|
||||
#include "archive.hh"
|
||||
#include "derivations.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* protocol-agnostic definitions */
|
||||
|
||||
std::string CommonProto::Serialise<std::string>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return readString(conn.from);
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::string>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::string & str)
|
||||
{
|
||||
conn.to << str;
|
||||
}
|
||||
|
||||
|
||||
StorePath CommonProto::Serialise<StorePath>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return store.parseStorePath(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<StorePath>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath)
|
||||
{
|
||||
conn.to << store.printStorePath(storePath);
|
||||
}
|
||||
|
||||
|
||||
ContentAddress CommonProto::Serialise<ContentAddress>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return ContentAddress::parse(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<ContentAddress>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const ContentAddress & ca)
|
||||
{
|
||||
conn.to << renderContentAddress(ca);
|
||||
}
|
||||
|
||||
|
||||
Realisation CommonProto::Serialise<Realisation>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
std::string rawInput = readString(conn.from);
|
||||
return Realisation::fromJSON(
|
||||
nlohmann::json::parse(rawInput),
|
||||
"remote-protocol"
|
||||
);
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<Realisation>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const Realisation & realisation)
|
||||
{
|
||||
conn.to << realisation.toJSON().dump();
|
||||
}
|
||||
|
||||
|
||||
DrvOutput CommonProto::Serialise<DrvOutput>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return DrvOutput::parse(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<DrvOutput>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const DrvOutput & drvOutput)
|
||||
{
|
||||
conn.to << drvOutput.to_string();
|
||||
}
|
||||
|
||||
|
||||
std::optional<StorePath> CommonProto::Serialise<std::optional<StorePath>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
auto s = readString(conn.from);
|
||||
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::optional<StorePath>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
|
||||
{
|
||||
conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
|
||||
}
|
||||
|
||||
|
||||
std::optional<ContentAddress> CommonProto::Serialise<std::optional<ContentAddress>>::read(const StoreDirConfig & store, CommonProto::ReadConn conn)
|
||||
{
|
||||
return ContentAddress::parseOpt(readString(conn.from));
|
||||
}
|
||||
|
||||
void CommonProto::Serialise<std::optional<ContentAddress>>::write(const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
|
||||
{
|
||||
conn.to << (caOpt ? renderContentAddress(*caOpt) : "");
|
||||
}
|
||||
|
||||
}
|
||||
106
subprojects/libstore/common-protocol.hh
Normal file
@@ -0,0 +1,106 @@
#pragma once
///@file

#include "serialise.hh"

namespace nix {

struct StoreDirConfig;
struct Source;

// items being serialized
class StorePath;
struct ContentAddress;
struct DrvOutput;
struct Realisation;


/**
 * Shared serializers between the worker protocol, serve protocol, and a
 * few others.
 *
 * This `struct` is basically just a `namespace`; we use a type rather
 * than a namespace just so we can use it as a template argument.
 */
struct CommonProto
{
    /**
     * A unidirectional read connection, to be used by the read half of the
     * canonical serializers below.
     */
    struct ReadConn {
        Source & from;
    };

    /**
     * A unidirectional write connection, to be used by the write half of the
     * canonical serializers below.
     */
    struct WriteConn {
        Sink & to;
    };

    template<typename T>
    struct Serialise;

    /**
     * Wrapper function around `CommonProto::Serialise<T>::write` that allows us to
     * infer the type instead of having to write it down explicitly.
     */
    template<typename T>
    static void write(const StoreDirConfig & store, WriteConn conn, const T & t)
    {
        CommonProto::Serialise<T>::write(store, conn, t);
    }
};

#define DECLARE_COMMON_SERIALISER(T) \
    struct CommonProto::Serialise< T > \
    { \
        static T read(const StoreDirConfig & store, CommonProto::ReadConn conn); \
        static void write(const StoreDirConfig & store, CommonProto::WriteConn conn, const T & str); \
    }

template<>
DECLARE_COMMON_SERIALISER(std::string);
template<>
DECLARE_COMMON_SERIALISER(StorePath);
template<>
DECLARE_COMMON_SERIALISER(ContentAddress);
template<>
DECLARE_COMMON_SERIALISER(DrvOutput);
template<>
DECLARE_COMMON_SERIALISER(Realisation);

template<typename T>
DECLARE_COMMON_SERIALISER(std::vector<T>);
template<typename T>
DECLARE_COMMON_SERIALISER(std::set<T>);
template<typename... Ts>
DECLARE_COMMON_SERIALISER(std::tuple<Ts...>);

#define COMMA_ ,
template<typename K, typename V>
DECLARE_COMMON_SERIALISER(std::map<K COMMA_ V>);
#undef COMMA_

/**
 * These use the empty string for the null case, relying on the fact
 * that the underlying types never serialize to the empty string.
 *
 * We do this instead of a generic std::optional<T> instance because
 * ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
 * the same reason, we don't have std::variant<T..> instances (ordinal
 * tags 0...n).
 *
 * We could have the generic instances and then these as specializations
 * for compatibility, but that's proven a bit finicky, and also makes the
 * worker protocol harder to implement in other languages where such
 * specializations may not be allowed.
 */
template<>
DECLARE_COMMON_SERIALISER(std::optional<StorePath>);
template<>
DECLARE_COMMON_SERIALISER(std::optional<ContentAddress>);

}
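As a usage sketch (in-memory sink/source, error handling elided, not part of this diff), a value can be round-tripped through these serialisers like so:

#include <cassert>

// Sketch only: StringSink and StringSource are the in-memory Sink/Source
// implementations from serialise.hh.
void roundTripExample(const StoreDirConfig & store, const StorePath & path)
{
    StringSink to;
    CommonProto::write(store, CommonProto::WriteConn { .to = to }, path);

    StringSource from { to.s };
    auto path2 = CommonProto::Serialise<StorePath>::read(
        store, CommonProto::ReadConn { .from = from });
    assert(path == path2);
}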
43
subprojects/libstore/common-ssh-store-config.cc
Normal file
@@ -0,0 +1,43 @@
#include <regex>

#include "common-ssh-store-config.hh"
#include "ssh.hh"

namespace nix {

static std::string extractConnStr(std::string_view scheme, std::string_view _connStr)
{
    if (_connStr.empty())
        throw UsageError("`%s` store requires a valid SSH host as the authority part in Store URI", scheme);

    std::string connStr{_connStr};

    std::smatch result;
    static std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$");

    if (std::regex_match(connStr, result, v6AddrRegex)) {
        connStr = result[1].matched ? result.str(1) + result.str(3) : result.str(3);
    }

    return connStr;
}

CommonSSHStoreConfig::CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params)
    : StoreConfig(params)
    , host(extractConnStr(scheme, host))
{
}

SSHMaster CommonSSHStoreConfig::createSSHMaster(bool useMaster, Descriptor logFD)
{
    return {
        host,
        sshKey.get(),
        sshPublicHostKey.get(),
        useMaster,
        compress,
        logFD,
    };
}

}
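Concretely, the regex above only strips the brackets that the URL parser needs around IPv6 addresses. The helper below is a self-contained restatement using the same pattern (the name is made up):

#include <regex>
#include <string>

// Same regex as extractConnStr above; shown standalone for illustration.
std::string stripV6Brackets(std::string connStr)
{
    static const std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$");
    std::smatch m;
    if (std::regex_match(connStr, m, v6AddrRegex))
        connStr = m[1].matched ? m.str(1) + m.str(3) : m.str(3);
    return connStr; // "root@[::1]" -> "root@::1", "[::1]" -> "::1", others unchanged
}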
62
subprojects/libstore/common-ssh-store-config.hh
Normal file
@@ -0,0 +1,62 @@
#pragma once
///@file

#include "store-api.hh"

namespace nix {

class SSHMaster;

struct CommonSSHStoreConfig : virtual StoreConfig
{
    using StoreConfig::StoreConfig;

    CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params);

    const Setting<Path> sshKey{this, "", "ssh-key",
        "Path to the SSH private key used to authenticate to the remote machine."};

    const Setting<std::string> sshPublicHostKey{this, "", "base64-ssh-public-host-key",
        "The public host key of the remote machine."};

    const Setting<bool> compress{this, false, "compress",
        "Whether to enable SSH compression."};

    const Setting<std::string> remoteStore{this, "", "remote-store",
        R"(
          [Store URL](@docroot@/store/types/index.md#store-url-format)
          to be used on the remote machine. The default is `auto`
          (i.e. use the Nix daemon or `/nix/store` directly).
        )"};

    /**
     * The `parseURL` function supports IPv6 URIs as defined in
     * RFC 2732, but also pure addresses. The latter are needed here to
     * connect to a remote store via SSH (it's possible to do e.g. `ssh root@::1`).
     *
     * When initialized, the following adjustments are made:
     *
     * - If the URL looks like `root@[::1]` (which is allowed by the URL parser and probably
     *   needed to pass further flags), it
     *   will be transformed into `root@::1` for SSH (same for `[::1]` -> `::1`).
     *
     * - If the URL looks like `root@::1` it will be left as-is.
     *
     * - In any other case, the string will be left as-is.
     *
     * An error is also thrown if the connection string is empty.
     */
    std::string host;

    /**
     * Small wrapper around `SSHMaster::SSHMaster` that gets most
     * arguments from this configuration.
     *
     * See that constructor for details on the remaining two arguments.
     */
    SSHMaster createSSHMaster(
        bool useMaster,
        Descriptor logFD = INVALID_DESCRIPTOR);
};

}
310
subprojects/libstore/content-address.cc
Normal file
@@ -0,0 +1,310 @@
#include "args.hh"
|
||||
#include "content-address.hh"
|
||||
#include "split.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::string_view makeFileIngestionPrefix(FileIngestionMethod m)
|
||||
{
|
||||
switch (m) {
|
||||
case FileIngestionMethod::Flat:
|
||||
// Not prefixed for back compat
|
||||
return "";
|
||||
case FileIngestionMethod::NixArchive:
|
||||
return "r:";
|
||||
case FileIngestionMethod::Git:
|
||||
experimentalFeatureSettings.require(Xp::GitHashing);
|
||||
return "git:";
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
std::string_view ContentAddressMethod::render() const
|
||||
{
|
||||
switch (raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return "text";
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return renderFileIngestionMethod(getFileIngestionMethod());
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* **Not surjective**
|
||||
*
|
||||
* This is not exposed because `FileIngestionMethod::Flat` maps to
|
||||
* `ContentAddressMethod::Raw::Flat` and
|
||||
* `ContentAddressMethod::Raw::Text` alike. We can thus only safely use
|
||||
* this when the latter is ruled out (e.g. because it is already
|
||||
* handled).
|
||||
*/
|
||||
static ContentAddressMethod fileIngestionMethodToContentAddressMethod(FileIngestionMethod m)
|
||||
{
|
||||
switch (m) {
|
||||
case FileIngestionMethod::Flat:
|
||||
return ContentAddressMethod::Raw::Flat;
|
||||
case FileIngestionMethod::NixArchive:
|
||||
return ContentAddressMethod::Raw::NixArchive;
|
||||
case FileIngestionMethod::Git:
|
||||
return ContentAddressMethod::Raw::Git;
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
ContentAddressMethod ContentAddressMethod::parse(std::string_view m)
|
||||
{
|
||||
if (m == "text")
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
else
|
||||
return fileIngestionMethodToContentAddressMethod(
|
||||
parseFileIngestionMethod(m));
|
||||
}
|
||||
|
||||
std::string_view ContentAddressMethod::renderPrefix() const
|
||||
{
|
||||
switch (raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return "text:";
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return makeFileIngestionPrefix(getFileIngestionMethod());
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
|
||||
{
|
||||
if (splitPrefix(m, "r:")) {
|
||||
return ContentAddressMethod::Raw::NixArchive;
|
||||
}
|
||||
else if (splitPrefix(m, "git:")) {
|
||||
experimentalFeatureSettings.require(Xp::GitHashing);
|
||||
return ContentAddressMethod::Raw::Git;
|
||||
}
|
||||
else if (splitPrefix(m, "text:")) {
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
}
|
||||
return ContentAddressMethod::Raw::Flat;
|
||||
}
|
||||
|
||||
/**
|
||||
* This is slightly more mindful of forward compat in that it uses `fixed:`
|
||||
* rather than just doing a raw empty prefix or `r:`, which doesn't "save room"
|
||||
* for future changes very well.
|
||||
*/
|
||||
static std::string renderPrefixModern(const ContentAddressMethod & ca)
|
||||
{
|
||||
switch (ca.raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return "text:";
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return "fixed:" + makeFileIngestionPrefix(ca.getFileIngestionMethod());
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
std::string ContentAddressMethod::renderWithAlgo(HashAlgorithm ha) const
|
||||
{
|
||||
return renderPrefixModern(*this) + printHashAlgo(ha);
|
||||
}
|
||||
|
||||
FileIngestionMethod ContentAddressMethod::getFileIngestionMethod() const
|
||||
{
|
||||
switch (raw) {
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
return FileIngestionMethod::Flat;
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
return FileIngestionMethod::NixArchive;
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return FileIngestionMethod::Git;
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return FileIngestionMethod::Flat;
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
std::string ContentAddress::render() const
|
||||
{
|
||||
return renderPrefixModern(method) + this->hash.to_string(HashFormat::Nix32, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses content address strings up to the hash.
|
||||
*/
|
||||
static std::pair<ContentAddressMethod, HashAlgorithm> parseContentAddressMethodPrefix(std::string_view & rest)
|
||||
{
|
||||
std::string_view wholeInput { rest };
|
||||
|
||||
std::string_view prefix;
|
||||
{
|
||||
auto optPrefix = splitPrefixTo(rest, ':');
|
||||
if (!optPrefix)
|
||||
throw UsageError("not a content address because it is not in the form '<prefix>:<rest>': %s", wholeInput);
|
||||
prefix = *optPrefix;
|
||||
}
|
||||
|
||||
auto parseHashAlgorithm_ = [&](){
|
||||
auto hashAlgoRaw = splitPrefixTo(rest, ':');
|
||||
if (!hashAlgoRaw)
|
||||
throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", wholeInput);
|
||||
HashAlgorithm hashAlgo = parseHashAlgo(*hashAlgoRaw);
|
||||
return hashAlgo;
|
||||
};
|
||||
|
||||
// Switch on prefix
|
||||
if (prefix == "text") {
|
||||
// No parsing of the ingestion method, "text" only support flat.
|
||||
HashAlgorithm hashAlgo = parseHashAlgorithm_();
|
||||
return {
|
||||
ContentAddressMethod::Raw::Text,
|
||||
std::move(hashAlgo),
|
||||
};
|
||||
} else if (prefix == "fixed") {
|
||||
// Parse method
|
||||
auto method = ContentAddressMethod::Raw::Flat;
|
||||
if (splitPrefix(rest, "r:"))
|
||||
method = ContentAddressMethod::Raw::NixArchive;
|
||||
else if (splitPrefix(rest, "git:")) {
|
||||
experimentalFeatureSettings.require(Xp::GitHashing);
|
||||
method = ContentAddressMethod::Raw::Git;
|
||||
}
|
||||
HashAlgorithm hashAlgo = parseHashAlgorithm_();
|
||||
return {
|
||||
std::move(method),
|
||||
std::move(hashAlgo),
|
||||
};
|
||||
} else
|
||||
throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
|
||||
}
|
||||
|
||||
ContentAddress ContentAddress::parse(std::string_view rawCa)
|
||||
{
|
||||
auto rest = rawCa;
|
||||
|
||||
auto [caMethod, hashAlgo] = parseContentAddressMethodPrefix(rest);
|
||||
|
||||
return ContentAddress {
|
||||
.method = std::move(caMethod),
|
||||
.hash = Hash::parseNonSRIUnprefixed(rest, hashAlgo),
|
||||
};
|
||||
}
|
||||
|
||||
std::pair<ContentAddressMethod, HashAlgorithm> ContentAddressMethod::parseWithAlgo(std::string_view caMethod)
|
||||
{
|
||||
std::string asPrefix = std::string{caMethod} + ":";
|
||||
// parseContentAddressMethodPrefix takes its argument by reference
|
||||
std::string_view asPrefixView = asPrefix;
|
||||
return parseContentAddressMethodPrefix(asPrefixView);
|
||||
}
|
||||
|
||||
std::optional<ContentAddress> ContentAddress::parseOpt(std::string_view rawCaOpt)
|
||||
{
|
||||
return rawCaOpt == ""
|
||||
? std::nullopt
|
||||
: std::optional { ContentAddress::parse(rawCaOpt) };
|
||||
};
|
||||
|
||||
std::string renderContentAddress(std::optional<ContentAddress> ca)
|
||||
{
|
||||
return ca ? ca->render() : "";
|
||||
}
|
||||
|
||||
std::string ContentAddress::printMethodAlgo() const
|
||||
{
|
||||
return std::string { method.renderPrefix() }
|
||||
+ printHashAlgo(hash.algo);
|
||||
}
|
||||
|
||||
bool StoreReferences::empty() const
|
||||
{
|
||||
return !self && others.empty();
|
||||
}
|
||||
|
||||
size_t StoreReferences::size() const
|
||||
{
|
||||
return (self ? 1 : 0) + others.size();
|
||||
}
|
||||
|
||||
ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const ContentAddress & ca) noexcept
|
||||
{
|
||||
switch (ca.method.raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
return TextInfo {
|
||||
.hash = ca.hash,
|
||||
.references = {},
|
||||
};
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return FixedOutputInfo {
|
||||
.method = ca.method.getFileIngestionMethod(),
|
||||
.hash = ca.hash,
|
||||
.references = {},
|
||||
};
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
ContentAddressWithReferences ContentAddressWithReferences::fromParts(
|
||||
ContentAddressMethod method, Hash hash, StoreReferences refs)
|
||||
{
|
||||
switch (method.raw) {
|
||||
case ContentAddressMethod::Raw::Text:
|
||||
if (refs.self)
|
||||
throw Error("self-reference not allowed with text hashing");
|
||||
return TextInfo {
|
||||
.hash = std::move(hash),
|
||||
.references = std::move(refs.others),
|
||||
};
|
||||
case ContentAddressMethod::Raw::Flat:
|
||||
case ContentAddressMethod::Raw::NixArchive:
|
||||
case ContentAddressMethod::Raw::Git:
|
||||
return FixedOutputInfo {
|
||||
.method = method.getFileIngestionMethod(),
|
||||
.hash = std::move(hash),
|
||||
.references = std::move(refs),
|
||||
};
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
ContentAddressMethod ContentAddressWithReferences::getMethod() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const TextInfo & th) -> ContentAddressMethod {
|
||||
return ContentAddressMethod::Raw::Text;
|
||||
},
|
||||
[](const FixedOutputInfo & fsh) -> ContentAddressMethod {
|
||||
return fileIngestionMethodToContentAddressMethod(
|
||||
fsh.method);
|
||||
},
|
||||
}, raw);
|
||||
}
|
||||
|
||||
Hash ContentAddressWithReferences::getHash() const
|
||||
{
|
||||
return std::visit(overloaded {
|
||||
[](const TextInfo & th) {
|
||||
return th.hash;
|
||||
},
|
||||
[](const FixedOutputInfo & fsh) {
|
||||
return fsh.hash;
|
||||
},
|
||||
}, raw);
|
||||
}
|
||||
|
||||
}
|
||||
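A small sketch of the grammar handled above, using parseWithAlgo so that no hash value is needed (illustrative, not part of this diff):

#include <cassert>

// Illustrative use of the prefix parser above: "fixed:r:sha256" means a
// NAR-ingested fixed output hashed with SHA-256.
void parseMethodExample()
{
    auto [method, algo] = ContentAddressMethod::parseWithAlgo("fixed:r:sha256");
    assert(method.raw == ContentAddressMethod::Raw::NixArchive);
    assert(algo == HashAlgorithm::SHA256);
}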
316
subprojects/libstore/content-address.hh
Normal file
@@ -0,0 +1,316 @@
#pragma once
///@file

#include <variant>
#include "hash.hh"
#include "path.hh"
#include "file-content-address.hh"
#include "variant-wrapper.hh"

namespace nix {

/*
 * Content addressing method
 */

/**
 * Compute the prefix to the hash algorithm which indicates how the
 * files were ingested.
 */
std::string_view makeFileIngestionPrefix(FileIngestionMethod m);

/**
 * An enumeration of all the ways we can content-address store objects.
 *
 * Just the type of a content address. Combine with the hash itself, and
 * we have a `ContentAddress` as defined below. Combine that, in turn,
 * with info on references, and we have `ContentAddressWithReferences`,
 * as defined further below.
 */
struct ContentAddressMethod
{
    enum struct Raw {
        /**
         * Calculate a store path using the `FileIngestionMethod::Flat`
         * hash of the file system objects, and references.
         *
         * See `store-object/content-address.md#method-flat` in the
         * manual.
         */
        Flat,

        /**
         * Calculate a store path using the
         * `FileIngestionMethod::NixArchive` hash of the file system
         * objects, and references.
         *
         * See `store-object/content-address.md#method-nix-archive` in
         * the manual.
         */
        NixArchive,

        /**
         * Calculate a store path using the `FileIngestionMethod::Git`
         * hash of the file system objects, and references.
         *
         * Part of `ExperimentalFeature::GitHashing`.
         *
         * See `store-object/content-address.md#method-git` in the
         * manual.
         */
        Git,

        /**
         * Calculate a store path using the `FileIngestionMethod::Flat`
         * hash of the file system objects, and references, but in a
         * different way than `ContentAddressMethod::Raw::Flat`.
         *
         * See `store-object/content-address.md#method-text` in the
         * manual.
         */
        Text,
    };

    Raw raw;

    bool operator ==(const ContentAddressMethod &) const = default;
    auto operator <=>(const ContentAddressMethod &) const = default;

    MAKE_WRAPPER_CONSTRUCTOR(ContentAddressMethod);

    /**
     * Parse a content addressing method (name).
     *
     * The inverse of `render`.
     */
    static ContentAddressMethod parse(std::string_view rawCaMethod);

    /**
     * Render a content addressing method (name).
     *
     * The inverse of `parse`.
     */
    std::string_view render() const;

    /**
     * Parse the prefix tag which indicates how the files
     * were ingested, with the fixed output case not prefixed for back
     * compat.
     *
     * @param [in] m A string that should begin with the prefix.
     * @param [out] m The remainder of the string after the prefix.
     */
    static ContentAddressMethod parsePrefix(std::string_view & m);

    /**
     * Render the prefix tag which indicates how the files were ingested.
     *
     * The rough inverse of `parsePrefix()`.
     */
    std::string_view renderPrefix() const;

    /**
     * Parse a content addressing method and hash algorithm.
     */
    static std::pair<ContentAddressMethod, HashAlgorithm> parseWithAlgo(std::string_view rawCaMethod);

    /**
     * Render a content addressing method and hash algorithm in a
     * nicer way, prefixing both cases.
     *
     * The rough inverse of `parse()`.
     */
    std::string renderWithAlgo(HashAlgorithm ha) const;

    /**
     * Get the underlying way to content-address file system objects.
     *
     * Different ways of hashing store objects may use the same method
     * for hashing file system objects.
     */
    FileIngestionMethod getFileIngestionMethod() const;
};


/*
 * Mini content address
 */

/**
 * We've accumulated several types of content-addressed paths over the
 * years; fixed-output derivations support multiple hash algorithms and
 * serialisation methods (flat file vs NAR). Thus, ‘ca’ has one of the
 * following forms:
 *
 * - `TextIngestionMethod`:
 *   ‘text:sha256:<sha256 hash of file contents>’
 *
 * - `FixedIngestionMethod`:
 *   ‘fixed:<r?>:<hash algorithm>:<hash of file contents>’
 */
struct ContentAddress
{
    /**
     * How the file system objects are serialized
     */
    ContentAddressMethod method;

    /**
     * Hash of that serialization
     */
    Hash hash;

    bool operator ==(const ContentAddress &) const = default;
    auto operator <=>(const ContentAddress &) const = default;

    /**
     * Compute the content-addressability assertion
     * (`ValidPathInfo::ca`) for paths created by
     * `Store::makeFixedOutputPath()` / `Store::addToStore()`.
     */
    std::string render() const;

    static ContentAddress parse(std::string_view rawCa);

    static std::optional<ContentAddress> parseOpt(std::string_view rawCaOpt);

    std::string printMethodAlgo() const;
};

/**
 * Render the `ContentAddress` if it exists to a string, return empty
 * string otherwise.
 */
std::string renderContentAddress(std::optional<ContentAddress> ca);


/*
 * Full content address
 *
 * See the schema for store paths in store-api.cc
 */

/**
 * A set of references to other store objects.
 *
 * References to other store objects are tracked with store paths; self
 * references, however, are tracked with a boolean.
 */
struct StoreReferences
{
    /**
     * References to other store objects
     */
    StorePathSet others;

    /**
     * Reference to this store object
     */
    bool self = false;

    /**
     * @return true iff no references, i.e. others is empty and self is
     * false.
     */
    bool empty() const;

    /**
     * Returns the number of references: the size of `others`, plus 1
     * if `self` is true.
     */
    size_t size() const;

    bool operator ==(const StoreReferences &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
    //auto operator <=>(const StoreReferences &) const = default;
};

// This matches the additional info that we need for makeTextPath
struct TextInfo
{
    /**
     * Hash of the contents of the text/file.
     */
    Hash hash;

    /**
     * References to other store objects only; self references
     * disallowed
     */
    StorePathSet references;

    bool operator ==(const TextInfo &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
    //auto operator <=>(const TextInfo &) const = default;
};

struct FixedOutputInfo
{
    /**
     * How the file system objects are serialized
     */
    FileIngestionMethod method;

    /**
     * Hash of that serialization
     */
    Hash hash;

    /**
     * References to other store objects or this one.
     */
    StoreReferences references;

    bool operator ==(const FixedOutputInfo &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
    //auto operator <=>(const FixedOutputInfo &) const = default;
};

/**
 * A content address combined with information about references — what
 * a `ContentAddress` alone cannot express.
 */
struct ContentAddressWithReferences
{
    typedef std::variant<
        TextInfo,
        FixedOutputInfo
    > Raw;

    Raw raw;

    bool operator ==(const ContentAddressWithReferences &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
    //auto operator <=>(const ContentAddressWithReferences &) const = default;

    MAKE_WRAPPER_CONSTRUCTOR(ContentAddressWithReferences);

    /**
     * Create a `ContentAddressWithReferences` from a mere
     * `ContentAddress`, by claiming no references.
     */
    static ContentAddressWithReferences withoutRefs(const ContentAddress &) noexcept;

    /**
     * Create a `ContentAddressWithReferences` from 3 parts:
     *
     * @param method Way of ingesting the file system data.
     *
     * @param hash Hash of ingested file system data.
     *
     * @param refs References to other store objects or oneself.
     *
     * @note Not all combinations are supported; this is a
     * *partial function* and exceptions will be thrown for invalid
     * combinations.
     */
    static ContentAddressWithReferences fromParts(
        ContentAddressMethod method, Hash hash, StoreReferences refs);

    ContentAddressMethod getMethod() const;

    Hash getHash() const;
};

}
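As a usage sketch of fromParts() (hypothetical, not part of this diff; `narHash` and `dep` stand for values obtained elsewhere), building a ContentAddressWithReferences for a NAR-ingested object with one reference plus a self-reference:

// Self-references are fine for fixed outputs, but would throw for the
// Text method, as documented above.
ContentAddressWithReferences exampleCaWithRefs(Hash narHash, StorePath dep)
{
    StoreReferences refs;
    refs.others.insert(std::move(dep));
    refs.self = true;
    return ContentAddressWithReferences::fromParts(
        ContentAddressMethod::Raw::NixArchive,
        std::move(narHash),
        std::move(refs));
}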
1125
subprojects/libstore/daemon.cc
Normal file
File diff suppressed because it is too large
18
subprojects/libstore/daemon.hh
Normal file
@@ -0,0 +1,18 @@
#pragma once
///@file

#include "serialise.hh"
#include "store-api.hh"

namespace nix::daemon {

enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };

void processConnection(
    ref<Store> store,
    FdSource && from,
    FdSink && to,
    TrustedFlag trusted,
    RecursiveFlag recursive);

}
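A hedged sketch of how processConnection might be wired up: serving the daemon protocol over inherited stdio, roughly what `nix-daemon --stdio` does (store setup and error handling elided; the function name is made up):

#include <unistd.h>

// Illustrative only, under the assumptions stated above.
void serveOnStdio(nix::ref<nix::Store> store)
{
    nix::daemon::processConnection(
        store,
        nix::FdSource(STDIN_FILENO),
        nix::FdSink(STDOUT_FILENO),
        nix::NotTrusted,
        nix::daemon::NotRecursive);
}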
1422
subprojects/libstore/derivations.cc
Normal file
File diff suppressed because it is too large
521
subprojects/libstore/derivations.hh
Normal file
@@ -0,0 +1,521 @@
#pragma once
///@file

#include "path.hh"
#include "types.hh"
#include "hash.hh"
#include "content-address.hh"
#include "repair-flag.hh"
#include "derived-path-map.hh"
#include "sync.hh"
#include "variant-wrapper.hh"

#include <map>
#include <variant>

namespace nix {

struct StoreDirConfig;

/* Abstract syntax of derivations. */

/**
 * A single output of a BasicDerivation (and Derivation).
 */
struct DerivationOutput
{
    /**
     * The traditional non-fixed-output derivation type.
     */
    struct InputAddressed
    {
        StorePath path;

        bool operator == (const InputAddressed &) const = default;
        auto operator <=> (const InputAddressed &) const = default;
    };

    /**
     * Fixed-output derivations, whose output paths are content
     * addressed according to that fixed output.
     */
    struct CAFixed
    {
        /**
         * Method and hash used for expected hash computation.
         *
         * References are not allowed by fiat.
         */
        ContentAddress ca;

        /**
         * Return the \ref StorePath "store path" corresponding to this output
         *
         * @param drvName The name of the derivation this is an output of, without the `.drv`.
         * @param outputName The name of this output.
         */
        StorePath path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;

        bool operator == (const CAFixed &) const = default;
        auto operator <=> (const CAFixed &) const = default;
    };

    /**
     * Floating-output derivations, whose output paths are content
     * addressed, but not fixed, and so are dynamically calculated from
     * whatever the output ends up being.
     */
    struct CAFloating
    {
        /**
         * How the file system objects will be serialized for hashing
         */
        ContentAddressMethod method;

        /**
         * How the serialization will be hashed
         */
        HashAlgorithm hashAlgo;

        bool operator == (const CAFloating &) const = default;
        auto operator <=> (const CAFloating &) const = default;
    };

    /**
     * Input-addressed output which depends on a (CA) derivation whose hash
     * isn't known yet.
     */
    struct Deferred {
        bool operator == (const Deferred &) const = default;
        auto operator <=> (const Deferred &) const = default;
    };

    /**
     * Impure output which is moved to a content-addressed location (like
     * CAFloating) but isn't registered as a realization.
     */
    struct Impure
    {
        /**
         * How the file system objects will be serialized for hashing
         */
        ContentAddressMethod method;

        /**
         * How the serialization will be hashed
         */
        HashAlgorithm hashAlgo;

        bool operator == (const Impure &) const = default;
        auto operator <=> (const Impure &) const = default;
    };

    typedef std::variant<
        InputAddressed,
        CAFixed,
        CAFloating,
        Deferred,
        Impure
    > Raw;

    Raw raw;

    bool operator == (const DerivationOutput &) const = default;
    auto operator <=> (const DerivationOutput &) const = default;

    MAKE_WRAPPER_CONSTRUCTOR(DerivationOutput);

    /**
     * Force choosing a variant
     */
    DerivationOutput() = delete;

    /**
     * \note when you use this function you should make sure that you're
     * passing the right derivation name. When in doubt, you should use
     * the safer interface provided by
     * BasicDerivation::outputsAndOptPaths
     */
    std::optional<StorePath> path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const;

    nlohmann::json toJSON(
        const StoreDirConfig & store,
        std::string_view drvName,
        OutputNameView outputName) const;
    /**
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DerivationOutput fromJSON(
        const StoreDirConfig & store,
        std::string_view drvName,
        OutputNameView outputName,
        const nlohmann::json & json,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};

typedef std::map<std::string, DerivationOutput> DerivationOutputs;

/**
 * These are analogues to the previous DerivationOutputs data type,
 * but they also contain, for each output, the (optional) store
 * path in which it would be written. To calculate values of these
 * types, see the corresponding functions in BasicDerivation.
 */
typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>>
  DerivationOutputsAndOptPaths;

/**
 * For inputs that are sub-derivations, we specify exactly which
 * output IDs we are interested in.
 */
typedef std::map<StorePath, StringSet> DerivationInputs;

struct DerivationType {
    /**
     * Input-addressed derivation types
     */
    struct InputAddressed {
        /**
         * True iff the derivation type can't be determined statically,
         * for instance because it (transitively) depends on a content-addressed
         * derivation.
         */
        bool deferred;

        bool operator == (const InputAddressed &) const = default;
        auto operator <=> (const InputAddressed &) const = default;
    };

    /**
     * Content-addressed derivation types
     */
    struct ContentAddressed {
        /**
         * Whether the derivation should be built safely inside a sandbox.
         */
        bool sandboxed;
        /**
         * Whether the derivation's outputs' content-addresses are "fixed"
         * or "floating".
         *
         * - Fixed: content-addresses are written down as part of the
         *   derivation itself. If the outputs don't end up matching, the
         *   build fails.
         *
         * - Floating: content-addresses are not written down; we do not
         *   know them until we perform the build.
         */
        bool fixed;

        bool operator == (const ContentAddressed &) const = default;
        auto operator <=> (const ContentAddressed &) const = default;
    };

    /**
     * Impure derivation type
     *
     * This is similar at build time to the content-addressed, non-sandboxed,
     * non-fixed type, but has some restrictions on its usage.
     */
    struct Impure {
        bool operator == (const Impure &) const = default;
        auto operator <=> (const Impure &) const = default;
    };

    typedef std::variant<
        InputAddressed,
        ContentAddressed,
        Impure
    > Raw;

    Raw raw;

    bool operator == (const DerivationType &) const = default;
    auto operator <=> (const DerivationType &) const = default;

    MAKE_WRAPPER_CONSTRUCTOR(DerivationType);

    /**
     * Force choosing a variant
     */
    DerivationType() = delete;

    /**
     * Do the outputs of the derivation have paths calculated from their
     * content, or from the derivation itself?
     */
    bool isCA() const;

    /**
     * Is the content of the outputs fixed <em>a priori</em> via a hash?
     * Never true for non-CA derivations.
     */
    bool isFixed() const;

    /**
     * Whether the derivation is fully sandboxed. If false, the sandbox
     * is opened up, e.g. the derivation has access to the network. Note
     * that whether or not we actually sandbox the derivation is
     * controlled separately. Always true for non-CA derivations.
     */
    bool isSandboxed() const;

    /**
     * Whether the derivation is expected to produce a different result
     * every time, and therefore it needs to be rebuilt every time. This is
     * only true for derivations that have the attribute '__impure =
     * true'.
     *
     * Non-impure derivations can still behave impurely, to the degree permitted
     * by the sandbox. Hence why this method isn't `isPure`: impure derivations
     * are not the negation of pure derivations. Purity can not be ascertained
     * except by rather heavy tools.
     */
    bool isImpure() const;

    /**
     * Does the derivation know its own output paths?
     * Only true when there's no floating-ca derivation involved in the
     * closure, or if fixed output.
|
||||
*/
|
||||
bool hasKnownOutputPaths() const;
|
||||
};
|
||||
|
||||
struct BasicDerivation
|
||||
{
|
||||
/**
|
||||
* keyed on symbolic IDs
|
||||
*/
|
||||
DerivationOutputs outputs;
|
||||
/**
|
||||
* inputs that are sources
|
||||
*/
|
||||
StorePathSet inputSrcs;
|
||||
std::string platform;
|
||||
Path builder;
|
||||
Strings args;
|
||||
StringPairs env;
|
||||
std::string name;
|
||||
|
||||
BasicDerivation() = default;
|
||||
virtual ~BasicDerivation() { };
|
||||
|
||||
bool isBuiltin() const;
|
||||
|
||||
/**
|
||||
* Return true iff this is a fixed-output derivation.
|
||||
*/
|
||||
DerivationType type() const;
|
||||
|
||||
/**
|
||||
* Return the output names of a derivation.
|
||||
*/
|
||||
StringSet outputNames() const;
|
||||
|
||||
/**
|
||||
* Calculates the maps that contains all the DerivationOutputs, but
|
||||
* augmented with knowledge of the Store paths they would be written
|
||||
* into.
|
||||
*/
|
||||
DerivationOutputsAndOptPaths outputsAndOptPaths(const StoreDirConfig & store) const;
|
||||
|
||||
static std::string_view nameFromPath(const StorePath & storePath);
|
||||
|
||||
bool operator == (const BasicDerivation &) const = default;
|
||||
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
|
||||
//auto operator <=> (const BasicDerivation &) const = default;
|
||||
};
|
||||
|
||||
class Store;
|
||||
|
||||
struct Derivation : BasicDerivation
|
||||
{
|
||||
/**
|
||||
* inputs that are sub-derivations
|
||||
*/
|
||||
DerivedPathMap<std::set<OutputName>> inputDrvs;
|
||||
|
||||
/**
|
||||
* Print a derivation.
|
||||
*/
|
||||
std::string unparse(const StoreDirConfig & store, bool maskOutputs,
|
||||
DerivedPathMap<StringSet>::ChildNode::Map * actualInputs = nullptr) const;
|
||||
|
||||
/**
|
||||
* Return the underlying basic derivation but with these changes:
|
||||
*
|
||||
* 1. Input drvs are emptied, but the outputs of them that were used
|
||||
* are added directly to input sources.
|
||||
*
|
||||
* 2. Input placeholders are replaced with realized input store
|
||||
* paths.
|
||||
*/
|
||||
std::optional<BasicDerivation> tryResolve(Store & store, Store * evalStore = nullptr) const;
|
||||
|
||||
/**
|
||||
* Like the above, but instead of querying the Nix database for
|
||||
* realisations, uses a given mapping from input derivation paths +
|
||||
* output names to actual output store paths.
|
||||
*/
|
||||
std::optional<BasicDerivation> tryResolve(
|
||||
Store & store,
|
||||
const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const;
|
||||
|
||||
/**
|
||||
* Check that the derivation is valid and does not present any
|
||||
* illegal states.
|
||||
*
|
||||
* This is mainly a matter of checking the outputs, where our C++
|
||||
* representation supports all sorts of combinations we do not yet
|
||||
* allow.
|
||||
*/
|
||||
void checkInvariants(Store & store, const StorePath & drvPath) const;
|
||||
|
||||
Derivation() = default;
|
||||
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
|
||||
Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }
|
||||
|
||||
nlohmann::json toJSON(const StoreDirConfig & store) const;
|
||||
static Derivation fromJSON(
|
||||
const StoreDirConfig & store,
|
||||
const nlohmann::json & json,
|
||||
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
|
||||
|
||||
bool operator == (const Derivation &) const = default;
|
||||
// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
|
||||
//auto operator <=> (const Derivation &) const = default;
|
||||
};
|
||||
|
||||
|
||||
class Store;
|
||||
|
||||
/**
|
||||
* Write a derivation to the Nix store, and return its path.
|
||||
*/
|
||||
StorePath writeDerivation(Store & store,
|
||||
const Derivation & drv,
|
||||
RepairFlag repair = NoRepair,
|
||||
bool readOnly = false);
|
||||
|
||||
/**
|
||||
* Read a derivation from a file.
|
||||
*/
|
||||
Derivation parseDerivation(
|
||||
const StoreDirConfig & store,
|
||||
std::string && s,
|
||||
std::string_view name,
|
||||
const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
|
||||
|
||||
/**
|
||||
* \todo Remove.
|
||||
*
|
||||
* Use Path::isDerivation instead.
|
||||
*/
|
||||
bool isDerivation(std::string_view fileName);
|
||||
|
||||
/**
|
||||
* Calculate the name that will be used for the store path for this
|
||||
* output.
|
||||
*
|
||||
* This is usually <drv-name>-<output-name>, but is just <drv-name> when
|
||||
* the output name is "out".
|
||||
*/
|
||||
std::string outputPathName(std::string_view drvName, OutputNameView outputName);
|
||||
|
||||
|
||||
/**
|
||||
* The hashes modulo of a derivation.
|
||||
*
|
||||
* Each output is given a hash, although in practice only the content-addressed
|
||||
* derivations (fixed-output or not) will have a different hash for each
|
||||
* output.
|
||||
*/
|
||||
struct DrvHash {
|
||||
/**
|
||||
* Map from output names to hashes
|
||||
*/
|
||||
std::map<std::string, Hash> hashes;
|
||||
|
||||
enum struct Kind : bool {
|
||||
/**
|
||||
* Statically determined derivations.
|
||||
* This hash will be directly used to compute the output paths
|
||||
*/
|
||||
Regular,
|
||||
|
||||
/**
|
||||
* Floating-output derivations (and their reverse dependencies).
|
||||
*/
|
||||
Deferred,
|
||||
};
|
||||
|
||||
/**
|
||||
* The kind of derivation this is, simplified for just "derivation hash
|
||||
* modulo" purposes.
|
||||
*/
|
||||
Kind kind;
|
||||
};
|
||||
|
||||
void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;
|
||||
|
||||
/**
|
||||
* Returns hashes with the details of fixed-output subderivations
|
||||
* expunged.
|
||||
*
|
||||
* A fixed-output derivation is a derivation whose outputs have a
|
||||
* specified content hash and hash algorithm. (Currently they must have
|
||||
* exactly one output (`out`), which is specified using the `outputHash`
|
||||
* and `outputHashAlgo` attributes, but the algorithm doesn't assume
|
||||
* this.) We don't want changes to such derivations to propagate upwards
|
||||
* through the dependency graph, changing output paths everywhere.
|
||||
*
|
||||
* For instance, if we change the url in a call to the `fetchurl`
|
||||
* function, we do not want to rebuild everything depending on it---after
|
||||
* all, (the hash of) the file being downloaded is unchanged. So the
|
||||
* *output paths* should not change. On the other hand, the *derivation
|
||||
* paths* should change to reflect the new dependency graph.
|
||||
*
|
||||
* For fixed-output derivations, this returns a map from the name of
|
||||
* each output to its hash, unique up to the output's contents.
|
||||
*
|
||||
* For regular derivations, it returns a single hash of the derivation
|
||||
* ATerm, after subderivations have been likewise expunged from that
|
||||
* derivation.
|
||||
*/
|
||||
DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);
|
||||
|
||||
/**
|
||||
* Return a map associating each output to a hash that uniquely identifies its
|
||||
* derivation (modulo the self-references).
|
||||
*
|
||||
* \todo What is the Hash in this map?
|
||||
*/
|
||||
std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv);
|
||||
|
||||
/**
|
||||
* Memoisation of hashDerivationModulo().
|
||||
*/
|
||||
typedef std::map<StorePath, DrvHash> DrvHashes;
|
||||
|
||||
// FIXME: global, though at least thread-safe.
|
||||
extern Sync<DrvHashes> drvHashes;
|
||||
|
||||
struct Source;
|
||||
struct Sink;
|
||||
|
||||
Source & readDerivation(Source & in, const StoreDirConfig & store, BasicDerivation & drv, std::string_view name);
|
||||
void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDerivation & drv);
|
||||
|
||||
/**
|
||||
* This creates an opaque and almost certainly unique string
|
||||
* deterministically from the output name.
|
||||
*
|
||||
* It is used as a placeholder to allow derivations to refer to their
|
||||
* own outputs without needing to use the hash of a derivation in
|
||||
* itself, making the hash near-impossible to calculate.
|
||||
*/
|
||||
std::string hashPlaceholder(const OutputNameView outputName);
|
||||
|
||||
extern const Hash impureOutputHash;
|
||||
|
||||
}
|
||||
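The naming rule documented for `outputPathName` above is simple enough to sketch directly. This is an illustrative reimplementation only, not the libstore definition, and the helper name is made up:

```cpp
#include <string>
#include <string_view>

// Illustrative only: mirrors the documented rule that the store path name
// is "<drv-name>-<output-name>", except for the default "out" output,
// which gets no suffix.
std::string outputPathNameSketch(std::string_view drvName, std::string_view outputName)
{
    if (outputName == "out")
        return std::string { drvName };
    return std::string { drvName } + "-" + std::string { outputName };
}
```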
71
subprojects/libstore/derived-path-map.cc
Normal file

@@ -0,0 +1,71 @@
#include "derived-path-map.hh"
#include "util.hh"

namespace nix {

template<typename V>
typename DerivedPathMap<V>::ChildNode & DerivedPathMap<V>::ensureSlot(const SingleDerivedPath & k)
{
    std::function<ChildNode &(const SingleDerivedPath & )> initIter;
    initIter = [&](const auto & k) -> auto & {
        return std::visit(overloaded {
            [&](const SingleDerivedPath::Opaque & bo) -> auto & {
                // will not overwrite if already there
                return map[bo.path];
            },
            [&](const SingleDerivedPath::Built & bfd) -> auto & {
                auto & n = initIter(*bfd.drvPath);
                return n.childMap[bfd.output];
            },
        }, k.raw());
    };
    return initIter(k);
}

template<typename V>
typename DerivedPathMap<V>::ChildNode * DerivedPathMap<V>::findSlot(const SingleDerivedPath & k)
{
    std::function<ChildNode *(const SingleDerivedPath & )> initIter;
    initIter = [&](const auto & k) {
        return std::visit(overloaded {
            [&](const SingleDerivedPath::Opaque & bo) {
                auto it = map.find(bo.path);
                return it != map.end()
                    ? &it->second
                    : nullptr;
            },
            [&](const SingleDerivedPath::Built & bfd) {
                auto * n = initIter(*bfd.drvPath);
                if (!n) return (ChildNode *)nullptr;

                auto it = n->childMap.find(bfd.output);
                return it != n->childMap.end()
                    ? &it->second
                    : nullptr;
            },
        }, k.raw());
    };
    return initIter(k);
}

}

// instantiations

namespace nix {

template<>
bool DerivedPathMap<std::set<std::string>>::ChildNode::operator == (
    const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept = default;

// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
#if 0
template<>
std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator <=> (
    const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept = default;
#endif

template struct DerivedPathMap<std::set<std::string>>::ChildNode;
template struct DerivedPathMap<std::set<std::string>>;

};
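The `overloaded { ... }` pattern in both functions is the standard variant-visitor idiom. A minimal sketch of such a helper follows; Nix's own definition lives in a utility header and may differ in detail:

```cpp
// Combine several lambdas into a single overload set usable with std::visit.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
```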
110
subprojects/libstore/derived-path-map.hh
Normal file

@@ -0,0 +1,110 @@
#pragma once
///@file

#include "types.hh"
#include "derived-path.hh"

namespace nix {

/**
 * A simple Trie, of sorts. Conceptually a map of `SingleDerivedPath` to
 * values.
 *
 * Concretely, an n-ary tree, as described below. A
 * `SingleDerivedPath::Opaque` maps to the value of an immediate child
 * of the root node. A `SingleDerivedPath::Built` maps to a deeper child
 * node: the `SingleDerivedPath::Built::drvPath` is first mapped to a
 * child node (inductively), and then the
 * `SingleDerivedPath::Built::output` is used to look up that child's
 * child via its map. In this manner, every `SingleDerivedPath` is
 * mapped to a child node.
 *
 * @param V A type to instantiate for each output. It should probably
 * be an "optional" type, so that not every interior node has to have a
 * value. `* const Something` or `std::optional<Something>` would be
 * good choices for "optional" types.
 */
template<typename V>
struct DerivedPathMap {
    /**
     * A child node (non-root node).
     */
    struct ChildNode {
        /**
         * Value of this child node.
         *
         * @see DerivedPathMap for what `V` should be.
         */
        V value;

        /**
         * The map type for a child node's children.
         */
        using Map = std::map<OutputName, ChildNode>;

        /**
         * The map of this node's children.
         */
        Map childMap;

        bool operator == (const ChildNode &) const noexcept;

        // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
        // decltype(std::declval<V>() <=> std::declval<V>())
        // operator <=> (const ChildNode &) const noexcept;
    };

    /**
     * The map type for the root node.
     */
    using Map = std::map<StorePath, ChildNode>;

    /**
     * The map of the root node.
     */
    Map map;

    bool operator == (const DerivedPathMap &) const = default;

    // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
    // auto operator <=> (const DerivedPathMap &) const noexcept;

    /**
     * Find the node for `k`, creating it if needed.
     *
     * The node is referred to as a "slot" on the assumption that `V` is
     * some sort of optional type, so the given key can be set or unset
     * by changing this node.
     */
    ChildNode & ensureSlot(const SingleDerivedPath & k);

    /**
     * Like `ensureSlot` but does not create the slot if it doesn't exist.
     *
     * Read the entire description of `ensureSlot` to understand an
     * important caveat: "has a slot" does *not* imply "key is set in
     * the map". To ensure a key is set one would need to get the
     * child node (with `findSlot` or `ensureSlot`) *and* check the
     * `ChildNode::value`. A usage sketch follows this declaration block.
     */
    ChildNode * findSlot(const SingleDerivedPath & k);
};

template<>
bool DerivedPathMap<std::set<std::string>>::ChildNode::operator == (
    const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept;

// TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet.
#if 0
template<>
std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator <=> (
    const DerivedPathMap<std::set<std::string>>::ChildNode &) const noexcept;

template<>
inline auto DerivedPathMap<std::set<std::string>>::operator <=> (const DerivedPathMap<std::set<std::string>> &) const noexcept = default;
#endif

extern template struct DerivedPathMap<std::set<std::string>>::ChildNode;
extern template struct DerivedPathMap<std::set<std::string>>;

}
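A short usage sketch for the trie, using the `std::set<std::string>` instantiation that the accompanying `.cc` file explicitly provides; here the empty set plays the "unset" role the comments describe, and the function is hypothetical:

```cpp
#include <set>
#include <string>

void trieSketch(nix::DerivedPathMap<std::set<std::string>> & m, const nix::SingleDerivedPath & k)
{
    // Create (or find) the slot for `k` and mark the key as set.
    m.ensureSlot(k).value.insert("out");

    // "Slot exists" does not imply "key is set": interior nodes exist
    // merely to reach deeper keys, so the value must be checked too.
    if (auto * n = m.findSlot(k); n && !n->value.empty()) {
        // `k` is genuinely present in the map.
    }
}
```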
310
subprojects/libstore/derived-path.cc
Normal file

@@ -0,0 +1,310 @@
#include "derived-path.hh"
#include "derivations.hh"
#include "store-api.hh"
#include "comparator.hh"

#include <nlohmann/json.hpp>

#include <optional>

namespace nix {

// Custom implementation to avoid `ref` ptr equality
GENERATE_CMP_EXT(
    ,
    std::strong_ordering,
    SingleDerivedPathBuilt,
    *me->drvPath,
    me->output);

// Custom implementation to avoid `ref` ptr equality

// TODO no `GENERATE_CMP_EXT` because no `std::set::operator<=>` on
// Darwin, per header.
GENERATE_EQUAL(
    ,
    DerivedPathBuilt ::,
    DerivedPathBuilt,
    *me->drvPath,
    me->outputs);
GENERATE_ONE_CMP(
    ,
    bool,
    DerivedPathBuilt ::,
    <,
    DerivedPathBuilt,
    *me->drvPath,
    me->outputs);

nlohmann::json DerivedPath::Opaque::toJSON(const StoreDirConfig & store) const
{
    return store.printStorePath(path);
}

nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const {
    nlohmann::json res;
    res["drvPath"] = drvPath->toJSON(store);
    // Fallback for the input-addressed derivation case: we expect to always be
    // able to print the output paths, so let's do it.
    // FIXME try-resolve on drvPath
    const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath));
    res["output"] = output;
    auto outputPathIter = outputMap.find(output);
    if (outputPathIter == outputMap.end())
        res["outputPath"] = nullptr;
    else if (std::optional p = outputPathIter->second)
        res["outputPath"] = store.printStorePath(*p);
    else
        res["outputPath"] = nullptr;
    return res;
}

nlohmann::json DerivedPath::Built::toJSON(Store & store) const {
    nlohmann::json res;
    res["drvPath"] = drvPath->toJSON(store);
    // Fallback for the input-addressed derivation case: we expect to always be
    // able to print the output paths, so let's do it.
    // FIXME try-resolve on drvPath
    const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath));
    for (const auto & [output, outputPathOpt] : outputMap) {
        if (!outputs.contains(output)) continue;
        if (outputPathOpt)
            res["outputs"][output] = store.printStorePath(*outputPathOpt);
        else
            res["outputs"][output] = nullptr;
    }
    return res;
}

nlohmann::json SingleDerivedPath::toJSON(Store & store) const
{
    return std::visit([&](const auto & buildable) {
        return buildable.toJSON(store);
    }, raw());
}

nlohmann::json DerivedPath::toJSON(Store & store) const
{
    return std::visit([&](const auto & buildable) {
        return buildable.toJSON(store);
    }, raw());
}

std::string DerivedPath::Opaque::to_string(const StoreDirConfig & store) const
{
    return store.printStorePath(path);
}

std::string SingleDerivedPath::Built::to_string(const StoreDirConfig & store) const
{
    return drvPath->to_string(store) + "^" + output;
}

std::string SingleDerivedPath::Built::to_string_legacy(const StoreDirConfig & store) const
{
    // Render the whole chain with the legacy separator, matching
    // DerivedPath::Built::to_string_legacy below.
    return drvPath->to_string_legacy(store) + "!" + output;
}

std::string DerivedPath::Built::to_string(const StoreDirConfig & store) const
{
    return drvPath->to_string(store)
        + '^'
        + outputs.to_string();
}

std::string DerivedPath::Built::to_string_legacy(const StoreDirConfig & store) const
{
    return drvPath->to_string_legacy(store)
        + "!"
        + outputs.to_string();
}

std::string SingleDerivedPath::to_string(const StoreDirConfig & store) const
{
    return std::visit(
        [&](const auto & req) { return req.to_string(store); },
        raw());
}

std::string DerivedPath::to_string(const StoreDirConfig & store) const
{
    return std::visit(
        [&](const auto & req) { return req.to_string(store); },
        raw());
}

std::string SingleDerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
    return std::visit(overloaded {
        [&](const SingleDerivedPath::Built & req) { return req.to_string_legacy(store); },
        [&](const SingleDerivedPath::Opaque & req) { return req.to_string(store); },
    }, this->raw());
}

std::string DerivedPath::to_string_legacy(const StoreDirConfig & store) const
{
    return std::visit(overloaded {
        [&](const DerivedPath::Built & req) { return req.to_string_legacy(store); },
        [&](const DerivedPath::Opaque & req) { return req.to_string(store); },
    }, this->raw());
}


DerivedPath::Opaque DerivedPath::Opaque::parse(const StoreDirConfig & store, std::string_view s)
{
    return {store.parseStorePath(s)};
}

void drvRequireExperiment(
    const SingleDerivedPath & drv,
    const ExperimentalFeatureSettings & xpSettings)
{
    std::visit(overloaded {
        [&](const SingleDerivedPath::Opaque &) {
            // plain drv path; no experimental features required.
        },
        [&](const SingleDerivedPath::Built &) {
            xpSettings.require(Xp::DynamicDerivations);
        },
    }, drv.raw());
}

SingleDerivedPath::Built SingleDerivedPath::Built::parse(
    const StoreDirConfig & store, ref<SingleDerivedPath> drv,
    OutputNameView output,
    const ExperimentalFeatureSettings & xpSettings)
{
    drvRequireExperiment(*drv, xpSettings);
    return {
        .drvPath = drv,
        .output = std::string { output },
    };
}

DerivedPath::Built DerivedPath::Built::parse(
    const StoreDirConfig & store, ref<SingleDerivedPath> drv,
    OutputNameView outputsS,
    const ExperimentalFeatureSettings & xpSettings)
{
    drvRequireExperiment(*drv, xpSettings);
    return {
        .drvPath = drv,
        .outputs = OutputsSpec::parse(outputsS),
    };
}

static SingleDerivedPath parseWithSingle(
    const StoreDirConfig & store, std::string_view s, std::string_view separator,
    const ExperimentalFeatureSettings & xpSettings)
{
    size_t n = s.rfind(separator);
    return n == s.npos
        ? (SingleDerivedPath) SingleDerivedPath::Opaque::parse(store, s)
        : (SingleDerivedPath) SingleDerivedPath::Built::parse(store,
            make_ref<SingleDerivedPath>(parseWithSingle(
                store,
                s.substr(0, n),
                separator,
                xpSettings)),
            s.substr(n + 1),
            xpSettings);
}

SingleDerivedPath SingleDerivedPath::parse(
    const StoreDirConfig & store,
    std::string_view s,
    const ExperimentalFeatureSettings & xpSettings)
{
    return parseWithSingle(store, s, "^", xpSettings);
}

SingleDerivedPath SingleDerivedPath::parseLegacy(
    const StoreDirConfig & store,
    std::string_view s,
    const ExperimentalFeatureSettings & xpSettings)
{
    return parseWithSingle(store, s, "!", xpSettings);
}

static DerivedPath parseWith(
    const StoreDirConfig & store, std::string_view s, std::string_view separator,
    const ExperimentalFeatureSettings & xpSettings)
{
    size_t n = s.rfind(separator);
    return n == s.npos
        ? (DerivedPath) DerivedPath::Opaque::parse(store, s)
        : (DerivedPath) DerivedPath::Built::parse(store,
            make_ref<SingleDerivedPath>(parseWithSingle(
                store,
                s.substr(0, n),
                separator,
                xpSettings)),
            s.substr(n + 1),
            xpSettings);
}

DerivedPath DerivedPath::parse(
    const StoreDirConfig & store,
    std::string_view s,
    const ExperimentalFeatureSettings & xpSettings)
{
    return parseWith(store, s, "^", xpSettings);
}

DerivedPath DerivedPath::parseLegacy(
    const StoreDirConfig & store,
    std::string_view s,
    const ExperimentalFeatureSettings & xpSettings)
{
    return parseWith(store, s, "!", xpSettings);
}

DerivedPath DerivedPath::fromSingle(const SingleDerivedPath & req)
{
    return std::visit(overloaded {
        [&](const SingleDerivedPath::Opaque & o) -> DerivedPath {
            return o;
        },
        [&](const SingleDerivedPath::Built & b) -> DerivedPath {
            return DerivedPath::Built {
                .drvPath = b.drvPath,
                .outputs = OutputsSpec::Names { b.output },
            };
        },
    }, req.raw());
}

const StorePath & SingleDerivedPath::Built::getBaseStorePath() const
{
    return drvPath->getBaseStorePath();
}

const StorePath & DerivedPath::Built::getBaseStorePath() const
{
    return drvPath->getBaseStorePath();
}

template<typename DP>
static inline const StorePath & getBaseStorePath_(const DP & derivedPath)
{
    return std::visit(overloaded {
        [&](const typename DP::Built & bfd) -> auto & {
            return bfd.drvPath->getBaseStorePath();
        },
        [&](const typename DP::Opaque & bo) -> auto & {
            return bo.path;
        },
    }, derivedPath.raw());
}

const StorePath & SingleDerivedPath::getBaseStorePath() const
{
    return getBaseStorePath_(*this);
}

const StorePath & DerivedPath::getBaseStorePath() const
{
    return getBaseStorePath_(*this);
}

}
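Because `parseWithSingle`/`parseWith` split on the *rightmost* separator and recurse on the left-hand side, a chain of separators nests leftwards. A hedged, comment-only illustration (the `<drv>` path is a stand-in, and chains beyond one level require the `dynamic-derivations` experimental feature):

```cpp
// Illustrative only: how the rightmost-split recursion decomposes a chain.
//
//   "<drv>^foo^bar"
//     rfind('^')  -> left = "<drv>^foo", right = "bar"
//     recurse     -> left = "<drv>",     right = "foo"
//
// yielding Built { Built { Opaque "<drv>", "foo" }, "bar" }.
```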
305
subprojects/libstore/derived-path.hh
Normal file

@@ -0,0 +1,305 @@
#pragma once
///@file

#include "path.hh"
#include "outputs-spec.hh"
#include "config.hh"
#include "ref.hh"

#include <variant>

#include <nlohmann/json_fwd.hpp>

namespace nix {

struct StoreDirConfig;

// TODO stop needing this, `toJSON` below should be pure
class Store;

/**
 * An opaque derived path.
 *
 * Opaque derived paths are just store paths, and fully evaluated. They
 * cannot be simplified further. Since they are opaque, they cannot be
 * built, but they can be fetched.
 */
struct DerivedPathOpaque {
    StorePath path;

    std::string to_string(const StoreDirConfig & store) const;
    static DerivedPathOpaque parse(const StoreDirConfig & store, std::string_view);
    nlohmann::json toJSON(const StoreDirConfig & store) const;

    bool operator == (const DerivedPathOpaque &) const = default;
    auto operator <=> (const DerivedPathOpaque &) const = default;
};

struct SingleDerivedPath;

/**
 * A single derived path that is built from a derivation
 *
 * Built derived paths are a pair of a derivation and an output name. They are
 * evaluated by building the derivation, and then taking the resulting output
 * path of the given output name.
 */
struct SingleDerivedPathBuilt {
    ref<SingleDerivedPath> drvPath;
    OutputName output;

    /**
     * Get the store path this is ultimately derived from (by realising
     * and projecting outputs).
     *
     * Note that this is *not* a property of the store object being
     * referred to, but just of this path --- how we happened to be
     * referring to that store object. In other words, this means this
     * function breaks "referential transparency". It should therefore
     * be used only with great care.
     */
    const StorePath & getBaseStorePath() const;

    /**
     * Uses `^` as the separator
     */
    std::string to_string(const StoreDirConfig & store) const;
    /**
     * Uses `!` as the separator
     */
    std::string to_string_legacy(const StoreDirConfig & store) const;
    /**
     * The caller splits on the separator, so it works for both variants.
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static SingleDerivedPathBuilt parse(
        const StoreDirConfig & store, ref<SingleDerivedPath> drvPath,
        OutputNameView outputs,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
    nlohmann::json toJSON(Store & store) const;

    bool operator == (const SingleDerivedPathBuilt &) const noexcept;
    std::strong_ordering operator <=> (const SingleDerivedPathBuilt &) const noexcept;
};

using _SingleDerivedPathRaw = std::variant<
    DerivedPathOpaque,
    SingleDerivedPathBuilt
>;

/**
 * A "derived path" is a very simple sort of expression (not a Nix
 * language expression! But an expression in the general sense) that
 * evaluates to a (concrete) store path. It is either:
 *
 * - opaque, in which case it is just a concrete store path with
 *   possibly no known derivation
 *
 * - built, in which case it is a pair of a derivation path and an
 *   output name.
 */
struct SingleDerivedPath : _SingleDerivedPathRaw {
    using Raw = _SingleDerivedPathRaw;
    using Raw::Raw;

    using Opaque = DerivedPathOpaque;
    using Built = SingleDerivedPathBuilt;

    inline const Raw & raw() const {
        return static_cast<const Raw &>(*this);
    }

    bool operator == (const SingleDerivedPath &) const = default;
    auto operator <=> (const SingleDerivedPath &) const = default;

    /**
     * Get the store path this is ultimately derived from (by realising
     * and projecting outputs).
     *
     * Note that this is *not* a property of the store object being
     * referred to, but just of this path --- how we happened to be
     * referring to that store object. In other words, this means this
     * function breaks "referential transparency". It should therefore
     * be used only with great care.
     */
    const StorePath & getBaseStorePath() const;

    /**
     * Uses `^` as the separator
     */
    std::string to_string(const StoreDirConfig & store) const;
    /**
     * Uses `!` as the separator
     */
    std::string to_string_legacy(const StoreDirConfig & store) const;
    /**
     * Uses `^` as the separator
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static SingleDerivedPath parse(
        const StoreDirConfig & store,
        std::string_view,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
    /**
     * Uses `!` as the separator
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static SingleDerivedPath parseLegacy(
        const StoreDirConfig & store,
        std::string_view,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
    nlohmann::json toJSON(Store & store) const;
};

static inline ref<SingleDerivedPath> makeConstantStorePathRef(StorePath drvPath)
{
    return make_ref<SingleDerivedPath>(SingleDerivedPath::Opaque { drvPath });
}

/**
 * A set of derived paths that are built from a derivation
 *
 * Built derived paths are a pair of a derivation and some output names.
 * They are evaluated by building the derivation, and then replacing the
 * output names with the resulting outputs.
 *
 * Note that this does mean a built derived path evaluates to multiple
 * opaque paths, which is sort of icky as expressions are supposed to
 * evaluate to single values. Perhaps this should have just a single
 * output name.
 */
struct DerivedPathBuilt {
    ref<SingleDerivedPath> drvPath;
    OutputsSpec outputs;

    /**
     * Get the store path this is ultimately derived from (by realising
     * and projecting outputs).
     *
     * Note that this is *not* a property of the store object being
     * referred to, but just of this path --- how we happened to be
     * referring to that store object. In other words, this means this
     * function breaks "referential transparency". It should therefore
     * be used only with great care.
     */
    const StorePath & getBaseStorePath() const;

    /**
     * Uses `^` as the separator
     */
    std::string to_string(const StoreDirConfig & store) const;
    /**
     * Uses `!` as the separator
     */
    std::string to_string_legacy(const StoreDirConfig & store) const;
    /**
     * The caller splits on the separator, so it works for both variants.
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DerivedPathBuilt parse(
        const StoreDirConfig & store, ref<SingleDerivedPath>,
        std::string_view,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
    nlohmann::json toJSON(Store & store) const;

    bool operator == (const DerivedPathBuilt &) const noexcept;
    // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
    bool operator < (const DerivedPathBuilt &) const noexcept;
};

using _DerivedPathRaw = std::variant<
    DerivedPathOpaque,
    DerivedPathBuilt
>;

/**
 * A "derived path" is a very simple sort of expression that evaluates
 * to one or more (concrete) store paths. It is either:
 *
 * - opaque, in which case it is just a single concrete store path with
 *   possibly no known derivation
 *
 * - built, in which case it is a pair of a derivation path and some
 *   output names.
 */
struct DerivedPath : _DerivedPathRaw {
    using Raw = _DerivedPathRaw;
    using Raw::Raw;

    using Opaque = DerivedPathOpaque;
    using Built = DerivedPathBuilt;

    inline const Raw & raw() const {
        return static_cast<const Raw &>(*this);
    }

    bool operator == (const DerivedPath &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet.
    //auto operator <=> (const DerivedPath &) const = default;

    /**
     * Get the store path this is ultimately derived from (by realising
     * and projecting outputs).
     *
     * Note that this is *not* a property of the store object being
     * referred to, but just of this path --- how we happened to be
     * referring to that store object. In other words, this means this
     * function breaks "referential transparency". It should therefore
     * be used only with great care.
     */
    const StorePath & getBaseStorePath() const;

    /**
     * Uses `^` as the separator
     */
    std::string to_string(const StoreDirConfig & store) const;
    /**
     * Uses `!` as the separator
     */
    std::string to_string_legacy(const StoreDirConfig & store) const;
    /**
     * Uses `^` as the separator
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DerivedPath parse(
        const StoreDirConfig & store,
        std::string_view,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
    /**
     * Uses `!` as the separator
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DerivedPath parseLegacy(
        const StoreDirConfig & store,
        std::string_view,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);

    /**
     * Convert a `SingleDerivedPath` to a `DerivedPath`.
     */
    static DerivedPath fromSingle(const SingleDerivedPath &);

    nlohmann::json toJSON(Store & store) const;
};

typedef std::vector<DerivedPath> DerivedPaths;

/**
 * Used by various parser functions to require experimental features as
 * needed.
 *
 * Somewhat unfortunately, this cannot just be an implementation detail of
 * this module.
 *
 * @param xpSettings Stop-gap to avoid globals during unit tests.
 */
void drvRequireExperiment(
    const SingleDerivedPath & drv,
    const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
}
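A small sketch tying the two renderings together, assuming `store` is any `StoreDirConfig` and `drvPath` an already-parsed `.drv` store path (both are stand-ins):

```cpp
void renderSketch(const nix::StoreDirConfig & store, nix::StorePath drvPath)
{
    // Wrap the plain store path as an opaque SingleDerivedPath.
    auto base = nix::makeConstantStorePathRef(std::move(drvPath));
    nix::SingleDerivedPath::Built b { .drvPath = base, .output = "out" };

    auto modern = b.to_string(store);        // ".../foo.drv^out"
    auto legacy = b.to_string_legacy(store); // ".../foo.drv!out"

    // SingleDerivedPath::parse / parseLegacy invert these renderings.
}
```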
58
subprojects/libstore/downstream-placeholder.cc
Normal file

@@ -0,0 +1,58 @@
#include "downstream-placeholder.hh"
#include "derivations.hh"

namespace nix {

std::string DownstreamPlaceholder::render() const
{
    return "/" + hash.to_string(HashFormat::Nix32, false);
}


DownstreamPlaceholder DownstreamPlaceholder::unknownCaOutput(
    const StorePath & drvPath,
    OutputNameView outputName,
    const ExperimentalFeatureSettings & xpSettings)
{
    xpSettings.require(Xp::CaDerivations);
    auto drvNameWithExtension = drvPath.name();
    // Strip the ".drv" extension to get the derivation name proper.
    auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
    auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
    return DownstreamPlaceholder {
        hashString(HashAlgorithm::SHA256, clearText)
    };
}

DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(
    const DownstreamPlaceholder & placeholder,
    OutputNameView outputName,
    const ExperimentalFeatureSettings & xpSettings)
{
    xpSettings.require(Xp::DynamicDerivations);
    auto compressed = compressHash(placeholder.hash, 20);
    auto clearText = "nix-computed-output:"
        + compressed.to_string(HashFormat::Nix32, false)
        + ":" + std::string { outputName };
    return DownstreamPlaceholder {
        hashString(HashAlgorithm::SHA256, clearText)
    };
}

DownstreamPlaceholder DownstreamPlaceholder::fromSingleDerivedPathBuilt(
    const SingleDerivedPath::Built & b,
    const ExperimentalFeatureSettings & xpSettings)
{
    return std::visit(overloaded {
        [&](const SingleDerivedPath::Opaque & o) {
            return DownstreamPlaceholder::unknownCaOutput(o.path, b.output, xpSettings);
        },
        [&](const SingleDerivedPath::Built & b2) {
            return DownstreamPlaceholder::unknownDerivation(
                DownstreamPlaceholder::fromSingleDerivedPathBuilt(b2, xpSettings),
                b.output,
                xpSettings);
        },
    }, b.drvPath->raw());
}

}
91
subprojects/libstore/downstream-placeholder.hh
Normal file

@@ -0,0 +1,91 @@
#pragma once
///@file

#include "hash.hh"
#include "path.hh"
#include "derived-path.hh"

namespace nix {

/**
 * Downstream Placeholders are opaque and almost certainly unique values
 * used to allow derivations to refer to store objects which are yet to
 * be built and for which we do not yet have store paths.
 *
 * They correspond to `DerivedPaths` that are not `DerivedPath::Opaque`,
 * except for the cases involving input addressing or fixed outputs
 * where we do know a store path for the derivation output in advance.
 *
 * Unlike `DerivedPath`, however, `DownstreamPlaceholder` is
 * purposefully opaque and obfuscated. This is so they are hard to
 * create by accident, and so substituting them (once we know what the
 * path to the store object is) is unlikely to capture other stuff it
 * shouldn't.
 *
 * We use them with `Derivation`: the `render()` method is called to
 * render an opaque string which can be used in the derivation, and the
 * resolving logic can substitute those strings for store paths when
 * resolving `Derivation.inputDrvs` to `BasicDerivation.inputSrcs`.
 */
class DownstreamPlaceholder
{
    /**
     * `DownstreamPlaceholder` is just a newtype of `Hash`.
     * This is its only field.
     */
    Hash hash;

    /**
     * Newtype constructor
     */
    DownstreamPlaceholder(Hash hash) : hash(hash) { }

public:
    /**
     * This creates an opaque and almost certainly unique string
     * deterministically from the placeholder.
     */
    std::string render() const;

    /**
     * Create a placeholder for an unknown output of a content-addressed
     * derivation.
     *
     * The derivation itself is known (we have a store path for it), but
     * the output doesn't yet have a known store path.
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DownstreamPlaceholder unknownCaOutput(
        const StorePath & drvPath,
        OutputNameView outputName,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);

    /**
     * Create a placeholder for the output of an unknown derivation.
     *
     * The derivation is not yet known because it is a dynamic
     * derivation --- it is itself an output of another derivation ---
     * and we just have (another) placeholder for it.
     *
     * @param xpSettings Stop-gap to avoid globals during unit tests.
     */
    static DownstreamPlaceholder unknownDerivation(
        const DownstreamPlaceholder & drvPlaceholder,
        OutputNameView outputName,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);

    /**
     * Convenience constructor that handles both cases (unknown
     * content-addressed output and unknown derivation), delegating as
     * needed to `unknownCaOutput` and `unknownDerivation`.
     *
     * Recursively builds up a placeholder from a
     * `SingleDerivedPath::Built.drvPath` chain.
     */
    static DownstreamPlaceholder fromSingleDerivedPathBuilt(
        const SingleDerivedPath::Built & built,
        const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};

}
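A usage sketch, assuming `drvPath` is the store path of a content-addressed `.drv` and that the `ca-derivations` experimental feature is enabled; the function names are real, the scenario and wrapper function are hypothetical:

```cpp
std::string placeholderSketch(const nix::StorePath & drvPath)
{
    auto ph = nix::DownstreamPlaceholder::unknownCaOutput(drvPath, "out");
    // An opaque "/..." string that can stand in for the not-yet-known
    // output path; the resolving logic substitutes the real path later.
    return ph.render();
}
```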
91
subprojects/libstore/dummy-store.cc
Normal file

@@ -0,0 +1,91 @@
#include "store-api.hh"
#include "callback.hh"

namespace nix {

struct DummyStoreConfig : virtual StoreConfig {
    using StoreConfig::StoreConfig;

    DummyStoreConfig(std::string_view scheme, std::string_view authority, const Params & params)
        : StoreConfig(params)
    {
        if (!authority.empty())
            throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority);
    }

    const std::string name() override { return "Dummy Store"; }

    std::string doc() override
    {
        return
          #include "dummy-store.md"
          ;
    }

    static std::set<std::string> uriSchemes() {
        return {"dummy"};
    }
};

struct DummyStore : public virtual DummyStoreConfig, public virtual Store
{
    DummyStore(std::string_view scheme, std::string_view authority, const Params & params)
        : StoreConfig(params)
        , DummyStoreConfig(scheme, authority, params)
        , Store(params)
    { }

    DummyStore(const Params & params)
        : DummyStore("dummy", "", params)
    { }

    std::string getUri() override
    {
        return *uriSchemes().begin();
    }

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
    {
        callback(nullptr);
    }

    /**
     * The dummy store is incapable of *not* trusting! :)
     */
    virtual std::optional<TrustedFlag> isTrustedClient() override
    {
        return Trusted;
    }

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
    { unsupported("queryPathFromHashPart"); }

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs) override
    { unsupported("addToStore"); }

    virtual StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive,
        ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive,
        HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
        const StorePathSet & references = StorePathSet(),
        RepairFlag repair = NoRepair) override
    { unsupported("addToStore"); }

    void narFromPath(const StorePath & path, Sink & sink) override
    { unsupported("narFromPath"); }

    void queryRealisationUncached(const DrvOutput &,
        Callback<std::shared_ptr<const Realisation>> callback) noexcept override
    { callback(nullptr); }

    virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
    { unsupported("getFSAccessor"); }
};

static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;

}
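Since the implementation registers the `dummy` scheme, it can be opened like any other store. A minimal sketch, assuming the usual libstore initialization has already run:

```cpp
#include "store-api.hh"

void dummySketch()
{
    auto store = nix::openStore("dummy://");
    // Every client is trusted, and queryPathInfoUncached always reports
    // "no such path", so the store is empty but usable for
    // evaluation-only workflows.
}
```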
13
subprojects/libstore/dummy-store.md
Normal file

@@ -0,0 +1,13 @@
R"(
|
||||
|
||||
**Store URL format**: `dummy://`
|
||||
|
||||
This store type represents a store that contains no store paths and
|
||||
cannot be written to. It's useful when you want to use the Nix
|
||||
evaluator when no actual Nix store exists, e.g.
|
||||
|
||||
```console
|
||||
# nix eval --store dummy:// --expr '1 + 2'
|
||||
```
|
||||
|
||||
)"
|
104
subprojects/libstore/export-import.cc
Normal file

@@ -0,0 +1,104 @@
#include "serialise.hh"
#include "store-api.hh"
#include "archive.hh"
#include "common-protocol.hh"
#include "common-protocol-impl.hh"

#include <algorithm>

namespace nix {

void Store::exportPaths(const StorePathSet & paths, Sink & sink)
{
    auto sorted = topoSortPaths(paths);
    std::reverse(sorted.begin(), sorted.end());

    std::string doneLabel("paths exported");
    //logger->incExpected(doneLabel, sorted.size());

    for (auto & path : sorted) {
        //Activity act(*logger, lvlInfo, "exporting path '%s'", path);
        sink << 1;
        exportPath(path, sink);
        //logger->incProgress(doneLabel);
    }

    sink << 0;
}

void Store::exportPath(const StorePath & path, Sink & sink)
{
    auto info = queryPathInfo(path);

    HashSink hashSink(HashAlgorithm::SHA256);
    TeeSink teeSink(sink, hashSink);

    narFromPath(path, teeSink);

    /* Refuse to export paths that have changed. This prevents
       filesystem corruption from spreading to other machines.
       Don't complain if the stored hash is zero (unknown). */
    Hash hash = hashSink.currentHash().first;
    if (hash != info->narHash && info->narHash != Hash(info->narHash.algo))
        throw Error("hash of path '%s' has changed from '%s' to '%s'!",
            printStorePath(path), info->narHash.to_string(HashFormat::Nix32, true), hash.to_string(HashFormat::Nix32, true));

    teeSink
        << exportMagic
        << printStorePath(path);
    CommonProto::write(*this,
        CommonProto::WriteConn { .to = teeSink },
        info->references);
    teeSink
        << (info->deriver ? printStorePath(*info->deriver) : "")
        << 0;
}

StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
{
    StorePaths res;
    while (true) {
        auto n = readNum<uint64_t>(source);
        if (n == 0) break;
        if (n != 1) throw Error("input doesn't look like something created by 'nix-store --export'");

        /* Extract the NAR from the source. */
        StringSink saved;
        TeeSource tee { source, saved };
        NullFileSystemObjectSink ether;
        parseDump(ether, tee);

        uint32_t magic = readInt(source);
        if (magic != exportMagic)
            throw Error("Nix archive cannot be imported; wrong format");

        auto path = parseStorePath(readString(source));

        //Activity act(*logger, lvlInfo, "importing path '%s'", info.path);

        auto references = CommonProto::Serialise<StorePathSet>::read(*this,
            CommonProto::ReadConn { .from = source });
        auto deriver = readString(source);
        auto narHash = hashString(HashAlgorithm::SHA256, saved.s);

        ValidPathInfo info { path, narHash };
        if (deriver != "")
            info.deriver = parseStorePath(deriver);
        info.references = references;
        info.narSize = saved.s.size();

        // Ignore optional legacy signature.
        if (readInt(source) == 1)
            readString(source);

        // Can't use underlying source, which would have been exhausted
        auto source = StringSource(saved.s);
        addToStore(info, source, NoRepair, checkSigs);

        res.push_back(info.path);
    }

    return res;
}

}
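A sketch of the round trip, assuming `src` and `dst` are two already-opened stores (hypothetical variables); buffering the whole stream in memory is for illustration only:

```cpp
void copyViaExport(nix::Store & src, nix::Store & dst, const nix::StorePathSet & paths)
{
    nix::StringSink sink;
    // Stream framing, per exportPath above: for each path a leading 1,
    // then the NAR, the export magic, the path, its references and
    // deriver; a final 0 terminates the stream.
    src.exportPaths(paths, sink);

    nix::StringSource source { sink.s };
    dst.importPaths(source, nix::NoCheckSigs);
}
```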
945
subprojects/libstore/filetransfer.cc
Normal file

@@ -0,0 +1,945 @@
#include "filetransfer.hh"
|
||||
#include "globals.hh"
|
||||
#include "config-global.hh"
|
||||
#include "store-api.hh"
|
||||
#include "s3.hh"
|
||||
#include "compression.hh"
|
||||
#include "finally.hh"
|
||||
#include "callback.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
#if ENABLE_S3
|
||||
#include <aws/core/client/ClientConfiguration.h>
|
||||
#endif
|
||||
|
||||
#if __linux__
|
# include "namespaces.hh"
#endif

#include <unistd.h>
#include <fcntl.h>

#include <curl/curl.h>

#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <queue>
#include <random>
#include <thread>
#include <regex>

using namespace std::string_literals;

namespace nix {

FileTransferSettings fileTransferSettings;

static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);

struct curlFileTransfer : public FileTransfer
{
CURLM * curlm = 0;

std::random_device rd;
std::mt19937 mt19937;

struct TransferItem : public std::enable_shared_from_this<TransferItem>
{
curlFileTransfer & fileTransfer;
FileTransferRequest request;
FileTransferResult result;
Activity act;
bool done = false; // whether either the success or failure function has been called
Callback<FileTransferResult> callback;
CURL * req = 0;
// buffer to accompany the `req` above
char errbuf[CURL_ERROR_SIZE];
bool active = false; // whether the handle has been added to the multi object
std::string statusMsg;

unsigned int attempt = 0;

/* Don't start this download until the specified time point
has been reached. */
std::chrono::steady_clock::time_point embargo;

struct curl_slist * requestHeaders = 0;

std::string encoding;

bool acceptRanges = false;

curl_off_t writtenToSink = 0;

std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now();

inline static const std::set<long> successfulStatuses {200, 201, 204, 206, 304, 0 /* other protocol */};

/* Get the HTTP status code, or 0 for other protocols. */
long getHTTPStatus()
{
long httpStatus = 0;
long protocol = 0;
curl_easy_getinfo(req, CURLINFO_PROTOCOL, &protocol);
if (protocol == CURLPROTO_HTTP || protocol == CURLPROTO_HTTPS)
curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
return httpStatus;
}

TransferItem(curlFileTransfer & fileTransfer,
const FileTransferRequest & request,
Callback<FileTransferResult> && callback)
: fileTransfer(fileTransfer)
, request(request)
, act(*logger, lvlTalkative, actFileTransfer,
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
{request.uri}, request.parentAct)
, callback(std::move(callback))
, finalSink([this](std::string_view data) {
if (errorSink) {
(*errorSink)(data);
}

if (this->request.dataCallback) {
auto httpStatus = getHTTPStatus();

/* Only write data to the sink if this is a
successful response. */
if (successfulStatuses.count(httpStatus)) {
writtenToSink += data.size();
this->request.dataCallback(data);
}
} else
this->result.data.append(data);
})
{
result.urls.push_back(request.uri);

requestHeaders = curl_slist_append(requestHeaders, "Accept-Encoding: zstd, br, gzip, deflate, bzip2, xz");
if (!request.expectedETag.empty())
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
if (!request.mimeType.empty())
requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str());
for (auto it = request.headers.begin(); it != request.headers.end(); ++it) {
requestHeaders = curl_slist_append(requestHeaders, fmt("%s: %s", it->first, it->second).c_str());
}
}

~TransferItem()
{
if (req) {
if (active)
curl_multi_remove_handle(fileTransfer.curlm, req);
curl_easy_cleanup(req);
}
if (requestHeaders) curl_slist_free_all(requestHeaders);
try {
if (!done)
fail(FileTransferError(Interrupted, {}, "download of '%s' was interrupted", request.uri));
} catch (...) {
ignoreExceptionInDestructor();
}
}

void failEx(std::exception_ptr ex)
{
assert(!done);
done = true;
callback.rethrow(ex);
}

template<class T>
void fail(T && e)
{
failEx(std::make_exception_ptr(std::move(e)));
}

LambdaSink finalSink;
std::shared_ptr<FinishSink> decompressionSink;
std::optional<StringSink> errorSink;

std::exception_ptr writeException;

size_t writeCallback(void * contents, size_t size, size_t nmemb)
{
try {
size_t realSize = size * nmemb;
result.bodySize += realSize;

if (!decompressionSink) {
decompressionSink = makeDecompressionSink(encoding, finalSink);
if (! successfulStatuses.count(getHTTPStatus())) {
// In this case we want to construct a TeeSink, to keep
// the response around (which we figure won't be big
// like an actual download should be) to improve error
// messages.
errorSink = StringSink { };
}
}

(*decompressionSink)({(char *) contents, realSize});

return realSize;
} catch (...) {
writeException = std::current_exception();
return 0;
}
}

static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
{
return ((TransferItem *) userp)->writeCallback(contents, size, nmemb);
}
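/* Illustrative note (not part of the original file): curl hands the body
to CURLOPT_WRITEFUNCTION in chunks of size * nmemb bytes and treats any
other return value as an abort. For example, a 16 KiB chunk may arrive
as size = 1, nmemb = 16384; returning 16384 acknowledges it, while the
`return 0` above makes curl stop the transfer and report
CURLE_WRITE_ERROR, which finish() below surfaces via the exception
stored in `writeException`. */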

void appendCurrentUrl()
{
char * effectiveUriCStr = nullptr;
curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
if (effectiveUriCStr && *result.urls.rbegin() != effectiveUriCStr)
result.urls.push_back(effectiveUriCStr);
}

size_t headerCallback(void * contents, size_t size, size_t nmemb)
{
size_t realSize = size * nmemb;
std::string line((char *) contents, realSize);
printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));

static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
if (std::smatch match; std::regex_match(line, match, statusLine)) {
result.etag = "";
result.data.clear();
result.bodySize = 0;
statusMsg = trim(match.str(1));
acceptRanges = false;
encoding = "";
appendCurrentUrl();
} else {

auto i = line.find(':');
if (i != std::string::npos) {
std::string name = toLower(trim(line.substr(0, i)));

if (name == "etag") {
result.etag = trim(line.substr(i + 1));
/* Hack to work around a GitHub bug: it sends
ETags, but ignores If-None-Match. So if we get
the expected ETag on a 200 response, then shut
down the connection because we already have the
data. */
long httpStatus = 0;
curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
if (result.etag == request.expectedETag && httpStatus == 200) {
debug("shutting down on 200 HTTP response with expected ETag");
return 0;
}
}

else if (name == "content-encoding")
encoding = trim(line.substr(i + 1));

else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes")
acceptRanges = true;

else if (name == "link" || name == "x-amz-meta-link") {
auto value = trim(line.substr(i + 1));
static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
if (std::smatch match; std::regex_match(value, match, linkRegex))
result.immutableUrl = match.str(1);
else
debug("got invalid link header '%s'", value);
}
}
}
return realSize;
}
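/* Illustrative example (not in the original; hostname hypothetical):
given the response headers

    HTTP/1.1 200 OK
    ETag: "5d41402a"
    Link: <https://cache.example.org/nar/abc.nar?rev=42>; rel="immutable"

this callback records result.etag = "\"5d41402a\"" (quotes included) and
result.immutableUrl = "https://cache.example.org/nar/abc.nar?rev=42". */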

static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
{
return ((TransferItem *) userp)->headerCallback(contents, size, nmemb);
}

int progressCallback(double dltotal, double dlnow)
{
try {
act.progress(dlnow, dltotal);
} catch (nix::Interrupted &) {
assert(getInterrupted());
}
return getInterrupted();
}

static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
{
return ((TransferItem *) userp)->progressCallback(dltotal, dlnow);
}

static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
{
if (type == CURLINFO_TEXT)
vomit("curl: %s", chomp(std::string(data, size)));
return 0;
}

size_t readOffset = 0;
size_t readCallback(char * buffer, size_t size, size_t nitems)
{
if (readOffset == request.data->length())
return 0;
auto count = std::min(size * nitems, request.data->length() - readOffset);
assert(count);
memcpy(buffer, request.data->data() + readOffset, count);
readOffset += count;
return count;
}

static size_t readCallbackWrapper(char * buffer, size_t size, size_t nitems, void * userp)
{
return ((TransferItem *) userp)->readCallback(buffer, size, nitems);
}

void init()
{
if (!req) req = curl_easy_init();

curl_easy_reset(req);

if (verbosity >= lvlVomit) {
curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, TransferItem::debugCallback);
}

curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(req, CURLOPT_USERAGENT,
("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
(fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : "")).c_str());
#if LIBCURL_VERSION_NUM >= 0x072b00
curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
#endif
#if LIBCURL_VERSION_NUM >= 0x072f00
if (fileTransferSettings.enableHttp2)
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
else
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
#endif
curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, TransferItem::writeCallbackWrapper);
curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper);
curl_easy_setopt(req, CURLOPT_HEADERDATA, this);

curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);

curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);

if (settings.downloadSpeed.get() > 0)
curl_easy_setopt(req, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t) (settings.downloadSpeed.get() * 1024));

if (request.head)
curl_easy_setopt(req, CURLOPT_NOBODY, 1);

if (request.data) {
curl_easy_setopt(req, CURLOPT_UPLOAD, 1L);
curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper);
curl_easy_setopt(req, CURLOPT_READDATA, this);
curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length());
}

if (request.verifyTLS) {
if (settings.caFile != "")
curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str());
} else {
curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
}

curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, fileTransferSettings.connectTimeout.get());

curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, fileTransferSettings.stalledDownloadTimeout.get());

/* If no file exists at the specified path, curl continues to work
as if netrc support was disabled. */
curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str());
curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);

if (writtenToSink)
curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);

curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf);
errbuf[0] = 0;

result.data.clear();
result.bodySize = 0;
}

void finish(CURLcode code)
{
auto finishTime = std::chrono::steady_clock::now();

auto httpStatus = getHTTPStatus();

debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes, duration = %.2f s",
request.verb(), request.uri, code, httpStatus, result.bodySize,
std::chrono::duration_cast<std::chrono::milliseconds>(finishTime - startTime).count() / 1000.0f
);

appendCurrentUrl();

if (decompressionSink) {
try {
decompressionSink->finish();
} catch (...) {
writeException = std::current_exception();
}
}

if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
code = CURLE_OK;
httpStatus = 304;
}

if (writeException)
failEx(writeException);

else if (code == CURLE_OK && successfulStatuses.count(httpStatus))
{
result.cached = httpStatus == 304;

// In 2021, GitHub responds to If-None-Match with 304,
// but omits ETag. We just use the If-None-Match etag
// since 304 implies they are the same.
if (httpStatus == 304 && result.etag == "")
result.etag = request.expectedETag;

act.progress(result.bodySize, result.bodySize);
done = true;
callback(std::move(result));
}

else {
// We treat most errors as transient, but won't retry when hopeless
Error err = Transient;

if (httpStatus == 404 || httpStatus == 410 || code == CURLE_FILE_COULDNT_READ_FILE) {
// The file is definitely not there
err = NotFound;
} else if (httpStatus == 401 || httpStatus == 403 || httpStatus == 407) {
// Don't retry on authentication/authorization failures
err = Forbidden;
} else if (httpStatus >= 400 && httpStatus < 500 && httpStatus != 408 && httpStatus != 429) {
// Most 4xx errors are client errors and are probably not worth retrying:
// * 408 means the server timed out waiting for us, so we try again
// * 429 means too many requests, so we retry (with a delay)
err = Misc;
} else if (httpStatus == 501 || httpStatus == 505 || httpStatus == 511) {
// Let's treat most 5xx (server) errors as transient, except for a handful:
// * 501 not implemented
// * 505 http version not supported
// * 511 we're behind a captive portal
err = Misc;
} else {
// Don't bother retrying on certain cURL errors either

// Allow selecting a subset of enum values
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch-enum"
switch (code) {
case CURLE_FAILED_INIT:
case CURLE_URL_MALFORMAT:
case CURLE_NOT_BUILT_IN:
case CURLE_REMOTE_ACCESS_DENIED:
case CURLE_FILE_COULDNT_READ_FILE:
case CURLE_FUNCTION_NOT_FOUND:
case CURLE_ABORTED_BY_CALLBACK:
case CURLE_BAD_FUNCTION_ARGUMENT:
case CURLE_INTERFACE_FAILED:
case CURLE_UNKNOWN_OPTION:
case CURLE_SSL_CACERT_BADFILE:
case CURLE_TOO_MANY_REDIRECTS:
case CURLE_WRITE_ERROR:
case CURLE_UNSUPPORTED_PROTOCOL:
err = Misc;
break;
default: // Shut up warnings
break;
}
#pragma GCC diagnostic pop
}

attempt++;

std::optional<std::string> response;
if (errorSink)
response = std::move(errorSink->s);
auto exc =
code == CURLE_ABORTED_BY_CALLBACK && getInterrupted()
? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", request.verb(), request.uri)
: httpStatus != 0
? FileTransferError(err,
std::move(response),
"unable to %s '%s': HTTP error %d%s",
request.verb(), request.uri, httpStatus,
code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
: FileTransferError(err,
std::move(response),
"unable to %s '%s': %s (%d) %s",
request.verb(), request.uri, curl_easy_strerror(code), code, errbuf);

/* If this is a transient error, then maybe retry the
download after a while. If we're writing to a
sink, we can only retry if the server supports
ranged requests. */
if (err == Transient
&& attempt < request.tries
&& (!this->request.dataCallback
|| writtenToSink == 0
|| (acceptRanges && encoding.empty())))
{
int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(fileTransfer.mt19937));
if (writtenToSink)
warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
else
warn("%s; retrying in %d ms", exc.what(), ms);
embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
fileTransfer.enqueueItem(shared_from_this());
}
else
fail(std::move(exc));
}
}
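/* Illustrative worked example (not in the original): with the default
baseRetryTimeMs = 250, the retry delay is
250 * 2^(attempt - 1 + U(0, 0.5)) ms, so successive attempts wait
roughly 250-354 ms, 500-707 ms, 1000-1414 ms, and so on. The jitter
drawn from U(0, 0.5) spreads out retries from concurrent clients. */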
};

struct State
{
struct EmbargoComparator {
bool operator() (const std::shared_ptr<TransferItem> & i1, const std::shared_ptr<TransferItem> & i2) {
return i1->embargo > i2->embargo;
}
};
bool quit = false;
std::priority_queue<std::shared_ptr<TransferItem>, std::vector<std::shared_ptr<TransferItem>>, EmbargoComparator> incoming;
};
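/* Note (added for clarity, not in the original): std::priority_queue is a
max-heap, so comparing with `>` on `embargo` inverts it into a min-heap:
top() is always the item whose embargo expires soonest, which is what
the worker loop below wants to pop first. */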

Sync<State> state_;

#ifndef _WIN32 // TODO need graceful async exit support on Windows?
/* We can't use a std::condition_variable to wake up the curl
thread, because it only monitors file descriptors. So use a
pipe instead. */
Pipe wakeupPipe;
#endif

std::thread workerThread;

curlFileTransfer()
: mt19937(rd())
{
static std::once_flag globalInit;
std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);

curlm = curl_multi_init();

#if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0
curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
#endif
#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
fileTransferSettings.httpConnections.get());
#endif

#ifndef _WIN32 // TODO need graceful async exit support on Windows?
wakeupPipe.create();
fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
#endif

workerThread = std::thread([&]() { workerThreadEntry(); });
}

~curlFileTransfer()
{
stopWorkerThread();

workerThread.join();

if (curlm) curl_multi_cleanup(curlm);
}

void stopWorkerThread()
{
/* Signal the worker thread to exit. */
{
auto state(state_.lock());
state->quit = true;
}
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
writeFull(wakeupPipe.writeSide.get(), " ", false);
#endif
}

void workerThreadMain()
{
/* Cause this thread to be notified on SIGINT. */
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
auto callback = createInterruptCallback([&]() {
stopWorkerThread();
});
#endif

#if __linux__
try {
tryUnshareFilesystem();
} catch (nix::Error & e) {
e.addTrace({}, "in download thread");
throw;
}
#endif

std::map<CURL *, std::shared_ptr<TransferItem>> items;

bool quit = false;

std::chrono::steady_clock::time_point nextWakeup;

while (!quit) {
checkInterrupt();

/* Let curl do its thing. */
int running;
CURLMcode mc = curl_multi_perform(curlm, &running);
if (mc != CURLM_OK)
throw nix::Error("unexpected error from curl_multi_perform(): %s", curl_multi_strerror(mc));

/* Set the promises of any finished requests. */
CURLMsg * msg;
int left;
while ((msg = curl_multi_info_read(curlm, &left))) {
if (msg->msg == CURLMSG_DONE) {
auto i = items.find(msg->easy_handle);
assert(i != items.end());
i->second->finish(msg->data.result);
curl_multi_remove_handle(curlm, i->second->req);
i->second->active = false;
items.erase(i);
}
}

/* Wait for activity, including wakeup events. */
int numfds = 0;
struct curl_waitfd extraFDs[1];
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
extraFDs[0].fd = wakeupPipe.readSide.get();
extraFDs[0].events = CURL_WAIT_POLLIN;
extraFDs[0].revents = 0;
#endif
long maxSleepTimeMs = items.empty() ? 10000 : 100;
auto sleepTimeMs =
nextWakeup != std::chrono::steady_clock::time_point()
? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
: maxSleepTimeMs;
vomit("download thread waiting for %d ms", sleepTimeMs);
mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
if (mc != CURLM_OK)
throw nix::Error("unexpected error from curl_multi_wait(): %s", curl_multi_strerror(mc));

nextWakeup = std::chrono::steady_clock::time_point();

/* Add new curl requests from the incoming requests queue,
except for requests that are embargoed (waiting for a
retry timeout to expire). */
if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
char buf[1024];
auto res = read(extraFDs[0].fd, buf, sizeof(buf));
if (res == -1 && errno != EINTR)
throw SysError("reading curl wakeup socket");
}

std::vector<std::shared_ptr<TransferItem>> incoming;
auto now = std::chrono::steady_clock::now();

{
auto state(state_.lock());
while (!state->incoming.empty()) {
auto item = state->incoming.top();
if (item->embargo <= now) {
incoming.push_back(item);
state->incoming.pop();
} else {
if (nextWakeup == std::chrono::steady_clock::time_point()
|| item->embargo < nextWakeup)
nextWakeup = item->embargo;
break;
}
}
quit = state->quit;
}

for (auto & item : incoming) {
debug("starting %s of %s", item->request.verb(), item->request.uri);
item->init();
curl_multi_add_handle(curlm, item->req);
item->active = true;
items[item->req] = item;
}
}

debug("download thread shutting down");
}

void workerThreadEntry()
{
try {
workerThreadMain();
} catch (nix::Interrupted & e) {
} catch (std::exception & e) {
printError("unexpected error in download thread: %s", e.what());
}

{
auto state(state_.lock());
while (!state->incoming.empty()) state->incoming.pop();
state->quit = true;
}
}

void enqueueItem(std::shared_ptr<TransferItem> item)
{
if (item->request.data
&& !hasPrefix(item->request.uri, "http://")
&& !hasPrefix(item->request.uri, "https://"))
throw nix::Error("uploading to '%s' is not supported", item->request.uri);

{
auto state(state_.lock());
if (state->quit)
throw nix::Error("cannot enqueue download request because the download thread is shutting down");
state->incoming.push(item);
}
#ifndef _WIN32 // TODO need graceful async exit support on Windows?
writeFull(wakeupPipe.writeSide.get(), " ");
#endif
}

#if ENABLE_S3
std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri)
{
auto [path, params] = splitUriAndParams(uri);

auto slash = path.find('/', 5); // 5 is the length of "s3://" prefix
if (slash == std::string::npos)
throw nix::Error("bad S3 URI '%s'", path);

std::string bucketName(path, 5, slash - 5);
std::string key(path, slash + 1);

return {bucketName, key, params};
}
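/* Illustrative example (hypothetical bucket): parsing
"s3://my-cache/nar/1abc.nar.xz?region=eu-west-1" yields
bucketName = "my-cache", key = "nar/1abc.nar.xz" and
params = {{"region", "eu-west-1"}}. */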
#endif

void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) override
{
/* Ugly hack to support s3:// URIs. */
if (hasPrefix(request.uri, "s3://")) {
// FIXME: do this on a worker thread
try {
#if ENABLE_S3
auto [bucketName, key, params] = parseS3Uri(request.uri);

std::string profile = getOr(params, "profile", "");
std::string region = getOr(params, "region", Aws::Region::US_EAST_1);
std::string scheme = getOr(params, "scheme", "");
std::string endpoint = getOr(params, "endpoint", "");

S3Helper s3Helper(profile, region, scheme, endpoint);

// FIXME: implement ETag
auto s3Res = s3Helper.getObject(bucketName, key);
FileTransferResult res;
if (!s3Res.data)
throw FileTransferError(NotFound, "S3 object '%s' does not exist", request.uri);
res.data = std::move(*s3Res.data);
callback(std::move(res));
#else
throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
#endif
} catch (...) { callback.rethrow(); }
return;
}

enqueueItem(std::make_shared<TransferItem>(*this, request, std::move(callback)));
}
};

ref<curlFileTransfer> makeCurlFileTransfer()
{
return make_ref<curlFileTransfer>();
}

ref<FileTransfer> getFileTransfer()
{
static ref<curlFileTransfer> fileTransfer = makeCurlFileTransfer();

if (fileTransfer->state_.lock()->quit)
fileTransfer = makeCurlFileTransfer();

return fileTransfer;
}

ref<FileTransfer> makeFileTransfer()
{
return makeCurlFileTransfer();
}

std::future<FileTransferResult> FileTransfer::enqueueFileTransfer(const FileTransferRequest & request)
{
auto promise = std::make_shared<std::promise<FileTransferResult>>();
enqueueFileTransfer(request,
{[promise](std::future<FileTransferResult> fut) {
try {
promise->set_value(fut.get());
} catch (...) {
promise->set_exception(std::current_exception());
}
}});
return promise->get_future();
}
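/* Usage sketch (illustrative; the URL is hypothetical): the future-based
overload lets a caller start several transfers and block only when the
results are needed:

    auto ft = getFileTransfer();
    auto fut = ft->enqueueFileTransfer(
        FileTransferRequest("https://cache.example.org/nix-cache-info"));
    // ... do other work ...
    FileTransferResult res = fut.get(); // rethrows FileTransferError on failure
*/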

FileTransferResult FileTransfer::download(const FileTransferRequest & request)
{
return enqueueFileTransfer(request).get();
}

FileTransferResult FileTransfer::upload(const FileTransferRequest & request)
{
/* Note: this method is identical to download(), but is kept separate for readability. */
return enqueueFileTransfer(request).get();
}

void FileTransfer::download(
FileTransferRequest && request,
Sink & sink,
std::function<void(FileTransferResult)> resultCallback)
{
/* Note: we can't call 'sink' via request.dataCallback, because
that would cause the sink to execute on the fileTransfer
thread. If 'sink' is a coroutine, this will fail. Also, if the
sink is expensive (e.g. one that does decompression and writing
to the Nix store), it would stall the download thread too much.
Therefore we use a buffer to communicate data between the
download thread and the calling thread. */

struct State {
bool quit = false;
std::exception_ptr exc;
std::string data;
std::condition_variable avail, request;
};

auto _state = std::make_shared<Sync<State>>();

/* In case of an exception, wake up the download thread. FIXME:
abort the download request. */
Finally finally([&]() {
auto state(_state->lock());
state->quit = true;
state->request.notify_one();
});

request.dataCallback = [_state](std::string_view data) {

auto state(_state->lock());

if (state->quit) return;

/* If the buffer is full, then go to sleep until the calling
thread wakes us up (i.e. when it has removed data from the
buffer). We don't wait forever to prevent stalling the
download thread. (Hopefully sleeping will throttle the
sender.) */
if (state->data.size() > fileTransferSettings.downloadBufferSize) {
debug("download buffer is full; going to sleep");
static bool haveWarned = false;
warnOnce(haveWarned, "download buffer is full; consider increasing the 'download-buffer-size' setting");
state.wait_for(state->request, std::chrono::seconds(10));
}

/* Append data to the buffer and wake up the calling
thread. */
state->data.append(data);
state->avail.notify_one();
};

enqueueFileTransfer(request,
{[_state, resultCallback{std::move(resultCallback)}](std::future<FileTransferResult> fut) {
auto state(_state->lock());
state->quit = true;
try {
auto res = fut.get();
if (resultCallback)
resultCallback(std::move(res));
} catch (...) {
state->exc = std::current_exception();
}
state->avail.notify_one();
state->request.notify_one();
}});

while (true) {
checkInterrupt();

std::string chunk;

/* Grab data if available, otherwise wait for the download
thread to wake us up. */
{
auto state(_state->lock());

if (state->data.empty()) {

if (state->quit) {
if (state->exc) std::rethrow_exception(state->exc);
return;
}

state.wait(state->avail);

if (state->data.empty()) continue;
}

chunk = std::move(state->data);
/* Reset state->data after the move, since we check data.empty() */
state->data = "";

state->request.notify_one();
}

/* Flush the data to the sink and wake up the download thread
if it's blocked on a full buffer. We don't hold the state
lock while doing this to prevent blocking the download
thread if sink() takes a long time. */
sink(chunk);
}
}
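/* Usage sketch (illustrative; URL and processing are hypothetical;
LambdaSink is assumed to be the std::function-backed Sink from
serialise.hh): stream a download through a sink on the calling thread.

    LambdaSink sink([](std::string_view chunk) {
        // consume `chunk` incrementally, e.g. hash or unpack it
    });
    getFileTransfer()->download(
        FileTransferRequest("https://cache.example.org/example.nar"), sink);
*/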

template<typename... Args>
FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args)
: Error(args...), error(error), response(response)
{
const auto hf = HintFmt(args...);
// FIXME: Due to https://github.com/NixOS/nix/issues/3841 we don't know how
// to print different messages for different verbosity levels. For now
// we add some heuristics for detecting when we want to show the response.
if (response && (response->size() < 1024 || response->find("<html>") != std::string::npos))
err.msg = HintFmt("%1%\n\nresponse body:\n\n%2%", Uncolored(hf.str()), chomp(*response));
else
err.msg = hf;
}

}
181
subprojects/libstore/filetransfer.hh
Normal file

@@ -0,0 +1,181 @@
#pragma once
///@file

#include <string>
#include <future>

#include "logging.hh"
#include "types.hh"
#include "ref.hh"
#include "config.hh"
#include "serialise.hh"

namespace nix {

struct FileTransferSettings : Config
{
Setting<bool> enableHttp2{this, true, "http2",
"Whether to enable HTTP/2 support."};

Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
"String appended to the user agent in HTTP requests."};

Setting<size_t> httpConnections{
this, 25, "http-connections",
R"(
The maximum number of parallel TCP connections used to fetch
files from binary caches and by other downloads. It defaults
to 25. 0 means no limit.
)",
{"binary-caches-parallel-connections"}};

Setting<unsigned long> connectTimeout{
this, 0, "connect-timeout",
R"(
The timeout (in seconds) for establishing connections in the
binary cache substituter. It corresponds to `curl`’s
`--connect-timeout` option. A value of 0 means no limit.
)"};

Setting<unsigned long> stalledDownloadTimeout{
this, 300, "stalled-download-timeout",
R"(
The timeout (in seconds) for receiving data from servers
during download. Nix cancels idle downloads after this
timeout's duration.
)"};

Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};

Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
R"(
The size of Nix's internal download buffer during `curl` transfers. If data is
not processed quickly enough to exceed the size of this buffer, downloads may stall.
)"};
};

extern FileTransferSettings fileTransferSettings;

struct FileTransferRequest
{
std::string uri;
Headers headers;
std::string expectedETag;
bool verifyTLS = true;
bool head = false;
size_t tries = fileTransferSettings.tries;
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
bool decompress = true;
std::optional<std::string> data;
std::string mimeType;
std::function<void(std::string_view data)> dataCallback;

FileTransferRequest(std::string_view uri)
: uri(uri), parentAct(getCurActivity()) { }

std::string verb()
{
return data ? "upload" : "download";
}
};
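/* Example (illustrative; URL and content hypothetical): an upload request
is just a request with `data` set, which also flips verb() to "upload":

    FileTransferRequest req("https://cache.example.org/foo.narinfo");
    req.data = "StorePath: ...";          // body to upload
    req.mimeType = "text/x-nix-narinfo";  // sent as Content-Type
    getFileTransfer()->upload(req);
*/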

struct FileTransferResult
{
/**
 * Whether this is a cache hit (i.e. the ETag supplied in the
 * request is still valid). If so, `data` is empty.
 */
bool cached = false;

/**
 * The ETag of the object.
 */
std::string etag;

/**
 * All URLs visited in the redirect chain.
 */
std::vector<std::string> urls;

/**
 * The response body.
 */
std::string data;

uint64_t bodySize = 0;

/**
 * An "immutable" URL for this resource (i.e. one whose contents
 * will never change), as returned by the `Link: <url>;
 * rel="immutable"` header.
 */
std::optional<std::string> immutableUrl;
};

class Store;

struct FileTransfer
{
virtual ~FileTransfer() { }

/**
 * Enqueue a data transfer request, returning a future to the result of
 * the download. The future may throw a FileTransferError
 * exception.
 */
virtual void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) = 0;

std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request);

/**
 * Synchronously download a file.
 */
FileTransferResult download(const FileTransferRequest & request);

/**
 * Synchronously upload a file.
 */
FileTransferResult upload(const FileTransferRequest & request);

/**
 * Download a file, writing its data to a sink. The sink will be
 * invoked on the thread of the caller.
 */
void download(
FileTransferRequest && request,
Sink & sink,
std::function<void(FileTransferResult)> resultCallback = {});

enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};

/**
 * @return a shared FileTransfer object.
 *
 * Using this object is preferred because it enables connection reuse
 * and HTTP/2 multiplexing.
 */
ref<FileTransfer> getFileTransfer();
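/* Example (illustrative; the URL is hypothetical): the shared object makes
one-off downloads a two-liner while still pooling connections:

    auto res = getFileTransfer()->download(
        FileTransferRequest("https://example.org/release.json"));
    // res.data holds the body; res.cached is true on a 304 cache hit.
*/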

/**
 * @return a new FileTransfer object
 *
 * Prefer getFileTransfer() to this; see its docs for why.
 */
ref<FileTransfer> makeFileTransfer();

class FileTransferError : public Error
{
public:
FileTransfer::Error error;
/// intentionally optional
std::optional<std::string> response;

template<typename... Args>
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
};

}
120
subprojects/libstore/gc-store.hh
Normal file

@@ -0,0 +1,120 @@
#pragma once
///@file

#include <unordered_set>

#include "store-api.hh"

namespace nix {


typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;


struct GCOptions
{
/**
 * Garbage collector operation:
 *
 * - `gcReturnLive`: return the set of paths reachable from
 *   (i.e. in the closure of) the roots.
 *
 * - `gcReturnDead`: return the set of paths not reachable from
 *   the roots.
 *
 * - `gcDeleteDead`: actually delete the latter set.
 *
 * - `gcDeleteSpecific`: delete the paths listed in
 *   `pathsToDelete`, insofar as they are not reachable.
 */
typedef enum {
gcReturnLive,
gcReturnDead,
gcDeleteDead,
gcDeleteSpecific,
} GCAction;

GCAction action{gcDeleteDead};

/**
 * If `ignoreLiveness` is set, then reachability from the roots is
 * ignored (dangerous!). However, the paths must still be
 * unreferenced *within* the store (i.e., there can be no other
 * store paths that depend on them).
 */
bool ignoreLiveness{false};

/**
 * For `gcDeleteSpecific`, the paths to delete.
 */
StorePathSet pathsToDelete;

/**
 * Stop after at least `maxFreed` bytes have been freed.
 */
uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
};
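/* Example (illustrative; the store paths and `gcStore` reference are
hypothetical): options for deleting two specific paths, stopping after
roughly 1 GiB has been freed:

    GCOptions opts;
    opts.action = GCOptions::gcDeleteSpecific;
    opts.pathsToDelete = {path1, path2}; // StorePaths obtained elsewhere
    opts.maxFreed = 1ULL << 30;

    GCResults results;
    gcStore.collectGarbage(opts, results);
    // results.bytesFreed reports how much was actually deleted.
*/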


struct GCResults
{
/**
 * Depending on the action, the GC roots, or the paths that would
 * be or have been deleted.
 */
PathSet paths;

/**
 * For `gcReturnDead`, `gcDeleteDead` and `gcDeleteSpecific`, the
 * number of bytes that would be or was freed.
 */
uint64_t bytesFreed = 0;
};


/**
 * Mix-in class for \ref Store "stores" which expose a notion of garbage
 * collection.
 *
 * Garbage collection will allow deleting paths which are not
 * transitively "rooted".
 *
 * The notion of GC roots is actually not part of this class.
 *
 * - The base `Store` class has `Store::addTempRoot()` because for a store
 *   that doesn't support garbage collection at all, a temporary GC root is
 *   safely implementable as a no-op.
 *
 *   @todo actually this is not so good because stores are *views*.
 *   Some views have only no-op temp roots even though other views of the
 *   same store allow triggering GC. For instance one can't add a root
 *   over SSH, but that doesn't prevent someone from GC-ing that same
 *   store when it is accessed locally.
 *
 * - The derived `LocalFSStore` class has `LocalFSStore::addPermRoot`,
 *   which is not part of this class because it relies on the notion of
 *   an ambient file system. There are stores (`ssh-ng://`, for one),
 *   that *do* support garbage collection but *don't* expose any file
 *   system, and `LocalFSStore::addPermRoot` thus does not make sense
 *   for them.
 */
struct GcStore : public virtual Store
{
inline static std::string operationName = "Garbage collection";

/**
 * Find the roots of the garbage collector. Each root is a pair
 * `(link, storePath)` where `link` is the path of a symlink
 * outside of the Nix store that points to `storePath`. If
 * `censor` is true, privacy-sensitive information about roots
 * found in `/proc` is censored.
 */
virtual Roots findRoots(bool censor) = 0;

/**
 * Perform a garbage collection.
 */
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
};


}
975
subprojects/libstore/gc.cc
Normal file

@@ -0,0 +1,975 @@
#include "derivations.hh"
|
||||
#include "globals.hh"
|
||||
#include "local-store.hh"
|
||||
#include "finally.hh"
|
||||
#include "unix-domain-socket.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
#if !defined(__linux__)
|
||||
// For shelling out to lsof
|
||||
# include "processes.hh"
|
||||
#endif
|
||||
|
||||
#include <functional>
|
||||
#include <queue>
|
||||
#include <algorithm>
|
||||
#include <regex>
|
||||
#include <random>
|
||||
|
||||
#include <climits>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/stat.h>
|
||||
#if HAVE_STATVFS
|
||||
# include <sys/statvfs.h>
|
||||
#endif
|
||||
#ifndef _WIN32
|
||||
# include <poll.h>
|
||||
# include <sys/socket.h>
|
||||
# include <sys/un.h>
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
static std::string gcSocketPath = "/gc-socket/socket";
|
||||
static std::string gcRootsDir = "gcroots";
|
||||
|
||||
|
||||
void LocalStore::addIndirectRoot(const Path & path)
|
||||
{
|
||||
std::string hash = hashString(HashAlgorithm::SHA1, path).to_string(HashFormat::Nix32, false);
|
||||
Path realRoot = canonPath(fmt("%1%/%2%/auto/%3%", stateDir, gcRootsDir, hash));
|
||||
makeSymlink(realRoot, path);
|
||||
}
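/* Illustrative example (hypothetical paths): for path =
"/home/alice/result", the SHA-1 of that string, rendered in Nix's
base-32 alphabet, names a symlink such as

    /nix/var/nix/gcroots/auto/<nix32-sha1> -> /home/alice/result

so the user-side symlink becomes an indirect GC root without the
collector having to scan home directories. */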


void LocalStore::createTempRootsFile()
{
auto fdTempRoots(_fdTempRoots.lock());

/* Create the temporary roots file for this process. */
if (*fdTempRoots) return;

while (1) {
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
unlink(fnTempRoots.c_str());

*fdTempRoots = openLockFile(fnTempRoots, true);

debug("acquiring write lock on '%s'", fnTempRoots);
lockFile(fdTempRoots->get(), ltWrite, true);

/* Check whether the garbage collector didn't get in our
way. */
struct stat st;
if (fstat(fromDescriptorReadOnly(fdTempRoots->get()), &st) == -1)
throw SysError("statting '%1%'", fnTempRoots);
if (st.st_size == 0) break;

/* The garbage collector deleted this file before we could get
a lock. (It won't delete the file after we get a lock.)
Try again. */
}
}


void LocalStore::addTempRoot(const StorePath & path)
{
if (readOnly) {
debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyway.");
return;
}

createTempRootsFile();

/* Open/create the global GC lock file. */
{
auto fdGCLock(_fdGCLock.lock());
if (!*fdGCLock)
*fdGCLock = openGCLock();
}

restart:
/* Try to acquire a shared global GC lock (non-blocking). This
only succeeds if the garbage collector is not currently
running. */
FdLock gcLock(_fdGCLock.lock()->get(), ltRead, false, "");

if (!gcLock.acquired) {
/* We couldn't get a shared global GC lock, so the garbage
collector is running. So we have to connect to the garbage
collector and inform it about our root. */
auto fdRootsSocket(_fdRootsSocket.lock());

if (!*fdRootsSocket) {
auto socketPath = stateDir.get() + gcSocketPath;
debug("connecting to '%s'", socketPath);
*fdRootsSocket = createUnixDomainSocket();
try {
nix::connect(toSocket(fdRootsSocket->get()), socketPath);
} catch (SysError & e) {
/* The garbage collector may have exited or not
created the socket yet, so we need to restart. */
if (e.errNo == ECONNREFUSED || e.errNo == ENOENT) {
debug("GC socket connection refused: %s", e.msg());
fdRootsSocket->close();
std::this_thread::sleep_for(std::chrono::milliseconds(100));
goto restart;
}
throw;
}
}

try {
debug("sending GC root '%s'", printStorePath(path));
writeFull(fdRootsSocket->get(), printStorePath(path) + "\n", false);
char c;
readFull(fdRootsSocket->get(), &c, 1);
assert(c == '1');
debug("got ack for GC root '%s'", printStorePath(path));
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
fdRootsSocket->close();
goto restart;
}
throw;
} catch (EndOfFile & e) {
debug("GC socket disconnected");
fdRootsSocket->close();
goto restart;
}
}

/* Record the store path in the temporary roots file so it will be
seen by a future run of the garbage collector. */
auto s = printStorePath(path) + '\0';
writeFull(_fdTempRoots.lock()->get(), s);
}
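/* Illustrative wire exchange (hypothetical store path): when the GC is
running, the client and the GC's root server speak a one-line protocol
over the Unix socket:

    client -> "/nix/store/abc...-hello-2.12\n"
    server -> "1"   // ack: root registered (or held until deletion is done)

The '1' ack is exactly what the assert(c == '1') above checks. */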


static std::string censored = "{censored}";


void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
/* Read the `temproots' directory for per-process temporary root
files. */
for (auto & i : std::filesystem::directory_iterator{tempRootsDir}) {
checkInterrupt();
auto name = i.path().filename().string();
if (name[0] == '.') {
// Ignore hidden files. Some package managers (notably portage) create
// those to keep the directory alive.
continue;
}
Path path = i.path().string();

pid_t pid = std::stoi(name);

debug("reading temporary root file '%1%'", path);
AutoCloseFD fd(toDescriptor(open(path.c_str(),
#ifndef _WIN32
O_CLOEXEC |
#endif
O_RDWR, 0666)));
if (!fd) {
/* It's okay if the file has disappeared. */
if (errno == ENOENT) continue;
throw SysError("opening temporary roots file '%1%'", path);
}

/* Try to acquire a write lock without blocking. This can
only succeed if the owning process has died. In that case
we don't care about its temporary roots. */
if (lockFile(fd.get(), ltWrite, false)) {
printInfo("removing stale temporary roots file '%1%'", path);
unlink(path.c_str());
writeFull(fd.get(), "d");
continue;
}

/* Read the entire file. */
auto contents = readFile(fd.get());

/* Extract the roots. */
std::string::size_type pos = 0, end;

while ((end = contents.find((char) 0, pos)) != std::string::npos) {
Path root(contents, pos, end - pos);
debug("got temporary root '%s'", root);
tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid));
pos = end + 1;
}
}
}


void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, Roots & roots)
{
auto foundRoot = [&](const Path & path, const Path & target) {
try {
auto storePath = toStorePath(target).first;
if (isValidPath(storePath))
roots[std::move(storePath)].emplace(path);
else
printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
} catch (BadStorePath &) { }
};

try {

if (type == std::filesystem::file_type::unknown)
type = std::filesystem::symlink_status(path).type();

if (type == std::filesystem::file_type::directory) {
for (auto & i : std::filesystem::directory_iterator{path}) {
checkInterrupt();
findRoots(i.path().string(), i.symlink_status().type(), roots);
}
}

else if (type == std::filesystem::file_type::symlink) {
Path target = readLink(path);
if (isInStore(target))
foundRoot(path, target);

/* Handle indirect roots. */
else {
target = absPath(target, dirOf(path));
if (!pathExists(target)) {
if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
printInfo("removing stale link from '%1%' to '%2%'", path, target);
unlink(path.c_str());
}
} else {
if (!std::filesystem::is_symlink(target)) return;
Path target2 = readLink(target);
if (isInStore(target2)) foundRoot(target, target2);
}
}
}

else if (type == std::filesystem::file_type::regular) {
auto storePath = maybeParseStorePath(storeDir + "/" + std::string(baseNameOf(path)));
if (storePath && isValidPath(*storePath))
roots[std::move(*storePath)].emplace(path);
}

}

catch (std::filesystem::filesystem_error & e) {
/* We only ignore permanent failures. */
if (e.code() == std::errc::permission_denied || e.code() == std::errc::no_such_file_or_directory || e.code() == std::errc::not_a_directory)
printInfo("cannot read potential root '%1%'", path);
else
throw;
}

catch (SysError & e) {
/* We only ignore permanent failures. */
if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
printInfo("cannot read potential root '%1%'", path);
else
throw;
}
}


void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
{
/* Process direct roots in {gcroots,profiles}. */
findRoots(stateDir + "/" + gcRootsDir, std::filesystem::file_type::unknown, roots);
findRoots(stateDir + "/profiles", std::filesystem::file_type::unknown, roots);

/* Add additional roots returned by platform-specific
heuristics. This is typically used to add running programs to
the set of roots (to prevent them from being garbage collected). */
findRuntimeRoots(roots, censor);
}


Roots LocalStore::findRoots(bool censor)
{
Roots roots;
findRootsNoTemp(roots, censor);

findTempRoots(roots, censor);

return roots;
}
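/* Illustrative shape of the result (hypothetical paths): Roots maps each
rooted store path to the set of places that keep it alive, e.g.

    /nix/store/abc...-hello-2.12 -> { "/home/alice/result",
                                      "{temp:12345}" }

With censor = true, the link names are replaced by "{censored}". */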

/**
 * Key is a mere string because std::filesystem::path cannot be used as a
 * hash key with macOS's libc++.
 */
typedef std::unordered_map<std::string, std::unordered_set<std::string>> UncheckedRoots;

static void readProcLink(const std::filesystem::path & file, UncheckedRoots & roots)
{
std::filesystem::path buf;
try {
buf = std::filesystem::read_symlink(file);
} catch (std::filesystem::filesystem_error & e) {
if (e.code() == std::errc::no_such_file_or_directory
|| e.code() == std::errc::permission_denied
|| e.code() == std::errc::no_such_process)
return;
throw;
}
if (buf.is_absolute())
roots[buf.string()].emplace(file.string());
}

static std::string quoteRegexChars(const std::string & raw)
{
static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
return std::regex_replace(raw, specialRegex, R"(\$&)");
}
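/* Illustrative example: quoteRegexChars("/nix/store+test") yields
"/nix/store\+test", so a store directory containing regex
metacharacters can be embedded safely in the storePathRegex below. */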

#if __linux__
static void readFileRoots(const std::filesystem::path & path, UncheckedRoots & roots)
{
try {
roots[readFile(path)].emplace(path);
} catch (SysError & e) {
if (e.errNo != ENOENT && e.errNo != EACCES)
throw;
}
}
#endif

void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
UncheckedRoots unchecked;

auto procDir = AutoCloseDir{opendir("/proc")};
if (procDir) {
struct dirent * ent;
auto digitsRegex = std::regex(R"(^\d+$)");
auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
while (errno = 0, ent = readdir(procDir.get())) {
checkInterrupt();
if (std::regex_match(ent->d_name, digitsRegex)) {
try {
readProcLink(fmt("/proc/%s/exe", ent->d_name), unchecked);
readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);

auto fdStr = fmt("/proc/%s/fd", ent->d_name);
auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
if (!fdDir) {
if (errno == ENOENT || errno == EACCES)
continue;
throw SysError("opening %1%", fdStr);
}
struct dirent * fd_ent;
while (errno = 0, fd_ent = readdir(fdDir.get())) {
if (fd_ent->d_name[0] != '.')
readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
}
if (errno) {
if (errno == ESRCH)
continue;
throw SysError("iterating /proc/%1%/fd", ent->d_name);
}
fdDir.reset();

std::filesystem::path mapFile = fmt("/proc/%s/maps", ent->d_name);
auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile.string()), "\n");
for (const auto & line : mapLines) {
auto match = std::smatch{};
if (std::regex_match(line, match, mapRegex))
unchecked[match[1]].emplace(mapFile.string());
}

auto envFile = fmt("/proc/%s/environ", ent->d_name);
auto envString = readFile(envFile);
auto env_end = std::sregex_iterator{};
for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
unchecked[i->str()].emplace(envFile);
} catch (SystemError & e) {
if (errno == ENOENT || errno == EACCES || errno == ESRCH)
continue;
throw;
}
}
}
if (errno)
throw SysError("iterating /proc");
}

#if !defined(__linux__)
// lsof is really slow on OS X. This actually causes the gc-concurrent.sh test to fail.
// See: https://github.com/NixOS/nix/issues/3011
// Because of this we disable lsof when running the tests.
if (getEnv("_NIX_TEST_NO_LSOF") != "1") {
try {
std::regex lsofRegex(R"(^n(/.*)$)");
auto lsofLines =
tokenizeString<std::vector<std::string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
for (const auto & line : lsofLines) {
std::smatch match;
if (std::regex_match(line, match, lsofRegex))
unchecked[match[1].str()].emplace("{lsof}");
}
} catch (ExecError & e) {
/* lsof not installed, lsof failed */
}
}
#endif

#if __linux__
readFileRoots("/proc/sys/kernel/modprobe", unchecked);
readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
#endif

for (auto & [target, links] : unchecked) {
if (!isInStore(target)) continue;
try {
auto path = toStorePath(target).first;
if (!isValidPath(path)) continue;
debug("got additional root '%1%'", printStorePath(path));
if (censor)
roots[path].insert(censored);
else
roots[path].insert(links.begin(), links.end());
} catch (BadStorePath &) { }
}
}
|
||||
|
||||
|
||||
struct GCLimitReached { };
|
||||
|
||||
|
||||
void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{
|
||||
bool shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
|
||||
bool gcKeepOutputs = settings.gcKeepOutputs;
|
||||
bool gcKeepDerivations = settings.gcKeepDerivations;
|
||||
|
||||
StorePathSet roots, dead, alive;
|
||||
|
||||
struct Shared
|
||||
{
|
||||
// The temp roots only store the hash part to make it easier to
|
||||
// ignore suffixes like '.lock', '.chroot' and '.check'.
|
||||
std::unordered_set<std::string> tempRoots;
|
||||
|
||||
// Hash part of the store path currently being deleted, if
|
||||
// any.
|
||||
std::optional<std::string> pending;
|
||||
};
|
||||
|
||||
Sync<Shared> _shared;
|
||||
|
||||
std::condition_variable wakeup;
|
||||
|
||||
/* Using `--ignore-liveness' with `--delete' can have unintended
|
||||
consequences if `keep-outputs' or `keep-derivations' are true
|
||||
(the garbage collector will recurse into deleting the outputs
|
||||
or derivers, respectively). So disable them. */
|
||||
if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
|
||||
gcKeepOutputs = false;
|
||||
gcKeepDerivations = false;
|
||||
}
|
||||
|
||||
if (shouldDelete)
|
||||
deletePath(reservedPath);
|
||||
|
||||
/* Acquire the global GC root. Note: we don't use fdGCLock
|
||||
here because then in auto-gc mode, another thread could
|
||||
downgrade our exclusive lock. */
|
||||
auto fdGCLock = openGCLock();
|
||||
FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");
|
||||
|
||||
/* Synchronisation point to test ENOENT handling in
|
||||
addTempRoot(), see tests/gc-non-blocking.sh. */
|
||||
if (auto p = getEnv("_NIX_TEST_GC_SYNC_1"))
|
||||
readFile(*p);
|
||||
|
||||
/* Start the server for receiving new roots. */
|
||||
auto socketPath = stateDir.get() + gcSocketPath;
|
||||
createDirs(dirOf(socketPath));
|
||||
auto fdServer = createUnixDomainSocket(socketPath, 0666);
|
||||
|
||||
// TODO nonblocking socket on windows?
|
||||
#ifdef _WIN32
|
||||
throw UnimplementedError("External GC client not implemented yet");
|
||||
#else
|
||||
if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
|
||||
throw SysError("making socket '%1%' non-blocking", socketPath);
|
||||
|
||||
Pipe shutdownPipe;
|
||||
shutdownPipe.create();
|
||||
|
||||
std::thread serverThread([&]() {
|
||||
Sync<std::map<int, std::thread>> connections;
|
||||
|
||||
Finally cleanup([&]() {
|
||||
debug("GC roots server shutting down");
|
||||
fdServer.close();
|
||||
while (true) {
|
||||
auto item = remove_begin(*connections.lock());
|
||||
if (!item) break;
|
||||
auto & [fd, thread] = *item;
|
||||
shutdown(fd, SHUT_RDWR);
|
||||
thread.join();
|
||||
}
|
||||
});
|
||||
|
||||
while (true) {
|
||||
std::vector<struct pollfd> fds;
|
||||
fds.push_back({.fd = shutdownPipe.readSide.get(), .events = POLLIN});
|
||||
fds.push_back({.fd = fdServer.get(), .events = POLLIN});
|
||||
auto count = poll(fds.data(), fds.size(), -1);
|
||||
assert(count != -1);
|
||||
|
||||
if (fds[0].revents)
|
||||
/* Parent is asking us to quit. */
|
||||
break;
|
||||
|
||||
if (fds[1].revents) {
|
||||
/* Accept a new connection. */
|
||||
assert(fds[1].revents & POLLIN);
|
||||
AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
|
||||
if (!fdClient) continue;
|
||||
|
||||
debug("GC roots server accepted new client");
|
||||
|
||||
/* Process the connection in a separate thread. */
|
||||
auto fdClient_ = fdClient.get();
|
||||
std::thread clientThread([&, fdClient = std::move(fdClient)]() {
|
||||
Finally cleanup([&]() {
|
||||
auto conn(connections.lock());
|
||||
auto i = conn->find(fdClient.get());
|
||||
if (i != conn->end()) {
|
||||
i->second.detach();
|
||||
conn->erase(i);
|
||||
}
|
||||
});
|
||||
|
||||
/* On macOS, accepted sockets inherit the
|
||||
non-blocking flag from the server socket, so
|
||||
explicitly make it blocking. */
|
||||
if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
panic("could not make the client socket blocking");

while (true) {
try {
auto path = readLine(fdClient.get());
auto storePath = maybeParseStorePath(path);
if (storePath) {
debug("got new GC root '%s'", path);
auto hashPart = std::string(storePath->hashPart());
auto shared(_shared.lock());
shared->tempRoots.insert(hashPart);
/* If this path is currently being
deleted, then we have to wait until
deletion is finished to ensure that
the client doesn't start
re-creating it before we're
done. FIXME: ideally we would use a
FD for this so we don't block the
poll loop. */
while (shared->pending == hashPart) {
debug("synchronising with deletion of path '%s'", path);
shared.wait(wakeup);
}
} else
printError("received garbage instead of a root from client");
writeFull(fdClient.get(), "1", false);
} catch (Error & e) {
debug("reading GC root from client: %s", e.msg());
break;
}
}
});

connections.lock()->insert({fdClient_, std::move(clientThread)});
}
}
});

Finally stopServer([&]() {
writeFull(shutdownPipe.writeSide.get(), "x", false);
wakeup.notify_all();
if (serverThread.joinable()) serverThread.join();
});

#endif
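/* Illustrative client-side sketch (not part of this commit): an external
   process can keep a store path alive across this GC by talking to the
   server above. The protocol is line-oriented: one store path per line,
   each acknowledged with a single "1" byte. The socket and store paths
   below are hypothetical examples.

       #include <sys/socket.h>
       #include <sys/un.h>
       #include <unistd.h>
       #include <cstring>
       #include <string>

       int fd = socket(AF_UNIX, SOCK_STREAM, 0);
       struct sockaddr_un addr = {};
       addr.sun_family = AF_UNIX;
       strncpy(addr.sun_path, "/nix/var/nix/gc-socket/socket", sizeof(addr.sun_path) - 1);
       if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == 0) {
           std::string root = "/nix/store/<hash>-example\n"; // hypothetical path
           write(fd, root.data(), root.size());              // register the root
           char ack;
           read(fd, &ack, 1);                                // server sends '1' per root
       }
       close(fd);
*/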

/* Find the roots. Since we've grabbed the GC lock, the set of
permanent roots cannot increase now. */
printInfo("finding garbage collector roots...");
Roots rootMap;
if (!options.ignoreLiveness)
findRootsNoTemp(rootMap, true);

for (auto & i : rootMap) roots.insert(i.first);

/* Read the temporary roots created before we acquired the global
GC root. Any new roots will be sent to our socket. */
Roots tempRoots;
findTempRoots(tempRoots, true);
for (auto & root : tempRoots) {
_shared.lock()->tempRoots.insert(std::string(root.first.hashPart()));
roots.insert(root.first);
}

/* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */
if (auto p = getEnv("_NIX_TEST_GC_SYNC_2"))
readFile(*p);

/* Helper function that deletes a path from the store and throws
GCLimitReached if we've deleted enough garbage. */
auto deleteFromStore = [&](std::string_view baseName)
{
Path path = storeDir + "/" + std::string(baseName);
Path realPath = realStoreDir + "/" + std::string(baseName);

/* There may be temp directories in the store that are still in use
by another process. We need to be sure that we can acquire an
exclusive lock before deleting them. */
if (baseName.find("tmp-", 0) == 0) {
AutoCloseFD tmpDirFd = openDirectory(realPath);
if (!tmpDirFd || !lockFile(tmpDirFd.get(), ltWrite, false)) {
debug("skipping locked tempdir '%s'", realPath);
return;
}
}

printInfo("deleting '%1%'", path);

results.paths.insert(path);

uint64_t bytesFreed;
deleteStorePath(realPath, bytesFreed);

results.bytesFreed += bytesFreed;

if (results.bytesFreed > options.maxFreed) {
printInfo("deleted more than %d bytes; stopping", options.maxFreed);
throw GCLimitReached();
}
};

std::map<StorePath, StorePathSet> referrersCache;

/* Helper function that visits all paths reachable from `start`
via the referrers edges and optionally derivers and derivation
output edges. If none of those paths are roots, then all
visited paths are garbage and are deleted. */
auto deleteReferrersClosure = [&](const StorePath & start) {
StorePathSet visited;
std::queue<StorePath> todo;

/* Wake up any GC client waiting for deletion of the paths in
'visited' to finish. */
Finally releasePending([&]() {
auto shared(_shared.lock());
shared->pending.reset();
wakeup.notify_all();
});

auto enqueue = [&](const StorePath & path) {
if (visited.insert(path).second)
todo.push(path);
};

enqueue(start);

while (auto path = pop(todo)) {
checkInterrupt();

/* Bail out if we've previously discovered that this path
is alive. */
if (alive.count(*path)) {
alive.insert(start);
return;
}

/* If we've previously deleted this path, we don't have to
handle it again. */
if (dead.count(*path)) continue;

auto markAlive = [&]()
{
alive.insert(*path);
alive.insert(start);
try {
StorePathSet closure;
computeFSClosure(*path, closure,
/* flipDirection */ false, gcKeepOutputs, gcKeepDerivations);
for (auto & p : closure)
alive.insert(p);
} catch (InvalidPath &) { }
};

/* If this is a root, bail out. */
if (roots.count(*path)) {
debug("cannot delete '%s' because it's a root", printStorePath(*path));
return markAlive();
}

if (options.action == GCOptions::gcDeleteSpecific
&& !options.pathsToDelete.count(*path))
return;

{
auto hashPart = std::string(path->hashPart());
auto shared(_shared.lock());
if (shared->tempRoots.count(hashPart)) {
debug("cannot delete '%s' because it's a temporary root", printStorePath(*path));
return markAlive();
}
shared->pending = hashPart;
}

if (isValidPath(*path)) {

/* Visit the referrers of this path. */
auto i = referrersCache.find(*path);
if (i == referrersCache.end()) {
StorePathSet referrers;
queryGCReferrers(*path, referrers);
referrersCache.emplace(*path, std::move(referrers));
i = referrersCache.find(*path);
}
for (auto & p : i->second)
enqueue(p);

/* If keep-derivations is set and this is a
derivation, then visit the derivation outputs. */
if (gcKeepDerivations && path->isDerivation()) {
for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
if (maybeOutPath &&
isValidPath(*maybeOutPath) &&
queryPathInfo(*maybeOutPath)->deriver == *path)
enqueue(*maybeOutPath);
}

/* If keep-outputs is set, then visit the derivers. */
if (gcKeepOutputs) {
auto derivers = queryValidDerivers(*path);
for (auto & i : derivers)
enqueue(i);
}
}
}

for (auto & path : topoSortPaths(visited)) {
if (!dead.insert(path).second) continue;
if (shouldDelete) {
invalidatePathChecked(path);
deleteFromStore(path.to_string());
referrersCache.erase(path);
}
}
};

/* Either delete all garbage paths, or just the specified
paths (for gcDeleteSpecific). */
if (options.action == GCOptions::gcDeleteSpecific) {

for (auto & i : options.pathsToDelete) {
deleteReferrersClosure(i);
if (!dead.count(i))
throw Error(
"Cannot delete path '%1%' since it is still alive. "
"To find out why, use: "
"nix-store --query --roots and nix-store --query --referrers",
printStorePath(i));
}

} else if (options.maxFreed > 0) {

if (shouldDelete)
printInfo("deleting garbage...");
else
printInfo("determining live/dead paths...");

try {
AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
if (!dir) throw SysError("opening directory '%1%'", realStoreDir);

/* Read the store and delete all paths that are invalid or
unreachable. We don't use readDirectory() here so that
GCing can start faster. */
auto linksName = baseNameOf(linksDir);
Paths entries;
struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
std::string name = dirent->d_name;
if (name == "." || name == ".." || name == linksName) continue;

if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
deleteReferrersClosure(*storePath);
else
deleteFromStore(name);

}
} catch (GCLimitReached & e) {
}
}

if (options.action == GCOptions::gcReturnLive) {
for (auto & i : alive)
results.paths.insert(printStorePath(i));
return;
}

if (options.action == GCOptions::gcReturnDead) {
for (auto & i : dead)
results.paths.insert(printStorePath(i));
return;
}

/* Unlink all files in /nix/store/.links that have a link count of 1,
which indicates that there are no other links and so they can be
safely deleted. FIXME: race condition with optimisePath(): we
might see a link count of 1 just before optimisePath() increases
the link count. */
if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
printInfo("deleting unused links...");

AutoCloseDir dir(opendir(linksDir.c_str()));
if (!dir) throw SysError("opening directory '%1%'", linksDir);

int64_t actualSize = 0, unsharedSize = 0;

struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
std::string name = dirent->d_name;
if (name == "." || name == "..") continue;
Path path = linksDir + "/" + name;

auto st = lstat(path);

if (st.st_nlink != 1) {
actualSize += st.st_size;
unsharedSize += (st.st_nlink - 1) * st.st_size;
continue;
}

printMsg(lvlTalkative, "deleting unused link '%1%'", path);

if (unlink(path.c_str()) == -1)
throw SysError("deleting '%1%'", path);

/* Do not account for deleted file here. Rely on deletePath()
accounting. */
}

struct stat st;
if (stat(linksDir.c_str(), &st) == -1)
throw SysError("statting '%1%'", linksDir);
int64_t overhead =
#ifdef _WIN32
0
#else
st.st_blocks * 512ULL
#endif
;

printInfo("note: currently hard linking saves %.2f MiB",
((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
}

/* While we're at it, vacuum the database. */
//if (options.action == GCOptions::gcDeleteDead) vacuumDB();
}


void LocalStore::autoGC(bool sync)
{
#if HAVE_STATVFS
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");

auto getAvail = [this]() -> uint64_t {
if (fakeFreeSpaceFile)
return std::stoll(readFile(*fakeFreeSpaceFile));

struct statvfs st;
if (statvfs(realStoreDir.get().c_str(), &st))
throw SysError("getting filesystem info about '%s'", realStoreDir);

return (uint64_t) st.f_bavail * st.f_frsize;
};

std::shared_future<void> future;

{
auto state(_state.lock());

if (state->gcRunning) {
future = state->gcFuture;
debug("waiting for auto-GC to finish");
goto sync;
}

auto now = std::chrono::steady_clock::now();

if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;

auto avail = getAvail();

state->lastGCCheck = now;

if (avail >= settings.minFree || avail >= settings.maxFree) return;

if (avail > state->availAfterGC * 0.97) return;

state->gcRunning = true;

std::promise<void> promise;
future = state->gcFuture = promise.get_future().share();

std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable {

try {

/* Wake up any threads waiting for the auto-GC to finish. */
Finally wakeup([&]() {
auto state(_state.lock());
state->gcRunning = false;
state->lastGCCheck = std::chrono::steady_clock::now();
promise.set_value();
});

GCOptions options;
options.maxFreed = settings.maxFree - avail;

printInfo("running auto-GC to free %d bytes", options.maxFreed);

GCResults results;

collectGarbage(options, results);

_state.lock()->availAfterGC = getAvail();

} catch (...) {
// FIXME: we could propagate the exception to the
// future, but we don't really care.
ignoreExceptionInDestructor();
}

}).detach();
}

sync:
// Wait for the future outside of the state lock.
if (sync) future.get();
#endif
}
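/* Worked example of the thresholds above (illustrative numbers): with
   min-free = 1 GiB and max-free = 5 GiB, a check that finds 512 MiB of
   free space passes both tests (512 MiB < min-free and < max-free), so
   a GC is started with maxFreed = 5 GiB - 512 MiB, i.e. roughly 4.5 GiB;
   the collector keeps deleting garbage until free space would reach
   max-free. */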


}
390
subprojects/libstore/globals.cc
Normal file
@ -0,0 +1,390 @@
#include "globals.hh"
|
||||
#include "config-global.hh"
|
||||
#include "current-process.hh"
|
||||
#include "archive.hh"
|
||||
#include "args.hh"
|
||||
#include "abstract-setting-to-json.hh"
|
||||
#include "compute-levels.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include <curl/curl.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#ifndef _WIN32
|
||||
# include <sys/utsname.h>
|
||||
#endif
|
||||
|
||||
#ifdef __GLIBC__
|
||||
# include <gnu/lib-names.h>
|
||||
# include <nss.h>
|
||||
# include <dlfcn.h>
|
||||
#endif
|
||||
|
||||
#if __APPLE__
|
||||
# include "processes.hh"
|
||||
#endif
|
||||
|
||||
#include "config-impl.hh"
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <sys/sysctl.h>
|
||||
#endif
|
||||
|
||||
#include "strings.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/* The default location of the daemon socket, relative to nixStateDir.
|
||||
The socket is in a directory to allow you to control access to the
|
||||
Nix daemon by setting the mode/ownership of the directory
|
||||
appropriately. (This wouldn't work on the socket itself since it
|
||||
must be deleted and recreated on startup.) */
|
||||
#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
|
||||
|
||||
Settings settings;
|
||||
|
||||
static GlobalConfig::Register rSettings(&settings);
|
||||
|
||||
Settings::Settings()
|
||||
: nixPrefix(NIX_PREFIX)
|
||||
, nixStore(
|
||||
#ifndef _WIN32
|
||||
// On Windows `/nix/store` is not a canonical path, but we don't
// want to deal with that yet.
|
||||
canonPath
|
||||
#endif
|
||||
(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR))))
|
||||
, nixDataDir(canonPath(getEnvNonEmpty("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
|
||||
, nixLogDir(canonPath(getEnvNonEmpty("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
|
||||
, nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
|
||||
, nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
|
||||
, nixUserConfFiles(getUserConfigFiles())
|
||||
, nixManDir(canonPath(NIX_MAN_DIR))
|
||||
, nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
|
||||
{
|
||||
#ifndef _WIN32
|
||||
buildUsersGroup = isRootUser() ? "nixbld" : "";
|
||||
#endif
|
||||
allowSymlinkedStore = getEnv("NIX_IGNORE_SYMLINK_STORE") == "1";
|
||||
|
||||
auto sslOverride = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
|
||||
if (sslOverride != "")
|
||||
caFile = sslOverride;
|
||||
|
||||
/* Backwards compatibility. */
|
||||
auto s = getEnv("NIX_REMOTE_SYSTEMS");
|
||||
if (s) {
|
||||
Strings ss;
|
||||
for (auto & p : tokenizeString<Strings>(*s, ":"))
|
||||
ss.push_back("@" + p);
|
||||
builders = concatStringsSep("\n", ss);
|
||||
}
|
||||
|
||||
#if defined(__linux__) && defined(SANDBOX_SHELL)
|
||||
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
|
||||
#endif
|
||||
|
||||
/* chroot-like behavior from Apple's sandbox */
|
||||
#if __APPLE__
|
||||
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
|
||||
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
|
||||
#endif
|
||||
}
|
||||
|
||||
void loadConfFile(AbstractConfig & config)
|
||||
{
|
||||
auto applyConfigFile = [&](const Path & path) {
|
||||
try {
|
||||
std::string contents = readFile(path);
|
||||
config.applyConfig(contents, path);
|
||||
} catch (SystemError &) { }
|
||||
};
|
||||
|
||||
applyConfigFile(settings.nixConfDir + "/nix.conf");
|
||||
|
||||
/* We only want to send overrides to the daemon, i.e. stuff from
|
||||
~/.nix/nix.conf or the command line. */
|
||||
config.resetOverridden();
|
||||
|
||||
auto files = settings.nixUserConfFiles;
|
||||
for (auto file = files.rbegin(); file != files.rend(); file++) {
|
||||
applyConfigFile(*file);
|
||||
}
|
||||
|
||||
auto nixConfEnv = getEnv("NIX_CONFIG");
|
||||
if (nixConfEnv.has_value()) {
|
||||
config.applyConfig(nixConfEnv.value(), "NIX_CONFIG");
|
||||
}
|
||||
|
||||
}
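/* Resulting precedence, lowest to highest: the system nix.conf, then the
   user config files (applied in reverse above, so earlier entries in
   NIX_USER_CONF_FILES win), then $NIX_CONFIG. Illustrative shell usage
   of the latter:

       NIX_CONFIG='experimental-features = nix-command flakes' nix build ...
*/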

std::vector<Path> getUserConfigFiles()
{
// Use the paths specified in NIX_USER_CONF_FILES if it has been defined
auto nixConfFiles = getEnv("NIX_USER_CONF_FILES");
if (nixConfFiles.has_value()) {
return tokenizeString<std::vector<std::string>>(nixConfFiles.value(), ":");
}

// Use the paths specified by the XDG spec
std::vector<Path> files;
auto dirs = getConfigDirs();
for (auto & dir : dirs) {
files.insert(files.end(), dir + "/nix.conf");
}
return files;
}

unsigned int Settings::getDefaultCores()
{
const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency());
const unsigned int maxCPU = getMaxCPU();

if (maxCPU > 0)
return maxCPU;
else
return concurrency;
}

#if __APPLE__
static bool hasVirt() {

int hasVMM;
int hvSupport;
size_t size;

size = sizeof(hasVMM);
if (sysctlbyname("kern.hv_vmm_present", &hasVMM, &size, NULL, 0) == 0) {
if (hasVMM)
return false;
}

// whether the kernel and hardware support virtualisation
size = sizeof(hvSupport);
|
||||
if (sysctlbyname("kern.hv_support", &hvSupport, &size, NULL, 0) == 0) {
|
||||
return hvSupport == 1;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
StringSet Settings::getDefaultSystemFeatures()
|
||||
{
|
||||
/* For backwards compatibility, accept some "features" that are
|
||||
used in Nixpkgs to route builds to certain machines but don't
|
||||
actually require anything special on the machines. */
|
||||
StringSet features{"nixos-test", "benchmark", "big-parallel"};
|
||||
|
||||
#if __linux__
|
||||
features.insert("uid-range");
|
||||
#endif
|
||||
|
||||
#if __linux__
|
||||
if (access("/dev/kvm", R_OK | W_OK) == 0)
|
||||
features.insert("kvm");
|
||||
#endif
|
||||
|
||||
#if __APPLE__
|
||||
if (hasVirt())
|
||||
features.insert("apple-virt");
|
||||
#endif
|
||||
|
||||
return features;
|
||||
}
|
||||
|
||||
StringSet Settings::getDefaultExtraPlatforms()
|
||||
{
|
||||
StringSet extraPlatforms;
|
||||
|
||||
if (std::string{SYSTEM} == "x86_64-linux" && !isWSL1())
|
||||
extraPlatforms.insert("i686-linux");
|
||||
|
||||
#if __linux__
|
||||
StringSet levels = computeLevels();
|
||||
for (auto iter = levels.begin(); iter != levels.end(); ++iter)
|
||||
extraPlatforms.insert(*iter + "-linux");
|
||||
#elif __APPLE__
|
||||
// Rosetta 2 emulation layer can run x86_64 binaries on aarch64
// machines. Note that we can't prevent processes from executing
// x86_64 in aarch64 environments or vice versa since they can
// always exec with their own binary preferences.
if (std::string{SYSTEM} == "aarch64-darwin" &&
|
||||
runProgram(RunOptions {.program = "arch", .args = {"-arch", "x86_64", "/usr/bin/true"}, .mergeStderrToStdout = true}).first == 0)
|
||||
extraPlatforms.insert("x86_64-darwin");
|
||||
#endif
|
||||
|
||||
return extraPlatforms;
|
||||
}
|
||||
|
||||
bool Settings::isWSL1()
|
||||
{
|
||||
#if __linux__
|
||||
struct utsname utsbuf;
|
||||
uname(&utsbuf);
|
||||
// WSL1 uses -Microsoft suffix
|
||||
// WSL2 uses -microsoft-standard suffix
|
||||
return hasSuffix(utsbuf.release, "-Microsoft");
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
Path Settings::getDefaultSSLCertFile()
|
||||
{
|
||||
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
|
||||
if (pathAccessible(fn)) return fn;
|
||||
return "";
|
||||
}
|
||||
|
||||
const std::string nixVersion = PACKAGE_VERSION;
|
||||
|
||||
NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
|
||||
{SandboxMode::smEnabled, true},
|
||||
{SandboxMode::smRelaxed, "relaxed"},
|
||||
{SandboxMode::smDisabled, false},
|
||||
});
|
||||
|
||||
template<> SandboxMode BaseSetting<SandboxMode>::parse(const std::string & str) const
|
||||
{
|
||||
if (str == "true") return smEnabled;
|
||||
else if (str == "relaxed") return smRelaxed;
|
||||
else if (str == "false") return smDisabled;
|
||||
else throw UsageError("option '%s' has invalid value '%s'", name, str);
|
||||
}
|
||||
|
||||
template<> struct BaseSetting<SandboxMode>::trait
|
||||
{
|
||||
static constexpr bool appendable = false;
|
||||
};
|
||||
|
||||
template<> std::string BaseSetting<SandboxMode>::to_string() const
|
||||
{
|
||||
if (value == smEnabled) return "true";
|
||||
else if (value == smRelaxed) return "relaxed";
|
||||
else if (value == smDisabled) return "false";
|
||||
else unreachable();
|
||||
}
|
||||
|
||||
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
|
||||
{
|
||||
args.addFlag({
|
||||
.longName = name,
|
||||
.aliases = aliases,
|
||||
.description = "Enable sandboxing.",
|
||||
.category = category,
|
||||
.handler = {[this]() { override(smEnabled); }}
|
||||
});
|
||||
args.addFlag({
|
||||
.longName = "no-" + name,
|
||||
.aliases = aliases,
|
||||
.description = "Disable sandboxing.",
|
||||
.category = category,
|
||||
.handler = {[this]() { override(smDisabled); }}
|
||||
});
|
||||
args.addFlag({
|
||||
.longName = "relaxed-" + name,
|
||||
.aliases = aliases,
|
||||
.description = "Enable sandboxing, but allow builds to disable it.",
|
||||
.category = category,
|
||||
.handler = {[this]() { override(smRelaxed); }}
|
||||
});
|
||||
}
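/* For a setting registered as "sandbox", the flags generated above are
   `--sandbox`, `--no-sandbox` and `--relaxed-sandbox`, overriding the
   value to enabled, disabled and relaxed respectively (illustrative;
   the long names are derived from the setting's registered name). */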

unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
{
if (str == "auto") return std::max(1U, std::thread::hardware_concurrency());
else {
if (auto n = string2Int<decltype(value)>(str))
return *n;
else
throw UsageError("configuration setting '%s' should be 'auto' or an integer", name);
}
}


static void preloadNSS()
{
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
load its lookup libraries in the parent before any child gets a chance to. */
static std::once_flag dns_resolve_flag;

std::call_once(dns_resolve_flag, []() {
#ifdef __GLIBC__
/* On linux, glibc will run every lookup through the nss layer.
* That means every lookup goes, by default, through nscd, which acts as a local
* cache.
* Because we run builds in a sandbox, we also remove access to nscd otherwise
* lookups would leak into the sandbox.
*
* But now we have a new problem, we need to make sure the nss_dns backend that
* does the dns lookups when nscd is not available is loaded or available.
*
* We can't make it available without leaking nix's environment, so instead we'll
* load the backend, and configure nss so it does not try to run dns lookups
* through nscd.
*
* This is technically only used for builtins:fetch* functions so we only care
* about dns.
*
* All other platforms are unaffected.
*/
if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW))
warn("unable to load nss_dns backend");
// FIXME: get hosts entry from nsswitch.conf.
__nss_configure_lookup("hosts", "files dns");
#endif
});
}

static bool initLibStoreDone = false;

void assertLibStoreInitialized() {
if (!initLibStoreDone) {
printError("The program must call nix::initNix() before calling any libstore library functions.");
abort();
};
}

void initLibStore(bool loadConfig) {
if (initLibStoreDone) return;

initLibUtil();

if (loadConfig)
loadConfFile(globalConfig);

preloadNSS();

/* Because of an objc quirk[1], calling curl_global_init for the first time
after fork() will always result in a crash.
Up until now the solution has been to set OBJC_DISABLE_INITIALIZE_FORK_SAFETY
for every nix process to ignore that error.
Instead of working around that error we address it at the core -
by calling curl_global_init here, which should mean curl will already
have been initialized by the time we try to do so in a forked process.

[1] https://github.com/apple-oss-distributions/objc4/blob/01edf1705fbc3ff78a423cd21e03dfc21eb4d780/runtime/objc-initialize.mm#L614-L636
*/
curl_global_init(CURL_GLOBAL_ALL);
#if __APPLE__
/* On macOS, don't use the per-session TMPDIR (as set e.g. by
sshd). This breaks build users because they don't have access
to the TMPDIR, in particular in ‘nix-store --serve’. */
if (hasPrefix(defaultTempDir(), "/var/folders/"))
unsetenv("TMPDIR");
#endif

initLibStoreDone = true;
}


}
1272
subprojects/libstore/globals.hh
Normal file
File diff suppressed because it is too large
213
subprojects/libstore/http-binary-cache-store.cc
Normal file
@ -0,0 +1,213 @@
#include "http-binary-cache-store.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
#include "callback.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
MakeError(UploadToHTTP, Error);
|
||||
|
||||
|
||||
HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig(
|
||||
std::string_view scheme,
|
||||
std::string_view _cacheUri,
|
||||
const Params & params)
|
||||
: StoreConfig(params)
|
||||
, BinaryCacheStoreConfig(params)
|
||||
, cacheUri(
|
||||
std::string { scheme }
|
||||
+ "://"
|
||||
+ (!_cacheUri.empty()
|
||||
? _cacheUri
|
||||
: throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme)))
|
||||
{
|
||||
while (!cacheUri.empty() && cacheUri.back() == '/')
|
||||
cacheUri.pop_back();
|
||||
}
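/* e.g. (illustrative) "https://cache.example.org///" is normalised to
   "https://cache.example.org", so later concatenation with "/" + path
   never produces double slashes. */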


std::string HttpBinaryCacheStoreConfig::doc()
{
return
#include "http-binary-cache-store.md"
;
}


class HttpBinaryCacheStore : public virtual HttpBinaryCacheStoreConfig, public virtual BinaryCacheStore
{
private:

struct State
{
bool enabled = true;
std::chrono::steady_clock::time_point disabledUntil;
};

Sync<State> _state;

public:

HttpBinaryCacheStore(
std::string_view scheme,
PathView cacheUri,
const Params & params)
: StoreConfig(params)
, BinaryCacheStoreConfig(params)
, HttpBinaryCacheStoreConfig(scheme, cacheUri, params)
, Store(params)
, BinaryCacheStore(params)
{
diskCache = getNarInfoDiskCache();
}

std::string getUri() override
{
return cacheUri;
}

void init() override
{
// FIXME: do this lazily?
if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
try {
BinaryCacheStore::init();
} catch (UploadToHTTP &) {
throw Error("'%s' does not appear to be a binary cache", cacheUri);
}
diskCache->createCache(cacheUri, storeDir, wantMassQuery, priority);
}
}

protected:

void maybeDisable()
{
auto state(_state.lock());
if (state->enabled && settings.tryFallback) {
int t = 60;
printError("disabling binary cache '%s' for %s seconds", getUri(), t);
state->enabled = false;
state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t);
}
}

void checkEnabled()
{
auto state(_state.lock());
if (state->enabled) return;
if (std::chrono::steady_clock::now() > state->disabledUntil) {
state->enabled = true;
debug("re-enabling binary cache '%s'", getUri());
return;
}
throw SubstituterDisabled("substituter '%s' is disabled", getUri());
}

bool fileExists(const std::string & path) override
{
checkEnabled();

try {
FileTransferRequest request(makeRequest(path));
request.head = true;
getFileTransfer()->download(request);
return true;
} catch (FileTransferError & e) {
/* S3 buckets return 403 if a file doesn't exist and the
bucket is unlistable, so treat 403 as 404. */
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
return false;
maybeDisable();
throw;
}
}

void upsertFile(const std::string & path,
std::shared_ptr<std::basic_iostream<char>> istream,
const std::string & mimeType) override
{
auto req = makeRequest(path);
req.data = StreamToSourceAdapter(istream).drain();
req.mimeType = mimeType;
try {
getFileTransfer()->upload(req);
} catch (FileTransferError & e) {
throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
}
}

FileTransferRequest makeRequest(const std::string & path)
{
return FileTransferRequest(
hasPrefix(path, "https://") || hasPrefix(path, "http://") || hasPrefix(path, "file://")
? path
: cacheUri + "/" + path);

}
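/* Illustrative behaviour: with cacheUri = "https://cache.example.org",
   makeRequest("nix-cache-info") targets
   "https://cache.example.org/nix-cache-info", while a path that is
   already an absolute "http://", "https://" or "file://" URL is passed
   through unchanged. */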

void getFile(const std::string & path, Sink & sink) override
{
checkEnabled();
auto request(makeRequest(path));
try {
getFileTransfer()->download(std::move(request), sink);
} catch (FileTransferError & e) {
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
maybeDisable();
throw;
}
}

void getFile(const std::string & path,
Callback<std::optional<std::string>> callback) noexcept override
{
try {
checkEnabled();

auto request(makeRequest(path));

auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

getFileTransfer()->enqueueFileTransfer(request,
{[callbackPtr, this](std::future<FileTransferResult> result) {
try {
(*callbackPtr)(std::move(result.get().data));
} catch (FileTransferError & e) {
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
return (*callbackPtr)({});
maybeDisable();
callbackPtr->rethrow();
} catch (...) {
callbackPtr->rethrow();
}
}});

} catch (...) {
callback.rethrow();
return;
}
}

/**
* This isn't necessarily read-only. We support "upsert" now, so we
* have a notion of authentication via HTTP POST/PUT.
*
* For now, we conservatively say we don't know.
*
* \todo try to expose our HTTP authentication status.
*/
std::optional<TrustedFlag> isTrustedClient() override
{
return std::nullopt;
}
};

static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regHttpBinaryCacheStore;

}
30
subprojects/libstore/http-binary-cache-store.hh
Normal file
@ -0,0 +1,30 @@
#include "binary-cache-store.hh"
|
||||

namespace nix {

struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;

HttpBinaryCacheStoreConfig(std::string_view scheme, std::string_view _cacheUri, const Params & params);

Path cacheUri;

const std::string name() override
{
return "HTTP Binary Cache Store";
}

static std::set<std::string> uriSchemes()
{
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
auto ret = std::set<std::string>({"http", "https"});
if (forceHttp)
ret.insert("file");
return ret;
}

std::string doc() override;
};

}
8
subprojects/libstore/http-binary-cache-store.md
Normal file
@ -0,0 +1,8 @@
R"(
|
||||
|
||||
**Store URL format**: `http://...`, `https://...`
|
||||
|
||||
This store allows a binary cache to be accessed via the HTTP
|
||||
protocol.
|
||||
|
||||
)"
45
subprojects/libstore/indirect-root-store.cc
Normal file
@ -0,0 +1,45 @@
#include "indirect-root-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
void IndirectRootStore::makeSymlink(const Path & link, const Path & target)
|
||||
{
|
||||
/* Create directories up to `gcRoot'. */
|
||||
createDirs(dirOf(link));
|
||||
|
||||
/* Create the new symlink. */
|
||||
Path tempLink = fmt("%1%.tmp-%2%-%3%", link, getpid(), rand());
|
||||
createSymlink(target, tempLink);
|
||||
|
||||
/* Atomically replace the old one. */
|
||||
std::filesystem::rename(tempLink, link);
|
||||
}
|
||||
|
||||
Path IndirectRootStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
|
||||
{
|
||||
Path gcRoot(canonPath(_gcRoot));
|
||||
|
||||
if (isInStore(gcRoot))
|
||||
throw Error(
|
||||
"creating a garbage collector root (%1%) in the Nix store is forbidden "
|
||||
"(are you running nix-build inside the store?)",
|
||||
gcRoot);
|
||||
|
||||
/* Register this root with the garbage collector, if it's
running. This should be superfluous since the caller should
already have registered this root, but let's be on the safe
side. */
addTempRoot(storePath);

/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!std::filesystem::is_symlink(gcRoot) || !isInStore(readLink(gcRoot))))
throw Error("cannot create symlink '%1%'; already exists", gcRoot);

makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);

return gcRoot;
}
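/* Illustrative call, roughly what nix-build does for its ./result
   symlink (the link name is an example):

       IndirectRootStore & store = ...;
       auto link = store.addPermRoot(storePath, absPath("result"));
       // "result" now points at the store path, and an indirect root
       // referring back to it has been registered, so the path survives
       // GC until the symlink itself is deleted.
*/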

}
75
subprojects/libstore/indirect-root-store.hh
Normal file
@ -0,0 +1,75 @@
#pragma once
///@file

#include "local-fs-store.hh"

namespace nix {

/**
* Mix-in class for implementing permanent roots as a pair of a direct
* (strong) reference and indirect weak reference to the first
* reference.
*
* See methods for details on the operations it represents.
*
* @note
* To understand the purpose of this class, it might help to do some
* "closed-world" rather than "open-world" reasoning, and consider the
* problem it solved for us. This class was factored out from
* `LocalFSStore` in order to support the following table, which
* contains 4 concrete store types (non-abstract classes, exposed to the
* user), and how they implemented the two GC root methods:
*
* @note
* |                   | `addPermRoot()` | `addIndirectRoot()` |
* |-------------------|-----------------|---------------------|
* | `LocalStore`      | local           | local               |
* | `UDSRemoteStore`  | local           | remote              |
* | `SSHStore`        | doesn't have    | doesn't have        |
* | `MountedSSHStore` | remote          | doesn't have        |
*
* @note
* Note how only the local implementations of `addPermRoot()` need
* `addIndirectRoot()`; that is what this class enforces. Without it,
* and with `addPermRoot()` and `addIndirectRoot()` both `virtual`, we
* would accidentally be allowing for a combinatorial explosion of
* possible implementations many of which make no sense. Having this and
* that invariant enforced cuts down that space.
*/
struct IndirectRootStore : public virtual LocalFSStore
{
inline static std::string operationName = "Indirect GC roots registration";

/**
* Implementation of `LocalFSStore::addPermRoot` where the permanent
* root is a pair of
*
* - The user-facing symlink which all implementations must create
*
* - An additional weak reference known as the "indirect root" that
* points to that symlink.
*
* The garbage collector will automatically remove the indirect root
* when it finds that the symlink has disappeared.
*
* The implementation of this method is concrete, but it delegates
* to `addIndirectRoot()` which is abstract.
*/
Path addPermRoot(const StorePath & storePath, const Path & gcRoot) override final;

/**
* Add an indirect root, which is a weak reference to the
* user-facing symlink created by `addPermRoot()`.
*
* @param path user-facing and user-controlled symlink to a store
* path.
*
* The form this weak-reference takes is implementation-specific.
*/
virtual void addIndirectRoot(const Path & path) = 0;

protected:
void makeSymlink(const Path & link, const Path & target);
};

}
31
subprojects/libstore/keys.cc
Normal file
@ -0,0 +1,31 @@
#include "file-system.hh"
|
||||
#include "globals.hh"
|
||||
#include "keys.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
PublicKeys getDefaultPublicKeys()
|
||||
{
|
||||
PublicKeys publicKeys;
|
||||
|
||||
// FIXME: filter duplicates
|
||||
|
||||
for (auto s : settings.trustedPublicKeys.get()) {
|
||||
PublicKey key(s);
|
||||
publicKeys.emplace(key.name, key);
|
||||
}
|
||||
|
||||
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
|
||||
try {
|
||||
SecretKey secretKey(readFile(secretKeyFile));
|
||||
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
|
||||
} catch (SystemError & e) {
|
||||
/* Ignore unreadable key files. That's normal in a
|
||||
multi-user installation. */
|
||||
}
|
||||
}
|
||||
|
||||
return publicKeys;
|
||||
}
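/* For reference, trusted public keys have the form "<name>:<base64 key>",
   e.g. the well-known "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
   secret key files (settings.secretKeyFiles) hold the matching
   "<name>:<base64 secret key>" as produced by `nix-store --generate-binary-cache-key`. */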

}
10
subprojects/libstore/keys.hh
Normal file
@ -0,0 +1,10 @@
#pragma once
///@file

#include "signature/local-keys.hh"

namespace nix {

PublicKeys getDefaultPublicKeys();

}
322
subprojects/libstore/legacy-ssh-store.cc
Normal file
@ -0,0 +1,322 @@
#include "legacy-ssh-store.hh"
|
||||
#include "common-ssh-store-config.hh"
|
||||
#include "archive.hh"
|
||||
#include "pool.hh"
|
||||
#include "remote-store.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include "serve-protocol-connection.hh"
|
||||
#include "serve-protocol-impl.hh"
|
||||
#include "build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "path-with-outputs.hh"
|
||||
#include "ssh.hh"
|
||||
#include "derivations.hh"
|
||||
#include "callback.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
LegacySSHStoreConfig::LegacySSHStoreConfig(
|
||||
std::string_view scheme,
|
||||
std::string_view authority,
|
||||
const Params & params)
|
||||
: StoreConfig(params)
|
||||
, CommonSSHStoreConfig(scheme, authority, params)
|
||||
{
|
||||
}
|
||||
|
||||
std::string LegacySSHStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "legacy-ssh-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
struct LegacySSHStore::Connection : public ServeProto::BasicClientConnection
|
||||
{
|
||||
std::unique_ptr<SSHMaster::Connection> sshConn;
|
||||
bool good = true;
|
||||
};
|
||||
|
||||
LegacySSHStore::LegacySSHStore(
|
||||
std::string_view scheme,
|
||||
std::string_view host,
|
||||
const Params & params)
|
||||
: StoreConfig(params)
|
||||
, CommonSSHStoreConfig(scheme, host, params)
|
||||
, LegacySSHStoreConfig(scheme, host, params)
|
||||
, Store(params)
|
||||
, connections(make_ref<Pool<Connection>>(
|
||||
std::max(1, (int) maxConnections),
|
||||
[this]() { return openConnection(); },
|
||||
[](const ref<Connection> & r) { return r->good; }
|
||||
))
|
||||
, master(createSSHMaster(
|
||||
// Use SSH master only if using more than 1 connection.
|
||||
connections->capacity() > 1,
|
||||
logFD))
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
ref<LegacySSHStore::Connection> LegacySSHStore::openConnection()
|
||||
{
|
||||
auto conn = make_ref<Connection>();
|
||||
Strings command = remoteProgram.get();
|
||||
command.push_back("--serve");
|
||||
command.push_back("--write");
|
||||
if (remoteStore.get() != "") {
|
||||
command.push_back("--store");
|
||||
command.push_back(remoteStore.get());
|
||||
}
|
||||
conn->sshConn = master.startCommand(std::move(command));
|
||||
conn->to = FdSink(conn->sshConn->in.get());
|
||||
conn->from = FdSource(conn->sshConn->out.get());
|
||||
|
||||
StringSink saved;
|
||||
TeeSource tee(conn->from, saved);
|
||||
try {
|
||||
conn->remoteVersion = ServeProto::BasicClientConnection::handshake(
|
||||
conn->to, tee, SERVE_PROTOCOL_VERSION, host);
|
||||
} catch (SerialisationError & e) {
|
||||
// in.close(): Don't let the remote block on us not writing.
|
||||
conn->sshConn->in.close();
|
||||
{
|
||||
NullSink nullSink;
|
||||
tee.drainInto(nullSink);
|
||||
}
|
||||
throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'",
|
||||
host, chomp(saved.s));
|
||||
} catch (EndOfFile & e) {
|
||||
throw Error("cannot connect to '%1%'", host);
|
||||
}
|
||||
|
||||
return conn;
|
||||
};
|
||||
|
||||
|
||||
std::string LegacySSHStore::getUri()
|
||||
{
|
||||
return *uriSchemes().begin() + "://" + host;
|
||||
}
|
||||
|
||||
|
||||
void LegacySSHStore::queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
|
||||
{
|
||||
try {
|
||||
auto conn(connections->get());
|
||||
|
||||
/* No longer support missing NAR hash */
|
||||
assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);
|
||||
|
||||
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
|
||||
|
||||
auto infos = conn->queryPathInfos(*this, {path});
|
||||
|
||||
switch (infos.size()) {
|
||||
case 0:
|
||||
return callback(nullptr);
|
||||
case 1: {
|
||||
auto & [path2, info] = *infos.begin();
|
||||
|
||||
if (info.narHash == Hash::dummy)
|
||||
throw Error("NAR hash is now mandatory");
|
||||
|
||||
assert(path == path2);
|
||||
return callback(std::make_shared<ValidPathInfo>(
|
||||
std::move(path),
|
||||
std::move(info)
|
||||
));
|
||||
}
|
||||
default:
|
||||
throw Error("More path infos returned than queried");
|
||||
}
|
||||
} catch (...) { callback.rethrow(); }
|
||||
}
|
||||
|
||||
|
||||
void LegacySSHStore::addToStore(const ValidPathInfo & info, Source & source,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs)
|
||||
{
|
||||
debug("adding path '%s' to remote host '%s'", printStorePath(info.path), host);
|
||||
|
||||
auto conn(connections->get());
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
|
||||
|
||||
conn->to
|
||||
<< ServeProto::Command::AddToStoreNar
|
||||
<< printStorePath(info.path)
|
||||
<< (info.deriver ? printStorePath(*info.deriver) : "")
|
||||
<< info.narHash.to_string(HashFormat::Base16, false);
|
||||
ServeProto::write(*this, *conn, info.references);
|
||||
conn->to
|
||||
<< info.registrationTime
|
||||
<< info.narSize
|
||||
<< info.ultimate
|
||||
<< info.sigs
|
||||
<< renderContentAddress(info.ca);
|
||||
try {
|
||||
copyNAR(source, conn->to);
|
||||
} catch (...) {
|
||||
conn->good = false;
|
||||
throw;
|
||||
}
|
||||
conn->to.flush();
|
||||
|
||||
if (readInt(conn->from) != 1)
|
||||
throw Error("failed to add path '%s' to remote host '%s'", printStorePath(info.path), host);
|
||||
|
||||
} else {
|
||||
|
||||
conn->importPaths(*this, [&](Sink & sink) {
|
||||
try {
|
||||
copyNAR(source, sink);
|
||||
} catch (...) {
|
||||
conn->good = false;
|
||||
throw;
|
||||
}
|
||||
sink
|
||||
<< exportMagic
|
||||
<< printStorePath(info.path);
|
||||
ServeProto::write(*this, *conn, info.references);
|
||||
sink
|
||||
<< (info.deriver ? printStorePath(*info.deriver) : "")
|
||||
<< 0
|
||||
<< 0;
|
||||
});
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LegacySSHStore::narFromPath(const StorePath & path, Sink & sink)
|
||||
{
|
||||
auto conn(connections->get());
|
||||
conn->narFromPath(*this, path, [&](auto & source) {
|
||||
copyNAR(source, sink);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
static ServeProto::BuildOptions buildSettings()
|
||||
{
|
||||
return {
|
||||
.maxSilentTime = settings.maxSilentTime,
|
||||
.buildTimeout = settings.buildTimeout,
|
||||
.maxLogSize = settings.maxLogSize,
|
||||
.nrRepeats = 0, // buildRepeat hasn't worked for ages anyway
|
||||
.enforceDeterminism = 0,
|
||||
.keepFailed = settings.keepFailed,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
BuildResult LegacySSHStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode)
|
||||
{
|
||||
auto conn(connections->get());
|
||||
|
||||
conn->putBuildDerivationRequest(*this, drvPath, drv, buildSettings());
|
||||
|
||||
return conn->getBuildDerivationResponse(*this);
|
||||
}
|
||||
|
||||
|
||||
void LegacySSHStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
if (evalStore && evalStore.get() != this)
|
||||
throw Error("building on an SSH store is incompatible with '--eval-store'");
|
||||
|
||||
auto conn(connections->get());
|
||||
|
||||
conn->to << ServeProto::Command::BuildPaths;
|
||||
Strings ss;
|
||||
for (auto & p : drvPaths) {
|
||||
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
|
||||
std::visit(overloaded {
|
||||
[&](const StorePathWithOutputs & s) {
|
||||
ss.push_back(s.to_string(*this));
|
||||
},
|
||||
[&](const StorePath & drvPath) {
|
||||
throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
|
||||
},
|
||||
[&](std::monostate) {
|
||||
throw Error("wanted build derivation that is itself a build product, but the legacy ssh protocol doesn't support that. Try using ssh-ng://");
|
||||
},
}, sOrDrvPath);
}
conn->to << ss;

ServeProto::write(*this, *conn, buildSettings());

conn->to.flush();

BuildResult result;
result.status = (BuildResult::Status) readInt(conn->from);

if (!result.success()) {
conn->from >> result.errorMsg;
throw Error(result.status, result.errorMsg);
}
}


void LegacySSHStore::computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection,
bool includeOutputs, bool includeDerivers)
{
if (flipDirection || includeDerivers) {
Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
return;
}

auto conn(connections->get());

conn->to
<< ServeProto::Command::QueryClosure
<< includeOutputs;
ServeProto::write(*this, *conn, paths);
conn->to.flush();

for (auto & i : ServeProto::Serialise<StorePathSet>::read(*this, *conn))
out.insert(i);
}


StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute)
{
auto conn(connections->get());
return conn->queryValidPaths(*this,
false, paths, maybeSubstitute);
}


void LegacySSHStore::connect()
{
auto conn(connections->get());
}


unsigned int LegacySSHStore::getProtocol()
{
auto conn(connections->get());
return conn->remoteVersion;
}


/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.
*/
std::optional<TrustedFlag> isTrustedClient()
{
return std::nullopt;
}


static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regLegacySSHStore;

}
141
subprojects/libstore/legacy-ssh-store.hh
Normal file
@ -0,0 +1,141 @@
#pragma once
///@file

#include "common-ssh-store-config.hh"
#include "store-api.hh"
#include "ssh.hh"
#include "callback.hh"
#include "pool.hh"

namespace nix {

struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig
{
using CommonSSHStoreConfig::CommonSSHStoreConfig;

LegacySSHStoreConfig(
std::string_view scheme,
std::string_view authority,
const Params & params);

const Setting<Strings> remoteProgram{this, {"nix-store"}, "remote-program",
"Path to the `nix-store` executable on the remote machine."};

const Setting<int> maxConnections{this, 1, "max-connections",
"Maximum number of concurrent SSH connections."};

const std::string name() override { return "SSH Store"; }

static std::set<std::string> uriSchemes() { return {"ssh"}; }

std::string doc() override;
};

struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store
{
#ifndef _WIN32
// Hack for getting remote build log output.
// Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in
// the documentation
const Setting<int> logFD{this, INVALID_DESCRIPTOR, "log-fd", "file descriptor to which SSH's stderr is connected"};
#else
Descriptor logFD = INVALID_DESCRIPTOR;
#endif

struct Connection;

ref<Pool<Connection>> connections;

SSHMaster master;

LegacySSHStore(
std::string_view scheme,
std::string_view host,
const Params & params);

ref<Connection> openConnection();

std::string getUri() override;

void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override;

void narFromPath(const StorePath & path, Sink & sink) override;

std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }

StorePath addToStore(
std::string_view name,
const SourcePath & path,
ContentAddressMethod method,
HashAlgorithm hashAlgo,
const StorePathSet & references,
PathFilter & filter,
RepairFlag repair) override
{ unsupported("addToStore"); }

virtual StorePath addToStoreFromDump(
Source & dump,
std::string_view name,
FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive,
ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive,
HashAlgorithm hashAlgo = HashAlgorithm::SHA256,
const StorePathSet & references = StorePathSet(),
RepairFlag repair = NoRepair) override
{ unsupported("addToStore"); }

public:

BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;

void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;

void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }

virtual ref<SourceAccessor> getFSAccessor(bool requireValidPath) override
{ unsupported("getFSAccessor"); }

/**
* The default instance would schedule the work on the client side, but
* for consistency with `buildPaths` and `buildDerivation` it should happen
* on the remote side.
*
* We make this fail for now so we can implement this properly later
* without it being a breaking change.
*/
void repairPath(const StorePath & path) override
{ unsupported("repairPath"); }

void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override;

StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;

void connect() override;

unsigned int getProtocol() override;

/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.
*/
std::optional<TrustedFlag> isTrustedClient() override
{
return std::nullopt;
}

void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
};

}
8
subprojects/libstore/legacy-ssh-store.md
Normal file
@@ -0,0 +1,8 @@
R"(
|
||||
|
||||
**Store URL format**: `ssh://[username@]hostname`
|
||||
|
||||
This store type allows limited access to a remote store on another
|
||||
machine via SSH.
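
For example, the following copies `nixpkgs#hello` to the store on
`example.org` (an illustrative host name, not part of this change):

```
# nix copy --to ssh://example.org nixpkgs#hello
```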
)"
162
subprojects/libstore/length-prefixed-protocol-helper.hh
Normal file
@@ -0,0 +1,162 @@
#pragma once
/**
 * @file Reusable serialisers for serialising container types in a
 * length-prefixed manner.
 *
 * Used by both the Worker and Serve protocols.
 */

#include "types.hh"

namespace nix {

struct StoreDirConfig;

/**
 * Reusable serialisers for serialising container types in a
 * length-prefixed manner.
 *
 * @param T The type of the collection being serialised
 *
 * @param Inner This is the most important parameter; this is the "inner"
 * protocol. The user of this will substitute `MyProtocol` or similar
 * when making a `MyProtocol::Serialiser<Collection<T>>`. Note that the
 * inside is allowed to call `Inner::Serialiser` on different
 * types. This is especially important for `std::map`, which doesn't have
 * a single `T` but one `K` and one `V`.
 */
template<class Inner, typename T>
struct LengthPrefixedProtoHelper;

/*!
 * \typedef LengthPrefixedProtoHelper::S
 *
 * Read this as simply `using S = Inner::Serialise;`.
 *
 * It would be nice to use that directly, but C++ doesn't seem to allow
 * it. The `typename` keyword needed to refer to `Inner` seems too greedy
 * (low precedence), and then C++ complains that `Serialise` is not a
 * type parameter but a real type.
 *
 * Making this `S` alias seems to be the only way to avoid these issues.
 */

#define LENGTH_PREFIXED_PROTO_HELPER(Inner, T) \
    struct LengthPrefixedProtoHelper< Inner, T > \
    { \
        static T read(const StoreDirConfig & store, typename Inner::ReadConn conn); \
        static void write(const StoreDirConfig & store, typename Inner::WriteConn conn, const T & str); \
    private: \
        template<typename U> using S = typename Inner::template Serialise<U>; \
    }

template<class Inner, typename T>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::vector<T>);

template<class Inner, typename T>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::set<T>);

template<class Inner, typename... Ts>
LENGTH_PREFIXED_PROTO_HELPER(Inner, std::tuple<Ts...>);

template<class Inner, typename K, typename V>
#define _X std::map<K, V>
LENGTH_PREFIXED_PROTO_HELPER(Inner, _X);
#undef _X
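
/* The `_X` indirection above is needed because the comma in
   `std::map<K, V>` would otherwise be parsed as a macro argument
   separator.

   Illustrative sketch (not part of this header): a protocol exposing
   `ReadConn`/`WriteConn` types and a `Serialise` template, say a
   hypothetical `MyProtocol`, would reach these helpers roughly as

       std::vector<StorePath> paths =
           LengthPrefixedProtoHelper<MyProtocol, std::vector<StorePath>>
               ::read(store, conn);
*/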

template<class Inner, typename T>
std::vector<T>
LengthPrefixedProtoHelper<Inner, std::vector<T>>::read(
    const StoreDirConfig & store, typename Inner::ReadConn conn)
{
    std::vector<T> resSet;
    auto size = readNum<size_t>(conn.from);
    while (size--) {
        resSet.push_back(S<T>::read(store, conn));
    }
    return resSet;
}

template<class Inner, typename T>
void
LengthPrefixedProtoHelper<Inner, std::vector<T>>::write(
    const StoreDirConfig & store, typename Inner::WriteConn conn, const std::vector<T> & resSet)
{
    conn.to << resSet.size();
    for (auto & key : resSet) {
        S<T>::write(store, conn, key);
    }
}

template<class Inner, typename T>
std::set<T>
LengthPrefixedProtoHelper<Inner, std::set<T>>::read(
    const StoreDirConfig & store, typename Inner::ReadConn conn)
{
    std::set<T> resSet;
    auto size = readNum<size_t>(conn.from);
    while (size--) {
        resSet.insert(S<T>::read(store, conn));
    }
    return resSet;
}

template<class Inner, typename T>
void
LengthPrefixedProtoHelper<Inner, std::set<T>>::write(
    const StoreDirConfig & store, typename Inner::WriteConn conn, const std::set<T> & resSet)
{
    conn.to << resSet.size();
    for (auto & key : resSet) {
        S<T>::write(store, conn, key);
    }
}

template<class Inner, typename K, typename V>
std::map<K, V>
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::read(
    const StoreDirConfig & store, typename Inner::ReadConn conn)
{
    std::map<K, V> resMap;
    auto size = readNum<size_t>(conn.from);
    while (size--) {
        auto k = S<K>::read(store, conn);
        auto v = S<V>::read(store, conn);
        resMap.insert_or_assign(std::move(k), std::move(v));
    }
    return resMap;
}

template<class Inner, typename K, typename V>
void
LengthPrefixedProtoHelper<Inner, std::map<K, V>>::write(
    const StoreDirConfig & store, typename Inner::WriteConn conn, const std::map<K, V> & resMap)
{
    conn.to << resMap.size();
    for (auto & i : resMap) {
        S<K>::write(store, conn, i.first);
        S<V>::write(store, conn, i.second);
    }
}

template<class Inner, typename... Ts>
std::tuple<Ts...>
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::read(
    const StoreDirConfig & store, typename Inner::ReadConn conn)
{
    return std::tuple<Ts...> {
        S<Ts>::read(store, conn)...,
    };
}

template<class Inner, typename... Ts>
void
LengthPrefixedProtoHelper<Inner, std::tuple<Ts...>>::write(
    const StoreDirConfig & store, typename Inner::WriteConn conn, const std::tuple<Ts...> & res)
{
    std::apply([&]<typename... Us>(const Us &... args) {
        (S<Us>::write(store, conn, args), ...);
    }, res);
}

}
34
subprojects/libstore/linux/fchmodat2-compat.hh
Normal file
@@ -0,0 +1,34 @@
/*
 * Determine the syscall number for `fchmodat2`.
 *
 * On most platforms this is 452. Exceptions can be found on
 * a glibc git checkout via `rg --pcre2 'define __NR_fchmodat2 (?!452)'`.
 *
 * The problem is that glibc 2.39 and libseccomp 2.5.5 are needed to
 * get the syscall number. However, a Nix built against nixpkgs 23.11
 * (glibc 2.38) should still have the issue fixed without depending
 * on the build environment.
 *
 * To achieve that, the macros below try to determine the platform and
 * set the syscall number, which is platform-specific but
 * in most cases 452.
 *
 * TODO: remove this when 23.11 is EOL and the entire (supported) ecosystem
 * is on glibc 2.39.
 */

#if HAVE_SECCOMP
# if defined(__alpha__)
#  define NIX_SYSCALL_FCHMODAT2 562
# elif defined(__x86_64__) && SIZE_MAX == 0xFFFFFFFF // x32
#  define NIX_SYSCALL_FCHMODAT2 1073742276
# elif defined(__mips__) && defined(__mips64) && defined(_ABIN64) // mips64/n64
#  define NIX_SYSCALL_FCHMODAT2 5452
# elif defined(__mips__) && defined(__mips64) && defined(_ABIN32) // mips64/n32
#  define NIX_SYSCALL_FCHMODAT2 6452
# elif defined(__mips__) && defined(_ABIO32) // mips32
#  define NIX_SYSCALL_FCHMODAT2 4452
# else
#  define NIX_SYSCALL_FCHMODAT2 452
# endif
#endif // HAVE_SECCOMP
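
/* Illustrative use (an assumption, not shown in this diff): the number is
   meant to feed a seccomp filter rule when libseccomp is too old to know
   the syscall by name, roughly:

       seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), NIX_SYSCALL_FCHMODAT2, 0);
*/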
10
subprojects/libstore/linux/meson.build
Normal file
@@ -0,0 +1,10 @@
sources += files(
  'personality.cc',
)

include_dirs += include_directories('.')

headers += files(
  'fchmodat2-compat.hh',
  'personality.hh',
)
41
subprojects/libstore/linux/personality.cc
Normal file
@@ -0,0 +1,41 @@
#include "personality.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/personality.h>
|
||||
|
||||
#include <cstring>
|
||||
|
||||
namespace nix::linux {
|
||||
|
||||
void setPersonality(std::string_view system)
|
||||
{
|
||||
/* Change the personality to 32-bit if we're doing an
|
||||
i686-linux build on an x86_64-linux machine. */
|
||||
struct utsname utsbuf;
|
||||
uname(&utsbuf);
|
||||
if ((system == "i686-linux"
|
||||
&& (std::string_view(SYSTEM) == "x86_64-linux"
|
||||
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|
||||
|| system == "armv7l-linux"
|
||||
|| system == "armv6l-linux"
|
||||
|| system == "armv5tel-linux")
|
||||
{
|
||||
if (personality(PER_LINUX32) == -1)
|
||||
throw SysError("cannot set 32-bit personality");
|
||||
}
|
||||
|
||||
/* Impersonate a Linux 2.6 machine to get some determinism in
|
||||
builds that depend on the kernel version. */
|
||||
if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
        int cur = personality(0xffffffff);
        if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
    }

    /* Disable address space randomization for improved
       determinism. */
    int cur = personality(0xffffffff);
    if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
}

}
12
subprojects/libstore/linux/personality.hh
Normal file
@@ -0,0 +1,12 @@
#pragma once
///@file

#include <string>

namespace nix::linux {

void setPersonality(std::string_view system);

}
132
subprojects/libstore/local-binary-cache-store.cc
Normal file
@@ -0,0 +1,132 @@
#include "local-binary-cache-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
#include <atomic>
|
||||
|
||||
namespace nix {
|
||||
|
||||
LocalBinaryCacheStoreConfig::LocalBinaryCacheStoreConfig(
|
||||
std::string_view scheme,
|
||||
PathView binaryCacheDir,
|
||||
const Params & params)
|
||||
: StoreConfig(params)
|
||||
, BinaryCacheStoreConfig(params)
|
||||
, binaryCacheDir(binaryCacheDir)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
std::string LocalBinaryCacheStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "local-binary-cache-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
struct LocalBinaryCacheStore : virtual LocalBinaryCacheStoreConfig, virtual BinaryCacheStore
|
||||
{
|
||||
/**
|
||||
* @param binaryCacheDir `file://` is a short-hand for `file:///`
|
||||
* for now.
|
||||
*/
|
||||
LocalBinaryCacheStore(
|
||||
std::string_view scheme,
|
||||
PathView binaryCacheDir,
|
||||
const Params & params)
|
||||
: StoreConfig(params)
|
||||
, BinaryCacheStoreConfig(params)
|
||||
, LocalBinaryCacheStoreConfig(scheme, binaryCacheDir, params)
|
||||
, Store(params)
|
||||
, BinaryCacheStore(params)
|
||||
{
|
||||
}
|
||||
|
||||
void init() override;
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return "file://" + binaryCacheDir;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
bool fileExists(const std::string & path) override;
|
||||
|
||||
void upsertFile(const std::string & path,
|
||||
std::shared_ptr<std::basic_iostream<char>> istream,
|
||||
const std::string & mimeType) override
|
||||
{
|
||||
auto path2 = binaryCacheDir + "/" + path;
|
||||
static std::atomic<int> counter{0};
|
||||
Path tmp = fmt("%s.tmp.%d.%d", path2, getpid(), ++counter);
|
||||
AutoDelete del(tmp, false);
|
||||
StreamToSourceAdapter source(istream);
|
||||
writeFile(tmp, source);
|
||||
std::filesystem::rename(tmp, path2);
|
||||
del.cancel();
|
||||
}
|
||||
|
||||
void getFile(const std::string & path, Sink & sink) override
|
||||
{
|
||||
try {
|
||||
readFile(binaryCacheDir + "/" + path, sink);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOENT)
|
||||
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
StorePathSet queryAllValidPaths() override
|
||||
{
|
||||
StorePathSet paths;
|
||||
|
||||
for (auto & entry : std::filesystem::directory_iterator{binaryCacheDir}) {
|
||||
checkInterrupt();
|
||||
auto name = entry.path().filename().string();
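            // A .narinfo file name is the 32-character store path hash
            // plus the 8-character ".narinfo" suffix, i.e. 40 characters.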
            if (name.size() != 40 ||
                !hasSuffix(name, ".narinfo"))
                continue;
            paths.insert(parseStorePath(
                storeDir + "/" + name.substr(0, name.size() - 8)
                + "-" + MissingName));
        }

        return paths;
    }

    std::optional<TrustedFlag> isTrustedClient() override
    {
        return Trusted;
    }
};

void LocalBinaryCacheStore::init()
{
    createDirs(binaryCacheDir + "/nar");
    createDirs(binaryCacheDir + "/" + realisationsPrefix);
    if (writeDebugInfo)
        createDirs(binaryCacheDir + "/debuginfo");
    createDirs(binaryCacheDir + "/log");
    BinaryCacheStore::init();
}

bool LocalBinaryCacheStore::fileExists(const std::string & path)
{
    return pathExists(binaryCacheDir + "/" + path);
}

std::set<std::string> LocalBinaryCacheStoreConfig::uriSchemes()
{
    if (getEnv("_NIX_FORCE_HTTP") == "1")
        return {};
    else
        return {"file"};
}

static RegisterStoreImplementation<LocalBinaryCacheStore, LocalBinaryCacheStoreConfig> regLocalBinaryCacheStore;

}
23
subprojects/libstore/local-binary-cache-store.hh
Normal file
@@ -0,0 +1,23 @@
#include "binary-cache-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
|
||||
{
|
||||
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
|
||||
|
||||
LocalBinaryCacheStoreConfig(std::string_view scheme, PathView binaryCacheDir, const Params & params);
|
||||
|
||||
Path binaryCacheDir;
|
||||
|
||||
const std::string name() override
|
||||
{
|
||||
return "Local Binary Cache Store";
|
||||
}
|
||||
|
||||
static std::set<std::string> uriSchemes();
|
||||
|
||||
std::string doc() override;
|
||||
};
|
||||
|
||||
}
|
||||
16
subprojects/libstore/local-binary-cache-store.md
Normal file
@@ -0,0 +1,16 @@
R"(
|
||||
|
||||
**Store URL format**: `file://`*path*
|
||||
|
||||
This store allows reading and writing a binary cache stored in *path*
|
||||
in the local filesystem. If *path* does not exist, it will be created.
|
||||
|
||||
For example, the following builds or downloads `nixpkgs#hello` into
|
||||
the local store and then copies it to the binary cache in
|
||||
`/tmp/binary-cache`:
|
||||
|
||||
```
|
||||
# nix copy --to file:///tmp/binary-cache nixpkgs#hello
|
||||
```
|
||||
|
||||
)"
|
||||
118
subprojects/libstore/local-fs-store.cc
Normal file
@@ -0,0 +1,118 @@
#include "archive.hh"
|
||||
#include "posix-source-accessor.hh"
|
||||
#include "store-api.hh"
|
||||
#include "local-fs-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "compression.hh"
|
||||
#include "derivations.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
LocalFSStoreConfig::LocalFSStoreConfig(PathView rootDir, const Params & params)
|
||||
: StoreConfig(params)
|
||||
// Default `?root` from `rootDir` if non set
|
||||
// FIXME don't duplicate description once we don't have root setting
|
||||
, rootDir{
|
||||
this,
|
||||
!rootDir.empty() && params.count("root") == 0
|
||||
? (std::optional<Path>{rootDir})
|
||||
: std::nullopt,
|
||||
"root",
|
||||
"Directory prefixed to all other paths."}
|
||||
{
|
||||
}
|
||||
|
||||
LocalFSStore::LocalFSStore(const Params & params)
|
||||
: Store(params)
|
||||
{
|
||||
}
|
||||
|
||||
struct LocalStoreAccessor : PosixSourceAccessor
|
||||
{
|
||||
ref<LocalFSStore> store;
|
||||
bool requireValidPath;
|
||||
|
||||
LocalStoreAccessor(ref<LocalFSStore> store, bool requireValidPath)
|
||||
: store(store)
|
||||
, requireValidPath(requireValidPath)
|
||||
{ }
|
||||
|
||||
CanonPath toRealPath(const CanonPath & path)
|
||||
{
|
||||
auto [storePath, rest] = store->toStorePath(path.abs());
|
||||
if (requireValidPath && !store->isValidPath(storePath))
|
||||
throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath));
|
||||
return CanonPath(store->getRealStoreDir()) / storePath.to_string() / CanonPath(rest);
|
||||
}
|
||||
|
||||
std::optional<Stat> maybeLstat(const CanonPath & path) override
|
||||
{
|
||||
/* Handle the case where `path` is (a parent of) the store. */
|
||||
if (isDirOrInDir(store->storeDir, path.abs()))
|
||||
return Stat{ .type = tDirectory };
|
||||
|
||||
return PosixSourceAccessor::maybeLstat(toRealPath(path));
|
||||
}
|
||||
|
||||
DirEntries readDirectory(const CanonPath & path) override
|
||||
{
|
||||
return PosixSourceAccessor::readDirectory(toRealPath(path));
|
||||
}
|
||||
|
||||
void readFile(
|
||||
const CanonPath & path,
|
||||
Sink & sink,
|
||||
std::function<void(uint64_t)> sizeCallback) override
|
||||
{
|
||||
return PosixSourceAccessor::readFile(toRealPath(path), sink, sizeCallback);
|
||||
}
|
||||
|
||||
std::string readLink(const CanonPath & path) override
|
||||
{
|
||||
return PosixSourceAccessor::readLink(toRealPath(path));
|
||||
}
|
||||
};
|
||||
|
||||
ref<SourceAccessor> LocalFSStore::getFSAccessor(bool requireValidPath)
|
||||
{
|
||||
return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(
|
||||
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())),
|
||||
requireValidPath);
|
||||
}
|
||||
|
||||
void LocalFSStore::narFromPath(const StorePath & path, Sink & sink)
|
||||
{
|
||||
if (!isValidPath(path))
|
||||
throw Error("path '%s' is not valid", printStorePath(path));
|
||||
dumpPath(getRealStoreDir() + std::string(printStorePath(path), storeDir.size()), sink);
|
||||
}
|
||||
|
||||
const std::string LocalFSStore::drvsLogDir = "drvs";
|
||||
|
||||
std::optional<std::string> LocalFSStore::getBuildLogExact(const StorePath & path)
|
||||
{
|
||||
auto baseName = path.to_string();
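
    /* Logs are looked up under two layouts: first the sharded
       <logDir>/drvs/<first two chars>/<rest> scheme, then the older flat
       <logDir>/drvs/<basename> scheme. */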
    for (int j = 0; j < 2; j++) {

        Path logPath =
            j == 0
            ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, baseName.substr(0, 2), baseName.substr(2))
            : fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
        Path logBz2Path = logPath + ".bz2";

        if (pathExists(logPath))
            return readFile(logPath);

        else if (pathExists(logBz2Path)) {
            try {
                return decompress("bzip2", readFile(logBz2Path));
            } catch (Error &) { }
        }

    }

    return std::nullopt;
}

}
85
subprojects/libstore/local-fs-store.hh
Normal file
@@ -0,0 +1,85 @@
#pragma once
///@file

#include "store-api.hh"
#include "gc-store.hh"
#include "log-store.hh"

namespace nix {

struct LocalFSStoreConfig : virtual StoreConfig
{
    using StoreConfig::StoreConfig;

    /**
     * Used to override the `root` settings. Can't be done via modifying
     * `params` reliably because this parameter is unused except for
     * passing to base class constructors.
     *
     * @todo Make this less error-prone with new store settings system.
     */
    LocalFSStoreConfig(PathView path, const Params & params);

    const OptionalPathSetting rootDir{this, std::nullopt,
        "root",
        "Directory prefixed to all other paths."};

    const PathSetting stateDir{this,
        rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
        "state",
        "Directory where Nix will store state."};

    const PathSetting logDir{this,
        rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
        "log",
        "Directory where Nix will store log files."};

    const PathSetting realStoreDir{this,
        rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",
        "Physical path of the Nix store."};
};

class LocalFSStore : public virtual LocalFSStoreConfig,
    public virtual Store,
    public virtual GcStore,
    public virtual LogStore
{
public:
    inline static std::string operationName = "Local Filesystem Store";

    const static std::string drvsLogDir;

    LocalFSStore(const Params & params);

    void narFromPath(const StorePath & path, Sink & sink) override;
    ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override;

    /**
     * Creates symlink from the `gcRoot` to the `storePath` and
     * registers the `gcRoot` as a permanent GC root. The `gcRoot`
     * symlink lives outside the store and is created and owned by the
     * user.
     *
     * @param gcRoot The location of the symlink.
     *
     * @param storePath The store object being rooted. The symlink will
     * point to `toRealPath(store.printStorePath(storePath))`.
     *
     * How the permanent GC root corresponding to this symlink is
     * managed is implementation-specific.
     */
    virtual Path addPermRoot(const StorePath & storePath, const Path & gcRoot) = 0;

    virtual Path getRealStoreDir() { return realStoreDir; }
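
    /**
     * Illustrative example (not in the original header): with no `root`
     * set, `toRealPath("/nix/store/<hash>-foo")` returns the path
     * unchanged; with `root=/mnt`, it returns `/mnt/nix/store/<hash>-foo`.
     */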
    Path toRealPath(const Path & storePath) override
    {
        assert(isInStore(storePath));
        return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1);
    }

    std::optional<std::string> getBuildLogExact(const StorePath & path) override;

};

}
292
subprojects/libstore/local-overlay-store.cc
Normal file
@@ -0,0 +1,292 @@
#include "local-overlay-store.hh"
|
||||
#include "callback.hh"
|
||||
#include "realisation.hh"
|
||||
#include "processes.hh"
|
||||
#include "url.hh"
|
||||
#include <regex>
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::string LocalOverlayStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "local-overlay-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
Path LocalOverlayStoreConfig::toUpperPath(const StorePath & path) {
|
||||
return upperLayer + "/" + path.to_string();
|
||||
}
|
||||
|
||||
LocalOverlayStore::LocalOverlayStore(std::string_view scheme, PathView path, const Params & params)
|
||||
: StoreConfig(params)
|
||||
, LocalFSStoreConfig(path, params)
|
||||
, LocalStoreConfig(params)
|
||||
, LocalOverlayStoreConfig(scheme, path, params)
|
||||
, Store(params)
|
||||
, LocalFSStore(params)
|
||||
, LocalStore(params)
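    // The URL is percent-decoded here, presumably so that a nested store
    // URL (with its own query string) can be embedded in `lower-store`.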
    , lowerStore(openStore(percentDecode(lowerStoreUri.get())).dynamic_pointer_cast<LocalFSStore>())
{
    if (checkMount.get()) {
        std::smatch match;
        std::string mountInfo;
        auto mounts = readFile(std::filesystem::path{"/proc/self/mounts"});
        auto regex = std::regex(R"((^|\n)overlay )" + realStoreDir.get() + R"( .*(\n|$))");

        // Mount points can be stacked, so there might be multiple matching entries.
        // Loop until the last match, which will be the current state of the mount point.
        while (std::regex_search(mounts, match, regex)) {
            mountInfo = match.str();
            mounts = match.suffix();
        }

        auto checkOption = [&](std::string option, std::string value) {
            return std::regex_search(mountInfo, std::regex("\\b" + option + "=" + value + "( |,)"));
        };

        auto expectedLowerDir = lowerStore->realStoreDir.get();
        if (!checkOption("lowerdir", expectedLowerDir) || !checkOption("upperdir", upperLayer)) {
            debug("expected lowerdir: %s", expectedLowerDir);
            debug("expected upperdir: %s", upperLayer);
            debug("actual mount: %s", mountInfo);
            throw Error("overlay filesystem '%s' mounted incorrectly",
                realStoreDir.get());
        }
    }
}


void LocalOverlayStore::registerDrvOutput(const Realisation & info)
{
    // First do queryRealisation on lower layer to populate DB
    auto res = lowerStore->queryRealisation(info.id);
    if (res)
        LocalStore::registerDrvOutput(*res);

    LocalStore::registerDrvOutput(info);
}


void LocalOverlayStore::queryPathInfoUncached(const StorePath & path,
    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
    auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

    LocalStore::queryPathInfoUncached(path,
        {[this, path, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
            try {
                auto info = fut.get();
                if (info)
                    return (*callbackPtr)(std::move(info));
            } catch (...) {
                return callbackPtr->rethrow();
            }
            // If we don't have it, check lower store
            lowerStore->queryPathInfo(path,
                {[path, callbackPtr](std::future<ref<const ValidPathInfo>> fut) {
                    try {
                        (*callbackPtr)(fut.get().get_ptr());
                    } catch (...) {
                        return callbackPtr->rethrow();
                    }
                }});
        }});
}


void LocalOverlayStore::queryRealisationUncached(const DrvOutput & drvOutput,
    Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
    auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

    LocalStore::queryRealisationUncached(drvOutput,
        {[this, drvOutput, callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
            try {
                auto info = fut.get();
                if (info)
                    return (*callbackPtr)(std::move(info));
            } catch (...) {
                return callbackPtr->rethrow();
            }
            // If we don't have it, check lower store
            lowerStore->queryRealisation(drvOutput,
                {[callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
                    try {
                        (*callbackPtr)(fut.get());
                    } catch (...) {
                        return callbackPtr->rethrow();
                    }
                }});
        }});
}


bool LocalOverlayStore::isValidPathUncached(const StorePath & path)
{
    auto res = LocalStore::isValidPathUncached(path);
    if (res) return res;
    res = lowerStore->isValidPath(path);
    if (res) {
        // Get path info from lower store so upper DB genuinely has it.
        auto p = lowerStore->queryPathInfo(path);
        // Recur on references, syncing the entire closure.
        for (auto & r : p->references)
            if (r != path)
                isValidPath(r);
        LocalStore::registerValidPath(*p);
    }
    return res;
}


void LocalOverlayStore::queryReferrers(const StorePath & path, StorePathSet & referrers)
{
    LocalStore::queryReferrers(path, referrers);
    lowerStore->queryReferrers(path, referrers);
}


void LocalOverlayStore::queryGCReferrers(const StorePath & path, StorePathSet & referrers)
{
    LocalStore::queryReferrers(path, referrers);
}


StorePathSet LocalOverlayStore::queryValidDerivers(const StorePath & path)
{
    auto res = LocalStore::queryValidDerivers(path);
    for (auto p : lowerStore->queryValidDerivers(path))
        res.insert(p);
    return res;
}


std::optional<StorePath> LocalOverlayStore::queryPathFromHashPart(const std::string & hashPart)
{
    auto res = LocalStore::queryPathFromHashPart(hashPart);
    if (res)
        return res;
    else
        return lowerStore->queryPathFromHashPart(hashPart);
}


void LocalOverlayStore::registerValidPaths(const ValidPathInfos & infos)
{
    // First, get any from lower store so we merge
    {
        StorePathSet notInUpper;
        for (auto & [p, _] : infos)
            if (!LocalStore::isValidPathUncached(p)) // avoid divergence
                notInUpper.insert(p);
        auto pathsInLower = lowerStore->queryValidPaths(notInUpper);
        ValidPathInfos inLower;
        for (auto & p : pathsInLower)
            inLower.insert_or_assign(p, *lowerStore->queryPathInfo(p));
        LocalStore::registerValidPaths(inLower);
    }
    // Then do original request
    LocalStore::registerValidPaths(infos);
}


void LocalOverlayStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    LocalStore::collectGarbage(options, results);

    remountIfNecessary();
}


void LocalOverlayStore::deleteStorePath(const Path & path, uint64_t & bytesFreed)
{
    auto mergedDir = realStoreDir.get() + "/";
    if (path.substr(0, mergedDir.length()) != mergedDir) {
        warn("local-overlay: unexpected gc path '%s'", path);
        return;
    }

    StorePath storePath = {path.substr(mergedDir.length())};
    auto upperPath = toUpperPath(storePath);

    if (pathExists(upperPath)) {
        debug("upper exists: %s", path);
        if (lowerStore->isValidPath(storePath)) {
            debug("lower exists: %s", storePath.to_string());
            // Path also exists in lower store.
            // We must delete via upper layer to avoid creating a whiteout.
            deletePath(upperPath, bytesFreed);
            _remountRequired = true;
        } else {
            // Path does not exist in lower store.
            // So we can delete via overlayfs and do not need to remount.
            LocalStore::deleteStorePath(path, bytesFreed);
        }
    }
}


void LocalOverlayStore::optimiseStore()
{
    Activity act(*logger, actOptimiseStore);

    // Note: for LocalOverlayStore, queryAllValidPaths only returns paths in the upper layer
    auto paths = queryAllValidPaths();

    act.progress(0, paths.size());

    uint64_t done = 0;

    for (auto & path : paths) {
        if (lowerStore->isValidPath(path)) {
            uint64_t bytesFreed = 0;
            // Deduplicate store path
            deleteStorePath(Store::toRealPath(path), bytesFreed);
        }
        done++;
        act.progress(done, paths.size());
    }

    remountIfNecessary();
}


LocalStore::VerificationResult LocalOverlayStore::verifyAllValidPaths(RepairFlag repair)
{
    StorePathSet done;

    auto existsInStoreDir = [&](const StorePath & storePath) {
        return pathExists(realStoreDir.get() + "/" + storePath.to_string());
    };

    bool errors = false;
    StorePathSet validPaths;

    for (auto & i : queryAllValidPaths())
        verifyPath(i, existsInStoreDir, done, validPaths, repair, errors);

    return {
        .errors = errors,
        .validPaths = validPaths,
    };
}


void LocalOverlayStore::remountIfNecessary()
{
    if (!_remountRequired) return;

    if (remountHook.get().empty()) {
        warn("'%s' needs remounting, set remount-hook to do this automatically", realStoreDir.get());
    } else {
        runProgram(remountHook, false, {realStoreDir});
    }

    _remountRequired = false;
}


static RegisterStoreImplementation<LocalOverlayStore, LocalOverlayStoreConfig> regLocalOverlayStore;

}
218
subprojects/libstore/local-overlay-store.hh
Normal file
@@ -0,0 +1,218 @@
#include "local-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
/**
|
||||
* Configuration for `LocalOverlayStore`.
|
||||
*/
|
||||
struct LocalOverlayStoreConfig : virtual LocalStoreConfig
|
||||
{
|
||||
LocalOverlayStoreConfig(const StringMap & params)
|
||||
: LocalOverlayStoreConfig("local-overlay", "", params)
|
||||
{ }
|
||||
|
||||
LocalOverlayStoreConfig(std::string_view scheme, PathView path, const Params & params)
|
||||
: StoreConfig(params)
|
||||
, LocalFSStoreConfig(path, params)
|
||||
, LocalStoreConfig(scheme, path, params)
|
||||
{
|
||||
}
|
||||
|
||||
const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store",
|
||||
R"(
|
||||
[Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
|
||||
for the lower store. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly).
|
||||
|
||||
Must be a store with a store dir on the file system.
|
||||
Must be used as OverlayFS lower layer for this store's store dir.
|
||||
)"};
|
||||
|
||||
const PathSetting upperLayer{(StoreConfig*) this, "", "upper-layer",
|
||||
R"(
|
||||
Directory containing the OverlayFS upper layer for this store's store dir.
|
||||
)"};
|
||||
|
||||
Setting<bool> checkMount{(StoreConfig*) this, true, "check-mount",
|
||||
R"(
|
||||
Check that the overlay filesystem is correctly mounted.
|
||||
|
||||
Nix does not manage the overlayfs mount point itself, but the correct
|
||||
functioning of the overlay store does depend on this mount point being set up
|
||||
correctly. Rather than just assume this is the case, check that the lowerdir
|
||||
and upperdir options are what we expect them to be. This check is on by
|
||||
default, but can be disabled if needed.
|
||||
)"};
|
||||
|
||||
const PathSetting remountHook{(StoreConfig*) this, "", "remount-hook",
|
||||
R"(
|
||||
Script or other executable to run when overlay filesystem needs remounting.
|
||||
|
||||
This is occasionally necessary when deleting a store path that exists in both upper and lower layers.
|
||||
In such a situation, bypassing OverlayFS and deleting the path in the upper layer directly
|
||||
is the only way to perform the deletion without creating a "whiteout".
|
||||
However this causes the OverlayFS kernel data structures to get out-of-sync,
|
||||
and can lead to 'stale file handle' errors; remounting solves the problem.
|
||||
|
||||
The store directory is passed as an argument to the invoked executable.
|
||||
)"};
|
||||
|
||||
const std::string name() override { return "Experimental Local Overlay Store"; }
|
||||
|
||||
std::optional<ExperimentalFeature> experimentalFeature() const override
|
||||
{
|
||||
return ExperimentalFeature::LocalOverlayStore;
|
||||
}
|
||||
|
||||
static std::set<std::string> uriSchemes()
|
||||
{
|
||||
return { "local-overlay" };
|
||||
}
|
||||
|
||||
std::string doc() override;
|
||||
|
||||
protected:
|
||||
/**
|
||||
* @return The host OS path corresponding to the store path for the
|
||||
* upper layer.
|
||||
*
|
||||
* @note The there is no guarantee a store object is actually stored
|
||||
* at that file path. It might be stored in the lower layer instead,
|
||||
* or it might not be part of this store at all.
|
||||
*/
|
||||
Path toUpperPath(const StorePath & path);
|
||||
};
|
||||
|
||||
/**
|
||||
* Variation of local store using OverlayFS for the store directory.
|
||||
*
|
||||
* Documentation on overridden methods states how they differ from their
|
||||
* `LocalStore` counterparts.
|
||||
*/
|
||||
class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual LocalStore
|
||||
{
|
||||
/**
|
||||
* The store beneath us.
|
||||
*
|
||||
* Our store dir should be an overlay fs where the lower layer
|
||||
* is that store's store dir, and the upper layer is some
|
||||
* scratch storage just for us.
|
||||
*/
|
||||
ref<LocalFSStore> lowerStore;
|
||||
|
||||
public:
|
||||
LocalOverlayStore(const Params & params)
|
||||
: LocalOverlayStore("local-overlay", "", params)
|
||||
{
|
||||
}
|
||||
|
||||
LocalOverlayStore(std::string_view scheme, PathView path, const Params & params);
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return "local-overlay://";
|
||||
}
|
||||
|
||||
private:
|
||||
/**
|
||||
* First copy up any lower store realisation with the same key, so we
|
||||
* merge rather than mask it.
|
||||
*/
|
||||
void registerDrvOutput(const Realisation & info) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*
|
||||
* In addition, copy up metadata for lower store objects (and their
|
||||
* closure). (I.e. Optimistically cache in the upper DB.)
|
||||
*/
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
/**
|
||||
* Check the lower store and upper DB.
|
||||
*/
|
||||
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
/**
|
||||
* Check the lower store and upper DB.
|
||||
*/
|
||||
StorePathSet queryValidDerivers(const StorePath & path) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
|
||||
|
||||
/**
|
||||
* First copy up any lower store realisation with the same key, so we
|
||||
* merge rather than mask it.
|
||||
*/
|
||||
void registerValidPaths(const ValidPathInfos & infos) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
void queryRealisationUncached(const DrvOutput&,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
|
||||
/**
|
||||
* Call `remountIfNecessary` after collecting garbage normally.
|
||||
*/
|
||||
void collectGarbage(const GCOptions & options, GCResults & results) override;
|
||||
|
||||
/**
|
||||
* Check which layers the store object exists in to try to avoid
|
||||
* needing to remount.
|
||||
*/
|
||||
void deleteStorePath(const Path & path, uint64_t & bytesFreed) override;
|
||||
|
||||
/**
|
||||
* Deduplicate by removing store objects from the upper layer that
|
||||
* are now in the lower layer.
|
||||
*
|
||||
* Operations on a layered store will not cause duplications, but addition of
|
||||
* new store objects to the lower layer can instill induce them
|
||||
* (there is no way to prevent that). This cleans up those
|
||||
* duplications.
|
||||
*
|
||||
* @note We do not yet optomise the upper layer in the normal way
|
||||
* (hardlink) yet. We would like to, but it requires more
|
||||
* refactoring of existing code to support this sustainably.
|
||||
*/
|
||||
void optimiseStore() override;
|
||||
|
||||
/**
|
||||
* Check all paths registered in the upper DB.
|
||||
*
|
||||
* Note that this includes store objects that reside in either overlayfs layer;
|
||||
* just enumerating the contents of the upper layer would skip them.
|
||||
*
|
||||
* We don't verify the contents of both layers on the assumption that the lower layer is far bigger,
|
||||
* and also the observation that anything not in the upper db the overlayfs doesn't yet care about.
|
||||
*/
|
||||
VerificationResult verifyAllValidPaths(RepairFlag repair) override;
|
||||
|
||||
/**
|
||||
* Deletion only effects the upper layer, so we ignore lower-layer referrers.
|
||||
*/
|
||||
void queryGCReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
/**
|
||||
* Call the `remountHook` if we have done something such that the
|
||||
* OverlayFS needed to be remounted. See that hook's user-facing
|
||||
* documentation for further details.
|
||||
*/
|
||||
void remountIfNecessary();
|
||||
|
||||
/**
|
||||
* State for `remountIfNecessary`
|
||||
*/
|
||||
std::atomic_bool _remountRequired = false;
|
||||
};
|
||||
|
||||
}
131
subprojects/libstore/local-overlay-store.md
Normal file
@@ -0,0 +1,131 @@
R"(
|
||||
|
||||
**Store URL format**: `local-overlay`
|
||||
|
||||
This store type is a variation of the [local store] designed to leverage Linux's [Overlay Filesystem](https://docs.kernel.org/filesystems/overlayfs.html) (OverlayFS for short).
|
||||
Just as OverlayFS combines a lower and upper filesystem by treating the upper one as a patch against the lower, the local overlay store combines a lower store with an upper almost-[local store].
|
||||
("almost" because while the upper filesystems for OverlayFS is valid on its own, the upper almost-store is not a valid local store on its own because some references will dangle.)
|
||||
To use this store, you will first need to configure an OverlayFS mountpoint [appropriately](#example-filesystem-layout) as Nix will not do this for you (though it will verify the mountpoint is configured correctly).
|
||||
|
||||
### Conceptual parts of a local overlay store
|
||||
|
||||
*This is a more abstract/conceptual description of the parts of a layered store, an authoritative reference.
|
||||
For more "practical" instructions, see the worked-out example in the next subsection.*
|
||||
|
||||
The parts of a local overlay store are as follows:
|
||||
|
||||
- **Lower store**:
|
||||
|
||||
> Specified with the [`lower-store`](#store-experimental-local-overlay-store-lower-store) setting.
|
||||
|
||||
This is any store implementation that includes a store directory as part of the native operating system filesystem.
|
||||
For example, this could be a [local store], [local daemon store], or even another local overlay store.
|
||||
|
||||
The local overlay store never tries to modify the lower store in any way.
|
||||
Something else could modify the lower store, but there are restrictions on this
|
||||
Nix itself requires that this store only grow, and not change in other ways.
|
||||
For example, new store objects can be added, but deleting or modifying store objects is not allowed in general, because that will confuse and corrupt any local overlay store using those objects.
|
||||
(In addition, the underlying filesystem overlay mechanism may impose additional restrictions, see below.)
|
||||
|
||||
The lower store must not change while it is mounted as part of an overlay store.
|
||||
To ensure it does not, you might want to mount the store directory read-only (which then requires the [read-only] parameter to be set to `true`).
|
||||
|
||||
- **Lower store directory**:
|
||||
|
||||
> Specified with `lower-store.real` setting.
|
||||
|
||||
This is the directory used/exposed by the lower store.
|
||||
|
||||
As specified above, Nix requires the local store can only grow not change in other ways.
|
||||
Linux's OverlayFS in addition imposes the further requirement that this directory cannot change at all.
|
||||
That means that, while any local overlay store exists that is using this store as a lower store, this directory must not change.
|
||||
|
||||
- **Lower metadata source**:
|
||||
|
||||
> Not directly specified.
|
||||
> A consequence of the `lower-store` setting, depending on the type of lower store chosen.
|
||||
|
||||
This is abstract, just some way to read the metadata of lower store [store objects][store object].
|
||||
For example it could be a SQLite database (for the [local store]), or a socket connection (for the [local daemon store]).
|
||||
|
||||
This need not be writable.
|
||||
As stated above a local overlay store never tries to modify its lower store.
|
||||
The lower store's metadata is considered part of the lower store, just as the store's [file system objects][file system object] that appear in the store directory are.
|
||||
|
||||
- **Upper almost-store**:
|
||||
|
||||
> Not directly specified.
|
||||
> Instead the constituent parts are independently specified as described below.
|
||||
|
||||
This is almost but not quite just a [local store].
|
||||
That is because taken in isolation, not as part of a local overlay store, by itself, it would appear corrupted.
|
||||
But combined with everything else as part of an overlay local store, it is valid.
|
||||
|
||||
- **Upper layer directory**:
|
||||
|
||||
> Specified with [`upper-layer`](#store-experimental-local-overlay-store-upper-layer) setting.
|
||||
|
||||
This contains additional [store objects][store object]
|
||||
(or, strictly speaking, their [file system objects][file system object] that the local overlay store will extend the lower store with).
|
||||
|
||||
- **Upper store directory**:
|
||||
|
||||
> Specified with the [`real`](#store-experimental-local-overlay-store-real) setting.
|
||||
> This the same as the base local store setting, and can also be indirectly specified with the [`root`](#store-experimental-local-overlay-store-root) setting.
|
||||
|
||||
This contains all the store objects from each of the two directories.
|
||||
|
||||
The lower store directory and upper layer directory are combined via OverlayFS to create this directory.
|
||||
Nix doesn't do this itself, because it typically wouldn't have the permissions to do so, so it is the responsibility of the user to set this up first.
|
||||
Nix can, however, optionally check that the OverlayFS mount settings appear as expected, matching Nix's own settings.
|
||||
|
||||
- **Upper SQLite database**:
|
||||
|
||||
> Not directly specified.
|
||||
> The location of the database instead depends on the [`state`](#store-experimental-local-overlay-store-state) setting.
|
||||
> It is always `${state}/db`.
|
||||
|
||||
This contains the metadata of all of the upper layer [store objects][store object] (everything beyond their file system objects), and also duplicate copies of some lower layer store object's metadta.
|
||||
The duplication is so the metadata for the [closure](@docroot@/glossary.md#gloss-closure) of upper layer [store objects][store object] can be found entirely within the upper layer.
|
||||
(This allows us to use the same SQL Schema as the [local store]'s SQLite database, as foreign keys in that schema enforce closure metadata to be self-contained in this way.)
|
||||
|
||||
[file system object]: @docroot@/store/file-system-object.md
|
||||
[store object]: @docroot@/store/store-object.md
|
||||
|
||||
|
||||
### Example filesystem layout
|
||||
|
||||
Here is a worked out example of usage, following the concepts in the previous section.
|
||||
|
||||
Say we have the following paths:
|
||||
|
||||
- `/mnt/example/merged-store/nix/store`
|
||||
|
||||
- `/mnt/example/store-a/nix/store`
|
||||
|
||||
- `/mnt/example/store-b`
|
||||
|
||||
Then the following store URI can be used to access a local-overlay store at `/mnt/example/merged-store`:
|
||||
|
||||
```
|
||||
local-overlay://?root=/mnt/example/merged-store&lower-store=/mnt/example/store-a&upper-layer=/mnt/example/store-b
|
||||
```
|
||||
|
||||
The lower store directory is located at `/mnt/example/store-a/nix/store`, while the upper layer is at `/mnt/example/store-b`.
|
||||
|
||||
Before accessing the overlay store you will need to ensure the OverlayFS mount is set up correctly:
|
||||
|
||||
```shell
|
||||
mount -t overlay overlay \
|
||||
-o lowerdir="/mnt/example/store-a/nix/store" \
|
||||
-o upperdir="/mnt/example/store-b" \
|
||||
-o workdir="/mnt/example/workdir" \
|
||||
"/mnt/example/merged-store/nix/store"
|
||||
```
|
||||
|
||||
Note that OverlayFS requires `/mnt/example/workdir` to be on the same volume as the `upperdir`.
|
||||
|
||||
By default, Nix will check that the mountpoint as been set up correctly and fail with an error if it has not.
|
||||
You can override this behaviour by passing [`check-mount=false`](#store-experimental-local-overlay-store-check-mount) if you need to.
|
||||
|
||||
)"
1713
subprojects/libstore/local-store.cc
Normal file
File diff suppressed because it is too large
412
subprojects/libstore/local-store.hh
Normal file
@@ -0,0 +1,412 @@
#pragma once
|
||||
///@file
|
||||
|
||||
#include "sqlite.hh"
|
||||
|
||||
#include "pathlocks.hh"
|
||||
#include "store-api.hh"
|
||||
#include "indirect-root-store.hh"
|
||||
#include "sync.hh"
|
||||
|
||||
#include <chrono>
|
||||
#include <future>
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/**
|
||||
* Nix store and database schema version.
|
||||
*
|
||||
* Version 1 (or 0) was Nix <=
|
||||
* 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
|
||||
* Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
|
||||
* Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0.
|
||||
*/
|
||||
const int nixSchemaVersion = 10;
|
||||
|
||||
|
||||
struct OptimiseStats
|
||||
{
|
||||
unsigned long filesLinked = 0;
|
||||
uint64_t bytesFreed = 0;
|
||||
};
|
||||
|
||||
struct LocalStoreConfig : virtual LocalFSStoreConfig
|
||||
{
|
||||
using LocalFSStoreConfig::LocalFSStoreConfig;
|
||||
|
||||
LocalStoreConfig(
|
||||
std::string_view scheme,
|
||||
std::string_view authority,
|
||||
const Params & params);
|
||||
|
||||
Setting<bool> requireSigs{this,
|
||||
settings.requireSigs,
|
||||
"require-sigs",
|
||||
"Whether store paths copied into this store should have a trusted signature."};
|
||||
|
||||
Setting<bool> readOnly{this,
|
||||
false,
|
||||
"read-only",
|
||||
R"(
|
||||
Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
|
||||
|
||||
Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
|
||||
|
||||
Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
|
||||
|
||||
> **Warning**
|
||||
> Do not use this unless the filesystem is read-only.
|
||||
>
|
||||
> Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
|
||||
> While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
|
||||
)"};
|
||||
|
||||
const std::string name() override { return "Local Store"; }
|
||||
|
||||
static std::set<std::string> uriSchemes()
|
||||
{ return {"local"}; }
|
||||
|
||||
std::string doc() override;
|
||||
};
|
||||
|
||||
class LocalStore : public virtual LocalStoreConfig
|
||||
, public virtual IndirectRootStore
|
||||
, public virtual GcStore
|
||||
{
|
||||
private:
|
||||
|
||||
/**
|
||||
* Lock file used for upgrading.
|
||||
*/
|
||||
AutoCloseFD globalLock;
|
||||
|
||||
struct State
|
||||
{
|
||||
/**
|
||||
* The SQLite database object.
|
||||
*/
|
||||
SQLite db;
|
||||
|
||||
struct Stmts;
|
||||
std::unique_ptr<Stmts> stmts;
|
||||
|
||||
/**
|
||||
* The last time we checked whether to do an auto-GC, or an
|
||||
* auto-GC finished.
|
||||
*/
|
||||
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
|
||||
|
||||
/**
|
||||
* Whether auto-GC is running. If so, get gcFuture to wait for
|
||||
* the GC to finish.
|
||||
*/
|
||||
bool gcRunning = false;
|
||||
std::shared_future<void> gcFuture;
|
||||
|
||||
/**
|
||||
* How much disk space was available after the previous
|
||||
* auto-GC. If the current available disk space is below
|
||||
* minFree but not much below availAfterGC, then there is no
|
||||
* point in starting a new GC.
|
||||
*/
|
||||
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
|
||||
|
||||
std::unique_ptr<PublicKeys> publicKeys;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
public:
|
||||
|
||||
const Path dbDir;
|
||||
const Path linksDir;
|
||||
const Path reservedPath;
|
||||
const Path schemaPath;
|
||||
const Path tempRootsDir;
|
||||
const Path fnTempRoots;
|
||||
|
||||
private:
|
||||
|
||||
const PublicKeys & getPublicKeys();
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Hack for build-remote.cc.
|
||||
*/
|
||||
PathSet locksHeld;
|
||||
|
||||
/**
|
||||
* Initialise the local store, upgrading the schema if
|
||||
* necessary.
|
||||
*/
|
||||
LocalStore(const Params & params);
|
||||
LocalStore(
|
||||
std::string_view scheme,
|
||||
PathView path,
|
||||
const Params & params);
|
||||
|
||||
~LocalStore();
|
||||
|
||||
/**
|
||||
* Implementations of abstract store API methods.
|
||||
*/
|
||||
|
||||
std::string getUri() override;
|
||||
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
    StorePathSet queryValidPaths(const StorePathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override;

    StorePathSet queryAllValidPaths() override;

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

    void queryReferrers(const StorePath & path, StorePathSet & referrers) override;

    StorePathSet queryValidDerivers(const StorePath & path) override;

    std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;

    bool pathInfoIsUntrusted(const ValidPathInfo &) override;

    bool realisationIsUntrusted(const Realisation &) override;

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs) override;

    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        FileSerialisationMethod dumpMethod,
        ContentAddressMethod hashMethod,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        RepairFlag repair) override;

    void addTempRoot(const StorePath & path) override;

private:

    void createTempRootsFile();

    /**
     * The file to which we write our temporary roots.
     */
    Sync<AutoCloseFD> _fdTempRoots;

    /**
     * The global GC lock.
     */
    Sync<AutoCloseFD> _fdGCLock;

    /**
     * Connection to the garbage collector.
     */
    Sync<AutoCloseFD> _fdRootsSocket;

public:

    /**
     * Implementation of IndirectRootStore::addIndirectRoot().
     *
     * The weak reference is merely a symlink to `path` from
     * /nix/var/nix/gcroots/auto/<hash of `path`>.
     */
    void addIndirectRoot(const Path & path) override;

private:

    void findTempRoots(Roots & roots, bool censor);

    AutoCloseFD openGCLock();

public:

    Roots findRoots(bool censor) override;

    void collectGarbage(const GCOptions & options, GCResults & results) override;

    /**
     * Called by `collectGarbage` to trace in reverse.
     *
     * Using this rather than `queryReferrers` directly allows us to
     * fine-tune which referrers we consider for garbage collection;
     * some store implementations take advantage of this.
     */
    virtual void queryGCReferrers(const StorePath & path, StorePathSet & referrers)
    {
        return queryReferrers(path, referrers);
    }

    /**
     * Called by `collectGarbage` to recursively delete a path.
     * The default implementation simply calls `deletePath`, but it can be
     * overridden by stores that wish to provide their own deletion behaviour.
     */
    virtual void deleteStorePath(const Path & path, uint64_t & bytesFreed);

    /**
     * Optimise the disk space usage of the Nix store by hard-linking
     * files with the same contents.
     */
    void optimiseStore(OptimiseStats & stats);

    void optimiseStore() override;

    /**
     * Optimise a single store path. Optionally, test the encountered
     * symlinks for corruption.
     */
    void optimisePath(const Path & path, RepairFlag repair);

    bool verifyStore(bool checkContents, RepairFlag repair) override;

protected:

    /**
     * Result of `verifyAllValidPaths`
     */
    struct VerificationResult {
        /**
         * Whether any errors were encountered
         */
        bool errors;

        /**
         * A set of so-far valid paths. The store objects pointed to by
         * those paths are suitable for further validation checking.
         */
        StorePathSet validPaths;
    };

    /**
     * First, unconditional step of `verifyStore`
     */
    virtual VerificationResult verifyAllValidPaths(RepairFlag repair);

public:

    /**
     * Register the validity of a path, i.e., that `path` exists, that
     * the paths referenced by it exist, and, in the case of an output
     * path of a derivation, that it has been produced by a successful
     * execution of the derivation (or something equivalent). Also
     * register the hash of the file system contents of the path. The
     * hash must be a SHA-256 hash.
     */
    void registerValidPath(const ValidPathInfo & info);

    virtual void registerValidPaths(const ValidPathInfos & infos);

    unsigned int getProtocol() override;

    std::optional<TrustedFlag> isTrustedClient() override;
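    /**
     * Compact the SQLite store database (presumably by running its
     * `VACUUM` command) to reclaim space after many deletions.
     */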
    void vacuumDB();

    void addSignatures(const StorePath & storePath, const StringSet & sigs) override;

    /**
     * If free disk space in /nix/store is below minFree, delete
     * garbage until it exceeds maxFree.
     */
    void autoGC(bool sync = true);

    /**
     * Register the store path 'output' as the output named 'outputName' of
     * derivation 'deriver'.
     */
    void registerDrvOutput(const Realisation & info) override;
    void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
    void cacheDrvOutputMapping(
        State & state,
        const uint64_t deriver,
        const std::string & outputName,
        const StorePath & output);

    std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
    std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
    void queryRealisationUncached(const DrvOutput &,
        Callback<std::shared_ptr<const Realisation>> callback) noexcept override;

    std::optional<std::string> getVersion() override;

protected:

    void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
        StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);

private:

    /**
     * Retrieve the current version of the database schema.
     * If the database does not exist yet, the version returned will be 0.
     */
    int getSchema();

    void openDB(State & state, bool create);

    void makeStoreWritable();

    uint64_t queryValidPathId(State & state, const StorePath & path);

    uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);

    void invalidatePath(State & state, const StorePath & path);

    /**
     * Delete a path from the Nix store.
     */
    void invalidatePathChecked(const StorePath & path);

    std::shared_ptr<const ValidPathInfo> queryPathInfoInternal(State & state, const StorePath & path);

    void updatePathInfo(State & state, const ValidPathInfo & info);

    PathSet queryValidPathsOld();
    ValidPathInfo queryPathInfoOld(const Path & path);

    void findRoots(const Path & path, std::filesystem::file_type type, Roots & roots);

    void findRootsNoTemp(Roots & roots, bool censor);

    void findRuntimeRoots(Roots & roots, bool censor);

    std::pair<std::filesystem::path, AutoCloseFD> createTempDirInStore();

    typedef std::unordered_set<ino_t> InodeHash;

    InodeHash loadInodeHash();
    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
    void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);

    // Internal versions that are not wrapped in retry_sqlite.
    bool isValidPath_(State & state, const StorePath & path);
    void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);

    /**
     * Add signatures to a ValidPathInfo or Realisation using the secret keys
     * specified by the ‘secret-key-files’ option.
     */
    void signPathInfo(ValidPathInfo & info);
    void signRealisation(Realisation &);

    void addBuildLog(const StorePath & drvPath, std::string_view log) override;

    friend struct LocalDerivationGoal;
    friend struct PathSubstitutionGoal;
    friend struct SubstitutionGoal;
    friend struct DerivationGoal;
};

}
39
subprojects/libstore/local-store.md
Normal file

@ -0,0 +1,39 @@
R"(
|
||||
|
||||
**Store URL format**: `local`, *root*
|
||||
|
||||
This store type accesses a Nix store in the local filesystem directly
|
||||
(i.e. not via the Nix daemon). *root* is an absolute path that is
|
||||
prefixed to other directories such as the Nix store directory. The
|
||||
store pseudo-URL `local` denotes a store that uses `/` as its root
|
||||
directory.
|
||||
|
||||
A store that uses a *root* other than `/` is called a *chroot
|
||||
store*. With such stores, the store directory is "logically" still
|
||||
`/nix/store`, so programs stored in them can only be built and
|
||||
executed by `chroot`-ing into *root*. Chroot stores only support
|
||||
building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are
|
||||
enabled.
|
||||
|
||||
For example, the following uses `/tmp/root` as the chroot environment
|
||||
to build or download `nixpkgs#hello` and then execute it:
|
||||
|
||||
```console
|
||||
# nix run --store /tmp/root nixpkgs#hello
|
||||
Hello, world!
|
||||
```
|
||||
|
||||
Here, the "physical" store location is `/tmp/root/nix/store`, and
|
||||
Nix's store metadata is in `/tmp/root/nix/var/nix/db`.
|
||||
|
||||
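Store objects keep their "logical" `/nix/store/...` names when accessed
through a chroot store. For instance, the following (illustrative)
command prints the logical store path of `hello`, even though the data
physically lives under `/tmp/root`:

```console
# nix path-info --store /tmp/root nixpkgs#hello
```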
It is also possible, but not recommended, to change the "logical"
location of the Nix store from its default of `/nix/store`. This makes
it impossible to use default substituters such as
`https://cache.nixos.org/`, and thus you may have to build everything
locally. Here is an example:

```console
# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello
```

)"
103
subprojects/libstore/local.mk
Normal file

@ -0,0 +1,103 @@
libraries += libstore

libstore_NAME = libnixstore

libstore_DIR := $(d)

libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
ifdef HOST_UNIX
  libstore_SOURCES += $(wildcard $(d)/unix/*.cc $(d)/unix/build/*.cc)
endif
ifdef HOST_LINUX
  libstore_SOURCES += $(wildcard $(d)/linux/*.cc)
endif
ifdef HOST_WINDOWS
  libstore_SOURCES += $(wildcard $(d)/windows/*.cc)
endif

libstore_LIBS = libutil

libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(THREAD_LDFLAGS)
ifdef HOST_LINUX
  libstore_LDFLAGS += -ldl
endif
ifdef HOST_WINDOWS
  libstore_LDFLAGS += -lws2_32
endif

$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))

ifeq ($(ENABLE_S3), 1)
  libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif

ifdef HOST_SOLARIS
  libstore_LDFLAGS += -lsocket
endif

ifeq ($(HAVE_SECCOMP), 1)
  libstore_LDFLAGS += $(LIBSECCOMP_LIBS)
endif

# Not just for this library itself, but also for downstream libraries using this library

INCLUDE_libstore := -I $(d) -I $(d)/build
ifdef HOST_UNIX
  INCLUDE_libstore += -I $(d)/unix -I $(d)/unix/build
endif
ifdef HOST_LINUX
  INCLUDE_libstore += -I $(d)/linux
endif
ifdef HOST_WINDOWS
  INCLUDE_libstore += -I $(d)/windows
endif
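# On Windows the compiled-in default paths below need a drive-letter
# prefix (N: apparently serves as a placeholder root here), since bare
# POSIX-style absolute paths are not meaningful there.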
ifdef HOST_WINDOWS
  NIX_ROOT = N:\\\\
else
  NIX_ROOT =
endif

# Prefix all but `NIX_STORE_DIR`, since we aren't doing a local store
# yet so a "logical" store dir that is the same as unix is preferred.
#
# Also, it keeps the unit tests working.

libstore_CXXFLAGS += \
  $(INCLUDE_libutil) $(INCLUDE_libstore) \
  -DNIX_PREFIX=\"$(NIX_ROOT)$(prefix)\" \
  -DNIX_STORE_DIR=\"$(storedir)\" \
  -DNIX_DATA_DIR=\"$(NIX_ROOT)$(datadir)\" \
  -DNIX_STATE_DIR=\"$(NIX_ROOT)$(localstatedir)/nix\" \
  -DNIX_LOG_DIR=\"$(NIX_ROOT)$(localstatedir)/log/nix\" \
  -DNIX_CONF_DIR=\"$(NIX_ROOT)$(sysconfdir)/nix\" \
  -DNIX_MAN_DIR=\"$(NIX_ROOT)$(mandir)\" \
  -DLSOF=\"$(NIX_ROOT)$(lsof)\"

ifeq ($(embedded_sandbox_shell),yes)
  libstore_CXXFLAGS += -DSANDBOX_SHELL=\"__embedded_sandbox_shell__\"

  $(d)/unix/build/local-derivation-goal.cc: $(d)/unix/embedded-sandbox-shell.gen.hh

  $(d)/unix/embedded-sandbox-shell.gen.hh: $(sandbox_shell)
	$(trace-gen) hexdump -v -e '1/1 "0x%x," "\n"' < $< > $@.tmp
	@mv $@.tmp $@
else
ifneq ($(sandbox_shell),)
  libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
endif
endif

$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

$(d)/unix/build.cc:

clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

$(eval $(call install-file-in, $(buildprefix)$(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))

$(foreach i, $(wildcard $(d)/builtins/*.hh), \
  $(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))

$(foreach i, $(wildcard $(d)/build/*.hh), \
  $(eval $(call install-file-in, $(i), $(includedir)/nix/build, 0644)))
12
subprojects/libstore/log-store.cc
Normal file

@ -0,0 +1,12 @@
#include "log-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::optional<std::string> LogStore::getBuildLog(const StorePath & path) {
|
||||
auto maybePath = getBuildDerivationPath(path);
|
||||
if (!maybePath)
|
||||
return std::nullopt;
|
||||
return getBuildLogExact(maybePath.value());
|
||||
}
|
||||
|
||||
}
|
||||
26
subprojects/libstore/log-store.hh
Normal file

@ -0,0 +1,26 @@
#pragma once
///@file

#include "store-api.hh"


namespace nix {

struct LogStore : public virtual Store
{
    inline static std::string operationName = "Build log storage and retrieval";

    /**
     * Return the build log of the specified store path, if available,
     * or null otherwise.
     */
    std::optional<std::string> getBuildLog(const StorePath & path);

    virtual std::optional<std::string> getBuildLogExact(const StorePath & path) = 0;

    virtual void addBuildLog(const StorePath & path, std::string_view log) = 0;
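    /**
     * Downcast the given store to a `LogStore`, throwing if the store
     * does not support build-log storage and retrieval.
     */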
    static LogStore & require(Store & store);
};

}
214
subprojects/libstore/machines.cc
Normal file

@ -0,0 +1,214 @@
#include "machines.hh"
|
||||
#include "globals.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
namespace nix {
|
||||
|
||||
Machine::Machine(
|
||||
const std::string & storeUri,
|
||||
decltype(systemTypes) systemTypes,
|
||||
decltype(sshKey) sshKey,
|
||||
decltype(maxJobs) maxJobs,
|
||||
decltype(speedFactor) speedFactor,
|
||||
decltype(supportedFeatures) supportedFeatures,
|
||||
decltype(mandatoryFeatures) mandatoryFeatures,
|
||||
decltype(sshPublicHostKey) sshPublicHostKey) :
|
||||
storeUri(StoreReference::parse(
|
||||
// Backwards compatibility: if the URI is schemeless, is not a path,
|
||||
// and is not one of the special store connection words, prepend
|
||||
// ssh://.
|
||||
storeUri.find("://") != std::string::npos
|
||||
|| storeUri.find("/") != std::string::npos
|
||||
|| storeUri == "auto"
|
||||
|| storeUri == "daemon"
|
||||
|| storeUri == "local"
|
||||
|| hasPrefix(storeUri, "auto?")
|
||||
|| hasPrefix(storeUri, "daemon?")
|
||||
|| hasPrefix(storeUri, "local?")
|
||||
|| hasPrefix(storeUri, "?")
|
||||
? storeUri
|
||||
: "ssh://" + storeUri)),
|
||||
systemTypes(systemTypes),
|
||||
sshKey(sshKey),
|
||||
maxJobs(maxJobs),
|
||||
speedFactor(speedFactor == 0.0f ? 1.0f : std::move(speedFactor)),
|
||||
supportedFeatures(supportedFeatures),
|
||||
mandatoryFeatures(mandatoryFeatures),
|
||||
sshPublicHostKey(sshPublicHostKey)
|
||||
{
|
||||
if (speedFactor < 0.0)
|
||||
throw UsageError("speed factor must be >= 0");
|
||||
}
|
||||
|
||||
bool Machine::systemSupported(const std::string & system) const
|
||||
{
|
||||
return system == "builtin" || (systemTypes.count(system) > 0);
|
||||
}
|
||||
|
||||
bool Machine::allSupported(const std::set<std::string> & features) const
|
||||
{
|
||||
return std::all_of(features.begin(), features.end(),
|
||||
[&](const std::string & feature) {
|
||||
return supportedFeatures.count(feature) ||
|
||||
mandatoryFeatures.count(feature);
|
||||
});
|
||||
}
|
||||
|
||||
bool Machine::mandatoryMet(const std::set<std::string> & features) const
|
||||
{
|
||||
return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
|
||||
[&](const std::string & feature) {
|
||||
return features.count(feature);
|
||||
});
|
||||
}
|
||||
|
||||
StoreReference Machine::completeStoreReference() const
|
||||
{
|
||||
auto storeUri = this->storeUri;
|
||||
|
||||
auto * generic = std::get_if<StoreReference::Specified>(&storeUri.variant);
|
||||
|
||||
if (generic && generic->scheme == "ssh") {
|
||||
storeUri.params["max-connections"] = "1";
|
||||
storeUri.params["log-fd"] = "4";
|
||||
}
|
||||
|
||||
if (generic && (generic->scheme == "ssh" || generic->scheme == "ssh-ng")) {
|
||||
if (sshKey != "")
|
||||
storeUri.params["ssh-key"] = sshKey;
|
||||
if (sshPublicHostKey != "")
|
||||
storeUri.params["base64-ssh-public-host-key"] = sshPublicHostKey;
|
||||
}
|
||||
|
||||
{
|
||||
auto & fs = storeUri.params["system-features"];
|
||||
auto append = [&](auto feats) {
|
||||
for (auto & f : feats) {
|
||||
if (fs.size() > 0) fs += ' ';
|
||||
fs += f;
|
||||
}
|
||||
};
|
||||
append(supportedFeatures);
|
||||
append(mandatoryFeatures);
|
||||
}
|
||||
|
||||
return storeUri;
|
||||
}
|
||||
|
||||
ref<Store> Machine::openStore() const
|
||||
{
|
||||
return nix::openStore(completeStoreReference());
|
||||
}
|
||||
|
||||
static std::vector<std::string> expandBuilderLines(const std::string & builders)
|
||||
{
|
||||
std::vector<std::string> result;
|
||||
for (auto line : tokenizeString<std::vector<std::string>>(builders, "\n;")) {
|
||||
trim(line);
|
||||
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
|
||||
if (line.empty()) continue;
|
||||
|
||||
if (line[0] == '@') {
|
||||
const std::string path = trim(std::string(line, 1));
|
||||
std::string text;
|
||||
try {
|
||||
text = readFile(path);
|
||||
} catch (const SysError & e) {
|
||||
if (e.errNo != ENOENT)
|
||||
throw;
|
||||
debug("cannot find machines file '%s'", path);
|
||||
}
|
||||
|
||||
const auto lines = expandBuilderLines(text);
|
||||
result.insert(end(result), begin(lines), end(lines));
|
||||
continue;
|
||||
}
|
||||
|
||||
result.emplace_back(line);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
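/* Parse a single machine specification line. The whitespace-separated
   fields are, in order: store URI, comma-separated system types, SSH
   identity file, max jobs, speed factor, supported features, mandatory
   features, and base64-encoded SSH public host key; "-" (or omission)
   leaves a field at its default. An illustrative line:

     ssh://nix@builder x86_64-linux /home/nix/.ssh/id_builder 4 1 kvm,big-parallel - -
*/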
static Machine parseBuilderLine(const std::set<std::string> & defaultSystems, const std::string & line)
{
    const auto tokens = tokenizeString<std::vector<std::string>>(line);

    auto isSet = [&](size_t fieldIndex) {
        return tokens.size() > fieldIndex && tokens[fieldIndex] != "" && tokens[fieldIndex] != "-";
    };

    auto parseUnsignedIntField = [&](size_t fieldIndex) {
        const auto result = string2Int<unsigned int>(tokens[fieldIndex]);
        if (!result) {
            throw FormatError("bad machine specification: failed to convert column #%lu of row '%s' to 'unsigned int'", fieldIndex, line);
        }
        return result.value();
    };

    auto parseFloatField = [&](size_t fieldIndex) {
        const auto result = string2Float<float>(tokens[fieldIndex]);
        if (!result) {
            throw FormatError("bad machine specification: failed to convert column #%lu of row '%s' to 'float'", fieldIndex, line);
        }
        return result.value();
    };

    auto ensureBase64 = [&](size_t fieldIndex) {
        const auto & str = tokens[fieldIndex];
        try {
            base64Decode(str);
        } catch (FormatError & e) {
            e.addTrace({}, "while parsing column #%lu of machine specification row '%s'", fieldIndex, line);
            throw;
        }
        return str;
    };

    if (!isSet(0))
        throw FormatError("bad machine specification: store URL was not found in the first column of row '%s'", line);

    // TODO use designated initializers, once C++ supports those with
    // custom constructors.
    return {
        // `storeUri`
        tokens[0],
        // `systemTypes`
        isSet(1) ? tokenizeString<std::set<std::string>>(tokens[1], ",") : defaultSystems,
        // `sshKey`
        isSet(2) ? tokens[2] : "",
        // `maxJobs`
        isSet(3) ? parseUnsignedIntField(3) : 1U,
        // `speedFactor`
        isSet(4) ? parseFloatField(4) : 1.0f,
        // `supportedFeatures`
        isSet(5) ? tokenizeString<std::set<std::string>>(tokens[5], ",") : std::set<std::string>{},
        // `mandatoryFeatures`
        isSet(6) ? tokenizeString<std::set<std::string>>(tokens[6], ",") : std::set<std::string>{},
        // `sshPublicHostKey`
        isSet(7) ? ensureBase64(7) : ""
    };
}

static Machines parseBuilderLines(const std::set<std::string> & defaultSystems, const std::vector<std::string> & builders)
{
    Machines result;
    std::transform(
        builders.begin(), builders.end(), std::back_inserter(result),
        [&](auto && line) { return parseBuilderLine(defaultSystems, line); });
    return result;
}

Machines Machine::parseConfig(const std::set<std::string> & defaultSystems, const std::string & s)
{
    const auto builderLines = expandBuilderLines(s);
    return parseBuilderLines(defaultSystems, builderLines);
}

Machines getMachines()
{
    return Machine::parseConfig({settings.thisSystem}, settings.builders);
}

}
88
subprojects/libstore/machines.hh
Normal file

@ -0,0 +1,88 @@
#pragma once
///@file

#include "ref.hh"
#include "store-reference.hh"

namespace nix {

class Store;

struct Machine;

typedef std::vector<Machine> Machines;

struct Machine {

    const StoreReference storeUri;
    const std::set<std::string> systemTypes;
    const std::string sshKey;
    const unsigned int maxJobs;
    const float speedFactor;
    const std::set<std::string> supportedFeatures;
    const std::set<std::string> mandatoryFeatures;
    const std::string sshPublicHostKey;
    bool enabled = true;

    /**
     * @return Whether `system` is either `"builtin"` or in
     * `systemTypes`.
     */
    bool systemSupported(const std::string & system) const;

    /**
     * @return Whether `features` is a subset of the union of `supportedFeatures` and
     * `mandatoryFeatures`
     */
    bool allSupported(const std::set<std::string> & features) const;

    /**
     * @return Whether `mandatoryFeatures` is a subset of `features`
     */
    bool mandatoryMet(const std::set<std::string> & features) const;

    Machine(
        const std::string & storeUri,
        decltype(systemTypes) systemTypes,
        decltype(sshKey) sshKey,
        decltype(maxJobs) maxJobs,
        decltype(speedFactor) speedFactor,
        decltype(supportedFeatures) supportedFeatures,
        decltype(mandatoryFeatures) mandatoryFeatures,
        decltype(sshPublicHostKey) sshPublicHostKey);

    /**
     * Elaborate `storeUri` into a complete store reference,
     * incorporating information from the other fields of the `Machine`
     * as applicable.
     */
    StoreReference completeStoreReference() const;

    /**
     * Open a `Store` for this machine.
     *
     * Just a simple function composition:
     * ```c++
     * nix::openStore(completeStoreReference())
     * ```
     */
    ref<Store> openStore() const;

    /**
     * Parse a machine configuration.
     *
     * Every machine is specified on its own line, and lines beginning
     * with `@` are interpreted as paths to other configuration files in
     * the same format.
     */
    static Machines parseConfig(const std::set<std::string> & defaultSystems, const std::string & config);
};

/**
 * Parse machines from the global config
 *
 * @todo Remove, globals are bad.
 */
Machines getMachines();

}
92
subprojects/libstore/make-content-addressed.cc
Normal file

@ -0,0 +1,92 @@
#include "make-content-addressed.hh"
|
||||
#include "references.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::map<StorePath, StorePath> makeContentAddressed(
|
||||
Store & srcStore,
|
||||
Store & dstStore,
|
||||
const StorePathSet & storePaths)
|
||||
{
|
||||
StorePathSet closure;
|
||||
srcStore.computeFSClosure(storePaths, closure);
|
||||
|
||||
auto paths = srcStore.topoSortPaths(closure);
|
||||
|
||||
std::reverse(paths.begin(), paths.end());
|
||||
|
||||
std::map<StorePath, StorePath> remappings;
|
||||
|
||||
for (auto & path : paths) {
|
||||
auto pathS = srcStore.printStorePath(path);
|
||||
auto oldInfo = srcStore.queryPathInfo(path);
|
||||
std::string oldHashPart(path.hashPart());
|
||||
|
||||
StringSink sink;
|
||||
srcStore.narFromPath(path, sink);
|
||||
|
||||
StringMap rewrites;
|
||||
|
||||
StoreReferences refs;
|
||||
for (auto & ref : oldInfo->references) {
|
||||
if (ref == path)
|
||||
refs.self = true;
|
||||
else {
|
||||
auto i = remappings.find(ref);
|
||||
auto replacement = i != remappings.end() ? i->second : ref;
|
||||
// FIXME: warn about unremapped paths?
|
||||
if (replacement != ref)
|
||||
rewrites.insert_or_assign(srcStore.printStorePath(ref), srcStore.printStorePath(replacement));
|
||||
refs.others.insert(std::move(replacement));
|
||||
}
|
||||
}
|
||||
|
||||
sink.s = rewriteStrings(sink.s, rewrites);
|
||||
|
||||
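        /* Hash the NAR modulo self-references: occurrences of the old
           hash part are masked out, so the resulting hash does not
           depend on the store path being replaced. */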
        HashModuloSink hashModuloSink(HashAlgorithm::SHA256, oldHashPart);
        hashModuloSink(sink.s);

        auto narModuloHash = hashModuloSink.finish().first;

        ValidPathInfo info {
            dstStore,
            path.name(),
            FixedOutputInfo {
                .method = FileIngestionMethod::NixArchive,
                .hash = narModuloHash,
                .references = std::move(refs),
            },
            Hash::dummy,
        };

        printInfo("rewriting '%s' to '%s'", pathS, dstStore.printStorePath(info.path));

        StringSink sink2;
        RewritingSink rsink2(oldHashPart, std::string(info.path.hashPart()), sink2);
        rsink2(sink.s);
        rsink2.flush();

        info.narHash = hashString(HashAlgorithm::SHA256, sink2.s);
        info.narSize = sink.s.size();

        StringSource source(sink2.s);
        dstStore.addToStore(info, source);

        remappings.insert_or_assign(std::move(path), std::move(info.path));
    }

    return remappings;
}

StorePath makeContentAddressed(
    Store & srcStore,
    Store & dstStore,
    const StorePath & fromPath)
{
    auto remappings = makeContentAddressed(srcStore, dstStore, StorePathSet { fromPath });
    auto i = remappings.find(fromPath);
    assert(i != remappings.end());
    return i->second;
}

}
24
subprojects/libstore/make-content-addressed.hh
Normal file

@ -0,0 +1,24 @@
#pragma once
///@file

#include "store-api.hh"

namespace nix {

/**
 * Rewrite a closure of store paths to be completely content addressed.
 */
std::map<StorePath, StorePath> makeContentAddressed(
    Store & srcStore,
    Store & dstStore,
    const StorePathSet & rootPaths);

/**
 * Rewrite the closure of a store path to be completely content addressed.
 *
 * This is a convenience function for the case where you only have one root path.
 */
StorePath makeContentAddressed(
    Store & srcStore,
    Store & dstStore,
    const StorePath & rootPath);

}
436
subprojects/libstore/meson.build
Normal file

@ -0,0 +1,436 @@
project('nix-store', 'cpp',
  version : files('.version'),
  default_options : [
    'cpp_std=c++2a',
    # TODO(Qyriad): increase the warning level
    'warning_level=1',
    'debug=true',
    'optimization=2',
    'errorlogs=true', # Please print logs for tests that fail
    'localstatedir=/nix/var',
  ],
  meson_version : '>= 1.1',
  license : 'LGPL-2.1-or-later',
)

cxx = meson.get_compiler('cpp')

subdir('build-utils-meson/deps-lists')

configdata = configuration_data()

# TODO rename, because it will conflict with downstream projects
configdata.set_quoted('PACKAGE_VERSION', meson.project_version())

configdata.set_quoted('SYSTEM', host_machine.cpu_family() + '-' + host_machine.system())

deps_private_maybe_subproject = [
]
deps_public_maybe_subproject = [
  dependency('nix-util'),
]
subdir('build-utils-meson/subprojects')
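# Check whether hard links to symlinks work on this file system, which
# store optimisation wants for deduplicating symlinks too. Probe by
# creating a throwaway symlink and trying to hard-link it.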
run_command('ln', '-s',
  meson.project_build_root() / '__nothing_link_target',
  meson.project_build_root() / '__nothing_symlink',
  check : true,
)
can_link_symlink = run_command('ln',
  meson.project_build_root() / '__nothing_symlink',
  meson.project_build_root() / '__nothing_hardlink',
  check : false,
).returncode() == 0
run_command('rm', '-f',
  meson.project_build_root() / '__nothing_symlink',
  meson.project_build_root() / '__nothing_hardlink',
  check : true,
)
summary('can hardlink to symlink', can_link_symlink, bool_yn : true)
configdata.set('CAN_LINK_SYMLINK', can_link_symlink.to_int())

# Check for each of these functions, and create a define like `#define HAVE_LCHOWN 1`.
#
# Only need to do functions that deps (like `libnixutil`) didn't already
# check for.
check_funcs = [
  # Optionally used for canonicalising files from the build
  'lchown',
  'statvfs',
]
foreach funcspec : check_funcs
  define_name = 'HAVE_' + funcspec.underscorify().to_upper()
  define_value = cxx.has_function(funcspec).to_int()
  configdata.set(define_name, define_value)
endforeach

has_acl_support = cxx.has_header('sys/xattr.h') \
  and cxx.has_function('llistxattr') \
  and cxx.has_function('lremovexattr')
configdata.set('HAVE_ACL_SUPPORT', has_acl_support.to_int())

subdir('build-utils-meson/threads')

boost = dependency(
  'boost',
  modules : ['container'],
  include_type: 'system',
)
# boost is a public dependency, but not a pkg-config dependency unfortunately, so we
# put it in `deps_other`.
deps_other += boost

curl = dependency('libcurl', 'curl')
deps_private += curl

# seccomp only makes sense on Linux
is_linux = host_machine.system() == 'linux'
seccomp_required = get_option('seccomp-sandboxing')
if not is_linux and seccomp_required.enabled()
  warning('Force-enabling seccomp on non-Linux does not make sense')
endif
seccomp = dependency('libseccomp', 'seccomp', required : seccomp_required, version : '>=2.5.5')
if is_linux and not seccomp.found()
  warning('Sandbox security is reduced because libseccomp has not been found! Please provide libseccomp if it supports your CPU architecture.')
endif
configdata.set('HAVE_SECCOMP', seccomp.found().to_int())
deps_private += seccomp

nlohmann_json = dependency('nlohmann_json', version : '>= 3.9')
deps_public += nlohmann_json

sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19')
deps_private += sqlite

# AWS C++ SDK has bad pkg-config
aws_s3 = dependency('aws-cpp-sdk-s3', required : false)
configdata.set('ENABLE_S3', aws_s3.found().to_int())
if aws_s3.found()
  aws_s3 = declare_dependency(
    include_directories: include_directories(aws_s3.get_variable('includedir')),
    link_args: [
      '-L' + aws_s3.get_variable('libdir'),
      '-laws-cpp-sdk-transfer',
      '-laws-cpp-sdk-s3',
      '-laws-cpp-sdk-core',
      '-laws-crt-cpp',
    ],
  ).as_system('system')
endif
deps_other += aws_s3

subdir('build-utils-meson/generate-header')

generated_headers = []
foreach header : [
  'schema.sql',
  'ca-specific-schema.sql',
]
  generated_headers += gen_header.process(header)
endforeach

busybox = find_program(get_option('sandbox-shell'), required : false)

if get_option('embedded-sandbox-shell')
  # This one goes in config.h
  # The path to busybox is passed as a -D flag when compiling this_library.
  # This solution is inherited from the old make buildsystem
  # TODO: do this differently?
  configdata.set('HAVE_EMBEDDED_SANDBOX_SHELL', 1)
  hexdump = find_program('hexdump', native : true)
  embedded_sandbox_shell_gen = custom_target(
    'embedded-sandbox-shell.gen.hh',
    command : [
      hexdump,
      '-v',
      '-e',
      '1/1 "0x%x," "\n"'
    ],
    input : busybox.full_path(),
    output : 'embedded-sandbox-shell.gen.hh',
    capture : true,
    feed : true,
  )
  generated_headers += embedded_sandbox_shell_gen
endif

config_h = configure_file(
  configuration : configdata,
  output : 'config-store.hh',
)

add_project_arguments(
  # TODO(Qyriad): Yes this is how the autoconf+Make system did it.
  # It would be nice for our headers to be idempotent instead.
  '-include', 'config-util.hh',
  '-include', 'config-store.hh',
  language : 'cpp',
)

subdir('build-utils-meson/diagnostics')

sources = files(
  'binary-cache-store.cc',
  'build-result.cc',
  'build/derivation-goal.cc',
  'build/drv-output-substitution-goal.cc',
  'build/entry-points.cc',
  'build/goal.cc',
  'build/substitution-goal.cc',
  'build/worker.cc',
  'builtins/buildenv.cc',
  'builtins/fetchurl.cc',
  'builtins/unpack-channel.cc',
  'common-protocol.cc',
  'common-ssh-store-config.cc',
  'content-address.cc',
  'daemon.cc',
  'derivations.cc',
  'derived-path-map.cc',
  'derived-path.cc',
  'downstream-placeholder.cc',
  'dummy-store.cc',
  'export-import.cc',
  'filetransfer.cc',
  'gc.cc',
  'globals.cc',
  'http-binary-cache-store.cc',
  'indirect-root-store.cc',
  'keys.cc',
  'legacy-ssh-store.cc',
  'local-binary-cache-store.cc',
  'local-fs-store.cc',
  'local-overlay-store.cc',
  'local-store.cc',
  'log-store.cc',
  'machines.cc',
  'make-content-addressed.cc',
  'misc.cc',
  'names.cc',
  'nar-accessor.cc',
  'nar-info-disk-cache.cc',
  'nar-info.cc',
  'optimise-store.cc',
  'outputs-spec.cc',
  'parsed-derivations.cc',
  'path-info.cc',
  'path-references.cc',
  'path-with-outputs.cc',
  'path.cc',
  'pathlocks.cc',
  'posix-fs-canonicalise.cc',
  'profiles.cc',
  'realisation.cc',
  'remote-fs-accessor.cc',
  'remote-store.cc',
  's3-binary-cache-store.cc',
  'serve-protocol-connection.cc',
  'serve-protocol.cc',
  'sqlite.cc',
  'ssh-store.cc',
  'ssh.cc',
  'store-api.cc',
  'store-reference.cc',
  'uds-remote-store.cc',
  'worker-protocol-connection.cc',
  'worker-protocol.cc',
)

include_dirs = [
  include_directories('.'),
  include_directories('build'),
]

headers = [config_h] + files(
  'binary-cache-store.hh',
  'build-result.hh',
  'build/derivation-goal.hh',
  'build/drv-output-substitution-goal.hh',
  'build/goal.hh',
  'build/substitution-goal.hh',
  'build/worker.hh',
  'builtins.hh',
  'builtins/buildenv.hh',
  'common-protocol-impl.hh',
  'common-protocol.hh',
  'common-ssh-store-config.hh',
  'content-address.hh',
  'daemon.hh',
  'derivations.hh',
  'derived-path-map.hh',
  'derived-path.hh',
  'downstream-placeholder.hh',
  'filetransfer.hh',
  'gc-store.hh',
  'globals.hh',
  'http-binary-cache-store.hh',
  'indirect-root-store.hh',
  'keys.hh',
  'legacy-ssh-store.hh',
  'length-prefixed-protocol-helper.hh',
  'local-binary-cache-store.hh',
  'local-fs-store.hh',
  'local-overlay-store.hh',
  'local-store.hh',
  'log-store.hh',
  'machines.hh',
  'make-content-addressed.hh',
  'names.hh',
  'nar-accessor.hh',
  'nar-info-disk-cache.hh',
  'nar-info.hh',
  'outputs-spec.hh',
  'parsed-derivations.hh',
  'path-info.hh',
  'path-references.hh',
  'path-regex.hh',
  'path-with-outputs.hh',
  'path.hh',
  'pathlocks.hh',
  'posix-fs-canonicalise.hh',
  'profiles.hh',
  'realisation.hh',
  'remote-fs-accessor.hh',
  'remote-store-connection.hh',
  'remote-store.hh',
  's3-binary-cache-store.hh',
  's3.hh',
  'ssh-store.hh',
  'serve-protocol-connection.hh',
  'serve-protocol-impl.hh',
  'serve-protocol.hh',
  'sqlite.hh',
  'ssh.hh',
  'store-api.hh',
  'store-cast.hh',
  'store-dir-config.hh',
  'store-reference.hh',
  'uds-remote-store.hh',
  'worker-protocol-connection.hh',
  'worker-protocol-impl.hh',
  'worker-protocol.hh',
)

if host_machine.system() == 'linux'
  subdir('linux')
endif

if host_machine.system() == 'windows'
  subdir('windows')
else
  subdir('unix')
endif

fs = import('fs')

prefix = get_option('prefix')
# For each of these paths, assume that it is relative to the prefix unless
# it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir).
path_opts = [
  # Meson built-ins.
  'datadir',
  'mandir',
  'libdir',
  'includedir',
  'libexecdir',
  # Homecooked Nix directories.
  'store-dir',
  'localstatedir',
  'log-dir',
]
# For your grepping pleasure, this loop sets the following variables that aren't mentioned
# literally above:
#   store_dir
#   localstatedir
#   log_dir
#   profile_dir
foreach optname : path_opts
  varname = optname.replace('-', '_')
  path = get_option(optname)
  if fs.is_absolute(path)
    set_variable(varname, path)
  else
    set_variable(varname, prefix / path)
  endif
endforeach

# sysconfdir doesn't get anything installed to directly, and is only used to
# tell Nix where to look for nix.conf, so it doesn't get appended to prefix.
sysconfdir = get_option('sysconfdir')
if not fs.is_absolute(sysconfdir)
  sysconfdir = '/' / sysconfdir
endif

lsof = find_program('lsof', required : false)

# Aside from prefix itself, each of these was made into an absolute path
# by joining it with prefix, unless it was already an absolute path
# (which is the default for store-dir, localstatedir, and log-dir).
cpp_str_defines = {
  'NIX_PREFIX':    prefix,
  'NIX_STORE_DIR': store_dir,
  'NIX_DATA_DIR':  datadir,
  'NIX_STATE_DIR': localstatedir / 'nix',
  'NIX_LOG_DIR':   log_dir,
  'NIX_CONF_DIR':  sysconfdir / 'nix',
  'NIX_MAN_DIR':   mandir,
}

if lsof.found()
  lsof_path = lsof.full_path()
else
  # Just look it up on the PATH
  lsof_path = 'lsof'
endif
cpp_str_defines += {
  'LSOF': lsof_path
}

if get_option('embedded-sandbox-shell')
  cpp_str_defines += {
    'SANDBOX_SHELL': '__embedded_sandbox_shell__'
  }
elif busybox.found()
  cpp_str_defines += {
    'SANDBOX_SHELL': busybox.full_path()
  }
endif

cpp_args = []

foreach name, value : cpp_str_defines
  cpp_args += [
    '-D' + name + '=' + '"' + value + '"'
  ]
endforeach

subdir('build-utils-meson/export-all-symbols')

this_library = library(
  'nixstore',
  generated_headers,
  sources,
  dependencies : deps_public + deps_private + deps_other,
  include_directories : include_dirs,
  cpp_args : cpp_args,
  link_args: linker_export_flags,
  prelink : true, # For C++ static initializers
  install : true,
)

install_headers(headers, subdir : 'nix', preserve_path : true)

libraries_private = []

extra_pkg_config_variables = {
  'storedir' : get_option('store-dir'),
}

# Working around https://github.com/mesonbuild/meson/issues/13584
if host_machine.system() != 'darwin'
  extra_pkg_config_variables += {
    'localstatedir' : get_option('localstatedir'),
  }
endif

subdir('build-utils-meson/export')
21
subprojects/libstore/meson.options
Normal file

@ -0,0 +1,21 @@
# vim: filetype=meson

option('embedded-sandbox-shell', type : 'boolean', value : false,
  description : 'include the sandbox shell in the Nix binary',
)

option('seccomp-sandboxing', type : 'feature',
  description : 'build support for seccomp sandboxing (recommended unless your arch doesn\'t support libseccomp, only relevant on Linux)',
)

option('sandbox-shell', type : 'string', value : 'busybox',
  description : 'path to a statically-linked shell to use as /bin/sh in sandboxes (usually busybox)',
)

option('store-dir', type : 'string', value : '/nix/store',
  description : 'path of the Nix store',
)

option('log-dir', type : 'string', value : '/nix/var/log/nix',
  description : 'path to store logs in for Nix',
)
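# Example (illustrative) configuration of these options:
#
#   meson setup build -Dstore-dir=/nix/store -Dseccomp-sandboxing=enabled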
472
subprojects/libstore/misc.cc
Normal file

@ -0,0 +1,472 @@
#include <unordered_set>

#include "derivations.hh"
#include "parsed-derivations.hh"
#include "globals.hh"
#include "store-api.hh"
#include "thread-pool.hh"
#include "realisation.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "closure.hh"
#include "filetransfer.hh"
#include "strings.hh"

namespace nix {

void Store::computeFSClosure(const StorePathSet & startPaths,
    StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
    std::function<std::set<StorePath>(const StorePath & path, std::future<ref<const ValidPathInfo>> &)> queryDeps;
    if (flipDirection)
        queryDeps = [&](const StorePath & path,
                        std::future<ref<const ValidPathInfo>> & fut) {
            StorePathSet res;
            StorePathSet referrers;
            queryReferrers(path, referrers);
            for (auto & ref : referrers)
                if (ref != path)
                    res.insert(ref);

            if (includeOutputs)
                for (auto & i : queryValidDerivers(path))
                    res.insert(i);

            if (includeDerivers && path.isDerivation())
                for (auto & [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
                    if (maybeOutPath && isValidPath(*maybeOutPath))
                        res.insert(*maybeOutPath);
            return res;
        };
    else
        queryDeps = [&](const StorePath & path,
                        std::future<ref<const ValidPathInfo>> & fut) {
            StorePathSet res;
            auto info = fut.get();
            for (auto & ref : info->references)
                if (ref != path)
                    res.insert(ref);

            if (includeOutputs && path.isDerivation())
                for (auto & [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
                    if (maybeOutPath && isValidPath(*maybeOutPath))
                        res.insert(*maybeOutPath);

            if (includeDerivers && info->deriver && isValidPath(*info->deriver))
                res.insert(*info->deriver);
            return res;
        };

    computeClosure<StorePath>(
        startPaths, paths_,
        [&](const StorePath & path,
            std::function<void(std::promise<std::set<StorePath>> &)>
                processEdges) {
            std::promise<std::set<StorePath>> promise;
            std::function<void(std::future<ref<const ValidPathInfo>>)>
                getDependencies =
                    [&](std::future<ref<const ValidPathInfo>> fut) {
                        try {
                            promise.set_value(queryDeps(path, fut));
                        } catch (...) {
                            promise.set_exception(std::current_exception());
                        }
                    };
            queryPathInfo(path, getDependencies);
            processEdges(promise);
        });
}

void Store::computeFSClosure(const StorePath & startPath,
    StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
    StorePathSet paths;
    paths.insert(startPath);
    computeFSClosure(paths, paths_, flipDirection, includeOutputs, includeDerivers);
}


const ContentAddress * getDerivationCA(const BasicDerivation & drv)
{
    auto out = drv.outputs.find("out");
    if (out == drv.outputs.end())
        return nullptr;
    if (auto dof = std::get_if<DerivationOutput::CAFixed>(&out->second.raw)) {
        return &dof->ca;
    }
    return nullptr;
}

void Store::queryMissing(const std::vector<DerivedPath> & targets,
    StorePathSet & willBuild_, StorePathSet & willSubstitute_, StorePathSet & unknown_,
    uint64_t & downloadSize_, uint64_t & narSize_)
{
    Activity act(*logger, lvlDebug, actUnknown, "querying info about missing paths");

    downloadSize_ = narSize_ = 0;

    // FIXME: make async.
    ThreadPool pool(fileTransferSettings.httpConnections);

    struct State
    {
        std::unordered_set<std::string> done;
        StorePathSet & unknown, & willSubstitute, & willBuild;
        uint64_t & downloadSize;
        uint64_t & narSize;
    };
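    /* Per-derivation substitution state: `left` counts the outputs still
       being checked; once it reaches zero, all `outPaths` are enqueued
       for substitution. */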
    struct DrvState
    {
        size_t left;
        bool done = false;
        StorePathSet outPaths;
        DrvState(size_t left) : left(left) { }
    };

    Sync<State> state_(State{{}, unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});

    std::function<void(DerivedPath)> doPath;
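    /* Recursively walk the (possibly nested, for dynamic derivations)
       tree of input derivations, enqueueing a `DerivedPath::Built` for
       each requested output set. */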
std::function<void(ref<SingleDerivedPath>, const DerivedPathMap<StringSet>::ChildNode &)> enqueueDerivedPaths;
|
||||
|
||||
enqueueDerivedPaths = [&](ref<SingleDerivedPath> inputDrv, const DerivedPathMap<StringSet>::ChildNode & inputNode) {
|
||||
if (!inputNode.value.empty())
|
||||
pool.enqueue(std::bind(doPath, DerivedPath::Built { inputDrv, inputNode.value }));
|
||||
for (const auto & [outputName, childNode] : inputNode.childMap)
|
||||
enqueueDerivedPaths(
|
||||
make_ref<SingleDerivedPath>(SingleDerivedPath::Built { inputDrv, outputName }),
|
||||
childNode);
|
||||
};
|
||||
|
||||
auto mustBuildDrv = [&](const StorePath & drvPath, const Derivation & drv) {
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->willBuild.insert(drvPath);
|
||||
}
|
||||
|
||||
for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map) {
|
||||
enqueueDerivedPaths(makeConstantStorePathRef(inputDrv), inputNode);
|
||||
}
|
||||
};
|
||||
|
||||
auto checkOutput = [&](
|
||||
const StorePath & drvPath, ref<Derivation> drv, const StorePath & outPath, ref<Sync<DrvState>> drvState_)
|
||||
{
|
||||
if (drvState_->lock()->done) return;
|
||||
|
||||
SubstitutablePathInfos infos;
|
||||
auto * cap = getDerivationCA(*drv);
|
||||
querySubstitutablePathInfos({
|
||||
{
|
||||
outPath,
|
||||
cap ? std::optional { *cap } : std::nullopt,
|
||||
},
|
||||
}, infos);
|
||||
|
||||
if (infos.empty()) {
|
||||
drvState_->lock()->done = true;
|
||||
mustBuildDrv(drvPath, *drv);
|
||||
} else {
|
||||
{
|
||||
auto drvState(drvState_->lock());
|
||||
if (drvState->done) return;
|
||||
assert(drvState->left);
|
||||
drvState->left--;
|
||||
drvState->outPaths.insert(outPath);
|
||||
if (!drvState->left) {
|
||||
for (auto & path : drvState->outPaths)
|
||||
pool.enqueue(std::bind(doPath, DerivedPath::Opaque { path } ));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
doPath = [&](const DerivedPath & req) {
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
if (!state->done.insert(req.to_string(*this)).second) return;
|
||||
}
|
||||
|
||||
std::visit(overloaded {
|
||||
[&](const DerivedPath::Built & bfd) {
|
||||
auto drvPathP = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath);
|
||||
if (!drvPathP) {
|
||||
// TODO make work in this case.
|
||||
warn("Ignoring dynamic derivation %s while querying missing paths; not yet implemented", bfd.drvPath->to_string(*this));
|
||||
return;
|
||||
}
|
||||
auto & drvPath = drvPathP->path;
|
||||
|
||||
if (!isValidPath(drvPath)) {
|
||||
// FIXME: we could try to substitute the derivation.
|
||||
auto state(state_.lock());
|
||||
state->unknown.insert(drvPath);
|
||||
return;
|
||||
}
|
||||
|
||||
StorePathSet invalid;
|
||||
/* true for regular derivations, and CA derivations for which we
|
||||
have a trust mapping for all wanted outputs. */
|
||||
auto knownOutputPaths = true;
|
||||
for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(drvPath)) {
|
||||
if (!pathOpt) {
|
||||
knownOutputPaths = false;
|
||||
break;
|
||||
}
|
||||
if (bfd.outputs.contains(outputName) && !isValidPath(*pathOpt))
|
||||
invalid.insert(*pathOpt);
|
||||
}
|
||||
if (knownOutputPaths && invalid.empty()) return;
|
||||
|
||||
auto drv = make_ref<Derivation>(derivationFromPath(drvPath));
|
||||
ParsedDerivation parsedDrv(StorePath(drvPath), *drv);
|
||||
|
||||
if (!knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
|
||||
experimentalFeatureSettings.require(Xp::CaDerivations);
|
||||
|
||||
// If there are unknown output paths, attempt to find if the
|
||||
// paths are known to substituters through a realisation.
|
||||
auto outputHashes = staticOutputHashes(*this, *drv);
|
||||
knownOutputPaths = true;
|
||||
|
||||
for (auto [outputName, hash] : outputHashes) {
|
||||
if (!bfd.outputs.contains(outputName))
|
||||
continue;
|
||||
|
||||
bool found = false;
|
||||
for (auto &sub : getDefaultSubstituters()) {
|
||||
auto realisation = sub->queryRealisation({hash, outputName});
|
||||
if (!realisation)
|
||||
continue;
|
||||
found = true;
|
||||
if (!isValidPath(realisation->outPath))
|
||||
invalid.insert(realisation->outPath);
|
||||
break;
|
||||
}
|
||||
if (!found) {
|
||||
// Some paths did not have a realisation, this must be built.
|
||||
knownOutputPaths = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
|
||||
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
|
||||
for (auto & output : invalid)
|
||||
pool.enqueue(std::bind(checkOutput, drvPath, drv, output, drvState));
|
||||
} else
|
||||
mustBuildDrv(drvPath, *drv);
|
||||
|
||||
},
|
||||
[&](const DerivedPath::Opaque & bo) {
|
||||
|
||||
if (isValidPath(bo.path)) return;
|
||||
|
||||
SubstitutablePathInfos infos;
|
||||
querySubstitutablePathInfos({{bo.path, std::nullopt}}, infos);
|
||||
|
||||
if (infos.empty()) {
|
||||
auto state(state_.lock());
|
||||
state->unknown.insert(bo.path);
|
||||
return;
|
||||
}
|
||||
|
||||
auto info = infos.find(bo.path);
|
||||
assert(info != infos.end());
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->willSubstitute.insert(bo.path);
|
||||
state->downloadSize += info->second.downloadSize;
|
||||
state->narSize += info->second.narSize;
|
||||
}
|
||||
|
||||
for (auto & ref : info->second.references)
|
||||
pool.enqueue(std::bind(doPath, DerivedPath::Opaque { ref }));
|
||||
},
|
||||
}, req.raw());
|
||||
};
|
||||
|
||||
for (auto & path : targets)
|
||||
pool.enqueue(std::bind(doPath, path));
|
||||
|
||||
pool.process();
|
||||
}
|
||||
|
||||
|
||||
StorePaths Store::topoSortPaths(const StorePathSet & paths)
|
||||
{
|
||||
return topoSort(paths,
|
||||
{[&](const StorePath & path) {
|
||||
try {
|
||||
return queryPathInfo(path)->references;
|
||||
} catch (InvalidPath &) {
|
||||
return StorePathSet();
|
||||
}
|
||||
}},
|
||||
{[&](const StorePath & path, const StorePath & parent) {
|
||||
return BuildError(
|
||||
"cycle detected in the references of '%s' from '%s'",
|
||||
printStorePath(path),
|
||||
printStorePath(parent));
|
||||
}});
|
||||
}
|
||||
|
||||
std::map<DrvOutput, StorePath> drvOutputReferences(
|
||||
const std::set<Realisation> & inputRealisations,
|
||||
const StorePathSet & pathReferences)
|
||||
{
|
||||
std::map<DrvOutput, StorePath> res;
|
||||
|
||||
for (const auto & input : inputRealisations) {
|
||||
if (pathReferences.count(input.outPath)) {
|
||||
res.insert({input.id, input.outPath});
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
std::map<DrvOutput, StorePath> drvOutputReferences(
|
||||
Store & store,
|
||||
const Derivation & drv,
|
||||
const StorePath & outputPath,
|
||||
Store * evalStore_)
|
||||
{
|
||||
auto & evalStore = evalStore_ ? *evalStore_ : store;
|
||||
|
||||
std::set<Realisation> inputRealisations;
|
||||
|
||||
std::function<void(const StorePath &, const DerivedPathMap<StringSet>::ChildNode &)> accumRealisations;
|
||||
|
||||
accumRealisations = [&](const StorePath & inputDrv, const DerivedPathMap<StringSet>::ChildNode & inputNode) {
|
||||
if (!inputNode.value.empty()) {
|
||||
auto outputHashes =
|
||||
staticOutputHashes(evalStore, evalStore.readDerivation(inputDrv));
|
||||
for (const auto & outputName : inputNode.value) {
|
||||
auto outputHash = get(outputHashes, outputName);
|
||||
if (!outputHash)
|
||||
throw Error(
|
||||
"output '%s' of derivation '%s' isn't realised", outputName,
|
||||
store.printStorePath(inputDrv));
|
||||
auto thisRealisation = store.queryRealisation(
|
||||
DrvOutput{*outputHash, outputName});
|
||||
if (!thisRealisation)
|
||||
throw Error(
|
||||
"output '%s' of derivation '%s' isn’t built", outputName,
|
||||
store.printStorePath(inputDrv));
|
||||
inputRealisations.insert(*thisRealisation);
|
||||
}
|
||||
}
|
||||
if (!inputNode.value.empty()) {
|
||||
auto d = makeConstantStorePathRef(inputDrv);
|
||||
for (const auto & [outputName, childNode] : inputNode.childMap) {
|
||||
                SingleDerivedPath next = SingleDerivedPath::Built { d, outputName };
                accumRealisations(
                    // TODO deep resolutions for dynamic derivations, issue #8947, would go here.
                    resolveDerivedPath(store, next, evalStore_),
                    childNode);
            }
        }
    };

    for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map)
        accumRealisations(inputDrv, inputNode);

    auto info = store.queryPathInfo(outputPath);

    return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references);
}


OutputPathMap resolveDerivedPath(Store & store, const DerivedPath::Built & bfd, Store * evalStore_)
{
    auto drvPath = resolveDerivedPath(store, *bfd.drvPath, evalStore_);

    auto outputsOpt_ = store.queryPartialDerivationOutputMap(drvPath, evalStore_);

    auto outputsOpt = std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            // Keep all outputs
            return std::move(outputsOpt_);
        },
        [&](const OutputsSpec::Names & names) {
            // Get just those mentioned by name
            std::map<std::string, std::optional<StorePath>> outputsOpt;
            for (auto & output : names) {
                auto * pOutputPathOpt = get(outputsOpt_, output);
                if (!pOutputPathOpt)
                    throw Error(
                        "the derivation '%s' doesn't have an output named '%s'",
                        bfd.drvPath->to_string(store), output);
                outputsOpt.insert_or_assign(output, std::move(*pOutputPathOpt));
            }
            return outputsOpt;
        },
    }, bfd.outputs.raw);

    OutputPathMap outputs;
    for (auto & [outputName, outputPathOpt] : outputsOpt) {
        if (!outputPathOpt)
            throw MissingRealisation(bfd.drvPath->to_string(store), outputName);
        auto & outputPath = *outputPathOpt;
        outputs.insert_or_assign(outputName, outputPath);
    }
    return outputs;
}


StorePath resolveDerivedPath(Store & store, const SingleDerivedPath & req, Store * evalStore_)
{
    auto & evalStore = evalStore_ ? *evalStore_ : store;

    return std::visit(overloaded {
        [&](const SingleDerivedPath::Opaque & bo) {
            return bo.path;
        },
        [&](const SingleDerivedPath::Built & bfd) {
            auto drvPath = resolveDerivedPath(store, *bfd.drvPath, evalStore_);
            auto outputPaths = evalStore.queryPartialDerivationOutputMap(drvPath, evalStore_);
            if (outputPaths.count(bfd.output) == 0)
                throw Error("derivation '%s' does not have an output named '%s'",
                    store.printStorePath(drvPath), bfd.output);
            auto & optPath = outputPaths.at(bfd.output);
            if (!optPath)
                throw MissingRealisation(bfd.drvPath->to_string(store), bfd.output);
            return *optPath;
        },
    }, req.raw());
}


OutputPathMap resolveDerivedPath(Store & store, const DerivedPath::Built & bfd)
{
    auto drvPath = resolveDerivedPath(store, *bfd.drvPath);
    auto outputMap = store.queryDerivationOutputMap(drvPath);
    auto outputsLeft = std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            return StringSet {};
        },
        [&](const OutputsSpec::Names & names) {
            return static_cast<StringSet>(names);
        },
    }, bfd.outputs.raw);
    for (auto iter = outputMap.begin(); iter != outputMap.end();) {
        auto & outputName = iter->first;
        if (bfd.outputs.contains(outputName)) {
            outputsLeft.erase(outputName);
            ++iter;
        } else {
            iter = outputMap.erase(iter);
        }
    }
    if (!outputsLeft.empty())
        throw Error("derivation '%s' does not have the outputs %s",
            store.printStorePath(drvPath),
            concatStringsSep(", ", quoteStrings(std::get<OutputsSpec::Names>(bfd.outputs.raw))));
    return outputMap;
}


}
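
A minimal caller sketch for the resolvers above. The function name and the idea of printing every output are illustrative, not part of this change; it assumes `makeConstantStorePathRef` from derived-path.hh.

#include "store-api.hh"
#include "derived-path.hh"

void printOutputs(nix::Store & store, const nix::StorePath & drvPath)
{
    using namespace nix;
    DerivedPath::Built bfd {
        .drvPath = makeConstantStorePathRef(drvPath),
        .outputs = OutputsSpec::All { },
    };
    // Resolves every output; throws MissingRealisation if one is unbuilt.
    for (auto & [name, outPath] : resolveDerivedPath(store, bfd))
        printInfo("%s -> %s", name, store.printStorePath(outPath));
}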
18
subprojects/libstore/mounted-ssh-store.md
Normal file
@ -0,0 +1,18 @@
R"(
|
||||
|
||||
**Store URL format**: `mounted-ssh-ng://[username@]hostname`
|
||||
|
||||
Experimental store type that allows full access to a Nix store on a remote machine,
|
||||
and additionally requires that store be mounted in the local file system.
|
||||
|
||||
The mounting of that store is not managed by Nix, and must by managed manually.
|
||||
It could be accomplished with SSHFS or NFS, for example.
|
||||
|
||||
The local file system is used to optimize certain operations.
|
||||
For example, rather than serializing Nix archives and sending over the Nix channel,
|
||||
we can directly access the file system data via the mount-point.
|
||||
|
||||
The local file system is also used to make certain operations possible that wouldn't otherwise be.
|
||||
For example, persistent GC roots can be created if they reside on the same file system as the remote store:
|
||||
the remote side will create the symlinks necessary to avoid race conditions.
|
||||
)"
122
subprojects/libstore/names.cc
Normal file
@ -0,0 +1,122 @@
#include "names.hh"
|
||||
#include "util.hh"
|
||||
|
||||
#include <regex>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
struct Regex
|
||||
{
|
||||
std::regex regex;
|
||||
};
|
||||
|
||||
|
||||
DrvName::DrvName()
|
||||
{
|
||||
name = "";
|
||||
}
|
||||
|
||||
|
||||
/* Parse a derivation name. The `name' part of a derivation name is
|
||||
everything up to but not including the first dash *not* followed by
|
||||
a letter. The `version' part is the rest (excluding the separating
|
||||
dash). E.g., `apache-httpd-2.0.48' is parsed to (`apache-httpd',
|
||||
'2.0.48'). */
|
||||
DrvName::DrvName(std::string_view s) : hits(0)
|
||||
{
|
||||
name = fullName = std::string(s);
|
||||
for (unsigned int i = 0; i < s.size(); ++i) {
|
||||
/* !!! isalpha/isdigit are affected by the locale. */
|
||||
if (s[i] == '-' && i + 1 < s.size() && !isalpha(s[i + 1])) {
|
||||
name = s.substr(0, i);
|
||||
version = s.substr(i + 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DrvName::~DrvName()
|
||||
{ }
|
||||
|
||||
|
||||
bool DrvName::matches(const DrvName & n)
|
||||
{
|
||||
if (name != "*") {
|
||||
if (!regex) {
|
||||
regex = std::make_unique<Regex>();
|
||||
regex->regex = std::regex(name, std::regex::extended);
|
||||
}
|
||||
if (!std::regex_match(n.name, regex->regex)) return false;
|
||||
}
|
||||
if (version != "" && version != n.version) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
std::string_view nextComponent(std::string_view::const_iterator & p,
|
||||
const std::string_view::const_iterator end)
|
||||
{
|
||||
/* Skip any dots and dashes (component separators). */
|
||||
while (p != end && (*p == '.' || *p == '-')) ++p;
|
||||
|
||||
if (p == end) return "";
|
||||
|
||||
/* If the first character is a digit, consume the longest sequence
|
||||
of digits. Otherwise, consume the longest sequence of
|
||||
non-digit, non-separator characters. */
|
||||
auto s = p;
|
||||
if (isdigit(*p))
|
||||
while (p != end && isdigit(*p)) p++;
|
||||
else
|
||||
while (p != end && (!isdigit(*p) && *p != '.' && *p != '-'))
|
||||
p++;
|
||||
|
||||
return {s, size_t(p - s)};
|
||||
}
|
||||
|
||||
|
||||
static bool componentsLT(const std::string_view c1, const std::string_view c2)
|
||||
{
|
||||
auto n1 = string2Int<int>(c1);
|
||||
auto n2 = string2Int<int>(c2);
|
||||
|
||||
if (n1 && n2) return *n1 < *n2;
|
||||
else if (c1 == "" && n2) return true;
|
||||
else if (c1 == "pre" && c2 != "pre") return true;
|
||||
else if (c2 == "pre") return false;
|
||||
/* Assume that `2.3a' < `2.3.1'. */
|
||||
else if (n2) return true;
|
||||
else if (n1) return false;
|
||||
else return c1 < c2;
|
||||
}
|
||||
|
||||
|
||||
std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2)
|
||||
{
|
||||
auto p1 = v1.begin();
|
||||
auto p2 = v2.begin();
|
||||
|
||||
while (p1 != v1.end() || p2 != v2.end()) {
|
||||
auto c1 = nextComponent(p1, v1.end());
|
||||
auto c2 = nextComponent(p2, v2.end());
|
||||
if (componentsLT(c1, c2)) return std::strong_ordering::less;
|
||||
else if (componentsLT(c2, c1)) return std::strong_ordering::greater;
|
||||
}
|
||||
|
||||
return std::strong_ordering::equal;
|
||||
}
|
||||
|
||||
|
||||
DrvNames drvNamesFromArgs(const Strings & opArgs)
|
||||
{
|
||||
DrvNames result;
|
||||
for (auto & i : opArgs)
|
||||
result.emplace_back(i);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
}
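
A small usage sketch of the parsing and ordering rules above, assuming it is compiled against names.hh; the particular version strings are illustrative.

#include "names.hh"
#include <cassert>

int main()
{
    nix::DrvName d("apache-httpd-2.0.48");
    assert(d.name == "apache-httpd" && d.version == "2.0.48");

    // "pre" sorts before a release, and `2.3a' < `2.3.1' as promised above.
    assert(nix::compareVersions("2.3pre1", "2.3") == std::strong_ordering::less);
    assert(nix::compareVersions("2.3a", "2.3.1") == std::strong_ordering::less);
    assert(nix::compareVersions("2.10", "2.9") == std::strong_ordering::greater);
}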
36
subprojects/libstore/names.hh
Normal file
@ -0,0 +1,36 @@
#pragma once
///@file

#include <memory>

#include "types.hh"

namespace nix {

struct Regex;

struct DrvName
{
    std::string fullName;
    std::string name;
    std::string version;
    unsigned int hits;

    DrvName();
    DrvName(std::string_view s);
    ~DrvName();

    bool matches(const DrvName & n);

private:
    std::unique_ptr<Regex> regex;
};

typedef std::list<DrvName> DrvNames;

std::string_view nextComponent(std::string_view::const_iterator & p,
    const std::string_view::const_iterator end);
std::strong_ordering compareVersions(const std::string_view v1, const std::string_view v2);
DrvNames drvNamesFromArgs(const Strings & opArgs);

}
300
subprojects/libstore/nar-accessor.cc
Normal file
@ -0,0 +1,300 @@
#include "nar-accessor.hh"
|
||||
#include "archive.hh"
|
||||
|
||||
#include <map>
|
||||
#include <stack>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct NarMember
|
||||
{
|
||||
SourceAccessor::Stat stat;
|
||||
|
||||
std::string target;
|
||||
|
||||
/* If this is a directory, all the children of the directory. */
|
||||
std::map<std::string, NarMember> children;
|
||||
};
|
||||
|
||||
struct NarMemberConstructor : CreateRegularFileSink
|
||||
{
|
||||
private:
|
||||
|
||||
NarMember & narMember;
|
||||
|
||||
uint64_t & pos;
|
||||
|
||||
public:
|
||||
|
||||
NarMemberConstructor(NarMember & nm, uint64_t & pos)
|
||||
: narMember(nm), pos(pos)
|
||||
{ }
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
narMember.stat.isExecutable = true;
|
||||
}
|
||||
|
||||
void preallocateContents(uint64_t size) override
|
||||
{
|
||||
narMember.stat.fileSize = size;
|
||||
narMember.stat.narOffset = pos;
|
||||
}
|
||||
|
||||
void operator () (std::string_view data) override
|
||||
{ }
|
||||
};
|
||||
|
||||
struct NarAccessor : public SourceAccessor
|
||||
{
|
||||
std::optional<const std::string> nar;
|
||||
|
||||
GetNarBytes getNarBytes;
|
||||
|
||||
NarMember root;
|
||||
|
||||
struct NarIndexer : FileSystemObjectSink, Source
|
||||
{
|
||||
NarAccessor & acc;
|
||||
Source & source;
|
||||
|
||||
std::stack<NarMember *> parents;
|
||||
|
||||
bool isExec = false;
|
||||
|
||||
uint64_t pos = 0;
|
||||
|
||||
NarIndexer(NarAccessor & acc, Source & source)
|
||||
: acc(acc), source(source)
|
||||
{ }
|
||||
|
||||
NarMember & createMember(const CanonPath & path, NarMember member)
|
||||
{
|
||||
size_t level = 0;
|
||||
for (auto _ : path) {
|
||||
(void)_;
|
||||
++level;
|
||||
}
|
||||
|
||||
while (parents.size() > level) parents.pop();
|
||||
|
||||
if (parents.empty()) {
|
||||
acc.root = std::move(member);
|
||||
parents.push(&acc.root);
|
||||
return acc.root;
|
||||
} else {
|
||||
if (parents.top()->stat.type != Type::tDirectory)
|
||||
throw Error("NAR file missing parent directory of path '%s'", path);
|
||||
auto result = parents.top()->children.emplace(*path.baseName(), std::move(member));
|
||||
auto & ref = result.first->second;
|
||||
parents.push(&ref);
|
||||
return ref;
|
||||
}
|
||||
}
|
||||
|
||||
void createDirectory(const CanonPath & path) override
|
||||
{
|
||||
createMember(path, NarMember{ .stat = {
|
||||
.type = Type::tDirectory,
|
||||
.fileSize = 0,
|
||||
.isExecutable = false,
|
||||
.narOffset = 0
|
||||
} });
|
||||
}
|
||||
|
||||
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
|
||||
{
|
||||
auto & nm = createMember(path, NarMember{ .stat = {
|
||||
.type = Type::tRegular,
|
||||
.fileSize = 0,
|
||||
.isExecutable = false,
|
||||
.narOffset = 0
|
||||
} });
|
||||
NarMemberConstructor nmc { nm, pos };
|
||||
func(nmc);
|
||||
}
|
||||
|
||||
void createSymlink(const CanonPath & path, const std::string & target) override
|
||||
{
|
||||
createMember(path,
|
||||
NarMember{
|
||||
.stat = {.type = Type::tSymlink},
|
||||
.target = target});
|
||||
}
|
||||
|
||||
size_t read(char * data, size_t len) override
|
||||
{
|
||||
auto n = source.read(data, len);
|
||||
pos += n;
|
||||
return n;
|
||||
}
|
||||
};
|
||||
|
||||
NarAccessor(std::string && _nar) : nar(_nar)
|
||||
{
|
||||
StringSource source(*nar);
|
||||
NarIndexer indexer(*this, source);
|
||||
parseDump(indexer, indexer);
|
||||
}
|
||||
|
||||
NarAccessor(Source & source)
|
||||
{
|
||||
NarIndexer indexer(*this, source);
|
||||
parseDump(indexer, indexer);
|
||||
}
|
||||
|
||||
NarAccessor(const std::string & listing, GetNarBytes getNarBytes)
|
||||
: getNarBytes(getNarBytes)
|
||||
{
|
||||
using json = nlohmann::json;
|
||||
|
||||
std::function<void(NarMember &, json &)> recurse;
|
||||
|
||||
recurse = [&](NarMember & member, json & v) {
|
||||
std::string type = v["type"];
|
||||
|
||||
if (type == "directory") {
|
||||
member.stat = {.type = Type::tDirectory};
|
||||
for (const auto &[name, function] : v["entries"].items()) {
|
||||
recurse(member.children[name], function);
|
||||
}
|
||||
} else if (type == "regular") {
|
||||
member.stat = {
|
||||
.type = Type::tRegular,
|
||||
.fileSize = v["size"],
|
||||
.isExecutable = v.value("executable", false),
|
||||
.narOffset = v["narOffset"]
|
||||
};
|
||||
} else if (type == "symlink") {
|
||||
member.stat = {.type = Type::tSymlink};
|
||||
member.target = v.value("target", "");
|
||||
} else return;
|
||||
};
|
||||
|
||||
json v = json::parse(listing);
|
||||
recurse(root, v);
|
||||
}
|
||||
|
||||
NarMember * find(const CanonPath & path)
|
||||
{
|
||||
NarMember * current = &root;
|
||||
|
||||
for (const auto & i : path) {
|
||||
if (current->stat.type != Type::tDirectory) return nullptr;
|
||||
auto child = current->children.find(std::string(i));
|
||||
if (child == current->children.end()) return nullptr;
|
||||
current = &child->second;
|
||||
}
|
||||
|
||||
return current;
|
||||
}
|
||||
|
||||
NarMember & get(const CanonPath & path) {
|
||||
auto result = find(path);
|
||||
if (!result)
|
||||
throw Error("NAR file does not contain path '%1%'", path);
|
||||
return *result;
|
||||
}
|
||||
|
||||
std::optional<Stat> maybeLstat(const CanonPath & path) override
|
||||
{
|
||||
auto i = find(path);
|
||||
if (!i)
|
||||
return std::nullopt;
|
||||
return i->stat;
|
||||
}
|
||||
|
||||
DirEntries readDirectory(const CanonPath & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
|
||||
if (i.stat.type != Type::tDirectory)
|
||||
throw Error("path '%1%' inside NAR file is not a directory", path);
|
||||
|
||||
DirEntries res;
|
||||
for (const auto & child : i.children)
|
||||
res.insert_or_assign(child.first, std::nullopt);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string readFile(const CanonPath & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
if (i.stat.type != Type::tRegular)
|
||||
throw Error("path '%1%' inside NAR file is not a regular file", path);
|
||||
|
||||
if (getNarBytes) return getNarBytes(*i.stat.narOffset, *i.stat.fileSize);
|
||||
|
||||
assert(nar);
|
||||
return std::string(*nar, *i.stat.narOffset, *i.stat.fileSize);
|
||||
}
|
||||
|
||||
std::string readLink(const CanonPath & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
if (i.stat.type != Type::tSymlink)
|
||||
throw Error("path '%1%' inside NAR file is not a symlink", path);
|
||||
return i.target;
|
||||
}
|
||||
};
|
||||
|
||||
ref<SourceAccessor> makeNarAccessor(std::string && nar)
|
||||
{
|
||||
return make_ref<NarAccessor>(std::move(nar));
|
||||
}
|
||||
|
||||
ref<SourceAccessor> makeNarAccessor(Source & source)
|
||||
{
|
||||
return make_ref<NarAccessor>(source);
|
||||
}
|
||||
|
||||
ref<SourceAccessor> makeLazyNarAccessor(const std::string & listing,
|
||||
GetNarBytes getNarBytes)
|
||||
{
|
||||
return make_ref<NarAccessor>(listing, getNarBytes);
|
||||
}
|
||||
|
||||
using nlohmann::json;
|
||||
json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse)
|
||||
{
|
||||
auto st = accessor->lstat(path);
|
||||
|
||||
json obj = json::object();
|
||||
|
||||
switch (st.type) {
|
||||
case SourceAccessor::Type::tRegular:
|
||||
obj["type"] = "regular";
|
||||
if (st.fileSize)
|
||||
obj["size"] = *st.fileSize;
|
||||
if (st.isExecutable)
|
||||
obj["executable"] = true;
|
||||
if (st.narOffset && *st.narOffset)
|
||||
obj["narOffset"] = *st.narOffset;
|
||||
break;
|
||||
case SourceAccessor::Type::tDirectory:
|
||||
obj["type"] = "directory";
|
||||
{
|
||||
obj["entries"] = json::object();
|
||||
json &res2 = obj["entries"];
|
||||
for (const auto & [name, type] : accessor->readDirectory(path)) {
|
||||
if (recurse) {
|
||||
res2[name] = listNar(accessor, path / name, true);
|
||||
} else
|
||||
res2[name] = json::object();
|
||||
}
|
||||
}
|
||||
break;
|
||||
case SourceAccessor::Type::tSymlink:
|
||||
obj["type"] = "symlink";
|
||||
obj["target"] = accessor->readLink(path);
|
||||
break;
|
||||
case SourceAccessor::Type::tMisc:
|
||||
assert(false); // cannot happen for NARs
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
}
40
subprojects/libstore/nar-accessor.hh
Normal file
@ -0,0 +1,40 @@
#pragma once
///@file

#include "source-accessor.hh"

#include <functional>

#include <nlohmann/json_fwd.hpp>

namespace nix {

struct Source;

/**
 * Return an object that provides access to the contents of a NAR
 * file.
 */
ref<SourceAccessor> makeNarAccessor(std::string && nar);

ref<SourceAccessor> makeNarAccessor(Source & source);

/**
 * Create a NAR accessor from a NAR listing (in the format produced by
 * listNar()). The callback getNarBytes(offset, length) is used by the
 * readFile() method of the accessor to get the contents of files
 * inside the NAR.
 */
using GetNarBytes = std::function<std::string(uint64_t, uint64_t)>;

ref<SourceAccessor> makeLazyNarAccessor(
    const std::string & listing,
    GetNarBytes getNarBytes);

/**
 * Write a JSON representation of the contents of a NAR (except file
 * contents).
 */
nlohmann::json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse);

}
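
A sketch of indexing a NAR in memory and dumping its listing with the API above; it assumes a NAR blob is already available in `narBytes`.

#include "nar-accessor.hh"
#include <nlohmann/json.hpp>
#include <iostream>

void dumpListing(std::string narBytes)
{
    auto accessor = nix::makeNarAccessor(std::move(narBytes));
    // Root listing, recursing into subdirectories; file contents are omitted.
    std::cout << nix::listNar(accessor, nix::CanonPath::root, true).dump(2) << "\n";
}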
402
subprojects/libstore/nar-info-disk-cache.cc
Normal file
@ -0,0 +1,402 @@
#include "nar-info-disk-cache.hh"
|
||||
#include "users.hh"
|
||||
#include "sync.hh"
|
||||
#include "sqlite.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#include <sqlite3.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include "strings.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
static const char * schema = R"sql(
|
||||
|
||||
create table if not exists BinaryCaches (
|
||||
id integer primary key autoincrement not null,
|
||||
url text unique not null,
|
||||
timestamp integer not null,
|
||||
storeDir text not null,
|
||||
wantMassQuery integer not null,
|
||||
priority integer not null
|
||||
);
|
||||
|
||||
create table if not exists NARs (
|
||||
cache integer not null,
|
||||
hashPart text not null,
|
||||
namePart text,
|
||||
url text,
|
||||
compression text,
|
||||
fileHash text,
|
||||
fileSize integer,
|
||||
narHash text,
|
||||
narSize integer,
|
||||
refs text,
|
||||
deriver text,
|
||||
sigs text,
|
||||
ca text,
|
||||
timestamp integer not null,
|
||||
present integer not null,
|
||||
primary key (cache, hashPart),
|
||||
foreign key (cache) references BinaryCaches(id) on delete cascade
|
||||
);
|
||||
|
||||
create table if not exists Realisations (
|
||||
cache integer not null,
|
||||
outputId text not null,
|
||||
content blob, -- Json serialisation of the realisation, or null if the realisation is absent
|
||||
timestamp integer not null,
|
||||
primary key (cache, outputId),
|
||||
foreign key (cache) references BinaryCaches(id) on delete cascade
|
||||
);
|
||||
|
||||
create table if not exists LastPurge (
|
||||
dummy text primary key,
|
||||
value integer
|
||||
);
|
||||
|
||||
)sql";
|
||||
|
||||
class NarInfoDiskCacheImpl : public NarInfoDiskCache
|
||||
{
|
||||
public:
|
||||
|
||||
/* How often to purge expired entries from the cache. */
|
||||
const int purgeInterval = 24 * 3600;
|
||||
|
||||
/* How long to cache binary cache info (i.e. /nix-cache-info) */
|
||||
const int cacheInfoTtl = 7 * 24 * 3600;
|
||||
|
||||
struct Cache
|
||||
{
|
||||
int id;
|
||||
Path storeDir;
|
||||
bool wantMassQuery;
|
||||
int priority;
|
||||
};
|
||||
|
||||
struct State
|
||||
{
|
||||
SQLite db;
|
||||
SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR,
|
||||
queryNAR, insertRealisation, insertMissingRealisation,
|
||||
queryRealisation, purgeCache;
|
||||
std::map<std::string, Cache> caches;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/binary-cache-v6.sqlite")
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
createDirs(dirOf(dbPath));
|
||||
|
||||
state->db = SQLite(dbPath);
|
||||
|
||||
state->db.isCache();
|
||||
|
||||
state->db.exec(schema);
|
||||
|
||||
state->insertCache.create(state->db,
|
||||
"insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
|
||||
|
||||
state->queryCache.create(state->db,
|
||||
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
|
||||
|
||||
state->insertNAR.create(state->db,
|
||||
"insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
|
||||
"narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
|
||||
|
||||
state->insertMissingNAR.create(state->db,
|
||||
"insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)");
|
||||
|
||||
state->queryNAR.create(state->db,
|
||||
"select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
|
||||
|
||||
state->insertRealisation.create(state->db,
|
||||
R"(
|
||||
insert or replace into Realisations(cache, outputId, content, timestamp)
|
||||
values (?, ?, ?, ?)
|
||||
)");
|
||||
|
||||
state->insertMissingRealisation.create(state->db,
|
||||
R"(
|
||||
insert or replace into Realisations(cache, outputId, timestamp)
|
||||
values (?, ?, ?)
|
||||
)");
|
||||
|
||||
state->queryRealisation.create(state->db,
|
||||
R"(
|
||||
select content from Realisations
|
||||
where cache = ? and outputId = ? and
|
||||
((content is null and timestamp > ?) or
|
||||
(content is not null and timestamp > ?))
|
||||
)");
|
||||
|
||||
/* Periodically purge expired entries from the database. */
|
||||
retrySQLite<void>([&]() {
|
||||
auto now = time(0);
|
||||
|
||||
SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
|
||||
auto queryLastPurge_(queryLastPurge.use());
|
||||
|
||||
if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
|
||||
SQLiteStmt(state->db,
|
||||
"delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
|
||||
.use()
|
||||
// Use a minimum TTL to prevent --refresh from
|
||||
// nuking the entire disk cache.
|
||||
(now - std::max(settings.ttlNegativeNarInfoCache.get(), 3600U))
|
||||
(now - std::max(settings.ttlPositiveNarInfoCache.get(), 30 * 24 * 3600U))
|
||||
.exec();
|
||||
|
||||
debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
|
||||
|
||||
SQLiteStmt(state->db,
|
||||
"insert or replace into LastPurge(dummy, value) values ('', ?)")
|
||||
.use()(now).exec();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Cache & getCache(State & state, const std::string & uri)
|
||||
{
|
||||
auto i = state.caches.find(uri);
|
||||
if (i == state.caches.end()) unreachable();
|
||||
return i->second;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
|
||||
{
|
||||
auto i = state.caches.find(uri);
|
||||
if (i == state.caches.end()) {
|
||||
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
|
||||
if (!queryCache.next())
|
||||
return std::nullopt;
|
||||
auto cache = Cache {
|
||||
.id = (int) queryCache.getInt(0),
|
||||
.storeDir = queryCache.getStr(1),
|
||||
.wantMassQuery = queryCache.getInt(2) != 0,
|
||||
.priority = (int) queryCache.getInt(3),
|
||||
};
|
||||
state.caches.emplace(uri, cache);
|
||||
}
|
||||
return getCache(state, uri);
|
||||
}
|
||||
|
||||
public:
|
||||
int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
|
||||
{
|
||||
return retrySQLite<int>([&]() {
|
||||
auto state(_state.lock());
|
||||
SQLiteTxn txn(state->db);
|
||||
|
||||
// To avoid the race, we have to check if maybe someone hasn't yet created
|
||||
// the cache for this URI in the meantime.
|
||||
auto cache(queryCacheRaw(*state, uri));
|
||||
|
||||
if (cache)
|
||||
return cache->id;
|
||||
|
||||
Cache ret {
|
||||
.id = -1, // set below
|
||||
.storeDir = storeDir,
|
||||
.wantMassQuery = wantMassQuery,
|
||||
.priority = priority,
|
||||
};
|
||||
|
||||
{
|
||||
auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
|
||||
if (!r.next()) { unreachable(); }
|
||||
ret.id = (int) r.getInt(0);
|
||||
}
|
||||
|
||||
state->caches[uri] = ret;
|
||||
|
||||
txn.commit();
|
||||
return ret.id;
|
||||
});
|
||||
}
|
||||
|
||||
std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
|
||||
{
|
||||
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
|
||||
auto state(_state.lock());
|
||||
auto cache(queryCacheRaw(*state, uri));
|
||||
if (!cache)
|
||||
return std::nullopt;
|
||||
return CacheInfo {
|
||||
.id = cache->id,
|
||||
.wantMassQuery = cache->wantMassQuery,
|
||||
.priority = cache->priority
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
|
||||
const std::string & uri, const std::string & hashPart) override
|
||||
{
|
||||
return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
|
||||
[&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
auto now = time(0);
|
||||
|
||||
auto queryNAR(state->queryNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(now - settings.ttlNegativeNarInfoCache)
|
||||
(now - settings.ttlPositiveNarInfoCache));
|
||||
|
||||
if (!queryNAR.next())
|
||||
return {oUnknown, 0};
|
||||
|
||||
if (!queryNAR.getInt(0))
|
||||
return {oInvalid, 0};
|
||||
|
||||
auto namePart = queryNAR.getStr(1);
|
||||
auto narInfo = make_ref<NarInfo>(
|
||||
StorePath(hashPart + "-" + namePart),
|
||||
Hash::parseAnyPrefixed(queryNAR.getStr(6)));
|
||||
narInfo->url = queryNAR.getStr(2);
|
||||
narInfo->compression = queryNAR.getStr(3);
|
||||
if (!queryNAR.isNull(4))
|
||||
narInfo->fileHash = Hash::parseAnyPrefixed(queryNAR.getStr(4));
|
||||
narInfo->fileSize = queryNAR.getInt(5);
|
||||
narInfo->narSize = queryNAR.getInt(7);
|
||||
for (auto & r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
|
||||
narInfo->references.insert(StorePath(r));
|
||||
if (!queryNAR.isNull(9))
|
||||
narInfo->deriver = StorePath(queryNAR.getStr(9));
|
||||
for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
|
||||
narInfo->sigs.insert(sig);
|
||||
narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11));
|
||||
|
||||
return {oValid, narInfo};
|
||||
});
|
||||
}
|
||||
|
||||
std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
|
||||
const std::string & uri, const DrvOutput & id) override
|
||||
{
|
||||
return retrySQLite<std::pair<Outcome, std::shared_ptr<Realisation>>>(
|
||||
[&]() -> std::pair<Outcome, std::shared_ptr<Realisation>> {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
auto now = time(0);
|
||||
|
||||
auto queryRealisation(state->queryRealisation.use()
|
||||
(cache.id)
|
||||
(id.to_string())
|
||||
(now - settings.ttlNegativeNarInfoCache)
|
||||
(now - settings.ttlPositiveNarInfoCache));
|
||||
|
||||
if (!queryRealisation.next())
|
||||
return {oUnknown, 0};
|
||||
|
||||
if (queryRealisation.isNull(0))
|
||||
return {oInvalid, 0};
|
||||
|
||||
auto realisation =
|
||||
std::make_shared<Realisation>(Realisation::fromJSON(
|
||||
nlohmann::json::parse(queryRealisation.getStr(0)),
|
||||
"Local disk cache"));
|
||||
|
||||
return {oValid, realisation};
|
||||
});
|
||||
}
|
||||
|
||||
void upsertNarInfo(
|
||||
const std::string & uri, const std::string & hashPart,
|
||||
std::shared_ptr<const ValidPathInfo> info) override
|
||||
{
|
||||
retrySQLite<void>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
if (info) {
|
||||
|
||||
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
|
||||
|
||||
//assert(hashPart == storePathToHash(info->path));
|
||||
|
||||
state->insertNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(std::string(info->path.name()))
|
||||
(narInfo ? narInfo->url : "", narInfo != 0)
|
||||
(narInfo ? narInfo->compression : "", narInfo != 0)
|
||||
(narInfo && narInfo->fileHash ? narInfo->fileHash->to_string(HashFormat::Nix32, true) : "", narInfo && narInfo->fileHash)
|
||||
(narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
|
||||
(info->narHash.to_string(HashFormat::Nix32, true))
|
||||
(info->narSize)
|
||||
(concatStringsSep(" ", info->shortRefs()))
|
||||
(info->deriver ? std::string(info->deriver->to_string()) : "", (bool) info->deriver)
|
||||
(concatStringsSep(" ", info->sigs))
|
||||
(renderContentAddress(info->ca))
|
||||
(time(0)).exec();
|
||||
|
||||
} else {
|
||||
state->insertMissingNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(time(0)).exec();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void upsertRealisation(
|
||||
const std::string & uri,
|
||||
const Realisation & realisation) override
|
||||
{
|
||||
retrySQLite<void>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
state->insertRealisation.use()
|
||||
(cache.id)
|
||||
(realisation.id.to_string())
|
||||
(realisation.toJSON().dump())
|
||||
(time(0)).exec();
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
virtual void upsertAbsentRealisation(
|
||||
const std::string & uri,
|
||||
const DrvOutput & id) override
|
||||
{
|
||||
retrySQLite<void>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
state->insertMissingRealisation.use()
|
||||
(cache.id)
|
||||
(id.to_string())
|
||||
(time(0)).exec();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
ref<NarInfoDiskCache> getNarInfoDiskCache()
|
||||
{
|
||||
static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>();
|
||||
return cache;
|
||||
}
|
||||
|
||||
ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
|
||||
{
|
||||
return make_ref<NarInfoDiskCacheImpl>(dbPath);
|
||||
}
|
||||
|
||||
}
54
subprojects/libstore/nar-info-disk-cache.hh
Normal file
@ -0,0 +1,54 @@
#pragma once
///@file

#include "ref.hh"
#include "nar-info.hh"
#include "realisation.hh"

namespace nix {

class NarInfoDiskCache
{
public:
    typedef enum { oValid, oInvalid, oUnknown } Outcome;

    virtual ~NarInfoDiskCache() { }

    virtual int createCache(const std::string & uri, const Path & storeDir,
        bool wantMassQuery, int priority) = 0;

    struct CacheInfo
    {
        int id;
        bool wantMassQuery;
        int priority;
    };

    virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;

    virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
        const std::string & uri, const std::string & hashPart) = 0;

    virtual void upsertNarInfo(
        const std::string & uri, const std::string & hashPart,
        std::shared_ptr<const ValidPathInfo> info) = 0;

    virtual void upsertRealisation(
        const std::string & uri,
        const Realisation & realisation) = 0;
    virtual void upsertAbsentRealisation(
        const std::string & uri,
        const DrvOutput & id) = 0;
    virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
        const std::string & uri, const DrvOutput & id) = 0;
};

/**
 * Return a singleton cache object that can be used concurrently by
 * multiple threads.
 */
ref<NarInfoDiskCache> getNarInfoDiskCache();

ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);

}
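
A sketch of the intended lookup flow using the singleton above; the cache URI is a placeholder. oUnknown means "not cached, ask the network"; oInvalid is a cached negative answer.

#include "nar-info-disk-cache.hh"

std::shared_ptr<nix::NarInfo> cachedLookup(
    const std::string & cacheUri, const std::string & hashPart)
{
    auto cache = nix::getNarInfoDiskCache();
    auto [outcome, info] = cache->lookupNarInfo(cacheUri, hashPart);
    // Only a positive, unexpired entry yields a NarInfo.
    return outcome == nix::NarInfoDiskCache::oValid ? info : nullptr;
}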
184
subprojects/libstore/nar-info.cc
Normal file
@ -0,0 +1,184 @@
#include "globals.hh"
|
||||
#include "nar-info.hh"
|
||||
#include "store-api.hh"
|
||||
#include "strings.hh"
|
||||
#include "json-utils.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
|
||||
: ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack
|
||||
{
|
||||
unsigned line = 1;
|
||||
|
||||
auto corrupt = [&](const char * reason) {
|
||||
return Error("NAR info file '%1%' is corrupt: %2%", whence,
|
||||
std::string(reason) + (line > 0 ? " at line " + std::to_string(line) : ""));
|
||||
};
|
||||
|
||||
auto parseHashField = [&](const std::string & s) {
|
||||
try {
|
||||
return Hash::parseAnyPrefixed(s);
|
||||
} catch (BadHash &) {
|
||||
throw corrupt("bad hash");
|
||||
}
|
||||
};
|
||||
|
||||
bool havePath = false;
|
||||
bool haveNarHash = false;
|
||||
|
||||
size_t pos = 0;
|
||||
while (pos < s.size()) {
|
||||
|
||||
size_t colon = s.find(':', pos);
|
||||
if (colon == s.npos) throw corrupt("expecting ':'");
|
||||
|
||||
std::string name(s, pos, colon - pos);
|
||||
|
||||
size_t eol = s.find('\n', colon + 2);
|
||||
if (eol == s.npos) throw corrupt("expecting '\\n'");
|
||||
|
||||
std::string value(s, colon + 2, eol - colon - 2);
|
||||
|
||||
if (name == "StorePath") {
|
||||
path = store.parseStorePath(value);
|
||||
havePath = true;
|
||||
}
|
||||
else if (name == "URL")
|
||||
url = value;
|
||||
else if (name == "Compression")
|
||||
compression = value;
|
||||
else if (name == "FileHash")
|
||||
fileHash = parseHashField(value);
|
||||
else if (name == "FileSize") {
|
||||
auto n = string2Int<decltype(fileSize)>(value);
|
||||
if (!n) throw corrupt("invalid FileSize");
|
||||
fileSize = *n;
|
||||
}
|
||||
else if (name == "NarHash") {
|
||||
narHash = parseHashField(value);
|
||||
haveNarHash = true;
|
||||
}
|
||||
else if (name == "NarSize") {
|
||||
auto n = string2Int<decltype(narSize)>(value);
|
||||
if (!n) throw corrupt("invalid NarSize");
|
||||
narSize = *n;
|
||||
}
|
||||
else if (name == "References") {
|
||||
auto refs = tokenizeString<Strings>(value, " ");
|
||||
if (!references.empty()) throw corrupt("extra References");
|
||||
for (auto & r : refs)
|
||||
references.insert(StorePath(r));
|
||||
}
|
||||
else if (name == "Deriver") {
|
||||
if (value != "unknown-deriver")
|
||||
deriver = StorePath(value);
|
||||
}
|
||||
else if (name == "Sig")
|
||||
sigs.insert(value);
|
||||
else if (name == "CA") {
|
||||
if (ca) throw corrupt("extra CA");
|
||||
// FIXME: allow blank ca or require skipping field?
|
||||
ca = ContentAddress::parseOpt(value);
|
||||
}
|
||||
|
||||
pos = eol + 1;
|
||||
line += 1;
|
||||
}
|
||||
|
||||
if (compression == "") compression = "bzip2";
|
||||
|
||||
if (!havePath || !haveNarHash || url.empty() || narSize == 0) {
|
||||
line = 0; // don't include line information in the error
|
||||
throw corrupt(
|
||||
!havePath ? "StorePath missing" :
|
||||
!haveNarHash ? "NarHash missing" :
|
||||
url.empty() ? "URL missing" :
|
||||
narSize == 0 ? "NarSize missing or zero"
|
||||
: "?");
|
||||
}
|
||||
}
|
||||
|
||||
std::string NarInfo::to_string(const Store & store) const
|
||||
{
|
||||
std::string res;
|
||||
res += "StorePath: " + store.printStorePath(path) + "\n";
|
||||
res += "URL: " + url + "\n";
|
||||
assert(compression != "");
|
||||
res += "Compression: " + compression + "\n";
|
||||
assert(fileHash && fileHash->algo == HashAlgorithm::SHA256);
|
||||
res += "FileHash: " + fileHash->to_string(HashFormat::Nix32, true) + "\n";
|
||||
res += "FileSize: " + std::to_string(fileSize) + "\n";
|
||||
assert(narHash.algo == HashAlgorithm::SHA256);
|
||||
res += "NarHash: " + narHash.to_string(HashFormat::Nix32, true) + "\n";
|
||||
res += "NarSize: " + std::to_string(narSize) + "\n";
|
||||
|
||||
res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
|
||||
|
||||
if (deriver)
|
||||
res += "Deriver: " + std::string(deriver->to_string()) + "\n";
|
||||
|
||||
for (auto sig : sigs)
|
||||
res += "Sig: " + sig + "\n";
|
||||
|
||||
if (ca)
|
||||
res += "CA: " + renderContentAddress(*ca) + "\n";
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
nlohmann::json NarInfo::toJSON(
|
||||
const Store & store,
|
||||
bool includeImpureInfo,
|
||||
HashFormat hashFormat) const
|
||||
{
|
||||
using nlohmann::json;
|
||||
|
||||
auto jsonObject = ValidPathInfo::toJSON(store, includeImpureInfo, hashFormat);
|
||||
|
||||
if (includeImpureInfo) {
|
||||
if (!url.empty())
|
||||
jsonObject["url"] = url;
|
||||
if (!compression.empty())
|
||||
jsonObject["compression"] = compression;
|
||||
if (fileHash)
|
||||
jsonObject["downloadHash"] = fileHash->to_string(hashFormat, true);
|
||||
if (fileSize)
|
||||
jsonObject["downloadSize"] = fileSize;
|
||||
}
|
||||
|
||||
return jsonObject;
|
||||
}
|
||||
|
||||
NarInfo NarInfo::fromJSON(
|
||||
const Store & store,
|
||||
const StorePath & path,
|
||||
const nlohmann::json & json)
|
||||
{
|
||||
using nlohmann::detail::value_t;
|
||||
|
||||
NarInfo res {
|
||||
ValidPathInfo {
|
||||
path,
|
||||
UnkeyedValidPathInfo::fromJSON(store, json),
|
||||
}
|
||||
};
|
||||
|
||||
if (json.contains("url"))
|
||||
res.url = getString(valueAt(json, "url"));
|
||||
|
||||
if (json.contains("compression"))
|
||||
res.compression = getString(valueAt(json, "compression"));
|
||||
|
||||
if (json.contains("downloadHash"))
|
||||
res.fileHash = Hash::parseAny(
|
||||
getString(valueAt(json, "downloadHash")),
|
||||
std::nullopt);
|
||||
|
||||
if (json.contains("downloadSize"))
|
||||
res.fileSize = getInteger(valueAt(json, "downloadSize"));
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
}
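
A round-trip sketch of the textual "Key: value" format handled above; it assumes a fully populated NarInfo (FileHash/NarHash present and SHA-256, as the asserts in to_string() require), and the file name is illustrative.

#include "nar-info.hh"
#include "store-api.hh"
#include <cassert>

void roundTrip(const nix::Store & store, const nix::NarInfo & info)
{
    // Serialise to the format served as <hash>.narinfo ...
    auto text = info.to_string(store);
    // ... and parse it back; the constructor throws on corrupt input.
    nix::NarInfo again(store, text, "example.narinfo");
    assert(info.path == again.path);
}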
43
subprojects/libstore/nar-info.hh
Normal file
@ -0,0 +1,43 @@
#pragma once
///@file

#include "types.hh"
#include "hash.hh"
#include "path-info.hh"

namespace nix {

class Store;

struct NarInfo : ValidPathInfo
{
    std::string url;
    std::string compression;
    std::optional<Hash> fileHash;
    uint64_t fileSize = 0;

    NarInfo() = delete;
    NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash)
        : ValidPathInfo(store, std::move(name), std::move(ca), narHash)
    { }
    NarInfo(StorePath path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
    NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
    NarInfo(const Store & store, const std::string & s, const std::string & whence);

    bool operator ==(const NarInfo &) const = default;
    // TODO libc++ 16 (used by darwin) missing `std::optional::operator <=>`, can't do yet
    //auto operator <=>(const NarInfo &) const = default;

    std::string to_string(const Store & store) const;

    nlohmann::json toJSON(
        const Store & store,
        bool includeImpureInfo,
        HashFormat hashFormat) const override;
    static NarInfo fromJSON(
        const Store & store,
        const StorePath & path,
        const nlohmann::json & json);
};

}
10
subprojects/libstore/nix-store.pc.in
Normal file
@ -0,0 +1,10 @@
prefix=@prefix@
libdir=@libdir@
includedir=@includedir@

Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Requires: nix-util
Libs: -L${libdir} -lnixstore
Cflags: -I${includedir}/nix -std=c++2a
314
subprojects/libstore/optimise-store.cc
Normal file
@ -0,0 +1,314 @@
#include "local-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "signals.hh"
|
||||
#include "posix-fs-canonicalise.hh"
|
||||
#include "posix-source-accessor.hh"
|
||||
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <regex>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static void makeWritable(const Path & path)
|
||||
{
|
||||
auto st = lstat(path);
|
||||
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
|
||||
throw SysError("changing writability of '%1%'", path);
|
||||
}
|
||||
|
||||
|
||||
struct MakeReadOnly
|
||||
{
|
||||
Path path;
|
||||
MakeReadOnly(const PathView path) : path(path) { }
|
||||
~MakeReadOnly()
|
||||
{
|
||||
try {
|
||||
/* This will make the path read-only. */
|
||||
if (path != "") canonicaliseTimestampAndPermissions(path);
|
||||
} catch (...) {
|
||||
ignoreExceptionInDestructor();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
LocalStore::InodeHash LocalStore::loadInodeHash()
|
||||
{
|
||||
debug("loading hash inodes in memory");
|
||||
InodeHash inodeHash;
|
||||
|
||||
AutoCloseDir dir(opendir(linksDir.c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", linksDir);
|
||||
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
|
||||
checkInterrupt();
|
||||
// We don't care if we hit non-hash files, anything goes
|
||||
inodeHash.insert(dirent->d_ino);
|
||||
}
|
||||
if (errno) throw SysError("reading directory '%1%'", linksDir);
|
||||
|
||||
printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size());
|
||||
|
||||
return inodeHash;
|
||||
}
|
||||
|
||||
|
||||
Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
|
||||
{
|
||||
Strings names;
|
||||
|
||||
AutoCloseDir dir(opendir(path.c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", path);
|
||||
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
|
||||
checkInterrupt();
|
||||
|
||||
if (inodeHash.count(dirent->d_ino)) {
|
||||
debug("'%1%' is already linked", dirent->d_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string name = dirent->d_name;
|
||||
if (name == "." || name == "..") continue;
|
||||
names.push_back(name);
|
||||
}
|
||||
if (errno) throw SysError("reading directory '%1%'", path);
|
||||
|
||||
return names;
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
|
||||
const Path & path, InodeHash & inodeHash, RepairFlag repair)
|
||||
{
|
||||
checkInterrupt();
|
||||
|
||||
auto st = lstat(path);
|
||||
|
||||
#if __APPLE__
|
||||
/* HFS/macOS has some undocumented security feature disabling hardlinking for
|
||||
special files within .app dirs. Known affected paths include
|
||||
*.app/Contents/{PkgInfo,Resources/\*.lproj,_CodeSignature} and .DS_Store.
|
||||
See https://github.com/NixOS/nix/issues/1443 and
|
||||
https://github.com/NixOS/nix/pull/2230 for more discussion. */
|
||||
|
||||
if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
|
||||
{
|
||||
debug("'%1%' is not allowed to be linked in macOS", path);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
Strings names = readDirectoryIgnoringInodes(path, inodeHash);
|
||||
for (auto & i : names)
|
||||
optimisePath_(act, stats, path + "/" + i, inodeHash, repair);
|
||||
return;
|
||||
}
|
||||
|
||||
/* We can hard link regular files and maybe symlinks. */
|
||||
if (!S_ISREG(st.st_mode)
|
||||
#if CAN_LINK_SYMLINK
|
||||
&& !S_ISLNK(st.st_mode)
|
||||
#endif
|
||||
) return;
|
||||
|
||||
/* Sometimes SNAFUs can cause files in the Nix store to be
|
||||
modified, in particular when running programs as root under
|
||||
NixOS (example: $fontconfig/var/cache being modified). Skip
|
||||
those files. FIXME: check the modification time. */
|
||||
if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
|
||||
warn("skipping suspicious writable file '%1%'", path);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This can still happen on top-level files. */
|
||||
if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
|
||||
debug("'%s' is already linked, with %d other file(s)", path, st.st_nlink - 2);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Hash the file. Note that hashPath() returns the hash over the
|
||||
NAR serialisation, which includes the execute bit on the file.
|
||||
Thus, executable and non-executable files with the same
|
||||
contents *won't* be linked (which is good because otherwise the
|
||||
permissions would be screwed up).
|
||||
|
||||
Also note that if `path' is a symlink, then we're hashing the
|
||||
contents of the symlink (i.e. the result of readlink()), not
|
||||
the contents of the target (which may not even exist). */
|
||||
Hash hash = ({
|
||||
hashPath(
|
||||
{make_ref<PosixSourceAccessor>(), CanonPath(path)},
|
||||
FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256).first;
|
||||
});
|
||||
debug("'%1%' has hash '%2%'", path, hash.to_string(HashFormat::Nix32, true));
|
||||
|
||||
/* Check if this is a known hash. */
|
||||
std::filesystem::path linkPath = std::filesystem::path{linksDir} / hash.to_string(HashFormat::Nix32, false);
|
||||
|
||||
/* Maybe delete the link, if it has been corrupted. */
|
||||
if (std::filesystem::exists(std::filesystem::symlink_status(linkPath))) {
|
||||
auto stLink = lstat(linkPath.string());
|
||||
if (st.st_size != stLink.st_size
|
||||
|| (repair && hash != ({
|
||||
hashPath(
|
||||
PosixSourceAccessor::createAtRoot(linkPath),
|
||||
FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256).first;
|
||||
})))
|
||||
{
|
||||
// XXX: Consider overwriting linkPath with our valid version.
|
||||
warn("removing corrupted link %s", linkPath);
|
||||
warn("There may be more corrupted paths."
|
||||
"\nYou should run `nix-store --verify --check-contents --repair` to fix them all");
|
||||
std::filesystem::remove(linkPath);
|
||||
}
|
||||
}
|
||||
|
||||
if (!std::filesystem::exists(std::filesystem::symlink_status(linkPath))) {
|
||||
/* Nope, create a hard link in the links directory. */
|
||||
try {
|
||||
std::filesystem::create_hard_link(path, linkPath);
|
||||
inodeHash.insert(st.st_ino);
|
||||
} catch (std::filesystem::filesystem_error & e) {
|
||||
if (e.code() == std::errc::file_exists) {
|
||||
/* Fall through if another process created ‘linkPath’ before
|
||||
we did. */
|
||||
}
|
||||
|
||||
else if (e.code() == std::errc::no_space_on_device) {
|
||||
/* On ext4, that probably means the directory index is
|
||||
full. When that happens, it's fine to ignore it: we
|
||||
just effectively disable deduplication of this
|
||||
file. */
|
||||
printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
|
||||
return;
|
||||
}
|
||||
|
||||
else throw;
|
||||
}
|
||||
}
|
||||
|
||||
/* Yes! We've seen a file with the same contents. Replace the
|
||||
current file with a hard link to that file. */
|
||||
auto stLink = lstat(linkPath.string());
|
||||
|
||||
if (st.st_ino == stLink.st_ino) {
|
||||
debug("'%1%' is already linked to '%2%'", path, linkPath);
|
||||
return;
|
||||
}
|
||||
|
||||
printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath);
|
||||
|
||||
/* Make the containing directory writable, but only if it's not
|
||||
the store itself (we don't want or need to mess with its
|
||||
permissions). */
|
||||
const Path dirOfPath(dirOf(path));
|
||||
bool mustToggle = dirOfPath != realStoreDir.get();
|
||||
if (mustToggle) makeWritable(dirOfPath);
|
||||
|
||||
/* When we're done, make the directory read-only again and reset
|
||||
its timestamp back to 0. */
|
||||
MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : "");
|
||||
|
||||
std::filesystem::path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), rand());
|
||||
|
||||
try {
|
||||
std::filesystem::create_hard_link(linkPath, tempLink);
|
||||
inodeHash.insert(st.st_ino);
|
||||
} catch (std::filesystem::filesystem_error & e) {
|
||||
if (e.code() == std::errc::too_many_links) {
|
||||
/* Too many links to the same file (>= 32000 on most file
|
||||
systems). This is likely to happen with empty files.
|
||||
Just shrug and ignore. */
|
||||
if (st.st_size)
|
||||
printInfo("'%1%' has maximum number of links", linkPath);
|
||||
return;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
/* Atomically replace the old file with the new hard link. */
|
||||
try {
|
||||
std::filesystem::rename(tempLink, path);
|
||||
} catch (std::filesystem::filesystem_error & e) {
|
||||
std::filesystem::remove(tempLink);
|
||||
printError("unable to unlink '%1%'", tempLink);
|
||||
if (e.code() == std::errc::too_many_links) {
|
||||
/* Some filesystems generate too many links on the rename,
|
||||
rather than on the original link. (Probably it
|
||||
temporarily increases the st_nlink field before
|
||||
decreasing it again.) */
|
||||
debug("'%s' has reached maximum number of links", linkPath);
|
||||
return;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
stats.filesLinked++;
|
||||
stats.bytesFreed += st.st_size;
|
||||
|
||||
if (act)
|
||||
act->result(resFileLinked, st.st_size
|
||||
#ifndef _WIN32
|
||||
, st.st_blocks
|
||||
#endif
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::optimiseStore(OptimiseStats & stats)
|
||||
{
|
||||
Activity act(*logger, actOptimiseStore);
|
||||
|
||||
auto paths = queryAllValidPaths();
|
||||
InodeHash inodeHash = loadInodeHash();
|
||||
|
||||
act.progress(0, paths.size());
|
||||
|
||||
uint64_t done = 0;
|
||||
|
||||
for (auto & i : paths) {
|
||||
addTempRoot(i);
|
||||
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
|
||||
{
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", printStorePath(i)));
|
||||
optimisePath_(&act, stats, realStoreDir + "/" + std::string(i.to_string()), inodeHash, NoRepair);
|
||||
}
|
||||
done++;
|
||||
act.progress(done, paths.size());
|
||||
}
|
||||
}
|
||||
|
||||
void LocalStore::optimiseStore()
|
||||
{
|
||||
OptimiseStats stats;
|
||||
|
||||
optimiseStore(stats);
|
||||
|
||||
printInfo("%s freed by hard-linking %d files",
|
||||
showBytes(stats.bytesFreed),
|
||||
stats.filesLinked);
|
||||
}
|
||||
|
||||
void LocalStore::optimisePath(const Path & path, RepairFlag repair)
|
||||
{
|
||||
OptimiseStats stats;
|
||||
InodeHash inodeHash;
|
||||
|
||||
if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash, repair);
|
||||
}
|
||||
|
||||
|
||||
}
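
Not Nix code: a self-contained sketch of the optimisation idea above (hash contents, keep one canonical copy in a links directory, hard-link duplicates). It keys on raw file bytes with std::hash as a toy stand-in for the cryptographic NAR hash, so unlike the real code it would conflate executable and non-executable files and can collide.

#include <filesystem>
#include <fstream>
#include <functional>
#include <string>

namespace fs = std::filesystem;

static std::string slurp(const fs::path & p)
{
    std::ifstream in(p, std::ios::binary);
    return {std::istreambuf_iterator<char>(in), {}};
}

void deduplicate(const fs::path & file, const fs::path & linksDir)
{
    auto link = linksDir / std::to_string(std::hash<std::string>{}(slurp(file)));
    if (!fs::exists(link)) {
        fs::create_hard_link(file, link);   // first copy becomes canonical
        return;
    }
    if (fs::equivalent(file, link)) return; // already linked (same inode)
    auto tmp = file;
    tmp += ".tmp-link";
    fs::create_hard_link(link, tmp);        // stage, then atomically replace
    fs::rename(tmp, file);
}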
196
subprojects/libstore/outputs-spec.cc
Normal file
@ -0,0 +1,196 @@
#include <regex>
#include <nlohmann/json.hpp>

#include "util.hh"
#include "regex-combinators.hh"
#include "outputs-spec.hh"
#include "path-regex.hh"
#include "strings-inline.hh"

namespace nix {

bool OutputsSpec::contains(const std::string & outputName) const
{
    return std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            return true;
        },
        [&](const OutputsSpec::Names & outputNames) {
            return outputNames.count(outputName) > 0;
        },
    }, raw);
}

static std::string outputSpecRegexStr =
    regex::either(
        regex::group(R"(\*)"),
        regex::group(regex::list(nameRegexStr)));

std::optional<OutputsSpec> OutputsSpec::parseOpt(std::string_view s)
{
    static std::regex regex(std::string { outputSpecRegexStr });

    std::smatch match;
    std::string s2 { s }; // until someone improves std::regex
    if (!std::regex_match(s2, match, regex))
        return std::nullopt;

    if (match[1].matched)
        return { OutputsSpec::All {} };

    if (match[2].matched)
        return OutputsSpec::Names { tokenizeString<StringSet>(match[2].str(), ",") };

    assert(false);
}


OutputsSpec OutputsSpec::parse(std::string_view s)
{
    std::optional spec = parseOpt(s);
    if (!spec)
        throw Error("invalid outputs specifier '%s'", s);
    return std::move(*spec);
}


std::optional<std::pair<std::string_view, ExtendedOutputsSpec>> ExtendedOutputsSpec::parseOpt(std::string_view s)
{
    auto found = s.rfind('^');

    if (found == std::string::npos)
        return std::pair { s, ExtendedOutputsSpec::Default {} };

    auto specOpt = OutputsSpec::parseOpt(s.substr(found + 1));
    if (!specOpt)
        return std::nullopt;
    return std::pair { s.substr(0, found), ExtendedOutputsSpec::Explicit { std::move(*specOpt) } };
}


std::pair<std::string_view, ExtendedOutputsSpec> ExtendedOutputsSpec::parse(std::string_view s)
{
    std::optional spec = parseOpt(s);
    if (!spec)
        throw Error("invalid extended outputs specifier '%s'", s);
    return *spec;
}


std::string OutputsSpec::to_string() const
{
    return std::visit(overloaded {
        [&](const OutputsSpec::All &) -> std::string {
            return "*";
        },
        [&](const OutputsSpec::Names & outputNames) -> std::string {
            return concatStringsSep(",", outputNames);
        },
    }, raw);
}


std::string ExtendedOutputsSpec::to_string() const
{
    return std::visit(overloaded {
        [&](const ExtendedOutputsSpec::Default &) -> std::string {
            return "";
        },
        [&](const ExtendedOutputsSpec::Explicit & outputSpec) -> std::string {
            return "^" + outputSpec.to_string();
        },
    }, raw);
}


OutputsSpec OutputsSpec::union_(const OutputsSpec & that) const
{
    return std::visit(overloaded {
        [&](const OutputsSpec::All &) -> OutputsSpec {
            return OutputsSpec::All { };
        },
        [&](const OutputsSpec::Names & theseNames) -> OutputsSpec {
            return std::visit(overloaded {
                [&](const OutputsSpec::All &) -> OutputsSpec {
                    return OutputsSpec::All {};
                },
                [&](const OutputsSpec::Names & thoseNames) -> OutputsSpec {
                    OutputsSpec::Names ret = theseNames;
                    ret.insert(thoseNames.begin(), thoseNames.end());
                    return ret;
                },
            }, that.raw);
        },
    }, raw);
}


bool OutputsSpec::isSubsetOf(const OutputsSpec & that) const
{
    return std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            return true;
        },
        [&](const OutputsSpec::Names & thoseNames) {
            return std::visit(overloaded {
                [&](const OutputsSpec::All &) {
                    return false;
                },
                [&](const OutputsSpec::Names & theseNames) {
                    bool ret = true;
                    for (auto & o : theseNames)
                        if (thoseNames.count(o) == 0)
                            ret = false;
                    return ret;
                },
            }, raw);
        },
    }, that.raw);
}

}

namespace nlohmann {

using namespace nix;

OutputsSpec adl_serializer<OutputsSpec>::from_json(const json & json) {
    auto names = json.get<StringSet>();
    if (names == StringSet({"*"}))
        return OutputsSpec::All {};
    else
        return OutputsSpec::Names { std::move(names) };
}

void adl_serializer<OutputsSpec>::to_json(json & json, OutputsSpec t) {
    std::visit(overloaded {
        [&](const OutputsSpec::All &) {
            json = std::vector<std::string>({"*"});
        },
        [&](const OutputsSpec::Names & names) {
            json = names;
        },
    }, t.raw);
}


ExtendedOutputsSpec adl_serializer<ExtendedOutputsSpec>::from_json(const json & json) {
    if (json.is_null())
        return ExtendedOutputsSpec::Default {};
    else {
        return ExtendedOutputsSpec::Explicit { json.get<OutputsSpec>() };
    }
}

void adl_serializer<ExtendedOutputsSpec>::to_json(json & json, ExtendedOutputsSpec t) {
    std::visit(overloaded {
        [&](const ExtendedOutputsSpec::Default &) {
            json = nullptr;
        },
        [&](const ExtendedOutputsSpec::Explicit & e) {
            adl_serializer<OutputsSpec>::to_json(json, e);
        },
    }, t.raw);
}

}
128
subprojects/libstore/outputs-spec.hh
Normal file
@ -0,0 +1,128 @@
#pragma once
///@file

#include <cassert>
#include <optional>
#include <set>
#include <variant>

#include "json-impls.hh"
#include "variant-wrapper.hh"

namespace nix {

/**
 * An (owned) output name. Just a type alias used to make code more
 * readable.
 */
typedef std::string OutputName;

/**
 * A borrowed output name. Just a type alias used to make code more
 * readable.
 */
typedef std::string_view OutputNameView;

struct OutputsSpec {
    /**
     * A non-empty set of outputs, specified by name
     */
    struct Names : std::set<OutputName> {
        using std::set<OutputName>::set;

        /* These need to be "inherited manually" */

        Names(const std::set<OutputName> & s)
            : std::set<OutputName>(s)
        { assert(!empty()); }

        /**
         * Needs to be "inherited manually"
         */
        Names(std::set<OutputName> && s)
            : std::set<OutputName>(s)
        { assert(!empty()); }

        /* This set should always be non-empty, so we delete this
           constructor in order to make creating empty ones by mistake
           harder. */
        Names() = delete;
    };

    /**
     * The set of all outputs, without needing to name them explicitly
     */
    struct All : std::monostate { };

    typedef std::variant<All, Names> Raw;

    Raw raw;

    bool operator == (const OutputsSpec &) const = default;
    // TODO libc++ 16 (used by darwin) is missing `std::set::operator <=>`, so we can't default this yet.
    bool operator < (const OutputsSpec & other) const {
        return raw < other.raw;
    }

    MAKE_WRAPPER_CONSTRUCTOR(OutputsSpec);

    /**
     * Force choosing a variant
     */
    OutputsSpec() = delete;

    bool contains(const OutputName & output) const;

    /**
     * Create a new OutputsSpec which is the union of this and that.
     */
    OutputsSpec union_(const OutputsSpec & that) const;

    /**
     * Whether this OutputsSpec is a subset of that.
     */
    bool isSubsetOf(const OutputsSpec & outputs) const;

    /**
     * Parse a string of the form 'output1,...outputN' or '*', returning
     * the outputs spec.
     */
    static OutputsSpec parse(std::string_view s);
    static std::optional<OutputsSpec> parseOpt(std::string_view s);

    std::string to_string() const;
};

struct ExtendedOutputsSpec {
    struct Default : std::monostate { };
    using Explicit = OutputsSpec;

    typedef std::variant<Default, Explicit> Raw;

    Raw raw;

    bool operator == (const ExtendedOutputsSpec &) const = default;
    // TODO libc++ 16 (used by darwin) is missing `std::set::operator <=>`, so we can't default this yet.
    bool operator < (const ExtendedOutputsSpec &) const;

    MAKE_WRAPPER_CONSTRUCTOR(ExtendedOutputsSpec);

    /**
     * Force choosing a variant
     */
    ExtendedOutputsSpec() = delete;

    /**
     * Parse a string of the form 'prefix^output1,...outputN' or
     * 'prefix^*', returning the prefix and the extended outputs spec.
     */
    static std::pair<std::string_view, ExtendedOutputsSpec> parse(std::string_view s);
    static std::optional<std::pair<std::string_view, ExtendedOutputsSpec>> parseOpt(std::string_view s);

    std::string to_string() const;
};

}

JSON_IMPL(OutputsSpec)
JSON_IMPL(ExtendedOutputsSpec)
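A hedged sketch of the parsing entry points declared above (as the declarations suggest, parseOpt is the non-throwing variant; an illustration, not part of the diff):

#include <cassert>
#include "outputs-spec.hh"

using namespace nix;

int main()
{
    // A comma-separated list yields Names; '*' yields All.
    assert(OutputsSpec::parse("out,dev") == (OutputsSpec { OutputsSpec::Names {"out", "dev"} }));
    assert(OutputsSpec::parse("*") == (OutputsSpec { OutputsSpec::All {} }));

    // The empty string would name no outputs, so it does not parse.
    assert(!OutputsSpec::parseOpt("").has_value());

    // ExtendedOutputsSpec::parse splits off a '^outputs' suffix.
    auto [prefix, extended] = ExtendedOutputsSpec::parse("nixpkgs#hello^out");
    assert(prefix == "nixpkgs#hello");
    (void) extended; // holds the explicit spec for "out"
}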
109
subprojects/libstore/package.nix
Normal file

@@ -0,0 +1,109 @@
{ lib
, stdenv
, mkMesonDerivation
, releaseTools

, meson
, ninja
, pkg-config
, unixtools

, nix-util
, boost
, curl
, aws-sdk-cpp
, libseccomp
, nlohmann_json
, sqlite

, busybox-sandbox-shell ? null

# Configuration Options

, version

, embeddedSandboxShell ? stdenv.hostPlatform.isStatic
}:

let
  inherit (lib) fileset;
in

mkMesonDerivation (finalAttrs: {
  pname = "nix-store";
  inherit version;

  workDir = ./.;
  fileset = fileset.unions [
    ../../build-utils-meson
    ./build-utils-meson
    ../../.version
    ./.version
    ./meson.build
    ./meson.options
    ./linux/meson.build
    ./unix/meson.build
    ./windows/meson.build
    (fileset.fileFilter (file: file.hasExt "cc") ./.)
    (fileset.fileFilter (file: file.hasExt "hh") ./.)
    (fileset.fileFilter (file: file.hasExt "sb") ./.)
    (fileset.fileFilter (file: file.hasExt "md") ./.)
    (fileset.fileFilter (file: file.hasExt "sql") ./.)
  ];

  outputs = [ "out" "dev" ];

  nativeBuildInputs = [
    meson
    ninja
    pkg-config
  ] ++ lib.optional embeddedSandboxShell unixtools.hexdump;

  buildInputs = [
    boost
    curl
    sqlite
  ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp
    # There have been issues building these dependencies
    ++ lib.optional (stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin))
      aws-sdk-cpp
  ;

  propagatedBuildInputs = [
    nix-util
    nlohmann_json
  ];

  preConfigure =
    # "Inline" .version so it's not a symlink, and includes the suffix.
    # Do the meson utils, without modification.
    ''
      chmod u+w ./.version
      echo ${version} > ../../.version
    '';

  mesonFlags = [
    (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux)
    (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell)
  ] ++ lib.optionals stdenv.hostPlatform.isLinux [
    (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox")
  ];

  env = {
    # Needed for Meson to find Boost.
    # https://github.com/NixOS/nixpkgs/issues/86131.
    BOOST_INCLUDEDIR = "${lib.getDev boost}/include";
    BOOST_LIBRARYDIR = "${lib.getLib boost}/lib";
  } // lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
    LDFLAGS = "-fuse-ld=gold";
  };

  separateDebugInfo = !stdenv.hostPlatform.isStatic;

  hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";

  meta = {
    platforms = lib.platforms.unix ++ lib.platforms.windows;
  };

})
285
subprojects/libstore/parsed-derivations.cc
Normal file

@@ -0,0 +1,285 @@
#include "parsed-derivations.hh"

#include <nlohmann/json.hpp>
#include <regex>

namespace nix {

ParsedDerivation::ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv)
    : drvPath(drvPath), drv(drv)
{
    /* Parse the __json attribute, if any. */
    auto jsonAttr = drv.env.find("__json");
    if (jsonAttr != drv.env.end()) {
        try {
            structuredAttrs = std::make_unique<nlohmann::json>(nlohmann::json::parse(jsonAttr->second));
        } catch (std::exception & e) {
            throw Error("cannot process __json attribute of '%s': %s", drvPath.to_string(), e.what());
        }
    }
}

ParsedDerivation::~ParsedDerivation() { }

std::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return {};
        else {
            if (!i->is_string())
                throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath.to_string());
            return i->get<std::string>();
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return {};
        else
            return i->second;
    }
}

bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return def;
        else {
            if (!i->is_boolean())
                throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath.to_string());
            return i->get<bool>();
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return def;
        else
            return i->second == "1";
    }
}

std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return {};
        else {
            if (!i->is_array())
                throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath.to_string());
            Strings res;
            for (auto j = i->begin(); j != i->end(); ++j) {
                if (!j->is_string())
                    throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath.to_string());
                res.push_back(j->get<std::string>());
            }
            return res;
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return {};
        else
            return tokenizeString<Strings>(i->second);
    }
}
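The accessors above fall back from structured attrs to the legacy environment encoding. A minimal, self-contained sketch of the two Boolean encodings that getBoolAttr reconciles (the classes above are not used; only the encodings are illustrated):

#include <cassert>
#include <map>
#include <string>

#include <nlohmann/json.hpp>

int main()
{
    // With __structuredAttrs, a Boolean is a real JSON Boolean...
    nlohmann::json structured = {{"preferLocalBuild", true}};
    bool fromJson = structured.at("preferLocalBuild").get<bool>();

    // ...while the legacy env fallback encodes true as the string "1".
    std::map<std::string, std::string> env = {{"preferLocalBuild", "1"}};
    bool fromEnv = env.at("preferLocalBuild") == "1";

    assert(fromJson && fromEnv);
}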

StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
    // FIXME: cache this?
    StringSet res;
    for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
        res.insert(i);
    if (!drv.type().hasKnownOutputPaths())
        res.insert("ca-derivations");
    return res;
}

bool ParsedDerivation::canBuildLocally(Store & localStore) const
{
    if (drv.platform != settings.thisSystem.get()
        && !settings.extraPlatforms.get().count(drv.platform)
        && !drv.isBuiltin())
        return false;

    if (settings.maxBuildJobs.get() == 0
        && !drv.isBuiltin())
        return false;

    for (auto & feature : getRequiredSystemFeatures())
        if (!localStore.systemFeatures.get().count(feature)) return false;

    return true;
}

bool ParsedDerivation::willBuildLocally(Store & localStore) const
{
    return getBoolAttr("preferLocalBuild") && canBuildLocally(localStore);
}

bool ParsedDerivation::substitutesAllowed() const
{
    return settings.alwaysAllowSubstitutes ? true : getBoolAttr("allowSubstitutes", true);
}

bool ParsedDerivation::useUidRange() const
{
    return getRequiredSystemFeatures().count("uid-range");
}

static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");

/**
 * Write a JSON representation of store object metadata, such as the
 * hash and the references.
 *
 * @note Do *not* use `ValidPathInfo::toJSON` because this function is
 * subject to stronger stability requirements since it is used to
 * prepare build environments. Perhaps someday we'll have a versioning
 * mechanism to allow this to evolve again and get back in sync, but for
 * now we must not change - not even extend - the behavior.
 */
static nlohmann::json pathInfoToJSON(
    Store & store,
    const StorePathSet & storePaths)
{
    using nlohmann::json;

    nlohmann::json::array_t jsonList = json::array();

    for (auto & storePath : storePaths) {
        auto info = store.queryPathInfo(storePath);

        auto & jsonPath = jsonList.emplace_back(json::object());

        jsonPath["narHash"] = info->narHash.to_string(HashFormat::Nix32, true);
        jsonPath["narSize"] = info->narSize;

        {
            auto & jsonRefs = jsonPath["references"] = json::array();
            for (auto & ref : info->references)
                jsonRefs.emplace_back(store.printStorePath(ref));
        }

        if (info->ca)
            jsonPath["ca"] = renderContentAddress(info->ca);

        // Add the path to the object whose metadata we are including.
        jsonPath["path"] = store.printStorePath(storePath);

        jsonPath["valid"] = true;

        jsonPath["closureSize"] = ({
            uint64_t totalNarSize = 0;
            StorePathSet closure;
            store.computeFSClosure(info->path, closure, false, false);
            for (auto & p : closure) {
                auto info = store.queryPathInfo(p);
                totalNarSize += info->narSize;
            }
            totalNarSize;
        });
    }
    return jsonList;
}
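For reference, each element of the returned list has roughly the following shape; all values here are illustrative placeholders, not real store data, and a "ca" field appears only for content-addressed paths:

[
  {
    "narHash": "sha256:...",
    "narSize": 1234,
    "references": [ "/nix/store/...-dep" ],
    "path": "/nix/store/...-example",
    "valid": true,
    "closureSize": 5678
  }
]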

std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
{
    auto structuredAttrs = getStructuredAttrs();
    if (!structuredAttrs) return std::nullopt;

    auto json = *structuredAttrs;

    /* Add an "outputs" object containing the output paths. */
    nlohmann::json outputs;
    for (auto & i : drv.outputs)
        outputs[i.first] = hashPlaceholder(i.first);
    json["outputs"] = outputs;

    /* Handle exportReferencesGraph. */
    auto e = json.find("exportReferencesGraph");
    if (e != json.end() && e->is_object()) {
        for (auto i = e->begin(); i != e->end(); ++i) {
            StorePathSet storePaths;
            for (auto & p : *i)
                storePaths.insert(store.toStorePath(p.get<std::string>()).first);
            json[i.key()] = pathInfoToJSON(store,
                store.exportReferences(storePaths, inputPaths));
        }
    }

    return json;
}
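A hedged before/after sketch of the exportReferencesGraph rewriting (the store path is an illustrative placeholder). Given structured attrs containing

  "exportReferencesGraph": { "closure": [ "/nix/store/...-example" ] }

the function adds a top-level key per entry, here

  "closure": [ <pathInfoToJSON metadata for the export-closure of that path> ]

so the builder later reads the metadata list directly under "closure"; the original "exportReferencesGraph" object itself stays in place.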

/* As a convenience to bash scripts, write a shell file that
   maps all attributes that are representable in bash -
   namely, strings, integers, nulls, Booleans, and arrays and
   objects consisting entirely of those values. (So nested
   arrays or objects are not supported.) */
std::string writeStructuredAttrsShell(const nlohmann::json & json)
{
    auto handleSimpleType = [](const nlohmann::json & value) -> std::optional<std::string> {
        if (value.is_string())
            return shellEscape(value.get<std::string_view>());

        if (value.is_number()) {
            auto f = value.get<float>();
            if (std::ceil(f) == f)
                return std::to_string(value.get<int>());
        }

        if (value.is_null())
            return std::string("''");

        if (value.is_boolean())
            return value.get<bool>() ? std::string("1") : std::string("");

        return {};
    };

    std::string jsonSh;

    for (auto & [key, value] : json.items()) {

        if (!std::regex_match(key, shVarName)) continue;

        auto s = handleSimpleType(value);
        if (s)
            jsonSh += fmt("declare %s=%s\n", key, *s);

        else if (value.is_array()) {
            std::string s2;
            bool good = true;

            for (auto & value2 : value) {
                auto s3 = handleSimpleType(value2);
                if (!s3) { good = false; break; }
                s2 += *s3; s2 += ' ';
            }

            if (good)
                jsonSh += fmt("declare -a %s=(%s)\n", key, s2);
        }

        else if (value.is_object()) {
            std::string s2;
            bool good = true;

            for (auto & [key2, value2] : value.items()) {
                auto s3 = handleSimpleType(value2);
                if (!s3) { good = false; break; }
                s2 += fmt("[%s]=%s ", shellEscape(key2), *s3);
            }

            if (good)
                jsonSh += fmt("declare -A %s=(%s)\n", key, s2);
        }
    }

    return jsonSh;
}

}
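A hedged usage sketch of writeStructuredAttrsShell (assuming it is declared in parsed-derivations.hh; the exact quoting comes from shellEscape, so the output shown in the comment is approximate):

#include <iostream>
#include <nlohmann/json.hpp>
#include "parsed-derivations.hh"

using namespace nix;

int main()
{
    nlohmann::json attrs = {
        {"doCheck", true},
        {"name", "hello"},
        {"outputs", {"out", "dev"}},
    };
    std::cout << writeStructuredAttrsShell(attrs);
    // Prints roughly:
    //   declare doCheck=1
    //   declare name='hello'
    //   declare -a outputs=('out' 'dev' )
}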
Some files were not shown because too many files have changed in this diff.