mirror of
https://github.com/NixOS/nix.git
synced 2025-11-14 14:32:42 +01:00
Build a minimized Nix with MinGW
At this point many features are stripped out, but this works:
- Can run libnix{util,store,expr} unit tests
- Can run some Nix commands
Co-Authored-By volth <volth@volth.com>
Co-Authored-By Brian McKenna <brian@brianmckenna.org>
This commit is contained in:
parent
2248a3f545
commit
8433027e35
111 changed files with 1162 additions and 140 deletions
37
src/libstore/unix/build/child.cc
Normal file
37
src/libstore/unix/build/child.cc
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
#include "child.hh"
|
||||
#include "current-process.hh"
|
||||
#include "logging.hh"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
void commonChildInit()
|
||||
{
|
||||
logger = makeSimpleLogger();
|
||||
|
||||
const static std::string pathNullDevice = "/dev/null";
|
||||
restoreProcessContext(false);
|
||||
|
||||
/* Put the child in a separate session (and thus a separate
|
||||
process group) so that it has no controlling terminal (meaning
|
||||
that e.g. ssh cannot open /dev/tty) and it doesn't receive
|
||||
terminal signals. */
|
||||
if (setsid() == -1)
|
||||
throw SysError("creating a new session");
|
||||
|
||||
/* Dup stderr to stdout. */
|
||||
if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
|
||||
throw SysError("cannot dup stderr into stdout");
|
||||
|
||||
/* Reroute stdin to /dev/null. */
|
||||
int fdDevNull = open(pathNullDevice.c_str(), O_RDWR);
|
||||
if (fdDevNull == -1)
|
||||
throw SysError("cannot open '%1%'", pathNullDevice);
|
||||
if (dup2(fdDevNull, STDIN_FILENO) == -1)
|
||||
throw SysError("cannot dup null device into stdin");
|
||||
close(fdDevNull);
|
||||
}
|
||||
|
||||
}
|
||||
11
src/libstore/unix/build/child.hh
Normal file
11
src/libstore/unix/build/child.hh
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
namespace nix {
|
||||
|
||||
/**
|
||||
* Common initialisation performed in child processes.
|
||||
*/
|
||||
void commonChildInit();
|
||||
|
||||
}
|
||||
1592
src/libstore/unix/build/derivation-goal.cc
Normal file
1592
src/libstore/unix/build/derivation-goal.cc
Normal file
File diff suppressed because it is too large
Load diff
346
src/libstore/unix/build/derivation-goal.hh
Normal file
346
src/libstore/unix/build/derivation-goal.hh
Normal file
|
|
@ -0,0 +1,346 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "parsed-derivations.hh"
|
||||
#include "lock.hh"
|
||||
#include "outputs-spec.hh"
|
||||
#include "store-api.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "goal.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
using std::map;
|
||||
|
||||
struct HookInstance;
|
||||
|
||||
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
|
||||
|
||||
/**
|
||||
* Unless we are repairing, we don't both to test validity and just assume it,
|
||||
* so the choices are `Absent` or `Valid`.
|
||||
*/
|
||||
enum struct PathStatus {
|
||||
Corrupt,
|
||||
Absent,
|
||||
Valid,
|
||||
};
|
||||
|
||||
struct InitialOutputStatus {
|
||||
StorePath path;
|
||||
PathStatus status;
|
||||
/**
|
||||
* Valid in the store, and additionally non-corrupt if we are repairing
|
||||
*/
|
||||
bool isValid() const {
|
||||
return status == PathStatus::Valid;
|
||||
}
|
||||
/**
|
||||
* Merely present, allowed to be corrupt
|
||||
*/
|
||||
bool isPresent() const {
|
||||
return status == PathStatus::Corrupt
|
||||
|| status == PathStatus::Valid;
|
||||
}
|
||||
};
|
||||
|
||||
struct InitialOutput {
|
||||
bool wanted;
|
||||
Hash outputHash;
|
||||
std::optional<InitialOutputStatus> known;
|
||||
};
|
||||
|
||||
/**
|
||||
* A goal for building some or all of the outputs of a derivation.
|
||||
*/
|
||||
struct DerivationGoal : public Goal
|
||||
{
|
||||
/**
|
||||
* Whether to use an on-disk .drv file.
|
||||
*/
|
||||
bool useDerivation;
|
||||
|
||||
/** The path of the derivation. */
|
||||
StorePath drvPath;
|
||||
|
||||
/**
|
||||
* The goal for the corresponding resolved derivation
|
||||
*/
|
||||
std::shared_ptr<DerivationGoal> resolvedDrvGoal;
|
||||
|
||||
/**
|
||||
* The specific outputs that we need to build.
|
||||
*/
|
||||
OutputsSpec wantedOutputs;
|
||||
|
||||
/**
|
||||
* Mapping from input derivations + output names to actual store
|
||||
* paths. This is filled in by waiteeDone() as each dependency
|
||||
* finishes, before inputsRealised() is reached.
|
||||
*/
|
||||
std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;
|
||||
|
||||
/**
|
||||
* See `needRestart`; just for that field.
|
||||
*/
|
||||
enum struct NeedRestartForMoreOutputs {
|
||||
/**
|
||||
* The goal state machine is progressing based on the current value of
|
||||
* `wantedOutputs. No actions are needed.
|
||||
*/
|
||||
OutputsUnmodifedDontNeed,
|
||||
/**
|
||||
* `wantedOutputs` has been extended, but the state machine is
|
||||
* proceeding according to its old value, so we need to restart.
|
||||
*/
|
||||
OutputsAddedDoNeed,
|
||||
/**
|
||||
* The goal state machine has progressed to the point of doing a build,
|
||||
* in which case all outputs will be produced, so extensions to
|
||||
* `wantedOutputs` no longer require a restart.
|
||||
*/
|
||||
BuildInProgressWillNotNeed,
|
||||
};
|
||||
|
||||
/**
|
||||
* Whether additional wanted outputs have been added.
|
||||
*/
|
||||
NeedRestartForMoreOutputs needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;
|
||||
|
||||
/**
|
||||
* See `retrySubstitution`; just for that field.
|
||||
*/
|
||||
enum RetrySubstitution {
|
||||
/**
|
||||
* No issues have yet arose, no need to restart.
|
||||
*/
|
||||
NoNeed,
|
||||
/**
|
||||
* Something failed and there is an incomplete closure. Let's retry
|
||||
* substituting.
|
||||
*/
|
||||
YesNeed,
|
||||
/**
|
||||
* We are current or have already retried substitution, and whether or
|
||||
* not something goes wrong we will not retry again.
|
||||
*/
|
||||
AlreadyRetried,
|
||||
};
|
||||
|
||||
/**
|
||||
* Whether to retry substituting the outputs after building the
|
||||
* inputs. This is done in case of an incomplete closure.
|
||||
*/
|
||||
RetrySubstitution retrySubstitution = RetrySubstitution::NoNeed;
|
||||
|
||||
/**
|
||||
* The derivation stored at drvPath.
|
||||
*/
|
||||
std::unique_ptr<Derivation> drv;
|
||||
|
||||
std::unique_ptr<ParsedDerivation> parsedDrv;
|
||||
|
||||
/**
|
||||
* The remainder is state held during the build.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Locks on (fixed) output paths.
|
||||
*/
|
||||
PathLocks outputLocks;
|
||||
|
||||
/**
|
||||
* All input paths (that is, the union of FS closures of the
|
||||
* immediate input paths).
|
||||
*/
|
||||
StorePathSet inputPaths;
|
||||
|
||||
std::map<std::string, InitialOutput> initialOutputs;
|
||||
|
||||
/**
|
||||
* File descriptor for the log file.
|
||||
*/
|
||||
AutoCloseFD fdLogFile;
|
||||
std::shared_ptr<BufferedSink> logFileSink, logSink;
|
||||
|
||||
/**
|
||||
* Number of bytes received from the builder's stdout/stderr.
|
||||
*/
|
||||
unsigned long logSize;
|
||||
|
||||
/**
|
||||
* The most recent log lines.
|
||||
*/
|
||||
std::list<std::string> logTail;
|
||||
|
||||
std::string currentLogLine;
|
||||
size_t currentLogLinePos = 0; // to handle carriage return
|
||||
|
||||
std::string currentHookLine;
|
||||
|
||||
/**
|
||||
* The build hook.
|
||||
*/
|
||||
std::unique_ptr<HookInstance> hook;
|
||||
|
||||
/**
|
||||
* The sort of derivation we are building.
|
||||
*/
|
||||
std::optional<DerivationType> derivationType;
|
||||
|
||||
typedef void (DerivationGoal::*GoalState)();
|
||||
GoalState state;
|
||||
|
||||
BuildMode buildMode;
|
||||
|
||||
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
|
||||
|
||||
std::unique_ptr<Activity> act;
|
||||
|
||||
/**
|
||||
* Activity that denotes waiting for a lock.
|
||||
*/
|
||||
std::unique_ptr<Activity> actLock;
|
||||
|
||||
std::map<ActivityId, Activity> builderActivities;
|
||||
|
||||
/**
|
||||
* The remote machine on which we're building.
|
||||
*/
|
||||
std::string machineName;
|
||||
|
||||
DerivationGoal(const StorePath & drvPath,
|
||||
const OutputsSpec & wantedOutputs, Worker & worker,
|
||||
BuildMode buildMode = bmNormal);
|
||||
DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
const OutputsSpec & wantedOutputs, Worker & worker,
|
||||
BuildMode buildMode = bmNormal);
|
||||
virtual ~DerivationGoal();
|
||||
|
||||
void timedOut(Error && ex) override;
|
||||
|
||||
std::string key() override;
|
||||
|
||||
void work() override;
|
||||
|
||||
/**
|
||||
* Add wanted outputs to an already existing derivation goal.
|
||||
*/
|
||||
void addWantedOutputs(const OutputsSpec & outputs);
|
||||
|
||||
/**
|
||||
* The states.
|
||||
*/
|
||||
void getDerivation();
|
||||
void loadDerivation();
|
||||
void haveDerivation();
|
||||
void outputsSubstitutionTried();
|
||||
void gaveUpOnSubstitution();
|
||||
void closureRepaired();
|
||||
void inputsRealised();
|
||||
void tryToBuild();
|
||||
virtual void tryLocalBuild();
|
||||
void buildDone();
|
||||
|
||||
void resolvedFinished();
|
||||
|
||||
/**
|
||||
* Is the build hook willing to perform the build?
|
||||
*/
|
||||
HookReply tryBuildHook();
|
||||
|
||||
virtual int getChildStatus();
|
||||
|
||||
/**
|
||||
* Check that the derivation outputs all exist and register them
|
||||
* as valid.
|
||||
*/
|
||||
virtual SingleDrvOutputs registerOutputs();
|
||||
|
||||
/**
|
||||
* Open a log file and a pipe to it.
|
||||
*/
|
||||
Path openLogFile();
|
||||
|
||||
/**
|
||||
* Sign the newly built realisation if the store allows it
|
||||
*/
|
||||
virtual void signRealisation(Realisation&) {}
|
||||
|
||||
/**
|
||||
* Close the log file.
|
||||
*/
|
||||
void closeLogFile();
|
||||
|
||||
/**
|
||||
* Close the read side of the logger pipe.
|
||||
*/
|
||||
virtual void closeReadPipes();
|
||||
|
||||
/**
|
||||
* Cleanup hooks for buildDone()
|
||||
*/
|
||||
virtual void cleanupHookFinally();
|
||||
virtual void cleanupPreChildKill();
|
||||
virtual void cleanupPostChildKill();
|
||||
virtual bool cleanupDecideWhetherDiskFull();
|
||||
virtual void cleanupPostOutputsRegisteredModeCheck();
|
||||
virtual void cleanupPostOutputsRegisteredModeNonCheck();
|
||||
|
||||
virtual bool isReadDesc(int fd);
|
||||
|
||||
/**
|
||||
* Callback used by the worker to write to the log.
|
||||
*/
|
||||
void handleChildOutput(int fd, std::string_view data) override;
|
||||
void handleEOF(int fd) override;
|
||||
void flushLine();
|
||||
|
||||
/**
|
||||
* Wrappers around the corresponding Store methods that first consult the
|
||||
* derivation. This is currently needed because when there is no drv file
|
||||
* there also is no DB entry.
|
||||
*/
|
||||
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
|
||||
OutputPathMap queryDerivationOutputMap();
|
||||
|
||||
/**
|
||||
* Update 'initialOutputs' to determine the current status of the
|
||||
* outputs of the derivation. Also returns a Boolean denoting
|
||||
* whether all outputs are valid and non-corrupt, and a
|
||||
* 'SingleDrvOutputs' structure containing the valid outputs.
|
||||
*/
|
||||
std::pair<bool, SingleDrvOutputs> checkPathValidity();
|
||||
|
||||
/**
|
||||
* Aborts if any output is not valid or corrupt, and otherwise
|
||||
* returns a 'SingleDrvOutputs' structure containing all outputs.
|
||||
*/
|
||||
SingleDrvOutputs assertPathValidity();
|
||||
|
||||
/**
|
||||
* Forcibly kill the child process, if any.
|
||||
*/
|
||||
virtual void killChild();
|
||||
|
||||
void repairClosure();
|
||||
|
||||
void started();
|
||||
|
||||
void done(
|
||||
BuildResult::Status status,
|
||||
SingleDrvOutputs builtOutputs = {},
|
||||
std::optional<Error> ex = {});
|
||||
|
||||
void waiteeDone(GoalPtr waitee, ExitCode result) override;
|
||||
|
||||
StorePathSet exportReferences(const StorePathSet & storePaths);
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
return JobCategory::Build;
|
||||
};
|
||||
};
|
||||
|
||||
MakeError(NotDeterministic, BuildError);
|
||||
|
||||
}
|
||||
167
src/libstore/unix/build/drv-output-substitution-goal.cc
Normal file
167
src/libstore/unix/build/drv-output-substitution-goal.cc
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
#include "drv-output-substitution-goal.hh"
|
||||
#include "finally.hh"
|
||||
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "callback.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(
|
||||
const DrvOutput & id,
|
||||
Worker & worker,
|
||||
RepairFlag repair,
|
||||
std::optional<ContentAddress> ca)
|
||||
: Goal(worker, DerivedPath::Opaque { StorePath::dummy })
|
||||
, id(id)
|
||||
{
|
||||
state = &DrvOutputSubstitutionGoal::init;
|
||||
name = fmt("substitution of '%s'", id.to_string());
|
||||
trace("created");
|
||||
}
|
||||
|
||||
|
||||
void DrvOutputSubstitutionGoal::init()
|
||||
{
|
||||
trace("init");
|
||||
|
||||
/* If the derivation already exists, we’re done */
|
||||
if (worker.store.queryRealisation(id)) {
|
||||
amDone(ecSuccess);
|
||||
return;
|
||||
}
|
||||
|
||||
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
|
||||
tryNext();
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::tryNext()
|
||||
{
|
||||
trace("trying next substituter");
|
||||
|
||||
if (subs.size() == 0) {
|
||||
/* None left. Terminate this goal and let someone else deal
|
||||
with it. */
|
||||
debug("derivation output '%s' is required, but there is no substituter that can provide it", id.to_string());
|
||||
|
||||
/* Hack: don't indicate failure if there were no substituters.
|
||||
In that case the calling derivation should just do a
|
||||
build. */
|
||||
amDone(substituterFailed ? ecFailed : ecNoSubstituters);
|
||||
|
||||
if (substituterFailed) {
|
||||
worker.failedSubstitutions++;
|
||||
worker.updateProgress();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub = subs.front();
|
||||
subs.pop_front();
|
||||
|
||||
// FIXME: Make async
|
||||
// outputInfo = sub->queryRealisation(id);
|
||||
|
||||
/* The callback of the curl download below can outlive `this` (if
|
||||
some other error occurs), so it must not touch `this`. So put
|
||||
the shared state in a separate refcounted object. */
|
||||
downloadState = std::make_shared<DownloadState>();
|
||||
downloadState->outPipe.create();
|
||||
|
||||
sub->queryRealisation(
|
||||
id,
|
||||
{ [downloadState(downloadState)](std::future<std::shared_ptr<const Realisation>> res) {
|
||||
try {
|
||||
Finally updateStats([&]() { downloadState->outPipe.writeSide.close(); });
|
||||
downloadState->promise.set_value(res.get());
|
||||
} catch (...) {
|
||||
downloadState->promise.set_exception(std::current_exception());
|
||||
}
|
||||
} });
|
||||
|
||||
worker.childStarted(shared_from_this(), {downloadState->outPipe.readSide.get()}, true, false);
|
||||
|
||||
state = &DrvOutputSubstitutionGoal::realisationFetched;
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::realisationFetched()
|
||||
{
|
||||
worker.childTerminated(this);
|
||||
|
||||
try {
|
||||
outputInfo = downloadState->promise.get_future().get();
|
||||
} catch (std::exception & e) {
|
||||
printError(e.what());
|
||||
substituterFailed = true;
|
||||
}
|
||||
|
||||
if (!outputInfo) {
|
||||
return tryNext();
|
||||
}
|
||||
|
||||
for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
|
||||
if (depId != id) {
|
||||
if (auto localOutputInfo = worker.store.queryRealisation(depId);
|
||||
localOutputInfo && localOutputInfo->outPath != depPath) {
|
||||
warn(
|
||||
"substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
|
||||
"Local: %s\n"
|
||||
"Remote: %s",
|
||||
sub->getUri(),
|
||||
depId.to_string(),
|
||||
worker.store.printStorePath(localOutputInfo->outPath),
|
||||
worker.store.printStorePath(depPath)
|
||||
);
|
||||
tryNext();
|
||||
return;
|
||||
}
|
||||
addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
|
||||
}
|
||||
}
|
||||
|
||||
addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
|
||||
|
||||
if (waitees.empty()) outPathValid();
|
||||
else state = &DrvOutputSubstitutionGoal::outPathValid;
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::outPathValid()
|
||||
{
|
||||
assert(outputInfo);
|
||||
trace("output path substituted");
|
||||
|
||||
if (nrFailed > 0) {
|
||||
debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
|
||||
amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
|
||||
return;
|
||||
}
|
||||
|
||||
worker.store.registerDrvOutput(*outputInfo);
|
||||
finished();
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::finished()
|
||||
{
|
||||
trace("finished");
|
||||
amDone(ecSuccess);
|
||||
}
|
||||
|
||||
std::string DrvOutputSubstitutionGoal::key()
|
||||
{
|
||||
/* "a$" ensures substitution goals happen before derivation
|
||||
goals. */
|
||||
return "a$" + std::string(id.to_string());
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::work()
|
||||
{
|
||||
(this->*state)();
|
||||
}
|
||||
|
||||
void DrvOutputSubstitutionGoal::handleEOF(int fd)
|
||||
{
|
||||
if (fd == downloadState->outPipe.readSide.get()) worker.wakeUp(shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
81
src/libstore/unix/build/drv-output-substitution-goal.hh
Normal file
81
src/libstore/unix/build/drv-output-substitution-goal.hh
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "store-api.hh"
|
||||
#include "goal.hh"
|
||||
#include "realisation.hh"
|
||||
#include <thread>
|
||||
#include <future>
|
||||
|
||||
namespace nix {
|
||||
|
||||
class Worker;
|
||||
|
||||
/**
|
||||
* Substitution of a derivation output.
|
||||
* This is done in three steps:
|
||||
* 1. Fetch the output info from a substituter
|
||||
* 2. Substitute the corresponding output path
|
||||
* 3. Register the output info
|
||||
*/
|
||||
class DrvOutputSubstitutionGoal : public Goal {
|
||||
|
||||
/**
|
||||
* The drv output we're trying to substitute
|
||||
*/
|
||||
DrvOutput id;
|
||||
|
||||
/**
|
||||
* The realisation corresponding to the given output id.
|
||||
* Will be filled once we can get it.
|
||||
*/
|
||||
std::shared_ptr<const Realisation> outputInfo;
|
||||
|
||||
/**
|
||||
* The remaining substituters.
|
||||
*/
|
||||
std::list<ref<Store>> subs;
|
||||
|
||||
/**
|
||||
* The current substituter.
|
||||
*/
|
||||
std::shared_ptr<Store> sub;
|
||||
|
||||
struct DownloadState
|
||||
{
|
||||
Pipe outPipe;
|
||||
std::promise<std::shared_ptr<const Realisation>> promise;
|
||||
};
|
||||
|
||||
std::shared_ptr<DownloadState> downloadState;
|
||||
|
||||
/**
|
||||
* Whether a substituter failed.
|
||||
*/
|
||||
bool substituterFailed = false;
|
||||
|
||||
public:
|
||||
DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
|
||||
|
||||
typedef void (DrvOutputSubstitutionGoal::*GoalState)();
|
||||
GoalState state;
|
||||
|
||||
void init();
|
||||
void tryNext();
|
||||
void realisationFetched();
|
||||
void outPathValid();
|
||||
void finished();
|
||||
|
||||
void timedOut(Error && ex) override { abort(); };
|
||||
|
||||
std::string key() override;
|
||||
|
||||
void work() override;
|
||||
void handleEOF(int fd) override;
|
||||
|
||||
JobCategory jobCategory() const override {
|
||||
return JobCategory::Substitution;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
140
src/libstore/unix/build/entry-points.cc
Normal file
140
src/libstore/unix/build/entry-points.cc
Normal file
|
|
@ -0,0 +1,140 @@
|
|||
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "derivation-goal.hh"
|
||||
#include "local-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode, std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
Worker worker(*this, evalStore ? *evalStore : *this);
|
||||
|
||||
Goals goals;
|
||||
for (auto & br : reqs)
|
||||
goals.insert(worker.makeGoal(br, buildMode));
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
StringSet failed;
|
||||
std::optional<Error> ex;
|
||||
for (auto & i : goals) {
|
||||
if (i->ex) {
|
||||
if (ex)
|
||||
logError(i->ex->info());
|
||||
else
|
||||
ex = std::move(i->ex);
|
||||
}
|
||||
if (i->exitCode != Goal::ecSuccess) {
|
||||
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get()))
|
||||
failed.insert(printStorePath(i2->drvPath));
|
||||
else if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get()))
|
||||
failed.insert(printStorePath(i2->storePath));
|
||||
}
|
||||
}
|
||||
|
||||
if (failed.size() == 1 && ex) {
|
||||
ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*ex);
|
||||
} else if (!failed.empty()) {
|
||||
if (ex) logError(ex->info());
|
||||
throw Error(worker.failingExitStatus(), "build of %s failed", concatStringsSep(", ", quoteStrings(failed)));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<KeyedBuildResult> Store::buildPathsWithResults(
|
||||
const std::vector<DerivedPath> & reqs,
|
||||
BuildMode buildMode,
|
||||
std::shared_ptr<Store> evalStore)
|
||||
{
|
||||
Worker worker(*this, evalStore ? *evalStore : *this);
|
||||
|
||||
Goals goals;
|
||||
std::vector<std::pair<const DerivedPath &, GoalPtr>> state;
|
||||
|
||||
for (const auto & req : reqs) {
|
||||
auto goal = worker.makeGoal(req, buildMode);
|
||||
goals.insert(goal);
|
||||
state.push_back({req, goal});
|
||||
}
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
std::vector<KeyedBuildResult> results;
|
||||
|
||||
for (auto & [req, goalPtr] : state)
|
||||
results.emplace_back(KeyedBuildResult {
|
||||
goalPtr->getBuildResult(req),
|
||||
/* .path = */ req,
|
||||
});
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||
BuildMode buildMode)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, OutputsSpec::All {}, buildMode);
|
||||
|
||||
try {
|
||||
worker.run(Goals{goal});
|
||||
return goal->getBuildResult(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(drvPath),
|
||||
.outputs = OutputsSpec::All {},
|
||||
});
|
||||
} catch (Error & e) {
|
||||
return BuildResult {
|
||||
.status = BuildResult::MiscFailure,
|
||||
.errorMsg = e.msg(),
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
void Store::ensurePath(const StorePath & path)
|
||||
{
|
||||
/* If the path is already valid, we're done. */
|
||||
if (isValidPath(path)) return;
|
||||
|
||||
Worker worker(*this, *this);
|
||||
GoalPtr goal = worker.makePathSubstitutionGoal(path);
|
||||
Goals goals = {goal};
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
if (goal->exitCode != Goal::ecSuccess) {
|
||||
if (goal->ex) {
|
||||
goal->ex->withExitStatus(worker.failingExitStatus());
|
||||
throw std::move(*goal->ex);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Store::repairPath(const StorePath & path)
|
||||
{
|
||||
Worker worker(*this, *this);
|
||||
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
|
||||
Goals goals = {goal};
|
||||
|
||||
worker.run(goals);
|
||||
|
||||
if (goal->exitCode != Goal::ecSuccess) {
|
||||
/* Since substituting the path didn't work, if we have a valid
|
||||
deriver, then rebuild the deriver. */
|
||||
auto info = queryPathInfo(path);
|
||||
if (info->deriver && isValidPath(*info->deriver)) {
|
||||
goals.clear();
|
||||
goals.insert(worker.makeGoal(DerivedPath::Built {
|
||||
.drvPath = makeConstantStorePathRef(*info->deriver),
|
||||
// FIXME: Should just build the specific output we need.
|
||||
.outputs = OutputsSpec::All { },
|
||||
}, bmRepair));
|
||||
worker.run(goals);
|
||||
} else
|
||||
throw Error(worker.failingExitStatus(), "cannot repair path '%s'", printStorePath(path));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
109
src/libstore/unix/build/goal.cc
Normal file
109
src/libstore/unix/build/goal.cc
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
#include "goal.hh"
|
||||
#include "worker.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
|
||||
std::string s1 = a->key();
|
||||
std::string s2 = b->key();
|
||||
return s1 < s2;
|
||||
}
|
||||
|
||||
|
||||
BuildResult Goal::getBuildResult(const DerivedPath & req) const {
|
||||
BuildResult res { buildResult };
|
||||
|
||||
if (auto pbp = std::get_if<DerivedPath::Built>(&req)) {
|
||||
auto & bp = *pbp;
|
||||
|
||||
/* Because goals are in general shared between derived paths
|
||||
that share the same derivation, we need to filter their
|
||||
results to get back just the results we care about.
|
||||
*/
|
||||
|
||||
for (auto it = res.builtOutputs.begin(); it != res.builtOutputs.end();) {
|
||||
if (bp.outputs.contains(it->first))
|
||||
++it;
|
||||
else
|
||||
it = res.builtOutputs.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
|
||||
{
|
||||
if (goals.find(p) != goals.end())
|
||||
return;
|
||||
goals.insert(p);
|
||||
}
|
||||
|
||||
|
||||
void Goal::addWaitee(GoalPtr waitee)
|
||||
{
|
||||
waitees.insert(waitee);
|
||||
addToWeakGoals(waitee->waiters, shared_from_this());
|
||||
}
|
||||
|
||||
|
||||
void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
|
||||
{
|
||||
assert(waitees.count(waitee));
|
||||
waitees.erase(waitee);
|
||||
|
||||
trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size()));
|
||||
|
||||
if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
|
||||
|
||||
if (result == ecNoSubstituters) ++nrNoSubstituters;
|
||||
|
||||
if (result == ecIncompleteClosure) ++nrIncompleteClosure;
|
||||
|
||||
if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
|
||||
|
||||
/* If we failed and keepGoing is not set, we remove all
|
||||
remaining waitees. */
|
||||
for (auto & goal : waitees) {
|
||||
goal->waiters.extract(shared_from_this());
|
||||
}
|
||||
waitees.clear();
|
||||
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Goal::amDone(ExitCode result, std::optional<Error> ex)
|
||||
{
|
||||
trace("done");
|
||||
assert(exitCode == ecBusy);
|
||||
assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
|
||||
exitCode = result;
|
||||
|
||||
if (ex) {
|
||||
if (!waiters.empty())
|
||||
logError(ex->info());
|
||||
else
|
||||
this->ex = std::move(*ex);
|
||||
}
|
||||
|
||||
for (auto & i : waiters) {
|
||||
GoalPtr goal = i.lock();
|
||||
if (goal) goal->waiteeDone(shared_from_this(), result);
|
||||
}
|
||||
waiters.clear();
|
||||
worker.removeGoal(shared_from_this());
|
||||
|
||||
cleanup();
|
||||
}
|
||||
|
||||
|
||||
void Goal::trace(std::string_view s)
|
||||
{
|
||||
debug("%1%: %2%", name, s);
|
||||
}
|
||||
|
||||
}
|
||||
180
src/libstore/unix/build/goal.hh
Normal file
180
src/libstore/unix/build/goal.hh
Normal file
|
|
@ -0,0 +1,180 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "types.hh"
|
||||
#include "store-api.hh"
|
||||
#include "build-result.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
/**
|
||||
* Forward definition.
|
||||
*/
|
||||
struct Goal;
|
||||
class Worker;
|
||||
|
||||
/**
|
||||
* A pointer to a goal.
|
||||
*/
|
||||
typedef std::shared_ptr<Goal> GoalPtr;
|
||||
typedef std::weak_ptr<Goal> WeakGoalPtr;
|
||||
|
||||
struct CompareGoalPtrs {
|
||||
bool operator() (const GoalPtr & a, const GoalPtr & b) const;
|
||||
};
|
||||
|
||||
/**
|
||||
* Set of goals.
|
||||
*/
|
||||
typedef std::set<GoalPtr, CompareGoalPtrs> Goals;
|
||||
typedef std::set<WeakGoalPtr, std::owner_less<WeakGoalPtr>> WeakGoals;
|
||||
|
||||
/**
|
||||
* A map of paths to goals (and the other way around).
|
||||
*/
|
||||
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
|
||||
|
||||
/**
|
||||
* Used as a hint to the worker on how to schedule a particular goal. For example,
|
||||
* builds are typically CPU- and memory-bound, while substitutions are I/O bound.
|
||||
* Using this information, the worker might decide to schedule more or fewer goals
|
||||
* of each category in parallel.
|
||||
*/
|
||||
enum struct JobCategory {
|
||||
/**
|
||||
* A build of a derivation; it will use CPU and disk resources.
|
||||
*/
|
||||
Build,
|
||||
/**
|
||||
* A substitution an arbitrary store object; it will use network resources.
|
||||
*/
|
||||
Substitution,
|
||||
};
|
||||
|
||||
struct Goal : public std::enable_shared_from_this<Goal>
|
||||
{
|
||||
typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
|
||||
|
||||
/**
|
||||
* Backlink to the worker.
|
||||
*/
|
||||
Worker & worker;
|
||||
|
||||
/**
|
||||
* Goals that this goal is waiting for.
|
||||
*/
|
||||
Goals waitees;
|
||||
|
||||
/**
|
||||
* Goals waiting for this one to finish. Must use weak pointers
|
||||
* here to prevent cycles.
|
||||
*/
|
||||
WeakGoals waiters;
|
||||
|
||||
/**
|
||||
* Number of goals we are/were waiting for that have failed.
|
||||
*/
|
||||
size_t nrFailed = 0;
|
||||
|
||||
/**
|
||||
* Number of substitution goals we are/were waiting for that
|
||||
* failed because there are no substituters.
|
||||
*/
|
||||
size_t nrNoSubstituters = 0;
|
||||
|
||||
/**
|
||||
* Number of substitution goals we are/were waiting for that
|
||||
* failed because they had unsubstitutable references.
|
||||
*/
|
||||
size_t nrIncompleteClosure = 0;
|
||||
|
||||
/**
|
||||
* Name of this goal for debugging purposes.
|
||||
*/
|
||||
std::string name;
|
||||
|
||||
/**
|
||||
* Whether the goal is finished.
|
||||
*/
|
||||
ExitCode exitCode = ecBusy;
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Build result.
|
||||
*/
|
||||
BuildResult buildResult;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Project a `BuildResult` with just the information that pertains
|
||||
* to the given request.
|
||||
*
|
||||
* In general, goals may be aliased between multiple requests, and
|
||||
* the stored `BuildResult` has information for the union of all
|
||||
* requests. We don't want to leak what the other request are for
|
||||
* sake of both privacy and determinism, and this "safe accessor"
|
||||
* ensures we don't.
|
||||
*/
|
||||
BuildResult getBuildResult(const DerivedPath &) const;
|
||||
|
||||
/**
|
||||
* Exception containing an error message, if any.
|
||||
*/
|
||||
std::optional<Error> ex;
|
||||
|
||||
Goal(Worker & worker, DerivedPath path)
|
||||
: worker(worker)
|
||||
{ }
|
||||
|
||||
virtual ~Goal()
|
||||
{
|
||||
trace("goal destroyed");
|
||||
}
|
||||
|
||||
virtual void work() = 0;
|
||||
|
||||
void addWaitee(GoalPtr waitee);
|
||||
|
||||
virtual void waiteeDone(GoalPtr waitee, ExitCode result);
|
||||
|
||||
virtual void handleChildOutput(int fd, std::string_view data)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
virtual void handleEOF(int fd)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
void trace(std::string_view s);
|
||||
|
||||
std::string getName() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback in case of a timeout. It should wake up its waiters,
|
||||
* get rid of any running child processes that are being monitored
|
||||
* by the worker (important!), etc.
|
||||
*/
|
||||
virtual void timedOut(Error && ex) = 0;
|
||||
|
||||
virtual std::string key() = 0;
|
||||
|
||||
void amDone(ExitCode result, std::optional<Error> ex = {});
|
||||
|
||||
virtual void cleanup() { }
|
||||
|
||||
/**
|
||||
* @brief Hint for the scheduler, which concurrency limit applies.
|
||||
* @see JobCategory
|
||||
*/
|
||||
virtual JobCategory jobCategory() const = 0;
|
||||
};
|
||||
|
||||
void addToWeakGoals(WeakGoals & goals, GoalPtr p);
|
||||
|
||||
}
|
||||
88
src/libstore/unix/build/hook-instance.cc
Normal file
88
src/libstore/unix/build/hook-instance.cc
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
#include "globals.hh"
|
||||
#include "hook-instance.hh"
|
||||
#include "file-system.hh"
|
||||
#include "child.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
HookInstance::HookInstance()
|
||||
{
|
||||
debug("starting build hook '%s'", concatStringsSep(" ", settings.buildHook.get()));
|
||||
|
||||
auto buildHookArgs = settings.buildHook.get();
|
||||
|
||||
if (buildHookArgs.empty())
|
||||
throw Error("'build-hook' setting is empty");
|
||||
|
||||
auto buildHook = canonPath(buildHookArgs.front());
|
||||
buildHookArgs.pop_front();
|
||||
|
||||
Strings args;
|
||||
args.push_back(std::string(baseNameOf(buildHook)));
|
||||
|
||||
for (auto & arg : buildHookArgs)
|
||||
args.push_back(arg);
|
||||
|
||||
args.push_back(std::to_string(verbosity));
|
||||
|
||||
/* Create a pipe to get the output of the child. */
|
||||
fromHook.create();
|
||||
|
||||
/* Create the communication pipes. */
|
||||
toHook.create();
|
||||
|
||||
/* Create a pipe to get the output of the builder. */
|
||||
builderOut.create();
|
||||
|
||||
/* Fork the hook. */
|
||||
pid = startProcess([&]() {
|
||||
|
||||
if (dup2(fromHook.writeSide.get(), STDERR_FILENO) == -1)
|
||||
throw SysError("cannot pipe standard error into log file");
|
||||
|
||||
commonChildInit();
|
||||
|
||||
if (chdir("/") == -1) throw SysError("changing into /");
|
||||
|
||||
/* Dup the communication pipes. */
|
||||
if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1)
|
||||
throw SysError("dupping to-hook read side");
|
||||
|
||||
/* Use fd 4 for the builder's stdout/stderr. */
|
||||
if (dup2(builderOut.writeSide.get(), 4) == -1)
|
||||
throw SysError("dupping builder's stdout/stderr");
|
||||
|
||||
/* Hack: pass the read side of that fd to allow build-remote
|
||||
to read SSH error messages. */
|
||||
if (dup2(builderOut.readSide.get(), 5) == -1)
|
||||
throw SysError("dupping builder's stdout/stderr");
|
||||
|
||||
execv(buildHook.c_str(), stringsToCharPtrs(args).data());
|
||||
|
||||
throw SysError("executing '%s'", buildHook);
|
||||
});
|
||||
|
||||
pid.setSeparatePG(true);
|
||||
fromHook.writeSide = -1;
|
||||
toHook.readSide = -1;
|
||||
|
||||
sink = FdSink(toHook.writeSide.get());
|
||||
std::map<std::string, Config::SettingInfo> settings;
|
||||
globalConfig.getSettings(settings);
|
||||
for (auto & setting : settings)
|
||||
sink << 1 << setting.first << setting.second.value;
|
||||
sink << 0;
|
||||
}
|
||||
|
||||
|
||||
HookInstance::~HookInstance()
|
||||
{
|
||||
try {
|
||||
toHook.writeSide = -1;
|
||||
if (pid != -1) pid.kill();
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
41
src/libstore/unix/build/hook-instance.hh
Normal file
41
src/libstore/unix/build/hook-instance.hh
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "logging.hh"
|
||||
#include "serialise.hh"
|
||||
#include "processes.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct HookInstance
|
||||
{
|
||||
/**
|
||||
* Pipes for talking to the build hook.
|
||||
*/
|
||||
Pipe toHook;
|
||||
|
||||
/**
|
||||
* Pipe for the hook's standard output/error.
|
||||
*/
|
||||
Pipe fromHook;
|
||||
|
||||
/**
|
||||
* Pipe for the builder's standard output/error.
|
||||
*/
|
||||
Pipe builderOut;
|
||||
|
||||
/**
|
||||
* The process ID of the hook.
|
||||
*/
|
||||
Pid pid;
|
||||
|
||||
FdSink sink;
|
||||
|
||||
std::map<ActivityId, Activity> activities;
|
||||
|
||||
HookInstance();
|
||||
|
||||
~HookInstance();
|
||||
};
|
||||
|
||||
}
|
||||
2993
src/libstore/unix/build/local-derivation-goal.cc
Normal file
2993
src/libstore/unix/build/local-derivation-goal.cc
Normal file
File diff suppressed because it is too large
Load diff
296
src/libstore/unix/build/local-derivation-goal.hh
Normal file
296
src/libstore/unix/build/local-derivation-goal.hh
Normal file
|
|
@ -0,0 +1,296 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "derivation-goal.hh"
|
||||
#include "local-store.hh"
|
||||
#include "processes.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct LocalDerivationGoal : public DerivationGoal
|
||||
{
|
||||
LocalStore & getLocalStore();
|
||||
|
||||
/**
|
||||
* User selected for running the builder.
|
||||
*/
|
||||
std::unique_ptr<UserLock> buildUser;
|
||||
|
||||
/**
|
||||
* The process ID of the builder.
|
||||
*/
|
||||
Pid pid;
|
||||
|
||||
/**
|
||||
* The cgroup of the builder, if any.
|
||||
*/
|
||||
std::optional<Path> cgroup;
|
||||
|
||||
/**
|
||||
* The temporary directory.
|
||||
*/
|
||||
Path tmpDir;
|
||||
|
||||
/**
|
||||
* The path of the temporary directory in the sandbox.
|
||||
*/
|
||||
Path tmpDirInSandbox;
|
||||
|
||||
/**
|
||||
* Master side of the pseudoterminal used for the builder's
|
||||
* standard output/error.
|
||||
*/
|
||||
AutoCloseFD builderOut;
|
||||
|
||||
/**
|
||||
* Pipe for synchronising updates to the builder namespaces.
|
||||
*/
|
||||
Pipe userNamespaceSync;
|
||||
|
||||
/**
|
||||
* The mount namespace and user namespace of the builder, used to add additional
|
||||
* paths to the sandbox as a result of recursive Nix calls.
|
||||
*/
|
||||
AutoCloseFD sandboxMountNamespace;
|
||||
AutoCloseFD sandboxUserNamespace;
|
||||
|
||||
/**
|
||||
* On Linux, whether we're doing the build in its own user
|
||||
* namespace.
|
||||
*/
|
||||
bool usingUserNamespace = true;
|
||||
|
||||
/**
|
||||
* Whether we're currently doing a chroot build.
|
||||
*/
|
||||
bool useChroot = false;
|
||||
|
||||
Path chrootRootDir;
|
||||
|
||||
/**
|
||||
* RAII object to delete the chroot directory.
|
||||
*/
|
||||
std::shared_ptr<AutoDelete> autoDelChroot;
|
||||
|
||||
/**
|
||||
* Whether to run the build in a private network namespace.
|
||||
*/
|
||||
bool privateNetwork = false;
|
||||
|
||||
/**
|
||||
* Stuff we need to pass to initChild().
|
||||
*/
|
||||
struct ChrootPath {
|
||||
Path source;
|
||||
bool optional;
|
||||
ChrootPath(Path source = "", bool optional = false)
|
||||
: source(source), optional(optional)
|
||||
{ }
|
||||
};
|
||||
typedef map<Path, ChrootPath> PathsInChroot; // maps target path to source path
|
||||
PathsInChroot pathsInChroot;
|
||||
|
||||
typedef map<std::string, std::string> Environment;
|
||||
Environment env;
|
||||
|
||||
#if __APPLE__
|
||||
typedef std::string SandboxProfile;
|
||||
SandboxProfile additionalSandboxProfile;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Hash rewriting.
|
||||
*/
|
||||
StringMap inputRewrites, outputRewrites;
|
||||
typedef map<StorePath, StorePath> RedirectedOutputs;
|
||||
RedirectedOutputs redirectedOutputs;
|
||||
|
||||
/**
|
||||
* The output paths used during the build.
|
||||
*
|
||||
* - Input-addressed derivations or fixed content-addressed outputs are
|
||||
* sometimes built when some of their outputs already exist, and can not
|
||||
* be hidden via sandboxing. We use temporary locations instead and
|
||||
* rewrite after the build. Otherwise the regular predetermined paths are
|
||||
* put here.
|
||||
*
|
||||
* - Floating content-addressed derivations do not know their final build
|
||||
* output paths until the outputs are hashed, so random locations are
|
||||
* used, and then renamed. The randomness helps guard against hidden
|
||||
* self-references.
|
||||
*/
|
||||
OutputPathMap scratchOutputs;
|
||||
|
||||
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
|
||||
gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
|
||||
|
||||
const static Path homeDir;
|
||||
|
||||
/**
|
||||
* The recursive Nix daemon socket.
|
||||
*/
|
||||
AutoCloseFD daemonSocket;
|
||||
|
||||
/**
|
||||
* The daemon main thread.
|
||||
*/
|
||||
std::thread daemonThread;
|
||||
|
||||
/**
|
||||
* The daemon worker threads.
|
||||
*/
|
||||
std::vector<std::thread> daemonWorkerThreads;
|
||||
|
||||
/**
|
||||
* Paths that were added via recursive Nix calls.
|
||||
*/
|
||||
StorePathSet addedPaths;
|
||||
|
||||
/**
|
||||
* Realisations that were added via recursive Nix calls.
|
||||
*/
|
||||
std::set<DrvOutput> addedDrvOutputs;
|
||||
|
||||
/**
|
||||
* Recursive Nix calls are only allowed to build or realize paths
|
||||
* in the original input closure or added via a recursive Nix call
|
||||
* (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
|
||||
* /nix/store/<bla> is some arbitrary path in a binary cache).
|
||||
*/
|
||||
bool isAllowed(const StorePath & path)
|
||||
{
|
||||
return inputPaths.count(path) || addedPaths.count(path);
|
||||
}
|
||||
bool isAllowed(const DrvOutput & id)
|
||||
{
|
||||
return addedDrvOutputs.count(id);
|
||||
}
|
||||
|
||||
bool isAllowed(const DerivedPath & req);
|
||||
|
||||
friend struct RestrictedStore;
|
||||
|
||||
using DerivationGoal::DerivationGoal;
|
||||
|
||||
virtual ~LocalDerivationGoal() override;
|
||||
|
||||
/**
|
||||
* Whether we need to perform hash rewriting if there are valid output paths.
|
||||
*/
|
||||
bool needsHashRewrite();
|
||||
|
||||
/**
|
||||
* The additional states.
|
||||
*/
|
||||
void tryLocalBuild() override;
|
||||
|
||||
/**
|
||||
* Start building a derivation.
|
||||
*/
|
||||
void startBuilder();
|
||||
|
||||
/**
|
||||
* Fill in the environment for the builder.
|
||||
*/
|
||||
void initEnv();
|
||||
|
||||
/**
|
||||
* Setup tmp dir location.
|
||||
*/
|
||||
void initTmpDir();
|
||||
|
||||
/**
|
||||
* Write a JSON file containing the derivation attributes.
|
||||
*/
|
||||
void writeStructuredAttrs();
|
||||
|
||||
void startDaemon();
|
||||
|
||||
void stopDaemon();
|
||||
|
||||
/**
|
||||
* Add 'path' to the set of paths that may be referenced by the
|
||||
* outputs, and make it appear in the sandbox.
|
||||
*/
|
||||
void addDependency(const StorePath & path);
|
||||
|
||||
/**
|
||||
* Make a file owned by the builder.
|
||||
*/
|
||||
void chownToBuilder(const Path & path);
|
||||
|
||||
int getChildStatus() override;
|
||||
|
||||
/**
|
||||
* Run the builder's process.
|
||||
*/
|
||||
void runChild();
|
||||
|
||||
/**
|
||||
* Check that the derivation outputs all exist and register them
|
||||
* as valid.
|
||||
*/
|
||||
SingleDrvOutputs registerOutputs() override;
|
||||
|
||||
void signRealisation(Realisation &) override;
|
||||
|
||||
/**
|
||||
* Check that an output meets the requirements specified by the
|
||||
* 'outputChecks' attribute (or the legacy
|
||||
* '{allowed,disallowed}{References,Requisites}' attributes).
|
||||
*/
|
||||
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
|
||||
|
||||
/**
|
||||
* Close the read side of the logger pipe.
|
||||
*/
|
||||
void closeReadPipes() override;
|
||||
|
||||
/**
|
||||
* Cleanup hooks for buildDone()
|
||||
*/
|
||||
void cleanupHookFinally() override;
|
||||
void cleanupPreChildKill() override;
|
||||
void cleanupPostChildKill() override;
|
||||
bool cleanupDecideWhetherDiskFull() override;
|
||||
void cleanupPostOutputsRegisteredModeCheck() override;
|
||||
void cleanupPostOutputsRegisteredModeNonCheck() override;
|
||||
|
||||
bool isReadDesc(int fd) override;
|
||||
|
||||
/**
|
||||
* Delete the temporary directory, if we have one.
|
||||
*/
|
||||
void deleteTmpDir(bool force);
|
||||
|
||||
/**
|
||||
* Forcibly kill the child process, if any.
|
||||
*
|
||||
* Called by destructor, can't be overridden
|
||||
*/
|
||||
void killChild() override final;
|
||||
|
||||
/**
|
||||
* Kill any processes running under the build user UID or in the
|
||||
* cgroup of the build.
|
||||
*/
|
||||
void killSandbox(bool getStats);
|
||||
|
||||
/**
|
||||
* Create alternative path calculated from but distinct from the
|
||||
* input, so we can avoid overwriting outputs (or other store paths)
|
||||
* that already exist.
|
||||
*/
|
||||
StorePath makeFallbackPath(const StorePath & path);
|
||||
|
||||
/**
|
||||
* Make a path to another based on the output name along with the
|
||||
* derivation hash.
|
||||
*
|
||||
* @todo Add option to randomize, so we can audit whether our
|
||||
* rewrites caught everything
|
||||
*/
|
||||
StorePath makeFallbackPath(OutputNameView outputName);
|
||||
};
|
||||
|
||||
}
|
||||
45
src/libstore/unix/build/personality.cc
Normal file
45
src/libstore/unix/build/personality.cc
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#include "personality.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#if __linux__
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/personality.h>
|
||||
#endif
|
||||
|
||||
#include <cstring>
|
||||
|
||||
namespace nix {
|
||||
|
||||
void setPersonality(std::string_view system)
|
||||
{
|
||||
#if __linux__
|
||||
/* Change the personality to 32-bit if we're doing an
|
||||
i686-linux build on an x86_64-linux machine. */
|
||||
struct utsname utsbuf;
|
||||
uname(&utsbuf);
|
||||
if ((system == "i686-linux"
|
||||
&& (std::string_view(SYSTEM) == "x86_64-linux"
|
||||
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|
||||
|| system == "armv7l-linux"
|
||||
|| system == "armv6l-linux"
|
||||
|| system == "armv5tel-linux")
|
||||
{
|
||||
if (personality(PER_LINUX32) == -1)
|
||||
throw SysError("cannot set 32-bit personality");
|
||||
}
|
||||
|
||||
/* Impersonate a Linux 2.6 machine to get some determinism in
|
||||
builds that depend on the kernel version. */
|
||||
if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
|
||||
int cur = personality(0xffffffff);
|
||||
if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
|
||||
}
|
||||
|
||||
/* Disable address space randomization for improved
|
||||
determinism. */
|
||||
int cur = personality(0xffffffff);
|
||||
if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
12
src/libstore/unix/build/personality.hh
Normal file
12
src/libstore/unix/build/personality.hh
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace nix {
|
||||
|
||||
void setPersonality(std::string_view system);
|
||||
|
||||
}
|
||||
|
||||
|
||||
111
src/libstore/unix/build/sandbox-defaults.sb
Normal file
111
src/libstore/unix/build/sandbox-defaults.sb
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
R""(
|
||||
|
||||
(define TMPDIR (param "_GLOBAL_TMP_DIR"))
|
||||
|
||||
(deny default)
|
||||
|
||||
; Disallow creating setuid/setgid binaries, since that
|
||||
; would allow breaking build user isolation.
|
||||
(deny file-write-setugid)
|
||||
|
||||
; Allow forking.
|
||||
(allow process-fork)
|
||||
|
||||
; Allow reading system information like #CPUs, etc.
|
||||
(allow sysctl-read)
|
||||
|
||||
; Allow POSIX semaphores and shared memory.
|
||||
(allow ipc-posix*)
|
||||
|
||||
; Allow socket creation.
|
||||
(allow system-socket)
|
||||
|
||||
; Allow sending signals within the sandbox.
|
||||
(allow signal (target same-sandbox))
|
||||
|
||||
; Allow getpwuid.
|
||||
(allow mach-lookup (global-name "com.apple.system.opendirectoryd.libinfo"))
|
||||
|
||||
; Access to /tmp.
|
||||
; The network-outbound/network-inbound ones are for unix domain sockets, which
|
||||
; we allow access to in TMPDIR (but if we allow them more broadly, you could in
|
||||
; theory escape the sandbox)
|
||||
(allow file* process-exec network-outbound network-inbound
|
||||
(literal "/tmp") (subpath TMPDIR))
|
||||
|
||||
; Some packages like to read the system version.
|
||||
(allow file-read*
|
||||
(literal "/System/Library/CoreServices/SystemVersion.plist")
|
||||
(literal "/System/Library/CoreServices/SystemVersionCompat.plist"))
|
||||
|
||||
; Without this line clang cannot write to /dev/null, breaking some configure tests.
|
||||
(allow file-read-metadata (literal "/dev"))
|
||||
|
||||
; Many packages like to do local networking in their test suites, but let's only
|
||||
; allow it if the package explicitly asks for it.
|
||||
(if (param "_ALLOW_LOCAL_NETWORKING")
|
||||
(begin
|
||||
(allow network* (remote ip "localhost:*"))
|
||||
|
||||
; Allow access to /etc/resolv.conf (which is a symlink to
|
||||
; /private/var/run/resolv.conf).
|
||||
; TODO: deduplicate with sandbox-network.sb
|
||||
(allow file-read-metadata
|
||||
(literal "/var")
|
||||
(literal "/etc")
|
||||
(literal "/etc/resolv.conf")
|
||||
(literal "/private/etc/resolv.conf"))
|
||||
|
||||
(allow file-read*
|
||||
(literal "/private/var/run/resolv.conf"))
|
||||
|
||||
; Allow DNS lookups. This is even needed for localhost, which lots of tests rely on
|
||||
(allow file-read-metadata (literal "/etc/hosts"))
|
||||
(allow file-read* (literal "/private/etc/hosts"))
|
||||
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))))
|
||||
|
||||
; Standard devices.
|
||||
(allow file*
|
||||
(literal "/dev/null")
|
||||
(literal "/dev/random")
|
||||
(literal "/dev/stderr")
|
||||
(literal "/dev/stdin")
|
||||
(literal "/dev/stdout")
|
||||
(literal "/dev/tty")
|
||||
(literal "/dev/urandom")
|
||||
(literal "/dev/zero")
|
||||
(subpath "/dev/fd"))
|
||||
|
||||
; Allow pseudo-terminals.
|
||||
(allow file*
|
||||
(literal "/dev/ptmx")
|
||||
(regex #"^/dev/pty[a-z]+")
|
||||
(regex #"^/dev/ttys[0-9]+"))
|
||||
|
||||
; Does nothing, but reduces build noise.
|
||||
(allow file* (literal "/dev/dtracehelper"))
|
||||
|
||||
; Allow access to zoneinfo since libSystem needs it.
|
||||
(allow file-read* (subpath "/usr/share/zoneinfo"))
|
||||
|
||||
(allow file-read* (subpath "/usr/share/locale"))
|
||||
|
||||
; This is mostly to get more specific log messages when builds try to
|
||||
; access something in /etc or /var.
|
||||
(allow file-read-metadata
|
||||
(literal "/etc")
|
||||
(literal "/var")
|
||||
(literal "/private/var/tmp"))
|
||||
|
||||
; This is used by /bin/sh on macOS 10.15 and later.
|
||||
(allow file*
|
||||
(literal "/private/var/select/sh"))
|
||||
|
||||
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin (and vice versa).
|
||||
(allow file-read*
|
||||
(subpath "/Library/Apple/usr/libexec/oah")
|
||||
(subpath "/System/Library/Apple/usr/libexec/oah")
|
||||
(subpath "/System/Library/LaunchDaemons/com.apple.oahd.plist")
|
||||
(subpath "/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist"))
|
||||
|
||||
)""
|
||||
9
src/libstore/unix/build/sandbox-minimal.sb
Normal file
9
src/libstore/unix/build/sandbox-minimal.sb
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
R""(
|
||||
|
||||
(allow default)
|
||||
|
||||
; Disallow creating setuid/setgid binaries, since that
|
||||
; would allow breaking build user isolation.
|
||||
(deny file-write-setugid)
|
||||
|
||||
)""
|
||||
24
src/libstore/unix/build/sandbox-network.sb
Normal file
24
src/libstore/unix/build/sandbox-network.sb
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
R""(
|
||||
|
||||
; Allow local and remote network traffic.
|
||||
(allow network* (local ip) (remote ip))
|
||||
|
||||
; Allow access to /etc/resolv.conf (which is a symlink to
|
||||
; /private/var/run/resolv.conf).
|
||||
(allow file-read-metadata
|
||||
(literal "/var")
|
||||
(literal "/etc")
|
||||
(literal "/etc/resolv.conf")
|
||||
(literal "/private/etc/resolv.conf"))
|
||||
|
||||
(allow file-read*
|
||||
(literal "/private/var/run/resolv.conf"))
|
||||
|
||||
; Allow DNS lookups.
|
||||
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))
|
||||
|
||||
; Allow access to trustd.
|
||||
(allow mach-lookup (global-name "com.apple.trustd"))
|
||||
(allow mach-lookup (global-name "com.apple.trustd.agent"))
|
||||
|
||||
)""
|
||||
324
src/libstore/unix/build/substitution-goal.cc
Normal file
324
src/libstore/unix/build/substitution-goal.cc
Normal file
|
|
@ -0,0 +1,324 @@
|
|||
#include "worker.hh"
|
||||
#include "substitution-goal.hh"
|
||||
#include "nar-info.hh"
|
||||
#include "finally.hh"
|
||||
#include "signals.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||
: Goal(worker, DerivedPath::Opaque { storePath })
|
||||
, storePath(storePath)
|
||||
, repair(repair)
|
||||
, ca(ca)
|
||||
{
|
||||
state = &PathSubstitutionGoal::init;
|
||||
name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
|
||||
trace("created");
|
||||
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
|
||||
}
|
||||
|
||||
|
||||
PathSubstitutionGoal::~PathSubstitutionGoal()
|
||||
{
|
||||
cleanup();
|
||||
}
|
||||
|
||||
|
||||
void PathSubstitutionGoal::done(
|
||||
ExitCode result,
|
||||
BuildResult::Status status,
|
||||
std::optional<std::string> errorMsg)
|
||||
{
|
||||
buildResult.status = status;
|
||||
if (errorMsg) {
|
||||
        debug(*errorMsg);
        buildResult.errorMsg = *errorMsg;
    }
    amDone(result);
}


void PathSubstitutionGoal::work()
{
    (this->*state)();
}


void PathSubstitutionGoal::init()
{
    trace("init");

    worker.store.addTempRoot(storePath);

    /* If the path already exists we're done. */
    if (!repair && worker.store.isValidPath(storePath)) {
        done(ecSuccess, BuildResult::AlreadyValid);
        return;
    }

    if (settings.readOnlyMode)
        throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));

    subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();

    tryNext();
}


void PathSubstitutionGoal::tryNext()
{
    trace("trying next substituter");

    cleanup();

    if (subs.size() == 0) {
        /* None left.  Terminate this goal and let someone else deal
           with it. */

        /* Hack: don't indicate failure if there were no substituters.
           In that case the calling derivation should just do a
           build. */
        done(
            substituterFailed ? ecFailed : ecNoSubstituters,
            BuildResult::NoSubstituters,
            fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)));

        if (substituterFailed) {
            worker.failedSubstitutions++;
            worker.updateProgress();
        }

        return;
    }

    sub = subs.front();
    subs.pop_front();

    if (ca) {
        subPath = sub->makeFixedOutputPathFromCA(
            std::string { storePath.name() },
            ContentAddressWithReferences::withoutRefs(*ca));
        if (sub->storeDir == worker.store.storeDir)
            assert(subPath == storePath);
    } else if (sub->storeDir != worker.store.storeDir) {
        tryNext();
        return;
    }

    try {
        // FIXME: make async
        info = sub->queryPathInfo(subPath ? *subPath : storePath);
    } catch (InvalidPath &) {
        tryNext();
        return;
    } catch (SubstituterDisabled &) {
        if (settings.tryFallback) {
            tryNext();
            return;
        }
        throw;
    } catch (Error & e) {
        if (settings.tryFallback) {
            logError(e.info());
            tryNext();
            return;
        }
        throw;
    }

    if (info->path != storePath) {
        if (info->isContentAddressed(*sub) && info->references.empty()) {
            auto info2 = std::make_shared<ValidPathInfo>(*info);
            info2->path = storePath;
            info = info2;
        } else {
            printError("asked '%s' for '%s' but got '%s'",
                sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
            tryNext();
            return;
        }
    }

    /* Update the total expected download size. */
    auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);

    maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize);

    maintainExpectedDownload =
        narInfo && narInfo->fileSize
        ? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
        : nullptr;

    worker.updateProgress();

    /* Bail out early if this substituter lacks a valid
       signature. LocalStore::addToStore() also checks for this, but
       only after we've downloaded the path. */
    if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
    {
        warn("ignoring substitute for '%s' from '%s', as it's not signed by any of the keys in 'trusted-public-keys'",
            worker.store.printStorePath(storePath), sub->getUri());
        tryNext();
        return;
    }

    /* To maintain the closure invariant, we first have to realise the
       paths referenced by this one. */
    for (auto & i : info->references)
        if (i != storePath) /* ignore self-references */
            addWaitee(worker.makePathSubstitutionGoal(i));

    if (waitees.empty()) /* to prevent hang (no wake-up event) */
        referencesValid();
    else
        state = &PathSubstitutionGoal::referencesValid;
}


void PathSubstitutionGoal::referencesValid()
{
    trace("all references realised");

    if (nrFailed > 0) {
        done(
            nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed,
            BuildResult::DependencyFailed,
            fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)));
        return;
    }

    for (auto & i : info->references)
        if (i != storePath) /* ignore self-references */
            assert(worker.store.isValidPath(i));

    state = &PathSubstitutionGoal::tryToRun;
    worker.wakeUp(shared_from_this());
}


void PathSubstitutionGoal::tryToRun()
{
    trace("trying to run");

    /* Make sure that we are allowed to start a substitution.  Note that even
       if maxSubstitutionJobs == 0, we still allow a substituter to run. This
       prevents infinite waiting. */
    if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
        worker.waitForBuildSlot(shared_from_this());
        return;
    }

    maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
    worker.updateProgress();

    outPipe.create();

    promise = std::promise<void>();

    thr = std::thread([this]() {
        try {
            ReceiveInterrupts receiveInterrupts;

            /* Wake up the worker loop when we're done. */
            Finally updateStats([this]() { outPipe.writeSide.close(); });

            Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
            PushActivity pact(act.id);

            copyStorePath(*sub, worker.store,
                subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);

            promise.set_value();
        } catch (...) {
            promise.set_exception(std::current_exception());
        }
    });

    worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);

    state = &PathSubstitutionGoal::finished;
}


void PathSubstitutionGoal::finished()
{
    trace("substitute finished");

    thr.join();
    worker.childTerminated(this);

    try {
        promise.get_future().get();
    } catch (std::exception & e) {
        printError(e.what());

        /* Cause the parent build to fail unless --fallback is given,
           or the substitute has disappeared. The latter case behaves
           the same as the substitute never having existed in the
           first place. */
        try {
            throw;
        } catch (SubstituteGone &) {
        } catch (...) {
            substituterFailed = true;
        }

        /* Try the next substitute. */
        state = &PathSubstitutionGoal::tryNext;
        worker.wakeUp(shared_from_this());
        return;
    }

    worker.markContentsGood(storePath);

    printMsg(lvlChatty, "substitution of path '%s' succeeded", worker.store.printStorePath(storePath));

    maintainRunningSubstitutions.reset();

    maintainExpectedSubstitutions.reset();
    worker.doneSubstitutions++;

    if (maintainExpectedDownload) {
        auto fileSize = maintainExpectedDownload->delta;
        maintainExpectedDownload.reset();
        worker.doneDownloadSize += fileSize;
    }

    worker.doneNarSize += maintainExpectedNar->delta;
    maintainExpectedNar.reset();

    worker.updateProgress();

    done(ecSuccess, BuildResult::Substituted);
}


void PathSubstitutionGoal::handleChildOutput(int fd, std::string_view data)
{
}


void PathSubstitutionGoal::handleEOF(int fd)
{
    if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
}


void PathSubstitutionGoal::cleanup()
{
    try {
        if (thr.joinable()) {
            // FIXME: signal worker thread to quit.
            thr.join();
            worker.childTerminated(this);
        }

        outPipe.close();
    } catch (...) {
        ignoreException();
    }
}


}

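The substitution goal above drives itself through a pointer-to-member state machine: work() calls whatever member function `state` currently points at, and each state installs its successor before asking the worker to wake the goal up again. A minimal stand-alone sketch of that pattern, with illustrative names only (this is not the actual Goal hierarchy):

    #include <iostream>

    struct ToyGoal {
        typedef void (ToyGoal::*State)();
        State state = &ToyGoal::init;

        void work() { (this->*state)(); }   // the worker loop calls this repeatedly

        void init() {
            std::cout << "init\n";
            state = &ToyGoal::finished;     // install the next state and wait to be woken up
        }
        void finished() { std::cout << "finished\n"; }
    };

    int main() {
        ToyGoal g;
        g.work();   // runs init()
        g.work();   // runs finished()
    }
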
125
src/libstore/unix/build/substitution-goal.hh
Normal file

@@ -0,0 +1,125 @@
#pragma once
///@file

#include "lock.hh"
#include "store-api.hh"
#include "goal.hh"

namespace nix {

class Worker;

struct PathSubstitutionGoal : public Goal
{
    /**
     * The store path that should be realised through a substitute.
     */
    StorePath storePath;

    /**
     * The path the substituter refers to the path as. This will be
     * different when the stores have different names.
     */
    std::optional<StorePath> subPath;

    /**
     * The remaining substituters.
     */
    std::list<ref<Store>> subs;

    /**
     * The current substituter.
     */
    std::shared_ptr<Store> sub;

    /**
     * Whether a substituter failed.
     */
    bool substituterFailed = false;

    /**
     * Path info returned by the substituter's query info operation.
     */
    std::shared_ptr<const ValidPathInfo> info;

    /**
     * Pipe for the substituter's standard output.
     */
    Pipe outPipe;

    /**
     * The substituter thread.
     */
    std::thread thr;

    std::promise<void> promise;

    /**
     * Whether to try to repair a valid path.
     */
    RepairFlag repair;

    /**
     * Location where we're downloading the substitute.  Differs from
     * storePath when doing a repair.
     */
    Path destPath;

    std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
        maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;

    typedef void (PathSubstitutionGoal::*GoalState)();
    GoalState state;

    /**
     * Content address for recomputing store path
     */
    std::optional<ContentAddress> ca;

    void done(
        ExitCode result,
        BuildResult::Status status,
        std::optional<std::string> errorMsg = {});

public:
    PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
    ~PathSubstitutionGoal();

    void timedOut(Error && ex) override { abort(); };

    /**
     * We prepend "a$" to the key name to ensure substitution goals
     * happen before derivation goals.
     */
    std::string key() override
    {
        return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath);
    }

    void work() override;

    /**
     * The states.
     */
    void init();
    void tryNext();
    void gotInfo();
    void referencesValid();
    void tryToRun();
    void finished();

    /**
     * Callback used by the worker to write to the log.
     */
    void handleChildOutput(int fd, std::string_view data) override;
    void handleEOF(int fd) override;

    /* Called by destructor, can't be overridden */
    void cleanup() override final;

    JobCategory jobCategory() const override {
        return JobCategory::Substitution;
    };
};

}

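The "a$" prefix in key() matters because the worker processes awake goals in the order established by CompareGoalPtrs, which compares goal keys as strings, so keys starting with "a$" sort ahead of keys with later prefixes and substitutions get scheduled before builds. A small illustration of that string ordering; the "b$" prefix for derivation goals and the store paths are assumptions made up for the example:

    #include <cassert>
    #include <string>

    int main() {
        // Keys are compared lexicographically, so "a$..." < "b$...".
        std::string substKey = "a$hello$/nix/store/abc...-hello";        // illustrative
        std::string drvKey   = "b$hello$/nix/store/def...-hello.drv";    // illustrative
        assert(substKey < drvKey);   // substitution goals sort (and run) first
    }
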
561
src/libstore/unix/build/worker.cc
Normal file

@@ -0,0 +1,561 @@
#include "machines.hh"
#include "worker.hh"
#include "substitution-goal.hh"
#include "drv-output-substitution-goal.hh"
#include "local-derivation-goal.hh"
#include "hook-instance.hh"
#include "signals.hh"

#include <poll.h>

namespace nix {

Worker::Worker(Store & store, Store & evalStore)
    : act(*logger, actRealise)
    , actDerivations(*logger, actBuilds)
    , actSubstitutions(*logger, actCopyPaths)
    , store(store)
    , evalStore(evalStore)
{
    /* Debugging: prevent recursive workers. */
    nrLocalBuilds = 0;
    nrSubstitutions = 0;
    lastWokenUp = steady_time_point::min();
    permanentFailure = false;
    timedOut = false;
    hashMismatch = false;
    checkMismatch = false;
}


Worker::~Worker()
{
    /* Explicitly get rid of all strong pointers now.  After this all
       goals that refer to this worker should be gone.  (Otherwise we
       are in trouble, since goals may call childTerminated() etc. in
       their destructors). */
    topGoals.clear();

    assert(expectedSubstitutions == 0);
    assert(expectedDownloadSize == 0);
    assert(expectedNarSize == 0);
}


std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
    const StorePath & drvPath,
    const OutputsSpec & wantedOutputs,
    std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
{
    std::weak_ptr<DerivationGoal> & goal_weak = derivationGoals[drvPath];
    std::shared_ptr<DerivationGoal> goal = goal_weak.lock();
    if (!goal) {
        goal = mkDrvGoal();
        goal_weak = goal;
        wakeUp(goal);
    } else {
        goal->addWantedOutputs(wantedOutputs);
    }
    return goal;
}


std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
    const OutputsSpec & wantedOutputs, BuildMode buildMode)
{
    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
        return !dynamic_cast<LocalStore *>(&store)
            ? std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
            : std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
    });
}


std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
    const BasicDerivation & drv, const OutputsSpec & wantedOutputs, BuildMode buildMode)
{
    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
        return !dynamic_cast<LocalStore *>(&store)
            ? std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
            : std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
    });
}


std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
{
    std::weak_ptr<PathSubstitutionGoal> & goal_weak = substitutionGoals[path];
    auto goal = goal_weak.lock(); // FIXME
    if (!goal) {
        goal = std::make_shared<PathSubstitutionGoal>(path, *this, repair, ca);
        goal_weak = goal;
        wakeUp(goal);
    }
    return goal;
}


std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
{
    std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
    auto goal = goal_weak.lock(); // FIXME
    if (!goal) {
        goal = std::make_shared<DrvOutputSubstitutionGoal>(id, *this, repair, ca);
        goal_weak = goal;
        wakeUp(goal);
    }
    return goal;
}


GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
{
    return std::visit(overloaded {
        [&](const DerivedPath::Built & bfd) -> GoalPtr {
            if (auto bop = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath))
                return makeDerivationGoal(bop->path, bfd.outputs, buildMode);
            else
                throw UnimplementedError("Building dynamic derivations in one shot is not yet implemented.");
        },
        [&](const DerivedPath::Opaque & bo) -> GoalPtr {
            return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
        },
    }, req.raw());
}


template<typename K, typename G>
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
{
    /* !!! inefficient */
    for (auto i = goalMap.begin();
         i != goalMap.end(); )
        if (i->second.lock() == goal) {
            auto j = i; ++j;
            goalMap.erase(i);
            i = j;
        }
        else ++i;
}


void Worker::removeGoal(GoalPtr goal)
{
    if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
        nix::removeGoal(drvGoal, derivationGoals);
    else if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
        nix::removeGoal(subGoal, substitutionGoals);
    else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
        nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
    else
        assert(false);

    if (topGoals.find(goal) != topGoals.end()) {
        topGoals.erase(goal);
        /* If a top-level goal failed, then kill all other goals
           (unless keepGoing was set). */
        if (goal->exitCode == Goal::ecFailed && !settings.keepGoing)
            topGoals.clear();
    }

    /* Wake up goals waiting for any goal to finish. */
    for (auto & i : waitingForAnyGoal) {
        GoalPtr goal = i.lock();
        if (goal) wakeUp(goal);
    }

    waitingForAnyGoal.clear();
}


void Worker::wakeUp(GoalPtr goal)
{
    goal->trace("woken up");
    addToWeakGoals(awake, goal);
}


unsigned Worker::getNrLocalBuilds()
{
    return nrLocalBuilds;
}


unsigned Worker::getNrSubstitutions()
{
    return nrSubstitutions;
}


void Worker::childStarted(GoalPtr goal, const std::set<int> & fds,
    bool inBuildSlot, bool respectTimeouts)
{
    Child child;
    child.goal = goal;
    child.goal2 = goal.get();
    child.fds = fds;
    child.timeStarted = child.lastOutput = steady_time_point::clock::now();
    child.inBuildSlot = inBuildSlot;
    child.respectTimeouts = respectTimeouts;
    children.emplace_back(child);
    if (inBuildSlot) {
        switch (goal->jobCategory()) {
        case JobCategory::Substitution:
            nrSubstitutions++;
            break;
        case JobCategory::Build:
            nrLocalBuilds++;
            break;
        default:
            abort();
        }
    }
}


void Worker::childTerminated(Goal * goal, bool wakeSleepers)
{
    auto i = std::find_if(children.begin(), children.end(),
        [&](const Child & child) { return child.goal2 == goal; });
    if (i == children.end()) return;

    if (i->inBuildSlot) {
        switch (goal->jobCategory()) {
        case JobCategory::Substitution:
            assert(nrSubstitutions > 0);
            nrSubstitutions--;
            break;
        case JobCategory::Build:
            assert(nrLocalBuilds > 0);
            nrLocalBuilds--;
            break;
        default:
            abort();
        }
    }

    children.erase(i);

    if (wakeSleepers) {

        /* Wake up goals waiting for a build slot. */
        for (auto & j : wantingToBuild) {
            GoalPtr goal = j.lock();
            if (goal) wakeUp(goal);
        }

        wantingToBuild.clear();
    }
}


void Worker::waitForBuildSlot(GoalPtr goal)
{
    goal->trace("wait for build slot");
    bool isSubstitutionGoal = goal->jobCategory() == JobCategory::Substitution;
    if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs) ||
        (isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
        wakeUp(goal); /* we can do it right away */
    else
        addToWeakGoals(wantingToBuild, goal);
}


void Worker::waitForAnyGoal(GoalPtr goal)
{
    debug("wait for any goal");
    addToWeakGoals(waitingForAnyGoal, goal);
}


void Worker::waitForAWhile(GoalPtr goal)
{
    debug("wait for a while");
    addToWeakGoals(waitingForAWhile, goal);
}


void Worker::run(const Goals & _topGoals)
{
    std::vector<nix::DerivedPath> topPaths;

    for (auto & i : _topGoals) {
        topGoals.insert(i);
        if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
            topPaths.push_back(DerivedPath::Built {
                .drvPath = makeConstantStorePathRef(goal->drvPath),
                .outputs = goal->wantedOutputs,
            });
        } else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
            topPaths.push_back(DerivedPath::Opaque{goal->storePath});
        }
    }

    /* Call queryMissing() to efficiently query substitutes. */
    StorePathSet willBuild, willSubstitute, unknown;
    uint64_t downloadSize, narSize;
    store.queryMissing(topPaths, willBuild, willSubstitute, unknown, downloadSize, narSize);

    debug("entered goal loop");

    while (1) {

        checkInterrupt();

        // TODO GC interface?
        if (auto localStore = dynamic_cast<LocalStore *>(&store))
            localStore->autoGC(false);

        /* Call every wake goal (in the ordering established by
           CompareGoalPtrs). */
        while (!awake.empty() && !topGoals.empty()) {
            Goals awake2;
            for (auto & i : awake) {
                GoalPtr goal = i.lock();
                if (goal) awake2.insert(goal);
            }
            awake.clear();
            for (auto & goal : awake2) {
                checkInterrupt();
                goal->work();
                if (topGoals.empty()) break; // stuff may have been cancelled
            }
        }

        if (topGoals.empty()) break;

        /* Wait for input. */
        if (!children.empty() || !waitingForAWhile.empty())
            waitForInput();
        else {
            if (awake.empty() && 0U == settings.maxBuildJobs)
            {
                if (getMachines().empty())
                    throw Error(
                        R"(
                        Unable to start any build;
                        either increase '--max-jobs' or enable remote builds.

                        For more information run 'man nix.conf' and search for '/machines'.
                        )"
                    );
                else
                    throw Error(
                        R"(
                        Unable to start any build;
                        remote machines may not have all required system features.

                        For more information run 'man nix.conf' and search for '/machines'.
                        )"
                    );

            }
            assert(!awake.empty());
        }
    }

    /* If --keep-going is not set, it's possible that the main goal
       exited while some of its subgoals were still active.  But if
       --keep-going *is* set, then they must all be finished now. */
    assert(!settings.keepGoing || awake.empty());
    assert(!settings.keepGoing || wantingToBuild.empty());
    assert(!settings.keepGoing || children.empty());
}

void Worker::waitForInput()
{
    printMsg(lvlVomit, "waiting for children");

    /* Process output from the file descriptors attached to the
       children, namely log output and output path creation commands.
       We also use this to detect child termination: if we get EOF on
       the logger pipe of a build, we assume that the builder has
       terminated. */

    bool useTimeout = false;
    long timeout = 0;
    auto before = steady_time_point::clock::now();

    /* If we're monitoring for silence on stdout/stderr, or if there
       is a build timeout, then wait for input until the first
       deadline for any child. */
    auto nearest = steady_time_point::max(); // nearest deadline
    if (settings.minFree.get() != 0)
        // Periodically wake up to see if we need to run the garbage collector.
        nearest = before + std::chrono::seconds(10);
    for (auto & i : children) {
        if (!i.respectTimeouts) continue;
        if (0 != settings.maxSilentTime)
            nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
        if (0 != settings.buildTimeout)
            nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
    }
    if (nearest != steady_time_point::max()) {
        timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
        useTimeout = true;
    }

    /* If we are polling goals that are waiting for a lock, then wake
       up after a few seconds at most. */
    if (!waitingForAWhile.empty()) {
        useTimeout = true;
        if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
        timeout = std::max(1L,
            (long) std::chrono::duration_cast<std::chrono::seconds>(
                lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
    } else lastWokenUp = steady_time_point::min();

    if (useTimeout)
        vomit("sleeping %d seconds", timeout);

    /* Use select() to wait for the input side of any logger pipe to
       become `available'.  Note that `available' (i.e., non-blocking)
       includes EOF. */
    std::vector<struct pollfd> pollStatus;
    std::map<int, size_t> fdToPollStatus;
    for (auto & i : children) {
        for (auto & j : i.fds) {
            pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
            fdToPollStatus[j] = pollStatus.size() - 1;
        }
    }

    if (poll(pollStatus.data(), pollStatus.size(),
            useTimeout ? timeout * 1000 : -1) == -1) {
        if (errno == EINTR) return;
        throw SysError("waiting for input");
    }

    auto after = steady_time_point::clock::now();

    /* Process all available file descriptors. FIXME: this is
       O(children * fds). */
    decltype(children)::iterator i;
    for (auto j = children.begin(); j != children.end(); j = i) {
        i = std::next(j);

        checkInterrupt();

        GoalPtr goal = j->goal.lock();
        assert(goal);

        std::set<int> fds2(j->fds);
        std::vector<unsigned char> buffer(4096);
        for (auto & k : fds2) {
            const auto fdPollStatusId = get(fdToPollStatus, k);
            assert(fdPollStatusId);
            assert(*fdPollStatusId < pollStatus.size());
            if (pollStatus.at(*fdPollStatusId).revents) {
                ssize_t rd = ::read(k, buffer.data(), buffer.size());
                // FIXME: is there a cleaner way to handle pt close
                // than EIO? Is this even standard?
                if (rd == 0 || (rd == -1 && errno == EIO)) {
                    debug("%1%: got EOF", goal->getName());
                    goal->handleEOF(k);
                    j->fds.erase(k);
                } else if (rd == -1) {
                    if (errno != EINTR)
                        throw SysError("%s: read failed", goal->getName());
                } else {
                    printMsg(lvlVomit, "%1%: read %2% bytes",
                        goal->getName(), rd);
                    std::string_view data((char *) buffer.data(), rd);
                    j->lastOutput = after;
                    goal->handleChildOutput(k, data);
                }
            }
        }

        if (goal->exitCode == Goal::ecBusy &&
            0 != settings.maxSilentTime &&
            j->respectTimeouts &&
            after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
        {
            goal->timedOut(Error(
                    "%1% timed out after %2% seconds of silence",
                    goal->getName(), settings.maxSilentTime));
        }

        else if (goal->exitCode == Goal::ecBusy &&
            0 != settings.buildTimeout &&
            j->respectTimeouts &&
            after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
        {
            goal->timedOut(Error(
                    "%1% timed out after %2% seconds",
                    goal->getName(), settings.buildTimeout));
        }
    }

    if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
        lastWokenUp = after;
        for (auto & i : waitingForAWhile) {
            GoalPtr goal = i.lock();
            if (goal) wakeUp(goal);
        }
        waitingForAWhile.clear();
    }
}


unsigned int Worker::failingExitStatus()
{
    // See API docs in header for explanation
    unsigned int mask = 0;
    bool buildFailure = permanentFailure || timedOut || hashMismatch;
    if (buildFailure)
        mask |= 0x04; // 100
    if (timedOut)
        mask |= 0x01; // 101
    if (hashMismatch)
        mask |= 0x02; // 102
    if (checkMismatch) {
        mask |= 0x08; // 104
    }

    if (mask)
        mask |= 0x60;
    return mask ? mask : 1;
}


bool Worker::pathContentsGood(const StorePath & path)
{
    auto i = pathContentsGoodCache.find(path);
    if (i != pathContentsGoodCache.end()) return i->second;
    printInfo("checking path '%s'...", store.printStorePath(path));
    auto info = store.queryPathInfo(path);
    bool res;
    if (!pathExists(store.printStorePath(path)))
        res = false;
    else {
        Hash current = hashPath(
            *store.getFSAccessor(), CanonPath { store.printStorePath(path) },
            FileIngestionMethod::Recursive, info->narHash.algo);
        Hash nullHash(HashAlgorithm::SHA256);
        res = info->narHash == nullHash || info->narHash == current;
    }
    pathContentsGoodCache.insert_or_assign(path, res);
    if (!res)
        printError("path '%s' is corrupted or missing!", store.printStorePath(path));
    return res;
}


void Worker::markContentsGood(const StorePath & path)
{
    pathContentsGoodCache.insert_or_assign(path, true);
}


GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal)
{
    return subGoal;
}

GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal)
{
    return subGoal;
}

}

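failingExitStatus() above encodes the kinds of failure as bits on top of a base of 0x60 (96), which is how exit codes such as 100 (generic build failure), 101 (timeout), 102 (output hash mismatch) and 104 (non-deterministic check build) come about. A tiny stand-alone reproduction of the arithmetic, with the flags hard-coded for illustration:

    #include <cstdio>

    int main() {
        unsigned int mask = 0;
        bool timedOut = true, hashMismatch = false, checkMismatch = false;
        bool buildFailure = timedOut || hashMismatch;   // permanentFailure omitted for brevity
        if (buildFailure)   mask |= 0x04;
        if (timedOut)       mask |= 0x01;
        if (hashMismatch)   mask |= 0x02;
        if (checkMismatch)  mask |= 0x08;
        if (mask) mask |= 0x60;                 // 0x60 == 96, so 96 + 4 + 1 == 101
        std::printf("%u\n", mask ? mask : 1);   // prints 101 for a timed-out build
    }
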
322
src/libstore/unix/build/worker.hh
Normal file

@@ -0,0 +1,322 @@
#pragma once
///@file

#include "types.hh"
#include "lock.hh"
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"

#include <future>
#include <thread>

namespace nix {

/* Forward definition. */
struct DerivationGoal;
struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;

/**
 * Workaround for not being able to declare something like
 *
 * ```c++
 * class PathSubstitutionGoal : public Goal;
 * ```
 *
 * even when Goal is a complete type.
 *
 * This is still a static cast. The purpose of exporting it is to define it in
 * a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
 * is opaque.
 */
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);

typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;

/**
 * A mapping used to remember for each child process to what goal it
 * belongs, and file descriptors for receiving log data and output
 * path creation commands.
 */
struct Child
{
    WeakGoalPtr goal;
    Goal * goal2; // ugly hackery
    std::set<int> fds;
    bool respectTimeouts;
    bool inBuildSlot;
    /**
     * Time we last got output on stdout/stderr
     */
    steady_time_point lastOutput;
    steady_time_point timeStarted;
};

/* Forward definition. */
struct HookInstance;

/**
 * The worker class.
 */
class Worker
{
private:

    /* Note: the worker should only have strong pointers to the
       top-level goals. */

    /**
     * The top-level goals of the worker.
     */
    Goals topGoals;

    /**
     * Goals that are ready to do some work.
     */
    WeakGoals awake;

    /**
     * Goals waiting for a build slot.
     */
    WeakGoals wantingToBuild;

    /**
     * Child processes currently running.
     */
    std::list<Child> children;

    /**
     * Number of build slots occupied.  This includes local builds but does not
     * include substitutions or remote builds via the build hook.
     */
    unsigned int nrLocalBuilds;

    /**
     * Number of substitution slots occupied.
     */
    unsigned int nrSubstitutions;

    /**
     * Maps used to prevent multiple instantiations of a goal for the
     * same derivation / path.
     */
    std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
    std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
    std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;

    /**
     * Goals waiting for busy paths to be unlocked.
     */
    WeakGoals waitingForAnyGoal;

    /**
     * Goals sleeping for a few seconds (polling a lock).
     */
    WeakGoals waitingForAWhile;

    /**
     * Last time the goals in `waitingForAWhile` were woken up.
     */
    steady_time_point lastWokenUp;

    /**
     * Cache for pathContentsGood().
     */
    std::map<StorePath, bool> pathContentsGoodCache;

public:

    const Activity act;
    const Activity actDerivations;
    const Activity actSubstitutions;

    /**
     * Set if at least one derivation had a BuildError (i.e. permanent
     * failure).
     */
    bool permanentFailure;

    /**
     * Set if at least one derivation had a timeout.
     */
    bool timedOut;

    /**
     * Set if at least one derivation fails with a hash mismatch.
     */
    bool hashMismatch;

    /**
     * Set if at least one derivation is not deterministic in check mode.
     */
    bool checkMismatch;

    Store & store;
    Store & evalStore;

    std::unique_ptr<HookInstance> hook;

    uint64_t expectedBuilds = 0;
    uint64_t doneBuilds = 0;
    uint64_t failedBuilds = 0;
    uint64_t runningBuilds = 0;

    uint64_t expectedSubstitutions = 0;
    uint64_t doneSubstitutions = 0;
    uint64_t failedSubstitutions = 0;
    uint64_t runningSubstitutions = 0;
    uint64_t expectedDownloadSize = 0;
    uint64_t doneDownloadSize = 0;
    uint64_t expectedNarSize = 0;
    uint64_t doneNarSize = 0;

    /**
     * Whether to ask the build hook if it can build a derivation. If
     * it answers with "decline-permanently", we don't try again.
     */
    bool tryBuildHook = true;

    Worker(Store & store, Store & evalStore);
    ~Worker();

    /**
     * Make a goal (with caching).
     */

    /**
     * @ref DerivationGoal "derivation goal"
     */
private:
    std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
        const StorePath & drvPath, const OutputsSpec & wantedOutputs,
        std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
    std::shared_ptr<DerivationGoal> makeDerivationGoal(
        const StorePath & drvPath,
        const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
    std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
        const StorePath & drvPath, const BasicDerivation & drv,
        const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);

    /**
     * @ref SubstitutionGoal "substitution goal"
     */
    std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
    std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

    /**
     * Make a goal corresponding to the `DerivedPath`.
     *
     * It will be a `DerivationGoal` for a `DerivedPath::Built` or
     * a `SubstitutionGoal` for a `DerivedPath::Opaque`.
     */
    GoalPtr makeGoal(const DerivedPath & req, BuildMode buildMode = bmNormal);

    /**
     * Remove a dead goal.
     */
    void removeGoal(GoalPtr goal);

    /**
     * Wake up a goal (i.e., there is something for it to do).
     */
    void wakeUp(GoalPtr goal);

    /**
     * Return the number of local build processes currently running (but not
     * remote builds via the build hook).
     */
    unsigned int getNrLocalBuilds();

    /**
     * Return the number of substitution processes currently running.
     */
    unsigned int getNrSubstitutions();

    /**
     * Registers a running child process.  `inBuildSlot` means that
     * the process counts towards the jobs limit.
     */
    void childStarted(GoalPtr goal, const std::set<int> & fds,
        bool inBuildSlot, bool respectTimeouts);

    /**
     * Unregisters a running child process.  `wakeSleepers` should be
     * false if there is no sense in waking up goals that are sleeping
     * because they can't run yet (e.g., there is no free build slot,
     * or the hook would still say `postpone`).
     */
    void childTerminated(Goal * goal, bool wakeSleepers = true);

    /**
     * Put `goal` to sleep until a build slot becomes available (which
     * might be right away).
     */
    void waitForBuildSlot(GoalPtr goal);

    /**
     * Wait for any goal to finish.  Pretty indiscriminate way to
     * wait for some resource that some other goal is holding.
     */
    void waitForAnyGoal(GoalPtr goal);

    /**
     * Wait for a few seconds and then retry this goal.  Used when
     * waiting for a lock held by another process.  This kind of
     * polling is inefficient, but POSIX doesn't really provide a way
     * to wait for multiple locks in the main select() loop.
     */
    void waitForAWhile(GoalPtr goal);

    /**
     * Loop until the specified top-level goals have finished.
     */
    void run(const Goals & topGoals);

    /**
     * Wait for input to become available.
     */
    void waitForInput();

    /***
     * The exit status in case of failure.
     *
     * In the case of a build failure, returned value follows this
     * bitmask:
     *
     * ```
     * 0b1100100
     *      ^^^^
     *      |||`- timeout
     *      ||`-- output hash mismatch
     *      |`--- build failure
     *      `---- not deterministic
     * ```
     *
     * In other words, the failure code is at least 100 (0b1100100), but
     * might also be greater.
     *
     * Otherwise (no build failure, but some other sort of failure by
     * assumption), this returned value is 1.
     */
    unsigned int failingExitStatus();

    /**
     * Check whether the given valid path exists and has the right
     * contents.
     */
    bool pathContentsGood(const StorePath & path);

    void markContentsGood(const StorePath & path);

    void updateProgress()
    {
        actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
        actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
        act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
        act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
    }
};

}

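Putting the Worker and the goal factories together, a frontend typically builds a set of top-level goals, runs the worker loop until they settle, and then inspects their exit codes. A rough sketch of that call sequence only, assuming the libstore headers above plus a Store reference named `store` and a StorePath named `path` (both placeholders), with error handling trimmed:

    #include "worker.hh"
    #include "derived-path.hh"

    // Sketch of the shape of a caller; not the actual Store::buildPaths implementation.
    void substituteOnePath(Store & store, const StorePath & path)
    {
        Worker worker(store, store);
        Goals goals;
        goals.insert(worker.makeGoal(DerivedPath::Opaque{path}));   // substitute a plain store path

        worker.run(goals);                                          // loop until the goal settles

        for (auto & goal : goals)
            if (goal->exitCode != Goal::ecSuccess)
                throw Error("some goals failed; exit status %d", worker.failingExitStatus());
    }
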
81
src/libstore/unix/builtins/fetchurl.cc
Normal file

@@ -0,0 +1,81 @@
#include "builtins.hh"
#include "filetransfer.hh"
#include "store-api.hh"
#include "archive.hh"
#include "compression.hh"

namespace nix {

void builtinFetchurl(
    const BasicDerivation & drv,
    const std::map<std::string, Path> & outputs,
    const std::string & netrcData)
{
    /* Make the host's netrc data available. Too bad curl requires
       this to be stored in a file. It would be nice if we could just
       pass a pointer to the data. */
    if (netrcData != "") {
        settings.netrcFile = "netrc";
        writeFile(settings.netrcFile, netrcData, 0600);
    }

    auto out = get(drv.outputs, "out");
    if (!out)
        throw Error("'builtin:fetchurl' requires an 'out' output");

    if (!(drv.type().isFixed() || drv.type().isImpure()))
        throw Error("'builtin:fetchurl' must be a fixed-output or impure derivation");

    auto storePath = outputs.at("out");
    auto mainUrl = drv.env.at("url");
    bool unpack = getOr(drv.env, "unpack", "") == "1";

    /* Note: have to use a fresh fileTransfer here because we're in
       a forked process. */
    auto fileTransfer = makeFileTransfer();

    auto fetch = [&](const std::string & url) {

        auto source = sinkToSource([&](Sink & sink) {

            /* No need to do TLS verification, because we check the hash of
               the result anyway. */
            FileTransferRequest request(url);
            request.verifyTLS = false;
            request.decompress = false;

            auto decompressor = makeDecompressionSink(
                unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
            fileTransfer->download(std::move(request), *decompressor);
            decompressor->finish();
        });

        if (unpack)
            restorePath(storePath, *source);
        else
            writeFile(storePath, *source);

        auto executable = drv.env.find("executable");
        if (executable != drv.env.end() && executable->second == "1") {
            if (chmod(storePath.c_str(), 0755) == -1)
                throw SysError("making '%1%' executable", storePath);
        }
    };

    /* Try the hashed mirrors first. */
    auto dof = std::get_if<DerivationOutput::CAFixed>(&out->raw);
    if (dof && dof->ca.method.getFileIngestionMethod() == FileIngestionMethod::Flat)
        for (auto hashedMirror : settings.hashedMirrors.get())
            try {
                if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
                fetch(hashedMirror + printHashAlgo(dof->ca.hash.algo) + "/" + dof->ca.hash.to_string(HashFormat::Base16, false));
                return;
            } catch (Error & e) {
                debug(e.what());
            }

    /* Otherwise try the specified URL. */
    fetch(mainUrl);
}

}

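The hashed-mirror fast path above rewrites a fixed-output download into a URL derived purely from the expected content hash, of the form <mirror>/<algo>/<base16 hash>, so any configured mirror can serve the file regardless of its original URL. A small sketch of the URL shape; the mirror host and hash are made up for illustration and the actual default of `hashed-mirrors` may differ:

    #include <iostream>
    #include <string>

    int main() {
        std::string mirror = "https://tarballs.example.org/";   // illustrative mirror
        std::string algo = "sha256";
        std::string hashBase16 = "1f2d...";                     // expected hash, abbreviated
        // The fetcher simply concatenates the pieces, exactly as in fetch(...) above.
        std::cout << mirror + algo + "/" + hashBase16 << "\n";
    }
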
30
src/libstore/unix/builtins/unpack-channel.cc
Normal file

@@ -0,0 +1,30 @@
#include "builtins.hh"
#include "tarfile.hh"

namespace nix {

void builtinUnpackChannel(
    const BasicDerivation & drv,
    const std::map<std::string, Path> & outputs)
{
    auto getAttr = [&](const std::string & name) {
        auto i = drv.env.find(name);
        if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
        return i->second;
    };

    auto out = outputs.at("out");
    auto channelName = getAttr("channelName");
    auto src = getAttr("src");

    createDirs(out);

    unpackTarfile(src, out);

    auto entries = readDirectory(out);
    if (entries.size() != 1)
        throw Error("channel tarball '%s' contains more than one file", src);
    renameFile((out + "/" + entries[0].name), (out + "/" + channelName));
}

}

41
src/libstore/unix/ca-specific-schema.sql
Normal file

@@ -0,0 +1,41 @@
-- Extension of the sql schema for content-addressed derivations.
-- Won't be loaded unless the experimental feature `ca-derivations`
-- is enabled

create table if not exists Realisations (
    id integer primary key autoincrement not null,
    drvPath text not null,
    outputName text not null, -- symbolic output id, usually "out"
    outputPath integer not null,
    signatures text, -- space-separated list
    foreign key (outputPath) references ValidPaths(id) on delete cascade
);

create index if not exists IndexRealisations on Realisations(drvPath, outputName);

-- We can end-up in a weird edge-case where a path depends on itself because
-- it’s an output of a CA derivation, that happens to be the same as one of its
-- dependencies.
-- In that case we have a dependency loop (path -> realisation1 -> realisation2
-- -> path) that we need to break by removing the dependencies between the
-- realisations
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
    begin
        delete from RealisationsRefs where realisationReference in (
            select id from Realisations where outputPath = old.id
        );
    end;

create table if not exists RealisationsRefs (
    referrer integer not null,
    realisationReference integer,
    foreign key (referrer) references Realisations(id) on delete cascade,
    foreign key (realisationReference) references Realisations(id) on delete restrict
);
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);

-- used by QueryRealisationReferences
create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
-- used by cascade deletion when ValidPaths is deleted
create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);

983
src/libstore/unix/gc.cc
Normal file

@@ -0,0 +1,983 @@
#include "derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "finally.hh"
#include "unix-domain-socket.hh"
#include "signals.hh"

#if !defined(__linux__)
// For shelling out to lsof
# include "processes.hh"
#endif

#include <functional>
#include <queue>
#include <algorithm>
#include <regex>
#include <random>

#include <climits>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

namespace nix {

using namespace nix::unix;

static std::string gcSocketPath = "/gc-socket/socket";
static std::string gcRootsDir = "gcroots";


static void makeSymlink(const Path & link, const Path & target)
{
    /* Create directories up to `gcRoot'. */
    createDirs(dirOf(link));

    /* Create the new symlink. */
    Path tempLink = fmt("%1%.tmp-%2%-%3%", link, getpid(), random());
    createSymlink(target, tempLink);

    /* Atomically replace the old one. */
    renameFile(tempLink, link);
}


void LocalStore::addIndirectRoot(const Path & path)
{
    std::string hash = hashString(HashAlgorithm::SHA1, path).to_string(HashFormat::Nix32, false);
    Path realRoot = canonPath(fmt("%1%/%2%/auto/%3%", stateDir, gcRootsDir, hash));
    makeSymlink(realRoot, path);
}


Path IndirectRootStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot)
{
    Path gcRoot(canonPath(_gcRoot));

    if (isInStore(gcRoot))
        throw Error(
            "creating a garbage collector root (%1%) in the Nix store is forbidden "
            "(are you running nix-build inside the store?)", gcRoot);

    /* Register this root with the garbage collector, if it's
       running. This should be superfluous since the caller should
       have registered this root already, but let's be on the safe
       side. */
    addTempRoot(storePath);

    /* Don't clobber the link if it already exists and doesn't
       point to the Nix store. */
    if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
        throw Error("cannot create symlink '%1%'; already exists", gcRoot);
    makeSymlink(gcRoot, printStorePath(storePath));
    addIndirectRoot(gcRoot);

    return gcRoot;
}


void LocalStore::createTempRootsFile()
{
    auto fdTempRoots(_fdTempRoots.lock());

    /* Create the temporary roots file for this process. */
    if (*fdTempRoots) return;

    while (1) {
        if (pathExists(fnTempRoots))
            /* It *must* be stale, since there can be no two
               processes with the same pid. */
            unlink(fnTempRoots.c_str());

        *fdTempRoots = openLockFile(fnTempRoots, true);

        debug("acquiring write lock on '%s'", fnTempRoots);
        lockFile(fdTempRoots->get(), ltWrite, true);

        /* Check whether the garbage collector didn't get in our
           way. */
        struct stat st;
        if (fstat(fdTempRoots->get(), &st) == -1)
            throw SysError("statting '%1%'", fnTempRoots);
        if (st.st_size == 0) break;

        /* The garbage collector deleted this file before we could get
           a lock.  (It won't delete the file after we get a lock.)
           Try again. */
    }
}


void LocalStore::addTempRoot(const StorePath & path)
{
    if (readOnly) {
        debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
        return;
    }

    createTempRootsFile();

    /* Open/create the global GC lock file. */
    {
        auto fdGCLock(_fdGCLock.lock());
        if (!*fdGCLock)
            *fdGCLock = openGCLock();
    }

restart:
    /* Try to acquire a shared global GC lock (non-blocking). This
       only succeeds if the garbage collector is not currently
       running. */
    FdLock gcLock(_fdGCLock.lock()->get(), ltRead, false, "");

    if (!gcLock.acquired) {
        /* We couldn't get a shared global GC lock, so the garbage
           collector is running. So we have to connect to the garbage
           collector and inform it about our root. */
        auto fdRootsSocket(_fdRootsSocket.lock());

        if (!*fdRootsSocket) {
            auto socketPath = stateDir.get() + gcSocketPath;
            debug("connecting to '%s'", socketPath);
            *fdRootsSocket = createUnixDomainSocket();
            try {
                nix::connect(fdRootsSocket->get(), socketPath);
            } catch (SysError & e) {
                /* The garbage collector may have exited or not
                   created the socket yet, so we need to restart. */
                if (e.errNo == ECONNREFUSED || e.errNo == ENOENT) {
                    debug("GC socket connection refused: %s", e.msg());
                    fdRootsSocket->close();
                    std::this_thread::sleep_for(std::chrono::milliseconds(100));
                    goto restart;
                }
                throw;
            }
        }

        try {
            debug("sending GC root '%s'", printStorePath(path));
            writeFull(fdRootsSocket->get(), printStorePath(path) + "\n", false);
            char c;
            readFull(fdRootsSocket->get(), &c, 1);
            assert(c == '1');
            debug("got ack for GC root '%s'", printStorePath(path));
        } catch (SysError & e) {
            /* The garbage collector may have exited, so we need to
               restart. */
            if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
                debug("GC socket disconnected");
                fdRootsSocket->close();
                goto restart;
            }
            throw;
        } catch (EndOfFile & e) {
            debug("GC socket disconnected");
            fdRootsSocket->close();
            goto restart;
        }
    }

    /* Record the store path in the temporary roots file so it will be
       seen by a future run of the garbage collector. */
    auto s = printStorePath(path) + '\0';
    writeFull(_fdTempRoots.lock()->get(), s);
}


static std::string censored = "{censored}";


void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
    /* Read the `temproots' directory for per-process temporary root
       files. */
    for (auto & i : readDirectory(tempRootsDir)) {
        if (i.name[0] == '.') {
            // Ignore hidden files. Some package managers (notably portage) create
            // those to keep the directory alive.
            continue;
        }
        Path path = tempRootsDir + "/" + i.name;

        pid_t pid = std::stoi(i.name);

        debug("reading temporary root file '%1%'", path);
        AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
        if (!fd) {
            /* It's okay if the file has disappeared. */
            if (errno == ENOENT) continue;
            throw SysError("opening temporary roots file '%1%'", path);
        }

        /* Try to acquire a write lock without blocking.  This can
           only succeed if the owning process has died.  In that case
           we don't care about its temporary roots. */
        if (lockFile(fd.get(), ltWrite, false)) {
            printInfo("removing stale temporary roots file '%1%'", path);
            unlink(path.c_str());
            writeFull(fd.get(), "d");
            continue;
        }

        /* Read the entire file. */
        auto contents = readFile(fd.get());

        /* Extract the roots. */
        std::string::size_type pos = 0, end;

        while ((end = contents.find((char) 0, pos)) != std::string::npos) {
            Path root(contents, pos, end - pos);
            debug("got temporary root '%s'", root);
            tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid));
            pos = end + 1;
        }
    }
}


void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
{
    auto foundRoot = [&](const Path & path, const Path & target) {
        try {
            auto storePath = toStorePath(target).first;
            if (isValidPath(storePath))
                roots[std::move(storePath)].emplace(path);
            else
                printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
        } catch (BadStorePath &) { }
    };

    try {

        if (type == DT_UNKNOWN)
            type = getFileType(path);

        if (type == DT_DIR) {
            for (auto & i : readDirectory(path))
                findRoots(path + "/" + i.name, i.type, roots);
        }

        else if (type == DT_LNK) {
            Path target = readLink(path);
            if (isInStore(target))
                foundRoot(path, target);

            /* Handle indirect roots. */
            else {
                target = absPath(target, dirOf(path));
                if (!pathExists(target)) {
                    if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
                        printInfo("removing stale link from '%1%' to '%2%'", path, target);
                        unlink(path.c_str());
                    }
                } else {
                    struct stat st2 = lstat(target);
                    if (!S_ISLNK(st2.st_mode)) return;
                    Path target2 = readLink(target);
                    if (isInStore(target2)) foundRoot(target, target2);
                }
            }
        }

        else if (type == DT_REG) {
            auto storePath = maybeParseStorePath(storeDir + "/" + std::string(baseNameOf(path)));
            if (storePath && isValidPath(*storePath))
                roots[std::move(*storePath)].emplace(path);
        }

    }

    catch (SysError & e) {
        /* We only ignore permanent failures. */
        if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
            printInfo("cannot read potential root '%1%'", path);
        else
            throw;
    }
}


void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
{
    /* Process direct roots in {gcroots,profiles}. */
    findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
    findRoots(stateDir + "/profiles", DT_UNKNOWN, roots);

    /* Add additional roots returned by different platform-specific
       heuristics.  This is typically used to add running programs to
       the set of roots (to prevent them from being garbage collected). */
    findRuntimeRoots(roots, censor);
}


Roots LocalStore::findRoots(bool censor)
{
    Roots roots;
    findRootsNoTemp(roots, censor);

    findTempRoots(roots, censor);

    return roots;
}

typedef std::unordered_map<Path, std::unordered_set<std::string>> UncheckedRoots;

static void readProcLink(const std::string & file, UncheckedRoots & roots)
{
    constexpr auto bufsiz = PATH_MAX;
    char buf[bufsiz];
    auto res = readlink(file.c_str(), buf, bufsiz);
    if (res == -1) {
        if (errno == ENOENT || errno == EACCES || errno == ESRCH)
            return;
        throw SysError("reading symlink");
    }
    if (res == bufsiz) {
        throw Error("overly long symlink starting with '%1%'", std::string_view(buf, bufsiz));
    }
    if (res > 0 && buf[0] == '/')
        roots[std::string(static_cast<char *>(buf), res)]
            .emplace(file);
}

static std::string quoteRegexChars(const std::string & raw)
{
    static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
    return std::regex_replace(raw, specialRegex, R"(\$&)");
}

#if __linux__
static void readFileRoots(const char * path, UncheckedRoots & roots)
{
    try {
        roots[readFile(path)].emplace(path);
    } catch (SysError & e) {
        if (e.errNo != ENOENT && e.errNo != EACCES)
            throw;
    }
}
#endif

void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
    UncheckedRoots unchecked;

    auto procDir = AutoCloseDir{opendir("/proc")};
    if (procDir) {
        struct dirent * ent;
        auto digitsRegex = std::regex(R"(^\d+$)");
        auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
        auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
        while (errno = 0, ent = readdir(procDir.get())) {
            checkInterrupt();
            if (std::regex_match(ent->d_name, digitsRegex)) {
                try {
                    readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
                    readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);

                    auto fdStr = fmt("/proc/%s/fd", ent->d_name);
                    auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
                    if (!fdDir) {
                        if (errno == ENOENT || errno == EACCES)
                            continue;
                        throw SysError("opening %1%", fdStr);
                    }
                    struct dirent * fd_ent;
                    while (errno = 0, fd_ent = readdir(fdDir.get())) {
                        if (fd_ent->d_name[0] != '.')
                            readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
                    }
                    if (errno) {
                        if (errno == ESRCH)
                            continue;
                        throw SysError("iterating /proc/%1%/fd", ent->d_name);
                    }
                    fdDir.reset();

                    auto mapFile = fmt("/proc/%s/maps", ent->d_name);
                    auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n");
                    for (const auto & line : mapLines) {
                        auto match = std::smatch{};
                        if (std::regex_match(line, match, mapRegex))
                            unchecked[match[1]].emplace(mapFile);
                    }

                    auto envFile = fmt("/proc/%s/environ", ent->d_name);
                    auto envString = readFile(envFile);
                    auto env_end = std::sregex_iterator{};
                    for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
                        unchecked[i->str()].emplace(envFile);
                } catch (SystemError & e) {
                    if (errno == ENOENT || errno == EACCES || errno == ESRCH)
                        continue;
                    throw;
                }
            }
        }
        if (errno)
            throw SysError("iterating /proc");
    }

#if !defined(__linux__)
    // lsof is really slow on OS X. This actually causes the gc-concurrent.sh test to fail.
    // See: https://github.com/NixOS/nix/issues/3011
    // Because of this we disable lsof when running the tests.
    if (getEnv("_NIX_TEST_NO_LSOF") != "1") {
        try {
            std::regex lsofRegex(R"(^n(/.*)$)");
            auto lsofLines =
                tokenizeString<std::vector<std::string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
            for (const auto & line : lsofLines) {
                std::smatch match;
                if (std::regex_match(line, match, lsofRegex))
                    unchecked[match[1]].emplace("{lsof}");
            }
        } catch (ExecError & e) {
            /* lsof not installed, lsof failed */
        }
    }
#endif

#if __linux__
    readFileRoots("/proc/sys/kernel/modprobe", unchecked);
    readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
    readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
#endif

    for (auto & [target, links] : unchecked) {
        if (!isInStore(target)) continue;
        try {
            auto path = toStorePath(target).first;
            if (!isValidPath(path)) continue;
            debug("got additional root '%1%'", printStorePath(path));
            if (censor)
                roots[path].insert(censored);
            else
                roots[path].insert(links.begin(), links.end());
        } catch (BadStorePath &) { }
    }
}


struct GCLimitReached { };


void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    bool shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
    bool gcKeepOutputs = settings.gcKeepOutputs;
    bool gcKeepDerivations = settings.gcKeepDerivations;

    StorePathSet roots, dead, alive;

    struct Shared
    {
        // The temp roots only store the hash part to make it easier to
        // ignore suffixes like '.lock', '.chroot' and '.check'.
        std::unordered_set<std::string> tempRoots;

        // Hash part of the store path currently being deleted, if
        // any.
        std::optional<std::string> pending;
    };

    Sync<Shared> _shared;

    std::condition_variable wakeup;

    /* Using `--ignore-liveness' with `--delete' can have unintended
       consequences if `keep-outputs' or `keep-derivations' are true
       (the garbage collector will recurse into deleting the outputs
       or derivers, respectively).  So disable them. */
|
||||
if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
|
||||
gcKeepOutputs = false;
|
||||
gcKeepDerivations = false;
|
||||
}
|
||||
|
||||
if (shouldDelete)
|
||||
deletePath(reservedPath);
|
||||
|
||||
/* Acquire the global GC root. Note: we don't use fdGCLock
|
||||
here because then in auto-gc mode, another thread could
|
||||
downgrade our exclusive lock. */
|
||||
auto fdGCLock = openGCLock();
|
||||
FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");
|
||||
|
||||
/* Synchronisation point to test ENOENT handling in
|
||||
addTempRoot(), see tests/gc-non-blocking.sh. */
|
||||
if (auto p = getEnv("_NIX_TEST_GC_SYNC_1"))
|
||||
readFile(*p);
|
||||
|
||||
/* Start the server for receiving new roots. */
|
||||
auto socketPath = stateDir.get() + gcSocketPath;
|
||||
createDirs(dirOf(socketPath));
|
||||
auto fdServer = createUnixDomainSocket(socketPath, 0666);
|
||||
|
||||
if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
|
||||
throw SysError("making socket '%1%' non-blocking", socketPath);
|
||||
|
||||
Pipe shutdownPipe;
|
||||
shutdownPipe.create();
|
||||
|
||||
std::thread serverThread([&]() {
|
||||
Sync<std::map<int, std::thread>> connections;
|
||||
|
||||
Finally cleanup([&]() {
|
||||
debug("GC roots server shutting down");
|
||||
fdServer.close();
|
||||
while (true) {
|
||||
auto item = remove_begin(*connections.lock());
|
||||
if (!item) break;
|
||||
auto & [fd, thread] = *item;
|
||||
shutdown(fd, SHUT_RDWR);
|
||||
thread.join();
|
||||
}
|
||||
});
|
||||
|
||||
while (true) {
|
||||
std::vector<struct pollfd> fds;
|
||||
fds.push_back({.fd = shutdownPipe.readSide.get(), .events = POLLIN});
|
||||
fds.push_back({.fd = fdServer.get(), .events = POLLIN});
|
||||
auto count = poll(fds.data(), fds.size(), -1);
|
||||
assert(count != -1);
|
||||
|
||||
if (fds[0].revents)
|
||||
/* Parent is asking us to quit. */
|
||||
break;
|
||||
|
||||
if (fds[1].revents) {
|
||||
/* Accept a new connection. */
|
||||
assert(fds[1].revents & POLLIN);
|
||||
AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
|
||||
if (!fdClient) continue;
|
||||
|
||||
debug("GC roots server accepted new client");
|
||||
|
||||
/* Process the connection in a separate thread. */
|
||||
auto fdClient_ = fdClient.get();
|
||||
std::thread clientThread([&, fdClient = std::move(fdClient)]() {
|
||||
Finally cleanup([&]() {
|
||||
auto conn(connections.lock());
|
||||
auto i = conn->find(fdClient.get());
|
||||
if (i != conn->end()) {
|
||||
i->second.detach();
|
||||
conn->erase(i);
|
||||
}
|
||||
});
|
||||
|
||||
/* On macOS, accepted sockets inherit the
|
||||
non-blocking flag from the server socket, so
|
||||
explicitly make it blocking. */
|
||||
if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
|
||||
abort();
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
auto path = readLine(fdClient.get());
|
||||
auto storePath = maybeParseStorePath(path);
|
||||
if (storePath) {
|
||||
debug("got new GC root '%s'", path);
|
||||
auto hashPart = std::string(storePath->hashPart());
|
||||
auto shared(_shared.lock());
|
||||
shared->tempRoots.insert(hashPart);
|
||||
/* If this path is currently being
|
||||
deleted, then we have to wait until
|
||||
deletion is finished to ensure that
|
||||
the client doesn't start
|
||||
re-creating it before we're
|
||||
done. FIXME: ideally we would use a
|
||||
FD for this so we don't block the
|
||||
poll loop. */
|
||||
while (shared->pending == hashPart) {
|
||||
debug("synchronising with deletion of path '%s'", path);
|
||||
shared.wait(wakeup);
|
||||
}
|
||||
} else
|
||||
printError("received garbage instead of a root from client");
|
||||
writeFull(fdClient.get(), "1", false);
|
||||
} catch (Error & e) {
|
||||
debug("reading GC root from client: %s", e.msg());
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
connections.lock()->insert({fdClient_, std::move(clientThread)});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Finally stopServer([&]() {
|
||||
writeFull(shutdownPipe.writeSide.get(), "x", false);
|
||||
wakeup.notify_all();
|
||||
if (serverThread.joinable()) serverThread.join();
|
||||
});
|
||||
|
||||
/* Find the roots. Since we've grabbed the GC lock, the set of
|
||||
permanent roots cannot increase now. */
|
||||
printInfo("finding garbage collector roots...");
|
||||
Roots rootMap;
|
||||
if (!options.ignoreLiveness)
|
||||
findRootsNoTemp(rootMap, true);
|
||||
|
||||
for (auto & i : rootMap) roots.insert(i.first);
|
||||
|
||||
/* Read the temporary roots created before we acquired the global
|
||||
GC root. Any new roots will be sent to our socket. */
|
||||
Roots tempRoots;
|
||||
findTempRoots(tempRoots, true);
|
||||
for (auto & root : tempRoots) {
|
||||
_shared.lock()->tempRoots.insert(std::string(root.first.hashPart()));
|
||||
roots.insert(root.first);
|
||||
}
|
||||
|
||||
/* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */
|
||||
if (auto p = getEnv("_NIX_TEST_GC_SYNC_2"))
|
||||
readFile(*p);
|
||||
|
||||
/* Helper function that deletes a path from the store and throws
|
||||
GCLimitReached if we've deleted enough garbage. */
|
||||
auto deleteFromStore = [&](std::string_view baseName)
|
||||
{
|
||||
Path path = storeDir + "/" + std::string(baseName);
|
||||
Path realPath = realStoreDir + "/" + std::string(baseName);
|
||||
|
||||
/* There may be temp directories in the store that are still in use
|
||||
by another process. We need to be sure that we can acquire an
|
||||
exclusive lock before deleting them. */
|
||||
if (baseName.find("tmp-", 0) == 0) {
|
||||
AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
|
||||
if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
|
||||
debug("skipping locked tempdir '%s'", realPath);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
printInfo("deleting '%1%'", path);
|
||||
|
||||
results.paths.insert(path);
|
||||
|
||||
uint64_t bytesFreed;
|
||||
deleteStorePath(realPath, bytesFreed);
|
||||
|
||||
results.bytesFreed += bytesFreed;
|
||||
|
||||
if (results.bytesFreed > options.maxFreed) {
|
||||
printInfo("deleted more than %d bytes; stopping", options.maxFreed);
|
||||
throw GCLimitReached();
|
||||
}
|
||||
};
|
||||
|
||||
std::map<StorePath, StorePathSet> referrersCache;
|
||||
|
||||
/* Helper function that visits all paths reachable from `start`
|
||||
via the referrers edges and optionally derivers and derivation
|
||||
output edges. If none of those paths are roots, then all
|
||||
visited paths are garbage and are deleted. */
|
||||
auto deleteReferrersClosure = [&](const StorePath & start) {
|
||||
StorePathSet visited;
|
||||
std::queue<StorePath> todo;
|
||||
|
||||
/* Wake up any GC client waiting for deletion of the paths in
|
||||
'visited' to finish. */
|
||||
Finally releasePending([&]() {
|
||||
auto shared(_shared.lock());
|
||||
shared->pending.reset();
|
||||
wakeup.notify_all();
|
||||
});
|
||||
|
||||
auto enqueue = [&](const StorePath & path) {
|
||||
if (visited.insert(path).second)
|
||||
todo.push(path);
|
||||
};
|
||||
|
||||
enqueue(start);
|
||||
|
||||
while (auto path = pop(todo)) {
|
||||
checkInterrupt();
|
||||
|
||||
/* Bail out if we've previously discovered that this path
|
||||
is alive. */
|
||||
if (alive.count(*path)) {
|
||||
alive.insert(start);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If we've previously deleted this path, we don't have to
|
||||
handle it again. */
|
||||
if (dead.count(*path)) continue;
|
||||
|
||||
auto markAlive = [&]()
|
||||
{
|
||||
alive.insert(*path);
|
||||
alive.insert(start);
|
||||
try {
|
||||
StorePathSet closure;
|
||||
computeFSClosure(*path, closure,
|
||||
/* flipDirection */ false, gcKeepOutputs, gcKeepDerivations);
|
||||
for (auto & p : closure)
|
||||
alive.insert(p);
|
||||
} catch (InvalidPath &) { }
|
||||
};
|
||||
|
||||
/* If this is a root, bail out. */
|
||||
if (roots.count(*path)) {
|
||||
debug("cannot delete '%s' because it's a root", printStorePath(*path));
|
||||
return markAlive();
|
||||
}
|
||||
|
||||
if (options.action == GCOptions::gcDeleteSpecific
|
||||
&& !options.pathsToDelete.count(*path))
|
||||
return;
|
||||
|
||||
{
|
||||
auto hashPart = std::string(path->hashPart());
|
||||
auto shared(_shared.lock());
|
||||
if (shared->tempRoots.count(hashPart)) {
|
||||
debug("cannot delete '%s' because it's a temporary root", printStorePath(*path));
|
||||
return markAlive();
|
||||
}
|
||||
shared->pending = hashPart;
|
||||
}
|
||||
|
||||
if (isValidPath(*path)) {
|
||||
|
||||
/* Visit the referrers of this path. */
|
||||
auto i = referrersCache.find(*path);
|
||||
if (i == referrersCache.end()) {
|
||||
StorePathSet referrers;
|
||||
queryGCReferrers(*path, referrers);
|
||||
referrersCache.emplace(*path, std::move(referrers));
|
||||
i = referrersCache.find(*path);
|
||||
}
|
||||
for (auto & p : i->second)
|
||||
enqueue(p);
|
||||
|
||||
/* If keep-derivations is set and this is a
|
||||
derivation, then visit the derivation outputs. */
|
||||
if (gcKeepDerivations && path->isDerivation()) {
|
||||
for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
|
||||
if (maybeOutPath &&
|
||||
isValidPath(*maybeOutPath) &&
|
||||
queryPathInfo(*maybeOutPath)->deriver == *path)
|
||||
enqueue(*maybeOutPath);
|
||||
}
|
||||
|
||||
/* If keep-outputs is set, then visit the derivers. */
|
||||
if (gcKeepOutputs) {
|
||||
auto derivers = queryValidDerivers(*path);
|
||||
for (auto & i : derivers)
|
||||
enqueue(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto & path : topoSortPaths(visited)) {
|
||||
if (!dead.insert(path).second) continue;
|
||||
if (shouldDelete) {
|
||||
invalidatePathChecked(path);
|
||||
deleteFromStore(path.to_string());
|
||||
referrersCache.erase(path);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/* Either delete all garbage paths, or just the specified
|
||||
paths (for gcDeleteSpecific). */
|
||||
if (options.action == GCOptions::gcDeleteSpecific) {
|
||||
|
||||
for (auto & i : options.pathsToDelete) {
|
||||
deleteReferrersClosure(i);
|
||||
if (!dead.count(i))
|
||||
throw Error(
|
||||
"Cannot delete path '%1%' since it is still alive. "
|
||||
"To find out why, use: "
|
||||
"nix-store --query --roots",
|
||||
printStorePath(i));
|
||||
}
|
||||
|
||||
} else if (options.maxFreed > 0) {
|
||||
|
||||
if (shouldDelete)
|
||||
printInfo("deleting garbage...");
|
||||
else
|
||||
printInfo("determining live/dead paths...");
|
||||
|
||||
try {
|
||||
AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", realStoreDir);
|
||||
|
||||
/* Read the store and delete all paths that are invalid or
|
||||
unreachable. We don't use readDirectory() here so that
|
||||
GCing can start faster. */
|
||||
auto linksName = baseNameOf(linksDir);
|
||||
Paths entries;
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) {
|
||||
checkInterrupt();
|
||||
std::string name = dirent->d_name;
|
||||
if (name == "." || name == ".." || name == linksName) continue;
|
||||
|
||||
if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
|
||||
deleteReferrersClosure(*storePath);
|
||||
else
|
||||
deleteFromStore(name);
|
||||
|
||||
}
|
||||
} catch (GCLimitReached & e) {
|
||||
}
|
||||
}
|
||||
|
||||
if (options.action == GCOptions::gcReturnLive) {
|
||||
for (auto & i : alive)
|
||||
results.paths.insert(printStorePath(i));
|
||||
return;
|
||||
}
|
||||
|
||||
if (options.action == GCOptions::gcReturnDead) {
|
||||
for (auto & i : dead)
|
||||
results.paths.insert(printStorePath(i));
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unlink all files in /nix/store/.links that have a link count of 1,
|
||||
which indicates that there are no other links and so they can be
|
||||
safely deleted. FIXME: race condition with optimisePath(): we
|
||||
might see a link count of 1 just before optimisePath() increases
|
||||
the link count. */
|
||||
if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
|
||||
printInfo("deleting unused links...");
|
||||
|
||||
AutoCloseDir dir(opendir(linksDir.c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", linksDir);
|
||||
|
||||
int64_t actualSize = 0, unsharedSize = 0;
|
||||
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) {
|
||||
checkInterrupt();
|
||||
std::string name = dirent->d_name;
|
||||
if (name == "." || name == "..") continue;
|
||||
Path path = linksDir + "/" + name;
|
||||
|
||||
auto st = lstat(path);
|
||||
|
||||
if (st.st_nlink != 1) {
|
||||
actualSize += st.st_size;
|
||||
unsharedSize += (st.st_nlink - 1) * st.st_size;
|
||||
continue;
|
||||
}
|
||||
|
||||
printMsg(lvlTalkative, "deleting unused link '%1%'", path);
|
||||
|
||||
if (unlink(path.c_str()) == -1)
|
||||
throw SysError("deleting '%1%'", path);
|
||||
|
||||
/* Do not account for deleted file here. Rely on deletePath()
|
||||
accounting. */
|
||||
}
|
||||
|
||||
struct stat st;
|
||||
if (stat(linksDir.c_str(), &st) == -1)
|
||||
throw SysError("statting '%1%'", linksDir);
|
||||
int64_t overhead = st.st_blocks * 512ULL;
|
||||
|
||||
printInfo("note: currently hard linking saves %.2f MiB",
|
||||
((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
|
||||
}
|
||||
|
||||
/* While we're at it, vacuum the database. */
|
||||
//if (options.action == GCOptions::gcDeleteDead) vacuumDB();
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::autoGC(bool sync)
|
||||
{
|
||||
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");
|
||||
|
||||
auto getAvail = [this]() -> uint64_t {
|
||||
if (fakeFreeSpaceFile)
|
||||
return std::stoll(readFile(*fakeFreeSpaceFile));
|
||||
|
||||
struct statvfs st;
|
||||
if (statvfs(realStoreDir.get().c_str(), &st))
|
||||
throw SysError("getting filesystem info about '%s'", realStoreDir);
|
||||
|
||||
return (uint64_t) st.f_bavail * st.f_frsize;
|
||||
};
|
||||
|
||||
std::shared_future<void> future;
|
||||
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
if (state->gcRunning) {
|
||||
future = state->gcFuture;
|
||||
debug("waiting for auto-GC to finish");
|
||||
goto sync;
|
||||
}
|
||||
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
|
||||
if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;
|
||||
|
||||
auto avail = getAvail();
|
||||
|
||||
state->lastGCCheck = now;
|
||||
|
||||
if (avail >= settings.minFree || avail >= settings.maxFree) return;
|
||||
|
||||
if (avail > state->availAfterGC * 0.97) return;
|
||||
|
||||
state->gcRunning = true;
|
||||
|
||||
std::promise<void> promise;
|
||||
future = state->gcFuture = promise.get_future().share();
|
||||
|
||||
std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable {
|
||||
|
||||
try {
|
||||
|
||||
/* Wake up any threads waiting for the auto-GC to finish. */
|
||||
Finally wakeup([&]() {
|
||||
auto state(_state.lock());
|
||||
state->gcRunning = false;
|
||||
state->lastGCCheck = std::chrono::steady_clock::now();
|
||||
promise.set_value();
|
||||
});
|
||||
|
||||
GCOptions options;
|
||||
options.maxFreed = settings.maxFree - avail;
|
||||
|
||||
printInfo("running auto-GC to free %d bytes", options.maxFreed);
|
||||
|
||||
GCResults results;
|
||||
|
||||
collectGarbage(options, results);
|
||||
|
||||
_state.lock()->availAfterGC = getAvail();
|
||||
|
||||
} catch (...) {
|
||||
// FIXME: we could propagate the exception to the
|
||||
// future, but we don't really care.
|
||||
ignoreException();
|
||||
}
|
||||
|
||||
}).detach();
|
||||
}
|
||||
|
||||
sync:
|
||||
// Wait for the future outside of the state lock.
|
||||
if (sync) future.get();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
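The GC roots server in `collectGarbage()` above implements a small line-oriented protocol: a client connects to the GC roots socket, writes a store path followed by a newline, and waits for a one-byte acknowledgement (`1`); the reply is deliberately delayed while that path is being deleted. Nix's own `addTempRoot()` is the real client for this socket. The following is only a minimal standalone sketch of that exchange, assuming the default socket location under the state directory (`/nix/var/nix/gc-socket/socket`); the function name and error handling are illustrative.

```cpp
// Hypothetical standalone client for the GC roots socket described above.
#include <cstring>
#include <stdexcept>
#include <string>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

void sendTempRoot(const std::string & storePath,
    const std::string & socketPath = "/nix/var/nix/gc-socket/socket")
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == -1) throw std::runtime_error("cannot create socket");

    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    std::strncpy(addr.sun_path, socketPath.c_str(), sizeof(addr.sun_path) - 1);

    if (connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) == -1) {
        close(fd);
        throw std::runtime_error("cannot connect to GC roots socket");
    }

    /* One root per line; the server records it and answers with "1". */
    std::string line = storePath + "\n";
    if (write(fd, line.data(), line.size()) != (ssize_t) line.size()) {
        close(fd);
        throw std::runtime_error("cannot send root to GC server");
    }

    char ack;
    if (read(fd, &ack, 1) != 1) {
        close(fd);
        throw std::runtime_error("no acknowledgement from GC server");
    }

    close(fd);
}
```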
292
src/libstore/unix/local-overlay-store.cc
Normal file
292
src/libstore/unix/local-overlay-store.cc
Normal file
|
|
@ -0,0 +1,292 @@
|
|||
#include "local-overlay-store.hh"
|
||||
#include "callback.hh"
|
||||
#include "realisation.hh"
|
||||
#include "processes.hh"
|
||||
#include "url.hh"
|
||||
#include <regex>
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::string LocalOverlayStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "local-overlay-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
Path LocalOverlayStoreConfig::toUpperPath(const StorePath & path) {
|
||||
return upperLayer + "/" + path.to_string();
|
||||
}
|
||||
|
||||
LocalOverlayStore::LocalOverlayStore(const Params & params)
|
||||
: StoreConfig(params)
|
||||
, LocalFSStoreConfig(params)
|
||||
, LocalStoreConfig(params)
|
||||
, LocalOverlayStoreConfig(params)
|
||||
, Store(params)
|
||||
, LocalFSStore(params)
|
||||
, LocalStore(params)
|
||||
, lowerStore(openStore(percentDecode(lowerStoreUri.get())).dynamic_pointer_cast<LocalFSStore>())
|
||||
{
|
||||
if (checkMount.get()) {
|
||||
std::smatch match;
|
||||
std::string mountInfo;
|
||||
auto mounts = readFile("/proc/self/mounts");
|
||||
auto regex = std::regex(R"((^|\n)overlay )" + realStoreDir.get() + R"( .*(\n|$))");
|
||||
|
||||
// Mount points can be stacked, so there might be multiple matching entries.
|
||||
// Loop until the last match, which will be the current state of the mount point.
|
||||
while (std::regex_search(mounts, match, regex)) {
|
||||
mountInfo = match.str();
|
||||
mounts = match.suffix();
|
||||
}
|
||||
|
||||
auto checkOption = [&](std::string option, std::string value) {
|
||||
return std::regex_search(mountInfo, std::regex("\\b" + option + "=" + value + "( |,)"));
|
||||
};
|
||||
|
||||
auto expectedLowerDir = lowerStore->realStoreDir.get();
|
||||
if (!checkOption("lowerdir", expectedLowerDir) || !checkOption("upperdir", upperLayer)) {
|
||||
debug("expected lowerdir: %s", expectedLowerDir);
|
||||
debug("expected upperdir: %s", upperLayer);
|
||||
debug("actual mount: %s", mountInfo);
|
||||
throw Error("overlay filesystem '%s' mounted incorrectly",
|
||||
realStoreDir.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::registerDrvOutput(const Realisation & info)
|
||||
{
|
||||
// First do queryRealisation on lower layer to populate DB
|
||||
auto res = lowerStore->queryRealisation(info.id);
|
||||
if (res)
|
||||
LocalStore::registerDrvOutput(*res);
|
||||
|
||||
LocalStore::registerDrvOutput(info);
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
|
||||
{
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
LocalStore::queryPathInfoUncached(path,
|
||||
{[this, path, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
|
||||
try {
|
||||
auto info = fut.get();
|
||||
if (info)
|
||||
return (*callbackPtr)(std::move(info));
|
||||
} catch (...) {
|
||||
return callbackPtr->rethrow();
|
||||
}
|
||||
// If we don't have it, check lower store
|
||||
lowerStore->queryPathInfo(path,
|
||||
{[path, callbackPtr](std::future<ref<const ValidPathInfo>> fut) {
|
||||
try {
|
||||
(*callbackPtr)(fut.get().get_ptr());
|
||||
} catch (...) {
|
||||
return callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
}});
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::queryRealisationUncached(const DrvOutput & drvOutput,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept
|
||||
{
|
||||
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||
|
||||
LocalStore::queryRealisationUncached(drvOutput,
|
||||
{[this, drvOutput, callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
|
||||
try {
|
||||
auto info = fut.get();
|
||||
if (info)
|
||||
return (*callbackPtr)(std::move(info));
|
||||
} catch (...) {
|
||||
return callbackPtr->rethrow();
|
||||
}
|
||||
// If we don't have it, check lower store
|
||||
lowerStore->queryRealisation(drvOutput,
|
||||
{[callbackPtr](std::future<std::shared_ptr<const Realisation>> fut) {
|
||||
try {
|
||||
(*callbackPtr)(fut.get());
|
||||
} catch (...) {
|
||||
return callbackPtr->rethrow();
|
||||
}
|
||||
}});
|
||||
}});
|
||||
}
|
||||
|
||||
|
||||
bool LocalOverlayStore::isValidPathUncached(const StorePath & path)
|
||||
{
|
||||
auto res = LocalStore::isValidPathUncached(path);
|
||||
if (res) return res;
|
||||
res = lowerStore->isValidPath(path);
|
||||
if (res) {
|
||||
// Get path info from lower store so upper DB genuinely has it.
|
||||
auto p = lowerStore->queryPathInfo(path);
|
||||
// Recurse on references, syncing the entire closure.
|
||||
for (auto & r : p->references)
|
||||
if (r != path)
|
||||
isValidPath(r);
|
||||
LocalStore::registerValidPath(*p);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::queryReferrers(const StorePath & path, StorePathSet & referrers)
|
||||
{
|
||||
LocalStore::queryReferrers(path, referrers);
|
||||
lowerStore->queryReferrers(path, referrers);
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::queryGCReferrers(const StorePath & path, StorePathSet & referrers)
|
||||
{
|
||||
LocalStore::queryReferrers(path, referrers);
|
||||
}
|
||||
|
||||
|
||||
StorePathSet LocalOverlayStore::queryValidDerivers(const StorePath & path)
|
||||
{
|
||||
auto res = LocalStore::queryValidDerivers(path);
|
||||
for (auto p : lowerStore->queryValidDerivers(path))
|
||||
res.insert(p);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
std::optional<StorePath> LocalOverlayStore::queryPathFromHashPart(const std::string & hashPart)
|
||||
{
|
||||
auto res = LocalStore::queryPathFromHashPart(hashPart);
|
||||
if (res)
|
||||
return res;
|
||||
else
|
||||
return lowerStore->queryPathFromHashPart(hashPart);
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::registerValidPaths(const ValidPathInfos & infos)
|
||||
{
|
||||
// First, get any from lower store so we merge
|
||||
{
|
||||
StorePathSet notInUpper;
|
||||
for (auto & [p, _] : infos)
|
||||
if (!LocalStore::isValidPathUncached(p)) // avoid divergence
|
||||
notInUpper.insert(p);
|
||||
auto pathsInLower = lowerStore->queryValidPaths(notInUpper);
|
||||
ValidPathInfos inLower;
|
||||
for (auto & p : pathsInLower)
|
||||
inLower.insert_or_assign(p, *lowerStore->queryPathInfo(p));
|
||||
LocalStore::registerValidPaths(inLower);
|
||||
}
|
||||
// Then do original request
|
||||
LocalStore::registerValidPaths(infos);
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{
|
||||
LocalStore::collectGarbage(options, results);
|
||||
|
||||
remountIfNecessary();
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::deleteStorePath(const Path & path, uint64_t & bytesFreed)
|
||||
{
|
||||
auto mergedDir = realStoreDir.get() + "/";
|
||||
if (path.substr(0, mergedDir.length()) != mergedDir) {
|
||||
warn("local-overlay: unexpected gc path '%s' ", path);
|
||||
return;
|
||||
}
|
||||
|
||||
StorePath storePath = {path.substr(mergedDir.length())};
|
||||
auto upperPath = toUpperPath(storePath);
|
||||
|
||||
if (pathExists(upperPath)) {
|
||||
debug("upper exists: %s", path);
|
||||
if (lowerStore->isValidPath(storePath)) {
|
||||
debug("lower exists: %s", storePath.to_string());
|
||||
// Path also exists in lower store.
|
||||
// We must delete via upper layer to avoid creating a whiteout.
|
||||
deletePath(upperPath, bytesFreed);
|
||||
_remountRequired = true;
|
||||
} else {
|
||||
// Path does not exist in lower store.
|
||||
// So we can delete via overlayfs and not need to remount.
|
||||
LocalStore::deleteStorePath(path, bytesFreed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::optimiseStore()
|
||||
{
|
||||
Activity act(*logger, actOptimiseStore);
|
||||
|
||||
// Note for LocalOverlayStore, queryAllValidPaths only returns paths in upper layer
|
||||
auto paths = queryAllValidPaths();
|
||||
|
||||
act.progress(0, paths.size());
|
||||
|
||||
uint64_t done = 0;
|
||||
|
||||
for (auto & path : paths) {
|
||||
if (lowerStore->isValidPath(path)) {
|
||||
uint64_t bytesFreed = 0;
|
||||
// Deduplicate store path
|
||||
deleteStorePath(Store::toRealPath(path), bytesFreed);
|
||||
}
|
||||
done++;
|
||||
act.progress(done, paths.size());
|
||||
}
|
||||
|
||||
remountIfNecessary();
|
||||
}
|
||||
|
||||
|
||||
LocalStore::VerificationResult LocalOverlayStore::verifyAllValidPaths(RepairFlag repair)
|
||||
{
|
||||
StorePathSet done;
|
||||
|
||||
auto existsInStoreDir = [&](const StorePath & storePath) {
|
||||
return pathExists(realStoreDir.get() + "/" + storePath.to_string());
|
||||
};
|
||||
|
||||
bool errors = false;
|
||||
StorePathSet validPaths;
|
||||
|
||||
for (auto & i : queryAllValidPaths())
|
||||
verifyPath(i, existsInStoreDir, done, validPaths, repair, errors);
|
||||
|
||||
return {
|
||||
.errors = errors,
|
||||
.validPaths = validPaths,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
void LocalOverlayStore::remountIfNecessary()
|
||||
{
|
||||
if (!_remountRequired) return;
|
||||
|
||||
if (remountHook.get().empty()) {
|
||||
warn("'%s' needs remounting, set remount-hook to do this automatically", realStoreDir.get());
|
||||
} else {
|
||||
runProgram(remountHook, false, {realStoreDir});
|
||||
}
|
||||
|
||||
_remountRequired = false;
|
||||
}
|
||||
|
||||
|
||||
static RegisterStoreImplementation<LocalOverlayStore, LocalOverlayStoreConfig> regLocalOverlayStore;
|
||||
|
||||
}
|
||||
215
src/libstore/unix/local-overlay-store.hh
Normal file
215
src/libstore/unix/local-overlay-store.hh
Normal file
|
|
@ -0,0 +1,215 @@
|
|||
#include "local-store.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
/**
|
||||
* Configuration for `LocalOverlayStore`.
|
||||
*/
|
||||
struct LocalOverlayStoreConfig : virtual LocalStoreConfig
|
||||
{
|
||||
LocalOverlayStoreConfig(const StringMap & params)
|
||||
: StoreConfig(params)
|
||||
, LocalFSStoreConfig(params)
|
||||
, LocalStoreConfig(params)
|
||||
{ }
|
||||
|
||||
const Setting<std::string> lowerStoreUri{(StoreConfig*) this, "", "lower-store",
|
||||
R"(
|
||||
[Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
|
||||
for the lower store. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly).
|
||||
|
||||
Must be a store with a store dir on the file system.
|
||||
Must be used as OverlayFS lower layer for this store's store dir.
|
||||
)"};
|
||||
|
||||
const PathSetting upperLayer{(StoreConfig*) this, "", "upper-layer",
|
||||
R"(
|
||||
Directory containing the OverlayFS upper layer for this store's store dir.
|
||||
)"};
|
||||
|
||||
Setting<bool> checkMount{(StoreConfig*) this, true, "check-mount",
|
||||
R"(
|
||||
Check that the overlay filesystem is correctly mounted.
|
||||
|
||||
Nix does not manage the overlayfs mount point itself, but the correct
|
||||
functioning of the overlay store does depend on this mount point being set up
|
||||
correctly. Rather than just assume this is the case, check that the lowerdir
|
||||
and upperdir options are what we expect them to be. This check is on by
|
||||
default, but can be disabled if needed.
|
||||
)"};
|
||||
|
||||
const PathSetting remountHook{(StoreConfig*) this, "", "remount-hook",
|
||||
R"(
|
||||
Script or other executable to run when overlay filesystem needs remounting.
|
||||
|
||||
This is occasionally necessary when deleting a store path that exists in both upper and lower layers.
|
||||
In such a situation, bypassing OverlayFS and deleting the path in the upper layer directly
|
||||
is the only way to perform the deletion without creating a "whiteout".
|
||||
However, this causes the OverlayFS kernel data structures to get out of sync,
|
||||
and can lead to 'stale file handle' errors; remounting solves the problem.
|
||||
|
||||
The store directory is passed as an argument to the invoked executable.
|
||||
)"};
|
||||
|
||||
const std::string name() override { return "Experimental Local Overlay Store"; }
|
||||
|
||||
std::optional<ExperimentalFeature> experimentalFeature() const override
|
||||
{
|
||||
return ExperimentalFeature::LocalOverlayStore;
|
||||
}
|
||||
|
||||
std::string doc() override;
|
||||
|
||||
protected:
|
||||
/**
|
||||
* @return The host OS path corresponding to the store path for the
|
||||
* upper layer.
|
||||
*
|
||||
* @note There is no guarantee that a store object is actually stored
|
||||
* at that file path. It might be stored in the lower layer instead,
|
||||
* or it might not be part of this store at all.
|
||||
*/
|
||||
Path toUpperPath(const StorePath & path);
|
||||
};
|
||||
|
||||
/**
|
||||
* Variation of local store using OverlayFS for the store directory.
|
||||
*
|
||||
* Documentation on overridden methods states how they differ from their
|
||||
* `LocalStore` counterparts.
|
||||
*/
|
||||
class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual LocalStore
|
||||
{
|
||||
/**
|
||||
* The store beneath us.
|
||||
*
|
||||
* Our store dir should be an overlay fs where the lower layer
|
||||
* is that store's store dir, and the upper layer is some
|
||||
* scratch storage just for us.
|
||||
*/
|
||||
ref<LocalFSStore> lowerStore;
|
||||
|
||||
public:
|
||||
LocalOverlayStore(const Params & params);
|
||||
|
||||
LocalOverlayStore(std::string scheme, std::string path, const Params & params)
|
||||
: LocalOverlayStore(params)
|
||||
{
|
||||
if (!path.empty())
|
||||
throw UsageError("local-overlay:// store url doesn't support path part, only scheme and query params");
|
||||
}
|
||||
|
||||
static std::set<std::string> uriSchemes()
|
||||
{
|
||||
return { "local-overlay" };
|
||||
}
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return "local-overlay://";
|
||||
}
|
||||
|
||||
private:
|
||||
/**
|
||||
* First copy up any lower store realisation with the same key, so we
|
||||
* merge rather than mask it.
|
||||
*/
|
||||
void registerDrvOutput(const Realisation & info) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*
|
||||
* In addition, copy up metadata for lower store objects (and their
|
||||
* closure). (I.e. Optimistically cache in the upper DB.)
|
||||
*/
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
/**
|
||||
* Check the lower store and upper DB.
|
||||
*/
|
||||
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
/**
|
||||
* Check the lower store and upper DB.
|
||||
*/
|
||||
StorePathSet queryValidDerivers(const StorePath & path) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
|
||||
|
||||
/**
|
||||
* First copy up any lower store realisation with the same key, so we
|
||||
* merge rather than mask it.
|
||||
*/
|
||||
void registerValidPaths(const ValidPathInfos & infos) override;
|
||||
|
||||
/**
|
||||
* Check lower store if upper DB does not have.
|
||||
*/
|
||||
void queryRealisationUncached(const DrvOutput&,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
|
||||
/**
|
||||
* Call `remountIfNecessary` after collecting garbage normally.
|
||||
*/
|
||||
void collectGarbage(const GCOptions & options, GCResults & results) override;
|
||||
|
||||
/**
|
||||
* Check which layers the store object exists in to try to avoid
|
||||
* needing to remount.
|
||||
*/
|
||||
void deleteStorePath(const Path & path, uint64_t & bytesFreed) override;
|
||||
|
||||
/**
|
||||
* Deduplicate by removing store objects from the upper layer that
|
||||
* are now in the lower layer.
|
||||
*
|
||||
* Operations on a layered store will not cause duplications, but addition of
|
||||
* new store objects to the lower layer can induce them
|
||||
* (there is no way to prevent that). This cleans up those
|
||||
* duplications.
|
||||
*
|
||||
* @note We do not yet optimise the upper layer in the normal way
* (hardlinking). We would like to, but it requires more
|
||||
* refactoring of existing code to support this sustainably.
|
||||
*/
|
||||
void optimiseStore() override;
|
||||
|
||||
/**
|
||||
* Check all paths registered in the upper DB.
|
||||
*
|
||||
* Note that this includes store objects that reside in either overlayfs layer;
|
||||
* just enumerating the contents of the upper layer would skip them.
|
||||
*
|
||||
* We don't verify the contents of both layers on the assumption that the lower layer is far bigger,
|
||||
* and also the observation that the overlayfs does not yet care about anything that is not in the upper DB.
|
||||
*/
|
||||
VerificationResult verifyAllValidPaths(RepairFlag repair) override;
|
||||
|
||||
/**
|
||||
* Deletion only affects the upper layer, so we ignore lower-layer referrers.
|
||||
*/
|
||||
void queryGCReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
/**
|
||||
* Call the `remountHook` if we have done something such that the
|
||||
* OverlayFS needed to be remounted. See that hook's user-facing
|
||||
* documentation for further details.
|
||||
*/
|
||||
void remountIfNecessary();
|
||||
|
||||
/**
|
||||
* State for `remountIfNecessary`
|
||||
*/
|
||||
std::atomic_bool _remountRequired = false;
|
||||
};
|
||||
|
||||
}
|
||||
123
src/libstore/unix/local-overlay-store.md
Normal file
123
src/libstore/unix/local-overlay-store.md
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
R"(
|
||||
|
||||
**Store URL format**: `local-overlay`
|
||||
|
||||
This store type is a variation of the [local store] designed to leverage Linux's [Overlay Filesystem](https://docs.kernel.org/filesystems/overlayfs.html) (OverlayFS for short).
|
||||
Just as OverlayFS combines a lower and upper filesystem by treating the upper one as a patch against the lower, the local overlay store combines a lower store with an upper almost-[local store].
|
||||
("almost" because while the upper fileystems for OverlayFS is valid on its own, the upper almost-store is not a valid local store on its own because some references will dangle.)
|
||||
To use this store, you will first need to configure an OverlayFS mountpoint [appropriately](#example-filesystem-layout) as Nix will not do this for you (though it will verify the mountpoint is configured correctly).
|
||||
|
||||
### Conceptual parts of a local overlay store
|
||||
|
||||
*This is a more abstract/conceptual description of the parts of a layered store, an authoritative reference.
|
||||
For more "practical" instructions, see the worked-out example in the next subsection.*
|
||||
|
||||
The parts of a local overlay store are as follows:
|
||||
|
||||
- **Lower store**:
|
||||
|
||||
This is any store implementation that includes a store directory as part of the native operating system filesystem.
|
||||
For example, this could be a [local store], [local daemon store], or even another local overlay store.
|
||||
|
||||
The local overlay store never tries to modify the lower store in any way.
|
||||
Something else could modify the lower store, but there are restrictions on this:
|
||||
Nix itself requires that this store only grow, and not change in other ways.
|
||||
For example, new store objects can be added, but deleting or modifying store objects is not allowed in general, because that will confuse and corrupt any local overlay store using those objects.
|
||||
(In addition, the underlying filesystem overlay mechanism may impose additional restrictions, see below.)
|
||||
|
||||
The lower store must not change while it is mounted as part of an overlay store.
|
||||
To ensure it does not, you might want to mount the store directory read-only (which then requires the [read-only] parameter to be set to `true`).
|
||||
|
||||
Specified with the [`lower-store`](#store-experimental-local-overlay-store-lower-store) setting.
|
||||
|
||||
- **Lower store directory**:
|
||||
|
||||
This is the directory used/exposed by the lower store.
|
||||
|
||||
Specified with the `lower-store.real` setting.
|
||||
|
||||
As specified above, Nix requires that the lower store only grow, and not change in other ways.
|
||||
Linux's OverlayFS in addition imposes the further requirement that this directory cannot change at all.
|
||||
That means that, while any local overlay store exists that is using this store as a lower store, this directory must not change.
|
||||
|
||||
- **Lower metadata source**:
|
||||
|
||||
This is abstract, just some way to read the metadata of lower store [store objects][store object].
|
||||
For example it could be a SQLite database (for the [local store]), or a socket connection (for the [local daemon store]).
|
||||
|
||||
This need not be writable.
|
||||
As stated above a local overlay store never tries to modify its lower store.
|
||||
The lower store's metadata is considered part of the lower store, just as the store's [file system objects][file system object] that appear in the store directory are.
|
||||
|
||||
- **Upper almost-store**:
|
||||
|
||||
This is almost but not quite just a [local store].
|
||||
That is because, taken in isolation rather than as part of a local overlay store, it would appear corrupted.
|
||||
But combined with everything else as part of a local overlay store, it is valid.
|
||||
|
||||
- **Upper layer directory**:
|
||||
|
||||
This contains additional [store objects][store object]
|
||||
(or, strictly speaking, their [file system objects][file system object] that the local overlay store will extend the lower store with).
|
||||
|
||||
Specified with the [`upper-layer`](#store-experimental-local-overlay-store-upper-layer) setting.
|
||||
|
||||
- **Upper store directory**:
|
||||
|
||||
This contains all the store objects from each of the two directories.
|
||||
|
||||
The lower store directory and upper layer directory are combined via OverlayFS to create this directory.
|
||||
Nix doesn't do this itself, because it typically wouldn't have the permissions to do so, so it is the responsibility of the user to set this up first.
|
||||
Nix can, however, optionally check that the OverlayFS mount settings appear as expected, matching Nix's own settings.
|
||||
|
||||
Specified with the [`real`](#store-experimental-local-overlay-store-real) setting.
|
||||
|
||||
- **Upper SQLite database**:
|
||||
|
||||
This contains the metadata of all of the upper layer [store objects][store object] (everything beyond their file system objects), and also duplicate copies of some lower layer store objects' metadata.
|
||||
The duplication is so the metadata for the [closure](@docroot@/glossary.md#gloss-closure) of upper layer [store objects][store object] can be found entirely within the upper layer.
|
||||
(This allows us to use the same SQL Schema as the [local store]'s SQLite database, as foreign keys in that schema enforce closure metadata to be self-contained in this way.)
|
||||
|
||||
The location of the database is not specified directly, but depends on the [`state`](#store-experimental-local-overlay-store-state) setting.
It is always `${state}/db`.
|
||||
|
||||
[file system object]: @docroot@/store/file-system-object.md
|
||||
[store object]: @docroot@/store/store-object.md
|
||||
|
||||
|
||||
### Example filesystem layout
|
||||
|
||||
Here is a worked out example of usage, following the concepts in the previous section.
|
||||
|
||||
Say we have the following paths:
|
||||
|
||||
- `/mnt/example/merged-store/nix/store`
|
||||
|
||||
- `/mnt/example/store-a/nix/store`
|
||||
|
||||
- `/mnt/example/store-b`
|
||||
|
||||
Then the following store URI can be used to access a local-overlay store at `/mnt/example/merged-store`:
|
||||
|
||||
```
|
||||
local-overlay://?root=/mnt/example/merged-store&lower-store=/mnt/example/store-a&upper-layer=/mnt/example/store-b
|
||||
```
|
||||
|
||||
The lower store directory is located at `/mnt/example/store-a/nix/store`, while the upper layer is at `/mnt/example/store-b`.
|
||||
|
||||
Before accessing the overlay store you will need to ensure the OverlayFS mount is set up correctly:
|
||||
|
||||
```shell
|
||||
mount -t overlay overlay \
|
||||
-o lowerdir="/mnt/example/store-a/nix/store" \
|
||||
-o upperdir="/mnt/example/store-b" \
|
||||
-o workdir="/mnt/example/workdir" \
|
||||
"/mnt/example/merged-store/nix/store"
|
||||
```
|
||||
|
||||
Note that OverlayFS requires `/mnt/example/workdir` to be on the same volume as the `upperdir`.
|
||||
|
||||
By default, Nix will check that the mountpoint has been set up correctly and fail with an error if it has not.
|
||||
You can override this behaviour by passing [`check-mount=false`](#store-experimental-local-overlay-store-check-mount) if you need to.
|
||||
|
||||
)"
|
||||
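As a programmatic counterpart to the shell example above, the same overlay store can be opened through `openStore()` from `store-api.hh` (used elsewhere in this diff). This is only a sketch reusing the hypothetical `/mnt/example` paths from the documentation; it assumes the OverlayFS mount is in place, the `local-overlay-store` experimental feature is enabled, and the usual libstore initialisation has already been performed.

```cpp
#include "store-api.hh"

// Sketch: open the local-overlay store from the worked example above.
// The URI mirrors the one shown in the documentation.
nix::ref<nix::Store> openExampleOverlayStore()
{
    return nix::openStore(
        "local-overlay://?root=/mnt/example/merged-store"
        "&lower-store=/mnt/example/store-a"
        "&upper-layer=/mnt/example/store-b");
}
```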
1773
src/libstore/unix/local-store.cc
Normal file
1773
src/libstore/unix/local-store.cc
Normal file
File diff suppressed because it is too large
Load diff
407
src/libstore/unix/local-store.hh
Normal file
407
src/libstore/unix/local-store.hh
Normal file
|
|
@ -0,0 +1,407 @@
|
|||
#pragma once
|
||||
///@file
|
||||
|
||||
#include "sqlite.hh"
|
||||
|
||||
#include "pathlocks.hh"
|
||||
#include "store-api.hh"
|
||||
#include "indirect-root-store.hh"
|
||||
#include "sync.hh"
|
||||
|
||||
#include <chrono>
|
||||
#include <future>
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/**
|
||||
* Nix store and database schema version.
|
||||
*
|
||||
* Version 1 (or 0) was Nix <=
|
||||
* 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
|
||||
* Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
|
||||
* Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0.
|
||||
*/
|
||||
const int nixSchemaVersion = 10;
|
||||
|
||||
|
||||
struct OptimiseStats
|
||||
{
|
||||
unsigned long filesLinked = 0;
|
||||
uint64_t bytesFreed = 0;
|
||||
uint64_t blocksFreed = 0;
|
||||
};
|
||||
|
||||
struct LocalStoreConfig : virtual LocalFSStoreConfig
|
||||
{
|
||||
using LocalFSStoreConfig::LocalFSStoreConfig;
|
||||
|
||||
Setting<bool> requireSigs{this,
|
||||
settings.requireSigs,
|
||||
"require-sigs",
|
||||
"Whether store paths copied into this store should have a trusted signature."};
|
||||
|
||||
Setting<bool> readOnly{this,
|
||||
false,
|
||||
"read-only",
|
||||
R"(
|
||||
Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
|
||||
|
||||
Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
|
||||
|
||||
Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
|
||||
|
||||
> **Warning**
|
||||
> Do not use this unless the filesystem is read-only.
|
||||
>
|
||||
> Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
|
||||
> While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
|
||||
)"};
|
||||
|
||||
const std::string name() override { return "Local Store"; }
|
||||
|
||||
std::string doc() override;
|
||||
};
|
||||
|
||||
class LocalStore : public virtual LocalStoreConfig
|
||||
, public virtual IndirectRootStore
|
||||
, public virtual GcStore
|
||||
{
|
||||
private:
|
||||
|
||||
/**
|
||||
* Lock file used for upgrading.
|
||||
*/
|
||||
AutoCloseFD globalLock;
|
||||
|
||||
struct State
|
||||
{
|
||||
/**
|
||||
* The SQLite database object.
|
||||
*/
|
||||
SQLite db;
|
||||
|
||||
struct Stmts;
|
||||
std::unique_ptr<Stmts> stmts;
|
||||
|
||||
/**
|
||||
* The last time we checked whether to do an auto-GC, or an
|
||||
* auto-GC finished.
|
||||
*/
|
||||
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
|
||||
|
||||
/**
|
||||
* Whether auto-GC is running. If so, get gcFuture to wait for
|
||||
* the GC to finish.
|
||||
*/
|
||||
bool gcRunning = false;
|
||||
std::shared_future<void> gcFuture;
|
||||
|
||||
/**
|
||||
* How much disk space was available after the previous
|
||||
* auto-GC. If the current available disk space is below
|
||||
* minFree but not much below availAfterGC, then there is no
|
||||
* point in starting a new GC.
|
||||
*/
|
||||
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
|
||||
|
||||
std::unique_ptr<PublicKeys> publicKeys;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
public:
|
||||
|
||||
const Path dbDir;
|
||||
const Path linksDir;
|
||||
const Path reservedPath;
|
||||
const Path schemaPath;
|
||||
const Path tempRootsDir;
|
||||
const Path fnTempRoots;
|
||||
|
||||
private:
|
||||
|
||||
const PublicKeys & getPublicKeys();
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Hack for build-remote.cc.
|
||||
*/
|
||||
PathSet locksHeld;
|
||||
|
||||
/**
|
||||
* Initialise the local store, upgrading the schema if
|
||||
* necessary.
|
||||
*/
|
||||
LocalStore(const Params & params);
|
||||
LocalStore(std::string scheme, std::string path, const Params & params);
|
||||
|
||||
~LocalStore();
|
||||
|
||||
static std::set<std::string> uriSchemes()
|
||||
{ return {}; }
|
||||
|
||||
/**
|
||||
* Implementations of abstract store API methods.
|
||||
*/
|
||||
|
||||
std::string getUri() override;
|
||||
|
||||
bool isValidPathUncached(const StorePath & path) override;
|
||||
|
||||
StorePathSet queryValidPaths(const StorePathSet & paths,
|
||||
SubstituteFlag maybeSubstitute = NoSubstitute) override;
|
||||
|
||||
StorePathSet queryAllValidPaths() override;
|
||||
|
||||
void queryPathInfoUncached(const StorePath & path,
|
||||
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
|
||||
|
||||
void queryReferrers(const StorePath & path, StorePathSet & referrers) override;
|
||||
|
||||
StorePathSet queryValidDerivers(const StorePath & path) override;
|
||||
|
||||
std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;
|
||||
|
||||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
|
||||
|
||||
StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;
|
||||
|
||||
bool pathInfoIsUntrusted(const ValidPathInfo &) override;
|
||||
bool realisationIsUntrusted(const Realisation & ) override;
|
||||
|
||||
void addToStore(const ValidPathInfo & info, Source & source,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs) override;
|
||||
|
||||
StorePath addToStoreFromDump(
|
||||
Source & dump,
|
||||
std::string_view name,
|
||||
FileSerialisationMethod dumpMethod,
|
||||
ContentAddressMethod hashMethod,
|
||||
HashAlgorithm hashAlgo,
|
||||
const StorePathSet & references,
|
||||
RepairFlag repair) override;
|
||||
|
||||
void addTempRoot(const StorePath & path) override;
|
||||
|
||||
private:
|
||||
|
||||
void createTempRootsFile();
|
||||
|
||||
/**
|
||||
* The file to which we write our temporary roots.
|
||||
*/
|
||||
Sync<AutoCloseFD> _fdTempRoots;
|
||||
|
||||
/**
|
||||
* The global GC lock.
|
||||
*/
|
||||
Sync<AutoCloseFD> _fdGCLock;
|
||||
|
||||
/**
|
||||
* Connection to the garbage collector.
|
||||
*/
|
||||
Sync<AutoCloseFD> _fdRootsSocket;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Implementation of IndirectRootStore::addIndirectRoot().
|
||||
*
|
||||
* The weak reference merely is a symlink to `path' from
|
||||
* /nix/var/nix/gcroots/auto/<hash of `path'>.
|
||||
*/
|
||||
void addIndirectRoot(const Path & path) override;
|
||||
|
||||
private:
|
||||
|
||||
void findTempRoots(Roots & roots, bool censor);
|
||||
|
||||
AutoCloseFD openGCLock();
|
||||
|
||||
public:
|
||||
|
||||
Roots findRoots(bool censor) override;
|
||||
|
||||
void collectGarbage(const GCOptions & options, GCResults & results) override;
|
||||
|
||||
/**
|
||||
* Called by `collectGarbage` to trace in reverse.
|
||||
*
|
||||
* Using this rather than `queryReferrers` directly allows us to
|
||||
* fine-tune which referrers we consider for garbage collection;
|
||||
* some store implementations take advantage of this.
|
||||
*/
|
||||
virtual void queryGCReferrers(const StorePath & path, StorePathSet & referrers)
|
||||
{
|
||||
return queryReferrers(path, referrers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called by `collectGarbage` to recursively delete a path.
|
||||
* The default implementation simply calls `deletePath`, but it can be
|
||||
* overridden by stores that wish to provide their own deletion behaviour.
|
||||
*/
|
||||
virtual void deleteStorePath(const Path & path, uint64_t & bytesFreed);
|
||||
|
||||
/**
|
||||
* Optimise the disk space usage of the Nix store by hard-linking
|
||||
* files with the same contents.
|
||||
*/
|
||||
void optimiseStore(OptimiseStats & stats);
|
||||
|
||||
void optimiseStore() override;
|
||||
|
||||
/**
|
||||
* Optimise a single store path. Optionally, test the encountered
|
||||
* symlinks for corruption.
|
||||
*/
|
||||
void optimisePath(const Path & path, RepairFlag repair);
|
||||
|
||||
bool verifyStore(bool checkContents, RepairFlag repair) override;
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* Result of `verifyAllValidPaths`
|
||||
*/
|
||||
struct VerificationResult {
|
||||
/**
|
||||
* Whether any errors were encountered
|
||||
*/
|
||||
bool errors;
|
||||
|
||||
/**
|
||||
* A set of so-far valid paths. The store objects pointed to by
|
||||
* those paths are suitable for further validation checking.
|
||||
*/
|
||||
StorePathSet validPaths;
|
||||
};
|
||||
|
||||
/**
|
||||
* First, unconditional step of `verifyStore`
|
||||
*/
|
||||
virtual VerificationResult verifyAllValidPaths(RepairFlag repair);
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Register the validity of a path, i.e., that `path` exists, that
|
||||
* the paths referenced by it exists, and in the case of an output
|
||||
* path of a derivation, that it has been produced by a successful
|
||||
* execution of the derivation (or something equivalent). Also
|
||||
* register the hash of the file system contents of the path. The
|
||||
* hash must be a SHA-256 hash.
|
||||
*/
|
||||
void registerValidPath(const ValidPathInfo & info);
|
||||
|
||||
virtual void registerValidPaths(const ValidPathInfos & infos);
|
||||
|
||||
unsigned int getProtocol() override;
|
||||
|
||||
std::optional<TrustedFlag> isTrustedClient() override;
|
||||
|
||||
void vacuumDB();
|
||||
|
||||
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
|
||||
|
||||
/**
|
||||
* If free disk space in /nix/store is below minFree, delete
|
||||
* garbage until it exceeds maxFree.
|
||||
*/
|
||||
void autoGC(bool sync = true);
|
||||
|
||||
/**
|
||||
* Register the store path 'output' as the output named 'outputName' of
|
||||
* derivation 'deriver'.
|
||||
*/
|
||||
void registerDrvOutput(const Realisation & info) override;
|
||||
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
|
||||
void cacheDrvOutputMapping(
|
||||
State & state,
|
||||
const uint64_t deriver,
|
||||
const std::string & outputName,
|
||||
const StorePath & output);
|
||||
|
||||
std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
|
||||
std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
|
||||
void queryRealisationUncached(const DrvOutput&,
|
||||
Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
|
||||
|
||||
std::optional<std::string> getVersion() override;
|
||||
|
||||
protected:
|
||||
|
||||
void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
|
||||
StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* Retrieve the current version of the database schema.
|
||||
* If the database does not exist yet, the version returned will be 0.
|
||||
*/
|
||||
int getSchema();
|
||||
|
||||
void openDB(State & state, bool create);
|
||||
|
||||
void makeStoreWritable();
|
||||
|
||||
uint64_t queryValidPathId(State & state, const StorePath & path);
|
||||
|
||||
uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);
|
||||
|
||||
void invalidatePath(State & state, const StorePath & path);
|
||||
|
||||
/**
|
||||
* Delete a path from the Nix store.
|
||||
*/
|
||||
void invalidatePathChecked(const StorePath & path);
|
||||
|
||||
std::shared_ptr<const ValidPathInfo> queryPathInfoInternal(State & state, const StorePath & path);
|
||||
|
||||
void updatePathInfo(State & state, const ValidPathInfo & info);
|
||||
|
||||
void upgradeStore6();
|
||||
void upgradeStore7();
|
||||
PathSet queryValidPathsOld();
|
||||
ValidPathInfo queryPathInfoOld(const Path & path);
|
||||
|
||||
void findRoots(const Path & path, unsigned char type, Roots & roots);
|
||||
|
||||
void findRootsNoTemp(Roots & roots, bool censor);
|
||||
|
||||
void findRuntimeRoots(Roots & roots, bool censor);
|
||||
|
||||
std::pair<Path, AutoCloseFD> createTempDirInStore();
|
||||
|
||||
typedef std::unordered_set<ino_t> InodeHash;
|
||||
|
||||
InodeHash loadInodeHash();
|
||||
Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
|
||||
void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);
|
||||
|
||||
// Internal versions that are not wrapped in retry_sqlite.
|
||||
bool isValidPath_(State & state, const StorePath & path);
|
||||
void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
|
||||
|
||||
/**
|
||||
* Add signatures to a ValidPathInfo or Realisation using the secret keys
|
||||
* specified by the ‘secret-key-files’ option.
|
||||
*/
|
||||
void signPathInfo(ValidPathInfo & info);
|
||||
void signRealisation(Realisation &);
|
||||
|
||||
void addBuildLog(const StorePath & drvPath, std::string_view log) override;
|
||||
|
||||
friend struct LocalDerivationGoal;
|
||||
friend struct PathSubstitutionGoal;
|
||||
friend struct SubstitutionGoal;
|
||||
friend struct DerivationGoal;
|
||||
};
|
||||
|
||||
}
|
||||
39
src/libstore/unix/local-store.md
Normal file
39
src/libstore/unix/local-store.md
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
R"(
|
||||
|
||||
**Store URL format**: `local`, *root*
|
||||
|
||||
This store type accesses a Nix store in the local filesystem directly
|
||||
(i.e. not via the Nix daemon). *root* is an absolute path that is
|
||||
prefixed to other directories such as the Nix store directory. The
|
||||
store pseudo-URL `local` denotes a store that uses `/` as its root
|
||||
directory.
|
||||
|
||||
A store that uses a *root* other than `/` is called a *chroot
|
||||
store*. With such stores, the store directory is "logically" still
|
||||
`/nix/store`, so programs stored in them can only be built and
|
||||
executed by `chroot`-ing into *root*. Chroot stores only support
|
||||
building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are
|
||||
enabled.
|
||||
|
||||
For example, the following uses `/tmp/root` as the chroot environment
|
||||
to build or download `nixpkgs#hello` and then execute it:
|
||||
|
||||
```console
|
||||
# nix run --store /tmp/root nixpkgs#hello
|
||||
Hello, world!
|
||||
```
|
||||
|
||||
Here, the "physical" store location is `/tmp/root/nix/store`, and
|
||||
Nix's store metadata is in `/tmp/root/nix/var/nix/db`.
|
||||
|
||||
It is also possible, but not recommended, to change the "logical"
|
||||
location of the Nix store from its default of `/nix/store`. This makes
|
||||
it impossible to use default substituters such as
|
||||
`https://cache.nixos.org/`, and thus you may have to build everything
|
||||
locally. Here is an example:
|
||||
|
||||
```console
|
||||
# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello
|
||||
```
|
||||
|
||||
)"
|
||||
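As an editorial aside (not part of the file above): the bare-path form and the explicit query-parameter form of a chroot store URL are assumed here to be interchangeable, with `root` being the query parameter corresponding to the *root* path described above. Under that assumption, the earlier `--store /tmp/root` example could also be written as:

```console
# nix build --store 'local?root=/tmp/root' nixpkgs#hello
```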
208
src/libstore/unix/lock.cc
Normal file
208
src/libstore/unix/lock.cc
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
#include "lock.hh"
|
||||
#include "file-system.hh"
|
||||
#include "globals.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "users.hh"
|
||||
|
||||
#include <pwd.h>
|
||||
#include <grp.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
using namespace nix::unix;
|
||||
|
||||
#if __linux__
|
||||
|
||||
static std::vector<gid_t> get_group_list(const char *username, gid_t group_id)
|
||||
{
|
||||
std::vector<gid_t> gids;
|
||||
gids.resize(32); // Initial guess
|
||||
|
||||
auto getgroupl_failed {[&] {
|
||||
int ngroups = gids.size();
|
||||
int err = getgrouplist(username, group_id, gids.data(), &ngroups);
|
||||
gids.resize(ngroups);
|
||||
return err == -1;
|
||||
}};
|
||||
|
||||
// The first error means that the vector was not big enough.
|
||||
// If it happens again, there is some different problem.
|
||||
if (getgroupl_failed() && getgroupl_failed()) {
|
||||
throw SysError("failed to get list of supplementary groups for '%s'", username);
|
||||
}
|
||||
|
||||
return gids;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
struct SimpleUserLock : UserLock
|
||||
{
|
||||
AutoCloseFD fdUserLock;
|
||||
uid_t uid;
|
||||
gid_t gid;
|
||||
std::vector<gid_t> supplementaryGIDs;
|
||||
|
||||
uid_t getUID() override { assert(uid); return uid; }
|
||||
uid_t getUIDCount() override { return 1; }
|
||||
gid_t getGID() override { assert(gid); return gid; }
|
||||
|
||||
std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
|
||||
|
||||
static std::unique_ptr<UserLock> acquire()
|
||||
{
|
||||
assert(settings.buildUsersGroup != "");
|
||||
createDirs(settings.nixStateDir + "/userpool");
|
||||
|
||||
/* Get the members of the build-users-group. */
|
||||
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
|
||||
if (!gr)
|
||||
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
|
||||
|
||||
/* Copy the result of getgrnam. */
|
||||
Strings users;
|
||||
for (char * * p = gr->gr_mem; *p; ++p) {
|
||||
debug("found build user '%s'", *p);
|
||||
users.push_back(*p);
|
||||
}
|
||||
|
||||
if (users.empty())
|
||||
throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
|
||||
|
||||
/* Find a user account that isn't currently in use for another
|
||||
build. */
|
||||
for (auto & i : users) {
|
||||
debug("trying user '%s'", i);
|
||||
|
||||
struct passwd * pw = getpwnam(i.c_str());
|
||||
if (!pw)
|
||||
throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
|
||||
|
||||
auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
|
||||
|
||||
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
|
||||
if (!fd)
|
||||
throw SysError("opening user lock '%s'", fnUserLock);
|
||||
|
||||
if (lockFile(fd.get(), ltWrite, false)) {
|
||||
auto lock = std::make_unique<SimpleUserLock>();
|
||||
|
||||
lock->fdUserLock = std::move(fd);
|
||||
lock->uid = pw->pw_uid;
|
||||
lock->gid = gr->gr_gid;
|
||||
|
||||
/* Sanity check... */
|
||||
if (lock->uid == getuid() || lock->uid == geteuid())
|
||||
throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
|
||||
|
||||
#if __linux__
|
||||
/* Get the list of supplementary groups of this user. This is
|
||||
* usually either empty or contains a group such as "kvm". */
|
||||
|
||||
// Finally, trim back the GID list to its real size.
|
||||
for (auto gid : get_group_list(pw->pw_name, pw->pw_gid)) {
|
||||
if (gid != lock->gid)
|
||||
lock->supplementaryGIDs.push_back(gid);
|
||||
}
|
||||
#endif
|
||||
|
||||
return lock;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
struct AutoUserLock : UserLock
|
||||
{
|
||||
AutoCloseFD fdUserLock;
|
||||
uid_t firstUid = 0;
|
||||
gid_t firstGid = 0;
|
||||
uid_t nrIds = 1;
|
||||
|
||||
uid_t getUID() override { assert(firstUid); return firstUid; }
|
||||
|
||||
gid_t getUIDCount() override { return nrIds; }
|
||||
|
||||
gid_t getGID() override { assert(firstGid); return firstGid; }
|
||||
|
||||
std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
|
||||
|
||||
static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useUserNamespace)
|
||||
{
|
||||
#if !defined(__linux__)
|
||||
useUserNamespace = false;
|
||||
#endif
|
||||
|
||||
experimentalFeatureSettings.require(Xp::AutoAllocateUids);
|
||||
assert(settings.startId > 0);
|
||||
assert(settings.uidCount % maxIdsPerBuild == 0);
|
||||
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
|
||||
assert(nrIds <= maxIdsPerBuild);
|
||||
|
||||
createDirs(settings.nixStateDir + "/userpool2");
|
||||
|
||||
size_t nrSlots = settings.uidCount / maxIdsPerBuild;
|
||||
|
||||
for (size_t i = 0; i < nrSlots; i++) {
|
||||
debug("trying user slot '%d'", i);
|
||||
|
||||
createDirs(settings.nixStateDir + "/userpool2");
|
||||
|
||||
auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
|
||||
|
||||
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
|
||||
if (!fd)
|
||||
throw SysError("opening user lock '%s'", fnUserLock);
|
||||
|
||||
if (lockFile(fd.get(), ltWrite, false)) {
|
||||
|
||||
auto firstUid = settings.startId + i * maxIdsPerBuild;
|
||||
|
||||
auto pw = getpwuid(firstUid);
|
||||
if (pw)
|
||||
throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
|
||||
|
||||
auto lock = std::make_unique<AutoUserLock>();
|
||||
lock->fdUserLock = std::move(fd);
|
||||
lock->firstUid = firstUid;
|
||||
if (useUserNamespace)
|
||||
lock->firstGid = firstUid;
|
||||
else {
|
||||
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
|
||||
if (!gr)
|
||||
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
|
||||
lock->firstGid = gr->gr_gid;
|
||||
}
|
||||
lock->nrIds = nrIds;
|
||||
return lock;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useUserNamespace)
|
||||
{
|
||||
if (settings.autoAllocateUids)
|
||||
return AutoUserLock::acquire(nrIds, useUserNamespace);
|
||||
else
|
||||
return SimpleUserLock::acquire();
|
||||
}
|
||||
|
||||
bool useBuildUsers()
|
||||
{
|
||||
#if __linux__
|
||||
static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && isRootUser();
|
||||
return b;
|
||||
#elif __APPLE__
|
||||
static bool b = settings.buildUsersGroup != "" && isRootUser();
|
||||
return b;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
45
src/libstore/unix/lock.hh
Normal file
45
src/libstore/unix/lock.hh
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#pragma once
///@file

#include "types.hh"

#include <optional>

#include <sys/types.h>

namespace nix {

struct UserLock
{
virtual ~UserLock() { }

/**
 * Get the first and last UID.
 */
std::pair<uid_t, uid_t> getUIDRange()
{
auto first = getUID();
return {first, first + getUIDCount() - 1};
}

/**
 * Get the first UID.
 */
virtual uid_t getUID() = 0;

virtual uid_t getUIDCount() = 0;

virtual gid_t getGID() = 0;

virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
};

/**
 * Acquire a user lock for a UID range of size `nrIds`. Note that this
 * may return nullptr if no user is available.
 */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useUserNamespace);

bool useBuildUsers();

}
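A minimal usage sketch (not part of this commit) of the `UserLock` interface declared above. Only `useBuildUsers`, `acquireUserLock` and the `UserLock` methods come from the header; the function name and the printed messages are illustrative:

```cpp
#include "lock.hh"

#include <cstdio>

using namespace nix;

// Hypothetical caller: try to reserve a build user before running a build.
static void demoAcquireBuildUser()
{
    if (!useBuildUsers()) {
        std::printf("builds run as the invoking user; no lock needed\n");
        return;
    }

    // Request a single UID without a user namespace; a null result means
    // every build user (or auto-allocated UID slot) is currently taken.
    auto lock = acquireUserLock(1, /*useUserNamespace=*/false);
    if (!lock) {
        std::printf("all build users are busy, retry later\n");
        return;
    }

    auto [first, last] = lock->getUIDRange();
    std::printf("got UIDs %lu-%lu, GID %lu\n",
        (unsigned long) first, (unsigned long) last, (unsigned long) lock->getGID());
}   // the lock (and its lock file descriptor) is released when `lock` goes out of scope
```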
311
src/libstore/unix/optimise-store.cc
Normal file
311
src/libstore/unix/optimise-store.cc
Normal file
|
|
@ -0,0 +1,311 @@
|
|||
#include "local-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "signals.hh"
|
||||
#include "posix-fs-canonicalise.hh"
|
||||
#include "posix-source-accessor.hh"
|
||||
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <regex>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static void makeWritable(const Path & path)
|
||||
{
|
||||
auto st = lstat(path);
|
||||
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
|
||||
throw SysError("changing writability of '%1%'", path);
|
||||
}
|
||||
|
||||
|
||||
struct MakeReadOnly
|
||||
{
|
||||
Path path;
|
||||
MakeReadOnly(const PathView path) : path(path) { }
|
||||
~MakeReadOnly()
|
||||
{
|
||||
try {
|
||||
/* This will make the path read-only. */
|
||||
if (path != "") canonicaliseTimestampAndPermissions(path);
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
LocalStore::InodeHash LocalStore::loadInodeHash()
|
||||
{
|
||||
debug("loading hash inodes in memory");
|
||||
InodeHash inodeHash;
|
||||
|
||||
AutoCloseDir dir(opendir(linksDir.c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", linksDir);
|
||||
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
|
||||
checkInterrupt();
|
||||
// We don't care if we hit non-hash files, anything goes
|
||||
inodeHash.insert(dirent->d_ino);
|
||||
}
|
||||
if (errno) throw SysError("reading directory '%1%'", linksDir);
|
||||
|
||||
printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size());
|
||||
|
||||
return inodeHash;
|
||||
}
|
||||
|
||||
|
||||
Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
|
||||
{
|
||||
Strings names;
|
||||
|
||||
AutoCloseDir dir(opendir(path.c_str()));
|
||||
if (!dir) throw SysError("opening directory '%1%'", path);
|
||||
|
||||
struct dirent * dirent;
|
||||
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
|
||||
checkInterrupt();
|
||||
|
||||
if (inodeHash.count(dirent->d_ino)) {
|
||||
debug("'%1%' is already linked", dirent->d_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
std::string name = dirent->d_name;
|
||||
if (name == "." || name == "..") continue;
|
||||
names.push_back(name);
|
||||
}
|
||||
if (errno) throw SysError("reading directory '%1%'", path);
|
||||
|
||||
return names;
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
|
||||
const Path & path, InodeHash & inodeHash, RepairFlag repair)
|
||||
{
|
||||
checkInterrupt();
|
||||
|
||||
auto st = lstat(path);
|
||||
|
||||
#if __APPLE__
|
||||
/* HFS/macOS has some undocumented security feature disabling hardlinking for
|
||||
special files within .app dirs. *.app/Contents/PkgInfo and
|
||||
*.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
|
||||
https://github.com/NixOS/nix/issues/1443 for more discussion. */
|
||||
|
||||
if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
|
||||
{
|
||||
debug("'%1%' is not allowed to be linked in macOS", path);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
Strings names = readDirectoryIgnoringInodes(path, inodeHash);
|
||||
for (auto & i : names)
|
||||
optimisePath_(act, stats, path + "/" + i, inodeHash, repair);
|
||||
return;
|
||||
}
|
||||
|
||||
/* We can hard link regular files and maybe symlinks. */
|
||||
if (!S_ISREG(st.st_mode)
|
||||
#if CAN_LINK_SYMLINK
|
||||
&& !S_ISLNK(st.st_mode)
|
||||
#endif
|
||||
) return;
|
||||
|
||||
/* Sometimes SNAFUs can cause files in the Nix store to be
|
||||
modified, in particular when running programs as root under
|
||||
NixOS (example: $fontconfig/var/cache being modified). Skip
|
||||
those files. FIXME: check the modification time. */
|
||||
if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
|
||||
warn("skipping suspicious writable file '%1%'", path);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This can still happen on top-level files. */
|
||||
if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
|
||||
debug("'%s' is already linked, with %d other file(s)", path, st.st_nlink - 2);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Hash the file. Note that hashPath() returns the hash over the
|
||||
NAR serialisation, which includes the execute bit on the file.
|
||||
Thus, executable and non-executable files with the same
|
||||
contents *won't* be linked (which is good because otherwise the
|
||||
permissions would be screwed up).
|
||||
|
||||
Also note that if `path' is a symlink, then we're hashing the
|
||||
contents of the symlink (i.e. the result of readlink()), not
|
||||
the contents of the target (which may not even exist). */
|
||||
Hash hash = ({
|
||||
PosixSourceAccessor accessor;
|
||||
hashPath(
|
||||
accessor, CanonPath { path },
|
||||
FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
|
||||
});
|
||||
debug("'%1%' has hash '%2%'", path, hash.to_string(HashFormat::Nix32, true));
|
||||
|
||||
/* Check if this is a known hash. */
|
||||
Path linkPath = linksDir + "/" + hash.to_string(HashFormat::Nix32, false);
|
||||
|
||||
/* Maybe delete the link, if it has been corrupted. */
|
||||
if (pathExists(linkPath)) {
|
||||
auto stLink = lstat(linkPath);
|
||||
if (st.st_size != stLink.st_size
|
||||
|| (repair && hash != ({
|
||||
PosixSourceAccessor accessor;
|
||||
hashPath(
|
||||
accessor, CanonPath { linkPath },
|
||||
FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
|
||||
})))
|
||||
{
|
||||
// XXX: Consider overwriting linkPath with our valid version.
|
||||
warn("removing corrupted link '%s'", linkPath);
|
||||
warn("There may be more corrupted paths."
|
||||
"\nYou should run `nix-store --verify --check-contents --repair` to fix them all");
|
||||
unlink(linkPath.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if (!pathExists(linkPath)) {
|
||||
/* Nope, create a hard link in the links directory. */
|
||||
if (link(path.c_str(), linkPath.c_str()) == 0) {
|
||||
inodeHash.insert(st.st_ino);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (errno) {
|
||||
case EEXIST:
|
||||
/* Fall through if another process created ‘linkPath’ before
|
||||
we did. */
|
||||
break;
|
||||
|
||||
case ENOSPC:
|
||||
/* On ext4, that probably means the directory index is
|
||||
full. When that happens, it's fine to ignore it: we
|
||||
just effectively disable deduplication of this
|
||||
file. */
|
||||
printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
|
||||
return;
|
||||
|
||||
default:
|
||||
throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
|
||||
}
|
||||
}
|
||||
|
||||
/* Yes! We've seen a file with the same contents. Replace the
|
||||
current file with a hard link to that file. */
|
||||
auto stLink = lstat(linkPath);
|
||||
|
||||
if (st.st_ino == stLink.st_ino) {
|
||||
debug("'%1%' is already linked to '%2%'", path, linkPath);
|
||||
return;
|
||||
}
|
||||
|
||||
printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath);
|
||||
|
||||
/* Make the containing directory writable, but only if it's not
|
||||
the store itself (we don't want or need to mess with its
|
||||
permissions). */
|
||||
const Path dirOfPath(dirOf(path));
|
||||
bool mustToggle = dirOfPath != realStoreDir.get();
|
||||
if (mustToggle) makeWritable(dirOfPath);
|
||||
|
||||
/* When we're done, make the directory read-only again and reset
|
||||
its timestamp back to 0. */
|
||||
MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : "");
|
||||
|
||||
Path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), random());
|
||||
|
||||
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
|
||||
if (errno == EMLINK) {
|
||||
/* Too many links to the same file (>= 32000 on most file
|
||||
systems). This is likely to happen with empty files.
|
||||
Just shrug and ignore. */
|
||||
if (st.st_size)
|
||||
printInfo("'%1%' has maximum number of links", linkPath);
|
||||
return;
|
||||
}
|
||||
throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
|
||||
}
|
||||
|
||||
/* Atomically replace the old file with the new hard link. */
|
||||
try {
|
||||
renameFile(tempLink, path);
|
||||
} catch (SystemError & e) {
|
||||
if (unlink(tempLink.c_str()) == -1)
|
||||
printError("unable to unlink '%1%'", tempLink);
|
||||
if (errno == EMLINK) {
|
||||
/* Some filesystems generate too many links on the rename,
|
||||
rather than on the original link. (Probably it
|
||||
temporarily increases the st_nlink field before
|
||||
decreasing it again.) */
|
||||
debug("'%s' has reached maximum number of links", linkPath);
|
||||
return;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
stats.filesLinked++;
|
||||
stats.bytesFreed += st.st_size;
|
||||
stats.blocksFreed += st.st_blocks;
|
||||
|
||||
if (act)
|
||||
act->result(resFileLinked, st.st_size, st.st_blocks);
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::optimiseStore(OptimiseStats & stats)
|
||||
{
|
||||
Activity act(*logger, actOptimiseStore);
|
||||
|
||||
auto paths = queryAllValidPaths();
|
||||
InodeHash inodeHash = loadInodeHash();
|
||||
|
||||
act.progress(0, paths.size());
|
||||
|
||||
uint64_t done = 0;
|
||||
|
||||
for (auto & i : paths) {
|
||||
addTempRoot(i);
|
||||
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
|
||||
{
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", printStorePath(i)));
|
||||
optimisePath_(&act, stats, realStoreDir + "/" + std::string(i.to_string()), inodeHash, NoRepair);
|
||||
}
|
||||
done++;
|
||||
act.progress(done, paths.size());
|
||||
}
|
||||
}
|
||||
|
||||
void LocalStore::optimiseStore()
|
||||
{
|
||||
OptimiseStats stats;
|
||||
|
||||
optimiseStore(stats);
|
||||
|
||||
printInfo("%s freed by hard-linking %d files",
|
||||
showBytes(stats.bytesFreed),
|
||||
stats.filesLinked);
|
||||
}
|
||||
|
||||
void LocalStore::optimisePath(const Path & path, RepairFlag repair)
|
||||
{
|
||||
OptimiseStats stats;
|
||||
InodeHash inodeHash;
|
||||
|
||||
if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash, repair);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
171
src/libstore/unix/posix-fs-canonicalise.cc
Normal file
171
src/libstore/unix/posix-fs-canonicalise.cc
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
#if HAVE_ACL_SUPPORT
# include <sys/xattr.h>
#endif

#include "posix-fs-canonicalise.hh"
#include "file-system.hh"
#include "signals.hh"
#include "util.hh"
#include "globals.hh"
#include "store-api.hh"

namespace nix {

const time_t mtimeStore = 1; /* 1 second into the epoch */


static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
{
if (!S_ISLNK(st.st_mode)) {

/* Mask out all type related bits. */
mode_t mode = st.st_mode & ~S_IFMT;

if (mode != 0444 && mode != 0555) {
mode = (st.st_mode & S_IFMT)
| 0444
| (st.st_mode & S_IXUSR ? 0111 : 0);
if (chmod(path.c_str(), mode) == -1)
throw SysError("changing mode of '%1%' to %2$o", path, mode);
}

}

if (st.st_mtime != mtimeStore) {
struct timeval times[2];
times[0].tv_sec = st.st_atime;
times[0].tv_usec = 0;
times[1].tv_sec = mtimeStore;
times[1].tv_usec = 0;
#if HAVE_LUTIMES
if (lutimes(path.c_str(), times) == -1)
if (errno != ENOSYS ||
(!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
#else
if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
throw SysError("changing modification time of '%1%'", path);
}
}


void canonicaliseTimestampAndPermissions(const Path & path)
{
canonicaliseTimestampAndPermissions(path, lstat(path));
}


static void canonicalisePathMetaData_(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
checkInterrupt();

#if __APPLE__
/* Remove flags, in particular UF_IMMUTABLE which would prevent
the file from being garbage-collected. FIXME: Use
setattrlist() to remove other attributes as well. */
if (lchflags(path.c_str(), 0)) {
if (errno != ENOTSUP)
throw SysError("clearing flags of path '%1%'", path);
}
#endif

auto st = lstat(path);

/* Really make sure that the path is of a supported type. */
if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
throw Error("file '%1%' has an unsupported type", path);

#if HAVE_ACL_SUPPORT
/* Remove extended attributes / ACLs. */
ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0);

if (eaSize < 0) {
if (errno != ENOTSUP && errno != ENODATA)
throw SysError("querying extended attributes of '%s'", path);
} else if (eaSize > 0) {
std::vector<char> eaBuf(eaSize);

if ((eaSize = llistxattr(path.c_str(), eaBuf.data(), eaBuf.size())) < 0)
throw SysError("querying extended attributes of '%s'", path);

for (auto & eaName: tokenizeString<Strings>(std::string(eaBuf.data(), eaSize), std::string("\000", 1))) {
if (settings.ignoredAcls.get().count(eaName)) continue;
if (lremovexattr(path.c_str(), eaName.c_str()) == -1)
throw SysError("removing extended attribute '%s' from '%s'", eaName, path);
}
}
#endif

/* Fail if the file is not owned by the build user. This prevents
us from messing up the ownership/permissions of files
hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
return;
}

inodesSeen.insert(Inode(st.st_dev, st.st_ino));

canonicaliseTimestampAndPermissions(path, st);

/* Change ownership to the current uid. If it's a symlink, use
lchown if available, otherwise don't bother. Wrong ownership
of a symlink doesn't matter, since the owning user can't change
the symlink and can't delete it because the directory is not
writable. The only exception is top-level paths in the Nix
store (since that directory is group-writable for the Nix build
users group); we check for this case below. */
if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
if (lchown(path.c_str(), geteuid(), getegid()) == -1)
#else
if (!S_ISLNK(st.st_mode) &&
chown(path.c_str(), geteuid(), getegid()) == -1)
#endif
throw SysError("changing owner of '%1%' to %2%",
path, geteuid());
}

if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}


void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
canonicalisePathMetaData_(path, uidRange, inodesSeen);

/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
auto st = lstat(path);

if (st.st_uid != geteuid()) {
assert(S_ISLNK(st.st_mode));
throw Error("wrong ownership of top-level store path '%1%'", path);
}
}


void canonicalisePathMetaData(const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
canonicalisePathMetaData(path, uidRange, inodesSeen);
}

}
45
src/libstore/unix/posix-fs-canonicalise.hh
Normal file
45
src/libstore/unix/posix-fs-canonicalise.hh
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#pragma once
///@file

#include <sys/stat.h>
#include <sys/time.h>

#include "types.hh"
#include "error.hh"

namespace nix {

typedef std::pair<dev_t, ino_t> Inode;
typedef std::set<Inode> InodesSeen;


/**
 * "Fix", or canonicalise, the meta-data of the files in a store path
 * after it has been built. In particular:
 *
 * - the last modification date on each file is set to 1 (i.e.,
 *   00:00:01 1/1/1970 UTC)
 *
 * - the permissions are set to 444 or 555 (i.e., read-only with or
 *   without execute permission; setuid bits etc. are cleared)
 *
 * - the owner and group are set to the Nix user and group, if we're
 *   running as root.
 *
 * If uidRange is not empty, this function will throw an error if it
 * encounters files owned by a user outside of the closed interval
 * [uidRange->first, uidRange->second].
 */
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);

void canonicaliseTimestampAndPermissions(const Path & path);

MakeError(PathInUse, Error);

}
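A hedged sketch (not part of the commit) of how the declarations above might be used after a build; the function name, the output path variable, and the single-UID range are illustrative assumptions:

```cpp
#include "posix-fs-canonicalise.hh"

using namespace nix;

// Hypothetical post-build fixup: clamp timestamps and permissions and verify
// that everything in the output is owned by the single build user `buildUid`.
static void fixupBuildOutput(const Path & outputPath, uid_t buildUid)
{
    // The pair is the closed UID interval considered acceptable; passing
    // std::nullopt instead would skip the ownership check entirely.
    canonicalisePathMetaData(outputPath, std::pair<uid_t, uid_t>{buildUid, buildUid});
}
```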
42
src/libstore/unix/schema.sql
Normal file
42
src/libstore/unix/schema.sql
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
create table if not exists ValidPaths (
id integer primary key autoincrement not null,
path text unique not null,
hash text not null, -- base16 representation
registrationTime integer not null,
deriver text,
narSize integer,
ultimate integer, -- null implies "false"
sigs text, -- space-separated
ca text -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
);

create table if not exists Refs (
referrer integer not null,
reference integer not null,
primary key (referrer, reference),
foreign key (referrer) references ValidPaths(id) on delete cascade,
foreign key (reference) references ValidPaths(id) on delete restrict
);

create index if not exists IndexReferrer on Refs(referrer);
create index if not exists IndexReference on Refs(reference);

-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
-- table. This causes a deletion of the corresponding row in
-- ValidPaths to cause a foreign key constraint violation (due to `on
-- delete restrict' on the `reference' column). Therefore, explicitly
-- get rid of self-references.
create trigger if not exists DeleteSelfRefs before delete on ValidPaths
begin
delete from Refs where referrer = old.id and reference = old.id;
end;

create table if not exists DerivationOutputs (
drv integer not null,
id text not null, -- symbolic output id, usually "out"
path text not null,
primary key (drv, id),
foreign key (drv) references ValidPaths(id) on delete cascade
);

create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
93
src/libstore/unix/uds-remote-store.cc
Normal file
93
src/libstore/unix/uds-remote-store.cc
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
#include "uds-remote-store.hh"
|
||||
#include "unix-domain-socket.hh"
|
||||
#include "worker-protocol.hh"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/un.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <cstring>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
std::string UDSRemoteStoreConfig::doc()
|
||||
{
|
||||
return
|
||||
#include "uds-remote-store.md"
|
||||
;
|
||||
}
|
||||
|
||||
|
||||
UDSRemoteStore::UDSRemoteStore(const Params & params)
|
||||
: StoreConfig(params)
|
||||
, LocalFSStoreConfig(params)
|
||||
, RemoteStoreConfig(params)
|
||||
, UDSRemoteStoreConfig(params)
|
||||
, Store(params)
|
||||
, LocalFSStore(params)
|
||||
, RemoteStore(params)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
UDSRemoteStore::UDSRemoteStore(
|
||||
const std::string scheme,
|
||||
std::string socket_path,
|
||||
const Params & params)
|
||||
: UDSRemoteStore(params)
|
||||
{
|
||||
path.emplace(socket_path);
|
||||
}
|
||||
|
||||
|
||||
std::string UDSRemoteStore::getUri()
|
||||
{
|
||||
if (path) {
|
||||
return std::string("unix://") + *path;
|
||||
} else {
|
||||
return "daemon";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void UDSRemoteStore::Connection::closeWrite()
|
||||
{
|
||||
shutdown(fd.get(), SHUT_WR);
|
||||
}
|
||||
|
||||
|
||||
ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
|
||||
{
|
||||
auto conn = make_ref<Connection>();
|
||||
|
||||
/* Connect to a daemon that does the privileged work for us. */
|
||||
conn->fd = createUnixDomainSocket();
|
||||
|
||||
nix::connect(conn->fd.get(), path ? *path : settings.nixDaemonSocketFile);
|
||||
|
||||
conn->from.fd = conn->fd.get();
|
||||
conn->to.fd = conn->fd.get();
|
||||
|
||||
conn->startTime = std::chrono::steady_clock::now();
|
||||
|
||||
return conn;
|
||||
}
|
||||
|
||||
|
||||
void UDSRemoteStore::addIndirectRoot(const Path & path)
|
||||
{
|
||||
auto conn(getConnection());
|
||||
conn->to << WorkerProto::Op::AddIndirectRoot << path;
|
||||
conn.processStderr();
|
||||
readInt(conn->from);
|
||||
}
|
||||
|
||||
|
||||
static RegisterStoreImplementation<UDSRemoteStore, UDSRemoteStoreConfig> regUDSRemoteStore;
|
||||
|
||||
}
|
||||
66
src/libstore/unix/uds-remote-store.hh
Normal file
66
src/libstore/unix/uds-remote-store.hh
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
#pragma once
///@file

#include "remote-store.hh"
#include "remote-store-connection.hh"
#include "indirect-root-store.hh"

namespace nix {

struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig
{
UDSRemoteStoreConfig(const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, RemoteStoreConfig(params)
{
}

const std::string name() override { return "Local Daemon Store"; }

std::string doc() override;
};

class UDSRemoteStore : public virtual UDSRemoteStoreConfig
, public virtual IndirectRootStore
, public virtual RemoteStore
{
public:

UDSRemoteStore(const Params & params);
UDSRemoteStore(const std::string scheme, std::string path, const Params & params);

std::string getUri() override;

static std::set<std::string> uriSchemes()
{ return {"unix"}; }

ref<SourceAccessor> getFSAccessor(bool requireValidPath = true) override
{ return LocalFSStore::getFSAccessor(requireValidPath); }

void narFromPath(const StorePath & path, Sink & sink) override
{ LocalFSStore::narFromPath(path, sink); }

/**
 * Implementation of `IndirectRootStore::addIndirectRoot()` which
 * delegates to the remote store.
 *
 * The idea is that the client makes the direct symlink, so it is
 * owned and managed by the client's user account, and the server
 * makes the indirect symlink.
 */
void addIndirectRoot(const Path & path) override;

private:

struct Connection : RemoteStore::Connection
{
AutoCloseFD fd;
void closeWrite() override;
};

ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};

}
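As an illustrative sketch (not in this commit), a client would normally reach this class through the store registry rather than constructing it directly; assuming the usual `openStore` entry point from `store-api.hh`, the helper name and comments below are purely illustrative:

```cpp
#include "store-api.hh"

using namespace nix;

// Hypothetical helper: open the local daemon store and force a connection.
// "daemon" resolves to unix:///nix/var/nix/daemon-socket/socket, which the
// store registration in uds-remote-store.cc maps to UDSRemoteStore.
static ref<Store> connectToDaemon()
{
    auto store = openStore("daemon");
    store->connect();   // establishes the Unix domain socket connection eagerly
    return store;
}
```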
9
src/libstore/unix/uds-remote-store.md
Normal file
9
src/libstore/unix/uds-remote-store.md
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
R"(
|
||||
|
||||
**Store URL format**: `daemon`, `unix://`*path*
|
||||
|
||||
This store type accesses a Nix store by talking to a Nix daemon
|
||||
listening on the Unix domain socket *path*. The store pseudo-URL
|
||||
`daemon` is equivalent to `unix:///nix/var/nix/daemon-socket/socket`.
|
||||
|
||||
)"
|
||||
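For illustration only (not part of the file above): the two spellings are expected to be interchangeable, so, assuming a daemon is listening on the default socket and that the `nix store ping` subcommand is available in the build being used, either of the following should reach the same store:

```console
# nix store ping --store daemon
# nix store ping --store unix:///nix/var/nix/daemon-socket/socket
```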