Build the local store on Windows
Fixes #10558

Co-authored-by: Eugene Butler <eugene@eugene4.com>
Co-authored-by: Eelco Dolstra <edolstra@gmail.com>

This commit is contained in: parent 0998a3ac01, commit e0ff8da9d5
20 changed files with 228 additions and 155 deletions
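The hunks below drop POSIX-only headers and move the garbage-collector, optimiser, and local-store sources out of the common build path so the local store can also be compiled for Windows. As a minimal illustrative sketch of the kind of platform guard such a split enables (the `_WIN32` branch and header choices are assumptions for illustration, not code from this commit):

```cpp
// Illustrative sketch only: keep POSIX-only headers out of a Windows build.
#ifdef _WIN32
#  include <windows.h>      // assumption: Windows file APIs used on this branch
#else
#  include <sys/statvfs.h>  // POSIX-only: free-space queries used by auto-GC
#  include <sys/un.h>       // POSIX-only: Unix domain socket for GC roots
#endif
```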
@@ -30,32 +30,6 @@
#include <sys/utsname.h>
#include <sys/resource.h>

#if HAVE_STATVFS
#include <sys/statvfs.h>
#endif

/* Includes required for chroot support. */
#if __linux__
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#if HAVE_SECCOMP
#include <seccomp.h>
#endif
#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old))
#endif

#if __APPLE__
#include <spawn.h>
#include <sys/sysctl.h>
#endif

#include <pwd.h>
#include <grp.h>
@@ -1,41 +0,0 @@
-- Extension of the sql schema for content-addressed derivations.
-- Won't be loaded unless the experimental feature `ca-derivations`
-- is enabled

create table if not exists Realisations (
    id integer primary key autoincrement not null,
    drvPath text not null,
    outputName text not null, -- symbolic output id, usually "out"
    outputPath integer not null,
    signatures text, -- space-separated list
    foreign key (outputPath) references ValidPaths(id) on delete cascade
);

create index if not exists IndexRealisations on Realisations(drvPath, outputName);

-- We can end-up in a weird edge-case where a path depends on itself because
-- it’s an output of a CA derivation, that happens to be the same as one of its
-- dependencies.
-- In that case we have a dependency loop (path -> realisation1 -> realisation2
-- -> path) that we need to break by removing the dependencies between the
-- realisations
create trigger if not exists DeleteSelfRefsViaRealisations before delete on ValidPaths
begin
    delete from RealisationsRefs where realisationReference in (
        select id from Realisations where outputPath = old.id
    );
end;

create table if not exists RealisationsRefs (
    referrer integer not null,
    realisationReference integer,
    foreign key (referrer) references Realisations(id) on delete cascade,
    foreign key (realisationReference) references Realisations(id) on delete restrict
);
-- used by deletion trigger
create index if not exists IndexRealisationsRefsRealisationReference on RealisationsRefs(realisationReference);

-- used by QueryRealisationReferences
create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
-- used by cascade deletion when ValidPaths is deleted
create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);
@@ -1,952 +0,0 @@
#include "derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "finally.hh"
#include "unix-domain-socket.hh"
#include "signals.hh"

#if !defined(__linux__)
// For shelling out to lsof
# include "processes.hh"
#endif

#include <functional>
#include <queue>
#include <algorithm>
#include <regex>
#include <random>

#include <climits>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

namespace nix {

using namespace nix::unix;

static std::string gcSocketPath = "/gc-socket/socket";
static std::string gcRootsDir = "gcroots";


void LocalStore::addIndirectRoot(const Path & path)
{
    std::string hash = hashString(HashAlgorithm::SHA1, path).to_string(HashFormat::Nix32, false);
    Path realRoot = canonPath(fmt("%1%/%2%/auto/%3%", stateDir, gcRootsDir, hash));
    makeSymlink(realRoot, path);
}


void LocalStore::createTempRootsFile()
{
    auto fdTempRoots(_fdTempRoots.lock());

    /* Create the temporary roots file for this process. */
    if (*fdTempRoots) return;

    while (1) {
        if (pathExists(fnTempRoots))
            /* It *must* be stale, since there can be no two
               processes with the same pid. */
            unlink(fnTempRoots.c_str());

        *fdTempRoots = openLockFile(fnTempRoots, true);

        debug("acquiring write lock on '%s'", fnTempRoots);
        lockFile(fdTempRoots->get(), ltWrite, true);

        /* Check whether the garbage collector didn't get in our
           way. */
        struct stat st;
        if (fstat(fdTempRoots->get(), &st) == -1)
            throw SysError("statting '%1%'", fnTempRoots);
        if (st.st_size == 0) break;

        /* The garbage collector deleted this file before we could get
           a lock. (It won't delete the file after we get a lock.)
           Try again. */
    }
}


void LocalStore::addTempRoot(const StorePath & path)
{
    if (readOnly) {
        debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
        return;
    }

    createTempRootsFile();

    /* Open/create the global GC lock file. */
    {
        auto fdGCLock(_fdGCLock.lock());
        if (!*fdGCLock)
            *fdGCLock = openGCLock();
    }

 restart:
    /* Try to acquire a shared global GC lock (non-blocking). This
       only succeeds if the garbage collector is not currently
       running. */
    FdLock gcLock(_fdGCLock.lock()->get(), ltRead, false, "");

    if (!gcLock.acquired) {
        /* We couldn't get a shared global GC lock, so the garbage
           collector is running. So we have to connect to the garbage
           collector and inform it about our root. */
        auto fdRootsSocket(_fdRootsSocket.lock());

        if (!*fdRootsSocket) {
            auto socketPath = stateDir.get() + gcSocketPath;
            debug("connecting to '%s'", socketPath);
            *fdRootsSocket = createUnixDomainSocket();
            try {
                nix::connect(fdRootsSocket->get(), socketPath);
            } catch (SysError & e) {
                /* The garbage collector may have exited or not
                   created the socket yet, so we need to restart. */
                if (e.errNo == ECONNREFUSED || e.errNo == ENOENT) {
                    debug("GC socket connection refused: %s", e.msg());
                    fdRootsSocket->close();
                    std::this_thread::sleep_for(std::chrono::milliseconds(100));
                    goto restart;
                }
                throw;
            }
        }

        try {
            debug("sending GC root '%s'", printStorePath(path));
            writeFull(fdRootsSocket->get(), printStorePath(path) + "\n", false);
            char c;
            readFull(fdRootsSocket->get(), &c, 1);
            assert(c == '1');
            debug("got ack for GC root '%s'", printStorePath(path));
        } catch (SysError & e) {
            /* The garbage collector may have exited, so we need to
               restart. */
            if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
                debug("GC socket disconnected");
                fdRootsSocket->close();
                goto restart;
            }
            throw;
        } catch (EndOfFile & e) {
            debug("GC socket disconnected");
            fdRootsSocket->close();
            goto restart;
        }
    }

    /* Record the store path in the temporary roots file so it will be
       seen by a future run of the garbage collector. */
    auto s = printStorePath(path) + '\0';
    writeFull(_fdTempRoots.lock()->get(), s);
}


static std::string censored = "{censored}";


void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
    /* Read the `temproots' directory for per-process temporary root
       files. */
    for (auto & i : readDirectory(tempRootsDir)) {
        auto name = i.path().filename().string();
        if (name[0] == '.') {
            // Ignore hidden files. Some package managers (notably portage) create
            // those to keep the directory alive.
            continue;
        }
        Path path = i.path();

        pid_t pid = std::stoi(name);

        debug("reading temporary root file '%1%'", path);
        AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
        if (!fd) {
            /* It's okay if the file has disappeared. */
            if (errno == ENOENT) continue;
            throw SysError("opening temporary roots file '%1%'", path);
        }

        /* Try to acquire a write lock without blocking. This can
           only succeed if the owning process has died. In that case
           we don't care about its temporary roots. */
        if (lockFile(fd.get(), ltWrite, false)) {
            printInfo("removing stale temporary roots file '%1%'", path);
            unlink(path.c_str());
            writeFull(fd.get(), "d");
            continue;
        }

        /* Read the entire file. */
        auto contents = readFile(fd.get());

        /* Extract the roots. */
        std::string::size_type pos = 0, end;

        while ((end = contents.find((char) 0, pos)) != std::string::npos) {
            Path root(contents, pos, end - pos);
            debug("got temporary root '%s'", root);
            tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid));
            pos = end + 1;
        }
    }
}


void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, Roots & roots)
{
    auto foundRoot = [&](const Path & path, const Path & target) {
        try {
            auto storePath = toStorePath(target).first;
            if (isValidPath(storePath))
                roots[std::move(storePath)].emplace(path);
            else
                printInfo("skipping invalid root from '%1%' to '%2%'", path, target);
        } catch (BadStorePath &) { }
    };

    try {

        if (type == std::filesystem::file_type::unknown)
            type = getFileType(path);

        if (type == std::filesystem::file_type::directory) {
            for (auto & i : readDirectory(path))
                findRoots(i.path().string(), i.symlink_status().type(), roots);
        }

        else if (type == std::filesystem::file_type::symlink) {
            Path target = readLink(path);
            if (isInStore(target))
                foundRoot(path, target);

            /* Handle indirect roots. */
            else {
                target = absPath(target, dirOf(path));
                if (!pathExists(target)) {
                    if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
                        printInfo("removing stale link from '%1%' to '%2%'", path, target);
                        unlink(path.c_str());
                    }
                } else {
                    struct stat st2 = lstat(target);
                    if (!S_ISLNK(st2.st_mode)) return;
                    Path target2 = readLink(target);
                    if (isInStore(target2)) foundRoot(target, target2);
                }
            }
        }

        else if (type == std::filesystem::file_type::regular) {
            auto storePath = maybeParseStorePath(storeDir + "/" + std::string(baseNameOf(path)));
            if (storePath && isValidPath(*storePath))
                roots[std::move(*storePath)].emplace(path);
        }

    }

    catch (std::filesystem::filesystem_error & e) {
        /* We only ignore permanent failures. */
        if (e.code() == std::errc::permission_denied || e.code() == std::errc::no_such_file_or_directory || e.code() == std::errc::not_a_directory)
            printInfo("cannot read potential root '%1%'", path);
        else
            throw;
    }

    catch (SysError & e) {
        /* We only ignore permanent failures. */
        if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
            printInfo("cannot read potential root '%1%'", path);
        else
            throw;
    }
}


void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
{
    /* Process direct roots in {gcroots,profiles}. */
    findRoots(stateDir + "/" + gcRootsDir, std::filesystem::file_type::unknown, roots);
    findRoots(stateDir + "/profiles", std::filesystem::file_type::unknown, roots);

    /* Add additional roots returned by different platforms-specific
       heuristics. This is typically used to add running programs to
       the set of roots (to prevent them from being garbage collected). */
    findRuntimeRoots(roots, censor);
}


Roots LocalStore::findRoots(bool censor)
{
    Roots roots;
    findRootsNoTemp(roots, censor);

    findTempRoots(roots, censor);

    return roots;
}

typedef std::unordered_map<Path, std::unordered_set<std::string>> UncheckedRoots;

static void readProcLink(const std::string & file, UncheckedRoots & roots)
{
    constexpr auto bufsiz = PATH_MAX;
    char buf[bufsiz];
    auto res = readlink(file.c_str(), buf, bufsiz);
    if (res == -1) {
        if (errno == ENOENT || errno == EACCES || errno == ESRCH)
            return;
        throw SysError("reading symlink");
    }
    if (res == bufsiz) {
        throw Error("overly long symlink starting with '%1%'", std::string_view(buf, bufsiz));
    }
    if (res > 0 && buf[0] == '/')
        roots[std::string(static_cast<char *>(buf), res)]
            .emplace(file);
}

static std::string quoteRegexChars(const std::string & raw)
{
    static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
    return std::regex_replace(raw, specialRegex, R"(\$&)");
}

#if __linux__
static void readFileRoots(const char * path, UncheckedRoots & roots)
{
    try {
        roots[readFile(path)].emplace(path);
    } catch (SysError & e) {
        if (e.errNo != ENOENT && e.errNo != EACCES)
            throw;
    }
}
#endif

void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
    UncheckedRoots unchecked;

    auto procDir = AutoCloseDir{opendir("/proc")};
    if (procDir) {
        struct dirent * ent;
        auto digitsRegex = std::regex(R"(^\d+$)");
        auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
        auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
        while (errno = 0, ent = readdir(procDir.get())) {
            checkInterrupt();
            if (std::regex_match(ent->d_name, digitsRegex)) {
                try {
                    readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
                    readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);

                    auto fdStr = fmt("/proc/%s/fd", ent->d_name);
                    auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
                    if (!fdDir) {
                        if (errno == ENOENT || errno == EACCES)
                            continue;
                        throw SysError("opening %1%", fdStr);
                    }
                    struct dirent * fd_ent;
                    while (errno = 0, fd_ent = readdir(fdDir.get())) {
                        if (fd_ent->d_name[0] != '.')
                            readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
                    }
                    if (errno) {
                        if (errno == ESRCH)
                            continue;
                        throw SysError("iterating /proc/%1%/fd", ent->d_name);
                    }
                    fdDir.reset();

                    auto mapFile = fmt("/proc/%s/maps", ent->d_name);
                    auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n");
                    for (const auto & line : mapLines) {
                        auto match = std::smatch{};
                        if (std::regex_match(line, match, mapRegex))
                            unchecked[match[1]].emplace(mapFile);
                    }

                    auto envFile = fmt("/proc/%s/environ", ent->d_name);
                    auto envString = readFile(envFile);
                    auto env_end = std::sregex_iterator{};
                    for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
                        unchecked[i->str()].emplace(envFile);
                } catch (SystemError & e) {
                    if (errno == ENOENT || errno == EACCES || errno == ESRCH)
                        continue;
                    throw;
                }
            }
        }
        if (errno)
            throw SysError("iterating /proc");
    }

#if !defined(__linux__)
    // lsof is really slow on OS X. This actually causes the gc-concurrent.sh test to fail.
    // See: https://github.com/NixOS/nix/issues/3011
    // Because of this we disable lsof when running the tests.
    if (getEnv("_NIX_TEST_NO_LSOF") != "1") {
        try {
            std::regex lsofRegex(R"(^n(/.*)$)");
            auto lsofLines =
                tokenizeString<std::vector<std::string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
            for (const auto & line : lsofLines) {
                std::smatch match;
                if (std::regex_match(line, match, lsofRegex))
                    unchecked[match[1]].emplace("{lsof}");
            }
        } catch (ExecError & e) {
            /* lsof not installed, lsof failed */
        }
    }
#endif

#if __linux__
    readFileRoots("/proc/sys/kernel/modprobe", unchecked);
    readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
    readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
#endif

    for (auto & [target, links] : unchecked) {
        if (!isInStore(target)) continue;
        try {
            auto path = toStorePath(target).first;
            if (!isValidPath(path)) continue;
            debug("got additional root '%1%'", printStorePath(path));
            if (censor)
                roots[path].insert(censored);
            else
                roots[path].insert(links.begin(), links.end());
        } catch (BadStorePath &) { }
    }
}


struct GCLimitReached { };


void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    bool shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
    bool gcKeepOutputs = settings.gcKeepOutputs;
    bool gcKeepDerivations = settings.gcKeepDerivations;

    StorePathSet roots, dead, alive;

    struct Shared
    {
        // The temp roots only store the hash part to make it easier to
        // ignore suffixes like '.lock', '.chroot' and '.check'.
        std::unordered_set<std::string> tempRoots;

        // Hash part of the store path currently being deleted, if
        // any.
        std::optional<std::string> pending;
    };

    Sync<Shared> _shared;

    std::condition_variable wakeup;

    /* Using `--ignore-liveness' with `--delete' can have unintended
       consequences if `keep-outputs' or `keep-derivations' are true
       (the garbage collector will recurse into deleting the outputs
       or derivers, respectively). So disable them. */
    if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
        gcKeepOutputs = false;
        gcKeepDerivations = false;
    }

    if (shouldDelete)
        deletePath(reservedPath);

    /* Acquire the global GC root. Note: we don't use fdGCLock
       here because then in auto-gc mode, another thread could
       downgrade our exclusive lock. */
    auto fdGCLock = openGCLock();
    FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");

    /* Synchronisation point to test ENOENT handling in
       addTempRoot(), see tests/gc-non-blocking.sh. */
    if (auto p = getEnv("_NIX_TEST_GC_SYNC_1"))
        readFile(*p);

    /* Start the server for receiving new roots. */
    auto socketPath = stateDir.get() + gcSocketPath;
    createDirs(dirOf(socketPath));
    auto fdServer = createUnixDomainSocket(socketPath, 0666);

    if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
        throw SysError("making socket '%1%' non-blocking", socketPath);

    Pipe shutdownPipe;
    shutdownPipe.create();

    std::thread serverThread([&]() {
        Sync<std::map<int, std::thread>> connections;

        Finally cleanup([&]() {
            debug("GC roots server shutting down");
            fdServer.close();
            while (true) {
                auto item = remove_begin(*connections.lock());
                if (!item) break;
                auto & [fd, thread] = *item;
                shutdown(fd, SHUT_RDWR);
                thread.join();
            }
        });

        while (true) {
            std::vector<struct pollfd> fds;
            fds.push_back({.fd = shutdownPipe.readSide.get(), .events = POLLIN});
            fds.push_back({.fd = fdServer.get(), .events = POLLIN});
            auto count = poll(fds.data(), fds.size(), -1);
            assert(count != -1);

            if (fds[0].revents)
                /* Parent is asking us to quit. */
                break;

            if (fds[1].revents) {
                /* Accept a new connection. */
                assert(fds[1].revents & POLLIN);
                AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
                if (!fdClient) continue;

                debug("GC roots server accepted new client");

                /* Process the connection in a separate thread. */
                auto fdClient_ = fdClient.get();
                std::thread clientThread([&, fdClient = std::move(fdClient)]() {
                    Finally cleanup([&]() {
                        auto conn(connections.lock());
                        auto i = conn->find(fdClient.get());
                        if (i != conn->end()) {
                            i->second.detach();
                            conn->erase(i);
                        }
                    });

                    /* On macOS, accepted sockets inherit the
                       non-blocking flag from the server socket, so
                       explicitly make it blocking. */
                    if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
                        abort();

                    while (true) {
                        try {
                            auto path = readLine(fdClient.get());
                            auto storePath = maybeParseStorePath(path);
                            if (storePath) {
                                debug("got new GC root '%s'", path);
                                auto hashPart = std::string(storePath->hashPart());
                                auto shared(_shared.lock());
                                shared->tempRoots.insert(hashPart);
                                /* If this path is currently being
                                   deleted, then we have to wait until
                                   deletion is finished to ensure that
                                   the client doesn't start
                                   re-creating it before we're
                                   done. FIXME: ideally we would use a
                                   FD for this so we don't block the
                                   poll loop. */
                                while (shared->pending == hashPart) {
                                    debug("synchronising with deletion of path '%s'", path);
                                    shared.wait(wakeup);
                                }
                            } else
                                printError("received garbage instead of a root from client");
                            writeFull(fdClient.get(), "1", false);
                        } catch (Error & e) {
                            debug("reading GC root from client: %s", e.msg());
                            break;
                        }
                    }
                });

                connections.lock()->insert({fdClient_, std::move(clientThread)});
            }
        }
    });

    Finally stopServer([&]() {
        writeFull(shutdownPipe.writeSide.get(), "x", false);
        wakeup.notify_all();
        if (serverThread.joinable()) serverThread.join();
    });

    /* Find the roots. Since we've grabbed the GC lock, the set of
       permanent roots cannot increase now. */
    printInfo("finding garbage collector roots...");
    Roots rootMap;
    if (!options.ignoreLiveness)
        findRootsNoTemp(rootMap, true);

    for (auto & i : rootMap) roots.insert(i.first);

    /* Read the temporary roots created before we acquired the global
       GC root. Any new roots will be sent to our socket. */
    Roots tempRoots;
    findTempRoots(tempRoots, true);
    for (auto & root : tempRoots) {
        _shared.lock()->tempRoots.insert(std::string(root.first.hashPart()));
        roots.insert(root.first);
    }

    /* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */
    if (auto p = getEnv("_NIX_TEST_GC_SYNC_2"))
        readFile(*p);

    /* Helper function that deletes a path from the store and throws
       GCLimitReached if we've deleted enough garbage. */
    auto deleteFromStore = [&](std::string_view baseName)
    {
        Path path = storeDir + "/" + std::string(baseName);
        Path realPath = realStoreDir + "/" + std::string(baseName);

        /* There may be temp directories in the store that are still in use
           by another process. We need to be sure that we can acquire an
           exclusive lock before deleting them. */
        if (baseName.find("tmp-", 0) == 0) {
            AutoCloseFD tmpDirFd = open(realPath.c_str(), O_RDONLY | O_DIRECTORY);
            if (tmpDirFd.get() == -1 || !lockFile(tmpDirFd.get(), ltWrite, false)) {
                debug("skipping locked tempdir '%s'", realPath);
                return;
            }
        }

        printInfo("deleting '%1%'", path);

        results.paths.insert(path);

        uint64_t bytesFreed;
        deleteStorePath(realPath, bytesFreed);

        results.bytesFreed += bytesFreed;

        if (results.bytesFreed > options.maxFreed) {
            printInfo("deleted more than %d bytes; stopping", options.maxFreed);
            throw GCLimitReached();
        }
    };

    std::map<StorePath, StorePathSet> referrersCache;

    /* Helper function that visits all paths reachable from `start`
       via the referrers edges and optionally derivers and derivation
       output edges. If none of those paths are roots, then all
       visited paths are garbage and are deleted. */
    auto deleteReferrersClosure = [&](const StorePath & start) {
        StorePathSet visited;
        std::queue<StorePath> todo;

        /* Wake up any GC client waiting for deletion of the paths in
           'visited' to finish. */
        Finally releasePending([&]() {
            auto shared(_shared.lock());
            shared->pending.reset();
            wakeup.notify_all();
        });

        auto enqueue = [&](const StorePath & path) {
            if (visited.insert(path).second)
                todo.push(path);
        };

        enqueue(start);

        while (auto path = pop(todo)) {
            checkInterrupt();

            /* Bail out if we've previously discovered that this path
               is alive. */
            if (alive.count(*path)) {
                alive.insert(start);
                return;
            }

            /* If we've previously deleted this path, we don't have to
               handle it again. */
            if (dead.count(*path)) continue;

            auto markAlive = [&]()
            {
                alive.insert(*path);
                alive.insert(start);
                try {
                    StorePathSet closure;
                    computeFSClosure(*path, closure,
                        /* flipDirection */ false, gcKeepOutputs, gcKeepDerivations);
                    for (auto & p : closure)
                        alive.insert(p);
                } catch (InvalidPath &) { }
            };

            /* If this is a root, bail out. */
            if (roots.count(*path)) {
                debug("cannot delete '%s' because it's a root", printStorePath(*path));
                return markAlive();
            }

            if (options.action == GCOptions::gcDeleteSpecific
                && !options.pathsToDelete.count(*path))
                return;

            {
                auto hashPart = std::string(path->hashPart());
                auto shared(_shared.lock());
                if (shared->tempRoots.count(hashPart)) {
                    debug("cannot delete '%s' because it's a temporary root", printStorePath(*path));
                    return markAlive();
                }
                shared->pending = hashPart;
            }

            if (isValidPath(*path)) {

                /* Visit the referrers of this path. */
                auto i = referrersCache.find(*path);
                if (i == referrersCache.end()) {
                    StorePathSet referrers;
                    queryGCReferrers(*path, referrers);
                    referrersCache.emplace(*path, std::move(referrers));
                    i = referrersCache.find(*path);
                }
                for (auto & p : i->second)
                    enqueue(p);

                /* If keep-derivations is set and this is a
                   derivation, then visit the derivation outputs. */
                if (gcKeepDerivations && path->isDerivation()) {
                    for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
                        if (maybeOutPath &&
                            isValidPath(*maybeOutPath) &&
                            queryPathInfo(*maybeOutPath)->deriver == *path)
                            enqueue(*maybeOutPath);
                }

                /* If keep-outputs is set, then visit the derivers. */
                if (gcKeepOutputs) {
                    auto derivers = queryValidDerivers(*path);
                    for (auto & i : derivers)
                        enqueue(i);
                }
            }
        }

        for (auto & path : topoSortPaths(visited)) {
            if (!dead.insert(path).second) continue;
            if (shouldDelete) {
                invalidatePathChecked(path);
                deleteFromStore(path.to_string());
                referrersCache.erase(path);
            }
        }
    };

    /* Either delete all garbage paths, or just the specified
       paths (for gcDeleteSpecific). */
    if (options.action == GCOptions::gcDeleteSpecific) {

        for (auto & i : options.pathsToDelete) {
            deleteReferrersClosure(i);
            if (!dead.count(i))
                throw Error(
                    "Cannot delete path '%1%' since it is still alive. "
                    "To find out why, use: "
                    "nix-store --query --roots",
                    printStorePath(i));
        }

    } else if (options.maxFreed > 0) {

        if (shouldDelete)
            printInfo("deleting garbage...");
        else
            printInfo("determining live/dead paths...");

        try {
            AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
            if (!dir) throw SysError("opening directory '%1%'", realStoreDir);

            /* Read the store and delete all paths that are invalid or
               unreachable. We don't use readDirectory() here so that
               GCing can start faster. */
            auto linksName = baseNameOf(linksDir);
            Paths entries;
            struct dirent * dirent;
            while (errno = 0, dirent = readdir(dir.get())) {
                checkInterrupt();
                std::string name = dirent->d_name;
                if (name == "." || name == ".." || name == linksName) continue;

                if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
                    deleteReferrersClosure(*storePath);
                else
                    deleteFromStore(name);

            }
        } catch (GCLimitReached & e) {
        }
    }

    if (options.action == GCOptions::gcReturnLive) {
        for (auto & i : alive)
            results.paths.insert(printStorePath(i));
        return;
    }

    if (options.action == GCOptions::gcReturnDead) {
        for (auto & i : dead)
            results.paths.insert(printStorePath(i));
        return;
    }

    /* Unlink all files in /nix/store/.links that have a link count of 1,
       which indicates that there are no other links and so they can be
       safely deleted. FIXME: race condition with optimisePath(): we
       might see a link count of 1 just before optimisePath() increases
       the link count. */
    if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
        printInfo("deleting unused links...");

        AutoCloseDir dir(opendir(linksDir.c_str()));
        if (!dir) throw SysError("opening directory '%1%'", linksDir);

        int64_t actualSize = 0, unsharedSize = 0;

        struct dirent * dirent;
        while (errno = 0, dirent = readdir(dir.get())) {
            checkInterrupt();
            std::string name = dirent->d_name;
            if (name == "." || name == "..") continue;
            Path path = linksDir + "/" + name;

            auto st = lstat(path);

            if (st.st_nlink != 1) {
                actualSize += st.st_size;
                unsharedSize += (st.st_nlink - 1) * st.st_size;
                continue;
            }

            printMsg(lvlTalkative, "deleting unused link '%1%'", path);

            if (unlink(path.c_str()) == -1)
                throw SysError("deleting '%1%'", path);

            /* Do not account for deleted file here. Rely on deletePath()
               accounting. */
        }

        struct stat st;
        if (stat(linksDir.c_str(), &st) == -1)
            throw SysError("statting '%1%'", linksDir);
        int64_t overhead = st.st_blocks * 512ULL;

        printInfo("note: currently hard linking saves %.2f MiB",
            ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
    }

    /* While we're at it, vacuum the database. */
    //if (options.action == GCOptions::gcDeleteDead) vacuumDB();
}


void LocalStore::autoGC(bool sync)
{
    static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");

    auto getAvail = [this]() -> uint64_t {
        if (fakeFreeSpaceFile)
            return std::stoll(readFile(*fakeFreeSpaceFile));

        struct statvfs st;
        if (statvfs(realStoreDir.get().c_str(), &st))
            throw SysError("getting filesystem info about '%s'", realStoreDir);

        return (uint64_t) st.f_bavail * st.f_frsize;
    };

    std::shared_future<void> future;

    {
        auto state(_state.lock());

        if (state->gcRunning) {
            future = state->gcFuture;
            debug("waiting for auto-GC to finish");
            goto sync;
        }

        auto now = std::chrono::steady_clock::now();

        if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;

        auto avail = getAvail();

        state->lastGCCheck = now;

        if (avail >= settings.minFree || avail >= settings.maxFree) return;

        if (avail > state->availAfterGC * 0.97) return;

        state->gcRunning = true;

        std::promise<void> promise;
        future = state->gcFuture = promise.get_future().share();

        std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable {

            try {

                /* Wake up any threads waiting for the auto-GC to finish. */
                Finally wakeup([&]() {
                    auto state(_state.lock());
                    state->gcRunning = false;
                    state->lastGCCheck = std::chrono::steady_clock::now();
                    promise.set_value();
                });

                GCOptions options;
                options.maxFreed = settings.maxFree - avail;

                printInfo("running auto-GC to free %d bytes", options.maxFreed);

                GCResults results;

                collectGarbage(options, results);

                _state.lock()->availAfterGC = getAvail();

            } catch (...) {
                // FIXME: we could propagate the exception to the
                // future, but we don't really care.
                ignoreException();
            }

        }).detach();
    }

 sync:
    // Wait for the future outside of the state lock.
    if (sync) future.get();
}


}
File diff suppressed because it is too large
@@ -1,406 +0,0 @@
#pragma once
///@file

#include "sqlite.hh"

#include "pathlocks.hh"
#include "store-api.hh"
#include "indirect-root-store.hh"
#include "sync.hh"

#include <chrono>
#include <future>
#include <string>
#include <unordered_set>


namespace nix {


/**
 * Nix store and database schema version.
 *
 * Version 1 (or 0) was Nix <=
 * 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
 * Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
 * Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0.
 */
const int nixSchemaVersion = 10;


struct OptimiseStats
{
    unsigned long filesLinked = 0;
    uint64_t bytesFreed = 0;
};

struct LocalStoreConfig : virtual LocalFSStoreConfig
{
    using LocalFSStoreConfig::LocalFSStoreConfig;

    Setting<bool> requireSigs{this,
        settings.requireSigs,
        "require-sigs",
        "Whether store paths copied into this store should have a trusted signature."};

    Setting<bool> readOnly{this,
        false,
        "read-only",
        R"(
          Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.

          Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.

          Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.

          > **Warning**
          > Do not use this unless the filesystem is read-only.
          >
          > Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
          > While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
        )"};

    const std::string name() override { return "Local Store"; }

    std::string doc() override;
};

class LocalStore : public virtual LocalStoreConfig
    , public virtual IndirectRootStore
    , public virtual GcStore
{
private:

    /**
     * Lock file used for upgrading.
     */
    AutoCloseFD globalLock;

    struct State
    {
        /**
         * The SQLite database object.
         */
        SQLite db;

        struct Stmts;
        std::unique_ptr<Stmts> stmts;

        /**
         * The last time we checked whether to do an auto-GC, or an
         * auto-GC finished.
         */
        std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;

        /**
         * Whether auto-GC is running. If so, get gcFuture to wait for
         * the GC to finish.
         */
        bool gcRunning = false;
        std::shared_future<void> gcFuture;

        /**
         * How much disk space was available after the previous
         * auto-GC. If the current available disk space is below
         * minFree but not much below availAfterGC, then there is no
         * point in starting a new GC.
         */
        uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();

        std::unique_ptr<PublicKeys> publicKeys;
    };

    Sync<State> _state;

public:

    const Path dbDir;
    const Path linksDir;
    const Path reservedPath;
    const Path schemaPath;
    const Path tempRootsDir;
    const Path fnTempRoots;

private:

    const PublicKeys & getPublicKeys();

public:

    /**
     * Hack for build-remote.cc.
     */
    PathSet locksHeld;

    /**
     * Initialise the local store, upgrading the schema if
     * necessary.
     */
    LocalStore(const Params & params);
    LocalStore(std::string scheme, std::string path, const Params & params);

    ~LocalStore();

    static std::set<std::string> uriSchemes()
    { return {}; }

    /**
     * Implementations of abstract store API methods.
     */

    std::string getUri() override;

    bool isValidPathUncached(const StorePath & path) override;

    StorePathSet queryValidPaths(const StorePathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override;

    StorePathSet queryAllValidPaths() override;

    void queryPathInfoUncached(const StorePath & path,
        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;

    void queryReferrers(const StorePath & path, StorePathSet & referrers) override;

    StorePathSet queryValidDerivers(const StorePath & path) override;

    std::map<std::string, std::optional<StorePath>> queryStaticPartialDerivationOutputMap(const StorePath & path) override;

    std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;

    StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;

    bool pathInfoIsUntrusted(const ValidPathInfo &) override;
    bool realisationIsUntrusted(const Realisation & ) override;

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs) override;

    StorePath addToStoreFromDump(
        Source & dump,
        std::string_view name,
        FileSerialisationMethod dumpMethod,
        ContentAddressMethod hashMethod,
        HashAlgorithm hashAlgo,
        const StorePathSet & references,
        RepairFlag repair) override;

    void addTempRoot(const StorePath & path) override;

private:

    void createTempRootsFile();

    /**
     * The file to which we write our temporary roots.
     */
    Sync<AutoCloseFD> _fdTempRoots;

    /**
     * The global GC lock.
     */
    Sync<AutoCloseFD> _fdGCLock;

    /**
     * Connection to the garbage collector.
     */
    Sync<AutoCloseFD> _fdRootsSocket;

public:

    /**
     * Implementation of IndirectRootStore::addIndirectRoot().
     *
     * The weak reference merely is a symlink to `path' from
     * /nix/var/nix/gcroots/auto/<hash of `path'>.
     */
    void addIndirectRoot(const Path & path) override;

private:

    void findTempRoots(Roots & roots, bool censor);

    AutoCloseFD openGCLock();

public:

    Roots findRoots(bool censor) override;

    void collectGarbage(const GCOptions & options, GCResults & results) override;

    /**
     * Called by `collectGarbage` to trace in reverse.
     *
     * Using this rather than `queryReferrers` directly allows us to
     * fine-tune which referrers we consider for garbage collection;
     * some store implementations take advantage of this.
     */
    virtual void queryGCReferrers(const StorePath & path, StorePathSet & referrers)
    {
        return queryReferrers(path, referrers);
    }

    /**
     * Called by `collectGarbage` to recursively delete a path.
     * The default implementation simply calls `deletePath`, but it can be
     * overridden by stores that wish to provide their own deletion behaviour.
     */
    virtual void deleteStorePath(const Path & path, uint64_t & bytesFreed);

    /**
     * Optimise the disk space usage of the Nix store by hard-linking
     * files with the same contents.
     */
    void optimiseStore(OptimiseStats & stats);

    void optimiseStore() override;

    /**
     * Optimise a single store path. Optionally, test the encountered
     * symlinks for corruption.
     */
    void optimisePath(const Path & path, RepairFlag repair);

    bool verifyStore(bool checkContents, RepairFlag repair) override;

protected:

    /**
     * Result of `verifyAllValidPaths`
     */
    struct VerificationResult {
        /**
         * Whether any errors were encountered
         */
        bool errors;

        /**
         * A set of so-far valid paths. The store objects pointed to by
         * those paths are suitable for further validation checking.
         */
        StorePathSet validPaths;
    };

    /**
     * First, unconditional step of `verifyStore`
     */
    virtual VerificationResult verifyAllValidPaths(RepairFlag repair);

public:

    /**
     * Register the validity of a path, i.e., that `path` exists, that
     * the paths referenced by it exists, and in the case of an output
     * path of a derivation, that it has been produced by a successful
     * execution of the derivation (or something equivalent). Also
     * register the hash of the file system contents of the path. The
     * hash must be a SHA-256 hash.
     */
    void registerValidPath(const ValidPathInfo & info);

    virtual void registerValidPaths(const ValidPathInfos & infos);

    unsigned int getProtocol() override;

    std::optional<TrustedFlag> isTrustedClient() override;

    void vacuumDB();

    void addSignatures(const StorePath & storePath, const StringSet & sigs) override;

    /**
     * If free disk space in /nix/store if below minFree, delete
     * garbage until it exceeds maxFree.
     */
    void autoGC(bool sync = true);

    /**
     * Register the store path 'output' as the output named 'outputName' of
     * derivation 'deriver'.
     */
    void registerDrvOutput(const Realisation & info) override;
    void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
    void cacheDrvOutputMapping(
        State & state,
        const uint64_t deriver,
        const std::string & outputName,
        const StorePath & output);

    std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
    std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
    void queryRealisationUncached(const DrvOutput&,
        Callback<std::shared_ptr<const Realisation>> callback) noexcept override;

    std::optional<std::string> getVersion() override;

protected:

    void verifyPath(const StorePath & path, std::function<bool(const StorePath &)> existsInStoreDir,
        StorePathSet & done, StorePathSet & validPaths, RepairFlag repair, bool & errors);

private:

    /**
     * Retrieve the current version of the database schema.
     * If the database does not exist yet, the version returned will be 0.
     */
    int getSchema();

    void openDB(State & state, bool create);

    void makeStoreWritable();

    uint64_t queryValidPathId(State & state, const StorePath & path);

    uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);

    void invalidatePath(State & state, const StorePath & path);

    /**
     * Delete a path from the Nix store.
     */
    void invalidatePathChecked(const StorePath & path);

    std::shared_ptr<const ValidPathInfo> queryPathInfoInternal(State & state, const StorePath & path);

    void updatePathInfo(State & state, const ValidPathInfo & info);

    void upgradeStore6();
    void upgradeStore7();
    PathSet queryValidPathsOld();
    ValidPathInfo queryPathInfoOld(const Path & path);

    void findRoots(const Path & path, std::filesystem::file_type type, Roots & roots);

    void findRootsNoTemp(Roots & roots, bool censor);

    void findRuntimeRoots(Roots & roots, bool censor);

    std::pair<std::filesystem::path, AutoCloseFD> createTempDirInStore();

    typedef std::unordered_set<ino_t> InodeHash;

    InodeHash loadInodeHash();
    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
    void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);

    // Internal versions that are not wrapped in retry_sqlite.
    bool isValidPath_(State & state, const StorePath & path);
    void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);

    /**
     * Add signatures to a ValidPathInfo or Realisation using the secret keys
     * specified by the ‘secret-key-files’ option.
     */
    void signPathInfo(ValidPathInfo & info);
    void signRealisation(Realisation &);

    void addBuildLog(const StorePath & drvPath, std::string_view log) override;

    friend struct LocalDerivationGoal;
    friend struct PathSubstitutionGoal;
    friend struct SubstitutionGoal;
    friend struct DerivationGoal;
};

}
@@ -1,39 +0,0 @@
R"(

**Store URL format**: `local`, *root*

This store type accesses a Nix store in the local filesystem directly
(i.e. not via the Nix daemon). *root* is an absolute path that is
prefixed to other directories such as the Nix store directory. The
store pseudo-URL `local` denotes a store that uses `/` as its root
directory.

A store that uses a *root* other than `/` is called a *chroot
store*. With such stores, the store directory is "logically" still
`/nix/store`, so programs stored in them can only be built and
executed by `chroot`-ing into *root*. Chroot stores only support
building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are
enabled.

For example, the following uses `/tmp/root` as the chroot environment
to build or download `nixpkgs#hello` and then execute it:

```console
# nix run --store /tmp/root nixpkgs#hello
Hello, world!
```

Here, the "physical" store location is `/tmp/root/nix/store`, and
Nix's store metadata is in `/tmp/root/nix/var/nix/db`.

It is also possible, but not recommended, to change the "logical"
location of the Nix store from its default of `/nix/store`. This makes
it impossible to use default substituters such as
`https://cache.nixos.org/`, and thus you may have to build everything
locally. Here is an example:

```console
# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello
```

)"
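The same store URLs shown in the console examples above can also be opened from C++. A minimal sketch (hedged: it assumes the `nix::initLibStore` and `nix::openStore` entry points from the libstore headers; it is not part of this documentation file):

```cpp
#include "store-api.hh"
#include <iostream>

int main()
{
    // Assumption: standard libstore initialisation and openStore() as declared
    // in the libstore headers of this tree.
    nix::initLibStore();
    auto store = nix::openStore("/tmp/root");    // chroot store, as in the CLI example above
    std::cout << store->getUri() << std::endl;   // getUri() is declared in local-store.hh above
    return 0;
}
```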
@@ -1,308 +0,0 @@
#include "local-store.hh"
#include "globals.hh"
#include "signals.hh"
#include "posix-fs-canonicalise.hh"
#include "posix-source-accessor.hh"

#include <cstdlib>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <regex>


namespace nix {


static void makeWritable(const Path & path)
{
    auto st = lstat(path);
    if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
        throw SysError("changing writability of '%1%'", path);
}


struct MakeReadOnly
{
    Path path;
    MakeReadOnly(const PathView path) : path(path) { }
    ~MakeReadOnly()
    {
        try {
            /* This will make the path read-only. */
            if (path != "") canonicaliseTimestampAndPermissions(path);
        } catch (...) {
            ignoreException();
        }
    }
};


LocalStore::InodeHash LocalStore::loadInodeHash()
{
    debug("loading hash inodes in memory");
    InodeHash inodeHash;

    AutoCloseDir dir(opendir(linksDir.c_str()));
    if (!dir) throw SysError("opening directory '%1%'", linksDir);

    struct dirent * dirent;
    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
        checkInterrupt();
        // We don't care if we hit non-hash files, anything goes
        inodeHash.insert(dirent->d_ino);
    }
    if (errno) throw SysError("reading directory '%1%'", linksDir);

    printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size());

    return inodeHash;
}


Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
{
    Strings names;

    AutoCloseDir dir(opendir(path.c_str()));
    if (!dir) throw SysError("opening directory '%1%'", path);

    struct dirent * dirent;
    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
        checkInterrupt();

        if (inodeHash.count(dirent->d_ino)) {
            debug("'%1%' is already linked", dirent->d_name);
            continue;
        }

        std::string name = dirent->d_name;
        if (name == "." || name == "..") continue;
        names.push_back(name);
    }
    if (errno) throw SysError("reading directory '%1%'", path);

    return names;
}


void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
    const Path & path, InodeHash & inodeHash, RepairFlag repair)
{
    checkInterrupt();

    auto st = lstat(path);

#if __APPLE__
    /* HFS/macOS has some undocumented security feature disabling hardlinking for
       special files within .app dirs. *.app/Contents/PkgInfo and
       *.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
       https://github.com/NixOS/nix/issues/1443 for more discussion. */

    if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
    {
        debug("'%1%' is not allowed to be linked in macOS", path);
        return;
    }
#endif

    if (S_ISDIR(st.st_mode)) {
        Strings names = readDirectoryIgnoringInodes(path, inodeHash);
        for (auto & i : names)
            optimisePath_(act, stats, path + "/" + i, inodeHash, repair);
        return;
    }

    /* We can hard link regular files and maybe symlinks. */
    if (!S_ISREG(st.st_mode)
#if CAN_LINK_SYMLINK
        && !S_ISLNK(st.st_mode)
#endif
        ) return;

    /* Sometimes SNAFUs can cause files in the Nix store to be
       modified, in particular when running programs as root under
       NixOS (example: $fontconfig/var/cache being modified). Skip
       those files. FIXME: check the modification time. */
    if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
        warn("skipping suspicious writable file '%1%'", path);
        return;
    }

    /* This can still happen on top-level files. */
    if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
        debug("'%s' is already linked, with %d other file(s)", path, st.st_nlink - 2);
        return;
    }

    /* Hash the file. Note that hashPath() returns the hash over the
       NAR serialisation, which includes the execute bit on the file.
       Thus, executable and non-executable files with the same
       contents *won't* be linked (which is good because otherwise the
       permissions would be screwed up).

       Also note that if `path' is a symlink, then we're hashing the
       contents of the symlink (i.e. the result of readlink()), not
       the contents of the target (which may not even exist). */
    Hash hash = ({
        hashPath(
            {make_ref<PosixSourceAccessor>(), CanonPath(path)},
            FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
    });
    debug("'%1%' has hash '%2%'", path, hash.to_string(HashFormat::Nix32, true));

    /* Check if this is a known hash. */
    Path linkPath = linksDir + "/" + hash.to_string(HashFormat::Nix32, false);

    /* Maybe delete the link, if it has been corrupted. */
    if (pathExists(linkPath)) {
        auto stLink = lstat(linkPath);
        if (st.st_size != stLink.st_size
            || (repair && hash != ({
                hashPath(
                    {make_ref<PosixSourceAccessor>(), CanonPath(linkPath)},
                    FileSerialisationMethod::Recursive, HashAlgorithm::SHA256).first;
            })))
        {
            // XXX: Consider overwriting linkPath with our valid version.
            warn("removing corrupted link '%s'", linkPath);
            warn("There may be more corrupted paths."
                 "\nYou should run `nix-store --verify --check-contents --repair` to fix them all");
            unlink(linkPath.c_str());
        }
    }

    if (!pathExists(linkPath)) {
        /* Nope, create a hard link in the links directory. */
        if (link(path.c_str(), linkPath.c_str()) == 0) {
            inodeHash.insert(st.st_ino);
            return;
        }

        switch (errno) {
        case EEXIST:
            /* Fall through if another process created ‘linkPath’ before
               we did. */
            break;

        case ENOSPC:
            /* On ext4, that probably means the directory index is
               full. When that happens, it's fine to ignore it: we
               just effectively disable deduplication of this
               file. */
            printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
            return;

        default:
            throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
        }
    }

    /* Yes! We've seen a file with the same contents. Replace the
       current file with a hard link to that file. */
    auto stLink = lstat(linkPath);

    if (st.st_ino == stLink.st_ino) {
        debug("'%1%' is already linked to '%2%'", path, linkPath);
        return;
    }

    printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath);

    /* Make the containing directory writable, but only if it's not
       the store itself (we don't want or need to mess with its
       permissions). */
    const Path dirOfPath(dirOf(path));
    bool mustToggle = dirOfPath != realStoreDir.get();
    if (mustToggle) makeWritable(dirOfPath);

    /* When we're done, make the directory read-only again and reset
       its timestamp back to 0. */
    MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : "");

    Path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), random());

    if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
        if (errno == EMLINK) {
            /* Too many links to the same file (>= 32000 on most file
               systems). This is likely to happen with empty files.
               Just shrug and ignore. */
            if (st.st_size)
                printInfo("'%1%' has maximum number of links", linkPath);
            return;
        }
        throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
    }

    /* Atomically replace the old file with the new hard link. */
    try {
        renameFile(tempLink, path);
    } catch (SystemError & e) {
        if (unlink(tempLink.c_str()) == -1)
            printError("unable to unlink '%1%'", tempLink);
        if (errno == EMLINK) {
            /* Some filesystems generate too many links on the rename,
               rather than on the original link. (Probably it
               temporarily increases the st_nlink field before
               decreasing it again.) */
            debug("'%s' has reached maximum number of links", linkPath);
            return;
        }
        throw;
    }

    stats.filesLinked++;
    stats.bytesFreed += st.st_size;

    if (act)
        act->result(resFileLinked, st.st_size, st.st_blocks);
}


void LocalStore::optimiseStore(OptimiseStats & stats)
{
    Activity act(*logger, actOptimiseStore);

    auto paths = queryAllValidPaths();
    InodeHash inodeHash = loadInodeHash();

    act.progress(0, paths.size());

    uint64_t done = 0;

    for (auto & i : paths) {
        addTempRoot(i);
        if (!isValidPath(i)) continue; /* path was GC'ed, probably */
        {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", printStorePath(i)));
            optimisePath_(&act, stats, realStoreDir + "/" + std::string(i.to_string()), inodeHash, NoRepair);
        }
        done++;
        act.progress(done, paths.size());
    }
}

void LocalStore::optimiseStore()
{
    OptimiseStats stats;

    optimiseStore(stats);

    printInfo("%s freed by hard-linking %d files",
showBytes(stats.bytesFreed),
|
||||
stats.filesLinked);
|
||||
}
|
||||
|
||||
void LocalStore::optimisePath(const Path & path, RepairFlag repair)
|
||||
{
|
||||
OptimiseStats stats;
|
||||
InodeHash inodeHash;
|
||||
|
||||
if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash, repair);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
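The functions above make up the whole deduplication pass: optimiseStore() walks every valid path, while optimisePath() is the per-path hook guarded by settings.autoOptimiseStore. As a minimal sketch (not part of the diff), the same pass can be driven through the existing public Store API; openStore() and Store::optimiseStore() are real interfaces, but library initialisation and error handling are omitted here:

#include "store-api.hh"

int main()
{
    // Opens the default store (typically the local store or the daemon).
    auto store = nix::openStore();
    // Hard-links identical files into the store's .links directory,
    // exactly as optimisePath_() above does for each path.
    store->optimiseStore();
}

On the command line the equivalent one-off pass is `nix-store --optimise`.
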
@ -1,171 +0,0 @@
#if HAVE_ACL_SUPPORT
# include <sys/xattr.h>
#endif

#include "posix-fs-canonicalise.hh"
#include "file-system.hh"
#include "signals.hh"
#include "util.hh"
#include "globals.hh"
#include "store-api.hh"

namespace nix {

const time_t mtimeStore = 1; /* 1 second into the epoch */


static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
{
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError("changing mode of '%1%' to %2$o", path, mode);
        }

    }

    if (st.st_mtime != mtimeStore) {
        struct timeval times[2];
        times[0].tv_sec = st.st_atime;
        times[0].tv_usec = 0;
        times[1].tv_sec = mtimeStore;
        times[1].tv_usec = 0;
#if HAVE_LUTIMES
        if (lutimes(path.c_str(), times) == -1)
            if (errno != ENOSYS ||
                (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
#else
        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
            throw SysError("changing modification time of '%1%'", path);
    }
}


void canonicaliseTimestampAndPermissions(const Path & path)
{
    canonicaliseTimestampAndPermissions(path, lstat(path));
}


static void canonicalisePathMetaData_(
    const Path & path,
    std::optional<std::pair<uid_t, uid_t>> uidRange,
    InodesSeen & inodesSeen)
{
    checkInterrupt();

#if __APPLE__
    /* Remove flags, in particular UF_IMMUTABLE which would prevent
       the file from being garbage-collected. FIXME: Use
       setattrlist() to remove other attributes as well. */
    if (lchflags(path.c_str(), 0)) {
        if (errno != ENOTSUP)
            throw SysError("clearing flags of path '%1%'", path);
    }
#endif

    auto st = lstat(path);

    /* Really make sure that the path is of a supported type. */
    if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
        throw Error("file '%1%' has an unsupported type", path);

#if HAVE_ACL_SUPPORT
    /* Remove extended attributes / ACLs. */
    ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0);

    if (eaSize < 0) {
        if (errno != ENOTSUP && errno != ENODATA)
            throw SysError("querying extended attributes of '%s'", path);
    } else if (eaSize > 0) {
        std::vector<char> eaBuf(eaSize);

        if ((eaSize = llistxattr(path.c_str(), eaBuf.data(), eaBuf.size())) < 0)
            throw SysError("querying extended attributes of '%s'", path);

        for (auto & eaName: tokenizeString<Strings>(std::string(eaBuf.data(), eaSize), std::string("\000", 1))) {
            if (settings.ignoredAcls.get().count(eaName)) continue;
            if (lremovexattr(path.c_str(), eaName.c_str()) == -1)
                throw SysError("removing extended attribute '%s' from '%s'", eaName, path);
        }
    }
#endif

    /* Fail if the file is not owned by the build user. This prevents
       us from messing up the ownership/permissions of files
       hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
       However, ignore files that we chown'ed ourselves previously to
       ensure that we don't fail on hard links within the same build
       (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
    if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
        if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
            throw BuildError("invalid ownership on file '%1%'", path);
        mode_t mode = st.st_mode & ~S_IFMT;
        assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
        return;
    }

    inodesSeen.insert(Inode(st.st_dev, st.st_ino));

    canonicaliseTimestampAndPermissions(path, st);

    /* Change ownership to the current uid. If it's a symlink, use
       lchown if available, otherwise don't bother. Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable. The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), getegid()) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), getegid()) == -1)
#endif
            throw SysError("changing owner of '%1%' to %2%",
                path, geteuid());
    }

    if (S_ISDIR(st.st_mode)) {
        std::vector<std::filesystem::directory_entry> entries = readDirectory(path);
        for (auto & i : entries)
            canonicalisePathMetaData_(i.path().string(), uidRange, inodesSeen);
    }
}


void canonicalisePathMetaData(
    const Path & path,
    std::optional<std::pair<uid_t, uid_t>> uidRange,
    InodesSeen & inodesSeen)
{
    canonicalisePathMetaData_(path, uidRange, inodesSeen);

    /* On platforms that don't have lchown(), the top-level path can't
       be a symlink, since we can't change its ownership. */
    auto st = lstat(path);

    if (st.st_uid != geteuid()) {
        assert(S_ISLNK(st.st_mode));
        throw Error("wrong ownership of top-level store path '%1%'", path);
    }
}


void canonicalisePathMetaData(const Path & path,
    std::optional<std::pair<uid_t, uid_t>> uidRange)
{
    InodesSeen inodesSeen;
    canonicalisePathMetaData(path, uidRange, inodesSeen);
}

}

@ -1,45 +0,0 @@
#pragma once
///@file

#include <sys/stat.h>
#include <sys/time.h>

#include "types.hh"
#include "error.hh"

namespace nix {

typedef std::pair<dev_t, ino_t> Inode;
typedef std::set<Inode> InodesSeen;


/**
 * "Fix", or canonicalise, the meta-data of the files in a store path
 * after it has been built. In particular:
 *
 * - the last modification date on each file is set to 1 (i.e.,
 *   00:00:01 1/1/1970 UTC)
 *
 * - the permissions are set to 444 or 555 (i.e., read-only with or
 *   without execute permission; setuid bits etc. are cleared)
 *
 * - the owner and group are set to the Nix user and group, if we're
 *   running as root.
 *
 * If uidRange is not empty, this function will throw an error if it
 * encounters files owned by a user outside of the closed interval
 * [uidRange->first, uidRange->second].
 */
void canonicalisePathMetaData(
    const Path & path,
    std::optional<std::pair<uid_t, uid_t>> uidRange,
    InodesSeen & inodesSeen);
void canonicalisePathMetaData(
    const Path & path,
    std::optional<std::pair<uid_t, uid_t>> uidRange);

void canonicaliseTimestampAndPermissions(const Path & path);

MakeError(PathInUse, Error);

}

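As a usage sketch (not part of the diff), the two-argument overload declared above can be called after a build to normalise an output while rejecting foreign-owned files. The function name and signature come from this header; the wrapper name, path and uid values below are made up for illustration:

#include <utility>

#include "posix-fs-canonicalise.hh"

void fixupBuildOutput(const nix::Path & output, uid_t buildUser)
{
    /* Restrict ownership to the single build user: files owned by a uid
       outside [buildUser, buildUser] make canonicalisePathMetaData throw
       BuildError (e.g. "ln /etc/shadow $out/foo"). */
    nix::canonicalisePathMetaData(output, std::pair<uid_t, uid_t>{buildUser, buildUser});
}
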
@ -1,42 +0,0 @@
create table if not exists ValidPaths (
    id integer primary key autoincrement not null,
    path text unique not null,
    hash text not null, -- base16 representation
    registrationTime integer not null,
    deriver text,
    narSize integer,
    ultimate integer, -- null implies "false"
    sigs text, -- space-separated
    ca text -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
);

create table if not exists Refs (
    referrer integer not null,
    reference integer not null,
    primary key (referrer, reference),
    foreign key (referrer) references ValidPaths(id) on delete cascade,
    foreign key (reference) references ValidPaths(id) on delete restrict
);

create index if not exists IndexReferrer on Refs(referrer);
create index if not exists IndexReference on Refs(reference);

-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
-- table. This causes a deletion of the corresponding row in
-- ValidPaths to cause a foreign key constraint violation (due to `on
-- delete restrict' on the `reference' column). Therefore, explicitly
-- get rid of self-references.
create trigger if not exists DeleteSelfRefs before delete on ValidPaths
begin
    delete from Refs where referrer = old.id and reference = old.id;
end;

create table if not exists DerivationOutputs (
    drv integer not null,
    id text not null, -- symbolic output id, usually "out"
    path text not null,
    primary key (drv, id),
    foreign key (drv) references ValidPaths(id) on delete cascade
);

create index if not exists IndexDerivationOutputs on DerivationOutputs(path);

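The self-reference case handled by DeleteSelfRefs is easiest to see with a concrete run. The sketch below (not part of the diff) uses the plain sqlite3 C API; it assumes the schema above has already been executed against db, and the inserted path and hash values are placeholders:

#include <sqlite3.h>
#include <cstdio>

void demoSelfRefDeletion(sqlite3 * db)
{
    char * err = nullptr;
    int rc = sqlite3_exec(db,
        "pragma foreign_keys = on;"
        /* A path that refers to itself: the tuple (1, 1) in Refs. */
        "insert into ValidPaths(id, path, hash, registrationTime) "
        "  values (1, '/nix/store/aaaa-example', 'sha256:0000', 0);"
        "insert into Refs(referrer, reference) values (1, 1);"
        /* Without the trigger, the 'on delete restrict' foreign key on
           Refs.reference would block this delete; DeleteSelfRefs drops the
           self-reference first, so it succeeds. */
        "delete from ValidPaths where id = 1;",
        nullptr, nullptr, &err);
    if (rc != SQLITE_OK) {
        std::fprintf(stderr, "unexpected error: %s\n", err);
        sqlite3_free(err);
    }
}
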