1
1
Fork 0
mirror of https://github.com/NixOS/nix.git synced 2025-11-22 02:09:36 +01:00

Tagging release 2.27.1

-----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEtUHVUwEnDgvPFcpdgXC0cm1xmN4FAmfheacTHGVkb2xzdHJh
 QGdtYWlsLmNvbQAKCRCBcLRybXGY3kt2B/4tQvs6iDXA12d409ClHbVQjr1d0FLP
 rv8RxZ7Z4+Jaw8r2ra/I+gpr9juI5ULyEJWqfES72hTvbYPjH1Grsrrjak1tx57E
 +STs21oEPojE8LXsFH1oZamGPPIIpyQdxCvTgZs1N6cqUfCRQ3Jx97X6E6SIGJDR
 VqBM4ruSXCY57yT36HqwYydTkxzZHiNP5wwABGfSb7u9pYW5x3r8W7+fQ3udTnCw
 kCRhA5vnfxIQSlxu4j7dJqSCGzOIPnhYB19bXDV4aPhl4sn3pkBCdMZxPBlCWSwx
 it0ngMITf+TeiMpVl2TtvMBOHtlGrbhusbyKcsqzFYULGyGOC9ngTAY3
 =/JzB
 -----END PGP SIGNATURE-----

Merge tag '2.27.1' into detsys-main

Tagging release 2.27.1
This commit is contained in:
Eelco Dolstra 2025-03-24 21:28:03 +01:00
commit dab0ff4f9e
200 changed files with 4734 additions and 1977 deletions

View file

@ -19,10 +19,6 @@
# include "namespaces.hh"
#endif
#ifndef _WIN32
# include <sys/resource.h>
#endif
namespace nix {
unsigned int getMaxCPU()
@ -55,11 +51,11 @@ unsigned int getMaxCPU()
//////////////////////////////////////////////////////////////////////
#ifndef _WIN32
size_t savedStackSize = 0;
void setStackSize(size_t stackSize)
{
#ifndef _WIN32
struct rlimit limit;
if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) {
savedStackSize = limit.rlim_cur;
@ -77,31 +73,8 @@ void setStackSize(size_t stackSize)
);
}
}
#else
ULONG_PTR stackLow, stackHigh;
GetCurrentThreadStackLimits(&stackLow, &stackHigh);
ULONG maxStackSize = stackHigh - stackLow;
ULONG currStackSize = 0;
// This retrieves the current promised stack size
SetThreadStackGuarantee(&currStackSize);
if (currStackSize < stackSize) {
savedStackSize = currStackSize;
ULONG newStackSize = std::min(static_cast<ULONG>(stackSize), maxStackSize);
if (SetThreadStackGuarantee(&newStackSize) == 0) {
logger->log(
lvlError,
HintFmt(
"Failed to increase stack size from %1% to %2% (maximum allowed stack size: %3%): %4%",
savedStackSize,
stackSize,
maxStackSize,
std::to_string(GetLastError())
).str()
);
}
}
#endif
}
#endif
void restoreProcessContext(bool restoreMounts)
{

View file

@ -17,10 +17,13 @@ namespace nix {
*/
unsigned int getMaxCPU();
// It does not seem possible to dynamically change stack size on Windows.
#ifndef _WIN32
/**
* Change the stack size.
*/
void setStackSize(size_t stackSize);
#endif
/**
* Restore the original inherited Unix process context (such as signal

View file

@ -50,6 +50,14 @@ struct LinesOfCode {
std::optional<std::string> nextLineOfCode;
};
/* NOTE: position.hh recursively depends on source-path.hh -> source-accessor.hh
-> hash.hh -> config.hh -> experimental-features.hh -> error.hh -> Pos.
There are other such cycles.
Thus, Pos has to be an incomplete type in this header. But since ErrorInfo/Trace
have to refer to Pos, they have to use pointer indirection via std::shared_ptr
to break the recursive header dependency.
FIXME: Untangle this mess. Should there be AbstractPos as there used to be before
4feb7d9f71? */
struct Pos;
void printCodeLines(std::ostream & out,

View file

@ -24,7 +24,7 @@ struct ExperimentalFeatureDetails
* feature, we either have no issue at all if few features are not added
* at the end of the list, or a proper merge conflict if they are.
*/
constexpr size_t numXpFeatures = 1 + static_cast<size_t>(Xp::PipeOperators);
constexpr size_t numXpFeatures = 1 + static_cast<size_t>(Xp::BLAKE3Hashes);
constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails = {{
{
@ -109,6 +109,8 @@ constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails
runCommand "foo"
{
# Optional: let Nix know "foo" requires the experimental feature
requiredSystemFeatures = [ "recursive-nix" ];
buildInputs = [ nix jq ];
NIX_PATH = "nixpkgs=${<nixpkgs>}";
}
@ -286,6 +288,14 @@ constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails
)",
.trackingUrl = "https://github.com/NixOS/nix/milestone/55",
},
{
.tag = Xp::BLAKE3Hashes,
.name = "blake3-hashes",
.description = R"(
Enables support for BLAKE3 hashes.
)",
.trackingUrl = "",
},
}};
static_assert(

View file

@ -35,6 +35,7 @@ enum struct ExperimentalFeature
MountedSSHStore,
VerifiedFetches,
PipeOperators,
BLAKE3Hashes,
};
extern std::set<std::string> stabilizedFeatures;

View file

@ -1,6 +1,3 @@
#include "file-system.hh"
#include "signals.hh"
#include "finally.hh"
#include "serialise.hh"
#include "util.hh"

View file

@ -1,6 +1,7 @@
#include <iostream>
#include <cstring>
#include <blake3.h>
#include <openssl/crypto.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
@ -8,6 +9,7 @@
#include "args.hh"
#include "hash.hh"
#include "archive.hh"
#include "config.hh"
#include "split.hh"
#include <sys/types.h>
@ -20,6 +22,7 @@ namespace nix {
static size_t regularHashSize(HashAlgorithm type) {
switch (type) {
case HashAlgorithm::BLAKE3: return blake3HashSize;
case HashAlgorithm::MD5: return md5HashSize;
case HashAlgorithm::SHA1: return sha1HashSize;
case HashAlgorithm::SHA256: return sha256HashSize;
@ -29,12 +32,15 @@ static size_t regularHashSize(HashAlgorithm type) {
}
const std::set<std::string> hashAlgorithms = {"md5", "sha1", "sha256", "sha512" };
const std::set<std::string> hashAlgorithms = {"blake3", "md5", "sha1", "sha256", "sha512" };
const std::set<std::string> hashFormats = {"base64", "nix32", "base16", "sri" };
Hash::Hash(HashAlgorithm algo) : algo(algo)
Hash::Hash(HashAlgorithm algo, const ExperimentalFeatureSettings & xpSettings) : algo(algo)
{
if (algo == HashAlgorithm::BLAKE3) {
xpSettings.require(Xp::BLAKE3Hashes);
}
hashSize = regularHashSize(algo);
assert(hashSize <= maxHashSize);
memset(hash, 0, maxHashSize);
@ -284,6 +290,7 @@ Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashAlgorithm> ha
union Ctx
{
blake3_hasher blake3;
MD5_CTX md5;
SHA_CTX sha1;
SHA256_CTX sha256;
@ -293,7 +300,8 @@ union Ctx
static void start(HashAlgorithm ha, Ctx & ctx)
{
if (ha == HashAlgorithm::MD5) MD5_Init(&ctx.md5);
if (ha == HashAlgorithm::BLAKE3) blake3_hasher_init(&ctx.blake3);
else if (ha == HashAlgorithm::MD5) MD5_Init(&ctx.md5);
else if (ha == HashAlgorithm::SHA1) SHA1_Init(&ctx.sha1);
else if (ha == HashAlgorithm::SHA256) SHA256_Init(&ctx.sha256);
else if (ha == HashAlgorithm::SHA512) SHA512_Init(&ctx.sha512);
@ -303,7 +311,8 @@ static void start(HashAlgorithm ha, Ctx & ctx)
static void update(HashAlgorithm ha, Ctx & ctx,
std::string_view data)
{
if (ha == HashAlgorithm::MD5) MD5_Update(&ctx.md5, data.data(), data.size());
if (ha == HashAlgorithm::BLAKE3) blake3_hasher_update(&ctx.blake3, data.data(), data.size());
else if (ha == HashAlgorithm::MD5) MD5_Update(&ctx.md5, data.data(), data.size());
else if (ha == HashAlgorithm::SHA1) SHA1_Update(&ctx.sha1, data.data(), data.size());
else if (ha == HashAlgorithm::SHA256) SHA256_Update(&ctx.sha256, data.data(), data.size());
else if (ha == HashAlgorithm::SHA512) SHA512_Update(&ctx.sha512, data.data(), data.size());
@ -312,24 +321,24 @@ static void update(HashAlgorithm ha, Ctx & ctx,
static void finish(HashAlgorithm ha, Ctx & ctx, unsigned char * hash)
{
if (ha == HashAlgorithm::MD5) MD5_Final(hash, &ctx.md5);
if (ha == HashAlgorithm::BLAKE3) blake3_hasher_finalize(&ctx.blake3, hash, BLAKE3_OUT_LEN);
else if (ha == HashAlgorithm::MD5) MD5_Final(hash, &ctx.md5);
else if (ha == HashAlgorithm::SHA1) SHA1_Final(hash, &ctx.sha1);
else if (ha == HashAlgorithm::SHA256) SHA256_Final(hash, &ctx.sha256);
else if (ha == HashAlgorithm::SHA512) SHA512_Final(hash, &ctx.sha512);
}
Hash hashString(HashAlgorithm ha, std::string_view s)
Hash hashString(
HashAlgorithm ha, std::string_view s, const ExperimentalFeatureSettings & xpSettings)
{
Ctx ctx;
Hash hash(ha);
Hash hash(ha, xpSettings);
start(ha, ctx);
update(ha, ctx, s);
finish(ha, ctx, hash.hash);
return hash;
}
Hash hashFile(HashAlgorithm ha, const Path & path)
{
HashSink sink(ha);
@ -426,6 +435,7 @@ std::string_view printHashFormat(HashFormat HashFormat)
std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s)
{
if (s == "blake3") return HashAlgorithm::BLAKE3;
if (s == "md5") return HashAlgorithm::MD5;
if (s == "sha1") return HashAlgorithm::SHA1;
if (s == "sha256") return HashAlgorithm::SHA256;
@ -439,12 +449,13 @@ HashAlgorithm parseHashAlgo(std::string_view s)
if (opt_h)
return *opt_h;
else
throw UsageError("unknown hash algorithm '%1%', expect 'md5', 'sha1', 'sha256', or 'sha512'", s);
throw UsageError("unknown hash algorithm '%1%', expect 'blake3', 'md5', 'sha1', 'sha256', or 'sha512'", s);
}
std::string_view printHashAlgo(HashAlgorithm ha)
{
switch (ha) {
case HashAlgorithm::BLAKE3: return "blake3";
case HashAlgorithm::MD5: return "md5";
case HashAlgorithm::SHA1: return "sha1";
case HashAlgorithm::SHA256: return "sha256";

View file

@ -1,6 +1,7 @@
#pragma once
///@file
#include "config.hh"
#include "types.hh"
#include "serialise.hh"
#include "file-system.hh"
@ -11,9 +12,9 @@ namespace nix {
MakeError(BadHash, Error);
enum struct HashAlgorithm : char { MD5 = 42, SHA1, SHA256, SHA512 };
enum struct HashAlgorithm : char { MD5 = 42, SHA1, SHA256, SHA512, BLAKE3 };
const int blake3HashSize = 32;
const int md5HashSize = 16;
const int sha1HashSize = 20;
const int sha256HashSize = 32;
@ -52,7 +53,7 @@ struct Hash
/**
* Create a zero-filled hash object.
*/
explicit Hash(HashAlgorithm algo);
explicit Hash(HashAlgorithm algo, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Parse the hash from a string representation in the format
@ -157,7 +158,7 @@ std::string printHash16or32(const Hash & hash);
/**
* Compute the hash of the given string.
*/
Hash hashString(HashAlgorithm ha, std::string_view s);
Hash hashString(HashAlgorithm ha, std::string_view s, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
/**
* Compute the hash of the given file, hashing its contents directly.

View file

@ -3,6 +3,7 @@
#include "types.hh"
#include <nlohmann/json_fwd.hpp>
#include <iostream>
#include <optional>
namespace nix {
@ -38,6 +39,15 @@ std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & m
return std::optional { map.at(key) };
}
std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & map, const std::string & key)
{
auto value = valueAt(map, key);
if (value.is_null())
return std::nullopt;
return std::optional { std::move(value) };
}
const nlohmann::json * getNullable(const nlohmann::json & value)
{

View file

@ -25,6 +25,7 @@ const nlohmann::json & valueAt(
const std::string & key);
std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & value, const std::string & key);
std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & value, const std::string & key);
/**
* Downcast the json object, failing with a nice error if the conversion fails.
@ -69,6 +70,9 @@ struct json_avoids_null<std::vector<T>> : std::true_type {};
template<typename T>
struct json_avoids_null<std::list<T>> : std::true_type {};
template<typename T>
struct json_avoids_null<std::set<T>> : std::true_type {};
template<typename K, typename V>
struct json_avoids_null<std::map<K, V>> : std::true_type {};

View file

@ -29,7 +29,7 @@ void setCurActivity(const ActivityId activityId)
curActivity = activityId;
}
Logger * logger = makeSimpleLogger(true);
std::unique_ptr<Logger> logger = makeSimpleLogger(true);
void Logger::warn(const std::string & msg)
{
@ -43,6 +43,19 @@ void Logger::writeToStdout(std::string_view s)
writeFull(standard_out, "\n");
}
Logger::Suspension Logger::suspend()
{
pause();
return Suspension { ._finalize = {[this](){this->resume();}} };
}
std::optional<Logger::Suspension> Logger::suspendIf(bool cond)
{
if (cond)
return suspend();
return {};
}
class SimpleLogger : public Logger
{
public:
@ -128,9 +141,9 @@ void writeToStderr(std::string_view s)
}
}
Logger * makeSimpleLogger(bool printBuildLogs)
std::unique_ptr<Logger> makeSimpleLogger(bool printBuildLogs)
{
return new SimpleLogger(printBuildLogs);
return std::make_unique<SimpleLogger>(printBuildLogs);
}
std::atomic<uint64_t> nextId{0};
@ -167,9 +180,9 @@ void to_json(nlohmann::json & json, std::shared_ptr<Pos> pos)
}
struct JSONLogger : Logger {
Logger & prevLogger;
Descriptor fd;
JSONLogger(Logger & prevLogger) : prevLogger(prevLogger) { }
JSONLogger(Descriptor fd) : fd(fd) { }
bool isVerbose() override {
return true;
@ -190,7 +203,7 @@ struct JSONLogger : Logger {
void write(const nlohmann::json & json)
{
prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
writeLine(fd, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
}
void log(Verbosity lvl, std::string_view s) override
@ -262,9 +275,9 @@ struct JSONLogger : Logger {
}
};
Logger * makeJSONLogger(Logger & prevLogger)
std::unique_ptr<Logger> makeJSONLogger(Descriptor fd)
{
return new JSONLogger(prevLogger);
return std::make_unique<JSONLogger>(fd);
}
static Logger::Fields getFields(nlohmann::json & json)

View file

@ -3,6 +3,8 @@
#include "error.hh"
#include "config.hh"
#include "file-descriptor.hh"
#include "finally.hh"
#include <nlohmann/json_fwd.hpp>
@ -74,6 +76,17 @@ public:
virtual void stop() { };
/**
* Guard object to resume the logger when done.
*/
struct Suspension {
Finally<std::function<void()>> _finalize;
};
Suspension suspend();
std::optional<Suspension> suspendIf(bool cond);
virtual void pause() { };
virtual void resume() { };
@ -179,11 +192,11 @@ struct PushActivity
~PushActivity() { setCurActivity(prevAct); }
};
extern Logger * logger;
extern std::unique_ptr<Logger> logger;
Logger * makeSimpleLogger(bool printBuildLogs = true);
std::unique_ptr<Logger> makeSimpleLogger(bool printBuildLogs = true);
Logger * makeJSONLogger(Logger & prevLogger);
std::unique_ptr<Logger> makeJSONLogger(Descriptor fd);
/**
* @param source A noun phrase describing the source of the message, e.g. "the builder".

View file

@ -62,6 +62,12 @@ elif host_machine.system() == 'sunos'
deps_other += [socket, network_service_library]
endif
blake3 = dependency(
'libblake3',
version: '>= 1.5.5',
)
deps_private += blake3
boost = dependency(
'boost',
modules : ['context', 'coroutine'],
@ -147,7 +153,9 @@ sources = files(
'json-utils.cc',
'logging.cc',
'memory-source-accessor.cc',
'mounted-source-accessor.cc',
'position.cc',
'pos-table.cc',
'posix-source-accessor.cc',
'references.cc',
'serialise.cc',
@ -160,6 +168,7 @@ sources = files(
'tarfile.cc',
'terminal.cc',
'thread-pool.cc',
'union-source-accessor.cc',
'unix-domain-socket.cc',
'url.cc',
'users.cc',
@ -217,6 +226,8 @@ headers = [config_h] + files(
'muxable-pipe.hh',
'os-string.hh',
'pool.hh',
'pos-idx.hh',
'pos-table.hh',
'position.hh',
'posix-source-accessor.hh',
'processes.hh',

View file

@ -0,0 +1,79 @@
#include "source-accessor.hh"
namespace nix {
/**
 * A source accessor that overlays other accessors at specific mount
 * points, analogous to a mount table: every path is served by the
 * accessor mounted at its nearest enclosing mount point.
 */
struct MountedSourceAccessor : SourceAccessor
{
    std::map<CanonPath, ref<SourceAccessor>> mounts;

    MountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> _mounts)
        : mounts(std::move(_mounts))
    {
        displayPrefix.clear();

        // Currently we require a root filesystem. This could be relaxed.
        assert(mounts.contains(CanonPath::root));

        // FIXME: return dummy parent directories automatically?
    }

    std::string readFile(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return inner->readFile(rest);
    }

    std::optional<Stat> maybeLstat(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return inner->maybeLstat(rest);
    }

    DirEntries readDirectory(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return inner->readDirectory(rest);
    }

    std::string readLink(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return inner->readLink(rest);
    }

    std::string showPath(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return displayPrefix + inner->showPath(rest) + displaySuffix;
    }

    /**
     * Find the nearest enclosing mount point of `path` and return that
     * accessor together with the remainder of the path relative to the
     * mount point.
     */
    std::pair<ref<SourceAccessor>, CanonPath> resolve(CanonPath path)
    {
        // Components stripped off while walking up towards the mount
        // point, collected in reverse (leaf-first) order.
        std::vector<std::string> strippedComponents;
        for (;;) {
            if (auto mount = mounts.find(path); mount != mounts.end()) {
                std::reverse(strippedComponents.begin(), strippedComponents.end());
                return {mount->second, CanonPath(strippedComponents)};
            }
            // Guaranteed to terminate: the root is always a mount point
            // (asserted in the constructor).
            assert(!path.isRoot());
            strippedComponents.push_back(std::string(*path.baseName()));
            path.pop();
        }
    }

    std::optional<std::filesystem::path> getPhysicalPath(const CanonPath & path) override
    {
        auto [inner, rest] = resolve(path);
        return inner->getPhysicalPath(rest);
    }
};
/**
 * Create a source accessor that dispatches each path to the accessor
 * mounted at its nearest enclosing mount point.
 */
ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts)
{
    auto accessor = make_ref<MountedSourceAccessor>(std::move(mounts));
    return accessor;
}
}

View file

@ -6,6 +6,7 @@
boost,
brotli,
libarchive,
libblake3,
libcpuid,
libsodium,
nlohmann_json,
@ -42,6 +43,7 @@ mkMesonLibrary (finalAttrs: {
buildInputs = [
brotli
libblake3
libsodium
openssl
] ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;

65
src/libutil/pos-idx.hh Normal file
View file

@ -0,0 +1,65 @@
#pragma once
///@file
#include <cinttypes>
#include <functional>
namespace nix {
/**
 * An opaque, compact identifier for a source position.
 *
 * The value 0 is reserved to mean "no position" (see `noPos`); any
 * other value is an index handed out by `PosTable`, which alone can
 * mint non-null instances.
 */
class PosIdx
{
    // NOTE(review): spelling "Acessors" matches the friend's actual
    // declaration elsewhere in the tree — do not "fix" it here alone.
    friend struct LazyPosAcessors;
    friend class PosTable;
    friend class std::hash<PosIdx>;

private:
    // 0 = "no position"; otherwise an index assigned by PosTable.
    uint32_t id;

    // Private: only PosTable (and friends) may create non-null values.
    explicit PosIdx(uint32_t id)
        : id(id)
    {
    }

public:
    // Constructs the null position.
    PosIdx()
        : id(0)
    {
    }

    // True iff this refers to an actual position.
    explicit operator bool() const
    {
        return id > 0;
    }

    // Ordering/equality compare the raw indices.
    auto operator<=>(const PosIdx other) const
    {
        return id <=> other.id;
    }

    bool operator==(const PosIdx other) const
    {
        return id == other.id;
    }

    // Hash of the raw index; used by std::hash<PosIdx> below.
    size_t hash() const noexcept
    {
        return std::hash<uint32_t>{}(id);
    }
};
inline PosIdx noPos = {};
}
namespace std {

/**
 * Standard-library hash support, delegating to PosIdx::hash() so that
 * PosIdx can be used as a key in unordered containers.
 */
template<>
struct hash<nix::PosIdx>
{
    std::size_t operator()(nix::PosIdx pos) const noexcept
    {
        return pos.hash();
    }
};

} // namespace std

37
src/libutil/pos-table.cc Normal file
View file

@ -0,0 +1,37 @@
#include "pos-table.hh"
#include <algorithm>
namespace nix {
/* Position table. */
/**
 * Convert a PosIdx back into a full Pos (origin + line + column).
 *
 * The table of line-start byte offsets for each origin is computed
 * lazily on first use and cached in `lines` (under its lock), so later
 * lookups into the same origin only pay for a binary search.
 */
Pos PosTable::operator[](PosIdx p) const
{
    auto origin = resolve(p);
    if (!origin)
        return {};

    const auto offset = origin->offsetOf(p);

    Pos result{0, 0, origin->origin};
    auto lines = this->lines.lock();

    /* Fix: bind a reference into the cache. The previous `auto
       linesForInput = ...` copied the cached vector, appended to the
       copy and threw it away, so the cache stayed permanently empty and
       the entire source was re-scanned on every single lookup. */
    auto & linesForInput = (*lines)[origin->offset];

    if (linesForInput.empty()) {
        auto source = result.getSource().value_or("");
        const char * begin = source.data();
        for (Pos::LinesIterator it(source), end; it != end; it++)
            linesForInput.push_back(it->data() - begin);
        if (linesForInput.empty())
            linesForInput.push_back(0);
    }

    // The first line starts at byte 0 and is always present, so
    // upper_bound never returns begin() and std::prev is safe.
    auto lineStartOffset = std::prev(std::upper_bound(linesForInput.begin(), linesForInput.end(), offset));

    result.line = 1 + (lineStartOffset - linesForInput.begin());
    result.column = 1 + (offset - *lineStartOffset);

    return result;
}
}

100
src/libutil/pos-table.hh Normal file
View file

@ -0,0 +1,100 @@
#pragma once
///@file
#include <cstdint>
#include <vector>
#include "pos-idx.hh"
#include "position.hh"
#include "sync.hh"
namespace nix {
/**
 * Interns source positions as compact PosIdx values.
 *
 * All parsed inputs are laid out in one virtual concatenation of
 * bytes; a non-null PosIdx is 1 + the byte offset of the position in
 * that concatenation (see add()). Line/column information is only
 * recovered on demand via operator[].
 */
class PosTable
{
public:
    /**
     * One registered input: its Pos::Origin plus the range of global
     * byte offsets [offset, offset + size] assigned to it.
     */
    class Origin
    {
        friend PosTable;

    private:
        // Start of this input in the global byte-offset space.
        uint32_t offset;

        // Only PosTable::addOrigin creates these.
        Origin(Pos::Origin origin, uint32_t offset, size_t size)
            : offset(offset)
            , origin(origin)
            , size(size)
        {
        }

    public:
        const Pos::Origin origin;
        const size_t size;

        // Byte offset of `p` within this input (undoes the +1 and the
        // global offset applied in PosTable::add()).
        uint32_t offsetOf(PosIdx p) const
        {
            return p.id - 1 - offset;
        }
    };

private:
    // Byte offsets of the line starts of one input.
    using Lines = std::vector<uint32_t>;

    // Origins keyed by their starting global offset; the first key is
    // always 0 (relied upon by resolve()).
    std::map<uint32_t, Origin> origins;

    // Cache of line-start tables, keyed like `origins`; filled lazily
    // by operator[]. mutable + Sync: operator[] is const but caches.
    mutable Sync<std::map<uint32_t, Lines>> lines;

    // Map a PosIdx back to the Origin whose range contains it, or
    // nullptr for the null position.
    const Origin * resolve(PosIdx p) const
    {
        if (p.id == 0)
            return nullptr;

        const auto idx = p.id - 1;
        /* we want the last key <= idx, so we'll take prev(first key > idx).
           this is guaranteed to never rewind origin.begin because the first
           key is always 0. */
        const auto pastOrigin = origins.upper_bound(idx);
        return &std::prev(pastOrigin)->second;
    }

public:
    /**
     * Register an input of `size` bytes and reserve its slice of the
     * global offset space. If the reservation would overflow, a dummy
     * zero-sized Origin is returned and nothing is stored.
     */
    Origin addOrigin(Pos::Origin origin, size_t size)
    {
        uint32_t offset = 0;
        if (auto it = origins.rbegin(); it != origins.rend())
            offset = it->first + it->second.size;
        // +1 because all PosIdx are offset by 1 to begin with, and
        // another +1 to ensure that all origins can point to EOF, eg
        // on (invalid) empty inputs.
        if (2 + offset + size < offset)
            return Origin{origin, offset, 0};
        return origins.emplace(offset, Origin{origin, offset, size}).first->second;
    }

    /**
     * Intern byte offset `offset` within `origin`. Out-of-range
     * offsets yield the null PosIdx.
     */
    PosIdx add(const Origin & origin, size_t offset)
    {
        if (offset > origin.size)
            return PosIdx();
        return PosIdx(1 + origin.offset + offset);
    }

    /**
     * Convert a byte-offset PosIdx into a Pos with line/column information.
     *
     * @param p Byte offset into the virtual concatenation of all parsed contents
     * @return Position
     *
     * @warning Very expensive to call, as this has to read the entire source
     * into memory each time. Call this only if absolutely necessary. Prefer
     * to keep PosIdx around instead of needlessly converting it into Pos by
     * using this lookup method.
     */
    Pos operator[](PosIdx p) const;

    /**
     * Cheap lookup of just the origin of `p` (no line/column
     * computation); monostate for the null position.
     */
    Pos::Origin originOf(PosIdx p) const
    {
        if (auto o = resolve(p))
            return o->origin;
        return std::monostate{};
    }
};
}

View file

@ -66,6 +66,13 @@ std::optional<std::string> Pos::getSource() const
}, origin);
}
std::optional<SourcePath> Pos::getSourcePath() const
{
if (auto * path = std::get_if<SourcePath>(&origin))
return *path;
return std::nullopt;
}
void Pos::print(std::ostream & out, bool showOrigin) const
{
if (showOrigin) {

View file

@ -50,6 +50,7 @@ struct Pos
explicit operator bool() const { return line > 0; }
/* TODO: Why std::shared_ptr<Pos> and not std::shared_ptr<const Pos>? */
operator std::shared_ptr<Pos>() const;
/**
@ -69,9 +70,7 @@ struct Pos
/**
* Get the SourcePath, if the source was loaded from a file.
*/
std::optional<SourcePath> getSourcePath() const {
return *std::get_if<SourcePath>(&origin);
}
std::optional<SourcePath> getSourcePath() const;
struct LinesIterator {
using difference_type = size_t;

View file

@ -227,8 +227,7 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
throw EndOfFile("coroutine has finished");
}
size_t n = std::min(cur.size(), out_len);
memcpy(out, cur.data(), n);
size_t n = cur.copy(out, out_len);
cur.remove_prefix(n);
return n;
});
@ -260,7 +259,7 @@ std::unique_ptr<Source> sinkToSource(
{
struct SinkToSource : Source
{
typedef boost::coroutines2::coroutine<std::string> coro_t;
typedef boost::coroutines2::coroutine<std::string_view> coro_t;
std::function<void(Sink &)> fun;
std::function<void()> eof;
@ -271,33 +270,37 @@ std::unique_ptr<Source> sinkToSource(
{
}
std::string cur;
size_t pos = 0;
std::string_view cur;
size_t read(char * data, size_t len) override
{
if (!coro) {
bool hasCoro = coro.has_value();
if (!hasCoro) {
coro = coro_t::pull_type([&](coro_t::push_type & yield) {
LambdaSink sink([&](std::string_view data) {
if (!data.empty()) yield(std::string(data));
if (!data.empty()) {
yield(data);
}
});
fun(sink);
});
}
if (!*coro) { eof(); unreachable(); }
if (pos == cur.size()) {
if (!cur.empty()) {
if (cur.empty()) {
if (hasCoro) {
(*coro)();
}
cur = coro->get();
pos = 0;
if (*coro) {
cur = coro->get();
} else {
coro.reset();
eof();
unreachable();
}
}
auto n = std::min(cur.size() - pos, len);
memcpy(data, cur.data() + pos, n);
pos += n;
size_t n = cur.copy(data, len);
cur.remove_prefix(n);
return n;
}

View file

@ -214,4 +214,12 @@ ref<SourceAccessor> getFSSourceAccessor();
*/
ref<SourceAccessor> makeFSSourceAccessor(std::filesystem::path root);
ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts);
/**
* Construct an accessor that presents a "union" view of a vector of
* underlying accessors. Earlier accessors take precedence over later.
*/
ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors);
}

View file

@ -0,0 +1,82 @@
#include "source-accessor.hh"
namespace nix {
struct UnionSourceAccessor : SourceAccessor
{
std::vector<ref<SourceAccessor>> accessors;
UnionSourceAccessor(std::vector<ref<SourceAccessor>> _accessors)
: accessors(std::move(_accessors))
{
displayPrefix.clear();
}
std::string readFile(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return accessor->readFile(path);
}
throw FileNotFound("path '%s' does not exist", showPath(path));
}
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return st;
}
return std::nullopt;
}
DirEntries readDirectory(const CanonPath & path) override
{
DirEntries result;
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (!st)
continue;
for (auto & entry : accessor->readDirectory(path))
// Don't override entries from previous accessors.
result.insert(entry);
}
return result;
}
std::string readLink(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return accessor->readLink(path);
}
throw FileNotFound("path '%s' does not exist", showPath(path));
}
std::string showPath(const CanonPath & path) override
{
for (auto & accessor : accessors)
return accessor->showPath(path);
return SourceAccessor::showPath(path);
}
std::optional<std::filesystem::path> getPhysicalPath(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto p = accessor->getPhysicalPath(path);
if (p)
return p;
}
return std::nullopt;
}
};
/**
 * Create a "union" view over the given accessors; earlier accessors
 * shadow later ones.
 */
ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors)
{
    auto result = make_ref<UnionSourceAccessor>(std::move(accessors));
    return result;
}
}

View file

@ -5,9 +5,27 @@
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
namespace nix {
namespace {
// This function is needed to handle non-blocking reads/writes. This is needed in the buildhook, because
// somehow the json logger file descriptor ends up being non-blocking and breaks remote building.
// TODO: get rid of buildhook and remove this function again (https://github.com/NixOS/nix/issues/12688)
/**
 * Block until `fd` reports one of `events` (or an error/hangup).
 *
 * Callers invoke this after read/write returned EAGAIN on a descriptor
 * that was handed to us in non-blocking mode, then retry the I/O.
 * Throws SysError if poll(2) itself fails.
 */
void pollFD(int fd, int events)
{
    struct pollfd request = {};
    request.fd = fd;
    request.events = events;
    if (poll(&request, 1, -1) == -1)
        throw SysError("poll on file descriptor failed");
}
}
std::string readFile(int fd)
{
struct stat st;
@ -17,14 +35,18 @@ std::string readFile(int fd)
return drainFD(fd, true, st.st_size);
}
void readFull(int fd, char * buf, size_t count)
{
while (count) {
checkInterrupt();
ssize_t res = read(fd, buf, count);
if (res == -1) {
if (errno == EINTR) continue;
switch (errno) {
case EINTR: continue;
case EAGAIN:
pollFD(fd, POLLIN);
continue;
}
throw SysError("reading from file");
}
if (res == 0) throw EndOfFile("unexpected end-of-file");
@ -39,8 +61,15 @@ void writeFull(int fd, std::string_view s, bool allowInterrupts)
while (!s.empty()) {
if (allowInterrupts) checkInterrupt();
ssize_t res = write(fd, s.data(), s.size());
if (res == -1 && errno != EINTR)
if (res == -1) {
switch (errno) {
case EINTR: continue;
case EAGAIN:
pollFD(fd, POLLOUT);
continue;
}
throw SysError("writing to file");
}
if (res > 0)
s.remove_prefix(res);
}
@ -56,8 +85,15 @@ std::string readLine(int fd, bool eofOk)
// FIXME: inefficient
ssize_t rd = read(fd, &ch, 1);
if (rd == -1) {
if (errno != EINTR)
switch (errno) {
case EINTR: continue;
case EAGAIN: {
pollFD(fd, POLLIN);
continue;
}
default:
throw SysError("reading a line");
}
} else if (rd == 0) {
if (eofOk)
return s;

View file

@ -14,35 +14,74 @@
namespace nix {
class MonitorFdHup
{
private:
std::thread thread;
Pipe notifyPipe;
public:
MonitorFdHup(int fd)
{
thread = std::thread([fd]() {
notifyPipe.create();
thread = std::thread([this, fd]() {
while (true) {
/* Wait indefinitely until a POLLHUP occurs. */
struct pollfd fds[1];
fds[0].fd = fd;
/* Polling for no specific events (i.e. just waiting
for an error/hangup) doesn't work on macOS
anymore. So wait for read events and ignore
them. */
fds[0].events =
#ifdef __APPLE__
POLLRDNORM
#else
// There is a POSIX violation on macOS: you have to listen for
// at least POLLHUP to receive HUP events for a FD. POSIX says
// this is not so, and you should just receive them regardless.
// However, as of our testing on macOS 14.5, the events do not
// get delivered if in the all-bits-unset case, but do get
// delivered if `POLLHUP` is set.
//
// This bug filed as rdar://37537852
// (https://openradar.appspot.com/37537852).
//
// macOS's own man page
// (https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/poll.2.html)
// additionally says that `POLLHUP` is ignored as an input. It
// seems the likely order of events here was
//
// 1. macOS did not follow the POSIX spec
//
// 2. Somebody ninja-fixed this other spec violation to make
// sure `POLLHUP` was not forgotten about, even though they
// "fixed" this issue in a spec-non-compliant way. Whatever,
// we'll use the fix.
//
// Relevant code, current version, which shows the `POLLHUP` handling:
// https://github.com/apple-oss-distributions/xnu/blob/94d3b452840153a99b38a3a9659680b2a006908e/bsd/kern/sys_generic.c#L1751-L1758
//
// The `POLLHUP` detection was added in
// https://github.com/apple-oss-distributions/xnu/commit/e13b1fa57645afc8a7b2e7d868fe9845c6b08c40#diff-a5aa0b0e7f4d866ca417f60702689fc797e9cdfe33b601b05ccf43086c35d395R1468
// That means added in 2007 or earlier. Should be good enough
// for us.
short hangup_events =
#ifdef __APPLE__
POLLHUP
#else
0
#endif
#endif
;
auto count = poll(fds, 1, -1);
if (count == -1)
unreachable();
/* Wait indefinitely until a POLLHUP occurs. */
constexpr size_t num_fds = 2;
struct pollfd fds[num_fds] = {
{
.fd = fd,
.events = hangup_events,
},
{
.fd = notifyPipe.readSide.get(),
.events = hangup_events,
},
};
auto count = poll(fds, num_fds, -1);
if (count == -1) {
if (errno == EINTR || errno == EAGAIN)
continue;
throw SysError("failed to poll() in MonitorFdHup");
}
/* This shouldn't happen, but can on macOS due to a bug.
See rdar://37550628.
@ -50,25 +89,42 @@ public:
coordination with the main thread if spinning proves
too harmful.
*/
if (count == 0) continue;
if (count == 0)
continue;
if (fds[0].revents & POLLHUP) {
unix::triggerInterrupt();
break;
}
/* This will only happen on macOS. We sleep a bit to
avoid waking up too often if the client is sending
input. */
sleep(1);
if (fds[1].revents & POLLHUP) {
break;
}
// On macOS, (jade thinks that) it is possible (although not
// observed on macOS 14.5) that in some limited cases on buggy
// kernel versions, all the non-POLLHUP events for the socket
// get delivered.
//
// We could sleep to avoid pointlessly spinning a thread on
// those, but this opens up a different problem, which is that
// if we do sleep, it will be longer before the daemon fork for a
// client exits. Imagine a sequential shell script, running Nix
// commands, each of which talks to the daemon. If the previous
// command registered a temp root, exits, and then the next
// command issues a delete request before the temp root is
// cleaned up, that delete request might fail.
//
// Not sleeping doesn't actually fix the race condition --- we
// would need to block on the old connections' temp roots being
// cleaned up in the new connection --- but it does make it
// much less likely.
}
});
};
~MonitorFdHup()
{
pthread_cancel(thread.native_handle());
notifyPipe.writeSide.close();
thread.join();
}
};
}

View file

@ -200,8 +200,15 @@ static int childEntry(void * arg)
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
{
ChildWrapperFunction wrapper = [&] {
if (!options.allowVfork)
if (!options.allowVfork) {
/* Set a simple logger, while releasing (not destroying)
the parent logger. We don't want to run the parent
logger's destructor since that will crash (e.g. when
~ProgressBar() tries to join a thread that doesn't
exist. */
logger.release();
logger = makeSimpleLogger();
}
try {
#if __linux__
if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1)
@ -299,15 +306,7 @@ void runProgram2(const RunOptions & options)
// case), so we can't use it if we alter the environment
processOptions.allowVfork = !options.environment;
std::optional<Finally<std::function<void()>>> resumeLoggerDefer;
if (options.isInteractive) {
logger->pause();
resumeLoggerDefer.emplace(
[]() {
logger->resume();
}
);
}
auto suspension = logger->suspendIf(options.isInteractive);
/* Fork. */
Pid pid = startProcess([&] {

View file

@ -312,11 +312,7 @@ void runProgram2(const RunOptions & options)
// TODO: Implement shebang / program interpreter lookup on Windows
auto interpreter = getProgramInterpreter(realProgram);
std::optional<Finally<std::function<void()>>> resumeLoggerDefer;
if (options.isInteractive) {
logger->pause();
resumeLoggerDefer.emplace([]() { logger->resume(); });
}
auto suspension = logger->suspendIf(options.isInteractive);
Pid pid = spawnProcess(interpreter.has_value() ? *interpreter : realProgram, options, out, in);