
Merge detsys-main

commit ef4e7df6a5 by Cole Helbling, 2025-06-27 14:01:28 -07:00
28 changed files with 326 additions and 79 deletions


@ -1 +1 @@
2.29.0
2.29.1


@ -1 +1 @@
3.6.5
3.6.8


@ -129,6 +129,9 @@
- [Contributing](development/contributing.md)
- [Determinate Nix Release Notes](release-notes-determinate/index.md)
- [Changes between Nix and Determinate Nix](release-notes-determinate/changes.md)<!-- next -->
- [Release 3.6.8 (2025-06-25)](release-notes-determinate/rl-3.6.8.md)
- [Release 3.6.7 (2025-06-24)](release-notes-determinate/rl-3.6.7.md)
- [Release 3.6.6 (2025-06-17)](release-notes-determinate/rl-3.6.6.md)
- [Release 3.6.5 (2025-06-16)](release-notes-determinate/rl-3.6.5.md)
- [Release 3.6.2 (2025-06-02)](release-notes-determinate/rl-3.6.2.md)
- [Release 3.6.1 (2025-05-24)](release-notes-determinate/rl-3.6.1.md)


@ -1,6 +1,6 @@
# Changes between Nix and Determinate Nix
This section lists the differences between upstream Nix 2.29 and Determinate Nix 3.6.5.<!-- differences -->
This section lists the differences between upstream Nix 2.29 and Determinate Nix 3.6.8.<!-- differences -->
* In Determinate Nix, flakes are stable. You no longer need to enable the `flakes` experimental feature.
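For illustration, a hedged sketch of what that difference looks like in practice (the `nixpkgs` flake reference is only an example): upstream Nix 2.x still gates flake commands behind experimental features, while Determinate Nix accepts them as-is.

```bash
# Upstream Nix 2.x: flake commands typically need the experimental-features flag.
nix --extra-experimental-features 'nix-command flakes' flake metadata nixpkgs

# Determinate Nix: the same command works without enabling anything.
nix flake metadata nixpkgs
```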
@ -72,3 +72,17 @@ This section lists the differences between upstream Nix 2.29 and Determinate Nix
* Improve error messages that use the hypothetical future tense of "will" by @lucperkins in [DeterminateSystems/nix-src#92](https://github.com/DeterminateSystems/nix-src/pull/92)
* Improve caching of inputs in dry-run mode by @edolstra in [DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98)
<!-- Determinate Nix version 3.6.6 -->
<!-- Determinate Nix version 3.6.7 -->
<!-- Determinate Nix version 3.6.8 -->
* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124)
* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126)


@ -0,0 +1,7 @@
# Release 3.6.6 (2025-06-17)
* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md).
## What's Changed
* No-op release on the nix-src side, due to a regression in determinate-nixd on nix-darwin.


@ -0,0 +1,17 @@
# Release 3.6.7 (2025-06-24)
* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md).
## What's Changed
### Security fixes
* Patched against GHSA-g948-229j-48j3
### Lazy trees
* Lazy trees now produce `flake.lock` files with NAR hashes unless `lazy-locks` is set to `true` by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
* Improved caching with lazy trees when using `--impure`, with enhanced testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
**Full Changelog**: [v3.6.6...v3.6.7](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.7)


@ -0,0 +1,12 @@
# Release 3.6.8 (2025-06-25)
* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md).
## What's Changed
* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124)
* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126)
**Full Changelog**: [v3.6.6...v3.6.8](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.8)


@ -262,6 +262,19 @@ struct EvalSettings : Config
R"(
If set to true, flakes and trees fetched by [`builtins.fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) are only copied to the Nix store when they're used as a dependency of a derivation. This avoids copying (potentially large) source trees unnecessarily.
)"};
// FIXME: this setting should really be in libflake, but it's
// currently needed in mountInput().
Setting<bool> lazyLocks{
this,
false,
"lazy-locks",
R"(
If enabled, Nix only includes NAR hashes in lock file entries if they're necessary to lock the input (i.e. when there is no other attribute that allows the content to be verified, like a Git revision).
This is not backward compatible with older versions of Nix.
If disabled, lock file entries always contain a NAR hash.
)"
};
};
/**

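As a usage sketch of the two settings documented above, assuming a Determinate Nix build where `lazy-trees` and `lazy-locks` are available (the flake path `./myflake` is a placeholder): `lazy-locks` only changes behavior once lazy trees are in use, and lock files written without NAR hashes are not readable by older Nix versions.

```bash
# Hedged sketch: enable lazy trees, then opt in to lazy locks for one update.
echo 'lazy-trees = true' >> /etc/nix/nix.conf

# Default: lock file entries still carry a NAR hash.
nix flake update --flake ./myflake

# With lazy-locks: NAR hashes are omitted where another attribute (e.g. a Git
# revision) already pins the input; not backward compatible with older Nix.
nix flake update --flake ./myflake --lazy-locks
```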

@ -91,7 +91,7 @@ StorePath EvalState::mountInput(
storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor);
if (requireLockable && (!settings.lazyTrees || !input.isLocked()) && !input.getNarHash())
if (requireLockable && (!settings.lazyTrees || !settings.lazyLocks || !input.isLocked()) && !input.getNarHash())
input.attrs.insert_or_assign("narHash", getNarHash()->to_string(HashFormat::SRI, true));
if (originalInput.getNarHash() && *getNarHash() != *originalInput.getNarHash())


@ -55,9 +55,13 @@ std::pair<StorePath, Hash> fetchToStore2(
}
debug("source path '%s' not in store", path);
}
} else
} else {
static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1";
if (barf)
throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter);
// FIXME: could still provide in-memory caching keyed on `SourcePath`.
debug("source path '%s' is uncacheable (%d, %d)", path, (bool) filter, (bool) fingerprint);
debug("source path '%s' is uncacheable", path);
}
Activity act(*logger, lvlChatty, actUnknown,
fmt(mode == FetchMode::DryRun ? "hashing '%s'" : "copying '%s' to the store", path));

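The `_NIX_TEST_BARF_ON_UNCACHEABLE` variable introduced above is a test-only switch: when it is set to `1`, a source path that cannot be cached (for example a dirty checkout with submodules, as exercised in the submodule test further down) becomes a hard error instead of a debug message. A hedged sketch of the pattern the updated tests use (`$someDirtyRepo` is a placeholder):

```bash
# Test-only escape hatch in fetchToStore2(): fail loudly on uncacheable paths.
export _NIX_TEST_BARF_ON_UNCACHEABLE=1

# An uncacheable source tree now aborts the evaluation...
(! nix eval --json "git+file://$someDirtyRepo?submodules=1#sub")

# ...unless the check is cleared for a single invocation, as the tests do.
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "git+file://$someDirtyRepo?submodules=1#sub"
```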

@ -860,7 +860,7 @@ struct GitInputScheme : InputScheme
return makeFingerprint(*rev);
else {
auto repoInfo = getRepoInfo(input);
if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) {
if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) {
/* Calculate a fingerprint that takes into account the
deleted and modified/added files. */
HashSink hashSink{HashAlgorithm::SHA512};
@ -873,7 +873,7 @@ struct GitInputScheme : InputScheme
writeString("deleted:", hashSink);
writeString(file.abs(), hashSink);
}
return makeFingerprint(*repoInfo.workdirInfo.headRev)
return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev))
+ ";d=" + hashSink.finish().first.to_string(HashFormat::Base16, false);
}
return std::nullopt;


@ -85,7 +85,6 @@ static void parseFlakeInputAttr(
static FlakeInput parseFlakeInput(
EvalState & state,
std::string_view inputName,
Value * value,
const PosIdx pos,
const InputAttrPath & lockRootAttrPath,
@ -155,8 +154,8 @@ static FlakeInput parseFlakeInput(
input.ref = parseFlakeRef(state.fetchSettings, *url, {}, true, input.isFlake, true);
}
if (!input.follows && !input.ref)
input.ref = FlakeRef::fromAttrs(state.fetchSettings, {{"type", "indirect"}, {"id", std::string(inputName)}});
if (input.ref && input.follows)
throw Error("flake input has both a flake reference and a follows attribute, at %s", state.positions[pos]);
return input;
}
@ -185,7 +184,6 @@ static std::pair<std::map<FlakeId, FlakeInput>, fetchers::Attrs> parseFlakeInput
} else {
inputs.emplace(inputName,
parseFlakeInput(state,
inputName,
inputAttr.value,
inputAttr.pos,
lockRootAttrPath,
@ -467,18 +465,27 @@ LockedFlake lockFlake(
/* Get the overrides (i.e. attributes of the form
'inputs.nixops.inputs.nixpkgs.url = ...'). */
for (auto & [id, input] : flakeInputs) {
std::function<void(const FlakeInput & input, const InputAttrPath & prefix)> addOverrides;
addOverrides = [&](const FlakeInput & input, const InputAttrPath & prefix)
{
for (auto & [idOverride, inputOverride] : input.overrides) {
auto inputAttrPath(inputAttrPathPrefix);
inputAttrPath.push_back(id);
auto inputAttrPath(prefix);
inputAttrPath.push_back(idOverride);
if (inputOverride.ref || inputOverride.follows)
overrides.emplace(inputAttrPath,
OverrideTarget {
.input = inputOverride,
.sourcePath = sourcePath,
.parentInputAttrPath = inputAttrPathPrefix
});
addOverrides(inputOverride, inputAttrPath);
}
};
for (auto & [id, input] : flakeInputs) {
auto inputAttrPath(inputAttrPathPrefix);
inputAttrPath.push_back(id);
addOverrides(input, inputAttrPath);
}
/* Check whether this input has overrides for a
@ -534,7 +541,8 @@ LockedFlake lockFlake(
continue;
}
assert(input.ref);
if (!input.ref)
input.ref = FlakeRef::fromAttrs(state.fetchSettings, {{"type", "indirect"}, {"id", std::string(id)}});
auto overridenParentPath =
input.ref->input.isRelative()
@ -554,7 +562,7 @@ LockedFlake lockFlake(
/* Get the input flake, resolve 'path:./...'
flakerefs relative to the parent flake. */
auto getInputFlake = [&](const FlakeRef & ref)
auto getInputFlake = [&](const FlakeRef & ref, const fetchers::UseRegistries useRegistries)
{
if (auto resolvedPath = resolveRelativePath()) {
return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath);
@ -645,7 +653,7 @@ LockedFlake lockFlake(
}
if (mustRefetch) {
auto inputFlake = getInputFlake(oldLock->lockedRef);
auto inputFlake = getInputFlake(oldLock->lockedRef, useRegistriesInputs);
nodePaths.emplace(childNode, inputFlake.path.parent());
computeLocks(inputFlake.inputs, childNode, inputAttrPath, oldLock, followsPrefix,
inputFlake.path, false);
@ -670,7 +678,8 @@ LockedFlake lockFlake(
nuked the next time we update the lock
file. That is, overrides are sticky unless you
use --no-write-lock-file. */
auto ref = (input2.ref && explicitCliOverrides.contains(inputAttrPath)) ? *input2.ref : *input.ref;
auto inputIsOverride = explicitCliOverrides.contains(inputAttrPath);
auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref;
/* Warn against the use of indirect flakerefs
(but only at top-level since we don't want
@ -696,7 +705,7 @@ LockedFlake lockFlake(
};
if (input.isFlake) {
auto inputFlake = getInputFlake(*input.ref);
auto inputFlake = getInputFlake(*input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs);
auto childNode = make_ref<LockedNode>(
inputFlake.lockedRef,


@ -247,7 +247,7 @@ LocalStore::LocalStore(ref<const Config> config)
else if (curSchema == 0) { /* new store */
curSchema = nixSchemaVersion;
openDB(*state, true);
writeFile(schemaPath, fmt("%1%", curSchema), 0666, true);
writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes);
}
else if (curSchema < nixSchemaVersion) {
@ -298,7 +298,7 @@ LocalStore::LocalStore(ref<const Config> config)
txn.commit();
}
writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes);
lockFile(globalLock.get(), ltRead, true);
}


@ -102,6 +102,11 @@ protected:
*/
Path topTmpDir;
/**
* The file descriptor of the temporary directory.
*/
AutoCloseFD tmpDirFd;
/**
* The sort of derivation we are building.
*
@ -313,9 +318,24 @@ protected:
/**
* Make a file owned by the builder.
*
* SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor.
* It's only safe to call in a child of a directory only visible to the owner.
*/
void chownToBuilder(const Path & path);
/**
* Make a file owned by the builder addressed by its file descriptor.
*/
void chownToBuilder(int fd, const Path & path);
/**
* Create a file in `tmpDir` owned by the builder.
*/
void writeBuilderFile(
const std::string & name,
std::string_view contents);
/**
* Run the builder's process.
*/
@ -716,9 +736,17 @@ void DerivationBuilderImpl::startBuilder()
/* Create a temporary directory where the build will take
place. */
topTmpDir = createTempDir(settings.buildDir.get().value_or(""), "nix-build-" + std::string(drvPath.name()), false, false, 0700);
setBuildTmpDir();
assert(!tmpDir.empty());
chownToBuilder(tmpDir);
/* The TOCTOU between the previous mkdir call and this open call is unavoidable due to
POSIX semantics.*/
tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)};
if (!tmpDirFd)
throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir);
chownToBuilder(tmpDirFd.get(), tmpDir);
for (auto & [outputName, status] : initialOutputs) {
/* Set scratch path we'll actually use during the build.
@ -1068,9 +1096,7 @@ void DerivationBuilderImpl::initEnv()
} else {
auto hash = hashString(HashAlgorithm::SHA256, i.first);
std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false);
Path p = tmpDir + "/" + fn;
writeFile(p, rewriteStrings(i.second, inputRewrites));
chownToBuilder(p);
writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites));
env[i.first + "Path"] = tmpDirInSandbox() + "/" + fn;
}
}
@ -1149,11 +1175,9 @@ void DerivationBuilderImpl::writeStructuredAttrs()
auto jsonSh = StructuredAttrs::writeShell(json);
writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
chownToBuilder(tmpDir + "/.attrs.sh");
writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites));
env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox() + "/.attrs.sh";
writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
chownToBuilder(tmpDir + "/.attrs.json");
writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites));
env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox() + "/.attrs.json";
}
}
@ -1274,6 +1298,25 @@ void DerivationBuilderImpl::chownToBuilder(const Path & path)
throw SysError("cannot change ownership of '%1%'", path);
}
void DerivationBuilderImpl::chownToBuilder(int fd, const Path & path)
{
if (!buildUser) return;
if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1)
throw SysError("cannot change ownership of file '%1%'", path);
}
void DerivationBuilderImpl::writeBuilderFile(
const std::string & name,
std::string_view contents)
{
auto path = std::filesystem::path(tmpDir) / name;
AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)};
if (!fd)
throw SysError("creating file %s", path);
writeFile(fd, path, contents);
chownToBuilder(fd.get(), path);
}
void DerivationBuilderImpl::runChild()
{
/* Warning: in the child we should absolutely not make any SQLite
@ -2088,6 +2131,15 @@ void DerivationBuilderImpl::checkOutputs(const std::map<std::string, ValidPathIn
void DerivationBuilderImpl::deleteTmpDir(bool force)
{
if (topTmpDir != "") {
/* As an extra precaution, even in the event of `deletePath` failing to
* clean up, the `tmpDir` will be chowned as if we were to move
* it inside the Nix store.
*
* This hardens against an attack which smuggles a file descriptor
* to make use of the temporary directory.
*/
chmod(topTmpDir.c_str(), 0000);
/* Don't keep temporary directories for builtins because they
might have privileged stuff (like a copy of netrc). */
if (settings.keepFailed && !force && !drv.isBuiltin()) {


@ -93,7 +93,7 @@ void restorePath(
{
switch (method) {
case FileSerialisationMethod::Flat:
writeFile(path, source, 0666, startFsync);
writeFile(path, source, 0666, startFsync ? FsSync::Yes : FsSync::No);
break;
case FileSerialisationMethod::NixArchive:
restorePath(path, source, startFsync);


@ -303,7 +303,7 @@ void readFile(const Path & path, Sink & sink, bool memory_map)
}
void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync)
void writeFile(const Path & path, std::string_view s, mode_t mode, FsSync sync)
{
AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT
// TODO
@ -313,22 +313,29 @@ void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync)
, mode));
if (!fd)
throw SysError("opening file '%1%'", path);
try {
writeFull(fd.get(), s);
} catch (Error & e) {
e.addTrace({}, "writing file '%1%'", path);
throw;
}
if (sync)
fd.fsync();
// Explicitly close to make sure exceptions are propagated.
writeFile(fd, path, s, mode, sync);
/* Close explicitly to propagate the exceptions. */
fd.close();
if (sync)
syncParent(path);
}
void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode, FsSync sync)
{
assert(fd);
try {
writeFull(fd.get(), s);
void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
if (sync == FsSync::Yes)
fd.fsync();
} catch (Error & e) {
e.addTrace({}, "writing file '%1%'", origPath);
throw;
}
}
void writeFile(const Path & path, Source & source, mode_t mode, FsSync sync)
{
AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT
// TODO
@ -352,11 +359,11 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
e.addTrace({}, "writing file '%1%'", path);
throw;
}
if (sync)
if (sync == FsSync::Yes)
fd.fsync();
// Explicitly close to make sure exceptions are propagated.
fd.close();
if (sync)
if (sync == FsSync::Yes)
syncParent(path);
}
@ -419,7 +426,8 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
#ifndef _WIN32
checkInterrupt();
std::string name(baseNameOf(path.native()));
std::string name(path.filename());
assert(name != "." && name != ".." && !name.empty());
struct stat st;
if (fstatat(parentfd, name.c_str(), &st,
@ -460,7 +468,7 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
throw SysError("chmod %1%", path);
}
int fd = openat(parentfd, path.c_str(), O_RDONLY);
int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW);
if (fd == -1)
throw SysError("opening directory %1%", path);
AutoCloseDir dir(fdopendir(fd));
@ -472,7 +480,7 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
checkInterrupt();
std::string childName = dirent->d_name;
if (childName == "." || childName == "..") continue;
_deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed, ex);
_deletePath(dirfd(dir.get()), path / childName, bytesFreed, ex);
}
if (errno) throw SysError("reading directory %1%", path);
}
@ -497,14 +505,13 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
static void _deletePath(const std::filesystem::path & path, uint64_t & bytesFreed)
{
Path dir = dirOf(path.string());
if (dir == "")
dir = "/";
assert(path.is_absolute());
assert(path.parent_path() != path);
AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY));
AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY));
if (!dirfd) {
if (errno == ENOENT) return;
throw SysError("opening directory '%1%'", path);
throw SysError("opening directory %s", path.parent_path());
}
std::exception_ptr ex;


@ -175,21 +175,27 @@ std::string readFile(const Path & path);
std::string readFile(const std::filesystem::path & path);
void readFile(const Path & path, Sink & sink, bool memory_map = true);
enum struct FsSync { Yes, No };
/**
* Write a string to a file.
*/
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false);
static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, bool sync = false)
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No);
static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No)
{
return writeFile(path.string(), s, mode, sync);
}
void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false);
static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, bool sync = false)
void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No);
static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No)
{
return writeFile(path.string(), source, mode, sync);
}
void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No);
/**
* Flush a path's parent directory to disk.
*/


@ -187,6 +187,10 @@ void MemorySink::createSymlink(const CanonPath & path, const std::string & targe
ref<SourceAccessor> makeEmptySourceAccessor()
{
static auto empty = make_ref<MemorySourceAccessor>().cast<SourceAccessor>();
/* Don't forget to clear the display prefix, as the default constructed
SourceAccessor has the «unknown» prefix. Since this accessor is supposed
to mimic an empty root directory the prefix needs to be empty. */
empty->setPathDisplay("");
return empty;
}


@ -72,6 +72,18 @@ struct UnionSourceAccessor : SourceAccessor
}
return std::nullopt;
}
std::pair<CanonPath, std::optional<std::string>> getFingerprint(const CanonPath & path) override
{
if (fingerprint)
return {path, fingerprint};
for (auto & accessor : accessors) {
auto [subpath, fingerprint] = accessor->getFingerprint(path);
if (fingerprint)
return {subpath, fingerprint};
}
return {path, std::nullopt};
}
};
ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors)


@ -2,6 +2,8 @@
source ../common.sh
export _NIX_TEST_BARF_ON_UNCACHEABLE=1
# shellcheck disable=SC2034 # this variable is used by tests that source this file
registry=$TEST_ROOT/registry.json


@ -62,8 +62,8 @@ flakeref=git+file://$rootRepo\?submodules=1\&dir=submodule
# Check that dirtying a submodule makes the entire thing dirty.
[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) != null ]]
echo '"foo"' > "$rootRepo"/submodule/sub.nix
[[ $(nix eval --json "$flakeref#sub" ) = '"foo"' ]]
[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]]
[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "$flakeref#sub" ) = '"foo"' ]]
[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]]
# Test that `nix flake metadata` parses `submodule` correctly.
cat > "$rootRepo"/flake.nix <<EOF
@ -75,7 +75,7 @@ EOF
git -C "$rootRepo" add flake.nix
git -C "$rootRepo" commit -m "Add flake.nix"
storePath=$(nix flake prefetch --json "$rootRepo?submodules=1" | jq -r .storePath)
storePath=$(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake prefetch --json "$rootRepo?submodules=1" | jq -r .storePath)
[[ -e "$storePath/submodule" ]]
# Test the use of inputs.self.


@ -114,7 +114,6 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default"
# Check that the fetcher cache works.
if [[ $(nix config show lazy-trees) = false ]]; then
nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuietInverse "source path.*is uncacheable"
nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuiet "source path.*cache hit"
fi
@ -169,10 +168,11 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file
nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file
[[ -e "$flake2Dir/flake.lock" ]]
[[ -z $(git -C "$flake2Dir" diff main || echo failed) ]]
if [[ $(nix config show lazy-trees) = false ]]; then
[[ $(jq --indent 0 . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
else
[[ $(jq --indent 0 . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
if [[ $(nix config show lazy-trees) = true ]]; then
# Test that `lazy-locks` causes NAR hashes to be omitted from the lock file.
nix flake update --flake "$flake2Dir" --commit-lock-file --lazy-locks
[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
fi
# Rerunning the build should not change the lockfile.


@ -359,3 +359,74 @@ rm "$flakeFollowsCustomUrlA"/flake.lock
json=$(nix flake metadata "$flakeFollowsCustomUrlA" --override-input B/C "$flakeFollowsCustomUrlD" --json)
echo "$json" | jq .locks.nodes.C.original
[[ $(echo "$json" | jq -r .locks.nodes.C.original.path) = './flakeC' ]]
# Test deep overrides, e.g. `inputs.B.inputs.C.inputs.D.follows = ...`.
cat <<EOF > $flakeFollowsD/flake.nix
{ outputs = _: {}; }
EOF
cat <<EOF > $flakeFollowsC/flake.nix
{
inputs.D.url = "path:nosuchflake";
outputs = _: {};
}
EOF
cat <<EOF > $flakeFollowsB/flake.nix
{
inputs.C.url = "path:$flakeFollowsC";
outputs = _: {};
}
EOF
cat <<EOF > $flakeFollowsA/flake.nix
{
inputs.B.url = "path:$flakeFollowsB";
inputs.D.url = "path:$flakeFollowsD";
inputs.B.inputs.C.inputs.D.follows = "D";
outputs = _: {};
}
EOF
nix flake lock $flakeFollowsA
[[ $(jq -c .nodes.C.inputs.D $flakeFollowsA/flake.lock) = '["D"]' ]]
# Test overlapping flake follows: B has D follow C/D, while A has B/C follow C
cat <<EOF > $flakeFollowsC/flake.nix
{
inputs.D.url = "path:$flakeFollowsD";
outputs = _: {};
}
EOF
cat <<EOF > $flakeFollowsB/flake.nix
{
inputs.C.url = "path:nosuchflake";
inputs.D.follows = "C/D";
outputs = _: {};
}
EOF
cat <<EOF > $flakeFollowsA/flake.nix
{
inputs.B.url = "path:$flakeFollowsB";
inputs.C.url = "path:$flakeFollowsC";
inputs.B.inputs.C.follows = "C";
outputs = _: {};
}
EOF
# bug was not triggered without recreating the lockfile
nix flake lock $flakeFollowsA --recreate-lock-file
[[ $(jq -c .nodes.B.inputs.D $flakeFollowsA/flake.lock) = '["B","C","D"]' ]]
# Check that you can't have both a flakeref and a follows attribute on an input.
cat <<EOF > $flakeFollowsB/flake.nix
{
inputs.C.url = "path:nosuchflake";
inputs.D.url = "path:nosuchflake";
inputs.D.follows = "C/D";
outputs = _: {};
}
EOF
expectStderr 1 nix flake lock $flakeFollowsA --recreate-lock-file | grepQuiet "flake input has both a flake reference and a follows attribute"


@ -27,9 +27,9 @@ nix build -o "$TEST_ROOT/result" "hg+file://$flake2Dir"
(! nix flake metadata --json "hg+file://$flake2Dir" | jq -e -r .revision)
nix eval "hg+file://$flake2Dir"#expr
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval "hg+file://$flake2Dir"#expr
nix eval "hg+file://$flake2Dir"#expr
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval "hg+file://$flake2Dir"#expr
(! nix eval "hg+file://$flake2Dir"#expr --no-allow-dirty)


@ -72,7 +72,7 @@ nix build -o "$TEST_ROOT/result" "$flake3Dir#sth" --commit-lock-file
nix registry add --registry "$registry" flake3 "git+file://$flake3Dir"
nix build -o "$TEST_ROOT/result" flake3#fnord
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#fnord
[[ $(cat "$TEST_ROOT/result") = FNORD ]]
# Check whether flake input fetching is lazy: flake3#sth does not
@ -82,11 +82,11 @@ clearStore
mv "$flake2Dir" "$flake2Dir.tmp"
mv "$nonFlakeDir" "$nonFlakeDir.tmp"
nix build -o "$TEST_ROOT/result" flake3#sth
(! nix build -o "$TEST_ROOT/result" flake3#xyzzy)
(! nix build -o "$TEST_ROOT/result" flake3#fnord)
(! _NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#xyzzy)
(! _NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#fnord)
mv "$flake2Dir.tmp" "$flake2Dir"
mv "$nonFlakeDir.tmp" "$nonFlakeDir"
nix build -o "$TEST_ROOT/result" flake3#xyzzy flake3#fnord
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#xyzzy flake3#fnord
# Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore
git -C "$flake3Dir" checkout -b removeXyzzy


@ -4,6 +4,8 @@ source ./common.sh
requireGit
unset _NIX_TEST_BARF_ON_UNCACHEABLE
# Test a "vendored" subflake dependency. This is a relative path flake
# which doesn't reference the root flake and has its own lock file.
#


@ -69,7 +69,7 @@ git -C "$rootFlake" add flake.nix sub2/flake.nix
git -C "$rootFlake" add sub2/flake.lock
[[ $(nix eval "$subflake2#y") = 15 ]]
[[ $(jq --indent 0 . < "$subflake2/flake.lock") =~ ^'{"nodes":{"root":{"inputs":{"root":"root_2","sub1":"sub1"}},"root_2":{"inputs":{"sub0":"sub0"},"locked":{"path":"..","type":"path"},"original":{"path":"..","type":"path"},"parent":[]},"root_3":{"inputs":{"sub0":"sub0_2"},"locked":{"path":"../","type":"path"},"original":{"path":"../","type":"path"},"parent":["sub1"]},"sub0":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["root"]},"sub0_2":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["sub1","root"]},"sub1":{"inputs":{"root":"root_3"},"locked":{"path":"../sub1","type":"path"},"original":{"path":"../sub1","type":"path"},"parent":[]}},"root":"root","version":7}'$ ]]
[[ $(jq --indent 0 --compact-output . < "$subflake2/flake.lock") =~ ^'{"nodes":{"root":{"inputs":{"root":"root_2","sub1":"sub1"}},"root_2":{"inputs":{"sub0":"sub0"},"locked":{"path":"..","type":"path"},"original":{"path":"..","type":"path"},"parent":[]},"root_3":{"inputs":{"sub0":"sub0_2"},"locked":{"path":"../","type":"path"},"original":{"path":"../","type":"path"},"parent":["sub1"]},"sub0":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["root"]},"sub0_2":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["sub1","root"]},"sub1":{"inputs":{"root":"root_3"},"locked":{"path":"../sub1","type":"path"},"original":{"path":"../sub1","type":"path"},"parent":[]}},"root":"root","version":7}'$ ]]
# Make sure there are no content locks for relative path flakes.
(! grep "$TEST_ROOT" "$subflake2/flake.lock")


@ -34,3 +34,15 @@ rm -rf $TEST_ROOT/eval-out
(! nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ "." = "bla"; }')
(! nix eval --expr '~/foo')
expectStderr 0 nix eval --expr "/some/absolute/path" \
| grepQuiet "/some/absolute/path"
expectStderr 0 nix eval --expr "/some/absolute/path" --impure \
| grepQuiet "/some/absolute/path"
expectStderr 0 nix eval --expr "some/relative/path" \
| grepQuiet "$PWD/some/relative/path"
expectStderr 0 nix eval --expr "some/relative/path" --impure \
| grepQuiet "$PWD/some/relative/path"