From 55c7ef9d40f1c473034701810ac43b398a9492eb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 22:43:12 +0200 Subject: [PATCH 001/373] SourceAccessor: Make lstat() virtual With FilteringSourceAccessor, lstat() needs to throw a different exception if the path is inaccessible than if it doesn't exist. --- src/libexpr/eval.cc | 10 ++++++++++ src/libfetchers/filtering-source-accessor.cc | 13 ++++++++++++- .../nix/fetchers/filtering-source-accessor.hh | 4 ++++ src/libutil/include/nix/util/source-accessor.hh | 2 +- src/libutil/mounted-source-accessor.cc | 6 ++++++ 5 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index f82fd93b5..4db598871 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -3127,6 +3127,11 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_ auto res = (r / CanonPath(suffix)).resolveSymlinks(); if (res.pathExists()) return res; + + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. + if (auto accessor = res.accessor.dynamic_pointer_cast()) + accessor->checkAccess(res.path); } if (hasPrefix(path, "nix/")) @@ -3193,6 +3198,11 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat if (path.resolveSymlinks().pathExists()) return finish(std::move(path)); else { + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. 
+ if (auto accessor = path.accessor.dynamic_pointer_cast()) + accessor->checkAccess(path.path); + logWarning({.msg = HintFmt("Nix search path entry '%1%' does not exist, ignoring", value)}); } } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index a99ecacef..5a3a0f07b 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -16,15 +16,26 @@ std::string FilteringSourceAccessor::readFile(const CanonPath & path) return next->readFile(prefix / path); } +void FilteringSourceAccessor::readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) +{ + checkAccess(path); + return next->readFile(prefix / path, sink, sizeCallback); +} + bool FilteringSourceAccessor::pathExists(const CanonPath & path) { return isAllowed(path) && next->pathExists(prefix / path); } std::optional FilteringSourceAccessor::maybeLstat(const CanonPath & path) +{ + return isAllowed(path) ? next->maybeLstat(prefix / path) : std::nullopt; +} + +SourceAccessor::Stat FilteringSourceAccessor::lstat(const CanonPath & path) { checkAccess(path); - return next->maybeLstat(prefix / path); + return next->lstat(prefix / path); } SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path) diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index f8a57bfb3..1c2fd60b0 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -36,8 +36,12 @@ struct FilteringSourceAccessor : SourceAccessor std::string readFile(const CanonPath & path) override; + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override; + bool pathExists(const CanonPath & path) override; + Stat lstat(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & 
path) override; DirEntries readDirectory(const CanonPath & path) override; diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 7419ef392..e57b85411 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -121,7 +121,7 @@ struct SourceAccessor : std::enable_shared_from_this std::string typeString(); }; - Stat lstat(const CanonPath & path); + virtual Stat lstat(const CanonPath & path); virtual std::optional maybeLstat(const CanonPath & path) = 0; diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index 5c0ecc1ff..cd7e3d496 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -27,6 +27,12 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor return accessor->readFile(subpath); } + Stat lstat(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->lstat(subpath); + } + std::optional maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); From 28d11c5bcc9930ec20293d672c90585d1dbc1557 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 22:52:18 +0200 Subject: [PATCH 002/373] Add SourceAccessor::getFingerprint() This returns the fingerprint for a specific subpath. This is intended for "composite" accessors like MountedSourceAccessor, where different subdirectories can have different fingerprints. 
--- src/libfetchers/filtering-source-accessor.cc | 7 +++++++ .../nix/fetchers/filtering-source-accessor.hh | 2 ++ .../include/nix/util/source-accessor.hh | 21 +++++++++++++++++++ src/libutil/mounted-source-accessor.cc | 8 +++++++ src/libutil/union-source-accessor.cc | 12 +++++++++++ 5 files changed, 50 insertions(+) diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 5a3a0f07b..8f1b50eb9 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -60,6 +60,13 @@ std::string FilteringSourceAccessor::showPath(const CanonPath & path) return displayPrefix + next->showPath(prefix / path) + displaySuffix; } +std::pair> FilteringSourceAccessor::getFingerprint(const CanonPath & path) +{ + if (fingerprint) + return {path, fingerprint}; + return next->getFingerprint(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 1c2fd60b0..5e98caa58 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -50,6 +50,8 @@ struct FilteringSourceAccessor : SourceAccessor std::string showPath(const CanonPath & path) override; + std::pair> getFingerprint(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. 
diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index e57b85411..671444e6f 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -180,6 +180,27 @@ struct SourceAccessor : std::enable_shared_from_this */ std::optional fingerprint; + /** + * Return the fingerprint for `path`. This is usually the + * fingerprint of the current accessor, but for composite + * accessors (like `MountedSourceAccessor`), we want to return the + * fingerprint of the "inner" accessor if the current one lacks a + * fingerprint. + * + * So this method is intended to return the most-outer accessor + * that has a fingerprint for `path`. It also returns the path that `path` + * corresponds to in that accessor. + * + * For example: in a `MountedSourceAccessor` that has + * `/nix/store/foo` mounted, + * `getFingerprint("/nix/store/foo/bar")` will return the path + * `/bar` and the fingerprint of the `/nix/store/foo` accessor. + */ + virtual std::pair> getFingerprint(const CanonPath & path) + { + return {path, fingerprint}; + } + /** * Return the maximum last-modified time of the files in this * tree, if available. 
diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index cd7e3d496..d9398045c 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -91,6 +91,14 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor else return nullptr; } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + auto [accessor, subpath] = resolve(path); + return accessor->getFingerprint(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index 96b6a643a..e3b39f14e 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -72,6 +72,18 @@ struct UnionSourceAccessor : SourceAccessor } return std::nullopt; } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + for (auto & accessor : accessors) { + auto [subpath, fingerprint] = accessor->getFingerprint(path); + if (fingerprint) + return {subpath, fingerprint}; + } + return {path, std::nullopt}; + } }; ref makeUnionSourceAccessor(std::vector> && accessors) From 3450a72ba02ccd5311cfc75b0e02c4d773013794 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 16 Jun 2025 18:16:30 +0200 Subject: [PATCH 003/373] Git fetcher: Make dirty repos with no commits cacheable --- src/libfetchers/git.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index f6f5c30ee..7c1630167 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -893,8 +893,7 @@ struct GitInputScheme : InputScheme return makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); - if (auto repoPath = repoInfo.getPath(); - repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) { + if (auto repoPath = repoInfo.getPath(); repoPath 
&& repoInfo.workdirInfo.submodules.empty()) { /* Calculate a fingerprint that takes into account the deleted and modified/added files. */ HashSink hashSink{HashAlgorithm::SHA512}; @@ -907,7 +906,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(*repoInfo.workdirInfo.headRev) + return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; From ec6d5c7de3b3701a74cbc16515813cab7c7ef580 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:02:02 +0200 Subject: [PATCH 004/373] Path fetcher: Simplify fingerprint computation --- src/libfetchers/path.cc | 42 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 3c4b9c06d..aa0411ff9 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -123,8 +123,6 @@ struct PathInputScheme : InputScheme auto absPath = getAbsPath(input); - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: check whether access to 'path' is allowed. auto storePath = store->maybeParseStorePath(absPath.string()); @@ -133,43 +131,33 @@ struct PathInputScheme : InputScheme time_t mtime = 0; if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); // FIXME: try to substitute storePath. auto src = sinkToSource( [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); storePath = store->addToStoreFromDump(*src, "source"); } - // To avoid copying the path again to the /nix/store, we need to add a cache entry. 
- ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto fp = getFingerprint(store, input); - if (fp) { - auto cacheKey = makeFetchToStoreCacheKey(input.getName(), *fp, method, "/"); - input.settings->getCache()->upsert(cacheKey, *store, {}, *storePath); - } + auto accessor = ref{store->getFSAccessor(*storePath)}; + + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. + auto info = store->queryPathInfo(*storePath); + accessor->fingerprint = + fmt("path:%s", store->queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)); + input.settings->getCache()->upsert( + makeFetchToStoreCacheKey( + input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + *store, + {}, + *storePath); /* Trust the lastModified value supplied by the user, if any. It's not a "secure" attribute so we don't care. */ if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {ref{store->getFSAccessor(*storePath)}, std::move(input)}; - } - - std::optional getFingerprint(ref store, const Input & input) const override - { - if (isRelative(input)) - return std::nullopt; - - /* If this path is in the Nix store, use the hash of the - store object and the subpath. 
*/ - auto path = getAbsPath(input); - try { - auto [storePath, subPath] = store->toStorePath(path.string()); - auto info = store->queryPathInfo(storePath); - return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath); - } catch (Error &) { - return std::nullopt; - } + return {accessor, std::move(input)}; } std::optional experimentalFeature() const override From 1d130492d743345715107d24f0204fda19896db1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:04:58 +0200 Subject: [PATCH 005/373] Mount inputs on storeFS to restore fetchToStore() caching fetchToStore() caching was broken because it uses the fingerprint of the accessor, but now that the accessor (typically storeFS) is a composite (like MountedSourceAccessor or AllowListSourceAccessor), there was no fingerprint anymore. So fetchToStore now uses the new getFingerprint() method to get the specific fingerprint for the subpath. --- src/libexpr/eval.cc | 27 ++++++-------- src/libexpr/include/nix/expr/eval.hh | 6 ++++ src/libexpr/paths.cc | 25 +++++++++++++ src/libexpr/primops/fetchTree.cc | 7 ++-- src/libfetchers/fetch-to-store.cc | 11 ++++-- src/libfetchers/fetchers.cc | 6 ++-- src/libflake/flake.cc | 35 ++++++------------- .../lang/eval-fail-hashfile-missing.err.exp | 2 +- 8 files changed, 69 insertions(+), 50 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 4db598871..98219fb17 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -227,24 +227,17 @@ EvalState::EvalState( {CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)}, })) , rootFS([&] { - auto accessor = [&]() -> decltype(rootFS) { - /* In pure eval mode, we provide a filesystem that only - contains the Nix store. */ - if (settings.pureEval) - return storeFS; + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. 
- /* If we have a chroot store and pure eval is not enabled, - use a union accessor to make the chroot store available - at its logical location while still having the underlying - directory available. This is necessary for instance if - we're evaluating a file from the physical /nix/store - while using a chroot store. */ - auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); - if (store->storeDir != realStoreDir) - return makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); - - return getFSSourceAccessor(); - }(); + Otherwise, use a union accessor to make the augmented store + available at its logical location while still having the + underlying directory available. This is necessary for + instance if we're evaluating a file from the physical + /nix/store while using a chroot store, and also for lazy + mounted fetchTree. */ + auto accessor = settings.pureEval ? storeFS.cast() + : makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); /* Apply access control if needed. */ if (settings.restrictEval || settings.pureEval) diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index e5b87cc97..c56836076 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -42,6 +42,7 @@ class Store; namespace fetchers { struct Settings; struct InputCache; +struct Input; } // namespace fetchers struct EvalSettings; class EvalState; @@ -514,6 +515,11 @@ public: void checkURI(const std::string & uri); + /** + * Mount an input on the Nix store. + */ + StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + /** * Parse a Nix expression from the specified file. 
*/ diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index f90bc37df..8622ab208 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,5 +1,7 @@ #include "nix/store/store-api.hh" #include "nix/expr/eval.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" namespace nix { @@ -18,4 +20,27 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } +StorePath +EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +{ + auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + + allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); + + auto narHash = store->queryPathInfo(storePath)->narHash; + input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + + if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + throw Error( + (unsigned int) 102, + "NAR hash mismatch in input '%s', expected '%s' but got '%s'", + originalInput.to_string(), + narHash.to_string(HashFormat::SRI, true), + originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + + return storePath; +} + } // namespace nix diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e673e55a0..e76e39f7d 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -10,6 +10,7 @@ #include "nix/util/url.hh" #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/input-cache.hh" #include @@ -218,11 +219,11 @@ static void fetchTree( throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); } - auto [storePath, input2] = input.fetchToStore(state.store); + auto cachedInput = state.inputCache->getAccessor(state.store, input, 
fetchers::UseRegistries::No); - state.allowPath(storePath); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); - emitTreeAttrs(state, storePath, input2, v, params.emptyRevFallback, false); + emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } static void prim_fetchTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 6ce78e115..5961379ee 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -27,14 +27,19 @@ StorePath fetchToStore( std::optional cacheKey; - if (!filter && path.accessor->fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *path.accessor->fingerprint, method, path.path.abs()); + auto [subpath, fingerprint] = filter ? std::pair>{path.path, std::nullopt} + : path.accessor->getFingerprint(path.path); + + if (fingerprint) { + cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs()); if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { debug("store path cache hit for '%s'", path); return res->storePath; } - } else + } else { + // FIXME: could still provide in-memory caching keyed on `SourcePath`. 
debug("source path '%s' is uncacheable", path); + } Activity act( *logger, diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 045aafdcb..f697ec6f5 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -356,8 +356,10 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto auto [accessor, result] = scheme->getAccessor(store, *this); - assert(!accessor->fingerprint); - accessor->fingerprint = result.getFingerprint(store); + if (!accessor->fingerprint) + accessor->fingerprint = result.getFingerprint(store); + else + result.cachedFingerprint = accessor->fingerprint; return {accessor, std::move(result)}; } diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 3acf589a5..486118963 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -24,21 +24,6 @@ using namespace flake; namespace flake { -static StorePath copyInputToStore( - EvalState & state, fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) -{ - auto storePath = fetchToStore(*input.settings, *state.store, accessor, FetchMode::Copy, input.getName()); - - state.allowPath(storePath); - - auto narHash = state.store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - - assert(!originalInput.getNarHash() || storePath == originalInput.computeStorePath(*state.store)); - - return storePath; -} - static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { if (value.isThunk() && value.isTrivial()) @@ -360,11 +345,14 @@ static Flake getFlake( lockedRef = FlakeRef(std::move(cachedInput2.lockedInput), newLockedRef.subdir); } - // Copy the tree to the store. - auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, cachedInput.accessor); - // Re-parse flake.nix from the store. 
- return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); + return readFlake( + state, + originalRef, + resolvedRef, + lockedRef, + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + lockRootAttrPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) @@ -721,11 +709,10 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); - // FIXME: allow input to be lazy. - auto storePath = copyInputToStore( - state, lockedRef.input, input.ref->input, cachedInput.accessor); - - return {state.storePath(storePath), lockedRef}; + return { + state.storePath( + state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + lockedRef}; } }(); diff --git a/tests/functional/lang/eval-fail-hashfile-missing.err.exp b/tests/functional/lang/eval-fail-hashfile-missing.err.exp index 0d3747a6d..901dea2b5 100644 --- a/tests/functional/lang/eval-fail-hashfile-missing.err.exp +++ b/tests/functional/lang/eval-fail-hashfile-missing.err.exp @@ -10,4 +10,4 @@ error: … while calling the 'hashFile' builtin - error: opening file '/pwd/lang/this-file-is-definitely-not-there-7392097': No such file or directory + error: path '/pwd/lang/this-file-is-definitely-not-there-7392097' does not exist From 4b9735b761047d6cb606229919fc3d71468fb241 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:09:47 +0200 Subject: [PATCH 006/373] Test against uncacheable paths This is to test the non-functional property that most paths should be cacheable. We've had frequent cases where caching broken but we didn't notice. 
--- src/libfetchers/fetch-to-store.cc | 4 ++++ tests/functional/flakes/common.sh | 2 ++ tests/functional/flakes/flake-in-submodule.sh | 6 +++--- tests/functional/flakes/follow-paths.sh | 2 +- tests/functional/flakes/mercurial.sh | 4 ++-- tests/functional/flakes/non-flake-inputs.sh | 9 +++++---- tests/functional/flakes/relative-paths-lockfile.sh | 2 ++ 7 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 5961379ee..b1e8b9d72 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -1,6 +1,7 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/fetchers.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/environment-variables.hh" namespace nix { @@ -37,6 +38,9 @@ StorePath fetchToStore( return res->storePath; } } else { + static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; + if (barf && !filter) + throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); } diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index 422cab96c..77bc03060 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -2,6 +2,8 @@ source ../common.sh +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + # shellcheck disable=SC2034 # this variable is used by tests that source this file registry=$TEST_ROOT/registry.json diff --git a/tests/functional/flakes/flake-in-submodule.sh b/tests/functional/flakes/flake-in-submodule.sh index fe5acf26d..a7d86698d 100755 --- a/tests/functional/flakes/flake-in-submodule.sh +++ b/tests/functional/flakes/flake-in-submodule.sh @@ -62,8 +62,8 @@ flakeref=git+file://$rootRepo\?submodules=1\&dir=submodule # Check that dirtying a submodule makes the entire thing dirty. 
[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) != null ]] echo '"foo"' > "$rootRepo"/submodule/sub.nix -[[ $(nix eval --json "$flakeref#sub" ) = '"foo"' ]] -[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "$flakeref#sub" ) = '"foo"' ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] # Test that `nix flake metadata` parses `submodule` correctly. cat > "$rootRepo"/flake.nix <&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist' +expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep "'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. cat > $flakeFollowsA/flake.nix < Date: Wed, 10 Sep 2025 17:40:54 +0200 Subject: [PATCH 007/373] Args::Flag: Add required attribute --- src/libutil/args.cc | 22 +++++++++++++++++++++- src/libutil/include/nix/util/args.hh | 8 ++++++++ src/nix/sigs.cc | 8 +++----- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index f4309473b..05b5a25c7 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -318,6 +318,7 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) } catch (SystemError &) { } } + for (auto pos = cmdline.begin(); pos != cmdline.end();) { auto arg = *pos; @@ -354,6 +355,9 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) processArgs(pendingArgs, true); + if (!completions) + checkArgs(); + initialFlagsProcessed(); /* Now that we are done parsing, make sure that any experimental @@ -384,7 +388,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) auto & rootArgs = getRoot(); - auto process = [&](const std::string & name, const Flag & flag) -> bool { + auto process = [&](const std::string & name, Flag & flag) -> bool { 
++pos; if (auto & f = flag.experimentalFeature) @@ -413,6 +417,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) } if (!anyCompleted) flag.handler.fun(std::move(args)); + flag.timesUsed++; return true; }; @@ -504,6 +509,14 @@ bool Args::processArgs(const Strings & args, bool finish) return res; } +void Args::checkArgs() +{ + for (auto & [name, flag] : longFlags) { + if (flag->required && flag->timesUsed == 0) + throw UsageError("required argument '--%s' is missing", name); + } +} + nlohmann::json Args::toJSON() { auto flags = nlohmann::json::object(); @@ -643,6 +656,13 @@ bool MultiCommand::processArgs(const Strings & args, bool finish) return Args::processArgs(args, finish); } +void MultiCommand::checkArgs() +{ + Args::checkArgs(); + if (command) + command->second->checkArgs(); +} + nlohmann::json MultiCommand::toJSON() { auto cmds = nlohmann::json::object(); diff --git a/src/libutil/include/nix/util/args.hh b/src/libutil/include/nix/util/args.hh index 443db445f..99f6e23e8 100644 --- a/src/libutil/include/nix/util/args.hh +++ b/src/libutil/include/nix/util/args.hh @@ -202,8 +202,12 @@ public: Strings labels; Handler handler; CompleterClosure completer; + bool required = false; std::optional experimentalFeature; + + // FIXME: this should be private, but that breaks designated initializers. + size_t timesUsed = 0; }; protected: @@ -283,6 +287,8 @@ protected: StringSet hiddenCategories; + virtual void checkArgs(); + /** * Called after all command line flags before the first non-flag * argument (if any) have been processed. 
@@ -428,6 +434,8 @@ public: protected: std::string commandName = ""; bool aliasUsed = false; + + void checkArgs() override; }; Strings argvToStrings(int argc, char ** argv); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 92bb00500..470cd3951 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -144,7 +144,7 @@ static auto rCmdSign = registerCommand2({"store", "sign"}); struct CmdKeyGenerateSecret : Command { - std::optional keyName; + std::string keyName; CmdKeyGenerateSecret() { @@ -153,6 +153,7 @@ struct CmdKeyGenerateSecret : Command .description = "Identifier of the key (e.g. `cache.example.org-1`).", .labels = {"name"}, .handler = {&keyName}, + .required = true, }); } @@ -170,11 +171,8 @@ struct CmdKeyGenerateSecret : Command void run() override { - if (!keyName) - throw UsageError("required argument '--key-name' is missing"); - logger->stop(); - writeFull(getStandardOutput(), SecretKey::generate(*keyName).to_string()); + writeFull(getStandardOutput(), SecretKey::generate(keyName).to_string()); } }; From 43550e8edb81e423619c2bc6d18018e095c5c468 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 16:21:56 -0400 Subject: [PATCH 008/373] Lock down `BuildResult::Status` enum values This allows refactoring without changing wire protocol by mistake. 
--- .../include/nix/store/build-result.hh | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index d7249d420..1911fef39 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -20,26 +20,26 @@ struct BuildResult */ enum Status { Built = 0, - Substituted, - AlreadyValid, - PermanentFailure, - InputRejected, - OutputRejected, + Substituted = 1, + AlreadyValid = 2, + PermanentFailure = 3, + InputRejected = 4, + OutputRejected = 5, /// possibly transient - TransientFailure, + TransientFailure = 6, /// no longer used - CachedFailure, - TimedOut, - MiscFailure, - DependencyFailed, - LogLimitExceeded, - NotDeterministic, - ResolvesToAlreadyValid, - NoSubstituters, + CachedFailure = 7, + TimedOut = 8, + MiscFailure = 9, + DependencyFailed = 10, + LogLimitExceeded = 11, + NotDeterministic = 12, + ResolvesToAlreadyValid = 13, + NoSubstituters = 14, /// A certain type of `OutputRejected`. The protocols do not yet /// know about this one, so change it back to `OutputRejected` /// before serialization. - HashMismatch, + HashMismatch = 15, } status = MiscFailure; /** From e731c43eae9c08b8649708dcc5a76e8a99eda929 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 18:09:56 -0400 Subject: [PATCH 009/373] Use `std::variant` to enforce `BuildResult` invariants There is now a clean separation between successful and failing build results. 
--- src/libcmd/installables.cc | 18 +- src/libstore-c/nix_api_store.cc | 8 +- src/libstore-tests/serve-protocol.cc | 108 +++++----- src/libstore-tests/worker-protocol.cc | 194 +++++++++--------- src/libstore/build-result.cc | 6 + .../build/derivation-building-goal.cc | 87 ++++---- src/libstore/build/derivation-check.cc | 12 +- src/libstore/build/derivation-goal.cc | 72 ++++--- .../build/derivation-trampoline-goal.cc | 9 +- src/libstore/build/entry-points.cc | 6 +- src/libstore/build/substitution-goal.cc | 32 +-- src/libstore/derivation-options.cc | 4 +- .../include/nix/store/build-result.hh | 174 ++++++++++------ .../nix/store/build/derivation-builder.hh | 2 +- .../store/build/derivation-building-goal.hh | 2 +- .../nix/store/build/derivation-goal.hh | 2 +- .../nix/store/build/substitution-goal.hh | 4 +- src/libstore/legacy-ssh-store.cc | 13 +- src/libstore/local-store.cc | 2 +- src/libstore/misc.cc | 2 +- src/libstore/posix-fs-canonicalise.cc | 2 +- src/libstore/remote-store.cc | 21 +- src/libstore/restricted-store.cc | 12 +- src/libstore/serve-protocol.cc | 60 ++++-- src/libstore/store-api.cc | 2 +- src/libstore/unix/build/derivation-builder.cc | 20 +- src/libstore/worker-protocol.cc | 68 ++++-- src/nix/build-remote/build-remote.cc | 17 +- .../functional/test-libstoreconsumer/main.cc | 6 +- 29 files changed, 568 insertions(+), 397 deletions(-) diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 96ff06ad3..91ad74308 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -604,28 +604,28 @@ std::vector Installable::build( static void throwBuildErrors(std::vector & buildResults, const Store & store) { - std::vector failed; + std::vector> failed; for (auto & buildResult : buildResults) { - if (!buildResult.success()) { - failed.push_back(buildResult); + if (auto * failure = buildResult.tryGetFailure()) { + failed.push_back({&buildResult, failure}); } } auto failedResult = failed.begin(); if (failedResult != failed.end()) { if 
(failed.size() == 1) { - failedResult->rethrow(); + failedResult->second->rethrow(); } else { StringSet failedPaths; for (; failedResult != failed.end(); failedResult++) { - if (!failedResult->errorMsg.empty()) { + if (!failedResult->second->errorMsg.empty()) { logError( ErrorInfo{ .level = lvlError, - .msg = failedResult->errorMsg, + .msg = failedResult->second->errorMsg, }); } - failedPaths.insert(failedResult->path.to_string(store)); + failedPaths.insert(failedResult->first->path.to_string(store)); } throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); } @@ -695,12 +695,14 @@ std::vector, BuiltPathWithResult>> Installable::build auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); throwBuildErrors(buildResults, *store); for (auto & buildResult : buildResults) { + // If we didn't throw, they must all be sucesses + auto & success = std::get(buildResult.inner); for (auto & aux : backmap[buildResult.path]) { std::visit( overloaded{ [&](const DerivedPath::Built & bfd) { std::map outputs; - for (auto & [outputName, realisation] : buildResult.builtOutputs) + for (auto & [outputName, realisation] : success.builtOutputs) outputs.emplace(outputName, realisation.outPath); res.push_back( {aux.installable, diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index c4c17f127..68b642d86 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -145,9 +145,11 @@ nix_err nix_store_realise( if (callback) { for (const auto & result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - StorePath p{realisation.outPath}; - callback(userdata, outputName.c_str(), &p); + if (auto * success = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : success->builtOutputs) { + StorePath p{realisation.outPath}; + callback(userdata, outputName.c_str(), &p); + } } } } diff --git a/src/libstore-tests/serve-protocol.cc 
b/src/libstore-tests/serve-protocol.cc index b513e1365..a63201164 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -127,17 +127,17 @@ VERSIONED_CHARACTERIZATION_TEST( VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2.2", 2 << 8 | 2, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -145,20 +145,24 @@ VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2 VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_3, "build-result-2.3", 2 << 8 | 3, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}, .startTime = 30, .stopTime = 50, }, @@ -170,48 +174,52 @@ VERSIONED_CHARACTERIZATION_TEST( ServeProtoTest, buildResult_2_6, "build-result-2.6", 2 << 8 | 6, ({ using namespace 
std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, + }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, #if 0 diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 823d8d85a..489151c8c 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -180,17 +180,17 @@ VERSIONED_CHARACTERIZATION_TEST( 
VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, buildResult_1_27, "build-result-1.27", 1 << 8 | 27, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -199,16 +199,16 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_28, "build-result-1.28", 1 << 8 | 28, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, .builtOutputs = { { @@ -236,7 +236,7 @@ VERSIONED_CHARACTERIZATION_TEST( }, }, }, - }, + }}}, }; t; })) @@ -245,48 +245,52 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_29, "build-result-1.29", 1 << 8 | 29, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + 
.status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, + }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, }, @@ -298,48 +302,52 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_37, "build-result-1.37", 1 << 8 | 37, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime 
= 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, + }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, .cpuUser = std::chrono::microseconds(500s), @@ -353,10 +361,10 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b using namespace std::literals::chrono_literals; std::tuple t{ KeyedBuildResult{ - { - .status = KeyedBuildResult::OutputRejected, + {.inner{BuildResult::Failure{ + .status = KeyedBuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, /* .path = */ DerivedPath::Opaque{ StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-xxx"}, @@ -364,10 +372,12 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b }, KeyedBuildResult{ { - .status = KeyedBuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = KeyedBuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + 
}}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc index 43c7adb11..ecbd27b49 100644 --- a/src/libstore/build-result.cc +++ b/src/libstore/build-result.cc @@ -5,4 +5,10 @@ namespace nix { bool BuildResult::operator==(const BuildResult &) const noexcept = default; std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default; +bool BuildResult::Success::operator==(const BuildResult::Success &) const noexcept = default; +std::strong_ordering BuildResult::Success::operator<=>(const BuildResult::Success &) const noexcept = default; + +bool BuildResult::Failure::operator==(const BuildResult::Failure &) const noexcept = default; +std::strong_ordering BuildResult::Failure::operator<=>(const BuildResult::Failure &) const noexcept = default; + } // namespace nix diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index ebef2a375..001816ca0 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -90,7 +90,7 @@ void DerivationBuildingGoal::timedOut(Error && ex) killChild(); // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. - [[maybe_unused]] Done _ = doneFailure({BuildResult::TimedOut, std::move(ex)}); + [[maybe_unused]] Done _ = doneFailure({BuildResult::Failure::TimedOut, std::move(ex)}); } /** @@ -205,7 +205,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() nrFailed, nrFailed == 1 ? 
"dependency" : "dependencies"); msg += showKnownOutputs(worker.store, *drv); - co_return doneFailure(BuildError(BuildResult::DependencyFailed, msg)); + co_return doneFailure(BuildError(BuildResult::Failure::DependencyFailed, msg)); } /* Gather information necessary for computing the closure and/or @@ -256,14 +256,18 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() return std::nullopt; auto & buildResult = (*mEntry)->buildResult; - if (!buildResult.success()) - return std::nullopt; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; - auto i = get(buildResult.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; + return i->outPath; + }, + }, + buildResult.inner); }); if (!attempt) { /* TODO (impure derivations-induced tech debt) (see below): @@ -306,7 +310,9 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() auto resolvedResult = resolvedDrvGoal->buildResult; - if (resolvedResult.success()) { + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; SingleDrvOutputs builtOutputs; auto outputHashes = staticOutputHashes(worker.evalStore, *drv); @@ -324,7 +330,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() outputName); auto realisation = [&] { - auto take1 = get(resolvedResult.builtOutputs, outputName); + auto take1 = get(success.builtOutputs, outputName); if (take1) return *take1; @@ -360,18 +366,19 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - auto status = resolvedResult.status; - if (status == BuildResult::AlreadyValid) - status = BuildResult::ResolvesToAlreadyValid; + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + 
status = BuildResult::Success::ResolvesToAlreadyValid; - co_return doneSuccess(status, std::move(builtOutputs)); - } else { + co_return doneSuccess(success.status, std::move(builtOutputs)); + } else if (resolvedResult.tryGetFailure()) { co_return doneFailure({ - BuildResult::DependencyFailed, + BuildResult::Failure::DependencyFailed, "build of resolved derivation '%s' failed", worker.store.printStorePath(pathResolved), }); - } + } else + assert(false); } /* If we get this far, we know no dynamic drvs inputs */ @@ -536,7 +543,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath)); outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::AlreadyValid, std::move(validOutputs)); + co_return doneSuccess(BuildResult::Success::AlreadyValid, std::move(validOutputs)); } /* If any of the outputs already exist but are not valid, delete @@ -628,7 +635,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() /* Check the exit status. 
*/ if (!statusOk(status)) { - auto e = fixupBuilderFailureErrorMessage({BuildResult::MiscFailure, status, ""}); + auto e = fixupBuilderFailureErrorMessage({BuildResult::Failure::MiscFailure, status, ""}); outputLocks.unlock(); @@ -669,7 +676,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); } co_await yield(); @@ -832,15 +839,15 @@ Goal::Co DerivationBuildingGoal::tryToBuild() # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wswitch-enum" switch (e.status) { - case BuildResult::HashMismatch: + case BuildResult::Failure::HashMismatch: worker.hashMismatch = true; /* See header, the protocols don't know about `HashMismatch` yet, so change it to `OutputRejected`, which they expect for this case (hash mismatch is a type of output rejection). */ - e.status = BuildResult::OutputRejected; + e.status = BuildResult::Failure::OutputRejected; break; - case BuildResult::NotDeterministic: + case BuildResult::Failure::NotDeterministic: worker.checkMismatch = true; break; default: @@ -866,7 +873,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() (unlinked) lock files. */ outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); } #endif } @@ -1149,7 +1156,7 @@ void DerivationBuildingGoal::handleChildOutput(Descriptor fd, std::string_view d // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. 
[[maybe_unused]] Done _ = doneFailure(BuildError( - BuildResult::LogLimitExceeded, + BuildResult::Failure::LogLimitExceeded, "%s killed after writing more than %d bytes of log output", getName(), settings.maxLogSize)); @@ -1306,16 +1313,16 @@ DerivationBuildingGoal::checkPathValidity(std::map & return {allValid, validOutputs}; } -Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs) +Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = std::move(builtOutputs), + }; mcRunningBuilds.reset(); - buildResult.builtOutputs = std::move(builtOutputs); - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -1325,16 +1332,18 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, Singl Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcRunningBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index 82e92e1f3..db3ec7c3d 100644 --- 
a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -33,7 +33,7 @@ void checkOutputs( /* Throw an error after registering the path as valid. */ throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", store.printStorePath(drvPath), wanted.to_string(HashFormat::SRI, true), @@ -42,7 +42,7 @@ void checkOutputs( if (!info.references.empty()) { auto numViolations = info.references.size(); throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "fixed-output derivations must not reference store paths: '%s' references %d distinct paths, e.g. '%s'", store.printStorePath(drvPath), numViolations, @@ -84,7 +84,7 @@ void checkOutputs( auto applyChecks = [&](const DerivationOptions::OutputChecks & checks) { if (checks.maxSize && info.narSize > *checks.maxSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), info.narSize, @@ -94,7 +94,7 @@ void checkOutputs( uint64_t closureSize = getClosure(info.path).second; if (closureSize > *checks.maxClosureSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "closure of path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), closureSize, @@ -115,7 +115,7 @@ void checkOutputs( std::string outputsListing = concatMapStringsSep(", ", outputs, [](auto & o) { return o.first; }); throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "derivation '%s' output check for '%s' contains an illegal reference specifier '%s'," " expected store path or output name (one of [%s])", store.printStorePath(drvPath), @@ -148,7 +148,7 @@ void checkOutputs( badPathsStr += store.printStorePath(i); } throw BuildError( - BuildResult::OutputRejected, + 
BuildResult::Failure::OutputRejected, "output '%s' is not allowed to refer to the following paths:%s", store.printStorePath(info.path), badPathsStr); diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index b9046744a..5dfc334a8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -94,7 +94,7 @@ Goal::Co DerivationGoal::haveDerivation() /* If they are all valid, then we're done. */ if (checkResult && checkResult->second == PathStatus::Valid && buildMode == bmNormal) { - co_return doneSuccess(BuildResult::AlreadyValid, checkResult->first); + co_return doneSuccess(BuildResult::Success::AlreadyValid, checkResult->first); } Goals waitees; @@ -123,7 +123,7 @@ Goal::Co DerivationGoal::haveDerivation() if (nrFailed > 0 && nrFailed > nrNoSubstituters && !settings.tryFallback) { co_return doneFailure(BuildError( - BuildResult::TransientFailure, + BuildResult::Failure::TransientFailure, "some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ", worker.store.printStorePath(drvPath))); } @@ -135,7 +135,7 @@ Goal::Co DerivationGoal::haveDerivation() bool allValid = checkResult && checkResult->second == PathStatus::Valid; if (buildMode == bmNormal && allValid) { - co_return doneSuccess(BuildResult::Substituted, checkResult->first); + co_return doneSuccess(BuildResult::Success::Substituted, checkResult->first); } if (buildMode == bmRepair && allValid) { co_return repairClosure(); @@ -163,25 +163,27 @@ Goal::Co DerivationGoal::haveDerivation() buildResult = g->buildResult; - if (buildMode == bmCheck) { - /* In checking mode, the builder will not register any outputs. - So we want to make sure the ones that we wanted to check are - properly there. 
*/ - buildResult.builtOutputs = {{wantedOutput, assertPathValidity()}}; - } else { - /* Otherwise the builder will give us info for out output, but - also for other outputs. Filter down to just our output so as - not to leak info on unrelated things. */ - for (auto it = buildResult.builtOutputs.begin(); it != buildResult.builtOutputs.end();) { - if (it->first != wantedOutput) { - it = buildResult.builtOutputs.erase(it); - } else { - ++it; + if (auto * successP = buildResult.tryGetSuccess()) { + auto & success = *successP; + if (buildMode == bmCheck) { + /* In checking mode, the builder will not register any outputs. + So we want to make sure the ones that we wanted to check are + properly there. */ + success.builtOutputs = {{wantedOutput, assertPathValidity()}}; + } else { + /* Otherwise the builder will give us info for out output, but + also for other outputs. Filter down to just our output so as + not to leak info on unrelated things. */ + for (auto it = success.builtOutputs.begin(); it != success.builtOutputs.end();) { + if (it->first != wantedOutput) { + it = success.builtOutputs.erase(it); + } else { + ++it; + } } - } - if (buildResult.success()) - assert(buildResult.builtOutputs.count(wantedOutput) > 0); + assert(success.builtOutputs.count(wantedOutput) > 0); + } } co_return amDone(g->exitCode, g->ex); @@ -279,7 +281,7 @@ Goal::Co DerivationGoal::repairClosure() "some paths in the output closure of derivation '%s' could not be repaired", worker.store.printStorePath(drvPath)); } - co_return doneSuccess(BuildResult::AlreadyValid, assertPathValidity()); + co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } std::optional> DerivationGoal::checkPathValidity() @@ -337,16 +339,16 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, 
Realisation builtOutput) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, + }; mcExpectedBuilds.reset(); - buildResult.builtOutputs = {{wantedOutput, std::move(builtOutput)}}; - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -356,16 +358,18 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation b Goal::Done DerivationGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcExpectedBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 5038a4ea0..205f5c427 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -164,10 +164,11 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation auto & g = *concreteDrvGoals.begin(); buildResult = g->buildResult; - for (auto & g2 : concreteDrvGoals) { - for (auto && [x, y] : g2->buildResult.builtOutputs) - buildResult.builtOutputs.insert_or_assign(x, y); - } + if (auto * successP = buildResult.tryGetSuccess()) + for (auto & g2 : 
concreteDrvGoals) + if (auto * successP2 = g2->buildResult.tryGetSuccess()) + for (auto && [x, y] : successP2->builtOutputs) + successP->builtOutputs.insert_or_assign(x, y); co_return amDone(g->exitCode, g->ex); } diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 1dd540265..4bbd4c8f0 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -82,10 +82,10 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat worker.run(Goals{goal}); return goal->buildResult; } catch (Error & e) { - return BuildResult{ - .status = BuildResult::MiscFailure, + return BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::MiscFailure, .errorMsg = e.msg(), - }; + }}}; }; } diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index d219834f2..d16e530a4 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -27,13 +27,21 @@ PathSubstitutionGoal::~PathSubstitutionGoal() cleanup(); } -Goal::Done PathSubstitutionGoal::done(ExitCode result, BuildResult::Status status, std::optional errorMsg) +Goal::Done PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status) { - buildResult.status = status; - if (errorMsg) { - debug(*errorMsg); - buildResult.errorMsg = *errorMsg; - } + buildResult.inner = BuildResult::Success{ + .status = status, + }; + return amDone(ecSuccess); +} + +Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg) +{ + debug(errorMsg); + buildResult.inner = BuildResult::Failure{ + .status = status, + .errorMsg = std::move(errorMsg), + }; return amDone(result); } @@ -45,7 +53,7 @@ Goal::Co PathSubstitutionGoal::init() /* If the path already exists we're done. 
*/ if (!repair && worker.store.isValidPath(storePath)) { - co_return done(ecSuccess, BuildResult::AlreadyValid); + co_return doneSuccess(BuildResult::Success::AlreadyValid); } if (settings.readOnlyMode) @@ -165,9 +173,9 @@ Goal::Co PathSubstitutionGoal::init() /* Hack: don't indicate failure if there were no substituters. In that case the calling derivation should just do a build. */ - co_return done( + co_return doneFailure( substituterFailed ? ecFailed : ecNoSubstituters, - BuildResult::NoSubstituters, + BuildResult::Failure::NoSubstituters, fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath))); } @@ -178,9 +186,9 @@ Goal::Co PathSubstitutionGoal::tryToRun( trace("all references realised"); if (nrFailed > 0) { - co_return done( + co_return doneFailure( nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed, - BuildResult::DependencyFailed, + BuildResult::Failure::DependencyFailed, fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath))); } @@ -297,7 +305,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( worker.updateProgress(); - co_return done(ecSuccess, BuildResult::Substituted); + co_return doneSuccess(BuildResult::Success::Substituted); } void PathSubstitutionGoal::handleEOF(Descriptor fd) diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 4cb9bf726..844bce840 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -266,7 +266,9 @@ DerivationOptions::getParsedExportReferencesGraph(const StoreDirConfig & store) for (auto & storePathS : ss) { if (!store.isInStore(storePathS)) throw BuildError( - BuildResult::InputRejected, "'exportReferencesGraph' contains a non-store path '%1%'", storePathS); + BuildResult::Failure::InputRejected, + "'exportReferencesGraph' contains a non-store path '%1%'", + storePathS); storePaths.insert(store.toStorePath(storePathS).first); } 
res.insert_or_assign(fileName, storePaths); diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index 1911fef39..0446c4038 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -12,63 +12,121 @@ namespace nix { struct BuildResult { - /** - * @note This is directly used in the nix-store --serve protocol. - * That means we need to worry about compatibility across versions. - * Therefore, don't remove status codes, and only add new status - * codes at the end of the list. - */ - enum Status { - Built = 0, - Substituted = 1, - AlreadyValid = 2, - PermanentFailure = 3, - InputRejected = 4, - OutputRejected = 5, - /// possibly transient - TransientFailure = 6, - /// no longer used - CachedFailure = 7, - TimedOut = 8, - MiscFailure = 9, - DependencyFailed = 10, - LogLimitExceeded = 11, - NotDeterministic = 12, - ResolvesToAlreadyValid = 13, - NoSubstituters = 14, - /// A certain type of `OutputRejected`. The protocols do not yet - /// know about this one, so change it back to `OutputRejected` - /// before serialization. - HashMismatch = 15, - } status = MiscFailure; + struct Success + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Failure::Status`. + */ + enum Status : uint8_t { + Built = 0, + Substituted = 1, + AlreadyValid = 2, + ResolvesToAlreadyValid = 13, + } status; + + /** + * For derivations, a mapping from the names of the wanted outputs + * to actual paths. 
+ */ + SingleDrvOutputs builtOutputs; + + bool operator==(const BuildResult::Success &) const noexcept; + std::strong_ordering operator<=>(const BuildResult::Success &) const noexcept; + + static bool statusIs(uint8_t status) + { + return status == Built || status == Substituted || status == AlreadyValid + || status == ResolvesToAlreadyValid; + } + }; + + struct Failure + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Success::Status`. + */ + enum Status : uint8_t { + PermanentFailure = 3, + InputRejected = 4, + OutputRejected = 5, + /// possibly transient + TransientFailure = 6, + /// no longer used + CachedFailure = 7, + TimedOut = 8, + MiscFailure = 9, + DependencyFailed = 10, + LogLimitExceeded = 11, + NotDeterministic = 12, + NoSubstituters = 14, + /// A certain type of `OutputRejected`. The protocols do not yet + /// know about this one, so change it back to `OutputRejected` + /// before serialization. + HashMismatch = 15, + } status = MiscFailure; + + /** + * Information about the error if the build failed. + * + * @todo This should be an entire ErrorInfo object, not just a + * string, for richer information. + */ + std::string errorMsg; + + /** + * If timesBuilt > 1, whether some builds did not produce the same + * result. (Note that 'isNonDeterministic = false' does not mean + * the build is deterministic, just that we don't have evidence of + * non-determinism.) + */ + bool isNonDeterministic = false; + + bool operator==(const BuildResult::Failure &) const noexcept; + std::strong_ordering operator<=>(const BuildResult::Failure &) const noexcept; + + [[noreturn]] void rethrow() const + { + throw Error("%s", errorMsg); + } + }; + + std::variant inner = Failure{}; /** - * Information about the error if the build failed. 
- * - * @todo This should be an entire ErrorInfo object, not just a - * string, for richer information. + * Convenience wrapper to avoid a longer `std::get_if` usage by the + * caller (which will have to add more `BuildResult::` than we do + * below also, do note.) */ - std::string errorMsg; + auto * tryGetSuccess(this auto & self) + { + return std::get_if(&self.inner); + } + + /** + * Convenience wrapper to avoid a longer `std::get_if` usage by the + * caller (which will have to add more `BuildResult::` than we do + * below also, do note.) + */ + auto * tryGetFailure(this auto & self) + { + return std::get_if(&self.inner); + } /** * How many times this build was performed. */ unsigned int timesBuilt = 0; - /** - * If timesBuilt > 1, whether some builds did not produce the same - * result. (Note that 'isNonDeterministic = false' does not mean - * the build is deterministic, just that we don't have evidence of - * non-determinism.) - */ - bool isNonDeterministic = false; - - /** - * For derivations, a mapping from the names of the wanted outputs - * to actual paths. - */ - SingleDrvOutputs builtOutputs; - /** * The start/stop times of the build (or one of the rounds, if it * was repeated). @@ -82,16 +140,6 @@ struct BuildResult bool operator==(const BuildResult &) const noexcept; std::strong_ordering operator<=>(const BuildResult &) const noexcept; - - bool success() - { - return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid; - } - - void rethrow() - { - throw Error("%s", errorMsg); - } }; /** @@ -99,15 +147,9 @@ struct BuildResult */ struct BuildError : public Error { - BuildResult::Status status; + BuildResult::Failure::Status status; - BuildError(BuildResult::Status status, BuildError && error) - : Error{std::move(error)} - , status{status} - { - } - - BuildError(BuildResult::Status status, auto &&... args) + BuildError(BuildResult::Failure::Status status, auto &&... 
args) : Error{args...} , status{status} { diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 7fad2837a..63ef2b665 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -22,7 +22,7 @@ struct BuilderFailureError : BuildError std::string extraMsgAfter; - BuilderFailureError(BuildResult::Status status, int builderStatus, std::string extraMsgAfter) + BuilderFailureError(BuildResult::Failure::Status status, int builderStatus, std::string extraMsgAfter) : BuildError{ status, /* No message for now, because the caller will make for diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index d394eb3c9..edb496024 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -147,7 +147,7 @@ private: */ void killChild(); - Done doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs); + Done doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs); Done doneFailure(BuildError ex); diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 85b471e28..e05bf1c0b 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -99,7 +99,7 @@ private: Co repairClosure(); - Done doneSuccess(BuildResult::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 9fc6450b1..5f6cb6a18 100644 --- 
a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -41,7 +41,9 @@ struct PathSubstitutionGoal : public Goal */ std::optional ca; - Done done(ExitCode result, BuildResult::Status status, std::optional errorMsg = {}); + Done doneSuccess(BuildResult::Success::Status status); + + Done doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg); public: PathSubstitutionGoal( diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index f935de206..3b466c9bb 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -241,12 +241,13 @@ void LegacySSHStore::buildPaths( conn->to.flush(); - BuildResult result; - result.status = (BuildResult::Status) readInt(conn->from); - - if (!result.success()) { - conn->from >> result.errorMsg; - throw Error(result.status, result.errorMsg); + auto status = readInt(conn->from); + if (!BuildResult::Success::statusIs(status)) { + BuildResult::Failure failure{ + .status = (BuildResult::Failure::Status) status, + }; + conn->from >> failure.errorMsg; + throw Error(failure.status, std::move(failure.errorMsg)); } } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 4cadf5282..ebc987ee0 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -997,7 +997,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) }}, {[&](const StorePath & path, const StorePath & parent) { return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index c5e1747c1..7efaa4f86 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -322,7 +322,7 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths) }}, {[&](const StorePath & path, const StorePath & 
parent) { return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); diff --git a/src/libstore/posix-fs-canonicalise.cc b/src/libstore/posix-fs-canonicalise.cc index b6a64e65b..a274468c3 100644 --- a/src/libstore/posix-fs-canonicalise.cc +++ b/src/libstore/posix-fs-canonicalise.cc @@ -98,7 +98,7 @@ static void canonicalisePathMetaData_( (i.e. "touch $out/foo; ln $out/foo $out/bar"). */ if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) { if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino))) - throw BuildError(BuildResult::OutputRejected, "invalid ownership on file '%1%'", path); + throw BuildError(BuildResult::Failure::OutputRejected, "invalid ownership on file '%1%'", path); mode_t mode = st.st_mode & ~S_IFMT; assert( S_ISLNK(st.st_mode) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index bb7425081..a6994f844 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -598,16 +598,15 @@ std::vector RemoteStore::buildPathsWithResults( [&](const DerivedPath::Opaque & bo) { results.push_back( KeyedBuildResult{ - { - .status = BuildResult::Substituted, - }, + {.inner{BuildResult::Success{ + .status = BuildResult::Success::Substituted, + }}}, /* .path = */ bo, }); }, [&](const DerivedPath::Built & bfd) { - KeyedBuildResult res{ - {.status = BuildResult::Built}, - /* .path = */ bfd, + BuildResult::Success success{ + .status = BuildResult::Success::Built, }; OutputPathMap outputs; @@ -627,9 +626,9 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - res.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, *realisation); } else { - res.builtOutputs.emplace( + success.builtOutputs.emplace( output, Realisation{ .id = 
outputId, @@ -638,7 +637,11 @@ std::vector RemoteStore::buildPathsWithResults( } } - results.push_back(res); + results.push_back( + KeyedBuildResult{ + {.inner = std::move(success)}, + /* .path = */ bfd, + }); }}, path.raw()); } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index e0f43ab6c..a1cb41606 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -257,8 +257,8 @@ void RestrictedStore::buildPaths( const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) { for (auto & result : buildPathsWithResults(paths, buildMode, evalStore)) - if (!result.success()) - result.rethrow(); + if (auto * failureP = result.tryGetFailure()) + failureP->rethrow(); } std::vector RestrictedStore::buildPathsWithResults( @@ -280,9 +280,11 @@ std::vector RestrictedStore::buildPathsWithResults( auto results = next->buildPathsWithResults(paths, buildMode); for (auto & result : results) { - for (auto & [outputName, output] : result.builtOutputs) { - newPaths.insert(output.outPath); - newRealisations.insert(output); + if (auto * successP = result.tryGetSuccess()) { + for (auto & [outputName, output] : successP->builtOutputs) { + newPaths.insert(output.outPath); + newRealisations.insert(output); + } } } diff --git a/src/libstore/serve-protocol.cc b/src/libstore/serve-protocol.cc index 7cf5e6997..51b575fcd 100644 --- a/src/libstore/serve-protocol.cc +++ b/src/libstore/serve-protocol.cc @@ -16,32 +16,62 @@ namespace nix { BuildResult ServeProto::Serialise::read(const StoreDirConfig & store, ServeProto::ReadConn conn) { BuildResult status; - status.status = (BuildResult::Status) readInt(conn.from); - conn.from >> status.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime; + 
conn.from >> status.timesBuilt >> failure.isNonDeterministic >> status.startTime >> status.stopTime; if (GET_PROTOCOL_MINOR(conn.version) >= 6) { auto builtOutputs = ServeProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - status.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + status.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + status.inner = std::move(failure); + } + return status; } void ServeProto::Serialise::write( - const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & status) + const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & res) { - conn.to << status.status << status.errorMsg; - - if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.to << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime; - if (GET_PROTOCOL_MINOR(conn.version) >= 6) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : status.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - ServeProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. 
*/ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 3) + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + if (GET_PROTOCOL_MINOR(conn.version) >= 6) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + ServeProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } UnkeyedValidPathInfo ServeProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index a0b06db54..56dffe19d 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -764,7 +764,7 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor for (auto & storePath : storePaths) { if (!inputPaths.count(storePath)) throw BuildError( - BuildResult::InputRejected, + BuildResult::Failure::InputRejected, "cannot export references of path '%s' because it is not in the input closure of the derivation", printStorePath(storePath)); diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 770bdad4d..d765de562 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -50,7 +50,7 @@ namespace nix { struct NotDeterministic : BuildError { NotDeterministic(auto &&... args) - : BuildError(BuildResult::NotDeterministic, args...) 
+ : BuildError(BuildResult::Failure::NotDeterministic, args...) { } }; @@ -518,7 +518,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() cleanupBuild(false); throw BuilderFailureError{ - !derivationType.isSandboxed() || diskFull ? BuildResult::TransientFailure : BuildResult::PermanentFailure, + !derivationType.isSandboxed() || diskFull ? BuildResult::Failure::TransientFailure + : BuildResult::Failure::PermanentFailure, status, diskFull ? "\nnote: build failure may have been caused by lack of free disk space" : "", }; @@ -700,7 +701,7 @@ std::optional DerivationBuilderImpl::startBuild() fmt("\nNote: run `%s` to run programs for x86_64-darwin", Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); - throw BuildError(BuildResult::InputRejected, msg); + throw BuildError(BuildResult::Failure::InputRejected, msg); } auto buildDir = store.config->getBuildDir(); @@ -1389,7 +1390,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto optSt = maybeLstat(actualPath.c_str()); if (!optSt) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "builder for '%s' failed to produce output path for output '%s' at '%s'", store.printStorePath(drvPath), outputName, @@ -1404,7 +1405,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) || (buildUser && st.st_uid != buildUser->getUID())) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "suspicious ownership or permission on '%s' for output '%s'; rejecting this build output", actualPath, outputName); @@ -1442,7 +1443,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto orifu = get(outputReferencesIfUnregistered, name); if (!orifu) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "no output reference for '%s' in build of '%s'", name, store.printStorePath(drvPath)); @@ -1467,7 
+1468,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() {[&](const std::string & path, const std::string & parent) { // TODO with more -vvvv also show the temporary paths for manual inspection. return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in build of '%s' in the references of output '%s' from output '%s'", store.printStorePath(drvPath), path, @@ -1561,12 +1562,13 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto newInfoFromCA = [&](const DerivationOutput::CAFloating outputHash) -> ValidPathInfo { auto st = get(outputStats, outputName); if (!st) - throw BuildError(BuildResult::OutputRejected, "output path %1% without valid stats info", actualPath); + throw BuildError( + BuildResult::Failure::OutputRejected, "output path %1% without valid stats info", actualPath); if (outputHash.method.getFileIngestionMethod() == FileIngestionMethod::Flat) { /* The output path should be a regular file without execute permission. 
*/ if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "output path '%1%' should be a non-executable regular file " "since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)", actualPath); diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 1bbff64a2..4f7c28409 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -165,10 +165,14 @@ void WorkerProto::Serialise::write( BuildResult WorkerProto::Serialise::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { BuildResult res; - res.status = static_cast(readInt(conn.from)); - conn.from >> res.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime; + conn.from >> res.timesBuilt >> failure.isNonDeterministic >> res.startTime >> res.stopTime; } if (GET_PROTOCOL_MINOR(conn.version) >= 37) { res.cpuUser = WorkerProto::Serialise>::read(store, conn); @@ -177,28 +181,56 @@ BuildResult WorkerProto::Serialise::read(const StoreDirConfig & sto if (GET_PROTOCOL_MINOR(conn.version) >= 28) { auto builtOutputs = WorkerProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - res.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + res.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + res.inner = std::move(failure); + } + return res; } void WorkerProto::Serialise::write( const StoreDirConfig & store, WorkerProto::WriteConn conn, 
const BuildResult & res) { - conn.to << res.status << res.errorMsg; - if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime; - } - if (GET_PROTOCOL_MINOR(conn.version) >= 37) { - WorkerProto::write(store, conn, res.cpuUser); - WorkerProto::write(store, conn, res.cpuSystem); - } - if (GET_PROTOCOL_MINOR(conn.version) >= 28) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : res.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - WorkerProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. */ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + } + if (GET_PROTOCOL_MINOR(conn.version) >= 37) { + WorkerProto::write(store, conn, res.cpuUser); + WorkerProto::write(store, conn, res.cpuSystem); + } + if (GET_PROTOCOL_MINOR(conn.version) >= 28) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + WorkerProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } 
ValidPathInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) diff --git a/src/nix/build-remote/build-remote.cc b/src/nix/build-remote/build-remote.cc index 11df8cc5e..ffb77ddf1 100644 --- a/src/nix/build-remote/build-remote.cc +++ b/src/nix/build-remote/build-remote.cc @@ -324,7 +324,7 @@ static int main_build_remote(int argc, char ** argv) drv.inputSrcs = store->parseStorePathSet(inputs); optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv); auto & result = *optResult; - if (!result.success()) { + if (auto * failureP = result.tryGetFailure()) { if (settings.keepFailed) { warn( "The failed build directory was kept on the remote builder due to `--keep-failed`.%s", @@ -333,7 +333,7 @@ static int main_build_remote(int argc, char ** argv) : ""); } throw Error( - "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg); + "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, failureP->errorMsg); } } else { copyClosure(*store, *sshStore, StorePathSet{*drvPath}, NoRepair, NoCheckSigs, substitute); @@ -357,11 +357,14 @@ static int main_build_remote(int argc, char ** argv) debug("missing output %s", outputName); assert(optResult); auto & result = *optResult; - auto i = result.builtOutputs.find(outputName); - assert(i != result.builtOutputs.end()); - auto & newRealisation = i->second; - missingRealisations.insert(newRealisation); - missingPaths.insert(newRealisation.outPath); + if (auto * successP = result.tryGetSuccess()) { + auto & success = *successP; + auto i = success.builtOutputs.find(outputName); + assert(i != success.builtOutputs.end()); + auto & newRealisation = i->second; + missingRealisations.insert(newRealisation); + missingPaths.insert(newRealisation.outPath); + } } } } else { diff --git a/tests/functional/test-libstoreconsumer/main.cc b/tests/functional/test-libstoreconsumer/main.cc index d8db67a4d..5b0132934 100644 --- 
a/tests/functional/test-libstoreconsumer/main.cc +++ b/tests/functional/test-libstoreconsumer/main.cc @@ -34,8 +34,10 @@ int main(int argc, char ** argv) const auto results = store->buildPathsWithResults(paths, bmNormal, store); for (const auto & result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - std::cout << store->printStorePath(realisation.outPath) << "\n"; + if (auto * successP = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : successP->builtOutputs) { + std::cout << store->printStorePath(realisation.outPath) << "\n"; + } } } From 121dda0f1f5fbb861ca38d7225b8923ee53337b5 Mon Sep 17 00:00:00 2001 From: Ephraim Siegfried Date: Mon, 29 Sep 2025 14:07:26 +0200 Subject: [PATCH 010/373] docs: fix build command in make-content-addressed.md --- src/nix/make-content-addressed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index b1f7da525..e6a51c83a 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -51,7 +51,7 @@ be verified without any additional information such as signatures. 
This means that a command like ```console -# nix store build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ +# nix build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ --substituters https://my-cache.example.org ``` From 020f67a653fc6cf67bc16585d2969af624bd694a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:14:41 -0700 Subject: [PATCH 011/373] shellcheck fix: tests/functional/flakes/prefetch.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/prefetch.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8c84d0517..a3e126d3f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/prefetch\.sh$'' ''^tests/functional/flakes/run\.sh$'' ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' diff --git a/tests/functional/flakes/prefetch.sh b/tests/functional/flakes/prefetch.sh index a451b7120..999270c1e 100755 --- a/tests/functional/flakes/prefetch.sh +++ b/tests/functional/flakes/prefetch.sh @@ -3,6 +3,6 @@ source common.sh # Test symlinks in zip files (#10649). 
-path=$(nix flake prefetch --json file://$(pwd)/tree.zip | jq -r .storePath) -[[ $(cat $path/foo) = foo ]] -[[ $(readlink $path/bar) = foo ]] +path=$(nix flake prefetch --json file://"$(pwd)"/tree.zip | jq -r .storePath) +[[ $(cat "$path"/foo) = foo ]] +[[ $(readlink "$path"/bar) = foo ]] From cb22518754b553d0d830e48a7caea26c48cb345a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:15:11 -0700 Subject: [PATCH 012/373] shellcheck fix: tests/functional/flakes/run.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/run.sh | 20 ++++++++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a3e126d3f..f5ac5c489 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/run\.sh$'' ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' ''^tests/functional/formatter\.simple\.sh$'' diff --git a/tests/functional/flakes/run.sh b/tests/functional/flakes/run.sh index 0a2947825..107b3dfb8 100755 --- a/tests/functional/flakes/run.sh +++ b/tests/functional/flakes/run.sh @@ -5,10 +5,10 @@ source ../common.sh TODO_NixOS clearStore -rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local +rm -rf "$TEST_HOME"/.cache "$TEST_HOME"/.config "$TEST_HOME"/.local -cp ../shell-hello.nix "${config_nix}" $TEST_HOME -cd $TEST_HOME +cp ../shell-hello.nix "${config_nix}" "$TEST_HOME" +cd "$TEST_HOME" cat < flake.nix { @@ -34,8 +34,8 @@ nix run --no-write-lock-file .#pkgAsPkg # For instance, we might set an environment variable temporarily to affect some # initialization or whatnot, but this must not leak into the environment of the # command being run. 
-env > $TEST_ROOT/expected-env -nix run -f shell-hello.nix env > $TEST_ROOT/actual-env +env > "$TEST_ROOT"/expected-env +nix run -f shell-hello.nix env > "$TEST_ROOT"/actual-env # Remove/reset variables we expect to be different. # - PATH is modified by nix shell # - we unset TMPDIR on macOS if it contains /var/folders. bad. https://github.com/NixOS/nix/issues/7731 @@ -48,12 +48,12 @@ sed -i \ -e '/^TMPDIR=\/var\/folders\/.*/d' \ -e '/^__CF_USER_TEXT_ENCODING=.*$/d' \ -e '/^__LLVM_PROFILE_RT_INIT_ONCE=.*$/d' \ - $TEST_ROOT/expected-env $TEST_ROOT/actual-env -sort $TEST_ROOT/expected-env | uniq > $TEST_ROOT/expected-env.sorted + "$TEST_ROOT"/expected-env "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/expected-env | uniq > "$TEST_ROOT"/expected-env.sorted # nix run appears to clear _. I don't understand why. Is this ok? -echo "_=..." >> $TEST_ROOT/actual-env -sort $TEST_ROOT/actual-env | uniq > $TEST_ROOT/actual-env.sorted -diff $TEST_ROOT/expected-env.sorted $TEST_ROOT/actual-env.sorted +echo "_=..." 
>> "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/actual-env | uniq > "$TEST_ROOT"/actual-env.sorted +diff "$TEST_ROOT"/expected-env.sorted "$TEST_ROOT"/actual-env.sorted clearStore From f596c9b8c392e2a67d9fe5a6701ccaec5df18a24 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:16:29 -0700 Subject: [PATCH 013/373] shellcheck fix: tests/functional/flakes/show.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/show.sh | 9 +++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f5ac5c489..8350fea5c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' ''^tests/functional/formatter\.simple\.sh$'' ''^tests/functional/gc-auto\.sh$'' diff --git a/tests/functional/flakes/show.sh b/tests/functional/flakes/show.sh index 7fcc6aca9..a08db115a 100755 --- a/tests/functional/flakes/show.sh +++ b/tests/functional/flakes/show.sh @@ -12,6 +12,7 @@ pushd "$flakeDir" # By default: Only show the packages content for the current system and no # legacyPackages at all nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -23,6 +24,7 @@ true # With `--all-systems`, show the packages for all systems nix flake show --json --all-systems > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -33,6 +35,7 @@ true # With `--legacy`, show the legacy packages nix flake show --json --legacy > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -80,6 +83,7 @@ cat >flake.nix < 
show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -91,11 +95,12 @@ true # Test that nix flake show doesn't fail if one of the outputs contains # an IFD popd -writeIfdFlake $flakeDir -pushd $flakeDir +writeIfdFlake "$flakeDir" +pushd "$flakeDir" nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in From 08a82f46821e7c875dc6d39a75bec82c633043db Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:17:24 -0700 Subject: [PATCH 014/373] shellcheck fix: tests/functional/formatter.simple.sh --- maintainers/flake-module.nix | 2 -- tests/functional/formatter.sh | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8350fea5c..12732bf90 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/formatter\.sh$'' - ''^tests/functional/formatter\.simple\.sh$'' ''^tests/functional/gc-auto\.sh$'' ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' diff --git a/tests/functional/formatter.sh b/tests/functional/formatter.sh index 6631dd6b8..03b31708d 100755 --- a/tests/functional/formatter.sh +++ b/tests/functional/formatter.sh @@ -16,6 +16,7 @@ nix fmt --help | grep "reformat your code" nix fmt run --help | grep "reformat your code" nix fmt build --help | grep "build" +# shellcheck disable=SC2154 cat << EOF > flake.nix { outputs = _: { From 4192ca9131ce93ac51cde4110dfc4b1bf251e243 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:18:50 -0700 Subject: [PATCH 015/373] shellcheck fix: tests/functional/gc-auto.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-auto.sh | 22 
+++++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 12732bf90..51ac3a629 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-auto\.sh$'' ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' diff --git a/tests/functional/gc-auto.sh b/tests/functional/gc-auto.sh index efe3e4b2b..ea877f27f 100755 --- a/tests/functional/gc-auto.sh +++ b/tests/functional/gc-auto.sh @@ -2,22 +2,26 @@ source common.sh +# shellcheck disable=SC1111 needLocalStore "“min-free” and “max-free” are daemon options" TODO_NixOS clearStore +# shellcheck disable=SC2034 garbage1=$(nix store add-path --name garbage1 ./nar-access.sh) +# shellcheck disable=SC2034 garbage2=$(nix store add-path --name garbage2 ./nar-access.sh) +# shellcheck disable=SC2034 garbage3=$(nix store add-path --name garbage3 ./nar-access.sh) -ls -l $garbage3 -POSIXLY_CORRECT=1 du $garbage3 +ls -l "$garbage3" +POSIXLY_CORRECT=1 du "$garbage3" fake_free=$TEST_ROOT/fake-free export _NIX_TEST_FREE_SPACE_FILE=$fake_free -echo 1100 > $fake_free +echo 1100 > "$fake_free" fifoLock=$TEST_ROOT/fifoLock mkfifo "$fifoLock" @@ -65,11 +69,11 @@ with import ${config_nix}; mkDerivation { EOF ) -nix build --impure -v -o $TEST_ROOT/result-A -L --expr "$expr" \ +nix build --impure -v -o "$TEST_ROOT"/result-A -L --expr "$expr" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid1=$! -nix build --impure -v -o $TEST_ROOT/result-B -L --expr "$expr2" \ +nix build --impure -v -o "$TEST_ROOT"/result-B -L --expr "$expr2" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid2=$! @@ -77,9 +81,9 @@ pid2=$! 
# If the first build fails, we need to postpone the failure to still allow # the second one to finish wait "$pid1" || FIRSTBUILDSTATUS=$? -echo "unlock" > $fifoLock -( exit ${FIRSTBUILDSTATUS:-0} ) +echo "unlock" > "$fifoLock" +( exit "${FIRSTBUILDSTATUS:-0}" ) wait "$pid2" -[[ foo = $(cat $TEST_ROOT/result-A/bar) ]] -[[ foo = $(cat $TEST_ROOT/result-B/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-A/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-B/bar) ]] From 613bd67574c1455577b70ba435bcbfcc8329e13b Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:20:02 -0700 Subject: [PATCH 016/373] shellcheck fix: tests/functional/gc-concurrent.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent.builder.sh | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 51ac3a629..65c94c415 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' diff --git a/tests/functional/gc-concurrent.builder.sh b/tests/functional/gc-concurrent.builder.sh index bb6dcd4cf..b3c7abeb1 100644 --- a/tests/functional/gc-concurrent.builder.sh +++ b/tests/functional/gc-concurrent.builder.sh @@ -1,16 +1,19 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "Build started" > "$lockFifo" -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar +# shellcheck disable=SC2154 +mkdir "$out" +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)" > "$out"/foobar # Wait for someone to write on the fifo cat "$lockFifo" # $out should not have been GC'ed while we were sleeping, but just in # case... 
-mkdir -p $out +mkdir -p "$out" # Check that the GC hasn't deleted the lock on our output. test -e "$out.lock" -ln -s $input2 $out/input-2 +ln -s "$input2" "$out"/input-2 From 75df03204b2505e1132fa67a45ae589239ccdaec Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:21:47 -0700 Subject: [PATCH 017/373] shellcheck fix: tests/functional/gc-concurrent.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent.sh | 34 +++++++++++++++++-------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 65c94c415..a2edadebb 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' diff --git a/tests/functional/gc-concurrent.sh b/tests/functional/gc-concurrent.sh index df180b14f..dcfcea3e9 100755 --- a/tests/functional/gc-concurrent.sh +++ b/tests/functional/gc-concurrent.sh @@ -10,54 +10,58 @@ lockFifo1=$TEST_ROOT/test1.fifo mkfifo "$lockFifo1" drvPath1=$(nix-instantiate gc-concurrent.nix -A test1 --argstr lockFifo "$lockFifo1") -outPath1=$(nix-store -q $drvPath1) +outPath1=$(nix-store -q "$drvPath1") drvPath2=$(nix-instantiate gc-concurrent.nix -A test2) -outPath2=$(nix-store -q $drvPath2) +outPath2=$(nix-store -q "$drvPath2") drvPath3=$(nix-instantiate simple.nix) -outPath3=$(nix-store -r $drvPath3) +outPath3=$(nix-store -r "$drvPath3") -(! test -e $outPath3.lock) -touch $outPath3.lock +# shellcheck disable=SC2235 +(! 
test -e "$outPath3".lock) +touch "$outPath3".lock rm -f "$NIX_STATE_DIR"/gcroots/foo* -ln -s $drvPath2 "$NIX_STATE_DIR/gcroots/foo" -ln -s $outPath3 "$NIX_STATE_DIR/gcroots/foo2" +ln -s "$drvPath2" "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath3" "$NIX_STATE_DIR/gcroots/foo2" # Start build #1 in the background. It starts immediately. nix-store -rvv "$drvPath1" & pid1=$! # Wait for the build of $drvPath1 to start -cat $lockFifo1 +cat "$lockFifo1" # Run the garbage collector while the build is running. nix-collect-garbage # Unlock the build of $drvPath1 -echo "" > $lockFifo1 +echo "" > "$lockFifo1" echo waiting for pid $pid1 to finish... wait $pid1 # Check that the root of build #1 and its dependencies haven't been # deleted. The should not be deleted by the GC because they were # being built during the GC. -cat $outPath1/foobar -cat $outPath1/input-2/bar +cat "$outPath1"/foobar +cat "$outPath1"/input-2/bar # Check that the build build $drvPath2 succeeds. # It should succeed because the derivation is a GC root. nix-store -rvv "$drvPath2" -cat $outPath2/foobar +cat "$outPath2"/foobar rm -f "$NIX_STATE_DIR"/gcroots/foo* # The collector should have deleted lock files for paths that have # been built previously. -(! test -e $outPath3.lock) +# shellcheck disable=SC2235 +(! test -e "$outPath3".lock) # If we run the collector now, it should delete outPath1/2. nix-collect-garbage -(! test -e $outPath1) -(! test -e $outPath2) +# shellcheck disable=SC2235 +(! test -e "$outPath1") +# shellcheck disable=SC2235 +(! 
test -e "$outPath2") From 2e5952fb6aed7015af50f09a1c60f94cd0649f22 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:22:45 -0700 Subject: [PATCH 018/373] shellcheck fix: tests/functional/gc-concurrent2.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent2.builder.sh | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a2edadebb..dd7d1d338 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' diff --git a/tests/functional/gc-concurrent2.builder.sh b/tests/functional/gc-concurrent2.builder.sh index 4f6c58b96..4b1ad6f5e 100644 --- a/tests/functional/gc-concurrent2.builder.sh +++ b/tests/functional/gc-concurrent2.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar)xyzzy > $out/foobar +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +# shellcheck disable=SC2154 +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)"xyzzy > "$out"/foobar # Check that the GC hasn't deleted the lock on our output. 
test -e "$out.lock" From 52b9fb38e0dfc0af226a25d21197b40fa44e6c78 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:23:41 -0700 Subject: [PATCH 019/373] shellcheck fix: tests/functional/gc-non-blocking.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-non-blocking.sh | 14 ++++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index dd7d1d338..b080683ff 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' diff --git a/tests/functional/gc-non-blocking.sh b/tests/functional/gc-non-blocking.sh index 9cd5c0e1c..a85b8e5db 100755 --- a/tests/functional/gc-non-blocking.sh +++ b/tests/functional/gc-non-blocking.sh @@ -23,17 +23,17 @@ mkfifo "$fifo2" dummy=$(nix store add-path ./simple.nix) running=$TEST_ROOT/running -touch $running +touch "$running" # Start GC. -(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm $running) & +(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm "$running") & pid=$! sleep 2 # Delay the start of the root server to check that the build below # correctly handles ENOENT when connecting to the root server. -(sleep 1; echo > $fifo1) & +(sleep 1; echo > "$fifo1") & pid2=$! # Start a build. This should not be blocked by the GC in progress. @@ -47,6 +47,8 @@ outPath=$(nix-build --max-silent-time 60 -o "$TEST_ROOT/result" -E " wait $pid wait $pid2 -(! test -e $running) -(! test -e $dummy) -test -e $outPath +# shellcheck disable=SC2235 +(! test -e "$running") +# shellcheck disable=SC2235 +(! 
test -e "$dummy") +test -e "$outPath" From 745d1f95191c90f46032c607bb07037ef2d614cb Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:11:29 -0700 Subject: [PATCH 020/373] shellcheck fix: tests/functional/ca/build-delete.sh --- maintainers/flake-module.nix | 1 - tests/functional/ca/build-delete.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8c84d0517..5ba8aa505 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -172,7 +172,6 @@ # Content-addressed test files that use recursive-*looking* sourcing # (cd .. && source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/build-delete\.sh$'' ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/eval-store\.sh$'' ''^tests/functional/ca/gc\.sh$'' diff --git a/tests/functional/ca/build-delete.sh b/tests/functional/ca/build-delete.sh index 3ad3d0a80..173cfb224 100644 --- a/tests/functional/ca/build-delete.sh +++ b/tests/functional/ca/build-delete.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./build-delete.sh From 5846d9d4dcdbe7604c34c046c075344a9859abc7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:12:04 -0700 Subject: [PATCH 021/373] shellcheck fix: tests/functional/ca/build-dry.sh --- maintainers/flake-module.nix | 1 - tests/functional/ca/build-dry.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5ba8aa505..5a92e624f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -172,7 +172,6 @@ # Content-addressed test files that use recursive-*looking* sourcing # (cd .. 
&& source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/eval-store\.sh$'' ''^tests/functional/ca/gc\.sh$'' ''^tests/functional/ca/import-from-derivation\.sh$'' diff --git a/tests/functional/ca/build-dry.sh b/tests/functional/ca/build-dry.sh index 9a72075ec..0b8b959ea 100644 --- a/tests/functional/ca/build-dry.sh +++ b/tests/functional/ca/build-dry.sh @@ -2,5 +2,6 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 +# shellcheck source=/dev/null cd .. && source build-dry.sh From 4232cb045afba8f5dfba2231525a638ec0c0ae67 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:13:58 -0700 Subject: [PATCH 022/373] Remaining functional/ca tests for shellcheck --- maintainers/flake-module.nix | 15 --------------- tests/functional/ca/build-dry.sh | 1 + tests/functional/ca/eval-store.sh | 1 + tests/functional/ca/gc.sh | 1 + tests/functional/ca/import-from-derivation.sh | 2 +- tests/functional/ca/multiple-outputs.sh | 1 + tests/functional/ca/new-build-cmd.sh | 1 + tests/functional/ca/nix-shell.sh | 2 ++ tests/functional/ca/post-hook.sh | 1 + tests/functional/ca/recursive.sh | 1 + tests/functional/ca/repl.sh | 2 +- tests/functional/ca/selfref-gc.sh | 1 + tests/functional/ca/why-depends.sh | 2 +- 13 files changed, 13 insertions(+), 18 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5a92e624f..7752ee2ce 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -168,21 +168,6 @@ ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' - - # Content-addressed test files that use recursive-*looking* sourcing - # (cd .. 
&& source ), causing shellcheck to loop - # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/eval-store\.sh$'' - ''^tests/functional/ca/gc\.sh$'' - ''^tests/functional/ca/import-from-derivation\.sh$'' - ''^tests/functional/ca/multiple-outputs\.sh$'' - ''^tests/functional/ca/new-build-cmd\.sh$'' - ''^tests/functional/ca/nix-shell\.sh$'' - ''^tests/functional/ca/post-hook\.sh$'' - ''^tests/functional/ca/recursive\.sh$'' - ''^tests/functional/ca/repl\.sh$'' - ''^tests/functional/ca/selfref-gc\.sh$'' - ''^tests/functional/ca/why-depends\.sh$'' ]; }; }; diff --git a/tests/functional/ca/build-dry.sh b/tests/functional/ca/build-dry.sh index 0b8b959ea..44bd7202b 100644 --- a/tests/functional/ca/build-dry.sh +++ b/tests/functional/ca/build-dry.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 diff --git a/tests/functional/ca/eval-store.sh b/tests/functional/ca/eval-store.sh index 9cc499606..0ffdef839 100644 --- a/tests/functional/ca/eval-store.sh +++ b/tests/functional/ca/eval-store.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source eval-store.sh diff --git a/tests/functional/ca/gc.sh b/tests/functional/ca/gc.sh index e9b6c5ab5..26b037f64 100755 --- a/tests/functional/ca/gc.sh +++ b/tests/functional/ca/gc.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source gc.sh diff --git a/tests/functional/ca/import-from-derivation.sh b/tests/functional/ca/import-from-derivation.sh index 708d2fc78..a3101cc3f 100644 --- a/tests/functional/ca/import-from-derivation.sh +++ b/tests/functional/ca/import-from-derivation.sh @@ -3,6 +3,6 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. 
&& source import-from-derivation.sh diff --git a/tests/functional/ca/multiple-outputs.sh b/tests/functional/ca/multiple-outputs.sh index 63b7d3197..e4e05b5f5 100644 --- a/tests/functional/ca/multiple-outputs.sh +++ b/tests/functional/ca/multiple-outputs.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./multiple-outputs.sh diff --git a/tests/functional/ca/new-build-cmd.sh b/tests/functional/ca/new-build-cmd.sh index 408bfb0f6..e5cb644d1 100644 --- a/tests/functional/ca/new-build-cmd.sh +++ b/tests/functional/ca/new-build-cmd.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./build.sh diff --git a/tests/functional/ca/nix-shell.sh b/tests/functional/ca/nix-shell.sh index 7b30b2ac8..05115c126 100755 --- a/tests/functional/ca/nix-shell.sh +++ b/tests/functional/ca/nix-shell.sh @@ -2,6 +2,8 @@ source common.sh +# shellcheck disable=SC2034 NIX_TESTS_CA_BY_DEFAULT=true cd .. +# shellcheck source=/dev/null source ./nix-shell.sh diff --git a/tests/functional/ca/post-hook.sh b/tests/functional/ca/post-hook.sh index 705bde9d4..e1adffc47 100755 --- a/tests/functional/ca/post-hook.sh +++ b/tests/functional/ca/post-hook.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan "2.4pre20210626" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./post-hook.sh diff --git a/tests/functional/ca/recursive.sh b/tests/functional/ca/recursive.sh index cd6736b24..e3fb98ab2 100755 --- a/tests/functional/ca/recursive.sh +++ b/tests/functional/ca/recursive.sh @@ -6,4 +6,5 @@ requireDaemonNewerThan "2.4pre20210623" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. 
+# shellcheck source=/dev/null source ./recursive.sh diff --git a/tests/functional/ca/repl.sh b/tests/functional/ca/repl.sh index 0bbbebd85..f96ecfcf2 100644 --- a/tests/functional/ca/repl.sh +++ b/tests/functional/ca/repl.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source repl.sh diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 248778894..7ac9ec9f7 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -8,4 +8,5 @@ enableFeatures "ca-derivations nix-command flakes" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./selfref-gc.sh diff --git a/tests/functional/ca/why-depends.sh b/tests/functional/ca/why-depends.sh index 0af8a5440..2a3c7d083 100644 --- a/tests/functional/ca/why-depends.sh +++ b/tests/functional/ca/why-depends.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. 
&& source why-depends.sh From f3a2876c3a830bfc073ebd11f725657e03e98935 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:24:43 -0700 Subject: [PATCH 023/373] shellcheck fix: tests/functional/hash-convert.sh --- maintainers/flake-module.nix | 1 - tests/functional/hash-convert.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index b080683ff..3bf41bc14 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' diff --git a/tests/functional/hash-convert.sh b/tests/functional/hash-convert.sh index c40cb469c..9ef4c189d 100755 --- a/tests/functional/hash-convert.sh +++ b/tests/functional/hash-convert.sh @@ -99,7 +99,7 @@ try3() { expectStderr 1 nix hash convert --hash-algo "$1" --from nix32 "$4" | grepQuiet "input hash" # Base-16 hashes can be in uppercase. 
- nix hash convert --hash-algo "$1" --from base16 "$(echo $2 | tr [a-z] [A-Z])" + nix hash convert --hash-algo "$1" --from base16 "$(echo "$2" | tr '[:lower:]' '[:upper:]')" } try3 sha1 "800d59cfcd3c05e900cb4e214be48f6b886a08df" "vw46m23bizj4n8afrc0fj19wrp7mj3c0" "gA1Zz808BekAy04hS+SPa4hqCN8=" From 1cd96f22c045ce3aa16e7fc40f4f9d56f069bf6e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:25:07 -0700 Subject: [PATCH 024/373] shellcheck fix: tests/functional/impure-derivations.sh --- maintainers/flake-module.nix | 1 - tests/functional/impure-derivations.sh | 46 +++++++++++++------------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3bf41bc14..43c84d5ae 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index 9e483d376..e0b7c3eea 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -12,62 +12,62 @@ restartDaemon clearStoreIfPossible # Basic test of impure derivations: building one a second time should not use the previous result. -printf 0 > $TEST_ROOT/counter +printf 0 > "$TEST_ROOT"/counter # `nix derivation add` with impure derivations work drvPath=$(nix-instantiate ./impure-derivations.nix -A impure) -nix derivation show $drvPath | jq .[] > $TEST_HOME/impure-drv.json -drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json) +nix derivation show "$drvPath" | jq .[] > "$TEST_HOME"/impure-drv.json +drvPath2=$(nix derivation add < "$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! 
-expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) -path1=$(echo $json | jq -r .[].outputs.out) -path1_stuff=$(echo $json | jq -r .[].outputs.stuff) -[[ $(< $path1/n) = 0 ]] -[[ $(< $path1_stuff/bla) = 0 ]] +path1=$(echo "$json" | jq -r .[].outputs.out) +path1_stuff=$(echo "$json" | jq -r .[].outputs.stuff) +[[ $(< "$path1"/n) = 0 ]] +[[ $(< "$path1_stuff"/bla) = 0 ]] -[[ $(nix path-info --json $path1 | jq .[].ca) =~ fixed:r:sha256: ]] +[[ $(nix path-info --json "$path1" | jq .[].ca) =~ fixed:r:sha256: ]] path2=$(nix build -L --no-link --json --file ./impure-derivations.nix impure | jq -r .[].outputs.out) -[[ $(< $path2/n) = 1 ]] +[[ $(< "$path2"/n) = 1 ]] # Test impure derivations that depend on impure derivations. path3=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path3/n) = X2 ]] +[[ $(< "$path3"/n) = X2 ]] path4=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path4/n) = X3 ]] +[[ $(< "$path4"/n) = X3 ]] # Test that (self-)references work. -[[ $(< $path4/symlink/bla) = 3 ]] -[[ $(< $path4/self/n) = X3 ]] +[[ $(< "$path4"/symlink/bla) = 3 ]] +[[ $(< "$path4"/self/n) = X3 ]] # Input-addressed derivations cannot depend on impure derivations directly. (! nix build -L --no-link --json --file ./impure-derivations.nix inputAddressed 2>&1) | grep 'depends on impure derivation' drvPath=$(nix eval --json --file ./impure-derivations.nix impure.drvPath | jq -r .) 
-[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] -[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] # Fixed-output derivations *can* depend on impure derivations. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # And they should not be rebuilt. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Input-addressed derivations can depend on fixed-output derivations that depend on impure derivations. path6=$(nix build -L --no-link --json --file ./impure-derivations.nix inputAddressedAfterCA | jq -r .[].outputs.out) -[[ $(< $path6) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path6") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Test nix/fetchurl.nix. 
path7=$(nix build -L --no-link --print-out-paths --expr "import { impure = true; url = file://$PWD/impure-derivations.sh; }") -cmp $path7 $PWD/impure-derivations.sh +cmp "$path7" "$PWD"/impure-derivations.sh From 78d9a8d92b7033ffa673767183fe6936d8f3d0d0 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:25:29 -0700 Subject: [PATCH 025/373] shellcheck fix: tests/functional/impure-eval.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 43c84d5ae..eac332920 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' From f702101224eba2bd322d99efa7dafc09f6e47569 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:09 -0700 Subject: [PATCH 026/373] shellcheck fix: tests/functional/install-darwin.sh --- maintainers/flake-module.nix | 1 - tests/functional/install-darwin.sh | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index eac332920..2d10cc870 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' diff --git a/tests/functional/install-darwin.sh b/tests/functional/install-darwin.sh index ea2b75323..0070e9dce 100755 --- a/tests/functional/install-darwin.sh +++ b/tests/functional/install-darwin.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -eux @@ -21,12 +21,13 @@ cleanup() { for file in ~/.bash_profile ~/.bash_login 
~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do if [ -e "$file" ]; then + # shellcheck disable=SC2002 cat "$file" | grep -v nix-profile > "$file.next" mv "$file.next" "$file" fi done - for i in $(seq 1 $(sysctl -n hw.ncpu)); do + for i in $(seq 1 "$(sysctl -n hw.ncpu)"); do sudo /usr/bin/dscl . -delete "/Users/nixbld$i" || true done sudo /usr/bin/dscl . -delete "/Groups/nixbld" || true @@ -65,11 +66,11 @@ verify echo nix-build ./release.nix -A binaryTarball.x86_64-darwin ) | bash -l set -e - cp ./result/nix-*.tar.bz2 $scratch/nix.tar.bz2 + cp ./result/nix-*.tar.bz2 "$scratch"/nix.tar.bz2 ) ( - cd $scratch + cd "$scratch" tar -xf ./nix.tar.bz2 cd nix-* From 5341d82428744f1c2afa3f4298abb106d4261faf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:30 -0700 Subject: [PATCH 027/373] shellcheck fix: tests/functional/legacy-ssh-store.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2d10cc870..8ef74498d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' From c4da98c8f480e90fe35df3edce95635fd60fb8e7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:55 -0700 Subject: [PATCH 028/373] shellcheck fix: tests/functional/linux-sandbox.sh --- maintainers/flake-module.nix | 1 - tests/functional/linux-sandbox.sh | 26 +++++++++++++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8ef74498d..baa240a04 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - 
''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index abb635f11..c3ddf6ce6 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -19,8 +19,8 @@ if [[ ! $SHELL =~ /nix/store ]]; then skipTest "Shell is not from Nix store"; fi # An alias to automatically bind-mount the $SHELL on nix-build invocations nix-sandbox-build () { nix-build --no-out-link --sandbox-paths /nix/store "$@"; } -chmod -R u+w $TEST_ROOT/store0 || true -rm -rf $TEST_ROOT/store0 +chmod -R u+w "$TEST_ROOT"/store0 || true +rm -rf "$TEST_ROOT"/store0 export NIX_STORE_DIR=/my/store export NIX_REMOTE=$TEST_ROOT/store0 @@ -29,11 +29,11 @@ outPath=$(nix-sandbox-build dependencies.nix) [[ $outPath =~ /my/store/.*-dependencies ]] -nix path-info -r $outPath | grep input-2 +nix path-info -r "$outPath" | grep input-2 -nix store ls -R -l $outPath | grep foobar +nix store ls -R -l "$outPath" | grep foobar -nix store cat $outPath/foobar | grep FOOBAR +nix store cat "$outPath"/foobar | grep FOOBAR # Test --check without hash rewriting. 
nix-sandbox-build dependencies.nix --check @@ -42,9 +42,9 @@ nix-sandbox-build dependencies.nix --check nix-sandbox-build check.nix -A nondeterministic # `100 + 4` means non-determinstic, see doc/manual/source/command-ref/status-build-failure.md -expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > $TEST_ROOT/log -grepQuietInverse 'error: renaming' $TEST_ROOT/log -grepQuiet 'may not be deterministic' $TEST_ROOT/log +expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > "$TEST_ROOT"/log +grepQuietInverse 'error: renaming' "$TEST_ROOT"/log +grepQuiet 'may not be deterministic' "$TEST_ROOT"/log # Test that sandboxed builds cannot write to /etc easily # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md @@ -59,7 +59,7 @@ testCert () { certFile=$3 # a string that can be the path to a cert file # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md [ "$mode" == fixed-output ] && ret=1 || ret=100 - expectStderr $ret nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | + expectStderr "$ret" nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | grepQuiet "CERT_${expectation}_IN_SANDBOX" } @@ -68,10 +68,10 @@ cert=$TEST_ROOT/some-cert-file.pem symlinkcert=$TEST_ROOT/symlink-cert-file.pem transitivesymlinkcert=$TEST_ROOT/transitive-symlink-cert-file.pem symlinkDir=$TEST_ROOT/symlink-dir -echo -n "CERT_CONTENT" > $cert -ln -s $cert $symlinkcert -ln -s $symlinkcert $transitivesymlinkcert -ln -s $TEST_ROOT $symlinkDir +echo -n "CERT_CONTENT" > "$cert" +ln -s "$cert" "$symlinkcert" +ln -s "$symlinkcert" "$transitivesymlinkcert" +ln -s "$TEST_ROOT" "$symlinkDir" # No cert in sandbox when not a fixed-output derivation testCert missing normal "$cert" From 5a13f9fc91f993f936f4582ba12f7d30328ce15c Mon Sep 17 00:00:00 2001 From: Farid 
Zakaria Date: Mon, 29 Sep 2025 09:28:39 -0700 Subject: [PATCH 029/373] shellcheck fix: tests/functional/logging.sh --- maintainers/flake-module.nix | 1 - tests/functional/logging.sh | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index baa240a04..5f2a837f9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 83df9a45d..600fce43e 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -9,14 +9,14 @@ clearStore path=$(nix-build dependencies.nix --no-out-link) # Test nix-store -l. -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # Test compressed logs. clearStore -rm -rf $NIX_LOG_DIR -(! nix-store -l $path) +rm -rf "$NIX_LOG_DIR" +(! nix-store -l "$path") nix-build dependencies.nix --no-out-link --compress-build-log -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # test whether empty logs work fine with `nix log`. 
builder="$(realpath "$(mktemp)")" @@ -40,5 +40,5 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 - (( $(grep '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" | wc -l) == 5 )) + (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi From f2eef5b0a49bef1beb5fbc7c4451676828d1c8c8 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:29:35 -0700 Subject: [PATCH 030/373] shellcheck fix: tests/functional/misc.sh --- maintainers/flake-module.nix | 1 - tests/functional/misc.sh | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5f2a837f9..ee306a4ee 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index b94a5fc57..131b63323 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -14,6 +14,7 @@ source common.sh nix-env --version | grep -F "${_NIX_TEST_CLIENT_VERSION:-$version}" nix_env=$(type -P nix-env) +# shellcheck disable=SC2123 (PATH=""; ! $nix_env --help 2>&1 ) | grepQuiet -F "The 'man' command was not found, but it is needed for 'nix-env' and some other 'nix-*' commands' help text. Perhaps you could install the 'man' command?" # Usage errors. @@ -22,12 +23,12 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo $eval_arg_res | grep "at «string»:1:15:" -echo $eval_arg_res | grep "infinite recursion encountered" +echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo $eval_stdin_res | grep "at «stdin»:1:15:" -echo $eval_stdin_res | grep "infinite recursion encountered" +echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors expectStderr 1 nix-instantiate --eval -E '{}' -A '"x' | grepQuiet "missing closing quote in selection path" @@ -40,10 +41,10 @@ expectStderr 1 nix-instantiate --eval -E '[]' -A '1' | grepQuiet "out of range" # NOTE(cole-h): behavior is different depending on the order, which is why we test an unknown option # before and after the `'{}'`! out="$(expectStderr 0 nix-instantiate --option foobar baz --expr '{}')" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] out="$(expectStderr 0 nix-instantiate '{}' --option foobar baz --expr )" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] if [[ $(uname) = Linux && $(uname -m) = i686 ]]; then [[ $(nix config show system) = i686-linux ]] From e26b0c66b0ca2e44f2fcf1c389d4e27d5008ddc4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:34:26 -0700 Subject: [PATCH 031/373] shellcheck fix: tests/functional/multiple-outputs.sh --- maintainers/flake-module.nix | 1 - tests/functional/multiple-outputs.sh | 68 +++++++++++++++------------- 2 files changed, 37 insertions(+), 32 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ee306a4ee..742a9d313 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; 
excludes = [ # We haven't linted these files yet - ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' diff --git a/tests/functional/multiple-outputs.sh b/tests/functional/multiple-outputs.sh index c4e0be15e..f703fb02b 100755 --- a/tests/functional/multiple-outputs.sh +++ b/tests/functional/multiple-outputs.sh @@ -6,15 +6,17 @@ TODO_NixOS clearStoreIfPossible -rm -f $TEST_ROOT/result* +rm -f "$TEST_ROOT"/result* # Placeholder strings are opaque, so cannot do this check for floating # content-addressing derivations. -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Test whether the output names match our expectations outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.out.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a" ] outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.dev.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a-dev" ] fi @@ -27,16 +29,17 @@ echo "evaluating c..." # outputs. drvPath=$(nix-instantiate multiple-outputs.nix -A c) #[ "$drvPath" = "$drvPath2" ] -grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' $drvPath -grepQuiet 'multiple-outputs-b.drv",\["out"\]' $drvPath +grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' "$drvPath" +grepQuiet 'multiple-outputs-b.drv",\["out"\]' "$drvPath" # While we're at it, test the ‘unsafeDiscardOutputDependency’ primop. outPath=$(nix-build multiple-outputs.nix -A d --no-out-link) -drvPath=$(cat $outPath/drv) +drvPath=$(cat "$outPath"/drv) if [[ -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then - expectStderr 1 nix-store -q $drvPath | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. 
by building it)" + expectStderr 1 nix-store -q "$drvPath" | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)" else - outPath=$(nix-store -q $drvPath) + outPath=$(nix-store -q "$drvPath") + # shellcheck disable=SC2233 (! [ -e "$outPath" ]) fi @@ -48,34 +51,37 @@ echo "output path is $outPath" [ "$(cat "$outPath/file")" = "success" ] # Test nix-build on a derivation with multiple outputs. -outPath1=$(nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result) -[ -e $TEST_ROOT/result-first ] -(! [ -e $TEST_ROOT/result-second ]) -nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result -[ "$(cat $TEST_ROOT/result-first/file)" = "first" ] -[ "$(cat $TEST_ROOT/result-second/file)" = "second" ] -[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] -hash1=$(nix-store -q --hash $TEST_ROOT/result-second) +outPath1=$(nix-build multiple-outputs.nix -A a -o "$TEST_ROOT"/result) +[ -e "$TEST_ROOT"/result-first ] +# shellcheck disable=SC2235 +(! 
[ -e "$TEST_ROOT"/result-second ]) +nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result +[ "$(cat "$TEST_ROOT"/result-first/file)" = "first" ] +[ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] +[ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] +hash1=$(nix-store -q --hash "$TEST_ROOT"/result-second) -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.first) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.first)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-out-link) -[[ $(cat $outPath2/file) = second ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.second)" --no-out-link) +[[ $(cat "$outPath2"/file) = second ]] +# FIXME: Fixing this shellcheck causes the test to fail. +# shellcheck disable=SC2046 [[ $(nix-build $(nix-instantiate multiple-outputs.nix -A a.all) --no-out-link | wc -l) -eq 2 ]] -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Delete one of the outputs and rebuild it. This will cause a hash # rewrite. 
- env -u NIX_REMOTE nix store delete $TEST_ROOT/result-second --ignore-liveness - nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result - [ "$(cat $TEST_ROOT/result-second/file)" = "second" ] - [ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] - hash2=$(nix-store -q --hash $TEST_ROOT/result-second) + env -u NIX_REMOTE nix store delete "$TEST_ROOT"/result-second --ignore-liveness + nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result + [ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] + [ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] + hash2=$(nix-store -q --hash "$TEST_ROOT"/result-second) [ "$hash1" = "$hash2" ] fi @@ -92,15 +98,15 @@ fi # Do a GC. This should leave an empty store. echo "collecting garbage..." -rm $TEST_ROOT/result* +rm "$TEST_ROOT"/result* nix-store --gc --keep-derivations --keep-outputs nix-store --gc --print-roots -rm -rf $NIX_STORE_DIR/.links -rmdir $NIX_STORE_DIR +rm -rf "$NIX_STORE_DIR"/.links +rmdir "$NIX_STORE_DIR" # TODO inspect why this doesn't work with floating content-addressing # derivations. -if [[ ! 
-n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' fi From 1a71c1ef9fa1cf925e053c56c953e698f0af4dfa Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:43:38 -0700 Subject: [PATCH 032/373] shellcheck fix: tests/functional/nested-sandboxing.sh --- maintainers/flake-module.nix | 1 - tests/functional/nested-sandboxing.sh | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 742a9d313..ffb55b767 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' diff --git a/tests/functional/nested-sandboxing.sh b/tests/functional/nested-sandboxing.sh index 4d4cf125e..8788c7d90 100755 --- a/tests/functional/nested-sandboxing.sh +++ b/tests/functional/nested-sandboxing.sh @@ -11,7 +11,7 @@ requiresUnprivilegedUserNamespaces start="$TEST_ROOT/start" mkdir -p "$start" -cp -r common common.sh ${config_nix} ./nested-sandboxing "$start" +cp -r common common.sh "${config_nix}" ./nested-sandboxing "$start" cp "${_NIX_TEST_BUILD_DIR}/common/subst-vars.sh" "$start/common" # N.B. 
redefine _NIX_TEST_SOURCE_DIR="$start" @@ -20,6 +20,7 @@ cd "$start" source ./nested-sandboxing/command.sh +# shellcheck disable=SC2016 expectStderr 100 runNixBuild badStoreUrl 2 | grepQuiet '`sandbox-build-dir` must not contain' runNixBuild goodStoreUrl 5 From 794723142ba1ac70577c58fba37f0a0200945a54 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:44:52 -0700 Subject: [PATCH 033/373] shellcheck fix: tests/functional/nested-sandboxing/command.sh --- maintainers/flake-module.nix | 1 - tests/functional/nested-sandboxing/command.sh | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ffb55b767..cf13e1e80 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' diff --git a/tests/functional/nested-sandboxing/command.sh b/tests/functional/nested-sandboxing/command.sh index 7c04e82f5..c01133d93 100644 --- a/tests/functional/nested-sandboxing/command.sh +++ b/tests/functional/nested-sandboxing/command.sh @@ -1,17 +1,20 @@ +# shellcheck shell=bash set -eu -o pipefail -export NIX_BIN_DIR=$(dirname $(type -p nix)) +NIX_BIN_DIR=$(dirname "$(type -p nix)") +export NIX_BIN_DIR # TODO Get Nix and its closure more flexibly -export EXTRA_SANDBOX="/nix/store $(dirname $NIX_BIN_DIR)" +EXTRA_SANDBOX="/nix/store $(dirname "$NIX_BIN_DIR")" +export EXTRA_SANDBOX badStoreUrl () { local altitude=$1 - echo $TEST_ROOT/store-$altitude + echo "$TEST_ROOT"/store-"$altitude" } goodStoreUrl () { local altitude=$1 - echo $("badStoreUrl" "$altitude")?store=/foo-$altitude + echo "$("badStoreUrl" "$altitude")"?store=/foo-"$altitude" } # The non-standard sandbox-build-dir helps ensure that we get the same 
behavior From 2bfc9019fad4cd1521bb42aa2244eb9cf6d15578 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:46:43 -0700 Subject: [PATCH 034/373] shellcheck fix: tests/functional/nix-build.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-build.sh | 28 +++++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index cf13e1e80..a21fb214c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' diff --git a/tests/functional/nix-build.sh b/tests/functional/nix-build.sh index 091e429e0..33973c628 100755 --- a/tests/functional/nix-build.sh +++ b/tests/functional/nix-build.sh @@ -6,30 +6,30 @@ TODO_NixOS clearStoreIfPossible -outPath=$(nix-build dependencies.nix -o $TEST_ROOT/result) -test "$(cat $TEST_ROOT/result/foobar)" = FOOBAR +outPath=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +test "$(cat "$TEST_ROOT"/result/foobar)" = FOOBAR # The result should be retained by a GC. echo A -target=$(readLink $TEST_ROOT/result) +target=$(readLink "$TEST_ROOT"/result) echo B -echo target is $target +echo target is "$target" nix-store --gc -test -e $target/foobar +test -e "$target"/foobar # But now it should be gone. 
-rm $TEST_ROOT/result +rm "$TEST_ROOT"/result nix-store --gc -if test -e $target/foobar; then false; fi +if test -e "$target"/foobar; then false; fi -outPath2=$(nix-build $(nix-instantiate dependencies.nix) --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)" --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate dependencies.nix)!out --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)"!out --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-store -r $(nix-instantiate --add-root $TEST_ROOT/indirect dependencies.nix)!out) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-store -r "$(nix-instantiate --add-root "$TEST_ROOT"/indirect dependencies.nix)"!out) +[[ $outPath = "$outPath2" ]] # The order of the paths on stdout must correspond to the -A options # https://github.com/NixOS/nix/issues/4197 @@ -39,9 +39,11 @@ input1="$(nix-build nix-build-examples.nix -A input1 --no-out-link)" input2="$(nix-build nix-build-examples.nix -A input2 --no-out-link)" body="$(nix-build nix-build-examples.nix -A body --no-out-link)" +# shellcheck disable=SC2046,SC2005 outPathsA="$(echo $(nix-build nix-build-examples.nix -A input0 -A input1 -A input2 -A body --no-out-link))" [[ "$outPathsA" = "$input0 $input1 $input2 $body" ]] # test a different ordering to make sure it fails, not just in 23 out of 24 permutations +# shellcheck disable=SC2046,SC2005 outPathsB="$(echo $(nix-build nix-build-examples.nix -A body -A input1 -A input2 -A input0 --no-out-link))" [[ "$outPathsB" = "$body $input1 $input2 $input0" ]] From 2b1a0963f9771238d5cb985f4c91b9e3c39c3e0d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:51:46 -0700 Subject: [PATCH 035/373] shellcheck fix: tests/functional/nix-channel.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-channel.sh | 52 ++++++++++++++++----------------- 2 files changed, 26 insertions(+), 27 
deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a21fb214c..6b41b291d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' diff --git a/tests/functional/nix-channel.sh b/tests/functional/nix-channel.sh index d0b772850..f23d4bbde 100755 --- a/tests/functional/nix-channel.sh +++ b/tests/functional/nix-channel.sh @@ -4,7 +4,7 @@ source common.sh clearProfiles -rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile +rm -f "$TEST_HOME"/.nix-channels "$TEST_HOME"/.nix-profile # Test add/list/remove. nix-channel --add http://foo/bar xyzzy @@ -12,8 +12,8 @@ nix-channel --list | grepQuiet http://foo/bar nix-channel --remove xyzzy [[ $(nix-channel --list-generations | wc -l) == 1 ]] -[ -e $TEST_HOME/.nix-channels ] -[ "$(cat $TEST_HOME/.nix-channels)" = '' ] +[ -e "$TEST_HOME"/.nix-channels ] +[ "$(cat "$TEST_HOME"/.nix-channels)" = '' ] # Test the XDG Base Directories support @@ -25,47 +25,47 @@ nix-channel --remove xyzzy unset NIX_CONFIG -[ -e $TEST_HOME/.local/state/nix/channels ] -[ "$(cat $TEST_HOME/.local/state/nix/channels)" = '' ] +[ -e "$TEST_HOME"/.local/state/nix/channels ] +[ "$(cat "$TEST_HOME"/.local/state/nix/channels)" = '' ] # Create a channel. 
-rm -rf $TEST_ROOT/foo -mkdir -p $TEST_ROOT/foo +rm -rf "$TEST_ROOT"/foo +mkdir -p "$TEST_ROOT"/foo drvPath=$(nix-instantiate dependencies.nix) -nix copy --to file://$TEST_ROOT/foo?compression="bzip2" $(nix-store -r "$drvPath") -rm -rf $TEST_ROOT/nixexprs -mkdir -p $TEST_ROOT/nixexprs -cp "${config_nix}" dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/ -ln -s dependencies.nix $TEST_ROOT/nixexprs/default.nix -(cd $TEST_ROOT && tar cvf - nixexprs) | bzip2 > $TEST_ROOT/foo/nixexprs.tar.bz2 +nix copy --to file://"$TEST_ROOT"/foo?compression="bzip2" "$(nix-store -r "$drvPath")" +rm -rf "$TEST_ROOT"/nixexprs +mkdir -p "$TEST_ROOT"/nixexprs +cp "${config_nix}" dependencies.nix dependencies.builder*.sh "$TEST_ROOT"/nixexprs/ +ln -s dependencies.nix "$TEST_ROOT"/nixexprs/default.nix +(cd "$TEST_ROOT" && tar cvf - nixexprs) | bzip2 > "$TEST_ROOT"/foo/nixexprs.tar.bz2 # Test the update action. -nix-channel --add file://$TEST_ROOT/foo +nix-channel --add file://"$TEST_ROOT"/foo nix-channel --update [[ $(nix-channel --list-generations | wc -l) == 2 ]] # Do a query. -nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test updating from a tarball -nix-channel --add file://$TEST_ROOT/foo/nixexprs.tar.bz2 bar +nix-channel --add file://"$TEST_ROOT"/foo/nixexprs.tar.bz2 bar nix-channel --update # Do a query. 
-nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test evaluation through a channel symlink (#9882). drvPath=$(nix-instantiate '') @@ -73,9 +73,9 @@ drvPath=$(nix-instantiate '') # Add a test for the special case behaviour of 'nixpkgs' in the # channels for root (see EvalSettings::getDefaultNixPath()). if ! isTestOnNixOS; then - nix-channel --add file://$TEST_ROOT/foo nixpkgs + nix-channel --add file://"$TEST_ROOT"/foo nixpkgs nix-channel --update - mv $TEST_HOME/.local/state/nix/profiles $TEST_ROOT/var/nix/profiles/per-user/root + mv "$TEST_HOME"/.local/state/nix/profiles "$TEST_ROOT"/var/nix/profiles/per-user/root drvPath2=$(nix-instantiate '') [[ "$drvPath" = "$drvPath2" ]] fi From 83e203fe453f1a3448b24dbb0630de1338d5e1e6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:08 -0700 Subject: [PATCH 036/373] shellcheck fix: tests/functional/nix-collect-garbage-d.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-collect-garbage-d.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 6b41b291d..492c85bb0 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - 
''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' diff --git a/tests/functional/nix-collect-garbage-d.sh b/tests/functional/nix-collect-garbage-d.sh index 119efe629..44de90711 100755 --- a/tests/functional/nix-collect-garbage-d.sh +++ b/tests/functional/nix-collect-garbage-d.sh @@ -29,7 +29,7 @@ testCollectGarbageD # Run the same test, but forcing the profiles an arbitrary location. rm ~/.nix-profile -ln -s $TEST_ROOT/blah ~/.nix-profile +ln -s "$TEST_ROOT"/blah ~/.nix-profile testCollectGarbageD # Run the same test, but forcing the profiles at their legacy location under From c9fd721be95eb34516e78910bc7e49396c28e830 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:31 -0700 Subject: [PATCH 037/373] shellcheck fix: tests/functional/nix-copy-ssh-common.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-copy-ssh-common.sh | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 492c85bb0..a2c6801e9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' diff --git a/tests/functional/nix-copy-ssh-common.sh b/tests/functional/nix-copy-ssh-common.sh index 5eea9612d..8154585af 100644 --- a/tests/functional/nix-copy-ssh-common.sh +++ b/tests/functional/nix-copy-ssh-common.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash proto=$1 shift (( $# == 0 )) @@ -7,7 +8,7 @@ TODO_NixOS clearStore clearCache -mkdir -p $TEST_ROOT/stores +mkdir -p "$TEST_ROOT"/stores # Create path to copy back and forth outPath=$(nix-build --no-out-link dependencies.nix) @@ 
-37,17 +38,17 @@ if [[ "$proto" == "ssh-ng" ]]; then args+=(--no-check-sigs) fi -[ ! -f ${remoteRoot}${outPath}/foobar ] -nix copy "${args[@]}" --to "$remoteStore" $outPath -[ -f ${remoteRoot}${outPath}/foobar ] +[ ! -f "${remoteRoot}""${outPath}"/foobar ] +nix copy "${args[@]}" --to "$remoteStore" "$outPath" +[ -f "${remoteRoot}""${outPath}"/foobar ] # Copy back from store clearStore -[ ! -f $outPath/foobar ] -nix copy --no-check-sigs --from "$remoteStore" $outPath -[ -f $outPath/foobar ] +[ ! -f "$outPath"/foobar ] +nix copy --no-check-sigs --from "$remoteStore" "$outPath" +[ -f "$outPath"/foobar ] # Check --substitute-on-destination, avoid corrupted store From ca7414cd18985f50486c42451c4f5fa1839c9695 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:52 -0700 Subject: [PATCH 038/373] shellcheck fix: tests/functional/nix-copy-ssh-ng.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-copy-ssh-ng.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a2c6801e9..81f384e57 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' diff --git a/tests/functional/nix-copy-ssh-ng.sh b/tests/functional/nix-copy-ssh-ng.sh index 41958c2c3..f74f3bb86 100755 --- a/tests/functional/nix-copy-ssh-ng.sh +++ b/tests/functional/nix-copy-ssh-ng.sh @@ -14,5 +14,5 @@ outPath=$(nix-build --no-out-link dependencies.nix) nix store info --store "$remoteStore" # Regression test for https://github.com/NixOS/nix/issues/6253 -nix copy --to "$remoteStore" $outPath --no-check-sigs & -nix copy --to "$remoteStore" $outPath --no-check-sigs +nix copy --to "$remoteStore" "$outPath" --no-check-sigs & +nix copy 
--to "$remoteStore" "$outPath" --no-check-sigs From 8c2664ed15ab12fe49d4a8c8126c79a401106880 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:53:17 -0700 Subject: [PATCH 039/373] shellcheck fix: tests/functional/nix-copy-ssh.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 81f384e57..2741ff143 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' From cf206ef61e25a7727e0ee493c01240f3ae29c376 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:53:41 -0700 Subject: [PATCH 040/373] shellcheck fix: tests/functional/nix-daemon-untrusting.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2741ff143..64d22d2ac 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' From 78833ca8d091d90b81979974679558fa3f667241 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:57:45 -0700 Subject: [PATCH 041/373] shellcheck fix: tests/functional/nix-profile.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-profile.sh | 145 +++++++++++++++++--------------- 2 files changed, 76 insertions(+), 70 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 64d22d2ac..5c373cdb9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ 
-106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index b1cfef6b0..922162d4b 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -12,9 +12,10 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p $flake1Dir +mkdir -p "$flake1Dir" -cat > $flake1Dir/flake.nix < "$flake1Dir"/flake.nix < $flake1Dir/flake.nix < $flake1Dir/who -printf 1.0 > $flake1Dir/version -printf false > $flake1Dir/ca.nix +printf World > "$flake1Dir"/who +printf 1.0 > "$flake1Dir"/version +printf false > "$flake1Dir"/ca.nix -cp "${config_nix}" $flake1Dir/ +cp "${config_nix}" "$flake1Dir"/ # Test upgrading from nix-env. nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' -nix profile add $flake1Dir -L +nix profile add "$flake1Dir" -L nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! 
[ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history nix profile history | grep "packages.$system.default: ∅ -> 1.0" nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' @@ -64,32 +66,32 @@ nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" nix profile remove flake1 2>&1 | grep 'removed 1 packages' -nix profile add $flake1Dir -[[ $($TEST_HOME/.local/state/nix/profile/bin/hello) = "Hello World" ]] +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.local/state/nix/profile/bin/hello) = "Hello World" ]] unset NIX_CONFIG # Test conflicting package add. -nix profile add $flake1Dir 2>&1 | grep "warning: 'flake1' is already added" +nix profile add "$flake1Dir" 2>&1 | grep "warning: 'flake1' is already added" # Test upgrading a package. -printf NixOS > $flake1Dir/who -printf 2.0 > $flake1Dir/version +printf NixOS > "$flake1Dir"/who +printf 2.0 > "$flake1Dir"/version nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello NixOS" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello NixOS" ]] nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 2.0, 2.0-man" # Test upgrading package using regular expression. -printf 2.1 > $flake1Dir/version +printf 2.1 > "$flake1Dir"/version nix profile upgrade --regex '.*' -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] nix profile rollback # Test upgrading all packages -printf 2.2 > $flake1Dir/version +printf 2.2 > "$flake1Dir"/version nix profile upgrade --all -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] nix profile rollback -printf 1.0 > $flake1Dir/version +printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. 
assertStderr nix --offline profile upgrade --all foo << EOF @@ -117,98 +119,102 @@ nix profile rollback nix profile diff-closures # Test rollback. -printf World > $flake1Dir/who +printf World > "$flake1Dir"/who nix profile upgrade flake1 -printf NixOS > $flake1Dir/who +printf NixOS > "$flake1Dir"/who nix profile upgrade flake1 nix profile rollback -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] # Test uninstall. -[ -e $TEST_HOME/.nix-profile/bin/foo ] +[ -e "$TEST_HOME"/.nix-profile/bin/foo ] +# shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -(! [ -e $TEST_HOME/.nix-profile/bin/foo ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) nix profile history | grep 'foo: 1.0 -> ∅' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. nix profile add --file ./simple.nix '' -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" ]] nix profile remove simple 2>&1 | grep 'removed 1 packages' -nix profile add $(nix-build --no-out-link ./simple.nix) -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +nix profile add "$(nix-build --no-out-link ./simple.nix)" +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" ]] # Test packages with same name from different sources -mkdir $TEST_ROOT/simple-too -cp ./simple.nix "${config_nix}" simple.builder.sh $TEST_ROOT/simple-too -nix profile add --file $TEST_ROOT/simple-too/simple.nix '' +mkdir "$TEST_ROOT"/simple-too +cp ./simple.nix "${config_nix}" simple.builder.sh "$TEST_ROOT"/simple-too +nix profile add --file "$TEST_ROOT"/simple-too/simple.nix '' nix profile list | grep -A4 'Name:.*simple' | grep 'Name:.*simple-1' nix profile remove simple 2>&1 | grep 'removed 1 packages' nix profile remove simple-1 2>&1 | grep 'removed 1 packages' # Test wipe-history. 
nix profile wipe-history -[[ $(nix profile history | grep Version | wc -l) -eq 1 ]] +[[ $(nix profile history | grep -c Version) -eq 1 ]] # Test upgrade to CA package. -printf true > $flake1Dir/ca.nix -printf 3.0 > $flake1Dir/version +printf true > "$flake1Dir"/ca.nix +printf 3.0 > "$flake1Dir"/version nix profile upgrade flake1 nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 3.0, 3.0-man" # Test new install of CA package. nix profile remove flake1 2>&1 | grep 'removed 1 packages' -printf 4.0 > $flake1Dir/version -printf Utrecht > $flake1Dir/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[[ $(nix path-info --json $(realpath $TEST_HOME/.nix-profile/bin/hello) | jq -r .[].ca) =~ fixed:r:sha256: ]] +printf 4.0 > "$flake1Dir"/version +printf Utrecht > "$flake1Dir"/who +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[[ $(nix path-info --json "$(realpath "$TEST_HOME"/.nix-profile/bin/hello)" | jq -r .[].ca) =~ fixed:r:sha256: ]] # Override the outputs. nix profile remove simple flake1 nix profile add "$flake1Dir^*" -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] -printf Nix > $flake1Dir/who +printf Nix > "$flake1Dir"/who nix profile list nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Nix" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Nix" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] nix profile remove flake1 2>&1 | grep 'removed 1 packages' nix profile add "$flake1Dir^man" -(! 
[ -e $TEST_HOME/.nix-profile/bin/hello ]) -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/hello ]) +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/include ]) # test priority nix profile remove flake1 2>&1 | grep 'removed 1 packages' # Make another flake. flake2Dir=$TEST_ROOT/flake2 -printf World > $flake1Dir/who -cp -r $flake1Dir $flake2Dir -printf World2 > $flake2Dir/who +printf World > "$flake1Dir"/who +cp -r "$flake1Dir" "$flake2Dir" +printf World2 > "$flake2Dir"/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -expect 1 nix profile add $flake2Dir +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +expect 1 nix profile add "$flake2Dir" diff -u <( - nix --offline profile install $flake2Dir 2>&1 1> /dev/null \ + nix --offline profile install "$flake2Dir" 2>&1 1> /dev/null \ | grep -vE "^warning: " \ | grep -vE "^error \(ignored\): " \ || true ) <(cat << EOF error: An existing package already provides the following file: - $(nix build --no-link --print-out-paths ${flake1Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake1Dir}""#default.out")/bin/hello This is the conflicting file from the new package: - $(nix build --no-link --print-out-paths ${flake2Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake2Dir}""#default.out")/bin/hello To remove the existing package: @@ -225,11 +231,11 @@ error: An existing package already provides the following file: nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 EOF ) -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 100 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 0 -[[ 
$($TEST_HOME/.nix-profile/bin/hello) = "Hello World2" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 100 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 0 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World2" ]] # nix profile add $flake1Dir --priority 100 # [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] @@ -237,14 +243,15 @@ nix profile add $flake2Dir --priority 0 # flake references. # Regression test for https://github.com/NixOS/nix/issues/8284 clearProfiles -nix profile add $(nix build $flake1Dir --no-link --print-out-paths) +# shellcheck disable=SC2046 +nix profile add $(nix build "$flake1Dir" --no-link --print-out-paths) expect 1 nix profile add --impure --expr "(builtins.getFlake ''$flake2Dir'').packages.$system.default" # Test upgrading from profile version 2. clearProfiles -mkdir -p $TEST_ROOT/import-profile -outPath=$(nix build --no-link --print-out-paths $flake1Dir/flake.nix^out) -printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > $TEST_ROOT/import-profile/manifest.json -nix build --profile $TEST_HOME/.nix-profile $(nix store add-path $TEST_ROOT/import-profile) --no-link +mkdir -p "$TEST_ROOT"/import-profile +outPath=$(nix build --no-link --print-out-paths "$flake1Dir"/flake.nix^out) +printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > "$TEST_ROOT"/import-profile/manifest.json +nix build --profile "$TEST_HOME"/.nix-profile "$(nix store add-path 
"$TEST_ROOT"/import-profile)" --no-link nix profile list | grep -A4 'Name:.*hello' | grep "Store paths:.*$outPath" nix profile remove hello 2>&1 | grep 'removed 1 packages, kept 0 packages' From fe4e476d1339cf30aa910a954c8d0d05cd4c1c2c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:02:36 -0700 Subject: [PATCH 042/373] shellcheck fix: tests/functional/nix-shell.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-shell.sh | 147 +++++++++++++++++++--------------- 2 files changed, 82 insertions(+), 66 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5c373cdb9..24eedaa9b 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' ''^tests/functional/output-normalization\.sh$'' diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh index bc49333b5..cf650e2c3 100755 --- a/tests/functional/nix-shell.sh +++ b/tests/functional/nix-shell.sh @@ -16,16 +16,19 @@ export NIX_PATH=nixpkgs="$shellDotNix" export IMPURE_VAR=foo export SELECTED_IMPURE_VAR=baz +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --option nix-shell-always-looks-for-shell-nix false --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] # Test --keep +# shellcheck disable=SC2016 output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $SELECTED_IMPURE_VAR"') @@ -34,6 +37,7 @@ output=$(nix-shell 
--pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv # test NIX_BUILD_TOP testTmpDir=$(pwd)/nix-shell mkdir -p "$testTmpDir" +# shellcheck disable=SC2016 output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP') [[ "$output" =~ ${testTmpDir}.* ]] || { echo "expected $output =~ ${testTmpDir}.*" >&2 @@ -41,105 +45,111 @@ output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run } # Test nix-shell on a .drv -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] - -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] # Test nix-shell on a .drv symlink # Legacy: absolute path and .drv extension required -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell.drv -[[ $(nix-shell --pure $TEST_ROOT/shell.drv --run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell.drv +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell.drv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # New behaviour: just needs to resolve to a derivation in the store -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell -[[ $(nix-shell --pure $TEST_ROOT/shell --run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # Test nix-shell -p +# shellcheck 
disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo bar --run 'echo "$(foo) $(bar)"') [ "$output" = "foo bar" ] # Test nix-shell -p --arg x y +# shellcheck disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo --argstr fooContents baz --run 'echo "$(foo)"') [ "$output" = "baz" ] # Test nix-shell shebang mode -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh -chmod a+rx $TEST_ROOT/shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/shell.shebang.sh +chmod a+rx "$TEST_ROOT"/shell.shebang.sh -output=$($TEST_ROOT/shell.shebang.sh abc def) +output=$("$TEST_ROOT"/shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > $TEST_ROOT/shell.shebang.expr -chmod a+rx $TEST_ROOT/shell.shebang.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > "$TEST_ROOT"/shell.shebang.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.expr # Should fail due to expressions using relative path -! 
$TEST_ROOT/shell.shebang.expr bar -cp shell.nix "${config_nix}" $TEST_ROOT + "$TEST_ROOT"/shell.shebang.expr bar && exit 1 +cp shell.nix "${config_nix}" "$TEST_ROOT" # Should succeed echo "cwd: $PWD" -output=$($TEST_ROOT/shell.shebang.expr bar) +output=$("$TEST_ROOT"/shell.shebang.expr bar) [ "$output" = foo ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > $TEST_ROOT/shell.shebang.legacy.expr -chmod a+rx $TEST_ROOT/shell.shebang.legacy.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > "$TEST_ROOT"/shell.shebang.legacy.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.legacy.expr # Should fail due to expressions using relative path mkdir -p "$TEST_ROOT/somewhere-unrelated" -output="$(cd "$TEST_ROOT/somewhere-unrelated"; $TEST_ROOT/shell.shebang.legacy.expr bar;)" +output="$(cd "$TEST_ROOT/somewhere-unrelated"; "$TEST_ROOT"/shell.shebang.legacy.expr bar;)" [[ $(realpath "$output") = $(realpath "$TEST_ROOT/somewhere-unrelated") ]] # Test nix-shell shebang mode again with metacharacters in the filename. # First word of filename is chosen to not match any file in the test root. -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode for ruby # This uses a fake interpreter that returns the arguments passed # This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected. 
-sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb -chmod a+rx $TEST_ROOT/shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/shell.shebang.rb +chmod a+rx "$TEST_ROOT"/shell.shebang.rb -output=$($TEST_ROOT/shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/shell.shebang.rb abc ruby) [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/shell.shebang.rb abc ruby' ] # Test nix-shell shebang mode for ruby again with metacharacters in the filename. # Note: fake interpreter only space-separates args without adding escapes to its output. -sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb abc ruby) +# shellcheck disable=SC1003 [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ] # Test nix-shell shebang quoting -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > $TEST_ROOT/shell.shebang.nix -chmod a+rx $TEST_ROOT/shell.shebang.nix -$TEST_ROOT/shell.shebang.nix +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > "$TEST_ROOT"/shell.shebang.nix +chmod a+rx "$TEST_ROOT"/shell.shebang.nix +"$TEST_ROOT"/shell.shebang.nix -mkdir $TEST_ROOT/lookup-test $TEST_ROOT/empty +mkdir "$TEST_ROOT"/lookup-test "$TEST_ROOT"/empty -echo "import $shellDotNix" > $TEST_ROOT/lookup-test/shell.nix -cp "${config_nix}" $TEST_ROOT/lookup-test/ -echo 'abort "do not load default.nix!"' > $TEST_ROOT/lookup-test/default.nix +echo "import $shellDotNix" > "$TEST_ROOT"/lookup-test/shell.nix +cp "${config_nix}" "$TEST_ROOT"/lookup-test/ +echo 'abort "do not load default.nix!"' > 
"$TEST_ROOT"/lookup-test/default.nix -nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" +nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" # https://github.com/NixOS/nix/issues/4529 nix-shell -I "testRoot=$TEST_ROOT" '' -A shellDrv --run 'echo "it works"' | grepQuiet "it works" -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet -F "do not load default.nix!" # we did, because we chose to enable legacy behavior -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet "Skipping .*lookup-test/shell\.nix.*, because the setting .*nix-shell-always-looks-for-shell-nix.* is disabled. This is a deprecated behavior\. 
Consider enabling .*nix-shell-always-looks-for-shell-nix.*" ( - cd $TEST_ROOT/empty; + cd "$TEST_ROOT"/empty; expectStderr 1 nix-shell | \ grepQuiet "error.*no argument specified and no .*shell\.nix.* or .*default\.nix.* file found in the working directory" ) @@ -147,29 +157,29 @@ expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it work expectStderr 1 nix-shell -I "testRoot=$TEST_ROOT" '' | grepQuiet "error.*neither .*shell\.nix.* nor .*default\.nix.* found in .*/empty" -cat >$TEST_ROOT/lookup-test/shebangscript <"$TEST_ROOT"/lookup-test/shebangscript < $TEST_ROOT/marco/shell.nix -cat >$TEST_ROOT/marco/polo/default.nix < "$TEST_ROOT"/marco/shell.nix +cat >"$TEST_ROOT"/marco/polo/default.nix <$TEST_ROOT/issue-11892/shebangscript <"$TEST_ROOT"/issue-11892/shebangscript <$TEST_ROOT/issue-11892/shebangscript <$TEST_ROOT/issue-11892/my_package.nix <"$TEST_ROOT"/issue-11892/my_package.nix < $TEST_ROOT/dev-env.sh -nix print-dev-env -f "$shellDotNix" shellDrv --json > $TEST_ROOT/dev-env.json +nix print-dev-env -f "$shellDotNix" shellDrv > "$TEST_ROOT"/dev-env.sh +nix print-dev-env -f "$shellDotNix" shellDrv --json > "$TEST_ROOT"/dev-env.json # Test with raw drv shellDrv=$(nix-instantiate "$shellDotNix" -A shellDrv.out) -nix develop $shellDrv -c bash -c '[[ -n $stdenv ]]' +# shellcheck disable=SC2016 +nix develop "$shellDrv" -c bash -c '[[ -n $stdenv ]]' -nix print-dev-env $shellDrv > $TEST_ROOT/dev-env2.sh -nix print-dev-env $shellDrv --json > $TEST_ROOT/dev-env2.json +nix print-dev-env "$shellDrv" > "$TEST_ROOT"/dev-env2.sh +nix print-dev-env "$shellDrv" --json > "$TEST_ROOT"/dev-env2.json -diff $TEST_ROOT/dev-env{,2}.sh -diff $TEST_ROOT/dev-env{,2}.json +diff "$TEST_ROOT"/dev-env{,2}.sh +diff "$TEST_ROOT"/dev-env{,2}.json # Ensure `nix print-dev-env --json` contains variable assignments. 
-[[ $(jq -r .variables.arr1.value[2] $TEST_ROOT/dev-env.json) = '3 4' ]] +[[ $(jq -r .variables.arr1.value[2] "$TEST_ROOT"/dev-env.json) = '3 4' ]] # Run tests involving `source <(nix print-dev-env)` in subshells to avoid modifying the current # environment. @@ -238,27 +250,32 @@ set -u # Ensure `source <(nix print-dev-env)` modifies the environment. ( path=$PATH - source $TEST_ROOT/dev-env.sh + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh [[ -n $stdenv ]] + # shellcheck disable=SC2154 [[ ${arr1[2]} = "3 4" ]] + # shellcheck disable=SC2154 [[ ${arr2[1]} = $'\n' ]] [[ ${arr2[2]} = $'x\ny' ]] [[ $(fun) = blabla ]] - [[ $PATH = $(jq -r .variables.PATH.value $TEST_ROOT/dev-env.json):$path ]] + [[ $PATH = $(jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json):$path ]] ) # Ensure `source <(nix print-dev-env)` handles the case when PATH is empty. ( path=$PATH + # shellcheck disable=SC2123 PATH= - source $TEST_ROOT/dev-env.sh - [[ $PATH = $(PATH=$path jq -r .variables.PATH.value $TEST_ROOT/dev-env.json) ]] + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh + [[ $PATH = $(PATH=$path jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json) ]] ) # Test nix-shell with ellipsis and no `inNixShell` argument (for backwards compat with old nixpkgs) -cat >$TEST_ROOT/shell-ellipsis.nix <"$TEST_ROOT"/shell-ellipsis.nix < Date: Mon, 29 Sep 2025 10:03:10 -0700 Subject: [PATCH 043/373] shellcheck fix: tests/functional/nix_path.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix_path.sh | 35 ++++++++++++++++++----------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 24eedaa9b..f783f0261 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' 
''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' diff --git a/tests/functional/nix_path.sh b/tests/functional/nix_path.sh index 90cba1f0c..24ddcdd01 100755 --- a/tests/functional/nix_path.sh +++ b/tests/functional/nix_path.sh @@ -34,12 +34,13 @@ nix-instantiate --eval -E '' --restrict-eval unset NIX_PATH -mkdir -p $TEST_ROOT/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} +mkdir -p "$TEST_ROOT"/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} for i in from-nix-path-file from-NIX_PATH from-nix-path from-extra-nix-path from-I; do - touch $TEST_ROOT/$i/only-$i.nix + touch "$TEST_ROOT"/$i/only-$i.nix done # finding something that's not in any of the default paths fails +# shellcheck disable=SC2091 ( ! $(nix-instantiate --find-file test) ) echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" @@ -53,36 +54,36 @@ echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" (! 
NIX_PATH=test=$TEST_ROOT nix-instantiate --find-file test/only-from-nix-path-file.nix) # -I extends nix.conf -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # if -I does not have the desired entry, the value from nix.conf is used -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] # -I extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # -I takes precedence over NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test) = $TEST_ROOT/from-I ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test) = $TEST_ROOT/from-I ]] # if -I does not have the desired entry, the value from NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --extra-nix-path extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file 
test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, the value from NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --nix-path overrides NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] # if --nix-path does not have the desired entry, it fails -(! NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-NIX_PATH.nix) +(! NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-NIX_PATH.nix) # --nix-path overrides nix.conf -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] -(! nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-nix-path-file.nix) +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +(! 
nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-nix-path-file.nix) # --extra-nix-path extends nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, it is taken from nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] # -I extends --nix-path -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] From 32818483a52750cac727e2f5b53ae16f46fc14d2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:03:50 -0700 Subject: [PATCH 044/373] shellcheck fix: tests/functional/optimise-store.sh --- maintainers/flake-module.nix | 1 - tests/functional/optimise-store.sh | 17 ++++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix 
b/maintainers/flake-module.nix index f783f0261..5b743e61d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/optimise-store\.sh$'' ''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' diff --git a/tests/functional/optimise-store.sh b/tests/functional/optimise-store.sh index 05c4c41e4..332a308c2 100755 --- a/tests/functional/optimise-store.sh +++ b/tests/functional/optimise-store.sh @@ -4,28 +4,31 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 outPath1=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) +# shellcheck disable=SC2016 outPath2=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) TODO_NixOS # ignoring the client-specified setting 'auto-optimise-store', because it is a restricted setting and you are not a trusted user # TODO: only continue when trusted user or root -inode1="$(stat --format=%i $outPath1/foo)" -inode2="$(stat --format=%i $outPath2/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode2="$(stat --format=%i "$outPath2"/foo)" if [ "$inode1" != "$inode2" ]; then echo "inodes do not match" exit 1 fi -nlink="$(stat --format=%h $outPath1/foo)" +nlink="$(stat --format=%h "$outPath1"/foo)" if [ "$nlink" != 3 ]; then echo "link count incorrect" exit 1 fi +# shellcheck disable=SC2016 outPath3=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo3"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link) -inode3="$(stat --format=%i $outPath3/foo)" +inode3="$(stat --format=%i 
"$outPath3"/foo)" if [ "$inode1" = "$inode3" ]; then echo "inodes match unexpectedly" exit 1 @@ -34,8 +37,8 @@ fi # XXX: This should work through the daemon too NIX_REMOTE="" nix-store --optimise -inode1="$(stat --format=%i $outPath1/foo)" -inode3="$(stat --format=%i $outPath3/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode3="$(stat --format=%i "$outPath3"/foo)" if [ "$inode1" != "$inode3" ]; then echo "inodes do not match" exit 1 @@ -43,7 +46,7 @@ fi nix-store --gc -if [ -n "$(ls $NIX_STORE_DIR/.links)" ]; then +if [ -n "$(ls "$NIX_STORE_DIR"/.links)" ]; then echo ".links directory not empty after GC" exit 1 fi From c09cf33a3ac25291a4e4c095ee3e898f57187445 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:04:14 -0700 Subject: [PATCH 045/373] shellcheck fix: tests/functional/output-normalization.sh --- maintainers/flake-module.nix | 1 - tests/functional/output-normalization.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5b743e61d..db232f179 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' diff --git a/tests/functional/output-normalization.sh b/tests/functional/output-normalization.sh index c55f1b1d1..bd1668db9 100755 --- a/tests/functional/output-normalization.sh +++ b/tests/functional/output-normalization.sh @@ -6,7 +6,7 @@ testNormalization () { TODO_NixOS clearStore outPath=$(nix-build ./simple.nix --no-out-link) - test "$(stat -c %Y $outPath)" -eq 1 + test "$(stat -c %Y "$outPath")" -eq 1 } testNormalization From 4dc5dbaba270e6122b94986f4dc82d028e448c1f Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:05:33 -0700 Subject: [PATCH 046/373] 
shellcheck fix: tests/functional/parallel.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/parallel.builder.sh | 30 +++++++++++++++------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index db232f179..59adb8fdb 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' ''^tests/functional/path-from-hash-part\.sh$'' diff --git a/tests/functional/parallel.builder.sh b/tests/functional/parallel.builder.sh index d092bc5a6..436246571 100644 --- a/tests/functional/parallel.builder.sh +++ b/tests/functional/parallel.builder.sh @@ -1,29 +1,31 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "DOING $text" # increase counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -test -f $shared.max || echo 0 > $shared.max -new=$(($(cat $shared.cur) + 1)) -if test $new -gt $(cat $shared.max); then - echo $new > $shared.max +test -f "$shared".cur || echo 0 > "$shared".cur +test -f "$shared".max || echo 0 > "$shared".max +new=$(($(cat "$shared".cur) + 1)) +if test $new -gt "$(cat "$shared".max)"; then + echo $new > "$shared".max fi -echo $new > $shared.cur -rm $shared.lock +echo $new > "$shared".cur +rm "$shared".lock -echo -n $(cat $inputs)$text > $out +echo -n "$(cat "$inputs")""$text" > "$out" -sleep $sleepTime +sleep "$sleepTime" # decrease counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! 
ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -echo $(($(cat $shared.cur) - 1)) > $shared.cur -rm $shared.lock +test -f "$shared".cur || echo 0 > "$shared".cur +echo $(($(cat "$shared".cur) - 1)) > "$shared".cur +rm "$shared".lock From ef17baf50d262c40a0761b39f1da6d24e0add375 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:05:59 -0700 Subject: [PATCH 047/373] shellcheck fix: tests/functional/parallel.sh --- maintainers/flake-module.nix | 1 - tests/functional/parallel.sh | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 59adb8fdb..0a15c2362 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' ''^tests/functional/path-from-hash-part\.sh$'' ''^tests/functional/path-info\.sh$'' diff --git a/tests/functional/parallel.sh b/tests/functional/parallel.sh index 7e420688d..4d0bf0f1b 100644 --- a/tests/functional/parallel.sh +++ b/tests/functional/parallel.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh @@ -8,7 +9,7 @@ TODO_NixOS clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max outPath=$(nix-build -j10000 parallel.nix --no-out-link) @@ -17,8 +18,8 @@ echo "output path is $outPath" text=$(cat "$outPath") if test "$text" != "abacade"; then exit 1; fi -if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi # Second, test that parallel invocations of 
nix-build perform builds @@ -27,7 +28,7 @@ echo "testing multiple nix-build -j1..." clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max drvPath=$(nix-instantiate parallel.nix --argstr sleepTime 15) @@ -54,5 +55,5 @@ wait $pid2 || fail "instance 2 failed: $?" wait $pid3 || fail "instance 3 failed: $?" wait $pid4 || fail "instance 4 failed: $?" -if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi From 1830f5f967c1726d07104fb9b65e8ae84aac287c Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 29 Sep 2025 23:16:28 +0300 Subject: [PATCH 048/373] libutil: Create empty directory at the root for makeEmptySourceAccessor This is my SNAFU. Accidentally broken in 02c9ac445ff527a7b4c5105d20d9ab401117dcee. There's very dubious behavior for 'builtins.readDir /.': { outputs = { ... }: { lib.a = builtins.readDir /.; }; } nix eval /tmp/test-flake#lib.a Starting from 2.27 this now returns an empty set. This really isn't supposed to happen, but this change in the semantics of makeEmptySourceAccessor accidentally changed the behavior of this. 
--- src/libutil/memory-source-accessor.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index caff5b56a..a9ffb7746 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -208,11 +208,16 @@ void MemorySink::createSymlink(const CanonPath & path, const std::string & targe ref makeEmptySourceAccessor() { - static auto empty = make_ref().cast(); - /* Don't forget to clear the display prefix, as the default constructed - SourceAccessor has the «unknown» prefix. Since this accessor is supposed - to mimic an empty root directory the prefix needs to be empty. */ - empty->setPathDisplay(""); + static auto empty = []() { + auto empty = make_ref(); + MemorySink sink{*empty}; + sink.createDirectory(CanonPath::root); + /* Don't forget to clear the display prefix, as the default constructed + SourceAccessor has the «unknown» prefix. Since this accessor is supposed + to mimic an empty root directory the prefix needs to be empty. 
*/ + empty->setPathDisplay(""); + return empty.cast(); + }(); return empty; } From f70b0b599c75e05c42c2be4f85167fd8f4805e0e Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Sun, 28 Sep 2025 11:02:54 -0400 Subject: [PATCH 049/373] libexpr: allocate ExprPath strings in the allocator --- src/libexpr/include/nix/expr/nixexpr.hh | 10 ++++++---- src/libexpr/nixexpr.cc | 2 +- src/libexpr/parser.y | 6 +++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 747a8e4b2..2af6039cd 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -212,14 +212,16 @@ struct ExprString : Expr struct ExprPath : Expr { ref accessor; - std::string s; Value v; - ExprPath(ref accessor, std::string s) + ExprPath(std::pmr::polymorphic_allocator & alloc, ref accessor, std::string_view sv) : accessor(accessor) - , s(std::move(s)) { - v.mkPath(&*accessor, this->s.c_str()); + auto len = sv.length(); + char * s = alloc.allocate(len + 1); + sv.copy(s, len); + s[len] = '\0'; + v.mkPath(&*accessor, s); } Value * maybeThunk(EvalState & state, Env & env) override; diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index a2980af6b..014b85f20 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -45,7 +45,7 @@ void ExprString::show(const SymbolTable & symbols, std::ostream & str) const void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const { - str << s; + str << v.pathStr(); } void ExprVar::show(const SymbolTable & symbols, std::ostream & str) const diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 7dabd6b56..bc1eb056e 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -392,8 +392,8 @@ path_start root filesystem accessor, rather than the accessor of the current Nix expression. */ literal.front() == '/' - ? 
new ExprPath(state->rootFS, std::move(path)) - : new ExprPath(state->basePath.accessor, std::move(path)); + ? new ExprPath(state->alloc, state->rootFS, path) + : new ExprPath(state->alloc, state->basePath.accessor, path); } | HPATH { if (state->settings.pureEval) { @@ -403,7 +403,7 @@ path_start ); } Path path(getHome() + std::string($1.p + 1, $1.l - 1)); - $$ = new ExprPath(ref(state->rootFS), std::move(path)); + $$ = new ExprPath(state->alloc, ref(state->rootFS), path); } ; From 689fa81dc9fb3a8368a4f1b7b8d18f5b1ce8526b Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 29 Sep 2025 21:31:46 +0000 Subject: [PATCH 050/373] feat(libstore/http-binary-cache-store): narinfo/ls/log compression --- src/libstore/http-binary-cache-store.cc | 22 ++++++++++++++++++- .../include/nix/store/binary-cache-store.hh | 15 +++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 6922c0f69..5d4fba163 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -4,6 +4,7 @@ #include "nix/store/nar-info-disk-cache.hh" #include "nix/util/callback.hh" #include "nix/store/store-registration.hh" +#include "nix/util/compression.hh" namespace nix { @@ -142,8 +143,27 @@ protected: const std::string & mimeType) override { auto req = makeRequest(path); - req.data = StreamToSourceAdapter(istream).drain(); + + auto data = StreamToSourceAdapter(istream).drain(); + + // Determine compression method based on file type + std::string compressionMethod; + if (hasSuffix(path, ".narinfo")) + compressionMethod = config->narinfoCompression; + else if (hasSuffix(path, ".ls")) + compressionMethod = config->lsCompression; + else if (hasPrefix(path, "log/")) + compressionMethod = config->logCompression; + + // Apply compression if configured + if (!compressionMethod.empty()) { + data = compress(compressionMethod, data); + 
req.headers.emplace_back("Content-Encoding", compressionMethod); + } + + req.data = std::move(data); req.mimeType = mimeType; + try { getFileTransfer()->upload(req); } catch (FileTransferError & e) { diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index c316b1199..3a2c90022 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -59,6 +59,21 @@ struct BinaryCacheStoreConfig : virtual StoreConfig The meaning and accepted values depend on the compression method selected. `-1` specifies that the default compression level should be used. )"}; + + const Setting narinfoCompression{ + this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; + + const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; + + const Setting logCompression{ + this, + "", + "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). + )"}; }; /** From d5402b8527a87a887b516d5cdf630acb54ecbcb5 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:35:59 -0400 Subject: [PATCH 051/373] Encapsulate `curlFileTransfer::State::quit` It is allowed to read it, and to set it to `false`, but not to set it to `true`. 
--- src/libstore/filetransfer.cc | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index a162df1ad..72153dfdd 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -594,10 +594,21 @@ struct curlFileTransfer : public FileTransfer } }; - bool quit = false; std:: priority_queue, std::vector>, EmbargoComparator> incoming; + private: + bool quitting = false; + public: + void quit() + { + quitting = true; + } + + bool isQuitting() + { + return quitting; + } }; Sync state_; @@ -649,7 +660,7 @@ struct curlFileTransfer : public FileTransfer /* Signal the worker thread to exit. */ { auto state(state_.lock()); - state->quit = true; + state->quit(); } #ifndef _WIN32 // TODO need graceful async exit support on Windows? writeFull(wakeupPipe.writeSide.get(), " ", false); @@ -750,7 +761,7 @@ struct curlFileTransfer : public FileTransfer break; } } - quit = state->quit; + quit = state->isQuitting(); } for (auto & item : incoming) { @@ -778,7 +789,7 @@ struct curlFileTransfer : public FileTransfer auto state(state_.lock()); while (!state->incoming.empty()) state->incoming.pop(); - state->quit = true; + state->quit(); } } @@ -789,7 +800,7 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); - if (state->quit) + if (state->isQuitting()) throw nix::Error("cannot enqueue download request because the download thread is shutting down"); state->incoming.push(item); } @@ -845,7 +856,7 @@ ref getFileTransfer() { static ref fileTransfer = makeCurlFileTransfer(); - if (fileTransfer->state_.lock()->quit) + if (fileTransfer->state_.lock()->isQuitting()) fileTransfer = makeCurlFileTransfer(); return fileTransfer; From 1f65b08d947d9ab7eb397eebe49609963e003641 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:37:12 -0400 Subject: [PATCH 052/373] `curlFileTransfer::State::quit` empties the queue Whoever first calls `quit` now 
empties the queue, instead of waiting for the worker thread to do it. (Note that in the unwinding case, the worker thread is still the first to call `quit`, though.) --- src/libstore/filetransfer.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 72153dfdd..f8f5b48e0 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -603,6 +603,9 @@ struct curlFileTransfer : public FileTransfer void quit() { quitting = true; + /* We will not be processing any more incoming requests */ + while (!incoming.empty()) + incoming.pop(); } bool isQuitting() @@ -787,8 +790,6 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); - while (!state->incoming.empty()) - state->incoming.pop(); state->quit(); } } From 86fb5b24a9cb528d87cb02efb89483353a4b6c44 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:43:45 -0400 Subject: [PATCH 053/373] `curlFileTransfer::workerThreadEntry` Only call `quit` if we need to. --- src/libstore/filetransfer.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index f8f5b48e0..59fc75ed0 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -781,14 +781,18 @@ struct curlFileTransfer : public FileTransfer void workerThreadEntry() { + // Unwinding or because someone called `quit`. + bool normalExit = true; try { workerThreadMain(); } catch (nix::Interrupted & e) { + normalExit = false; } catch (std::exception & e) { printError("unexpected error in download thread: %s", e.what()); + normalExit = false; } - { + if (!normalExit) { auto state(state_.lock()); state->quit(); } From a8670e8a7da337e230ecd31bc81a040af208f9d0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 30 Sep 2025 03:16:35 +0300 Subject: [PATCH 054/373] libexpr-tests: Add unit tests for broken readDir /. 
for pure eval A very unfortunate interaction of current filtering with pure eval is that the following actually leads to `lib.a = {}`. This just adds a unit test for this broken behavior. This is really good to be done as a unit test via the in-memory store. { outputs = { ... }: { lib.a = builtins.readDir /.; }; } --- .../include/nix/expr/tests/libexpr.hh | 13 ++++++- src/libexpr-tests/eval.cc | 38 +++++++++++++++++++ .../include/nix/store/tests/libstore.hh | 13 +++---- 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh index 4cf985e15..a1320e14a 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh @@ -26,11 +26,20 @@ public: } protected: - LibExprTest() + LibExprTest(ref store, auto && makeEvalSettings) : LibStoreTest() + , evalSettings(makeEvalSettings(readOnlyMode)) , state({}, store, fetchSettings, evalSettings, nullptr) { - evalSettings.nixPath = {}; + } + + LibExprTest() + : LibExprTest(openStore("dummy://"), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.nixPath = {}; + return settings; + }) + { } Value eval(std::string input, bool forceValue = true) diff --git a/src/libexpr-tests/eval.cc b/src/libexpr-tests/eval.cc index ad70ea5b8..7562a9da2 100644 --- a/src/libexpr-tests/eval.cc +++ b/src/libexpr-tests/eval.cc @@ -3,6 +3,7 @@ #include "nix/expr/eval.hh" #include "nix/expr/tests/libexpr.hh" +#include "nix/util/memory-source-accessor.hh" namespace nix { @@ -174,4 +175,41 @@ TEST_F(EvalStateTest, getBuiltin_fail) ASSERT_THROW(state.getBuiltin("nonexistent"), EvalError); } +class PureEvalTest : public LibExprTest +{ +public: + PureEvalTest() + : LibExprTest(openStore("dummy://", {{"read-only", "false"}}), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.pureEval = true; + settings.restrictEval 
= true; + return settings; + }) + { + } +}; + +TEST_F(PureEvalTest, pathExists) +{ + ASSERT_THAT(eval("builtins.pathExists /."), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix"), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix/store"), IsFalse()); + + { + std::string contents = "Lorem ipsum"; + + StringSource s{contents}; + auto path = state.store->addToStoreFromDump( + s, "source", FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256); + auto printed = store->printStorePath(path); + + ASSERT_THROW(eval(fmt("builtins.readFile %s", printed)), RestrictedPathError); + ASSERT_THAT(eval(fmt("builtins.pathExists %s", printed)), IsFalse()); + + ASSERT_THROW(eval("builtins.readDir /."), RestrictedPathError); + state.allowPath(path); // FIXME: This shouldn't behave this way. + ASSERT_THAT(eval("builtins.readDir /."), IsAttrsOfSize(0)); + } +} + } // namespace nix diff --git a/src/libstore-test-support/include/nix/store/tests/libstore.hh b/src/libstore-test-support/include/nix/store/tests/libstore.hh index 28b29fa31..d79b55312 100644 --- a/src/libstore-test-support/include/nix/store/tests/libstore.hh +++ b/src/libstore-test-support/include/nix/store/tests/libstore.hh @@ -19,14 +19,13 @@ public: } protected: + LibStoreTest(ref store) + : store(std::move(store)) + { + } + LibStoreTest() - : store(openStore({ - .variant = - StoreReference::Specified{ - .scheme = "dummy", - }, - .params = {}, - })) + : LibStoreTest(openStore("dummy://")) { } From 3fcd33079cc8100d44d9252307c3390b0765db69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 30 Sep 2025 10:32:33 +0200 Subject: [PATCH 055/373] add http binary cache test for compression options --- tests/nixos/content-encoding.nix | 190 ++++++++++++++++++++++++++ tests/nixos/default.nix | 2 +- tests/nixos/gzip-content-encoding.nix | 74 ---------- 3 files changed, 191 insertions(+), 75 deletions(-) create mode 100644 tests/nixos/content-encoding.nix delete mode 
100644 tests/nixos/gzip-content-encoding.nix diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix new file mode 100644 index 000000000..debee377b --- /dev/null +++ b/tests/nixos/content-encoding.nix @@ -0,0 +1,190 @@ +# Test content encoding support in Nix: +# 1. Fetching compressed files from servers with Content-Encoding headers +# (e.g., fetching a zstd archive from a server using gzip Content-Encoding +# should preserve the zstd format, not double-decompress) +# 2. HTTP binary cache store upload/download with compression support + +{ lib, config, ... }: + +let + pkgs = config.nodes.machine.nixpkgs.pkgs; + + ztdCompressedFile = pkgs.stdenv.mkDerivation { + name = "dummy-zstd-compressed-archive"; + dontUnpack = true; + nativeBuildInputs = with pkgs; [ zstd ]; + buildPhase = '' + mkdir archive + for _ in {1..100}; do echo "lorem" > archive/file1; done + for _ in {1..100}; do echo "ipsum" > archive/file2; done + tar --zstd -cf archive.tar.zst archive + ''; + installPhase = '' + install -Dm 644 -T archive.tar.zst $out/share/archive + ''; + }; + + # Bare derivation for testing binary cache with logs + testDrv = builtins.toFile "test.nix" '' + derivation { + name = "test-package"; + builder = "/bin/sh"; + args = [ "-c" "echo 'Building test package...' >&2; echo 'hello from test package' > $out; echo 'Build complete!' >&2" ]; + system = builtins.currentSystem; + } + ''; +in + +{ + name = "content-encoding"; + + nodes = { + machine = + { pkgs, ... }: + { + networking.firewall.allowedTCPPorts = [ 80 ]; + + services.nginx.enable = true; + services.nginx.virtualHosts."localhost" = { + root = "${ztdCompressedFile}/share/"; + # Make sure that nginx really tries to compress the + # file on the fly with no regard to size/mime. 
+ # http://nginx.org/en/docs/http/ngx_http_gzip_module.html + extraConfig = '' + gzip on; + gzip_types *; + gzip_proxied any; + gzip_min_length 0; + ''; + + # Upload endpoint with WebDAV + locations."/cache-upload" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + client_body_temp_path /var/lib/nginx-cache/tmp; + create_full_put_path on; + dav_methods PUT DELETE; + dav_access user:rw group:rw all:r; + + # Don't try to compress already compressed files + gzip off; + + # Rewrite to remove -upload suffix when writing files + rewrite ^/cache-upload/(.*)$ /cache/$1 break; + ''; + }; + + # Download endpoint with Content-Encoding headers + locations."/cache" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + gzip off; + + # Serve .narinfo files with gzip encoding + location ~ \.narinfo$ { + add_header Content-Encoding gzip; + default_type "text/x-nix-narinfo"; + } + + # Serve .ls files with gzip encoding + location ~ \.ls$ { + add_header Content-Encoding gzip; + default_type "application/json"; + } + + # Serve log files with brotli encoding + location ~ ^/cache/log/ { + add_header Content-Encoding br; + default_type "text/plain"; + } + ''; + }; + }; + + systemd.services.nginx = { + serviceConfig = { + StateDirectory = "nginx-cache"; + StateDirectoryMode = "0755"; + }; + }; + + environment.systemPackages = with pkgs; [ + file + gzip + brotli + curl + ]; + + virtualisation.writableStore = true; + nix.settings.substituters = lib.mkForce [ ]; + nix.settings.experimental-features = [ + "nix-command" + "flakes" + ]; + }; + }; + + # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. + # Also test HTTP binary cache store with compression support. 
+ testScript = '' + # fmt: off + start_all() + + machine.wait_for_unit("nginx.service") + + # Original test: zstd archive with gzip content-encoding + # Make sure that the file is properly compressed as the test would be meaningless otherwise + curl_output = machine.succeed("curl --compressed -v http://localhost/archive 2>&1") + assert "content-encoding: gzip" in curl_output.lower(), f"Expected 'content-encoding: gzip' in curl output, but got: {curl_output}" + + archive_path = machine.succeed("nix-prefetch-url http://localhost/archive --print-path | tail -n1").strip() + mime_type = machine.succeed(f"file --brief --mime-type {archive_path}").strip() + assert mime_type == "application/zstd", f"Expected archive to be 'application/zstd', but got: {mime_type}" + machine.succeed(f"tar --zstd -xf {archive_path}") + + # Test HTTP binary cache store with compression + outPath = machine.succeed(""" + nix build --store /var/lib/build-store -f ${testDrv} --print-out-paths --print-build-logs + """).strip() + + drvPath = machine.succeed(f""" + nix path-info --store /var/lib/build-store --derivation {outPath} + """).strip() + + # Upload to cache with compression (use cache-upload endpoint) + machine.succeed(f""" + nix copy --store /var/lib/build-store --to 'http://localhost/cache-upload?narinfo-compression=gzip&ls-compression=gzip&write-nar-listing=1' {outPath} -vvvvv 2>&1 | tail -100 + """) + machine.succeed(f""" + nix store copy-log --store /var/lib/build-store --to 'http://localhost/cache-upload?log-compression=br' {drvPath} -vvvvv 2>&1 | tail -100 + """) + + # List cache contents + print(machine.succeed("find /var/lib/nginx-cache -type f")) + + narinfoHash = outPath.split('/')[3].split('-')[0] + drvName = drvPath.split('/')[3] + + # Verify compression + machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.narinfo") + machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.ls") + machine.succeed(f"brotli -t /var/lib/nginx-cache/cache/log/{drvName}") + 
+ # Check Content-Encoding headers on the download endpoint + narinfo_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.narinfo 2>&1") + assert "content-encoding: gzip" in narinfo_headers.lower(), f"Expected 'content-encoding: gzip' for .narinfo file, but headers were: {narinfo_headers}" + + ls_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.ls 2>&1") + assert "content-encoding: gzip" in ls_headers.lower(), f"Expected 'content-encoding: gzip' for .ls file, but headers were: {ls_headers}" + + log_headers = machine.succeed(f"curl -I http://localhost/cache/log/{drvName} 2>&1") + assert "content-encoding: br" in log_headers.lower(), f"Expected 'content-encoding: br' for log file, but headers were: {log_headers}" + + # Test fetching from cache + machine.succeed(f"nix copy --from 'http://localhost/cache' --no-check-sigs {outPath}") + + # Test log retrieval + log_output = machine.succeed(f"nix log --store 'http://localhost/cache' {drvPath} 2>&1") + assert "Building test package" in log_output, f"Expected 'Building test package' in log output, but got: {log_output}" + ''; +} diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 2031e02a4..5a1e08528 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -187,7 +187,7 @@ in ca-fd-leak = runNixOSTest ./ca-fd-leak; - gzip-content-encoding = runNixOSTest ./gzip-content-encoding.nix; + content-encoding = runNixOSTest ./content-encoding.nix; functional_user = runNixOSTest ./functional/as-user.nix; diff --git a/tests/nixos/gzip-content-encoding.nix b/tests/nixos/gzip-content-encoding.nix deleted file mode 100644 index 22d196c61..000000000 --- a/tests/nixos/gzip-content-encoding.nix +++ /dev/null @@ -1,74 +0,0 @@ -# Test that compressed files fetched from server with compressed responses -# do not get excessively decompressed. -# E.g. fetching a zstd compressed tarball from a server, -# which compresses the response with `Content-Encoding: gzip`. 
-# The expected result is that the fetched file is a zstd archive. - -{ lib, config, ... }: - -let - pkgs = config.nodes.machine.nixpkgs.pkgs; - - ztdCompressedFile = pkgs.stdenv.mkDerivation { - name = "dummy-zstd-compressed-archive"; - dontUnpack = true; - nativeBuildInputs = with pkgs; [ zstd ]; - buildPhase = '' - mkdir archive - for _ in {1..100}; do echo "lorem" > archive/file1; done - for _ in {1..100}; do echo "ipsum" > archive/file2; done - tar --zstd -cf archive.tar.zst archive - ''; - installPhase = '' - install -Dm 644 -T archive.tar.zst $out/share/archive - ''; - }; - - fileCmd = "${pkgs.file}/bin/file"; -in - -{ - name = "gzip-content-encoding"; - - nodes = { - machine = - { config, pkgs, ... }: - { - networking.firewall.allowedTCPPorts = [ 80 ]; - - services.nginx.enable = true; - services.nginx.virtualHosts."localhost" = { - root = "${ztdCompressedFile}/share/"; - # Make sure that nginx really tries to compress the - # file on the fly with no regard to size/mime. - # http://nginx.org/en/docs/http/ngx_http_gzip_module.html - extraConfig = '' - gzip on; - gzip_types *; - gzip_proxied any; - gzip_min_length 0; - ''; - }; - virtualisation.writableStore = true; - virtualisation.additionalPaths = with pkgs; [ file ]; - nix.settings.substituters = lib.mkForce [ ]; - }; - }; - - # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. 
- testScript = - { nodes }: - '' - # fmt: off - start_all() - - machine.wait_for_unit("nginx.service") - machine.succeed(""" - # Make sure that the file is properly compressed as the test would be meaningless otherwise - curl --compressed -v http://localhost/archive |& tr -s ' ' |& grep --ignore-case 'content-encoding: gzip' - archive_path=$(nix-prefetch-url http://localhost/archive --print-path | tail -n1) - [[ $(${fileCmd} --brief --mime-type $archive_path) == "application/zstd" ]] - tar --zstd -xf $archive_path - """) - ''; -} From 6e6f88ac4557109fddab5d46a225199ca763f226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 30 Sep 2025 11:05:20 +0200 Subject: [PATCH 056/373] add changelog for http binary cache compression --- .../rl-next/http-binary-cache-compression.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 doc/manual/rl-next/http-binary-cache-compression.md diff --git a/doc/manual/rl-next/http-binary-cache-compression.md b/doc/manual/rl-next/http-binary-cache-compression.md new file mode 100644 index 000000000..88f1de6d9 --- /dev/null +++ b/doc/manual/rl-next/http-binary-cache-compression.md @@ -0,0 +1,19 @@ +--- +synopsis: "HTTP binary caches now support transparent compression for metadata" +prs: [] +--- + +HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, +reducing bandwidth usage and storage requirements. The compression is applied transparently using the +`Content-Encoding` header, allowing compatible clients to automatically decompress the files. + +Three new configuration options control this behavior: +- `narinfo-compression`: Compression method for `.narinfo` files +- `ls-compression`: Compression method for `.ls` files +- `log-compression`: Compression method for build logs in `log/` directory + +Example usage: +``` +nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... 
+nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... +``` From 8f4a739d0fa05e44589d578f1860b45b8a48f1cc Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 18 Sep 2025 15:54:43 -0400 Subject: [PATCH 057/373] Split out `DerivationResolutionGoal` This prepares the way for fixing a few issues. --- .../build/derivation-building-goal.cc | 135 ++--------- .../build/derivation-resolution-goal.cc | 210 ++++++++++++++++++ src/libstore/build/worker.cc | 9 + .../store/build/derivation-building-goal.hh | 2 +- .../store/build/derivation-resolution-goal.hh | 82 +++++++ .../include/nix/store/build/worker.hh | 10 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + tests/functional/build.sh | 9 +- 9 files changed, 334 insertions(+), 125 deletions(-) create mode 100644 src/libstore/build/derivation-resolution-goal.cc create mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 001816ca0..bf7f332c7 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,4 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows @@ -129,46 +130,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? 
bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - /* Copy the input sources from the eval store to the build store. @@ -213,88 +174,22 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ - /* First, the input derivations. */ { - auto & fullDrv = *drv; + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. 
*/ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. */ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. 
*/ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - Derivation drvResolved{std::move(*attempt)}; - - auto pathResolved = writeDerivation(worker.store, drvResolved); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); /* TODO https://github.com/NixOS/nix/issues/13247 we should let the calling goal do this, so it has a change to pass @@ -383,7 +278,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { + for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. 
Can diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc new file mode 100644 index 000000000..584169ef3 --- /dev/null +++ b/src/libstore/build/derivation-resolution-goal.cc @@ -0,0 +1,210 @@ +#include "nix/store/build/derivation-resolution-goal.hh" +#include "nix/store/build/derivation-env-desugar.hh" +#include "nix/store/build/worker.hh" +#include "nix/util/util.hh" +#include "nix/store/common-protocol.hh" +#include "nix/store/globals.hh" + +#include +#include +#include + +#include + +namespace nix { + +DerivationResolutionGoal::DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + : Goal(worker, resolveDerivation()) + , drvPath(drvPath) +{ + drv = std::make_unique(drv_); + + name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); + trace("created"); + + /* Prevent the .chroot directory from being + garbage-collected. (See isActiveTempFile() in gc.cc.) */ + worker.store.addTempRoot(this->drvPath); +} + +void DerivationResolutionGoal::timedOut(Error && ex) {} + +std::string DerivationResolutionGoal::key() +{ + /* Ensure that derivations get built in order of their name, + i.e. a derivation named "aardvark" always comes before + "baboon". And substitution goals always happen before + derivation goals (due to "bd$"). */ + return "rd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + +/** + * Used for `inputGoals` local variable below + */ +struct value_comparison +{ + template + bool operator()(const ref & lhs, const ref & rhs) const + { + return *lhs < *rhs; + } +}; + +/* At least one of the output paths could not be + produced using a substitute. So we have to build instead. 
*/ +Goal::Co DerivationResolutionGoal::resolveDerivation() +{ + Goals waitees; + + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + + co_await await(std::move(waitees)); + + trace("all inputs realised"); + + if (nrFailed != 0) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", + Magenta(worker.store.printStorePath(drvPath)), + nrFailed, + nrFailed == 1 ? "dependency" : "dependencies"); + msg += showKnownOutputs(worker.store, *drv); + co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); + } + + /* Gather information necessary for computing the closure and/or + running the build hook. */ + + /* Determine the full set of input paths. 
*/ + + /* First, the input derivations. */ + { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. */ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). 
So we query the store + directly if there's a problem. */ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + + auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + resolvedDrv = + std::make_unique>(std::move(pathResolved), *std::move(attempt)); + } + } + + co_return amDone(ecSuccess, std::nullopt); +} + +} // namespace nix diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 3e6e0bef0..f597abb63 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -80,6 +81,12 @@ std::shared_ptr Worker::makeDerivationGoal( return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } +std::shared_ptr +Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +{ + return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); +} + std::shared_ptr Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { @@ -158,6 +165,8 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = 
std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); + else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index edb496024..8192dc778 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -155,7 +155,7 @@ private: JobCategory jobCategory() const override { - return JobCategory::Build; + return JobCategory::Administration; }; }; diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh new file mode 100644 index 000000000..ebaab4f06 --- /dev/null +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -0,0 +1,82 @@ +#pragma once +///@file + +#include "nix/store/derivations.hh" +#include "nix/store/derivation-options.hh" +#include "nix/store/build/derivation-building-misc.hh" +#include "nix/store/store-api.hh" +#include "nix/store/build/goal.hh" + +namespace nix { + +struct BuilderFailureError; + +/** + * A goal for resolving a derivation. Resolving a derivation (@see + * `Derivation::tryResolve`) simplifies its inputs, replacing + * `inputDrvs` with `inputSrcs. + * + * Conceptually, we resolve all derivations. For input-addressed + * derivations (that don't transtively depend on content-addressed + * derivations), however, we don't actually use the resolved derivation, + * because the output paths would appear invalid (if we tried to verify + * them), since they are computed from the original, unresolved inputs. 
+ * + * That said, if we ever made the new flavor of input-addressing as described + * in issue #9259, then the input-addressing would be based on the resolved + * inputs, and we like the CA case *would* use the output of this goal. + * + * (The point of this discussion is not to randomly stuff information on + * a yet-unimplemented feature (issue #9259) in the codebase, but + * rather, to illustrate that there is no inherent tension between + * explicit derivation resolution and input-addressing in general. That + * tension only exists with the type of input-addressing we've + * historically used.) + */ +struct DerivationResolutionGoal : public Goal +{ + DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + + /** + * If the derivation needed to be resolved, this is resulting + * resolved derivations and its path. + */ + std::unique_ptr> resolvedDrv; + + void timedOut(Error && ex) override; + +private: + + /** + * The path of the derivation. + */ + StorePath drvPath; + + /** + * The derivation stored at drvPath. + */ + std::unique_ptr drv; + + /** + * The remainder is state held during the build. + */ + + BuildMode buildMode; + + std::unique_ptr act; + + std::string key() override; + + /** + * The states. + */ + Co resolveDerivation(); + + JobCategory jobCategory() const override + { + return JobCategory::Administration; + }; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index a6de780c1..9660d66b2 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,6 +16,7 @@ namespace nix { /* Forward definition. 
*/ struct DerivationTrampolineGoal; struct DerivationGoal; +struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -111,6 +112,7 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; + std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -224,7 +226,13 @@ public: BuildMode buildMode = bmNormal); /** - * @ref DerivationBuildingGoal "derivation goal" + * @ref DerivationResolutionGoal "derivation resolution goal" + */ + std::shared_ptr + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + + /** + * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..3e115fc08 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -17,6 +17,7 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', + 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e3004ebf5..f5eb858ef 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -274,6 +274,7 @@ sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', + 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0a19ff7da..c9a39438d 
100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,7 +178,8 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 2 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -186,11 +187,13 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +# Either x2 or x3 could have failed, x4 depends on both symmetrically +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 3 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." From 39f6fd9b464298f37a08cfe7485271b9294fd278 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:13:22 -0400 Subject: [PATCH 058/373] Fix #13247 Resolve the derivation before creating a building goal, in a context where we know what output(s) we want. That way we have a chance just to download the outputs we want. 
Fix #13247 --- .../build/derivation-building-goal.cc | 103 ------------------ src/libstore/build/derivation-goal.cc | 91 ++++++++++++++++ tests/functional/ca/issue-13247.sh | 5 +- 3 files changed, 92 insertions(+), 107 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index bf7f332c7..98b80862d 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,7 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -175,107 +173,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ { - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - /* TODO https://github.com/NixOS/nix/issues/13247 we should - let the calling goal do this, so it has a change to pass - just the output(s) it cares about. 
*/ - auto resolvedDrvGoal = - worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = resolvedResult.tryGetSuccess()) { - auto & success = *successP; - SingleDrvOutputs builtOutputs; - - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - for (auto & outputName : drvResolved.outputNames()) { - auto outputHash = get(outputHashes, outputName); - auto resolvedHash = get(resolvedHashes, outputName); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - outputName); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, outputName); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - outputName); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, outputName}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - builtOutputs.emplace(outputName, realisation); - } - - runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(success.status, std::move(builtOutputs)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* If we get this far, we know no dynamic drvs inputs */ for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 5dfc334a8..8e924fd4a 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -146,6 +147,96 @@ Goal::Co DerivationGoal::haveDerivation() worker.store.printStorePath(drvPath)); } + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } + + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = 
*resolutionGoal->resolvedDrv; + + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); + + auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + auto outputHash = get(outputHashes, wantedOutput); + auto resolvedHash = get(resolvedHashes, wantedOutput); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + worker.store.printStorePath(drvPath), + wantedOutput); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, wantedOutput); + if (take1) + return *take1; + + /* The above `get` should work. But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. */ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + wantedOutput); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(status, std::move(realisation)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* Give up on substitution for the output we want, actually build this derivation */ auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 686d90ced..705919513 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,7 +65,4 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] - -# Output should *not* be here, this is the bug -[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] -skipTest "bug is not yet fixed" +[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] From c97b050a6c212d0b748303080b5604309b7abdce Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:40:00 -0400 Subject: [PATCH 059/373] Fix `ca/eval-store.sh` test The refactor in the last commit fixed the bug it was supposed to fix, but introduced a new bug in that sometimes we tried to write a resolved derivation to a store before all its `inputSrcs` were in that store. 
The solution is to defer writing the derivation until inside `DerivationBuildingGoal`, just before we do an actual build. At this point, we are sure that all inputs are in the store. This does have the side effect of meaning we don't write down the resolved derivation in the substituting case, only the building case, but I think that is actually fine. The store that actually does the building should make a record of what it built by storing the resolved derivation. Other stores that just substitute from that store don't necessarily want that derivation however. They can trust the substituter to keep the record around, or barring that, they can attempt to re-resolve everything, if they need to be audited. --- src/libstore/build/derivation-building-goal.cc | 13 ++++++++++--- src/libstore/build/derivation-goal.cc | 16 +++++++--------- src/libstore/build/worker.cc | 15 ++++++++++----- .../nix/store/build/derivation-building-goal.hh | 17 +++++++++++++++-- .../include/nix/store/build/derivation-goal.hh | 8 ++++++-- src/libstore/include/nix/store/build/worker.hh | 10 +++++++--- 6 files changed, 55 insertions(+), 24 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 98b80862d..fa819c96b 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -26,8 +26,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) - : Goal(worker, gaveUpOnSubstitution()) + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode, bool storeDerivation) + : Goal(worker, gaveUpOnSubstitution(storeDerivation)) , drvPath(drvPath) , buildMode(buildMode) { @@ -124,7 +124,7 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead.
*/ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) { Goals waitees; @@ -172,6 +172,13 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ + if (storeDerivation) { + assert(drv->inputDrvs.map.empty()); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, *drv); + } + { /* If we get this far, we know no dynamic drvs inputs */ diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 8e924fd4a..cc3ba2b7b 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -30,8 +30,9 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode) - : Goal(worker, haveDerivation()) + BuildMode buildMode, + bool storeDerivation) + : Goal(worker, haveDerivation(storeDerivation)) , drvPath(drvPath) , wantedOutput(wantedOutput) , outputHash{[&] { @@ -65,7 +66,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation() +Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) { trace("have derivation"); @@ -159,11 +160,8 @@ Goal::Co DerivationGoal::haveDerivation() if (resolutionGoal->resolvedDrv) { auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + auto resolvedDrvGoal = + worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); { Goals waitees{resolvedDrvGoal}; co_await await(std::move(waitees)); @@ -239,7 +237,7 @@ Goal::Co DerivationGoal::haveDerivation() /* Give up 
on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); + auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); /* We will finish with it ourselves, as if we were the derivational goal. */ g->preserveException = true; diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index f597abb63..53175a8c4 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -76,9 +76,14 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation) { - return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); + return initGoalIfNeeded( + derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); } std::shared_ptr @@ -87,10 +92,10 @@ Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr Worker::makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) { - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); } std::shared_ptr diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index 
8192dc778..ab063ff3f 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,8 +29,21 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { + /** + * @param storeDerivation Whether to store the derivation in + * `worker.store`. This is useful for newly-resolved derivations. In this + * case, the derivation was not created a priori, e.g. purely (or close + * enough) from evaluation of the Nix language, but also depends on the + * exact content produced by upstream builds. It is strongly advised to + * have a permanent record of such a resolved derivation in order to + * faithfully reconstruct the build history. + */ DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + const StorePath & drvPath, + const Derivation & drv, + Worker & worker, + BuildMode buildMode = bmNormal, + bool storeDerivation = false); ~DerivationBuildingGoal(); private: @@ -100,7 +113,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(); + Co gaveUpOnSubstitution(bool storeDerivation); Co tryToBuild(); /** diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index e05bf1c0b..353e7c489 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,12 +40,16 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; + /** + * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. 
+ */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal); + BuildMode buildMode = bmNormal, + bool storeDerivation = false); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -80,7 +84,7 @@ private: /** * The states. */ - Co haveDerivation(); + Co haveDerivation(bool storeDerivation); /** * Return `std::nullopt` if the output is unknown, e.g. un unbuilt diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9660d66b2..9767590ac 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -223,7 +223,8 @@ public: const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, - BuildMode buildMode = bmNormal); + BuildMode buildMode = bmNormal, + bool storeDerivation = false); /** * @ref DerivationResolutionGoal "derivation resolution goal" @@ -234,8 +235,11 @@ public: /** * @ref DerivationBuildingGoal "derivation building goal" */ - std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + std::shared_ptr makeDerivationBuildingGoal( + const StorePath & drvPath, + const Derivation & drv, + BuildMode buildMode = bmNormal, + bool storeDerivation = false); /** * @ref PathSubstitutionGoal "substitution goal" From 88bd0c25f2f0fda6502653f40e88c6d377bc4617 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:03:43 -0400 Subject: [PATCH 060/373] `Store::registerDrvOutput` make pure virtual It should be the responsibility of implementations that don't implement it to say so. 
See also PR #9799, and issue #5729 --- src/libstore/dummy-store.cc | 5 +++++ src/libstore/include/nix/store/legacy-ssh-store.hh | 7 ++++++- src/libstore/include/nix/store/store-api.hh | 5 +---- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 4b485ca66..43c575263 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -258,6 +258,11 @@ struct DummyStore : virtual Store return path; } + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + void narFromPath(const StorePath & path, Sink & sink) override { bool visited = contents.cvisit(path, [&](const auto & kv) { diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 75751e2d1..c91f88a84 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -109,7 +109,7 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } - virtual StorePath addToStoreFromDump( + StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, @@ -121,6 +121,11 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + public: BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 6d3f6b8d0..1131ec975 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -598,10 +598,7 @@ public: * floating-ca derivations and their dependencies as there's no way to * retrieve this information otherwise. 
*/ - virtual void registerDrvOutput(const Realisation & output) - { - unsupported("registerDrvOutput"); - } + virtual void registerDrvOutput(const Realisation & output) = 0; virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs) { From 9ac306c4dfb1ff94b85656c32ff55c55a8d1d7f7 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:52:36 -0400 Subject: [PATCH 061/373] Expose some core implementation details and write a basic unit test for the dummy store This test currently doesn't use the new-exposed functionality, but with future changes the tests will be expanded and they will be used. --- src/libstore-tests/dummy-store.cc | 27 +++++++++++++ src/libstore-tests/meson.build | 1 + src/libstore/dummy-store.cc | 31 +++++--------- .../include/nix/store/dummy-store-impl.hh | 40 +++++++++++++++++++ src/libstore/include/nix/store/dummy-store.hh | 7 ++++ src/libstore/include/nix/store/meson.build | 1 + 6 files changed, 87 insertions(+), 20 deletions(-) create mode 100644 src/libstore-tests/dummy-store.cc create mode 100644 src/libstore/include/nix/store/dummy-store-impl.hh diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc new file mode 100644 index 000000000..b841d7890 --- /dev/null +++ b/src/libstore-tests/dummy-store.cc @@ -0,0 +1,27 @@ +#include + +#include "nix/store/dummy-store.hh" +#include "nix/store/globals.hh" +#include "nix/store/realisation.hh" + +namespace nix { + +TEST(DummyStore, realisation_read) +{ + initLibStore(/*loadConfig=*/false); + + auto store = [] { + auto cfg = make_ref(StoreReference::Params{}); + cfg->readOnly = false; + return cfg->openStore(); + }(); + + auto drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", HashAlgorithm::SHA256, HashFormat::Base16); + + auto outputName = "foo"; + + EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); +} + +} // namespace nix diff --git 
a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 915c10a38..dd817de32 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -61,6 +61,7 @@ sources = files( 'derivation.cc', 'derived-path.cc', 'downstream-placeholder.cc', + 'dummy-store.cc', 'http-binary-cache-store.cc', 'legacy-ssh-store.cc', 'local-binary-cache-store.cc', diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 4b485ca66..f60a72df4 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -2,7 +2,7 @@ #include "nix/util/archive.hh" #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" -#include "nix/store/dummy-store.hh" +#include "nix/store/dummy-store-impl.hh" #include @@ -108,24 +108,15 @@ public: } // namespace -struct DummyStore : virtual Store +ref DummyStoreConfig::openStore() const +{ + return openDummyStore(); +} + +struct DummyStoreImpl : DummyStore { using Config = DummyStoreConfig; - ref config; - - struct PathInfoAndContents - { - UnkeyedValidPathInfo info; - ref contents; - }; - - /** - * This is map conceptually owns the file system objects for each - * store object. 
- */ - boost::concurrent_flat_map contents; - /** * This view conceptually just borrows the file systems objects of * each store object from `contents`, and combines them together @@ -135,9 +126,9 @@ struct DummyStore : virtual Store */ ref wholeStoreView = make_ref(); - DummyStore(ref config) + DummyStoreImpl(ref config) : Store{*config} - , config(config) + , DummyStore{config} { wholeStoreView->setPathDisplay(config->storeDir); } @@ -289,9 +280,9 @@ struct DummyStore : virtual Store } }; -ref DummyStore::Config::openStore() const +ref DummyStore::Config::openDummyStore() const { - return make_ref(ref{shared_from_this()}); + return make_ref(ref{shared_from_this()}); } static RegisterStoreImplementation regDummyStore; diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh new file mode 100644 index 000000000..e05bb94ff --- /dev/null +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -0,0 +1,40 @@ +#pragma once +///@file + +#include "nix/store/dummy-store.hh" + +#include + +namespace nix { + +struct MemorySourceAccessor; + +/** + * Enough of the Dummy Store exposed for sake of writing unit tests + */ +struct DummyStore : virtual Store +{ + using Config = DummyStoreConfig; + + ref config; + + struct PathInfoAndContents + { + UnkeyedValidPathInfo info; + ref contents; + }; + + /** + * This is map conceptually owns the file system objects for each + * store object. 
+ */ + boost::concurrent_flat_map contents; + + DummyStore(ref config) + : Store{*config} + , config(config) + { + } +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index e93aad366..95c09078c 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -5,6 +5,8 @@ namespace nix { +struct DummyStore; + struct DummyStoreConfig : public std::enable_shared_from_this, virtual StoreConfig { DummyStoreConfig(const Params & params) @@ -42,6 +44,11 @@ struct DummyStoreConfig : public std::enable_shared_from_this, return {"dummy"}; } + /** + * Same as `openStore`, just with a more precise return type. + */ + ref openDummyStore() const; + ref openStore() const override; StoreReference getReference() const override diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..ac72f04e2 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -34,6 +34,7 @@ headers = [ config_pub_h ] + files( 'derived-path-map.hh', 'derived-path.hh', 'downstream-placeholder.hh', + 'dummy-store-impl.hh', 'dummy-store.hh', 'export-import.hh', 'filetransfer.hh', From 32cbf5f55af9eb9d10493f06d42f723ef0657064 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:52:44 -0700 Subject: [PATCH 062/373] shellcheck fix: tests/functional/pass-as-file.sh --- maintainers/flake-module.nix | 1 - tests/functional/pass-as-file.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e9a820d72..ef345bbe4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/pass-as-file\.sh$'' ''^tests/functional/path-from-hash-part\.sh$'' 
''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' diff --git a/tests/functional/pass-as-file.sh b/tests/functional/pass-as-file.sh index 66a8e588e..68f68b8cf 100755 --- a/tests/functional/pass-as-file.sh +++ b/tests/functional/pass-as-file.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2034 outPath=$(nix-build --no-out-link -E " with import ${config_nix}; From 112c9d8f547446e28df5d01d91be3a17d8f12bc6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:53:33 -0700 Subject: [PATCH 063/373] shellcheck fix: tests/functional/path-from-hash-part.sh --- maintainers/flake-module.nix | 1 - tests/functional/path-from-hash-part.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ef345bbe4..06915c2ed 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/path-from-hash-part\.sh$'' ''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' diff --git a/tests/functional/path-from-hash-part.sh b/tests/functional/path-from-hash-part.sh index 41d1b7410..0b258a6ea 100755 --- a/tests/functional/path-from-hash-part.sh +++ b/tests/functional/path-from-hash-part.sh @@ -4,9 +4,9 @@ source common.sh path=$(nix build --no-link --print-out-paths -f simple.nix) -hash_part=$(basename $path) +hash_part=$(basename "$path") hash_part=${hash_part:0:32} -path2=$(nix store path-from-hash-part $hash_part) +path2=$(nix store path-from-hash-part "$hash_part") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] From c82aa04a3d80b9d42d71f3d075119b30184da321 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:53:54 -0700 Subject: [PATCH 064/373] shellcheck fix: tests/functional/path-info.sh --- maintainers/flake-module.nix | 1 - 
tests/functional/path-info.sh | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 06915c2ed..3c37f58f6 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 8597de683..463ac6214 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -2,14 +2,14 @@ source common.sh -echo foo > $TEST_ROOT/foo -foo=$(nix store add-file $TEST_ROOT/foo) +echo foo > "$TEST_ROOT"/foo +foo=$(nix store add-file "$TEST_ROOT"/foo) -echo bar > $TEST_ROOT/bar -bar=$(nix store add-file $TEST_ROOT/bar) +echo bar > "$TEST_ROOT"/bar +bar=$(nix store add-file "$TEST_ROOT"/bar) -echo baz > $TEST_ROOT/baz -baz=$(nix store add-file $TEST_ROOT/baz) +echo baz > "$TEST_ROOT"/baz +baz=$(nix store add-file "$TEST_ROOT"/baz) nix-store --delete "$baz" diff --unified --color=always \ From 1aaa3dafeee303062fbcf3c7c266fde9101f2db2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:54:29 -0700 Subject: [PATCH 065/373] shellcheck fix: tests/functional/placeholders.sh --- maintainers/flake-module.nix | 1 - tests/functional/placeholders.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3c37f58f6..f7cf94e54 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' ''^tests/functional/push-to-store-old\.sh$'' diff --git 
a/tests/functional/placeholders.sh b/tests/functional/placeholders.sh index 374203af8..5791d8006 100755 --- a/tests/functional/placeholders.sh +++ b/tests/functional/placeholders.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; From bcd8311ec6b9893697e42eb44f3f205a121673ed Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:55:03 -0700 Subject: [PATCH 066/373] shellcheck fix: tests/functional/post-hook.sh --- maintainers/flake-module.nix | 1 - tests/functional/post-hook.sh | 13 ++++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f7cf94e54..0caa97b23 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' ''^tests/functional/push-to-store-old\.sh$'' ''^tests/functional/push-to-store\.sh$'' diff --git a/tests/functional/post-hook.sh b/tests/functional/post-hook.sh index 94a6d0d69..67bb46377 100755 --- a/tests/functional/post-hook.sh +++ b/tests/functional/post-hook.sh @@ -6,10 +6,10 @@ TODO_NixOS clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result export REMOTE_STORE=file:$TEST_ROOT/remote_store -echo 'require-sigs = false' >> $test_nix_conf +echo 'require-sigs = false' >> "$test_nix_conf" restartDaemon @@ -20,11 +20,14 @@ else fi # Build the dependencies and push them to the remote store. -nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook "$pushToStore" +nix-build -o "$TEST_ROOT"/result dependencies.nix --post-build-hook "$pushToStore" # See if all outputs are passed to the post-build hook by only specifying one # We're not able to test CA tests this way -export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! 
$NIX_TESTS_CA_BY_DEFAULT ]) -nix-build -o $TEST_ROOT/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" +# +# FIXME: This export is hiding error condition +# shellcheck disable=SC2155 +export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! "$NIX_TESTS_CA_BY_DEFAULT" ]) +nix-build -o "$TEST_ROOT"/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" clearStore From b951e6e1ed555719157e982f0493faf97f504322 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:56:20 -0700 Subject: [PATCH 067/373] shellcheck fix: tests/functional/pure-eval.sh --- tests/functional/pure-eval.sh | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/functional/pure-eval.sh b/tests/functional/pure-eval.sh index 45a65f9ab..b769b2150 100755 --- a/tests/functional/pure-eval.sh +++ b/tests/functional/pure-eval.sh @@ -10,6 +10,7 @@ nix eval --expr 'assert 1 + 2 == 3; true' missingImpureErrorMsg=$(! nix eval --expr 'builtins.readFile ./pure-eval.sh' 2>&1) +# shellcheck disable=SC1111 echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ fail "The error message should mention the “--impure” flag to unblock users" @@ -25,14 +26,15 @@ echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ (! nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x") nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash file pure-eval.nix --type sha256)\"; })).x" -rm -rf $TEST_ROOT/eval-out -nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' -[[ $(cat $TEST_ROOT/eval-out/x) = foobar ]] -[[ $(cat $TEST_ROOT/eval-out/y/z) = bla ]] +rm -rf "$TEST_ROOT"/eval-out +nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' +[[ $(cat "$TEST_ROOT"/eval-out/x) = foobar ]] +[[ $(cat "$TEST_ROOT"/eval-out/y/z) = bla ]] -rm -rf $TEST_ROOT/eval-out -(! 
nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ "." = "bla"; }') +rm -rf "$TEST_ROOT"/eval-out +(! nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ "." = "bla"; }') +# shellcheck disable=SC2088 (! nix eval --expr '~/foo') expectStderr 0 nix eval --expr "/some/absolute/path" \ From a11195d6cefbbc3cf5140f1024fd69c54b30b6d9 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:56:51 -0700 Subject: [PATCH 068/373] shellcheck fix: tests/functional/push-to-store-old.sh --- maintainers/flake-module.nix | 2 -- 1 file changed, 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0caa97b23..285a76f59 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/pure-eval\.sh$'' - ''^tests/functional/push-to-store-old\.sh$'' ''^tests/functional/push-to-store\.sh$'' ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' From 1492c1bc5dd1eb39326bae5e3bcae67813d17b7c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:57:07 -0700 Subject: [PATCH 069/373] shellcheck fix: tests/functional/push-to-store.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 285a76f59..392ba4387 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/push-to-store\.sh$'' ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' From c8a77196148f9027caaa885ee96d0c45b9ec5a7e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:57:29 -0700 Subject: [PATCH 070/373] shellcheck fix: tests/functional/read-only-store.sh --- 
maintainers/flake-module.nix | 1 - tests/functional/read-only-store.sh | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 392ba4387..24d2e08d4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' diff --git a/tests/functional/read-only-store.sh b/tests/functional/read-only-store.sh index ea96bba41..8ccca2192 100755 --- a/tests/functional/read-only-store.sh +++ b/tests/functional/read-only-store.sh @@ -12,10 +12,10 @@ clearStore happy () { # We can do a read-only query just fine with a read-only store - nix --store local?read-only=true path-info $dummyPath + nix --store local?read-only=true path-info "$dummyPath" # `local://` also works. 
- nix --store local://?read-only=true path-info $dummyPath + nix --store local://?read-only=true path-info "$dummyPath" # We can "write" an already-present store-path a read-only store, because no IO is actually required nix-store --store local?read-only=true --add dummy @@ -37,8 +37,8 @@ happy ## Testing read-only mode with an underlying store that is actually read-only # Ensure store is actually read-only -chmod -R -w $TEST_ROOT/store -chmod -R -w $TEST_ROOT/var +chmod -R -w "$TEST_ROOT"/store +chmod -R -w "$TEST_ROOT"/var # Make sure we fail on add operations on the read-only store # This is only for adding files that are not *already* in the store From 8a36cf4422a094ba1b60a5ad8afaf632ac8236ae Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:58:11 -0700 Subject: [PATCH 071/373] shellcheck fix: tests/functional/readfile-context.sh --- maintainers/flake-module.nix | 1 - tests/functional/readfile-context.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 24d2e08d4..57e0f9997 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' diff --git a/tests/functional/readfile-context.sh b/tests/functional/readfile-context.sh index cb9ef6234..effe483dc 100755 --- a/tests/functional/readfile-context.sh +++ b/tests/functional/readfile-context.sh @@ -9,12 +9,12 @@ clearStore outPath=$(nix-build --no-out-link readfile-context.nix) # Set a GC root. -ln -s $outPath "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath" "$NIX_STATE_DIR/gcroots/foo" # Check that file exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" 
] nix-collect-garbage # Check that file still exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" ] From 5d1333bf4bf6277f1a10643a3b82d9f15ebcb7ea Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:59:03 -0700 Subject: [PATCH 072/373] shellcheck fix: tests/functional/recursive.sh --- maintainers/flake-module.nix | 1 - tests/functional/recursive.sh | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 57e0f9997..5bafcd640 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' ''^tests/functional/repair\.sh$'' diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 640fb92d2..9115aa775 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -9,15 +9,16 @@ restartDaemon clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result -export unreachable=$(nix store add-path ./recursive.sh) +unreachable=$(nix store add-path ./recursive.sh) +export unreachable -NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix -[[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]] +[[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] # Make sure the recursively created paths are in the closure. 
-nix path-info -r $TEST_ROOT/result | grep foobar -nix path-info -r $TEST_ROOT/result | grep fnord -nix path-info -r $TEST_ROOT/result | grep inner1 +nix path-info -r "$TEST_ROOT"/result | grep foobar +nix path-info -r "$TEST_ROOT"/result | grep fnord +nix path-info -r "$TEST_ROOT"/result | grep inner1 From 7ed40119906e60ff2548c3ac3bc0265b158e02c7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:00:38 -0700 Subject: [PATCH 073/373] shellcheck fix: tests/functional/referrers.sh --- maintainers/flake-module.nix | 1 - tests/functional/referrers.sh | 18 ++++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5bafcd640..3f27668c8 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' ''^tests/functional/repair\.sh$'' ''^tests/functional/restricted\.sh$'' diff --git a/tests/functional/referrers.sh b/tests/functional/referrers.sh index 411cdb7c1..ae6b39ae1 100755 --- a/tests/functional/referrers.sh +++ b/tests/functional/referrers.sh @@ -11,32 +11,34 @@ clearStore max=500 reference=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bla -touch $reference -(echo $reference && echo && echo 0) | nix-store --register-validity +touch "$reference" +(echo "$reference" && echo && echo 0) | nix-store --register-validity echo "making registration..." 
set +x +# shellcheck disable=SC2004 for ((n = 0; n < $max; n++)); do storePath=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$n - echo -n > $storePath + echo -n > "$storePath" ref2=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$((n+1)) if test $((n+1)) = $max; then ref2=$reference fi - echo $storePath; echo; echo 2; echo $reference; echo $ref2 -done > $TEST_ROOT/reg_info + echo "$storePath"; echo; echo 2; echo "$reference"; echo "$ref2" +done > "$TEST_ROOT"/reg_info set -x echo "registering..." -nix-store --register-validity < $TEST_ROOT/reg_info +nix-store --register-validity < "$TEST_ROOT"/reg_info echo "collecting garbage..." -ln -sfn $reference "$NIX_STATE_DIR/gcroots/ref" +ln -sfn "$reference" "$NIX_STATE_DIR/gcroots/ref" nix-store --gc -if [ -n "$(type -p sqlite3)" -a "$(sqlite3 $NIX_STATE_DIR/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then +# shellcheck disable=SC2166 +if [ -n "$(type -p sqlite3)" -a "$(sqlite3 "$NIX_STATE_DIR"/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then echo "referrers not cleaned up" exit 1 fi From 06f21e101f9180926027bb1c1c2043d9fc904b61 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:02:04 -0700 Subject: [PATCH 074/373] shellcheck fix: tests/functional/remote-store.sh --- tests/functional/remote-store.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/functional/remote-store.sh b/tests/functional/remote-store.sh index 841b6b27a..f125ae137 100755 --- a/tests/functional/remote-store.sh +++ b/tests/functional/remote-store.sh @@ -7,10 +7,10 @@ TODO_NixOS clearStore # Ensure "fake ssh" remote store works just as legacy fake ssh would. 
-nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store doctor +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store doctor # Ensure that store info trusted works with ssh-ng:// -nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store store info --json | jq -e '.trusted' +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store store info --json | jq -e '.trusted' startDaemon @@ -31,8 +31,8 @@ NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs-test-case.sh nix-store --gc --max-freed 1K -nix-store --dump-db > $TEST_ROOT/d1 -NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2 -cmp $TEST_ROOT/d1 $TEST_ROOT/d2 +nix-store --dump-db > "$TEST_ROOT"/d1 +NIX_REMOTE='' nix-store --dump-db > "$TEST_ROOT"/d2 +cmp "$TEST_ROOT"/d1 "$TEST_ROOT"/d2 killDaemon From d35d86da89b14b19eb0855a357fa5e945d2ce4f2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:04:26 -0700 Subject: [PATCH 075/373] shellcheck fix: tests/functional/repair.sh --- maintainers/flake-module.nix | 2 - tests/functional/repair.sh | 84 ++++++++++++++++++++---------------- 2 files changed, 46 insertions(+), 40 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3f27668c8..12bb8375e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/remote-store\.sh$'' - ''^tests/functional/repair\.sh$'' ''^tests/functional/restricted\.sh$'' ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' diff --git a/tests/functional/repair.sh b/tests/functional/repair.sh index 1f6004b2c..a90bdcfd5 100755 --- a/tests/functional/repair.sh +++ b/tests/functional/repair.sh @@ -8,39 +8,43 @@ TODO_NixOS clearStore -path=$(nix-build dependencies.nix -o $TEST_ROOT/result) -path2=$(nix-store -qR $path | grep input-2) +path=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +path2=$(nix-store -qR 
"$path" | grep input-2) nix-store --verify --check-contents -v -hash=$(nix-hash $path2) +hash=$(nix-hash "$path2") # Corrupt a path and check whether nix-build --repair can fix it. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad (! nix-store --verify --check-contents -v) # The path can be repaired by rebuilding the derivation. nix-store --verify --check-contents --repair -(! [ -e $path2/bad ]) -(! [ -w $path2 ]) +# shellcheck disable=SC2235 +(! [ -e "$path2"/bad ]) +# shellcheck disable=SC2235 +(! [ -w "$path2" ]) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" # Re-corrupt and delete the deriver. Now --verify --repair should # not work. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad -nix-store --delete $(nix-store -q --referrers-closure $(nix-store -qd $path2)) +# shellcheck disable=SC2046 +nix-store --delete $(nix-store -q --referrers-closure "$(nix-store -qd "$path2")") (! nix-store --verify --check-contents --repair) -nix-build dependencies.nix -o $TEST_ROOT/result --repair +nix-build dependencies.nix -o "$TEST_ROOT"/result --repair -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi @@ -49,79 +53,83 @@ fi # --verify can fix it. clearCache -nix copy --to file://$cacheDir $path +nix copy --to file://"$cacheDir" "$path" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" nix-store --verify --check-contents --repair --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check --verify-path and --repair-path. 
-nix-store --verify-path $path2 +nix-store --verify-path "$path2" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path succeeded unexpectedly" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (1/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2/bar -echo 'rabrab' > $path2/bar # different length +chmod u+w "$path2"/bar +echo 'rabrab' > "$path2"/bar # different length -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --option auto-optimise-store true +nix-store --repair-path "$path2" --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (2/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2 -chmod u+w $path2/bar -sed -e 's/./X/g' < $path2/bar > $path2/tmp # same length, different content. 
-cp $path2/tmp $path2/bar -rm $path2/tmp +chmod u+w "$path2" +chmod u+w "$path2"/bar +sed -e 's/./X/g' < "$path2"/bar > "$path2"/tmp # same length, different content. +cp "$path2"/tmp "$path2"/bar +rm "$path2"/tmp -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi From b42ed6a74d281763e32285ae8e96900294cb4173 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:05:29 -0700 Subject: [PATCH 076/373] shellcheck fix: tests/functional/restricted.sh --- maintainers/flake-module.nix | 1 - tests/functional/restricted.sh | 22 ++++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 12bb8375e..c56599785 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/restricted\.sh$'' ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' diff --git a/tests/functional/restricted.sh b/tests/functional/restricted.sh index 00ee4ddc8..2f65f15fe 100755 --- a/tests/functional/restricted.sh +++ b/tests/functional/restricted.sh @@ -40,30 +40,32 @@ nix eval --raw --expr "builtins.fetchurl file://${_NIX_TEST_SOURCE_DIR}/restrict (! 
nix eval --raw --expr "fetchGit git://github.com/NixOS/patchelf.git" --impure --restrict-eval) ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted.nix" "$TEST_ROOT/restricted.nix" -[[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) +[[ $(nix-instantiate --eval "$TEST_ROOT"/restricted.nix) == 3 ]] +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix) +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I "$TEST_ROOT") +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I .) nix-instantiate --eval --restrict-eval "$TEST_ROOT/restricted.nix" -I "$TEST_ROOT" -I "${_NIX_TEST_SOURCE_DIR}" +# shellcheck disable=SC2016 [[ $(nix eval --raw --impure --restrict-eval -I . --expr 'builtins.readFile "${import ./simple.nix}/hello"') == 'Hello World!' ]] # Check that we can't follow a symlink outside of the allowed paths. -mkdir -p $TEST_ROOT/tunnel.d $TEST_ROOT/foo2 -ln -sfn .. $TEST_ROOT/tunnel.d/tunnel -echo foo > $TEST_ROOT/bar +mkdir -p "$TEST_ROOT"/tunnel.d "$TEST_ROOT"/foo2 +ln -sfn .. 
"$TEST_ROOT"/tunnel.d/tunnel +echo foo > "$TEST_ROOT"/bar -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" # Reading the parents of allowed paths should show only the ancestors of the allowed paths. -[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] +[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] # Check whether we can leak symlink information through directory traversal. 
traverseDir="${_NIX_TEST_SOURCE_DIR}/restricted-traverse-me" ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted-secret" "${_NIX_TEST_SOURCE_DIR}/restricted-innocent" mkdir -p "$traverseDir" +# shellcheck disable=SC2001 goUp="..$(echo "$traverseDir" | sed -e 's,[^/]\+,..,g')" output="$(nix eval --raw --restrict-eval -I "$traverseDir" \ --expr "builtins.readFile \"$traverseDir/$goUp${_NIX_TEST_SOURCE_DIR}/restricted-innocent\"" \ From 64d828b8c417b94eb168b3a6e0b296329f42ef2d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:05:56 -0700 Subject: [PATCH 077/373] shellcheck fix: tests/functional/search.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c56599785..21dcf9c2e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' From 1a5ccbeafc4ee7074283e1b0d095969f52793252 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:06:37 -0700 Subject: [PATCH 078/373] shellcheck fix: tests/functional/secure-drv-outputs.sh --- maintainers/flake-module.nix | 1 - tests/functional/secure-drv-outputs.sh | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 21dcf9c2e..711b31ee4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' diff --git a/tests/functional/secure-drv-outputs.sh b/tests/functional/secure-drv-outputs.sh index 
5cc4af435..876d3c817 100755 --- a/tests/functional/secure-drv-outputs.sh +++ b/tests/functional/secure-drv-outputs.sh @@ -13,20 +13,20 @@ clearStore startDaemon # Determine the output path of the "good" derivation. -goodOut=$(nix-store -q $(nix-instantiate ./secure-drv-outputs.nix -A good)) +goodOut=$(nix-store -q "$(nix-instantiate ./secure-drv-outputs.nix -A good)") # Instantiate the "bad" derivation. badDrv=$(nix-instantiate ./secure-drv-outputs.nix -A bad) -badOut=$(nix-store -q $badDrv) +badOut=$(nix-store -q "$badDrv") # Rewrite the bad derivation to produce the output path of the good # derivation. -rm -f $TEST_ROOT/bad.drv -sed -e "s|$badOut|$goodOut|g" < $badDrv > $TEST_ROOT/bad.drv +rm -f "$TEST_ROOT"/bad.drv +sed -e "s|$badOut|$goodOut|g" < "$badDrv" > "$TEST_ROOT"/bad.drv # Add the manipulated derivation to the store and build it. This # should fail. -if badDrv2=$(nix-store --add $TEST_ROOT/bad.drv); then +if badDrv2=$(nix-store --add "$TEST_ROOT"/bad.drv); then nix-store -r "$badDrv2" fi From b8f1a8a0c170e133c1390027d3341b11dae2fdbf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:07:09 -0700 Subject: [PATCH 079/373] shellcheck fix: tests/functional/selfref-gc.sh --- maintainers/flake-module.nix | 1 - tests/functional/selfref-gc.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 711b31ee4..458aaa777 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' diff --git a/tests/functional/selfref-gc.sh b/tests/functional/selfref-gc.sh index dc4f14cc1..de202a09d 100755 --- a/tests/functional/selfref-gc.sh +++ b/tests/functional/selfref-gc.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan 
"2.6.0pre20211215" clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; From 7266a514124444379358ae4f60e975e208981feb Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:08:27 -0700 Subject: [PATCH 080/373] shellcheck fix: tests/functional/shell.shebang.sh --- maintainers/flake-module.nix | 1 - tests/functional/shell.shebang.sh | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 458aaa777..c52201229 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' diff --git a/tests/functional/shell.shebang.sh b/tests/functional/shell.shebang.sh index f7132043d..b6e4ee286 100755 --- a/tests/functional/shell.shebang.sh +++ b/tests/functional/shell.shebang.sh @@ -1,4 +1,5 @@ #! @ENV_PROG@ nix-shell #! nix-shell -I nixpkgs=shell.nix --no-substitute #!
nix-shell --pure -i bash -p foo bar -echo "$(foo) $(bar) $@" +# shellcheck shell=bash +echo "$(foo) $(bar)" "$@" From 8c9bfb6e1249453ec984afb16a62d6d78b5f646b Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:18:48 -0700 Subject: [PATCH 081/373] shellcheck fix: tests/functional/simple.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/simple.builder.sh | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c52201229..806444df4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' diff --git a/tests/functional/simple.builder.sh b/tests/functional/simple.builder.sh index 97abf0676..27cdfe684 100644 --- a/tests/functional/simple.builder.sh +++ b/tests/functional/simple.builder.sh @@ -6,7 +6,9 @@ echo "PATH=$PATH" if mkdir foo 2> /dev/null; then exit 1; fi # Set a PATH (!!! impure). +# shellcheck disable=SC2154 export PATH=$goodPath +# shellcheck disable=SC2154 mkdir "$out" echo "Hello World!" 
> "$out"/hello From b349783830d1d82c3cc43c19e402977bdbf29ddd Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:19:27 -0700 Subject: [PATCH 082/373] shellcheck fix: tests/functional/supplementary-groups.sh --- maintainers/flake-module.nix | 1 - tests/functional/supplementary-groups.sh | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 806444df4..829cc5c0f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' diff --git a/tests/functional/supplementary-groups.sh b/tests/functional/supplementary-groups.sh index a667d3e99..0f614a130 100755 --- a/tests/functional/supplementary-groups.sh +++ b/tests/functional/supplementary-groups.sh @@ -9,6 +9,7 @@ needLocalStore "The test uses --store always so we would just be bypassing the d TODO_NixOS +# shellcheck disable=SC2119 execUnshare < Date: Tue, 30 Sep 2025 20:19:47 -0700 Subject: [PATCH 083/373] shellcheck fix: tests/functional/toString-path.sh --- maintainers/flake-module.nix | 1 - tests/functional/toString-path.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 829cc5c0f..913957519 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' diff --git a/tests/functional/toString-path.sh b/tests/functional/toString-path.sh index d790109f4..c425b61be 100755 --- 
a/tests/functional/toString-path.sh +++ b/tests/functional/toString-path.sh @@ -2,8 +2,8 @@ source common.sh -mkdir -p $TEST_ROOT/foo -echo bla > $TEST_ROOT/foo/bar +mkdir -p "$TEST_ROOT"/foo +echo bla > "$TEST_ROOT"/foo/bar [[ $(nix eval --raw --impure --expr "builtins.readFile (builtins.toString (builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; } + \"/bar\"))") = bla ]] From 359e73a6db92179478a4298c4a5bc9c083897499 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:20:13 -0700 Subject: [PATCH 084/373] shellcheck fix: tests/functional/user-envs-migration.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs-migration.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 913957519..2d1a1bb10 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' diff --git a/tests/functional/user-envs-migration.sh b/tests/functional/user-envs-migration.sh index 0f33074e1..46337cdda 100755 --- a/tests/functional/user-envs-migration.sh +++ b/tests/functional/user-envs-migration.sh @@ -29,6 +29,7 @@ nix-env -f user-envs.nix -i bar-0.1 # Migrate to the new profile dir, and ensure that everything’s there export PATH="$PATH_WITH_NEW_NIX" nix-env -q # Trigger the migration +# shellcheck disable=SC2235 ( [[ -L ~/.nix-profile ]] && \ [[ $(readlink ~/.nix-profile) == ~/.local/share/nix/profiles/profile ]] ) || \ fail "The nix profile should point to the new location" From 049c4c7546e1bb87796b8dafcbe76bc818eb8129 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:22:11 -0700 Subject: [PATCH 085/373] shellcheck fix: tests/functional/user-envs-test-case.sh --- 
maintainers/flake-module.nix | 1 - tests/functional/user-envs-test-case.sh | 72 +++++++++++++------------ 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2d1a1bb10..c13578ec9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh index 3483a4600..9f4450161 100644 --- a/tests/functional/user-envs-test-case.sh +++ b/tests/functional/user-envs-test-case.sh @@ -1,14 +1,17 @@ +# shellcheck shell=bash clearProfiles # Query installed: should be empty. -test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0 +# shellcheck disable=SC2154 +test "$(nix-env -p "$profiles"/test -q '*' | wc -l)" -eq 0 -nix-env --switch-profile $profiles/test +nix-env --switch-profile "$profiles"/test # Query available: should contain several. 
test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6 outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0) drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0) +# shellcheck disable=SC2166 [ -n "$outPath10" -a -n "$drvPath10" ] TODO_NixOS @@ -20,18 +23,19 @@ nix-env -f ./user-envs.nix -qa --json | jq -e '.[] | select(.name == "bar-0.1") ] | all' nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name == "bar-0.1") | [ .outputName == "out", - (.outputs.out | test("'$NIX_STORE_DIR'.*-0\\.1")) + (.outputs.out | test("'"$NIX_STORE_DIR"'.*-0\\.1")) ] | all' -nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'$NIX_STORE_DIR'.*-0\\.1\\.drv"))' +nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'"$NIX_STORE_DIR"'.*-0\\.1\\.drv"))' # Query descriptions. nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly -rm -rf $HOME/.nix-defexpr -ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr +rm -rf "$HOME"/.nix-defexpr +ln -s "$(pwd)"/user-envs.nix "$HOME"/.nix-defexpr nix-env -qa '*' --description | grepQuiet silly # Query the system. -nix-env -qa '*' --system | grepQuiet $system +# shellcheck disable=SC2154 +nix-env -qa '*' --system | grepQuiet "$system" # Install "foo-1.0". nix-env -i foo-1.0 @@ -40,7 +44,7 @@ nix-env -i foo-1.0 # executable). test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-1.0 -test "$($profiles/test/bin/foo)" = "foo-1.0" +test "$("$profiles"/test/bin/foo)" = "foo-1.0" # Test nix-env -qc to compare installed against available packages, and vice versa. nix-env -qc '*' | grepQuiet '< 2.0' @@ -55,6 +59,7 @@ nix-env -qas | grepQuiet -- '--- bar-0.1' # Disable foo. nix-env --set-flag active false foo +# shellcheck disable=SC2235 (! [ -e "$profiles/test/bin/foo" ]) # Enable foo. 
@@ -72,7 +77,7 @@ nix-env -i foo-2.0pre1 # Query installed: should contain foo-2.0pre1 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0pre1 -test "$($profiles/test/bin/foo)" = "foo-2.0pre1" +test "$("$profiles"/test/bin/foo)" = "foo-2.0pre1" # Upgrade "foo": should install foo-2.0. NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '<nixpkgs>' -u foo @@ -80,7 +85,7 @@ NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '<nixpkgs>' -u foo # Query installed: should contain foo-2.0 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 -test "$($profiles/test/bin/foo)" = "foo-2.0" +test "$("$profiles"/test/bin/foo)" = "foo-2.0" # Store the path of foo-2.0. outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0) @@ -95,9 +100,9 @@ if nix-env -q '*' | grepQuiet foo; then false; fi nix-env -q '*' | grepQuiet bar # Rollback: should bring "foo" back. -oldGen="$(nix-store -q --resolve $profiles/test)" +oldGen="$(nix-store -q --resolve "$profiles"/test)" nix-env --rollback -[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" != "$oldGen" ] nix-env -q '*' | grepQuiet foo-2.0 nix-env -q '*' | grepQuiet bar @@ -122,23 +127,23 @@ test "$(nix-env --list-generations | wc -l)" -eq 8 # Switch to a specified generation. nix-env --switch-generation 7 -[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" = "$oldGen" ] # Install foo-1.0, now using its store path. 
nix-env -i "$outPath10" nix-env -q '*' | grepQuiet foo-1.0 -nix-store -qR $profiles/test | grep "$outPath10" -nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" -[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] +nix-store -qR "$profiles"/test | grep "$outPath10" +nix-store -q --referrers-closure "$profiles"/test | grep "$(nix-store -q --resolve "$profiles"/test)" +[ "$(nix-store -q --deriver "$outPath10")" = "$drvPath10" ] # Uninstall foo-1.0, using a symlink to its store path. -ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink -nix-env -e $TEST_ROOT/symlink +ln -sfn "$outPath10"/bin/foo "$TEST_ROOT"/symlink +nix-env -e "$TEST_ROOT"/symlink if nix-env -q '*' | grepQuiet foo; then false; fi -nix-store -qR $profiles/test | grepInverse "$outPath10" +nix-store -qR "$profiles"/test | grepInverse "$outPath10" # Install foo-1.0, now using a symlink to its store path. -nix-env -i $TEST_ROOT/symlink +nix-env -i "$TEST_ROOT"/symlink nix-env -q '*' | grepQuiet foo # Delete all old generations. @@ -148,15 +153,16 @@ nix-env --delete-generations old # foo-1.0. nix-collect-garbage test -e "$outPath10" +# shellcheck disable=SC2235 (! [ -e "$outPath20" ]) # Uninstall everything nix-env -e '*' -test "$(nix-env -q '*' | wc -l)" -eq 0 +test "$(nix-env -q '*' -c)" -eq 0 # Installing "foo" should only install the newest foo. nix-env -i foo -test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 +test "$(nix-env -q '*' | grep foo- -c)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 # On the other hand, this should install both (and should fail due to @@ -177,25 +183,25 @@ nix-env -q '*' | grepQuiet bar-0.1.1 # declared priorities. 
nix-env -e '*' nix-env -i foo-0.1 foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env --set-flag priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Priorities can be overridden with the --priority flag nix-env -e '*' nix-env -i foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env -i --priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Test nix-env --set. -nix-env --set $outPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] -nix-env --set $drvPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] +nix-env --set "$outPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] +nix-env --set "$drvPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] # Test the case where $HOME contains a symlink. -mkdir -p $TEST_ROOT/real-home/alice/.nix-defexpr/channels -ln -sfn $TEST_ROOT/real-home $TEST_ROOT/home -ln -sfn $(pwd)/user-envs.nix $TEST_ROOT/home/alice/.nix-defexpr/channels/foo +mkdir -p "$TEST_ROOT"/real-home/alice/.nix-defexpr/channels +ln -sfn "$TEST_ROOT"/real-home "$TEST_ROOT"/home +ln -sfn "$(pwd)"/user-envs.nix "$TEST_ROOT"/home/alice/.nix-defexpr/channels/foo HOME=$TEST_ROOT/home/alice nix-env -i foo-0.1 From 13eac5295d1b15f7708ad193e164ece615d1dc44 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:02 -0700 Subject: [PATCH 086/373] shellcheck fix: tests/functional/user-envs.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs.builder.sh | 13 ++++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c13578ec9..e1c89f71a 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ 
# We haven't linted these files yet - ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' ]; diff --git a/tests/functional/user-envs.builder.sh b/tests/functional/user-envs.builder.sh index 5fafa797f..e875c2fe5 100644 --- a/tests/functional/user-envs.builder.sh +++ b/tests/functional/user-envs.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -mkdir $out/bin -echo "#! $shell" > $out/bin/$progName -echo "echo $name" >> $out/bin/$progName -chmod +x $out/bin/$progName +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +mkdir "$out"/bin +echo "#! $shell" > "$out"/bin/"$progName" +# shellcheck disable=SC2154 +echo "echo $name" >> "$out"/bin/"$progName" +chmod +x "$out"/bin/"$progName" From c8ef6dfa5a9c9a869b0bfd08a2cd9b2bb35a6ce6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:17 -0700 Subject: [PATCH 087/373] shellcheck fix: tests/functional/user-envs.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs-test-case.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e1c89f71a..83891daa2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' ]; }; diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh index 9f4450161..f6a8ab8c6 100644 --- a/tests/functional/user-envs-test-case.sh +++ b/tests/functional/user-envs-test-case.sh @@ -158,7 +158,7 @@ test -e "$outPath10" # Uninstall everything nix-env -e '*' -test "$(nix-env -q '*' -c)" -eq 0 +test "$(nix-env -q '*' | wc -l)" -eq 0 # Installing "foo" should only install the newest foo. 
nix-env -i foo From 015b639cea34a4fa4f3d716fe3cbfe5a26e85ee6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:39 -0700 Subject: [PATCH 088/373] shellcheck fix: tests/functional/why-depends.sh --- maintainers/flake-module.nix | 4 ---- tests/functional/why-depends.sh | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 83891daa2..8dcff9c63 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -104,10 +104,6 @@ }; shellcheck = { enable = true; - excludes = [ - # We haven't linted these files yet - ''^tests/functional/why-depends\.sh$'' - ]; }; }; }; diff --git a/tests/functional/why-depends.sh b/tests/functional/why-depends.sh index 45d1f2f0b..fe9ff9a62 100755 --- a/tests/functional/why-depends.sh +++ b/tests/functional/why-depends.sh @@ -4,9 +4,9 @@ source common.sh clearStoreIfPossible -cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" $TEST_HOME +cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" "$TEST_HOME" -cd $TEST_HOME +cd "$TEST_HOME" nix why-depends --derivation --file ./dependencies.nix input2_drv input1_drv nix why-depends --file ./dependencies.nix input2_drv input1_drv From b72898b2aa4f5d7fe32fee009539daf066251dbf Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 1 Oct 2025 16:01:28 +0000 Subject: [PATCH 089/373] refactor(libstore): extract S3 URL parsing into separate files Move ParsedS3URL from s3.cc/.hh into dedicated s3-url.cc/.hh files. This separates URL parsing utilities (which are protocol-agnostic) from the AWS SDK-specific S3Helper implementation, making the code cleaner and enabling reuse by future curl-based S3 implementation. 
--- src/libstore-tests/meson.build | 2 +- src/libstore-tests/{s3.cc => s3-url.cc} | 2 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/include/nix/store/s3-url.hh | 60 ++++++++++++++++++++++ src/libstore/include/nix/store/s3.hh | 46 +---------------- src/libstore/meson.build | 2 +- src/libstore/{s3.cc => s3-url.cc} | 22 ++++---- 7 files changed, 76 insertions(+), 59 deletions(-) rename src/libstore-tests/{s3.cc => s3-url.cc} (99%) create mode 100644 src/libstore/include/nix/store/s3-url.hh rename src/libstore/{s3.cc => s3-url.cc} (95%) diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 915c10a38..1908e5cbc 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -77,7 +77,7 @@ sources = files( 'realisation.cc', 'references.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol.cc', 'ssh-store.cc', 'store-reference.cc', diff --git a/src/libstore-tests/s3.cc b/src/libstore-tests/s3-url.cc similarity index 99% rename from src/libstore-tests/s3.cc rename to src/libstore-tests/s3-url.cc index 799e102fe..56ec4e40e 100644 --- a/src/libstore-tests/s3.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,4 +1,4 @@ -#include "nix/store/s3.hh" +#include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" #if NIX_WITH_S3_SUPPORT diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..f945f25ad 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -72,6 +72,7 @@ headers = [ config_pub_h ] + files( 'remote-store.hh', 'restricted-store.hh', 's3-binary-cache-store.hh', + 's3-url.hh', 's3.hh', 'serve-protocol-connection.hh', 'serve-protocol-impl.hh', diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh new file mode 100644 index 000000000..4f0a7b0c2 --- /dev/null +++ b/src/libstore/include/nix/store/s3-url.hh @@ -0,0 +1,60 @@ 
+#pragma once +///@file +#include "nix/store/config.hh" + +#if NIX_WITH_S3_SUPPORT + +# include "nix/util/url.hh" +# include "nix/util/util.hh" + +# include +# include +# include +# include + +namespace nix { + +/** + * Parsed S3 URL. + */ +struct ParsedS3URL +{ + std::string bucket; + /** + * @see ParsedURL::path. This is a vector for the same reason. + * Unlike ParsedURL::path this doesn't include the leading empty segment, + * since the bucket name is necessary. + */ + std::vector key; + std::optional profile; + std::optional region; + std::optional scheme; + /** + * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) + * or an authority (so an IP address or a registered name). + */ + std::variant endpoint; + + std::optional getEncodedEndpoint() const + { + return std::visit( + overloaded{ + [](std::monostate) -> std::optional { return std::nullopt; }, + [](const auto & authorityOrUrl) -> std::optional { return authorityOrUrl.to_string(); }, + }, + endpoint); + } + + static ParsedS3URL parse(const ParsedURL & uri); + + /** + * Convert this ParsedS3URL to HTTPS ParsedURL for use with curl's AWS SigV4 authentication + */ + ParsedURL toHttpsUrl() const; + + auto operator<=>(const ParsedS3URL & other) const = default; +}; + +} // namespace nix + +#endif diff --git a/src/libstore/include/nix/store/s3.hh b/src/libstore/include/nix/store/s3.hh index 0270eeda6..ba3adbc2a 100644 --- a/src/libstore/include/nix/store/s3.hh +++ b/src/libstore/include/nix/store/s3.hh @@ -4,12 +4,9 @@ #if NIX_WITH_S3_SUPPORT # include "nix/util/ref.hh" -# include "nix/util/url.hh" -# include "nix/util/util.hh" +# include "nix/store/s3-url.hh" -# include # include -# include namespace Aws { namespace Client { @@ -48,47 +45,6 @@ struct S3Helper FileTransferResult getObject(const std::string & bucketName, const std::string & key); }; -/** - * Parsed S3 URL. - */ -struct ParsedS3URL -{ - std::string bucket; - /** - * @see ParsedURL::path. 
This is a vector for the same reason. - * Unlike ParsedURL::path this doesn't include the leading empty segment, - * since the bucket name is necessary. - */ - std::vector key; - std::optional profile; - std::optional region; - std::optional scheme; - /** - * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) - * or an authority (so an IP address or a registered name). - */ - std::variant endpoint; - - std::optional getEncodedEndpoint() const - { - return std::visit( - overloaded{ - [](std::monostate) -> std::optional { return std::nullopt; }, - [](const auto & authorityOrUrl) -> std::optional { return authorityOrUrl.to_string(); }, - }, - endpoint); - } - - static ParsedS3URL parse(const ParsedURL & uri); - - /** - * Convert this ParsedS3URL to HTTPS ParsedURL for use with curl's AWS SigV4 authentication - */ - ParsedURL toHttpsUrl() const; - - auto operator<=>(const ParsedS3URL & other) const = default; -}; - } // namespace nix #endif diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e3004ebf5..80c234bd5 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -329,7 +329,7 @@ sources = files( 'remote-store.cc', 'restricted-store.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol-connection.cc', 'serve-protocol.cc', 'sqlite.cc', diff --git a/src/libstore/s3.cc b/src/libstore/s3-url.cc similarity index 95% rename from src/libstore/s3.cc rename to src/libstore/s3-url.cc index 5396f43b9..947de60b0 100644 --- a/src/libstore/s3.cc +++ b/src/libstore/s3-url.cc @@ -1,17 +1,17 @@ -#include "nix/store/s3.hh" -#include "nix/util/split.hh" -#include "nix/util/url.hh" -#include "nix/util/util.hh" -#include "nix/util/canon-path.hh" -#include "nix/util/strings-inline.hh" +#include "nix/store/s3-url.hh" -#include +#if NIX_WITH_S3_SUPPORT -namespace nix { +# include "nix/util/error.hh" +# include "nix/util/split.hh" +# include "nix/util/strings-inline.hh" + +# include +# include using 
namespace std::string_view_literals; -#if NIX_WITH_S3_SUPPORT +namespace nix { ParsedS3URL ParsedS3URL::parse(const ParsedURL & parsed) try { @@ -116,6 +116,6 @@ ParsedURL ParsedS3URL::toHttpsUrl() const endpoint); } -#endif - } // namespace nix + +#endif From 140b08ae3e8a766fc04e70b7a281abb746f06241 Mon Sep 17 00:00:00 2001 From: Jami Kettunen Date: Wed, 1 Oct 2025 22:19:08 +0300 Subject: [PATCH 090/373] libstore: Include missing header to fix compile with libc++ 20 https://en.cppreference.com/w/cpp/thread.html src/libstore/gc.cc:121:39: error: no member named 'sleep_for' in namespace 'std::this_thread' 121 | std::this_thread::sleep_for(std::chrono::milliseconds(100)); | ~~~~~~~~~~~~~~~~~~^ --- src/libstore/gc.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 86c4e37a6..47f40ab8e 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -18,6 +18,7 @@ #include #include #include +#include <thread> #include #include #include From 2a0fddc7d5c44845253267e28c2dedc5c56bf4ac Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 1 Oct 2025 23:13:11 +0300 Subject: [PATCH 091/373] libexpr: Move derivation-internal.nix from corepkgsFS to internalFS Best I can tell this was never supposed to be exposed to the user and has been this way since 2.19. 
2.18 did not expose this file to the user: nix run nix/2.18-maintenance -- eval --expr "import <nix/derivation-internal.nix>" error: getting status of '/__corepkgs__/derivation-internal.nix': No such file or directory --- src/libexpr/eval.cc | 2 +- tests/functional/lang/eval-fail-derivation-name.err.exp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 2df373520..20ebe026a 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -268,7 +268,7 @@ EvalState::EvalState( }()) , corepkgsFS(make_ref()) , internalFS(make_ref()) - , derivationInternal{corepkgsFS->addFile( + , derivationInternal{internalFS->addFile( CanonPath("derivation-internal.nix"), #include "primops/derivation.nix.gen.hh" )} diff --git a/tests/functional/lang/eval-fail-derivation-name.err.exp b/tests/functional/lang/eval-fail-derivation-name.err.exp index 017326c34..ba5ff2d00 100644 --- a/tests/functional/lang/eval-fail-derivation-name.err.exp +++ b/tests/functional/lang/eval-fail-derivation-name.err.exp @@ -1,20 +1,20 @@ error: … while evaluating the attribute 'outPath' - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'getAttr' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'derivationStrict' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | | strict = derivationStrict drvAttrs; | ^ From 85d6c8af4da6a1405563b81f3afb0dbe79e5ef7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Wed, 1 Oct 2025 22:23:10 +0200 Subject: [PATCH 092/373] link to jitsi meeting in the PR docs --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 
c6843d86f..c155bf8bf 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -15,6 +15,10 @@ so you understand the process and the expectations. - volunteering contributions effectively - how to get help and our review process. +PR stuck in review? We have two Nix team meetings per week online that are open for everyone in a jitsi conference: + +- https://calendar.google.com/calendar/u/0/embed?src=b9o52fobqjak8oq8lfkhg3t0qg@group.calendar.google.com + --> ## Motivation From e06968ec2586a9ccd18e58d1796de6d9ac628bc6 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 00:06:47 -0400 Subject: [PATCH 093/373] Split out `UnkeyedRealisation` from `Realisation` Realisations are conceptually key-value pairs, mapping `DrvOutputs` (the key) to information about that derivation output. This separate the value type, which will be useful in maps, etc., where we don't want to denormalize by including the key twice. This matches similar changes for existing types: | keyed | unkeyed | |--------------------|------------------------| | `ValidPathInfo` | `UnkeyedValidPathInfo` | | `KeyedBuildResult` | `BuildResult` | | `Realisation` | `UnkeyedRealisation` | --- src/libcmd/built-path.cc | 5 +- src/libstore-tests/common-protocol.cc | 44 +++--- src/libstore-tests/realisation.cc | 20 +-- src/libstore-tests/serve-protocol.cc | 74 +++++----- src/libstore-tests/worker-protocol.cc | 132 +++++++++--------- src/libstore/binary-cache-store.cc | 21 +-- .../build/derivation-building-goal.cc | 13 +- src/libstore/build/derivation-goal.cc | 59 ++++++-- .../build/drv-output-substitution-goal.cc | 10 +- src/libstore/daemon.cc | 4 +- src/libstore/dummy-store.cc | 4 +- .../include/nix/store/binary-cache-store.hh | 17 ++- .../nix/store/build/derivation-goal.hh | 6 +- .../build/drv-output-substitution-goal.hh | 3 +- .../include/nix/store/legacy-ssh-store.hh | 4 +- .../include/nix/store/local-overlay-store.hh | 2 +- src/libstore/include/nix/store/local-store.hh 
| 6 +- src/libstore/include/nix/store/realisation.hh | 50 ++++--- .../include/nix/store/remote-store.hh | 2 +- src/libstore/include/nix/store/store-api.hh | 9 +- src/libstore/local-overlay-store.cc | 8 +- src/libstore/local-store.cc | 17 ++- src/libstore/misc.cc | 5 +- src/libstore/realisation.cc | 50 ++++--- src/libstore/remote-store.cc | 18 +-- src/libstore/restricted-store.cc | 4 +- src/libstore/store-api.cc | 20 +-- src/libstore/unix/build/derivation-builder.cc | 7 +- 28 files changed, 363 insertions(+), 251 deletions(-) diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index 4d76dd6da..fc7f18493 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -117,10 +117,11 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)", store.printStorePath(p.drvPath->outPath()), outputName); - auto thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName}); + DrvOutput key{*drvOutput, outputName}; + auto thisRealisation = store.queryRealisation(key); assert(thisRealisation); // We’ve built it, so we must // have the realisation - res.insert(*thisRealisation); + res.insert(Realisation{*thisRealisation, std::move(key)}); } else { res.insert(outputPath); } diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 35fca165d..2c001957b 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,32 +112,34 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = 
Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index a5a5bee50..d16049bc5 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P( RealisationJsonTest, ([] { Realisation simple{ - - .id = - { - .drvHash = Hash::parseExplicitFormatUnprefixed( - "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", - HashAlgorithm::SHA256, - HashFormat::Base16), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }, + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, }; return ::testing::Values( std::pair{ diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index a63201164..10aa21e9d 
100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,32 +95,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -196,25 +198,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + 
}, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 489151c8c..c4afde3bd 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,32 +148,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + DrvOutput{ + .drvHash = 
Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -214,25 +216,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, @@ -267,25 +269,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, @@ -324,25 +328,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - 
Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index badfb4b14..3705f3d4d 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -502,10 +502,15 @@ StorePath BinaryCacheStore::addToStore( ->path; } -void BinaryCacheStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept +std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id) { - auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; + return realisationsPrefix + "/" + id.to_string() + ".doi"; +} + +void BinaryCacheStore::queryRealisationUncached( + const DrvOutput & id, Callback> callback) noexcept +{ + auto outputInfoFilePath = makeRealisationPath(id); auto callbackPtr = std::make_shared(std::move(callback)); @@ -515,11 +520,12 @@ void BinaryCacheStore::queryRealisationUncached( if (!data) return (*callbackPtr)({}); - std::shared_ptr realisation; + std::shared_ptr realisation; try { - realisation = std::make_shared(nlohmann::json::parse(*data)); + realisation = std::make_shared(nlohmann::json::parse(*data)); } catch (Error & e) { - e.addTrace({}, "while parsing file '%s' as a realisation", 
outputInfoFilePath); + e.addTrace( + {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string()); throw; } return (*callbackPtr)(std::move(realisation)); @@ -535,8 +541,7 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) { if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); - auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; - upsertFile(filePath, static_cast(info).dump(), "application/json"); + upsertFile(makeRealisationPath(info.id), static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index fa819c96b..c39fd8c1c 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1092,13 +1092,22 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). worker.store.registerDrvOutput( Realisation{ + { + .outPath = info.known->path, + }, drvOutput, - info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); + validOutputs.emplace( + i.first, + Realisation{ + { + .outPath = info.known->path, + }, + drvOutput, + }); } bool allValid = true; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index cc3ba2b7b..81f4e6654 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -190,13 +190,17 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto realisation = [&] { auto take1 = get(success.builtOutputs, wantedOutput); if (take1) - return *take1; + return static_cast(*take1); /* The above `get` should work. 
But stateful tracking of outputs in resolvedResult, this can get out of sync with the store, which is our actual source of truth. For now we just check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + auto take2 = worker.evalStore.queryRealisation( + DrvOutput{ + .drvHash = *resolvedHash, + .outputName = wantedOutput, + }); if (take2) return *take2; @@ -207,8 +211,12 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) }(); if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + Realisation newRealisation{ + realisation, + { + .drvHash = *outputHash, + .outputName = wantedOutput, + }}; newRealisation.signatures.clear(); if (!drv->type().isFixed()) { auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; @@ -258,7 +266,16 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) /* In checking mode, the builder will not register any outputs. So we want to make sure the ones that we wanted to check are properly there. */ - success.builtOutputs = {{wantedOutput, assertPathValidity()}}; + success.builtOutputs = {{ + wantedOutput, + { + assertPathValidity(), + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}; } else { /* Otherwise the builder will give us info for out output, but also for other outputs. 
Filter down to just our output so as @@ -373,18 +390,20 @@ Goal::Co DerivationGoal::repairClosure() co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } -std::optional> DerivationGoal::checkPathValidity() +std::optional> DerivationGoal::checkPathValidity() { if (drv->type().isImpure()) return std::nullopt; auto drvOutput = DrvOutput{outputHash, wantedOutput}; - std::optional mRealisation; + std::optional mRealisation; if (auto * mOutput = get(drv->outputs, wantedOutput)) { if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) { - mRealisation = Realisation{drvOutput, std::move(*mPath)}; + mRealisation = UnkeyedRealisation{ + .outPath = std::move(*mPath), + }; } } else { throw Error( @@ -412,7 +431,14 @@ std::optional> DerivationGoal::checkPathValid // derivation, and the output path is valid, but we don't have // its realisation stored (probably because it has been built // without the `ca-derivations` experimental flag). - worker.store.registerDrvOutput(*mRealisation); + worker.store.registerDrvOutput( + Realisation{ + *mRealisation, + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }); } return {{*mRealisation, status}}; @@ -420,7 +446,7 @@ std::optional> DerivationGoal::checkPathValid return std::nullopt; } -Realisation DerivationGoal::assertPathValidity() +UnkeyedRealisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -428,11 +454,20 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) { buildResult.inner = BuildResult::Success{ .status = status, - .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, + .builtOutputs = {{ + wantedOutput, + { + 
std::move(builtOutput), + DrvOutput{ + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}, }; mcExpectedBuilds.reset(); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b6ace4784..a969b905b 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -43,10 +43,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -75,7 +75,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. */ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -132,7 +132,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() } Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) + Goals waitees, std::shared_ptr outputInfo, nix::ref sub) { waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); @@ -145,7 +145,7 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( co_return amDone(nrNoSubstituters > 0 ? 
ecNoSubstituters : ecFailed); } - worker.store.registerDrvOutput(*outputInfo); + worker.store.registerDrvOutput({*outputInfo, id}); trace("finished"); co_return amDone(ecSuccess); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 2bd0698a0..2898f113f 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -964,7 +964,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); + store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +986,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert(*info); + realisations.insert({*info, outputId}); WorkerProto::write(*store, wconn, realisations); } break; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 1eb51fe3e..209be3ce9 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -266,8 +266,8 @@ struct DummyStoreImpl : DummyStore throw Error("path '%s' is not valid", printStorePath(path)); } - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override { callback(nullptr); } diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 3a2c90022..660dd870a 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -95,13 +95,22 @@ private: protected: - // The prefix under which realisation infos will be stored - const std::string realisationsPrefix = "realisations"; + /** + * The prefix under which realisation infos 
will be stored + */ + constexpr const static std::string realisationsPrefix = "realisations"; - const std::string cacheInfoFile = "nix-cache-info"; + constexpr const static std::string cacheInfoFile = "nix-cache-info"; BinaryCacheStore(Config &); + /** + * Compute the path to the given realisation + * + * It's `${realisationsPrefix}/${drvOutput}.doi`. + */ + std::string makeRealisationPath(const DrvOutput & id); + public: virtual bool fileExists(const std::string & path) = 0; @@ -190,7 +199,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 353e7c489..c31645fff 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -93,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. 
*/ - Realisation assertPathValidity(); + UnkeyedRealisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index b42336427..1a5a4ea26 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -39,7 +39,8 @@ public: GoalState state; Co init(); - Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); + Co + realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); void timedOut(Error && ex) override { diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index c91f88a84..994918f90 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -208,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index b89d0a1a0..1d69d3417 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. 
*/ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index b871aaee2..ab255fba8 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index 3424a39c9..c7e0a4483 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); + bool operator==(const DrvOutput &) const = default; + auto operator<=>(const DrvOutput &) const = default; }; -struct Realisation +struct UnkeyedRealisation { - DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,22 +64,35 @@ struct Realisation */ std::map dependentRealisations; - std::string fingerprint() const; - void sign(const Signer &); - bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; - size_t checkSignatures(const PublicKeys & publicKeys) const; + 
std::string fingerprint(const DrvOutput & key) const; - static std::set closure(Store &, const std::set &); - static void closure(Store &, const std::set &, std::set & res); + void sign(const DrvOutput & key, const Signer &); - bool isCompatibleWith(const Realisation & other) const; + bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; - StorePath getPath() const + size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; + + const StorePath & getPath() const { return outPath; } - GENERATE_CMP(Realisation, me->id, me->outPath); + // TODO sketchy that it avoids signatures + GENERATE_CMP(UnkeyedRealisation, me->outPath); +}; + +struct Realisation : UnkeyedRealisation +{ + DrvOutput id; + + bool isCompatibleWith(const UnkeyedRealisation & other) const; + + static std::set closure(Store &, const std::set &); + + static void closure(Store &, const std::set &, std::set & res); + + bool operator==(const Realisation &) const = default; + auto operator<=>(const Realisation &) const = default; }; /** @@ -103,12 +116,13 @@ struct OpaquePath { StorePath path; - StorePath getPath() const + const StorePath & getPath() const { return path; } - GENERATE_CMP(OpaquePath, me->path); + bool operator==(const OpaquePath &) const = default; + auto operator<=>(const OpaquePath &) const = default; }; /** @@ -116,7 +130,7 @@ struct OpaquePath */ struct RealisedPath { - /* + /** * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -138,13 +152,14 @@ struct RealisedPath /** * Get the raw store path associated to this */ - StorePath path() const; + const StorePath & path() const; void closure(Store & store, Set & ret) const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - GENERATE_CMP(RealisedPath, me->raw); + bool operator==(const RealisedPath &) const = default; + auto 
operator<=>(const RealisedPath &) const = default; }; class MissingRealisation : public Error @@ -167,4 +182,5 @@ public: } // namespace nix +JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 1aaf29d37..b152e054b 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 1131ec975..c9fd00513 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,6 +31,7 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); +struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -398,12 +399,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). 
*/ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -430,8 +431,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; + virtual void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept = 0; public: diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index 2b000b3db..f23feb8fb 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput(*res); + LocalStore::registerDrvOutput({*res, info.id}); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, {[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) 
{ diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ebc987ee0..6425819c5 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1586,7 +1586,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,14 +1598,13 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - Realisation{ - .id = id, + UnkeyedRealisation{ .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1631,13 +1630,13 @@ std::optional LocalStore::queryRealisation_(LocalStore::State } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = - retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = retrySQLite>( + [&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + 
callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 7efaa4f86..a31d149c2 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -360,11 +360,12 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); + DrvOutput key{*outputHash, outputName}; + auto thisRealisation = store.queryRealisation(key); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert(*thisRealisation); + inputRealisations.insert({*thisRealisation, std::move(key)}); } } if (!inputNode.value.empty()) { diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index febd67bd2..e08d5ee8a 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert(*currentRealisation); + res.insert({*currentRealisation, currentDep}); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,24 +61,25 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -std::string Realisation::fingerprint() const +std::string UnkeyedRealisation::fingerprint(const DrvOutput & key) const { - nlohmann::json serialized = *this; + nlohmann::json serialized = Realisation{*this, key}; serialized.erase("signatures"); return serialized.dump(); } -void Realisation::sign(const Signer & signer) +void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) { - 
signatures.insert(signer.signDetached(fingerprint())); + signatures.insert(signer.signDetached(fingerprint(key))); } -bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const +bool UnkeyedRealisation::checkSignature( + const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(), sig, publicKeys); + return verifyDetached(fingerprint(key), sig, publicKeys); } -size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const +size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -86,19 +87,18 @@ size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const size_t good = 0; for (auto & sig : signatures) - if (checkSignature(publicKeys, sig)) + if (checkSignature(key, publicKeys, sig)) good++; return good; } -StorePath RealisedPath::path() const +const StorePath & RealisedPath::path() const { - return std::visit([](auto && arg) { return arg.getPath(); }, raw); + return std::visit([](auto && arg) -> auto & { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const Realisation & other) const +bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const { - assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -Realisation adl_serializer::from_json(const json & json0) +UnkeyedRealisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,25 +157,39 @@ Realisation adl_serializer::from_json(const json & json0) for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) dependentRealisations.insert({DrvOutput::parse(jsonDepId), 
jsonDepOutPath}); - return Realisation{ - .id = DrvOutput::parse(valueAt(json, "id")), + return UnkeyedRealisation{ .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const Realisation & r) +void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { - {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } +Realisation adl_serializer::from_json(const json & json0) +{ + auto json = getObject(json0); + + return Realisation{ + static_cast(json0), + DrvOutput::parse(valueAt(json, "id")), + }; +} + +void adl_serializer::to_json(json & json, const Realisation & r) +{ + json = static_cast(r); + json["id"] = r.id.to_string(); +} + } // namespace nlohmann diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index a6994f844..8dd5bc064 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,7 +501,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +515,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); + return std::make_shared(UnkeyedRealisation{.outPath = 
*outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -626,13 +626,15 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - success.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); } else { success.builtOutputs.emplace( output, Realisation{ - .id = outputId, - .outPath = outputPath, + UnkeyedRealisation{ + .outPath = outputPath, + }, + outputId, }); } } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index a1cb41606..5270f7d10 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 4ce6b15fa..df00dc179 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -598,7 +598,8 @@ void 
Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation( + const DrvOutput & id, Callback> callback) noexcept { try { @@ -624,20 +625,20 @@ void Store::queryRealisation(const DrvOutput & id, Callback(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), *info); + config.getReference().render(/*FIXME withParams=*/false), {*info, id}); else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -645,9 +646,9 @@ void Store::queryRealisation(const DrvOutput & id, Callback Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -910,11 +911,12 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto realisation = std::get_if(&path.raw)) { + if (auto * realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } + auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -931,7 +933,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert(*currentChild); + children.insert({*currentChild, drvOutput}); } return children; }, @@ -1199,7 +1201,7 @@ void Store::signRealisation(Realisation & realisation) for (auto & secretKeyFile : 
secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - realisation.sign(signer); + realisation.sign(realisation.id, signer); } } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index a04056599..7cf72fb84 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1830,7 +1830,12 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; + auto thisRealisation = Realisation{ + { + .outPath = newInfo.path, + }, + DrvOutput{oldinfo->outputHash, outputName}, + }; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); From 5592bb717beb7afa43a232a13e78d2c62a794fb1 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:30:36 -0400 Subject: [PATCH 094/373] Implement realisation operations on dummy store --- src/libstore-tests/dummy-store.cc | 15 +++++++++++++-- src/libstore/dummy-store.cc | 19 ++++++++++++++++--- .../include/nix/store/dummy-store-impl.hh | 12 ++++++++++++ src/libstore/include/nix/store/dummy-store.hh | 2 ++ src/libutil/include/nix/util/hash.hh | 19 +++++++++++++++++++ 5 files changed, 62 insertions(+), 5 deletions(-) diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc index b841d7890..3dd8137a3 100644 --- a/src/libstore-tests/dummy-store.cc +++ b/src/libstore-tests/dummy-store.cc @@ -1,6 +1,6 @@ #include -#include "nix/store/dummy-store.hh" +#include "nix/store/dummy-store-impl.hh" #include "nix/store/globals.hh" #include "nix/store/realisation.hh" @@ -13,7 +13,7 @@ TEST(DummyStore, realisation_read) auto 
store = [] { auto cfg = make_ref(StoreReference::Params{}); cfg->readOnly = false; - return cfg->openStore(); + return cfg->openDummyStore(); }(); auto drvHash = Hash::parseExplicitFormatUnprefixed( @@ -22,6 +22,17 @@ TEST(DummyStore, realisation_read) auto outputName = "foo"; EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); + + UnkeyedRealisation value{ + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }; + + store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); + + auto value2 = store->queryRealisation({drvHash, outputName}); + + ASSERT_TRUE(value2); + EXPECT_EQ(*value2, value); } } // namespace nix diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 209be3ce9..509b7a0b1 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -3,6 +3,7 @@ #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store-impl.hh" +#include "nix/store/realisation.hh" #include @@ -251,7 +252,10 @@ struct DummyStoreImpl : DummyStore void registerDrvOutput(const Realisation & output) override { - unsupported("registerDrvOutput"); + auto ref = make_ref(output); + buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) { + kv.second.insert_or_assign(output.id.outputName, make_ref(output)); + }); } void narFromPath(const StorePath & path, Sink & sink) override @@ -267,9 +271,18 @@ struct DummyStoreImpl : DummyStore } void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override + const DrvOutput & drvOutput, Callback> callback) noexcept override { - callback(nullptr); + bool visited = false; + buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) { + if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) { + visited = true; + callback(it->second.get_ptr()); + } + }); + + if (!visited) + callback(nullptr); } std::shared_ptr getFSAccessor(const StorePath & 
path, bool requireValidPath) override diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh index e05bb94ff..4c9f54e98 100644 --- a/src/libstore/include/nix/store/dummy-store-impl.hh +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -30,6 +30,18 @@ struct DummyStore : virtual Store */ boost::concurrent_flat_map contents; + /** + * The build trace maps the pair of a content-addressing (fixed or + * floating) derivation and one of its outputs to a + * (content-addressed) store object. + * + * It is [curried](https://en.wikipedia.org/wiki/Currying), so + * instead of having a single map with a `DrvOutput` key, we have an + * outer map for the derivation, and inner maps for the outputs of a + * given derivation. + */ + boost::concurrent_flat_map>> buildTrace; + DummyStore(ref config) : Store{*config} , config(config) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index 95c09078c..d371c4e51 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -3,6 +3,8 @@ #include "nix/store/store-api.hh" +#include + namespace nix { struct DummyStore; diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 571b6acca..0b16b423c 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -222,3 +222,22 @@ public: }; } // namespace nix + +template<> +struct std::hash +{ + std::size_t operator()(const nix::Hash & hash) const noexcept + { + assert(hash.hashSize > sizeof(size_t)); + return *reinterpret_cast(&hash.hash); + } +}; + +namespace nix { + +inline std::size_t hash_value(const Hash & hash) +{ + return std::hash{}(hash); +} + +} // namespace nix From a4e792cba7afc38ac3d4c3f85ae12622c39fd340 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 1 Oct 2025 19:47:18 +0000 Subject: [PATCH 095/373] feat(libstore):
add AWS CRT-based credential infrastructure Add lightweight AWS credential resolution using AWS CRT (Common Runtime) instead of the full AWS SDK. This provides credential management for the upcoming curl-based S3 implementation. --- src/libstore/aws-creds.cc | 178 ++++++++++++++++++++ src/libstore/include/nix/store/aws-creds.hh | 73 ++++++++ src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + 4 files changed, 253 insertions(+) create mode 100644 src/libstore/aws-creds.cc create mode 100644 src/libstore/include/nix/store/aws-creds.hh diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc new file mode 100644 index 000000000..576f932d5 --- /dev/null +++ b/src/libstore/aws-creds.cc @@ -0,0 +1,178 @@ +#include "nix/store/aws-creds.hh" + +#if NIX_WITH_S3_SUPPORT + +# include +# include "nix/store/s3-url.hh" +# include "nix/util/finally.hh" +# include "nix/util/logging.hh" +# include "nix/util/url.hh" +# include "nix/util/util.hh" + +# include +# include +# include + +# include + +# include +# include +# include +# include + +namespace nix { + +namespace { + +static void initAwsCrt() +{ + struct CrtWrapper + { + Aws::Crt::ApiHandle apiHandle; + + CrtWrapper() + { + apiHandle.InitializeLogging(Aws::Crt::LogLevel::Warn, static_cast(nullptr)); + } + + ~CrtWrapper() + { + try { + // CRITICAL: Clear credential provider cache BEFORE AWS CRT shuts down + // This ensures all providers (which hold references to ClientBootstrap) + // are destroyed while AWS CRT is still valid + clearAwsCredentialsCache(); + // Now it's safe for ApiHandle destructor to run + } catch (...) 
{ + ignoreExceptionInDestructor(); + } + } + }; + + static CrtWrapper crt; +} + +static AwsCredentials getCredentialsFromProvider(std::shared_ptr provider) +{ + if (!provider || !provider->IsValid()) { + throw AwsAuthError("AWS credential provider is invalid"); + } + + auto prom = std::make_shared>(); + auto fut = prom->get_future(); + + provider->GetCredentials([prom](std::shared_ptr credentials, int errorCode) { + if (errorCode != 0 || !credentials) { + prom->set_exception( + std::make_exception_ptr(AwsAuthError("Failed to resolve AWS credentials: error code %d", errorCode))); + } else { + auto accessKeyId = Aws::Crt::ByteCursorToStringView(credentials->GetAccessKeyId()); + auto secretAccessKey = Aws::Crt::ByteCursorToStringView(credentials->GetSecretAccessKey()); + auto sessionToken = Aws::Crt::ByteCursorToStringView(credentials->GetSessionToken()); + + std::optional sessionTokenStr; + if (!sessionToken.empty()) { + sessionTokenStr = std::string(sessionToken.data(), sessionToken.size()); + } + + prom->set_value(AwsCredentials( + std::string(accessKeyId.data(), accessKeyId.size()), + std::string(secretAccessKey.data(), secretAccessKey.size()), + sessionTokenStr)); + } + }); + + // AWS CRT GetCredentials is asynchronous and only guarantees the callback will be + // invoked if the initial call returns success. There's no documented timeout mechanism, + // so we add a timeout to prevent indefinite hanging if the callback is never called. 
+ auto timeout = std::chrono::seconds(30); + if (fut.wait_for(timeout) == std::future_status::timeout) { + throw AwsAuthError( + "Timeout waiting for AWS credentials (%d seconds)", + std::chrono::duration_cast(timeout).count()); + } + + return fut.get(); // This will throw if set_exception was called +} + +// Global credential provider cache using boost's concurrent map +// Key: profile name (empty string for default profile) +using CredentialProviderCache = + boost::concurrent_flat_map>; + +static CredentialProviderCache credentialProviderCache; + +} // anonymous namespace + +AwsCredentials getAwsCredentials(const std::string & profile) +{ + // Get or create credential provider with caching + std::shared_ptr provider; + + // Try to find existing provider + credentialProviderCache.visit(profile, [&](const auto & pair) { provider = pair.second; }); + + if (!provider) { + // Create new provider if not found + debug( + "[pid=%d] creating new AWS credential provider for profile '%s'", + getpid(), + profile.empty() ? "(default)" : profile.c_str()); + + try { + initAwsCrt(); + + if (profile.empty()) { + Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); + } else { + Aws::Crt::Auth::CredentialsProviderProfileConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + // This is safe because the underlying C library will copy this string + // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 + config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); + provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + } + } catch (Error & e) { + e.addTrace( + {}, + "while creating AWS credentials provider for %s", + profile.empty() ? 
"default profile" : fmt("profile '%s'", profile)); + throw; + } + + if (!provider) { + throw AwsAuthError( + "Failed to create AWS credentials provider for %s", + profile.empty() ? "default profile" : fmt("profile '%s'", profile)); + } + + // Insert into cache (try_emplace is thread-safe and won't overwrite if another thread added it) + credentialProviderCache.try_emplace(profile, provider); + } + + return getCredentialsFromProvider(provider); +} + +void invalidateAwsCredentials(const std::string & profile) +{ + credentialProviderCache.erase(profile); +} + +void clearAwsCredentialsCache() +{ + credentialProviderCache.clear(); +} + +AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url) +{ + std::string profile = s3Url.profile.value_or(""); + + // Get credentials (automatically cached) + return getAwsCredentials(profile); +} + +} // namespace nix + +#endif diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh new file mode 100644 index 000000000..67ff2e49c --- /dev/null +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -0,0 +1,73 @@ +#pragma once +///@file +#include "nix/store/config.hh" + +#if NIX_WITH_S3_SUPPORT + +# include "nix/store/s3-url.hh" +# include "nix/util/error.hh" + +# include +# include +# include + +namespace nix { + +/** + * AWS credentials obtained from credential providers + */ +struct AwsCredentials +{ + std::string accessKeyId; + std::string secretAccessKey; + std::optional sessionToken; + + AwsCredentials( + const std::string & accessKeyId, + const std::string & secretAccessKey, + const std::optional & sessionToken = std::nullopt) + : accessKeyId(accessKeyId) + , secretAccessKey(secretAccessKey) + , sessionToken(sessionToken) + { + } +}; + +/** + * Exception thrown when AWS authentication fails + */ +MakeError(AwsAuthError, Error); + +/** + * Get AWS credentials for the given profile. 
+ * This function automatically caches credential providers to avoid + * creating multiple providers for the same profile. + * + * @param profile The AWS profile name (empty string for default profile) + * @return AWS credentials + * @throws AwsAuthError if credentials cannot be resolved + */ +AwsCredentials getAwsCredentials(const std::string & profile = ""); + +/** + * Invalidate cached credentials for a profile (e.g., on authentication failure). + * The next request for this profile will create a new provider. + * + * @param profile The AWS profile name to invalidate + */ +void invalidateAwsCredentials(const std::string & profile); + +/** + * Clear all cached credential providers. + * Typically called during application cleanup. + */ +void clearAwsCredentialsCache(); + +/** + * Pre-resolve AWS credentials for S3 URLs. + * Used to cache credentials in parent process before forking. + */ +AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url); + +} // namespace nix +#endif diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index f945f25ad..1aa32cf2c 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'aws-creds.hh', 'binary-cache-store.hh', 'build-result.hh', 'build/derivation-builder.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 80c234bd5..713a40382 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -268,6 +268,7 @@ subdir('nix-meson-build-support/common') subdir('nix-meson-build-support/asan-options') sources = files( + 'aws-creds.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-building-goal.cc', From 7f3f0f2a0b98cf05a04fe6d1c305856afb3370b7 Mon Sep 17 00:00:00 2001 From: osbm Date: Thu, 2 Oct 2025 10:44:30 +0300 Subject: [PATCH 096/373] docs: Update documentation regarding the flake outputs 
--- src/nix/flake-check.md | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index c8307f8d8..007640c27 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -31,39 +31,49 @@ at the first error. The following flake output attributes must be derivations: * `checks.`*system*`.`*name* -* `defaultPackage.`*system* -* `devShell.`*system* +* `devShells.`*system*`.default` * `devShells.`*system*`.`*name* * `nixosConfigurations.`*name*`.config.system.build.toplevel` +* `packages.`*system*`.default` * `packages.`*system*`.`*name* The following flake output attributes must be [app definitions](./nix3-run.md): +* `apps.`*system*`.default` * `apps.`*system*`.`*name* -* `defaultApp.`*system* The following flake output attributes must be [template definitions](./nix3-flake-init.md): -* `defaultTemplate` +* `templates.default` * `templates.`*name* The following flake output attributes must be *Nixpkgs overlays*: -* `overlay` +* `overlays.default` * `overlays.`*name* The following flake output attributes must be *NixOS modules*: -* `nixosModule` +* `nixosModules.default` * `nixosModules.`*name* The following flake output attributes must be [bundlers](./nix3-bundle.md): +* `bundlers.default` * `bundlers.`*name* -* `defaultBundler` + +Old default attributes are renamed, they will work but will emit a warning: + +* `defaultPackage.` → `packages.`*system*`.default` +* `defaultApps.` → `apps.`*system*`.default` +* `defaultTemplate` → `templates.default` +* `defaultBundler.` → `bundlers.`*system*`.default` +* `overlay` → `overlays.default` +* `devShell.` → `devShells.`*system*`.default` +* `nixosModule` → `nixosModules.default` In addition, the `hydraJobs` output is evaluated in the same way as Hydra's `hydra-eval-jobs` (i.e. 
as a arbitrarily deeply nested From 1e92b61750c88783c36372e48ab411d482bb5421 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Thu, 2 Oct 2025 03:51:31 +0000 Subject: [PATCH 097/373] fix(libfetchers): substitute fetchTarball and fetchurl Fixes #4313 by enabling builtins.fetchurl, builtins.fetchTarball to use binary cache substituters before attempting to download from the original URL. --- src/libexpr/primops/fetchTree.cc | 14 ++- tests/nixos/default.nix | 2 + tests/nixos/fetchers-substitute.nix | 176 ++++++++++++++++++++++++++++ 3 files changed, 189 insertions(+), 3 deletions(-) create mode 100644 tests/nixos/fetchers-substitute.nix diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e673e55a0..ee2ca375a 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -561,14 +561,22 @@ static void fetch( .hash = *expectedHash, .references = {}}); - if (state.store->isValidPath(expectedPath)) { + // Try to get the path from the local store or substituters + try { + state.store->ensurePath(expectedPath); + debug("using substituted/cached path '%s' for '%s'", state.store->printStorePath(expectedPath), *url); state.allowAndSetStorePathString(expectedPath, v); return; + } catch (Error & e) { + debug( + "substitution of '%s' failed, will try to download: %s", + state.store->printStorePath(expectedPath), + e.what()); + // Fall through to download } } - // TODO: fetching may fail, yet the path may be substitutable. - // https://github.com/NixOS/nix/issues/4313 + // Download the file/tarball if substitution failed or no hash was provided auto storePath = unpack ? 
fetchToStore( state.fetchSettings, *state.store, diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 5a1e08528..edfa4124f 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -207,5 +207,7 @@ in fetchurl = runNixOSTest ./fetchurl.nix; + fetchersSubstitute = runNixOSTest ./fetchers-substitute.nix; + chrootStore = runNixOSTest ./chroot-store.nix; } diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix new file mode 100644 index 000000000..453982677 --- /dev/null +++ b/tests/nixos/fetchers-substitute.nix @@ -0,0 +1,176 @@ +{ + name = "fetchers-substitute"; + + nodes.substituter = + { pkgs, ... }: + { + virtualisation.writableStore = true; + + nix.settings.extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + + networking.firewall.allowedTCPPorts = [ 5000 ]; + + services.nix-serve = { + enable = true; + secretKeyFile = + let + key = pkgs.writeTextFile { + name = "secret-key"; + text = '' + substituter:SerxxAca5NEsYY0DwVo+subokk+OoHcD9m6JwuctzHgSQVfGHe6nCc+NReDjV3QdFYPMGix4FMg0+K/TM1B3aA== + ''; + }; + in + "${key}"; + }; + }; + + nodes.importer = + { lib, ... 
}: + { + virtualisation.writableStore = true; + + nix.settings = { + extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + substituters = lib.mkForce [ "http://substituter:5000" ]; + trusted-public-keys = lib.mkForce [ "substituter:EkFXxh3upwnPjUXg41d0HRWDzBoseBTINPiv0zNQd2g=" ]; + }; + }; + + testScript = + { nodes }: # python + '' + import json + + start_all() + + substituter.wait_for_unit("multi-user.target") + + ########################################## + # Test 1: builtins.fetchurl with substitution + ########################################## + + missing_file = "/only-on-substituter.txt" + + substituter.succeed(f"echo 'this should only exist on the substituter' > {missing_file}") + + file_hash = substituter.succeed(f"nix hash file {missing_file}").strip() + + file_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + + file_store_path = json.loads(file_store_path_json) + + substituter.succeed(f"nix store sign --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {file_store_path}") + + importer.wait_for_unit("multi-user.target") + + print("Testing fetchurl with substitution...") + importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + print("✓ fetchurl substitution works!") + + ########################################## + # Test 2: builtins.fetchTarball with substitution + ########################################## + + missing_tarball = "/only-on-substituter.tar.gz" + + # Create a directory with some content + substituter.succeed(""" + mkdir -p /tmp/test-tarball + echo 'Hello from tarball!' 
> /tmp/test-tarball/hello.txt + echo 'Another file' > /tmp/test-tarball/file2.txt + """) + + # Create a tarball + substituter.succeed(f"tar czf {missing_tarball} -C /tmp test-tarball") + + # For fetchTarball, we need to first fetch it without hash to get the store path, + # then compute the NAR hash of that path + tarball_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + }} + ' + """) + + tarball_store_path = json.loads(tarball_store_path_json) + + # Get the NAR hash of the unpacked tarball in SRI format + path_info_json = substituter.succeed(f"nix path-info --json {tarball_store_path}").strip() + path_info_dict = json.loads(path_info_json) + # nix path-info returns a dict with store paths as keys + tarball_hash_sri = path_info_dict[tarball_store_path]["narHash"] + print(f"Tarball NAR hash (SRI): {tarball_hash_sri}") + + # Also get the old format hash for fetchTarball (which uses sha256 parameter) + tarball_hash = substituter.succeed(f"nix-store --query --hash {tarball_store_path}").strip() + + # Sign the tarball's store path + substituter.succeed(f"nix store sign --recursive --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {tarball_store_path}") + + # Now try to fetch the same tarball on the importer + # The file doesn't exist locally, so it should be substituted + print("Testing fetchTarball with substitution...") + result = importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + sha256 = "{tarball_hash}"; + }} + ' + """) + + result_path = json.loads(result) + print(f"✓ fetchTarball substitution works! 
Result: {result_path}") + + # Verify the content is correct + # fetchTarball strips the top-level directory if there's only one + content = importer.succeed(f"cat {result_path}/hello.txt").strip() + assert content == "Hello from tarball!", f"Content mismatch: {content}" + print("✓ fetchTarball content verified!") + + ########################################## + # Test 3: Verify fetchTree does NOT substitute (preserves metadata) + ########################################## + + print("Testing that fetchTree without __final does NOT use substitution...") + + # fetchTree with just narHash (not __final) should try to download, which will fail + # since the file doesn't exist on the importer + exit_code = importer.fail(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTree {{ + type = "tarball"; + url = "file:///only-on-substituter.tar.gz"; + narHash = "{tarball_hash_sri}"; + }} + ' 2>&1 + """) + + # Should fail with "does not exist" since it tries to download instead of substituting + assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" + print("✓ fetchTree correctly does NOT substitute non-final inputs!") + print(" (This preserves metadata like lastModified from the actual fetch)") + ''; +} From d2017e0e1a687af3b1a297acc43b004cd69a9793 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 2 Oct 2025 23:11:16 +0300 Subject: [PATCH 098/373] libstore: Move {narinfo,ls,log}-compression settings from BinaryCacheStoreConfig to HttpBinaryCacheStoreConfig These settings are only implemented for the http store and should not be there for the file:// stores. 
--- .../include/nix/store/binary-cache-store.hh | 15 --------------- .../include/nix/store/http-binary-cache-store.hh | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 660dd870a..3f4de2bd4 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -59,21 +59,6 @@ struct BinaryCacheStoreConfig : virtual StoreConfig The meaning and accepted values depend on the compression method selected. `-1` specifies that the default compression level should be used. )"}; - - const Setting narinfoCompression{ - this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; - - const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; - - const Setting logCompression{ - this, - "", - "log-compression", - R"( - Compression method for `log/*` files. It is recommended to - use a compression method supported by most web browsers - (e.g. `brotli`). - )"}; }; /** diff --git a/src/libstore/include/nix/store/http-binary-cache-store.hh b/src/libstore/include/nix/store/http-binary-cache-store.hh index 4102c858f..e0b7ac1ea 100644 --- a/src/libstore/include/nix/store/http-binary-cache-store.hh +++ b/src/libstore/include/nix/store/http-binary-cache-store.hh @@ -17,6 +17,21 @@ struct HttpBinaryCacheStoreConfig : std::enable_shared_from_this narinfoCompression{ + this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; + + const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; + + const Setting logCompression{ + this, + "", + "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). 
+ )"}; + static const std::string name() { return "HTTP Binary Cache Store"; From 27f64171281812b403eba40becd5a63d9594179a Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 00:45:49 +0000 Subject: [PATCH 099/373] build(libstore): add NIX_WITH_CURL_S3 build option Introduce a new build option 'curl-s3-store' for the curl-based S3 implementation, separate from the existing AWS SDK-based 's3-store'. The two options are mutually exclusive to avoid conflicts. Users can enable the new implementation with: -Dcurl-s3-store=enabled -Ds3-store=disabled --- src/libstore-tests/s3-url.cc | 2 +- src/libstore/aws-creds.cc | 2 +- src/libstore/include/nix/store/aws-creds.hh | 2 +- src/libstore/include/nix/store/s3-url.hh | 2 +- src/libstore/meson.build | 27 +++++++++++++++++++++ src/libstore/meson.options | 7 ++++++ src/libstore/package.nix | 7 +++++- src/libstore/s3-url.cc | 2 +- 8 files changed, 45 insertions(+), 6 deletions(-) diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 56ec4e40e..60652dd9c 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,7 +1,7 @@ #include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include # include diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 576f932d5..dc8584e1b 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -1,6 +1,6 @@ #include "nix/store/aws-creds.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_CURL_S3 # include # include "nix/store/s3-url.hh" diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 67ff2e49c..16643c555 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_CURL_S3 # include "nix/store/s3-url.hh" # include 
"nix/util/error.hh" diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index 4f0a7b0c2..45c3b2d1c 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include "nix/util/url.hh" # include "nix/util/util.hh" diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 1086df3c2..e220e65cd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -164,6 +164,33 @@ if aws_s3.found() endif deps_other += aws_s3 +# Curl-based S3 store support (alternative to AWS SDK) +# Check if curl supports AWS SigV4 (requires >= 7.75.0) +curl_supports_aws_sigv4 = curl.version().version_compare('>= 7.75.0') +# AWS CRT C++ for lightweight credential management +aws_crt_cpp = cxx.find_library('aws-crt-cpp', required : false) + +curl_s3_store_opt = get_option('curl-s3-store').require( + curl_supports_aws_sigv4, + error_message : 'curl-based S3 support requires curl >= 7.75.0', +).require( + aws_crt_cpp.found(), + error_message : 'curl-based S3 support requires aws-crt-cpp', +) + +# Make AWS SDK and curl-based S3 mutually exclusive +if aws_s3.found() and curl_s3_store_opt.enabled() + error( + 'Cannot enable both AWS SDK S3 support and curl-based S3 support. 
Please choose one.', + ) +endif + +if curl_s3_store_opt.enabled() + deps_other += aws_crt_cpp +endif + +configdata_pub.set('NIX_WITH_CURL_S3', curl_s3_store_opt.enabled().to_int()) + subdir('nix-meson-build-support/generate-header') generated_headers = [] diff --git a/src/libstore/meson.options b/src/libstore/meson.options index b8414068d..edc43bd45 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -33,3 +33,10 @@ option( value : '/nix/var/log/nix', description : 'path to store logs in for Nix', ) + +option( + 'curl-s3-store', + type : 'feature', + value : 'disabled', + description : 'Enable curl-based S3 binary cache store support (requires aws-crt-cpp and curl >= 7.75.0)', +) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index d890d2256..1c08e466e 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -10,6 +10,7 @@ boost, curl, aws-sdk-cpp, + aws-crt-cpp, libseccomp, nlohmann_json, sqlite, @@ -25,6 +26,8 @@ withAWS ? # Default is this way because there have been issues building this dependency stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin), + + withCurlS3 ? 
false, }: let @@ -64,7 +67,8 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-sdk-cpp; + ++ lib.optional withAWS aws-sdk-cpp + ++ lib.optional withCurlS3 aws-crt-cpp; propagatedBuildInputs = [ nix-util @@ -74,6 +78,7 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) + (lib.mesonEnable "curl-s3-store" withCurlS3) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index 947de60b0..baefe5cba 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-url.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include "nix/util/error.hh" # include "nix/util/split.hh" From 2cbbb63628adf5e18150c59f49676d3d074e5eff Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Mon, 15 Sep 2025 22:58:34 -0400 Subject: [PATCH 100/373] ci: enable use of the experimental installer --- .../actions/install-nix-action/action.yaml | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml index c299b3956..b9861131d 100644 --- a/.github/actions/install-nix-action/action.yaml +++ b/.github/actions/install-nix-action/action.yaml @@ -4,12 +4,18 @@ inputs: dogfood: description: "Whether to use Nix installed from the latest artifact from master branch" required: true # Be explicit about the fact that we are using unreleased artifacts + experimental-installer: + description: "Whether to use the experimental installer to install Nix" + default: false extra_nix_config: description: "Gets appended to `/etc/nix/nix.conf` if passed." 
install_url: description: "URL of the Nix installer" required: false default: "https://releases.nixos.org/nix/nix-2.30.2/install" + tarball_url: + description: "URL of the Nix tarball to use with the experimental installer" + required: false github_token: description: "Github token" required: true @@ -37,14 +43,57 @@ runs: gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n "$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR" echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT" + TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' -print | head -n 1)" + echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" echo "::notice ::Dogfooding Nix installer from master (https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)" env: GH_TOKEN: ${{ inputs.github_token }} DOGFOOD_REPO: "NixOS/nix" + - name: "Download experimental installer" + shell: bash + id: download-experimental-nix-installer + if: ${{ inputs.experimental-installer == 'true' }} + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + INSTALLER_OS="linux" + elif [ "$RUNNER_OS" == "macOS" ]; then + INSTALLER_OS="darwin" + else + echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" + fi + + if [ "$RUNNER_ARCH" == "X64" ]; then + INSTALLER_ARCH="x86_64" + elif [ "$RUNNER_ARCH" == "ARM64" ]; then + INSTALLER_ARCH="aarch64" + else + echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH" + fi + + EXPERIMENTAL_INSTALLER_ARTIFACT="nix-installer-$INSTALLER_ARCH-$INSTALLER_OS" + EXPERIMENTAL_INSTALLER_PATH="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" + # TODO: This uses the latest release. 
It should probably be pinned, or dogfood the experimental repo's default branch - similar to the above + gh release download -R "$EXPERIMENTAL_INSTALLER_REPO" -D "$EXPERIMENTAL_INSTALLER_PATH" -p "nix-installer.sh" -p "$EXPERIMENTAL_INSTALLER_ARTIFACT" + chmod +x "$EXPERIMENTAL_INSTALLER_PATH/$EXPERIMENTAL_INSTALLER_ARTIFACT" + + echo "installer-path=$EXPERIMENTAL_INSTALLER_PATH" >> "$GITHUB_OUTPUT" + + echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + env: + GH_TOKEN: ${{ inputs.github_token }} + EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1 + if: ${{ inputs.experimental-installer != 'true' }} with: # Ternary operator in GHA: https://www.github.com/actions/runner/issues/409#issuecomment-752775072 install_url: ${{ inputs.dogfood == 'true' && format('{0}/install', steps.download-nix-installer.outputs.installer-path) || inputs.install_url }} install_options: ${{ inputs.dogfood == 'true' && format('--tarball-url-prefix {0}', steps.download-nix-installer.outputs.installer-path) || '' }} extra_nix_config: ${{ inputs.extra_nix_config }} + - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 + if: ${{ inputs.experimental-installer == 'true' }} + with: + diagnostic-endpoint: "" + local-root: ${{ steps.download-experimental-nix-installer.outputs.installer-path }} + nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }} + extra-conf: ${{ inputs.extra_nix_config }} From d2293fb458feb3b75d4ed81b32136b335610218b Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Tue, 16 Sep 2025 00:47:02 -0400 Subject: [PATCH 101/373] ci: enable experimental installer tests --- .github/workflows/ci.yml | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcf0814d8..145bbe6d9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -135,9 +135,19 @@ jobs: - scenario: on ubuntu runs-on: ubuntu-24.04 os: linux + experimental-installer: false - scenario: on macos runs-on: macos-14 os: darwin + experimental-installer: false + - scenario: on ubuntu (experimental) + runs-on: ubuntu-24.04 + os: linux + experimental-installer: true + - scenario: on macos (experimental) + runs-on: macos-14 + os: darwin + experimental-installer: true name: installer test ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} steps: @@ -149,11 +159,22 @@ jobs: path: out - name: Looking up the installer tarball URL id: installer-tarball-url - run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" + run: | + echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" + TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)" + echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - uses: cachix/install-nix-action@v31 + if: ${{ !matrix.experimental-installer }} with: install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} + - uses: ./.github/actions/install-nix-action + if: ${{ matrix.experimental-installer }} + with: + dogfood: false + experimental-installer: true + tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }} + github_token: ${{ secrets.GITHUB_TOKEN }} - run: sudo apt install fish zsh if: matrix.os == 'linux' - run: brew install fish From 92d7381826982f7193145e9fa786eb0f0b1420a2 Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Fri, 3 Oct 2025 02:01:03 -0400 Subject: [PATCH 102/373] ci: allow for using the latest build of the experimental installer Until these repos are potentially merged, this is good for dogfooding alongside the 
experimental installer. It also uses the more official `artifacts.nixos.org` endpoint to install stable releases now More immediately though, we need a patch for the experimental installer to really work in CI at all, and that hasn't landed in a tag yet. So, this lets us use it right from `main`! --- .../actions/install-nix-action/action.yaml | 49 +++++++++++++------ 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml index b9861131d..46abea179 100644 --- a/.github/actions/install-nix-action/action.yaml +++ b/.github/actions/install-nix-action/action.yaml @@ -7,6 +7,10 @@ inputs: experimental-installer: description: "Whether to use the experimental installer to install Nix" default: false + experimental-installer-version: + description: "Version of the experimental installer to use. If `latest`, the newest artifact from the default branch is used." + # TODO: This should probably be pinned to a release after https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one + default: "latest" extra_nix_config: description: "Gets appended to `/etc/nix/nix.conf` if passed." 
install_url: @@ -50,36 +54,51 @@ runs: env: GH_TOKEN: ${{ inputs.github_token }} DOGFOOD_REPO: "NixOS/nix" - - name: "Download experimental installer" + - name: "Gather system info for experimental installer" shell: bash - id: download-experimental-nix-installer if: ${{ inputs.experimental-installer == 'true' }} run: | + echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + if [ "$RUNNER_OS" == "Linux" ]; then - INSTALLER_OS="linux" + EXPERIMENTAL_INSTALLER_SYSTEM="linux" + echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" elif [ "$RUNNER_OS" == "macOS" ]; then - INSTALLER_OS="darwin" + EXPERIMENTAL_INSTALLER_SYSTEM="darwin" + echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" else echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" + exit 1 fi if [ "$RUNNER_ARCH" == "X64" ]; then - INSTALLER_ARCH="x86_64" + EXPERIMENTAL_INSTALLER_ARCH=x86_64 + echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" elif [ "$RUNNER_ARCH" == "ARM64" ]; then - INSTALLER_ARCH="aarch64" + EXPERIMENTAL_INSTALLER_ARCH=aarch64 + echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" else echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH" + exit 1 fi - EXPERIMENTAL_INSTALLER_ARTIFACT="nix-installer-$INSTALLER_ARCH-$INSTALLER_OS" - EXPERIMENTAL_INSTALLER_PATH="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" - # TODO: This uses the latest release. 
It should probably be pinned, or dogfood the experimental repo's default branch - similar to the above - gh release download -R "$EXPERIMENTAL_INSTALLER_REPO" -D "$EXPERIMENTAL_INSTALLER_PATH" -p "nix-installer.sh" -p "$EXPERIMENTAL_INSTALLER_ARTIFACT" - chmod +x "$EXPERIMENTAL_INSTALLER_PATH/$EXPERIMENTAL_INSTALLER_ARTIFACT" + echo "EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" + env: + EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" + - name: "Download latest experimental installer" + shell: bash + id: download-latest-experimental-installer + if: ${{ inputs.experimental-installer == 'true' && inputs.experimental-installer-version == 'latest' }} + run: | + RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId") - echo "installer-path=$EXPERIMENTAL_INSTALLER_PATH" >> "$GITHUB_OUTPUT" + EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" + mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" - echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n "$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" + # Executable permissions are lost in artifacts + find $EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR -type f -exec chmod +x {} + + echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT" env: GH_TOKEN: ${{ inputs.github_token }} EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" @@ -94,6 +113,8 @@ runs: if: ${{ inputs.experimental-installer == 'true' }} with: diagnostic-endpoint: "" - local-root: ${{ steps.download-experimental-nix-installer.outputs.installer-path }} + # TODO: It'd be nice to use `artifacts.nixos.org` for both of these, maybe through an 
`/experimental-installer/latest` endpoint? or `/commit/`? + local-root: ${{ inputs.experimental-installer-version == 'latest' && steps.download-latest-experimental-installer.outputs.installer-path || '' }} + source-url: ${{ inputs.experimental-installer-version != 'latest' && 'https://artifacts.nixos.org/experimental-installer/tag/${{ inputs.experimental-installer-version }}/${{ env.EXPERIMENTAL_INSTALLER_ARTIFACT }}' || '' }} nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }} extra-conf: ${{ inputs.extra_nix_config }} From 584ef0ffd30c4a06b6d664219b794e2dedf7e844 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Oct 2025 14:34:13 +0200 Subject: [PATCH 103/373] Add external builders These are helper programs that execute derivations for specified system types (e.g. using QEMU to emulate another system type). To use, set `external-builders`: external-builders = [{"systems": ["aarch64-linux"], "program": "/path/to/external-builder.py"}] The external builder gets one command line argument, the path to a JSON file containing all necessary information about the derivation: { "args": [...], "builder": "/nix/store/kwcyvgdg98n98hqapaz8sw92pc2s78x6-bash-5.2p37/bin/bash", "env": { "HOME": "/homeless-shelter", ... 
}, "realStoreDir": "/tmp/nix/nix/store", "storeDir": "/nix/store", "tmpDir": "/tmp/nix-shell.dzQ2hE/nix-build-patchelf-0.14.3.drv-46/build", "tmpDirInSandbox": "/build" } Co-authored-by: Cole Helbling --- src/libstore/globals.cc | 31 ++++- src/libstore/include/nix/store/globals.hh | 99 ++++++++++++++++ src/libstore/unix/build/derivation-builder.cc | 43 ++++--- .../unix/build/external-derivation-builder.cc | 110 ++++++++++++++++++ src/libutil/experimental-features.cc | 8 ++ .../include/nix/util/experimental-features.hh | 1 + 6 files changed, 274 insertions(+), 18 deletions(-) create mode 100644 src/libstore/unix/build/external-derivation-builder.cc diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 612e79ab0..58a649fc5 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -341,10 +341,15 @@ PathsInChroot BaseSetting::parse(const std::string & str) const i.pop_back(); } size_t p = i.find('='); - if (p == std::string::npos) - pathsInChroot[i] = {.source = i, .optional = optional}; - else - pathsInChroot[i.substr(0, p)] = {.source = i.substr(p + 1), .optional = optional}; + std::string inside, outside; + if (p == std::string::npos) { + inside = i; + outside = i; + } else { + inside = i.substr(0, p); + outside = i.substr(p + 1); + } + pathsInChroot[inside] = {.source = outside, .optional = optional}; } return pathsInChroot; } @@ -374,6 +379,24 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Settings::ExternalBuilder, systems, program, args); + +template<> +Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const +{ + try { + return nlohmann::json::parse(str).template get(); + } catch (std::exception & e) { + throw UsageError("parsing setting '%s': %s", name, e.what()); + } +} + +template<> +std::string BaseSetting::to_string() const +{ + return nlohmann::json(value).dump(); +} + template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool 
append) { diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 2cd92467c..ae8990eab 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1372,6 +1372,105 @@ public: Default is 0, which disables the warning. Set it to 1 to warn on all paths. )"}; + + struct ExternalBuilder + { + std::vector systems; + Path program; + std::vector args; + }; + + using ExternalBuilders = std::vector; + + Setting externalBuilders{ + this, + {}, + "external-builders", + R"( + Helper programs that execute derivations. + + The program is passed a JSON document that describes the build environment as the final argument. + The JSON document looks like this: + + { + "args": [ + "-e", + "/nix/store/vj1c3wf9…-source-stdenv.sh", + "/nix/store/shkw4qm9…-default-builder.sh" + ], + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "env": { + "HOME": "/homeless-shelter", + "NIX_BUILD_CORES": "14", + "NIX_BUILD_TOP": "/build", + "NIX_LOG_FD": "2", + "NIX_STORE": "/nix/store", + "PATH": "/path-not-set", + "PWD": "/build", + "TEMP": "/build", + "TEMPDIR": "/build", + "TERM": "xterm-256color", + "TMP": "/build", + "TMPDIR": "/build", + "__structuredAttrs": "", + "buildInputs": "", + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "cmakeFlags": "", + "configureFlags": "", + "depsBuildBuild": "", + "depsBuildBuildPropagated": "", + "depsBuildTarget": "", + "depsBuildTargetPropagated": "", + "depsHostHost": "", + "depsHostHostPropagated": "", + "depsTargetTarget": "", + "depsTargetTargetPropagated": "", + "doCheck": "1", + "doInstallCheck": "1", + "mesonFlags": "", + "name": "hello-2.12.2", + "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2", + "outputs": "out", + "patches": "", + "pname": "hello", + "postInstallCheck": "stat \"${!outputBin}/bin/hello\"\n", + "propagatedBuildInputs": "", + "propagatedNativeBuildInputs": "", + 
"src": "/nix/store/dw402azx…-hello-2.12.2.tar.gz", + "stdenv": "/nix/store/i8bw5nqg…-stdenv-linux", + "strictDeps": "", + "system": "aarch64-linux", + "version": "2.12.2" + }, + "realStoreDir": "/nix/store", + "storeDir": "/nix/store", + "system": "aarch64-linux", + "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", + "tmpDirInSandbox": "/build", + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0" + } + )", + {}, // aliases + true, // document default + // NOTE(cole-h): even though we can make the experimental feature required here, the errors + // are not as good (it just becomes a warning if you try to use this setting without the + // experimental feature) + // + // With this commented out: + // + // error: experimental Nix feature 'external-builders' is disabled; add '--extra-experimental-features + // external-builders' to enable it + // + // With this uncommented: + // + // warning: Ignoring setting 'external-builders' because experimental feature 'external-builders' is not enabled + // error: Cannot build '/nix/store/vwsp4qd8…-opentofu-1.10.2.drv'. + // Reason: required system or feature not available + // Required system: 'aarch64-linux' with features {} + // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} + // Xp::ExternalBuilders + }; }; // FIXME: don't use a global variable. diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 7cf72fb84..e2bcb1b84 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -229,6 +229,12 @@ protected: return acquireUserLock(1, false); } + /** + * Throw an exception if we can't do this derivation because of + * missing system features. + */ + virtual void checkSystem(); + /** * Return the paths that should be made available in the sandbox. 
* This includes: @@ -666,21 +672,8 @@ static bool checkNotWorldWritable(std::filesystem::path path) return true; } -std::optional DerivationBuilderImpl::startBuild() +void DerivationBuilderImpl::checkSystem() { - if (useBuildUsers()) { - if (!buildUser) - buildUser = getBuildUser(); - - if (!buildUser) - return std::nullopt; - } - - /* Make sure that no other processes are executing under the - sandbox uids. This must be done before any chownToBuilder() - calls. */ - prepareUser(); - /* Right platform? */ if (!drvOptions.canBuildLocally(store, drv)) { auto msg = @@ -704,6 +697,24 @@ std::optional DerivationBuilderImpl::startBuild() throw BuildError(BuildResult::Failure::InputRejected, msg); } +} + +std::optional DerivationBuilderImpl::startBuild() +{ + if (useBuildUsers()) { + if (!buildUser) + buildUser = getBuildUser(); + + if (!buildUser) + return std::nullopt; + } + + checkSystem(); + + /* Make sure that no other processes are executing under the + sandbox uids. This must be done before any chownToBuilder() + calls. */ + prepareUser(); auto buildDir = store.config->getBuildDir(); @@ -1909,12 +1920,16 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "chroot-derivation-builder.cc" #include "linux-derivation-builder.cc" #include "darwin-derivation-builder.cc" +#include "external-derivation-builder.cc" namespace nix { std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) { + if (auto builder = ExternalDerivationBuilder::newIfSupported(store, miscMethods, params)) + return builder; + bool useSandbox = false; /* Are we doing a sandboxed build? 
*/ diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc new file mode 100644 index 000000000..4d3eba6db --- /dev/null +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -0,0 +1,110 @@ +namespace nix { + +struct ExternalDerivationBuilder : DerivationBuilderImpl +{ + Settings::ExternalBuilder externalBuilder; + + ExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + Settings::ExternalBuilder externalBuilder) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + , externalBuilder(std::move(externalBuilder)) + { + experimentalFeatureSettings.require(Xp::ExternalBuilders); + } + + static std::unique_ptr newIfSupported( + LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) + { + for (auto & handler : settings.externalBuilders.get()) { + for (auto & system : handler.systems) + if (params.drv.platform == system) + return std::make_unique( + store, std::move(miscMethods), std::move(params), handler); + } + return {}; + } + + Path tmpDirInSandbox() override + { + /* In a sandbox, for determinism, always use the same temporary + directory. 
*/ + return "/build"; + } + + void setBuildTmpDir() override + { + tmpDir = topTmpDir + "/build"; + createDir(tmpDir, 0700); + } + + void checkSystem() override {} + + void startChild() override + { + if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) + throw Error("'recursive-nix' is not supported yet by external derivation builders"); + + auto json = nlohmann::json::object(); + + json.emplace("builder", drv.builder); + { + auto l = nlohmann::json::array(); + for (auto & i : drv.args) + l.push_back(rewriteStrings(i, inputRewrites)); + json.emplace("args", std::move(l)); + } + { + auto j = nlohmann::json::object(); + for (auto & [name, value] : env) + j.emplace(name, rewriteStrings(value, inputRewrites)); + json.emplace("env", std::move(j)); + } + json.emplace("topTmpDir", topTmpDir); + json.emplace("tmpDir", tmpDir); + json.emplace("tmpDirInSandbox", tmpDirInSandbox()); + json.emplace("storeDir", store.storeDir); + json.emplace("realStoreDir", store.config->realStoreDir.get()); + json.emplace("system", drv.platform); + + // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit + // that, see this comment by Eelco about how to make it not suck: + // https://github.com/DeterminateSystems/nix-src/pull/141#discussion_r2205493257 + auto jsonFile = std::filesystem::path{topTmpDir} / "build.json"; + writeFile(jsonFile, json.dump()); + + pid = startProcess([&]() { + openSlave(); + try { + commonChildInit(); + + Strings args = {externalBuilder.program}; + + if (!externalBuilder.args.empty()) { + args.insert(args.end(), externalBuilder.args.begin(), externalBuilder.args.end()); + } + + args.insert(args.end(), jsonFile); + + if (chdir(tmpDir.c_str()) == -1) + throw SysError("changing into '%1%'", tmpDir); + + chownToBuilder(topTmpDir); + + setUser(); + + debug("executing external builder: %s", concatStringsSep(" ", args)); + execv(externalBuilder.program.c_str(), stringsToCharPtrs(args).data()); + + throw 
SysError("executing '%s'", externalBuilder.program); + } catch (...) { + handleChildException(true); + _exit(1); + } + }); + } +}; + +} // namespace nix diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 60d6bf74d..0edd5a585 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -304,6 +304,14 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/55", }, + { + .tag = Xp::ExternalBuilders, + .name = "external-builders", + .description = R"( + Enables support for external builders / sandbox providers. + )", + .trackingUrl = "", + }, { .tag = Xp::BLAKE3Hashes, .name = "blake3-hashes", diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 0a8f15863..73c4eeca4 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -37,6 +37,7 @@ enum struct ExperimentalFeature { MountedSSHStore, VerifiedFetches, PipeOperators, + ExternalBuilders, BLAKE3Hashes, }; From 73e4c40e648f6bd3053648df66b1b9c391217b9b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Oct 2025 15:07:08 +0200 Subject: [PATCH 104/373] Add test for external-builders --- tests/functional/external-builders.sh | 50 +++++++++++++++++++++++++++ tests/functional/meson.build | 1 + 2 files changed, 51 insertions(+) create mode 100644 tests/functional/external-builders.sh diff --git a/tests/functional/external-builders.sh b/tests/functional/external-builders.sh new file mode 100644 index 000000000..4c1d5636a --- /dev/null +++ b/tests/functional/external-builders.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +source common.sh + +TODO_NixOS + +needLocalStore "'--external-builders' can’t be used with the daemon" + +expr="$TEST_ROOT/expr.nix" +cat > "$expr" < \$out + ''; +} +EOF + +external_builder="$TEST_ROOT/external-builder.sh" +cat > 
"$external_builder" <> \$out +EOF +chmod +x "$external_builder" + +nix build -L --file "$expr" --out-link "$TEST_ROOT/result" \ + --extra-experimental-features external-builders \ + --external-builders "[{\"systems\": [\"x68_46-xunil\"], \"args\": [\"bla\"], \"program\": \"$external_builder\"}]" + +[[ $(cat "$TEST_ROOT/result") = foobar ]] diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 368f60452..6f649c836 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -174,6 +174,7 @@ suites = [ 'extra-sandbox-profile.sh', 'help.sh', 'symlinks.sh', + 'external-builders.sh', ], 'workdir' : meson.current_source_dir(), }, From 7ec1427fc33e2287dd4c1d3f750f9a2ba416a6dc Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 3 Oct 2025 12:03:25 -0700 Subject: [PATCH 105/373] libstore: fixup fakeSSH check This broke invocations like: NIX_SSHOPTS='-p2222 -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no' nix copy /nix/store/......-foo --to ssh-ng://root@localhost In Nix 2.30.2, fakeSSH was enabled when the "thing I want to connect to" was plain old "localhost". Previously, this check was written as: , fakeSSH(host == "localhost") Given the above invocation, `host` would have been `root@localhost`, and thus `fakeSSH` would be `false` because `root@localhost` != `localhost`. However, since 49ba06175ebc632a4c043e944ac6d9faf6a3ef2a, `authority.host` returned _just_ the host (`localhost`, no user) and erroneously enabled `fakeSSH` in this case, causing `NIX_SSHOPTS` to be ignored (since, when `fakeSSH` is `true`, `SSHMaster::startCommand` doesn't call `addCommonSSHOpts`). `authority.to_string()` accurately returns the expected `root@localhost` format (given the above invocation), fixing this. 
--- src/libstore/ssh.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 0f1dba1e9..1a9908366 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -78,7 +78,7 @@ SSHMaster::SSHMaster( oss << authority.host; return std::move(oss).str(); }()) - , fakeSSH(authority.host == "localhost") + , fakeSSH(authority.to_string() == "localhost") , keyFile(keyFile) , sshPublicHostKey(parsePublicHostKey(authority.host, sshPublicHostKey)) , useMaster(useMaster && !fakeSSH) From 76a92985d7c8495ec45aa426c9f85c1cc36ddd6d Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Mon, 29 Sep 2025 13:13:15 -0400 Subject: [PATCH 106/373] libexpr: allocate ExprSelect's AttrName vector in Expr::alloc --- src/libexpr/eval.cc | 14 +++++----- src/libexpr/include/nix/expr/nixexpr.hh | 34 ++++++++++++++++++++----- src/libexpr/nixexpr.cc | 6 ++--- src/libexpr/parser.y | 8 +++--- 4 files changed, 42 insertions(+), 20 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 20ebe026a..8cb647c5f 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1341,7 +1341,7 @@ void ExprVar::eval(EvalState & state, Env & env, Value & v) v = *v2; } -static std::string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPath) +static std::string showAttrPath(EvalState & state, Env & env, std::span attrPath) { std::ostringstream out; bool first = true; @@ -1377,10 +1377,10 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) env, getPos(), "while evaluating the attribute '%1%'", - showAttrPath(state, env, attrPath)) + showAttrPath(state, env, getAttrPath())) : nullptr; - for (auto & i : attrPath) { + for (auto & i : getAttrPath()) { state.nrLookups++; const Attr * j; auto name = getName(i, state, env); @@ -1418,7 +1418,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) auto origin = std::get_if(&pos2r.origin); if (!(origin && *origin == state.derivationInternal)) 
state.addErrorTrace( - e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath)); + e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, getAttrPath())); } throw; } @@ -1429,13 +1429,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) Symbol ExprSelect::evalExceptFinalSelect(EvalState & state, Env & env, Value & attrs) { Value vTmp; - Symbol name = getName(attrPath[attrPath.size() - 1], state, env); + Symbol name = getName(attrPathStart[nAttrPath - 1], state, env); - if (attrPath.size() == 1) { + if (nAttrPath == 1) { e->eval(state, env, vTmp); } else { ExprSelect init(*this); - init.attrPath.pop_back(); + init.nAttrPath--; init.eval(state, env, vTmp); } attrs = vTmp; diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 2af6039cd..512999020 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -2,8 +2,10 @@ ///@file #include +#include #include #include +#include #include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" @@ -79,9 +81,11 @@ struct AttrName : expr(e) {}; }; +static_assert(std::is_trivially_copy_constructible_v); + typedef std::vector AttrPath; -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath); +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath); using UpdateQueue = SmallTemporaryValueVector; @@ -288,20 +292,33 @@ struct ExprInheritFrom : ExprVar struct ExprSelect : Expr { PosIdx pos; + uint32_t nAttrPath; Expr *e, *def; - AttrPath attrPath; - ExprSelect(const PosIdx & pos, Expr * e, AttrPath attrPath, Expr * def) + AttrName * attrPathStart; + + ExprSelect( + std::pmr::polymorphic_allocator & alloc, + const PosIdx & pos, + Expr * e, + std::span attrPath, + Expr * def) : pos(pos) + , nAttrPath(attrPath.size()) , e(e) , def(def) - , attrPath(std::move(attrPath)) {}; + , attrPathStart(alloc.allocate_object(nAttrPath)) + { + 
std::ranges::copy(attrPath, attrPathStart); + }; - ExprSelect(const PosIdx & pos, Expr * e, Symbol name) + ExprSelect(std::pmr::polymorphic_allocator & alloc, const PosIdx & pos, Expr * e, Symbol name) : pos(pos) + , nAttrPath(1) , e(e) , def(0) + , attrPathStart((alloc.allocate_object())) { - attrPath.push_back(AttrName(name)); + *attrPathStart = AttrName(name); }; PosIdx getPos() const override @@ -309,6 +326,11 @@ struct ExprSelect : Expr return pos; } + std::span getAttrPath() const + { + return {attrPathStart, nAttrPath}; + } + /** * Evaluate the `a.b.c` part of `a.b.c.d`. This exists mostly for the purpose of :doc in the repl. * diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 014b85f20..5b9d17d49 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -57,7 +57,7 @@ void ExprSelect::show(const SymbolTable & symbols, std::ostream & str) const { str << "("; e->show(symbols, str); - str << ")." << showAttrPath(symbols, attrPath); + str << ")." << showAttrPath(symbols, getAttrPath()); if (def) { str << " or ("; def->show(symbols, str); @@ -261,7 +261,7 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const str << "__curPos"; } -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath) +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath) { std::ostringstream out; bool first = true; @@ -362,7 +362,7 @@ void ExprSelect::bindVars(EvalState & es, const std::shared_ptr e->bindVars(es, env); if (def) def->bindVars(es, env); - for (auto & i : attrPath) + for (auto & i : getAttrPath()) if (!i.symbol) i.expr->bindVars(es, env); } diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index bc1eb056e..56e65acfb 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -282,9 +282,9 @@ expr_app expr_select : expr_simple '.' 
attrpath - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), nullptr); delete $3; } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), nullptr); delete $3; } | expr_simple '.' attrpath OR_KW expr_select - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); } | /* Backwards compatibility: because Nixpkgs has a function named ‘or’, allow stuff like ‘map or [...]’. This production is problematic (see https://github.com/NixOS/nix/issues/11118) and will be refactored in the @@ -343,7 +343,7 @@ expr_simple /* Let expressions `let {..., body = ...}' are just desugared into `(rec {..., body = ...}).body'. */ | LET '{' binds '}' - { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(noPos, $3, state->s.body); } + { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(state->alloc, noPos, $3, state->s.body); } | REC '{' binds '}' { $3->recursive = true; $3->pos = CUR_POS; $$ = $3; } | '{' binds1 '}' @@ -447,7 +447,7 @@ binds1 $accum->attrs.emplace( i.symbol, ExprAttrs::AttrDef( - new ExprSelect(iPos, from, i.symbol), + new ExprSelect(state->alloc, iPos, from, i.symbol), iPos, ExprAttrs::AttrDef::Kind::InheritedFrom)); } From 39109c05be66c7dde854be3021c24183c92bf6bb Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Fri, 3 Oct 2025 12:49:55 -0400 Subject: [PATCH 107/373] libexpr: allocate ExprOpHasAttr's AttrPath in Exprs::alloc --- src/libexpr/include/nix/expr/nixexpr.hh | 10 +++++++--- src/libexpr/parser.y | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 512999020..b66dba4f3 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -348,10 +348,14 @@ struct ExprSelect : Expr struct 
ExprOpHasAttr : Expr { Expr * e; - AttrPath attrPath; - ExprOpHasAttr(Expr * e, AttrPath attrPath) + std::span attrPath; + + ExprOpHasAttr(std::pmr::polymorphic_allocator alloc, Expr * e, std::vector attrPath) : e(e) - , attrPath(std::move(attrPath)) {}; + , attrPath({alloc.allocate_object(attrPath.size()), attrPath.size()}) + { + std::ranges::copy(attrPath, this->attrPath.begin()); + }; PosIdx getPos() const override { diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 56e65acfb..9186fcf4b 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -261,7 +261,7 @@ expr_op | expr_op OR expr_op { $$ = new ExprOpOr(state->at(@2), $1, $3); } | expr_op IMPL expr_op { $$ = new ExprOpImpl(state->at(@2), $1, $3); } | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(state->at(@2), $1, $3); } - | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, std::move(*$3)); delete $3; } + | expr_op '?' attrpath { $$ = new ExprOpHasAttr(state->alloc, $1, std::move(*$3)); delete $3; } | expr_op '+' expr_op { $$ = new ExprConcatStrings(state->at(@2), false, new std::vector >({{state->at(@1), $1}, {state->at(@3), $3}})); } | expr_op '-' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.sub), {$1, $3}); } From dce1a893d0206083cbab19b9211ddb01eaa53f70 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 02:30:21 +0300 Subject: [PATCH 108/373] treewide: Remove toView() because it leads to segfaults when compiled with newer nixpkgs Firstly, this is now available on darwin where the default in llvm 19. Secondly, this leads to very weird segfaults when building with newer nixpkgs for some reason. (It's UB after all). 
This appears when building with the following: mesonComponentOverrides = finalAttrs: prevAttrs: { mesonBuildType = "debugoptimized"; dontStrip = true; doCheck = false; separateDebugInfo = false; preConfigure = (prevAttrs.preConfigure or "") + '' case "$mesonBuildType" in release|minsize|debugoptimized) appendToVar mesonFlags "-Db_lto=true" ;; *) appendToVar mesonFlags "-Db_lto=false" ;; esac ''; }; And with the following nixpkgs input: nix build ".#nix-cli" -L --override-input nixpkgs "https://releases.nixos.org/nixos/unstable/nixos-25.11pre870157.7df7ff7d8e00/nixexprs.tar.xz" Stacktrace: #0 0x00000000006afdc0 in ?? () #1 0x00007ffff71cebb6 in _Unwind_ForcedUnwind_Phase2 () from /nix/store/41ym1jm1b7j3rhglk82gwg9jml26z1km-gcc-14.3.0-lib/lib/libgcc_s.so.1 #2 0x00007ffff71cf5b5 in _Unwind_Resume () from /nix/store/41ym1jm1b7j3rhglk82gwg9jml26z1km-gcc-14.3.0-lib/lib/libgcc_s.so.1 #3 0x00007ffff7eac7d8 in std::basic_ios >::~basic_ios (this=, this=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/bits/basic_ios.h:286 #4 std::__cxx11::basic_ostringstream, std::allocator >::basic_ostringstream (this=, this=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/sstream:806 #5 nix::SimpleLogger::logEI (this=, ei=...) at ../logging.cc:121 #6 0x00007ffff7515794 in nix::Logger::logEI (this=0x675450, lvl=nix::lvlError, ei=...) at /nix/store/bkshji3nnxmrmgwa4n2kaxadajkwvn65-nix-util-2.32.0pre-dev/include/nix/util/logging.hh:144 #7 nix::handleExceptions (programName=..., fun=...) 
at ../shared.cc:336 #8 0x000000000047b76b in main (argc=, argv=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/bits/new_allocator.h:88 --- src/libcmd/repl.cc | 2 +- src/libexpr/eval.cc | 4 ++-- src/libexpr/primops.cc | 4 ++-- src/libexpr/primops/fromTOML.cc | 2 +- src/libexpr/print.cc | 2 +- src/libmain/progress-bar.cc | 2 +- src/libstore/daemon.cc | 2 +- src/libutil/include/nix/util/strings.hh | 5 ----- src/libutil/logging.cc | 2 +- src/libutil/strings.cc | 17 ----------------- src/nix/config-check.cc | 6 +++--- src/nix/nix-build/nix-build.cc | 4 ++-- src/nix/nix-env/user-env.cc | 2 +- 13 files changed, 16 insertions(+), 38 deletions(-) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 38d06336b..a308b731d 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -669,7 +669,7 @@ ProcessLineResult NixRepl::processLine(std::string line) ss << "No documentation found.\n\n"; } - auto markdown = toView(ss); + auto markdown = ss.view(); logger->cout(trim(renderMarkdownToTerminal(markdown))); } else diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 8cb647c5f..db17f103b 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -591,7 +591,7 @@ std::optional EvalState::getDoc(Value & v) .name = name, .arity = 0, // FIXME: figure out how deep by syntax only? It's not semantically useful though... 
.args = {}, - .doc = makeImmutableString(toView(s)), // NOTE: memory leak when compiled without GC + .doc = makeImmutableString(s.view()), // NOTE: memory leak when compiled without GC }; } if (isFunctor(v)) { @@ -1811,7 +1811,7 @@ void ExprAssert::eval(EvalState & state, Env & env, Value & v) if (!state.evalBool(env, cond, pos, "in the condition of the assert statement")) { std::ostringstream out; cond->show(state.symbols, out); - auto exprStr = toView(out); + auto exprStr = out.view(); if (auto eq = dynamic_cast(cond)) { try { diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index a8ac8d159..86cb00131 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2412,7 +2412,7 @@ static void prim_toXML(EvalState & state, const PosIdx pos, Value ** args, Value std::ostringstream out; NixStringContext context; printValueAsXML(state, true, false, *args[0], out, context, pos); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toXML({ @@ -2520,7 +2520,7 @@ static void prim_toJSON(EvalState & state, const PosIdx pos, Value ** args, Valu std::ostringstream out; NixStringContext context; printValueAsJSON(state, true, *args[0], pos, out, context); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toJSON({ diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc index 3ab594905..d2f91a75b 100644 --- a/src/libexpr/primops/fromTOML.cc +++ b/src/libexpr/primops/fromTOML.cc @@ -139,7 +139,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va attrs.alloc("_type").mkStringNoCopy("timestamp"); std::ostringstream s; s << t; - auto str = toView(s); + auto str = s.view(); forceNoNullByte(str); attrs.alloc("value").mkString(str); v.mkAttrs(attrs); diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 071addc1a..4776be033 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -461,7 +461,7 @@ 
private: std::ostringstream s; s << state.positions[v.lambda().fun->pos]; - output << " @ " << filterANSIEscapes(toView(s)); + output << " @ " << filterANSIEscapes(s.view()); } } else if (v.isPrimOp()) { if (v.primOp()) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index c00f5d86b..edec8460d 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -183,7 +183,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(*state, ei.level, toView(oss)); + log(*state, ei.level, oss.view()); } void log(State & state, Verbosity lvl, std::string_view s) diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 2898f113f..00c0a1fdd 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -102,7 +102,7 @@ struct TunnelLogger : public Logger showErrorInfo(oss, ei, false); StringSink buf; - buf << STDERR_NEXT << toView(oss); + buf << STDERR_NEXT << oss.view(); enqueueMsg(buf.s); } diff --git a/src/libutil/include/nix/util/strings.hh b/src/libutil/include/nix/util/strings.hh index b4ef66bfe..ba37ce79f 100644 --- a/src/libutil/include/nix/util/strings.hh +++ b/src/libutil/include/nix/util/strings.hh @@ -12,11 +12,6 @@ namespace nix { -/* - * workaround for unavailable view() method (C++20) of std::ostringstream under MacOS with clang-16 - */ -std::string_view toView(const std::ostringstream & os); - /** * String tokenizer. 
* diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 997110617..e2f28f553 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -121,7 +121,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(ei.level, toView(oss)); + log(ei.level, oss.view()); } void startActivity( diff --git a/src/libutil/strings.cc b/src/libutil/strings.cc index a95390089..a87567cef 100644 --- a/src/libutil/strings.cc +++ b/src/libutil/strings.cc @@ -8,23 +8,6 @@ namespace nix { -struct view_stringbuf : public std::stringbuf -{ - inline std::string_view toView() - { - auto begin = pbase(); - return {begin, begin + pubseekoff(0, std::ios_base::cur, std::ios_base::out)}; - } -}; - -__attribute__((no_sanitize("undefined"))) std::string_view toView(const std::ostringstream & os) -{ - /* Downcasting like this is very much undefined behavior, so we disable - UBSAN for this function. */ - auto buf = static_cast(os.rdbuf()); - return buf->toView(); -} - template std::list tokenizeString(std::string_view s, std::string_view separators); template StringSet tokenizeString(std::string_view s, std::string_view separators); template std::vector tokenizeString(std::string_view s, std::string_view separators); diff --git a/src/nix/config-check.cc b/src/nix/config-check.cc index c04943eab..e1efb40eb 100644 --- a/src/nix/config-check.cc +++ b/src/nix/config-check.cc @@ -100,7 +100,7 @@ struct CmdConfigCheck : StoreCommand ss << "Multiple versions of nix found in PATH:\n"; for (auto & dir : dirs) ss << " " << dir << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("PATH contains only one nix version."); @@ -143,7 +143,7 @@ struct CmdConfigCheck : StoreCommand for (auto & dir : dirs) ss << " " << dir << "\n"; ss << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("All profiles are gcroots."); @@ -162,7 +162,7 @@ struct CmdConfigCheck : StoreCommand << "sync with 
the daemon.\n\n" << "Client protocol: " << formatProtocol(clientProto) << "\n" << "Store protocol: " << formatProtocol(storeProto) << "\n\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("Client protocol matches store protocol."); diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index d3902f2a6..eef97aa19 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -285,10 +285,10 @@ static void main_nix_build(int argc, char ** argv) execArgs, interpreter, escapeShellArgAlways(script), - toView(joined)); + joined.view()); } else { envCommand = - fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), toView(joined)); + fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), joined.view()); } } diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index fbdcb14f8..81e2c4f80 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -108,7 +108,7 @@ bool createUserEnv( auto manifestFile = ({ std::ostringstream str; printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits::max()); - StringSource source{toView(str)}; + StringSource source{str.view()}; state.store->addToStoreFromDump( source, "env-manifest.nix", From 452ec09fe0d027565defb804c29bde6d62996a95 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 16:55:41 +0300 Subject: [PATCH 109/373] libstore: Fix use-after-move in DerivationGoal::repairClosure --- src/libstore/build/derivation-goal.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 81f4e6654..3c26a6922 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -378,9 +378,10 @@ Goal::Co DerivationGoal::repairClosure() bmRepair)); } + bool haveWaitees = !waitees.empty(); co_await await(std::move(waitees)); - if 
(!waitees.empty()) { + if (haveWaitees) { trace("closure repaired"); if (nrFailed > 0) throw Error( From be1ade737391a6656b3ffb872fb9ec7b36c89ca0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 16:57:13 +0300 Subject: [PATCH 110/373] libexpr: Use use-after-move in SampleStack::saveProfile() --- src/libexpr/eval-profiler.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/eval-profiler.cc b/src/libexpr/eval-profiler.cc index ba92faf18..e9dc1e021 100644 --- a/src/libexpr/eval-profiler.cc +++ b/src/libexpr/eval-profiler.cc @@ -324,7 +324,7 @@ void SampleStack::saveProfile() std::visit([&](auto && info) { info.symbolize(state, os, posCache); }, pos); } os << " " << count; - writeLine(profileFd.get(), std::move(os).str()); + writeLine(profileFd.get(), os.str()); /* Clear ostringstream. */ os.str(""); os.clear(); From 06a82da6f54bda38355171d061485a1119f36300 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 5 Oct 2025 11:18:30 -0700 Subject: [PATCH 111/373] clang-tidy fix for src/libstore/build/derivation-check.cc --- src/libstore/build/derivation-check.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index db3ec7c3d..181221ba5 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -18,7 +18,11 @@ void checkOutputs( for (auto & output : outputs) outputsByPath.emplace(store.printStorePath(output.second.path), output.second); - for (auto & [outputName, info] : outputs) { + for (auto & pair : outputs) { + // We can't use auto destructuring here because + // clang-tidy seems to complain about it. 
+ const std::string & outputName = pair.first; + const auto & info = pair.second; auto * outputSpec = get(drvOutputs, outputName); assert(outputSpec); From 7e39ab4dc73dff2cc451e503fc300784f8c67224 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 21:54:32 +0300 Subject: [PATCH 112/373] Revert "Merge pull request #14097 from obsidiansystems/light-realisation-improvements" This reverts commit dc8c1461daa7e8db2a78f14ba0edd25e9df93e60, reversing changes made to 28adcfda3200c7f1f281f80686a1ab40311e0e5d. --- src/libcmd/built-path.cc | 5 +- src/libstore-tests/common-protocol.cc | 44 +++--- src/libstore-tests/dummy-store.cc | 15 +- src/libstore-tests/realisation.cc | 20 +-- src/libstore-tests/serve-protocol.cc | 74 +++++----- src/libstore-tests/worker-protocol.cc | 132 +++++++++--------- src/libstore/binary-cache-store.cc | 19 +-- .../build/derivation-building-goal.cc | 13 +- src/libstore/build/derivation-goal.cc | 59 ++------ .../build/drv-output-substitution-goal.cc | 10 +- src/libstore/daemon.cc | 4 +- src/libstore/dummy-store.cc | 21 +-- .../include/nix/store/binary-cache-store.hh | 17 +-- .../nix/store/build/derivation-goal.hh | 6 +- .../build/drv-output-substitution-goal.hh | 3 +- .../include/nix/store/dummy-store-impl.hh | 12 -- src/libstore/include/nix/store/dummy-store.hh | 2 - .../include/nix/store/legacy-ssh-store.hh | 4 +- .../include/nix/store/local-overlay-store.hh | 2 +- src/libstore/include/nix/store/local-store.hh | 6 +- src/libstore/include/nix/store/realisation.hh | 50 +++---- .../include/nix/store/remote-store.hh | 2 +- src/libstore/include/nix/store/store-api.hh | 9 +- src/libstore/local-overlay-store.cc | 8 +- src/libstore/local-store.cc | 17 +-- src/libstore/misc.cc | 5 +- src/libstore/realisation.cc | 50 +++---- src/libstore/remote-store.cc | 18 ++- src/libstore/restricted-store.cc | 4 +- src/libstore/store-api.cc | 20 ++- src/libstore/unix/build/derivation-builder.cc | 7 +- src/libutil/include/nix/util/hash.hh | 19 --- 32 files 
changed, 254 insertions(+), 423 deletions(-) diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index fc7f18493..4d76dd6da 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -117,11 +117,10 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)", store.printStorePath(p.drvPath->outPath()), outputName); - DrvOutput key{*drvOutput, outputName}; - auto thisRealisation = store.queryRealisation(key); + auto thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName}); assert(thisRealisation); // We’ve built it, so we must // have the realisation - res.insert(Realisation{*thisRealisation, std::move(key)}); + res.insert(*thisRealisation); } else { res.insert(outputPath); } diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 2c001957b..35fca165d 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,34 +112,32 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ 
- .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc index 3dd8137a3..b841d7890 100644 --- a/src/libstore-tests/dummy-store.cc +++ b/src/libstore-tests/dummy-store.cc @@ -1,6 +1,6 @@ #include -#include "nix/store/dummy-store-impl.hh" +#include "nix/store/dummy-store.hh" #include "nix/store/globals.hh" #include "nix/store/realisation.hh" @@ -13,7 +13,7 @@ TEST(DummyStore, realisation_read) auto store = [] { auto cfg = make_ref(StoreReference::Params{}); cfg->readOnly = false; - return cfg->openDummyStore(); + return cfg->openStore(); }(); auto drvHash = Hash::parseExplicitFormatUnprefixed( @@ -22,17 +22,6 @@ TEST(DummyStore, realisation_read) auto outputName = "foo"; EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); - - UnkeyedRealisation value{ - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, - }; - - store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); - - auto value2 = store->queryRealisation({drvHash, outputName}); - - ASSERT_TRUE(value2); - EXPECT_EQ(*value2, value); } } // namespace nix diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index d16049bc5..a5a5bee50 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P( RealisationJsonTest, ([] { Realisation simple{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, - }, - { - .drvHash = Hash::parseExplicitFormatUnprefixed( - 
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", - HashAlgorithm::SHA256, - HashFormat::Base16), - .outputName = "foo", - }, + + .id = + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, }; return ::testing::Values( std::pair{ diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index 10aa21e9d..a63201164 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,34 +95,32 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - { - 
.drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) @@ -198,27 +196,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index c4afde3bd..489151c8c 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,34 +148,32 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = 
Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) @@ -216,25 +214,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, @@ -269,27 +267,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + 
.outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, @@ -328,27 +324,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 3705f3d4d..badfb4b14 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -502,15 +502,10 @@ StorePath BinaryCacheStore::addToStore( ->path; } -std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id) -{ - return realisationsPrefix + "/" + id.to_string() + ".doi"; -} - void BinaryCacheStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { - auto 
outputInfoFilePath = makeRealisationPath(id); + auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; auto callbackPtr = std::make_shared(std::move(callback)); @@ -520,12 +515,11 @@ void BinaryCacheStore::queryRealisationUncached( if (!data) return (*callbackPtr)({}); - std::shared_ptr realisation; + std::shared_ptr realisation; try { - realisation = std::make_shared(nlohmann::json::parse(*data)); + realisation = std::make_shared(nlohmann::json::parse(*data)); } catch (Error & e) { - e.addTrace( - {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string()); + e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath); throw; } return (*callbackPtr)(std::move(realisation)); @@ -541,7 +535,8 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) { if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); - upsertFile(makeRealisationPath(info.id), static_cast(info).dump(), "application/json"); + auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; + upsertFile(filePath, static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index c39fd8c1c..fa819c96b 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1092,22 +1092,13 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). 
worker.store.registerDrvOutput( Realisation{ - { - .outPath = info.known->path, - }, drvOutput, + info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace( - i.first, - Realisation{ - { - .outPath = info.known->path, - }, - drvOutput, - }); + validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); } bool allValid = true; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 81f4e6654..cc3ba2b7b 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -190,17 +190,13 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto realisation = [&] { auto take1 = get(success.builtOutputs, wantedOutput); if (take1) - return static_cast(*take1); + return *take1; /* The above `get` should work. But stateful tracking of outputs in resolvedResult, this can get out of sync with the store, which is our actual source of truth. For now we just check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation( - DrvOutput{ - .drvHash = *resolvedHash, - .outputName = wantedOutput, - }); + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); if (take2) return *take2; @@ -211,12 +207,8 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) }(); if (!drv->type().isImpure()) { - Realisation newRealisation{ - realisation, - { - .drvHash = *outputHash, - .outputName = wantedOutput, - }}; + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; newRealisation.signatures.clear(); if (!drv->type().isFixed()) { auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; @@ -266,16 +258,7 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) /* In checking mode, the builder will not register any outputs. So we want to make sure the ones that we wanted to check are properly there. 
*/ - success.builtOutputs = {{ - wantedOutput, - { - assertPathValidity(), - { - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }, - }}; + success.builtOutputs = {{wantedOutput, assertPathValidity()}}; } else { /* Otherwise the builder will give us info for out output, but also for other outputs. Filter down to just our output so as @@ -390,20 +373,18 @@ Goal::Co DerivationGoal::repairClosure() co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } -std::optional> DerivationGoal::checkPathValidity() +std::optional> DerivationGoal::checkPathValidity() { if (drv->type().isImpure()) return std::nullopt; auto drvOutput = DrvOutput{outputHash, wantedOutput}; - std::optional mRealisation; + std::optional mRealisation; if (auto * mOutput = get(drv->outputs, wantedOutput)) { if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) { - mRealisation = UnkeyedRealisation{ - .outPath = std::move(*mPath), - }; + mRealisation = Realisation{drvOutput, std::move(*mPath)}; } } else { throw Error( @@ -431,14 +412,7 @@ std::optional> DerivationGoal::checkPa // derivation, and the output path is valid, but we don't have // its realisation stored (probably because it has been built // without the `ca-derivations` experimental flag). 
- worker.store.registerDrvOutput( - Realisation{ - *mRealisation, - { - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }); + worker.store.registerDrvOutput(*mRealisation); } return {{*mRealisation, status}}; @@ -446,7 +420,7 @@ std::optional> DerivationGoal::checkPa return std::nullopt; } -UnkeyedRealisation DerivationGoal::assertPathValidity() +Realisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -454,20 +428,11 @@ UnkeyedRealisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) { buildResult.inner = BuildResult::Success{ .status = status, - .builtOutputs = {{ - wantedOutput, - { - std::move(builtOutput), - DrvOutput{ - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }, - }}, + .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, }; mcExpectedBuilds.reset(); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index a969b905b..b6ace4784 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -43,10 +43,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -75,7 +75,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. 
*/ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -132,7 +132,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() } Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) + Goals waitees, std::shared_ptr outputInfo, nix::ref sub) { waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); @@ -145,7 +145,7 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( co_return amDone(nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed); } - worker.store.registerDrvOutput({*outputInfo, id}); + worker.store.registerDrvOutput(*outputInfo); trace("finished"); co_return amDone(ecSuccess); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 00c0a1fdd..1fc568e87 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -964,7 +964,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); + store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +986,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert({*info, outputId}); + realisations.insert(*info); WorkerProto::write(*store, wconn, realisations); } break; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 509b7a0b1..1eb51fe3e 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -3,7 +3,6 @@ #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store-impl.hh" -#include "nix/store/realisation.hh" #include @@ -252,10 +251,7 @@ struct DummyStoreImpl : DummyStore void registerDrvOutput(const 
Realisation & output) override { - auto ref = make_ref(output); - buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) { - kv.second.insert_or_assign(output.id.outputName, make_ref(output)); - }); + unsupported("registerDrvOutput"); } void narFromPath(const StorePath & path, Sink & sink) override @@ -270,19 +266,10 @@ struct DummyStoreImpl : DummyStore throw Error("path '%s' is not valid", printStorePath(path)); } - void queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept override + void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override { - bool visited = false; - buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) { - if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) { - visited = true; - callback(it->second.get_ptr()); - } - }); - - if (!visited) - callback(nullptr); + callback(nullptr); } std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 3f4de2bd4..c316b1199 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -80,22 +80,13 @@ private: protected: - /** - * The prefix under which realisation infos will be stored - */ - constexpr const static std::string realisationsPrefix = "realisations"; + // The prefix under which realisation infos will be stored + const std::string realisationsPrefix = "realisations"; - constexpr const static std::string cacheInfoFile = "nix-cache-info"; + const std::string cacheInfoFile = "nix-cache-info"; BinaryCacheStore(Config &); - /** - * Compute the path to the given realisation - * - * It's `${realisationsPrefix}/${drvOutput}.doi`. 
- */ - std::string makeRealisationPath(const DrvOutput & id); - public: virtual bool fileExists(const std::string & path) = 0; @@ -184,7 +175,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index c31645fff..353e7c489 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -93,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. 
*/ - UnkeyedRealisation assertPathValidity(); + Realisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index 1a5a4ea26..b42336427 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -39,8 +39,7 @@ public: GoalState state; Co init(); - Co - realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); + Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); void timedOut(Error && ex) override { diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh index 4c9f54e98..e05bb94ff 100644 --- a/src/libstore/include/nix/store/dummy-store-impl.hh +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -30,18 +30,6 @@ struct DummyStore : virtual Store */ boost::concurrent_flat_map contents; - /** - * The build trace maps the pair of a content-addressing (fixed or - * floating) derivations an one of its output to a - * (content-addressed) store object. - * - * It is [curried](https://en.wikipedia.org/wiki/Currying), so we - * instead having a single output with a `DrvOutput` key, we have an - * outer map for the derivation, and inner maps for the outputs of a - * given derivation. 
- */ - boost::concurrent_flat_map>> buildTrace; - DummyStore(ref config) : Store{*config} , config(config) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index d371c4e51..95c09078c 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -3,8 +3,6 @@ #include "nix/store/store-api.hh" -#include - namespace nix { struct DummyStore; diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 994918f90..c91f88a84 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -208,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override + void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index 1d69d3417..b89d0a1a0 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. */ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. 
diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index ab255fba8..b871aaee2 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index c7e0a4483..3424a39c9 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - bool operator==(const DrvOutput &) const = default; - auto operator<=>(const DrvOutput &) const = default; + GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); }; -struct UnkeyedRealisation +struct Realisation { + DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,35 +64,22 @@ struct UnkeyedRealisation */ std::map dependentRealisations; - std::string fingerprint(const DrvOutput & key) const; + std::string fingerprint() const; + void sign(const Signer &); + bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; + size_t checkSignatures(const PublicKeys & publicKeys) const; - void sign(const DrvOutput & key, const Signer &); + static std::set closure(Store &, const std::set &); + static void closure(Store &, const std::set &, 
std::set & res); - bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; + bool isCompatibleWith(const Realisation & other) const; - size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; - - const StorePath & getPath() const + StorePath getPath() const { return outPath; } - // TODO sketchy that it avoids signatures - GENERATE_CMP(UnkeyedRealisation, me->outPath); -}; - -struct Realisation : UnkeyedRealisation -{ - DrvOutput id; - - bool isCompatibleWith(const UnkeyedRealisation & other) const; - - static std::set closure(Store &, const std::set &); - - static void closure(Store &, const std::set &, std::set & res); - - bool operator==(const Realisation &) const = default; - auto operator<=>(const Realisation &) const = default; + GENERATE_CMP(Realisation, me->id, me->outPath); }; /** @@ -116,13 +103,12 @@ struct OpaquePath { StorePath path; - const StorePath & getPath() const + StorePath getPath() const { return path; } - bool operator==(const OpaquePath &) const = default; - auto operator<=>(const OpaquePath &) const = default; + GENERATE_CMP(OpaquePath, me->path); }; /** @@ -130,7 +116,7 @@ struct OpaquePath */ struct RealisedPath { - /** + /* * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -152,14 +138,13 @@ struct RealisedPath /** * Get the raw store path associated to this */ - const StorePath & path() const; + StorePath path() const; void closure(Store & store, Set & ret) const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - bool operator==(const RealisedPath &) const = default; - auto operator<=>(const RealisedPath &) const = default; + GENERATE_CMP(RealisedPath, me->raw); }; class MissingRealisation : public Error @@ -182,5 +167,4 @@ public: } // namespace nix -JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git 
a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index b152e054b..1aaf29d37 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index c9fd00513..1131ec975 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,7 +31,6 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); -struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -399,12 +398,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). 
*/ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -431,8 +430,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept = 0; + virtual void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; public: diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index f23feb8fb..2b000b3db 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput({*res, info.id}); + LocalStore::registerDrvOutput(*res); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, {[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) 
{ diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 6425819c5..ebc987ee0 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1586,7 +1586,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,13 +1598,14 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - UnkeyedRealisation{ + Realisation{ + .id = id, .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1630,13 +1631,13 @@ std::optional LocalStore::queryRealisation_(LocalStore } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = retrySQLite>( - [&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = + retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + 
callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index a31d149c2..7efaa4f86 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -360,12 +360,11 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - DrvOutput key{*outputHash, outputName}; - auto thisRealisation = store.queryRealisation(key); + auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert({*thisRealisation, std::move(key)}); + inputRealisations.insert(*thisRealisation); } } if (!inputNode.value.empty()) { diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index e08d5ee8a..febd67bd2 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert({*currentRealisation, currentDep}); + res.insert(*currentRealisation); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,25 +61,24 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -std::string UnkeyedRealisation::fingerprint(const DrvOutput & key) const +std::string Realisation::fingerprint() const { - nlohmann::json serialized = Realisation{*this, key}; + nlohmann::json serialized = *this; serialized.erase("signatures"); return serialized.dump(); } -void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) +void Realisation::sign(const Signer & signer) { - 
signatures.insert(signer.signDetached(fingerprint(key))); + signatures.insert(signer.signDetached(fingerprint())); } -bool UnkeyedRealisation::checkSignature( - const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const +bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(key), sig, publicKeys); + return verifyDetached(fingerprint(), sig, publicKeys); } -size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const +size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -87,18 +86,19 @@ size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKe size_t good = 0; for (auto & sig : signatures) - if (checkSignature(key, publicKeys, sig)) + if (checkSignature(publicKeys, sig)) good++; return good; } -const StorePath & RealisedPath::path() const +StorePath RealisedPath::path() const { - return std::visit([](auto && arg) -> auto & { return arg.getPath(); }, raw); + return std::visit([](auto && arg) { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const +bool Realisation::isCompatibleWith(const Realisation & other) const { + assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -UnkeyedRealisation adl_serializer::from_json(const json & json0) +Realisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,39 +157,25 @@ UnkeyedRealisation adl_serializer::from_json(const json & js for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) 
dependentRealisations.insert({DrvOutput::parse(jsonDepId), jsonDepOutPath}); - return UnkeyedRealisation{ + return Realisation{ + .id = DrvOutput::parse(valueAt(json, "id")), .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) +void adl_serializer::to_json(json & json, const Realisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { + {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } -Realisation adl_serializer::from_json(const json & json0) -{ - auto json = getObject(json0); - - return Realisation{ - static_cast(json0), - DrvOutput::parse(valueAt(json, "id")), - }; -} - -void adl_serializer::to_json(json & json, const Realisation & r) -{ - json = static_cast(r); - json["id"] = r.id.to_string(); -} - } // namespace nlohmann diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 8dd5bc064..a6994f844 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,7 +501,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +515,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(UnkeyedRealisation{.outPath = *outPaths.begin()}); + return 
std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -626,15 +626,13 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); + success.builtOutputs.emplace(output, *realisation); } else { success.builtOutputs.emplace( output, Realisation{ - UnkeyedRealisation{ - .outPath = outputPath, - }, - outputId, + .id = outputId, + .outPath = outputPath, }); } } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index 5270f7d10..a1cb41606 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index df00dc179..4ce6b15fa 100644 --- a/src/libstore/store-api.cc +++ 
b/src/libstore/store-api.cc @@ -598,8 +598,7 @@ void Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation(const DrvOutput & id, Callback> callback) noexcept { try { @@ -625,20 +624,20 @@ void Store::queryRealisation( auto callbackPtr = std::make_shared(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), {*info, id}); + config.getReference().render(/*FIXME withParams=*/false), *info); else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -646,9 +645,9 @@ void Store::queryRealisation( }}); } -std::shared_ptr Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -911,12 +910,11 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto * realisation = std::get_if(&path.raw)) { + if (auto realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } - auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -933,7 +931,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert({*currentChild, drvOutput}); + children.insert(*currentChild); } return children; }, @@ -1201,7 +1199,7 @@ void Store::signRealisation(Realisation & 
realisation) for (auto & secretKeyFile : secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - realisation.sign(realisation.id, signer); + realisation.sign(signer); } } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 7cf72fb84..a04056599 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1830,12 +1830,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{ - { - .outPath = newInfo.path, - }, - DrvOutput{oldinfo->outputHash, outputName}, - }; + auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 0b16b423c..571b6acca 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -222,22 +222,3 @@ public: }; } // namespace nix - -template<> -struct std::hash -{ - std::size_t operator()(const nix::Hash & hash) const noexcept - { - assert(hash.hashSize > sizeof(size_t)); - return *reinterpret_cast(&hash.hash); - } -}; - -namespace nix { - -inline std::size_t hash_value(const Hash & hash) -{ - return std::hash{}(hash); -} - -} // namespace nix From ce749454dc3e7685092cafdb4d1e05876a065b07 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 21:54:59 +0300 Subject: [PATCH 113/373] Revert "Merge pull request #14022 from obsidiansystems/derivation-resolution-goal" This reverts commit d02dca099f2f7411489b57fc5c97968013498f9a, reversing changes made to 
9bd09155ac7659f07dfefbd47e4e76ec499f38cd. --- .../build/derivation-building-goal.cc | 223 +++++++++++++++++- src/libstore/build/derivation-goal.cc | 97 +------- .../build/derivation-resolution-goal.cc | 210 ----------------- src/libstore/build/worker.cc | 24 +- .../store/build/derivation-building-goal.hh | 19 +- .../nix/store/build/derivation-goal.hh | 8 +- .../store/build/derivation-resolution-goal.hh | 82 ------- .../include/nix/store/build/worker.hh | 20 +- src/libstore/include/nix/store/meson.build | 1 - src/libstore/meson.build | 1 - tests/functional/build.sh | 9 +- tests/functional/ca/issue-13247.sh | 5 +- 12 files changed, 237 insertions(+), 462 deletions(-) delete mode 100644 src/libstore/build/derivation-resolution-goal.cc delete mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index fa819c96b..001816ca0 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" +#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -26,8 +27,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode, bool storeDerivation) - : Goal(worker, gaveUpOnSubstitution(storeDerivation)) + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + : Goal(worker, gaveUpOnSubstitution()) , drvPath(drvPath) , buildMode(buildMode) { @@ -124,10 +125,50 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. 
*/ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + /* Copy the input sources from the eval store to the build store. @@ -172,17 +213,177 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) /* Determine the full set of input paths. */ - if (storeDerivation) { - assert(drv->inputDrvs.map.empty()); - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, *drv); - } - + /* First, the input derivations. 
*/ { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. */ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). 
So we query the store + directly if there's a problem. */ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + Derivation drvResolved{std::move(*attempt)}; + + auto pathResolved = writeDerivation(worker.store, drvResolved); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + /* TODO https://github.com/NixOS/nix/issues/13247 we should + let the calling goal do this, so it has a chance to pass + just the output(s) it cares about. */ + auto resolvedDrvGoal = + worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + SingleDrvOutputs builtOutputs; + + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + for (auto & outputName : drvResolved.outputNames()) { + auto outputHash = get(outputHashes, outputName); + auto resolvedHash = get(resolvedHashes, outputName); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + worker.store.printStorePath(drvPath), + outputName); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, outputName); + if (take1) + return *take1; + + /* The above `get` should work. 
But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. */ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + outputName); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, outputName}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + builtOutputs.emplace(outputName, realisation); + } + + runPostBuildHook(worker.store, *logger, drvPath, outputPaths); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(status, std::move(builtOutputs)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { + for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. 
Can diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index cc3ba2b7b..5dfc334a8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,6 +1,5 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -30,9 +29,8 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode, - bool storeDerivation) - : Goal(worker, haveDerivation(storeDerivation)) + BuildMode buildMode) + : Goal(worker, haveDerivation()) , drvPath(drvPath) , wantedOutput(wantedOutput) , outputHash{[&] { @@ -66,7 +64,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) +Goal::Co DerivationGoal::haveDerivation() { trace("have derivation"); @@ -148,96 +146,9 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) worker.store.printStorePath(drvPath)); } - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - auto resolvedDrvGoal = - worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = 
resolvedResult.tryGetSuccess()) { - auto & success = *successP; - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - auto outputHash = get(outputHashes, wantedOutput); - auto resolvedHash = get(resolvedHashes, wantedOutput); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - wantedOutput); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, wantedOutput); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - wantedOutput); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, wantedOutput}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(status, std::move(realisation)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* Give up on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); + auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); /* We will finish with it ourselves, as if we were the derivational goal. 
*/ g->preserveException = true; diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc deleted file mode 100644 index 584169ef3..000000000 --- a/src/libstore/build/derivation-resolution-goal.cc +++ /dev/null @@ -1,210 +0,0 @@ -#include "nix/store/build/derivation-resolution-goal.hh" -#include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/worker.hh" -#include "nix/util/util.hh" -#include "nix/store/common-protocol.hh" -#include "nix/store/globals.hh" - -#include -#include -#include - -#include - -namespace nix { - -DerivationResolutionGoal::DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) - : Goal(worker, resolveDerivation()) - , drvPath(drvPath) -{ - drv = std::make_unique(drv_); - - name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); - trace("created"); - - /* Prevent the .chroot directory from being - garbage-collected. (See isActiveTempFile() in gc.cc.) */ - worker.store.addTempRoot(this->drvPath); -} - -void DerivationResolutionGoal::timedOut(Error && ex) {} - -std::string DerivationResolutionGoal::key() -{ - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "bd$"). */ - return "rd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); -} - -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - -/* At least one of the output paths could not be - produced using a substitute. So we have to build instead. 
*/ -Goal::Co DerivationResolutionGoal::resolveDerivation() -{ - Goals waitees; - - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - - co_await await(std::move(waitees)); - - trace("all inputs realised"); - - if (nrFailed != 0) { - auto msg = - fmt("Cannot build '%s'.\n" - "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", - Magenta(worker.store.printStorePath(drvPath)), - nrFailed, - nrFailed == 1 ? "dependency" : "dependencies"); - msg += showKnownOutputs(worker.store, *drv); - co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); - } - - /* Gather information necessary for computing the closure and/or - running the build hook. */ - - /* Determine the full set of input paths. 
*/ - - /* First, the input derivations. */ - { - auto & fullDrv = *drv; - - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); - - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. */ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). 
So we query the store - directly if there's a problem. */ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - - auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); - - resolvedDrv = - std::make_unique>(std::move(pathResolved), *std::move(attempt)); - } - } - - co_return amDone(ecSuccess, std::nullopt); -} - -} // namespace nix diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 53175a8c4..3e6e0bef0 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,7 +4,6 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -76,26 +75,15 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, - const Derivation & drv, - const OutputName & wantedOutput, - BuildMode buildMode, - bool storeDerivation) + const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) { - return initGoalIfNeeded( - derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); + return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationResolutionGoal(const StorePath & drvPath, 
const Derivation & drv, BuildMode buildMode) +std::shared_ptr +Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { - return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); -} - -std::shared_ptr Worker::makeDerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) -{ - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); } std::shared_ptr @@ -170,8 +158,6 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); - else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) - nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index ab063ff3f..edb496024 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,21 +29,8 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { - /** - * @param storeDerivation Whether to store the derivation in - * `worker.store`. This is useful for newly-resolved derivations. In this - * case, the derivation was not created a priori, e.g. purely (or close - * enough) from evaluation of the Nix language, but also depends on the - * exact content produced by upstream builds. 
It is strongly advised to - * have a permanent record of such a resolved derivation in order to - * faithfully reconstruct the build history. - */ DerivationBuildingGoal( - const StorePath & drvPath, - const Derivation & drv, - Worker & worker, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); ~DerivationBuildingGoal(); private: @@ -113,7 +100,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(bool storeDerivation); + Co gaveUpOnSubstitution(); Co tryToBuild(); /** @@ -168,7 +155,7 @@ private: JobCategory jobCategory() const override { - return JobCategory::Administration; + return JobCategory::Build; }; }; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 353e7c489..e05bf1c0b 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,16 +40,12 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; - /** - * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. - */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + BuildMode buildMode = bmNormal); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -84,7 +80,7 @@ private: /** * The states. */ - Co haveDerivation(bool storeDerivation); + Co haveDerivation(); /** * Return `std::nullopt` if the output is unknown, e.g. 
un unbuilt diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh deleted file mode 100644 index ebaab4f06..000000000 --- a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh +++ /dev/null @@ -1,82 +0,0 @@ -#pragma once -///@file - -#include "nix/store/derivations.hh" -#include "nix/store/derivation-options.hh" -#include "nix/store/build/derivation-building-misc.hh" -#include "nix/store/store-api.hh" -#include "nix/store/build/goal.hh" - -namespace nix { - -struct BuilderFailureError; - -/** - * A goal for resolving a derivation. Resolving a derivation (@see - * `Derivation::tryResolve`) simplifies its inputs, replacing - * `inputDrvs` with `inputSrcs. - * - * Conceptually, we resolve all derivations. For input-addressed - * derivations (that don't transtively depend on content-addressed - * derivations), however, we don't actually use the resolved derivation, - * because the output paths would appear invalid (if we tried to verify - * them), since they are computed from the original, unresolved inputs. - * - * That said, if we ever made the new flavor of input-addressing as described - * in issue #9259, then the input-addressing would be based on the resolved - * inputs, and we like the CA case *would* use the output of this goal. - * - * (The point of this discussion is not to randomly stuff information on - * a yet-unimplemented feature (issue #9259) in the codebase, but - * rather, to illustrate that there is no inherent tension between - * explicit derivation resolution and input-addressing in general. That - * tension only exists with the type of input-addressing we've - * historically used.) 
- */ -struct DerivationResolutionGoal : public Goal -{ - DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); - - /** - * If the derivation needed to be resolved, this is resulting - * resolved derivations and its path. - */ - std::unique_ptr> resolvedDrv; - - void timedOut(Error && ex) override; - -private: - - /** - * The path of the derivation. - */ - StorePath drvPath; - - /** - * The derivation stored at drvPath. - */ - std::unique_ptr drv; - - /** - * The remainder is state held during the build. - */ - - BuildMode buildMode; - - std::unique_ptr act; - - std::string key() override; - - /** - * The states. - */ - Co resolveDerivation(); - - JobCategory jobCategory() const override - { - return JobCategory::Administration; - }; -}; - -} // namespace nix diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9767590ac..a6de780c1 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,7 +16,6 @@ namespace nix { /* Forward definition. 
*/ struct DerivationTrampolineGoal; struct DerivationGoal; -struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -112,7 +111,6 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; - std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -223,23 +221,13 @@ public: const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + BuildMode buildMode = bmNormal); /** - * @ref DerivationResolutionGoal "derivation resolution goal" + * @ref DerivationBuildingGoal "derivation goal" */ - std::shared_ptr - makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); - - /** - * @ref DerivationBuildingGoal "derivation building goal" - */ - std::shared_ptr makeDerivationBuildingGoal( - const StorePath & drvPath, - const Derivation & drv, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + std::shared_ptr + makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); /** * @ref PathSubstitutionGoal "substitution goal" diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 1f04e357a..c9e4c36dd 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -18,7 +18,6 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', - 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e220e65cd..a3502c2e0 100644 --- a/src/libstore/meson.build +++ 
b/src/libstore/meson.build @@ -302,7 +302,6 @@ sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', - 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index c9a39438d..0a19ff7da 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,8 +178,7 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -# Precise number of errors depends on daemon version / goal refactorings -(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) +test "$(<<<"$out" grep -cE '^error:')" = 2 if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -187,13 +186,11 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -# Either x2 or x3 could have failed, x4 depends on both symmetrically -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -# Precise number of errors depends on daemon version / goal refactorings -(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) +test "$(<<<"$out" grep -cE '^error:')" = 3 if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." 
diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 705919513..686d90ced 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,4 +65,7 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] -[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] + +# Output should *not* be here, this is the bug +[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] +skipTest "bug is not yet fixed" From 14b119c948476cc24e83bb08880eeab47ff92986 Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Sun, 5 Oct 2025 12:07:10 -0400 Subject: [PATCH 114/373] libexpr: fixup ExprOpHasAttr() to take allocator reference --- src/libexpr/include/nix/expr/nixexpr.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index b66dba4f3..863a1369d 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -350,7 +350,7 @@ struct ExprOpHasAttr : Expr Expr * e; std::span attrPath; - ExprOpHasAttr(std::pmr::polymorphic_allocator alloc, Expr * e, std::vector attrPath) + ExprOpHasAttr(std::pmr::polymorphic_allocator & alloc, Expr * e, std::vector attrPath) : e(e) , attrPath({alloc.allocate_object(attrPath.size()), attrPath.size()}) { From 6c0d67769d99800cbbc294abba722d9ba3b19fcc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:29:15 +0200 Subject: [PATCH 115/373] ExternalDerivationBuilder: Pass inputPaths --- src/libstore/include/nix/store/globals.hh | 45 +++---------------- .../unix/build/external-derivation-builder.cc | 6 +++ 2 files changed, 12 insertions(+), 39 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 
ae8990eab..f97b261f8 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1401,48 +1401,15 @@ public: "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", "env": { "HOME": "/homeless-shelter", - "NIX_BUILD_CORES": "14", - "NIX_BUILD_TOP": "/build", - "NIX_LOG_FD": "2", - "NIX_STORE": "/nix/store", - "PATH": "/path-not-set", - "PWD": "/build", - "TEMP": "/build", - "TEMPDIR": "/build", - "TERM": "xterm-256color", - "TMP": "/build", - "TMPDIR": "/build", - "__structuredAttrs": "", - "buildInputs": "", "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", - "cmakeFlags": "", - "configureFlags": "", - "depsBuildBuild": "", - "depsBuildBuildPropagated": "", - "depsBuildTarget": "", - "depsBuildTargetPropagated": "", - "depsHostHost": "", - "depsHostHostPropagated": "", - "depsTargetTarget": "", - "depsTargetTargetPropagated": "", - "doCheck": "1", - "doInstallCheck": "1", - "mesonFlags": "", - "name": "hello-2.12.2", "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", - "out": "/nix/store/2yx2prgx…-hello-2.12.2", - "outputs": "out", - "patches": "", - "pname": "hello", - "postInstallCheck": "stat \"${!outputBin}/bin/hello\"\n", - "propagatedBuildInputs": "", - "propagatedNativeBuildInputs": "", - "src": "/nix/store/dw402azx…-hello-2.12.2.tar.gz", - "stdenv": "/nix/store/i8bw5nqg…-stdenv-linux", - "strictDeps": "", - "system": "aarch64-linux", - "version": "2.12.2" + … }, + "inputPaths": [ + "/nix/store/14dciax3mm8j70hjy4c0d68mds9ppx2s-glibc-2.32-54-dev", + "/nix/store/1azs5s8zc0z7m6sssvq1np0m7z873zml-gettext-0.21", + … + ], "realStoreDir": "/nix/store", "storeDir": "/nix/store", "system": "aarch64-linux", diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 4d3eba6db..e30a92db7 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -68,6 +68,12 @@ 
struct ExternalDerivationBuilder : DerivationBuilderImpl json.emplace("storeDir", store.storeDir); json.emplace("realStoreDir", store.config->realStoreDir.get()); json.emplace("system", drv.platform); + { + auto l = nlohmann::json::array(); + for (auto & i : inputPaths) + l.push_back(store.printStorePath(i)); + json.emplace("inputPaths", std::move(l)); + } // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit // that, see this comment by Eelco about how to make it not suck: From 68bd2e40f4629f760886e2934f1506c54c795415 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:33:29 +0200 Subject: [PATCH 116/373] ExternalDerivationBuilder: Pass the (scratch) outputs --- src/libstore/include/nix/store/globals.hh | 8 ++++++-- src/libstore/unix/build/external-derivation-builder.cc | 6 ++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index f97b261f8..385f8cd7a 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1403,13 +1403,17 @@ public: "HOME": "/homeless-shelter", "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2" … }, "inputPaths": [ - "/nix/store/14dciax3mm8j70hjy4c0d68mds9ppx2s-glibc-2.32-54-dev", - "/nix/store/1azs5s8zc0z7m6sssvq1np0m7z873zml-gettext-0.21", + "/nix/store/14dciax3…-glibc-2.32-54-dev", + "/nix/store/1azs5s8z…-gettext-0.21", … ], + "outputs": { + "out": "/nix/store/2yx2prgx…-hello-2.12.2" + }, "realStoreDir": "/nix/store", "storeDir": "/nix/store", "system": "aarch64-linux", diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index e30a92db7..12ac77542 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ 
b/src/libstore/unix/build/external-derivation-builder.cc @@ -74,6 +74,12 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl l.push_back(store.printStorePath(i)); json.emplace("inputPaths", std::move(l)); } + { + auto l = nlohmann::json::object(); + for (auto & i : scratchOutputs) + l.emplace(i.first, store.printStorePath(i.second)); + json.emplace("outputs", std::move(l)); + } // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit // that, see this comment by Eelco about how to make it not suck: From e9c5d721d871d5c78c577c5c47edc87c5e1af476 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:36:26 +0200 Subject: [PATCH 117/373] ExternalDerivationBuilder: Emit a version field --- src/libstore/include/nix/store/globals.hh | 3 ++- src/libstore/unix/build/external-derivation-builder.cc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 385f8cd7a..1b59bd6fc 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1419,7 +1419,8 @@ public: "system": "aarch64-linux", "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", "tmpDirInSandbox": "/build", - "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0" + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0", + "version": 1 } )", {}, // aliases diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 12ac77542..71cfd1a62 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -49,6 +49,7 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl auto json = nlohmann::json::object(); + json.emplace("version", 1); json.emplace("builder", drv.builder); { auto l = nlohmann::json::array(); From 8aa0acb9e8260c2713cabb8407a30ae54f6eebb5 Mon Sep 17 
00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 13:25:33 +0200 Subject: [PATCH 118/373] Don't build getPtsName() on Windows It's not needed. https://hydra.nixos.org/build/309215536 --- src/libutil/terminal.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 656847487..fe22146ab 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -179,9 +179,10 @@ std::pair getWindowSize() return *windowSize.lock(); } +#ifndef _WIN32 std::string getPtsName(int fd) { -#ifdef __APPLE__ +# ifdef __APPLE__ static std::mutex ptsnameMutex; // macOS doesn't have ptsname_r, use mutex-protected ptsname std::lock_guard lock(ptsnameMutex); @@ -190,7 +191,7 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return name; -#else +# else // Use thread-safe ptsname_r on platforms that support it // PTY names are typically short: // - Linux: /dev/pts/N (where N is usually < 1000) @@ -201,7 +202,8 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return buf; -#endif +# endif } +#endif } // namespace nix From 9f6ed7042986693eb76f338697ec446d1c69c88c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:04:58 +0200 Subject: [PATCH 119/373] release notes: 2.32.0 --- doc/manual/rl-next/c-api-byidx.md | 7 -- doc/manual/rl-next/c-api-lazy-accessors.md | 16 --- .../rl-next/cached-substituted-inputs.md | 10 -- doc/manual/rl-next/derivation-json.md | 15 --- doc/manual/rl-next/dropped-compat.md | 6 - doc/manual/rl-next/faster-nix-flake-check.md | 9 -- .../rl-next/http-binary-cache-compression.md | 19 --- doc/manual/rl-next/shorter-build-dir-names.md | 6 - doc/manual/source/SUMMARY.md.in | 1 + doc/manual/source/release-notes/rl-2.32.md | 112 ++++++++++++++++++ 10 files changed, 113 insertions(+), 88 deletions(-) delete mode 100644 doc/manual/rl-next/c-api-byidx.md delete mode 100644 
doc/manual/rl-next/c-api-lazy-accessors.md delete mode 100644 doc/manual/rl-next/cached-substituted-inputs.md delete mode 100644 doc/manual/rl-next/derivation-json.md delete mode 100644 doc/manual/rl-next/dropped-compat.md delete mode 100644 doc/manual/rl-next/faster-nix-flake-check.md delete mode 100644 doc/manual/rl-next/http-binary-cache-compression.md delete mode 100644 doc/manual/rl-next/shorter-build-dir-names.md create mode 100644 doc/manual/source/release-notes/rl-2.32.md diff --git a/doc/manual/rl-next/c-api-byidx.md b/doc/manual/rl-next/c-api-byidx.md deleted file mode 100644 index 9b5bb3fcb..000000000 --- a/doc/manual/rl-next/c-api-byidx.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -synopsis: "C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *`" -prs: [13987] ---- - -In order to accommodate a more optimized internal representation of attribute set merges these functions require -a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. diff --git a/doc/manual/rl-next/c-api-lazy-accessors.md b/doc/manual/rl-next/c-api-lazy-accessors.md deleted file mode 100644 index bd0604f0d..000000000 --- a/doc/manual/rl-next/c-api-lazy-accessors.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -synopsis: "C API: Add lazy attribute and list item accessors" -prs: [14030] ---- - -The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: - -- `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation -- `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation -- `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation - -These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. 
- -Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. - -The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. \ No newline at end of file diff --git a/doc/manual/rl-next/cached-substituted-inputs.md b/doc/manual/rl-next/cached-substituted-inputs.md deleted file mode 100644 index b0b53a213..000000000 --- a/doc/manual/rl-next/cached-substituted-inputs.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -synopsis: "Substituted flake inputs are no longer re-copied to the store" -prs: [14041] ---- - -Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, -which in turn would cause them to be re-copied to the store on initial -evaluation. Caching these inputs results in a near doubling of a performance in -some cases — especially on I/O-bound machines and when using commands that -fetch many inputs, like `nix flake archive/prefetch-inputs` diff --git a/doc/manual/rl-next/derivation-json.md b/doc/manual/rl-next/derivation-json.md deleted file mode 100644 index be7ab1cfe..000000000 --- a/doc/manual/rl-next/derivation-json.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -synopsis: Derivation JSON format now uses store path basenames (no store dir) only -prs: [13980] -issues: [13570] ---- - -Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, -because it requires the serializer/deserializer to take an extra paramater (the store dir). - -We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. -To start with, we are changing the JSON format for derivations because the `nix derivation` commands are ---- in addition to being formally unstable ---- less widely used than other unstable commands. 
- -See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. diff --git a/doc/manual/rl-next/dropped-compat.md b/doc/manual/rl-next/dropped-compat.md deleted file mode 100644 index d6cc7704a..000000000 --- a/doc/manual/rl-next/dropped-compat.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -synopsis: "Removed support for daemons and clients older than Nix 2.0" -prs: [13951] ---- - -We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018. diff --git a/doc/manual/rl-next/faster-nix-flake-check.md b/doc/manual/rl-next/faster-nix-flake-check.md deleted file mode 100644 index c195023c3..000000000 --- a/doc/manual/rl-next/faster-nix-flake-check.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -synopsis: "`nix flake check` now skips derivations that can be substituted" -prs: [13574] ---- - -Previously, `nix flake check` would evaluate and build/substitute all -derivations. Now, it will skip downloading derivations that can be substituted. -This can drastically decrease the time invocations take in environments where -checks may already be cached (like in CI). diff --git a/doc/manual/rl-next/http-binary-cache-compression.md b/doc/manual/rl-next/http-binary-cache-compression.md deleted file mode 100644 index 88f1de6d9..000000000 --- a/doc/manual/rl-next/http-binary-cache-compression.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -synopsis: "HTTP binary caches now support transparent compression for metadata" -prs: [] ---- - -HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, -reducing bandwidth usage and storage requirements. The compression is applied transparently using the -`Content-Encoding` header, allowing compatible clients to automatically decompress the files. 
- -Three new configuration options control this behavior: -- `narinfo-compression`: Compression method for `.narinfo` files -- `ls-compression`: Compression method for `.ls` files -- `log-compression`: Compression method for build logs in `log/` directory - -Example usage: -``` -nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... -nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... -``` diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md deleted file mode 100644 index e87fa5d04..000000000 --- a/doc/manual/rl-next/shorter-build-dir-names.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -synopsis: "Temporary build directories no longer include derivation names" -prs: [13839] ---- - -Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
\ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8fed98c2c..25e68811d 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -138,6 +138,7 @@ - [Contributing](development/contributing.md) - [Releases](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} + - [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md) - [Release 2.31 (2025-08-21)](release-notes/rl-2.31.md) - [Release 2.30 (2025-07-07)](release-notes/rl-2.30.md) - [Release 2.29 (2025-05-14)](release-notes/rl-2.29.md) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md new file mode 100644 index 000000000..5c1c314db --- /dev/null +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -0,0 +1,112 @@ +# Release 2.32.0 (2025-10-06) + +- C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987) + + In order to accommodate a more optimized internal representation of attribute set merges these functions require + a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. + +- C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030) + + The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: + + - `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation + - `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation + - `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation + + These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. 
+ + Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. + + The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. + +- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) + + Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, + which in turn would cause them to be re-copied to the store on initial + evaluation. Caching these inputs results in a near doubling of a performance in + some cases — especially on I/O-bound machines and when using commands that + fetch many inputs, like `nix flake archive/prefetch-inputs` + +- Derivation JSON format now uses store path basenames (no store dir) only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) + + Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, + because it requires the serializer/deserializer to take an extra paramater (the store dir). + + We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. + To start with, we are changing the JSON format for derivations because the `nix derivation` commands are + --- in addition to being formally unstable + --- less widely used than other unstable commands. + + See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. + +- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) + + We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. 
This first Nix release that supports this version is Nix 2.0, released in February 2018. + +- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) + + Previously, `nix flake check` would evaluate and build/substitute all + derivations. Now, it will skip downloading derivations that can be substituted. + This can drastically decrease the time invocations take in environments where + checks may already be cached (like in CI). + +- HTTP binary caches now support transparent compression for metadata + + HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, + reducing bandwidth usage and storage requirements. The compression is applied transparently using the + `Content-Encoding` header, allowing compatible clients to automatically decompress the files. + + Three new configuration options control this behavior: + - `narinfo-compression`: Compression method for `.narinfo` files + - `ls-compression`: Compression method for `.ls` files + - `log-compression`: Compression method for build logs in `log/` directory + + Example usage: + ``` + nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... + nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... + ``` + +- Temporary build directories no longer include derivation names [#13839](https://github.com/NixOS/nix/pull/13839) + + Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
+ + +## Contributors + + +This release was made possible by the following 32 contributors: + +- Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) +- dram [**(@dramforever)**](https://github.com/dramforever) +- Ephraim Siegfried [**(@EphraimSiegfried)**](https://github.com/EphraimSiegfried) +- Robert Hensing [**(@roberth)**](https://github.com/roberth) +- Taeer Bar-Yam [**(@Radvendii)**](https://github.com/Radvendii) +- Emily [**(@emilazy)**](https://github.com/emilazy) +- Jens Petersen [**(@juhp)**](https://github.com/juhp) +- Bernardo Meurer [**(@lovesegfault)**](https://github.com/lovesegfault) +- Jörg Thalheim [**(@Mic92)**](https://github.com/Mic92) +- Leandro Emmanuel Reina Kiperman [**(@kip93)**](https://github.com/kip93) +- Marie [**(@NyCodeGHG)**](https://github.com/NyCodeGHG) +- Ethan Evans [**(@ethanavatar)**](https://github.com/ethanavatar) +- Yaroslav Bolyukin [**(@CertainLach)**](https://github.com/CertainLach) +- Matej Urbas [**(@urbas)**](https://github.com/urbas) +- Jami Kettunen [**(@JamiKettunen)**](https://github.com/JamiKettunen) +- Clayton [**(@netadr)**](https://github.com/netadr) +- Grégory Marti [**(@gmarti)**](https://github.com/gmarti) +- Eelco Dolstra [**(@edolstra)**](https://github.com/edolstra) +- rszyma [**(@rszyma)**](https://github.com/rszyma) +- Philip Wilk [**(@philipwilk)**](https://github.com/philipwilk) +- John Ericson [**(@Ericson2314)**](https://github.com/Ericson2314) +- Tom Westerhout [**(@twesterhout)**](https://github.com/twesterhout) +- Tristan Ross [**(@RossComputerGuy)**](https://github.com/RossComputerGuy) +- Sergei Zimmerman [**(@xokdvium)**](https://github.com/xokdvium) +- Jean-François Roche [**(@jfroche)**](https://github.com/jfroche) +- Seth Flynn [**(@getchoo)**](https://github.com/getchoo) +- éclairevoyant [**(@eclairevoyant)**](https://github.com/eclairevoyant) +- Glen Huang [**(@hgl)**](https://github.com/hgl) +- osman - オスマン [**(@osbm)**](https://github.com/osbm) +- David McFarland 
[**(@corngood)**](https://github.com/corngood) +- Cole Helbling [**(@cole-h)**](https://github.com/cole-h) +- Sinan Mohd [**(@sinanmohd)**](https://github.com/sinanmohd) +- Philipp Otterbein From c1761b867b5ba1df81c5c2e87a05131bca9ce459 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:11:15 +0200 Subject: [PATCH 120/373] Contributors --- .../data/release-credits-email-to-handle.json | 23 ++++++++++++++++++- .../data/release-credits-handle-to-name.json | 21 ++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/maintainers/data/release-credits-email-to-handle.json b/maintainers/data/release-credits-email-to-handle.json index ea37afb90..0dbbf8fa6 100644 --- a/maintainers/data/release-credits-email-to-handle.json +++ b/maintainers/data/release-credits-email-to-handle.json @@ -203,5 +203,26 @@ "ConnorBaker01@Gmail.com": "ConnorBaker", "jsoo1@asu.edu": "jsoo1", "hsngrmpf+github@gmail.com": "DavHau", - "matthew@floxdev.com": "mkenigs" + "matthew@floxdev.com": "mkenigs", + "taeer@bar-yam.me": "Radvendii", + "beme@anthropic.com": "lovesegfault", + "osbm@osbm.dev": "osbm", + "jami.kettunen@protonmail.com": "JamiKettunen", + "ephraim.siegfried@hotmail.com": "EphraimSiegfried", + "rszyma.dev@gmail.com": "rszyma", + "tristan.ross@determinate.systems": "RossComputerGuy", + "corngood@gmail.com": "corngood", + "jfroche@pyxel.be": "jfroche", + "848000+eclairevoyant@users.noreply.github.com": "eclairevoyant", + "petersen@redhat.com": "juhp", + "dramforever@live.com": "dramforever", + "me@glenhuang.com": "hgl", + "philip.wilk@fivium.co.uk": "philipwilk", + "me@nycode.dev": "NyCodeGHG", + "14264576+twesterhout@users.noreply.github.com": "twesterhout", + "sinan@sinanmohd.com": "sinanmohd", + "42688647+netadr@users.noreply.github.com": "netadr", + "matej.urbas@gmail.com": "urbas", + "ethanalexevans@gmail.com": "ethanavatar", + "greg.marti@gmail.com": "gmarti" } \ No newline at end of file diff --git 
a/maintainers/data/release-credits-handle-to-name.json b/maintainers/data/release-credits-handle-to-name.json index e2510548d..8abffc65c 100644 --- a/maintainers/data/release-credits-handle-to-name.json +++ b/maintainers/data/release-credits-handle-to-name.json @@ -177,5 +177,24 @@ "avnik": "Alexander V. Nikolaev", "DavHau": null, "aln730": "AGawas", - "vog": "Volker Diels-Grabsch" + "vog": "Volker Diels-Grabsch", + "corngood": "David McFarland", + "twesterhout": "Tom Westerhout", + "JamiKettunen": "Jami Kettunen", + "dramforever": "dram", + "philipwilk": "Philip Wilk", + "netadr": "Clayton", + "NyCodeGHG": "Marie", + "jfroche": "Jean-Fran\u00e7ois Roche", + "urbas": "Matej Urbas", + "osbm": "osman - \u30aa\u30b9\u30de\u30f3", + "rszyma": null, + "eclairevoyant": "\u00e9clairevoyant", + "Radvendii": "Taeer Bar-Yam", + "sinanmohd": "Sinan Mohd", + "ethanavatar": "Ethan Evans", + "gmarti": "Gr\u00e9gory Marti", + "lovesegfault": "Bernardo Meurer", + "EphraimSiegfried": "Ephraim Siegfried", + "hgl": "Glen Huang" } \ No newline at end of file From 0376112a512b7fb8d283e613d6ed6419e741c189 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:11:24 +0200 Subject: [PATCH 121/373] Organize release notes --- doc/manual/source/release-notes/rl-2.32.md | 60 ++++++++++------------ 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 5c1c314db..885e86631 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -1,10 +1,26 @@ # Release 2.32.0 (2025-10-06) +## Incompatible changes + +- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) + + We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. 
This first Nix release that supports this version is Nix 2.0, released in February 2018. + +- Derivation JSON format now uses store path basenames only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) + + Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell) has shown that the use of the store directory in JSON formats is an impediment to systematic JSON formats, because it requires the serializer/deserializer to take an extra parameter (the store directory). + + We ultimately want to rectify this issue with all JSON formats to the extent allowed by our stability promises. To start with, we are changing the JSON format for derivations because the `nix derivation` commands are — in addition to being formally unstable — less widely used than other unstable commands. + + See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. + - C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987) In order to accommodate a more optimized internal representation of attribute set merges these functions require a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. +## New features + - C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030) + The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: @@ -19,37 +35,6 @@ The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. 
-- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) - - Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, - which in turn would cause them to be re-copied to the store on initial - evaluation. Caching these inputs results in a near doubling of a performance in - some cases — especially on I/O-bound machines and when using commands that - fetch many inputs, like `nix flake archive/prefetch-inputs` - -- Derivation JSON format now uses store path basenames (no store dir) only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) - - Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, - because it requires the serializer/deserializer to take an extra paramater (the store dir). - - We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. - To start with, we are changing the JSON format for derivations because the `nix derivation` commands are - --- in addition to being formally unstable - --- less widely used than other unstable commands. - - See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. - -- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) - - We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018. - -- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) - - Previously, `nix flake check` would evaluate and build/substitute all - derivations. 
Now, it will skip downloading derivations that can be substituted. - This can drastically decrease the time invocations take in environments where - checks may already be cached (like in CI). - - HTTP binary caches now support transparent compression for metadata HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, @@ -71,10 +56,21 @@ Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. +## Performance improvements + +- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) + + Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`. + +- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) + + Previously, `nix flake check` would evaluate and build/substitute all + derivations. Now, it will skip downloading derivations that can be substituted. + This can drastically decrease the time invocations take in environments where + checks may already be cached (like in CI). 
## Contributors - This release was made possible by the following 32 contributors: - Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) From f4e44040d4c92d4ca87601c437922962dffae548 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:26:29 +0200 Subject: [PATCH 122/373] Release note for external derivation builders --- doc/manual/source/release-notes/rl-2.32.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 885e86631..c2f0eb27f 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -56,6 +56,12 @@ Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. +- External derivation builders [#14145](https://github.com/NixOS/nix/pull/14145) + + These are helper programs that Nix calls to perform derivations for specified system types, e.g. by using QEMU to emulate a different type of platform. For more information, see the [`external-builders` setting](../command-ref/conf-file.md#conf-external-builders). + + This is currently an experimental feature. 
+ ## Performance improvements - Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) From 9abcc68ad1b1eabd03c56969d9df8b1330039817 Mon Sep 17 00:00:00 2001 From: Tristan Ross Date: Fri, 19 Sep 2025 09:48:08 -0700 Subject: [PATCH 123/373] libstore-c: add nix_store_get_fs_closure --- src/libstore-c/nix_api_store.cc | 30 ++++++++++++++++++++++++++++++ src/libstore-c/nix_api_store.h | 24 ++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index c4c17f127..6ee792fc3 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -126,6 +126,36 @@ StorePath * nix_store_parse_path(nix_c_context * context, Store * store, const c NIXC_CATCH_ERRS_NULL } +nix_err nix_store_get_fs_closure( + nix_c_context * context, + Store * store, + const StorePath * store_path, + bool flip_direction, + bool include_outputs, + bool include_derivers, + void * userdata, + void (*callback)(nix_c_context * context, void * userdata, const StorePath * store_path)) +{ + if (context) + context->last_err_code = NIX_OK; + try { + const auto nixStore = store->ptr; + + nix::StorePathSet set; + nixStore->computeFSClosure(store_path->path, set, flip_direction, include_outputs, include_derivers); + + if (callback) { + for (const auto & path : set) { + const StorePath tmp{path}; + callback(context, userdata, &tmp); + if (context && context->last_err_code != NIX_OK) + return context->last_err_code; + } + } + } + NIXC_CATCH_ERRS +} + nix_err nix_store_realise( nix_c_context * context, Store * store, diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index e76e376b4..fd7ce068a 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -245,6 +245,30 @@ void nix_derivation_free(nix_derivation * drv); */ nix_err nix_store_copy_closure(nix_c_context * context, Store * srcStore, Store * dstStore, 
StorePath * path); +/** + * @brief Gets the closure of a specific store path + * + * @note The callback borrows each StorePath only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_path The path to compute from + * @param[in] flip_direction If true, compute the reverse closure (referrers instead of references) + * @param[in] include_outputs Whether to also include the outputs of derivations in the closure + * @param[in] include_derivers Whether to also include the derivers of the paths in the closure + * @param[in] userdata The userdata to pass to the callback + * @param[in] callback The function to call for every store path, in no particular order + */ +nix_err nix_store_get_fs_closure( + nix_c_context * context, + Store * store, + const StorePath * store_path, + bool flip_direction, + bool include_outputs, + bool include_derivers, + void * userdata, + void (*callback)(nix_c_context * context, void * userdata, const StorePath * store_path)); + // cffi end #ifdef __cplusplus } From 7f22a40e3b515d0a99233a1eb36ef8191628629f Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 15:58:47 +0000 Subject: [PATCH 124/373] build(libstore): assert withAWS xor withCurlS3 --- src/libstore/package.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 1c08e466e..0eb8e3687 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -34,6 +34,9 @@ let inherit (lib) fileset; in +assert lib.assertMsg (!withAWS || !withCurlS3) + "withAWS and withCurlS3 are mutually exclusive - cannot enable both S3 implementations simultaneously"; + mkMesonLibrary (finalAttrs: { pname = "nix-store"; inherit version; From 8c28283876799be6ef21a228e8c6d8168118ed86 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 15:48:24 +0000 Subject: [PATCH 125/373] ci: test without s3 and with curl-based-s3 --- .github/workflows/ci.yml | 57 ++++++++++++++++++++++++++++++++----- ci/gha/tests/default.nix | 8 ++++++ ci/gha/tests/wrapper.nix | 3 ++ ci/gha/vm-tests/wrapper.nix | 45 
+++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 7 deletions(-) create mode 100644 ci/gha/vm-tests/wrapper.nix diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcf0814d8..00a808951 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,18 +65,42 @@ jobs: instrumented: false primary: true stdenv: stdenv + withAWS: true + withCurlS3: false + # TODO: remove once curl-based-s3 fully lands + - scenario: on ubuntu (no s3) + runs-on: ubuntu-24.04 + os: linux + instrumented: false + primary: false + stdenv: stdenv + withAWS: false + withCurlS3: false + # TODO: remove once curl-based-s3 fully lands + - scenario: on ubuntu (curl s3) + runs-on: ubuntu-24.04 + os: linux + instrumented: false + primary: false + stdenv: stdenv + withAWS: false + withCurlS3: true - scenario: on macos runs-on: macos-14 os: darwin instrumented: false primary: true stdenv: stdenv + withAWS: true + withCurlS3: false - scenario: on ubuntu (with sanitizers / coverage) runs-on: ubuntu-24.04 os: linux instrumented: true primary: false stdenv: clangStdenv + withAWS: true + withCurlS3: false name: tests ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 60 @@ -99,7 +123,9 @@ jobs: run: | nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" + --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} - name: Run flake checks and prepare the installer tarball run: | ci/gha/tests/build-checks @@ -110,6 +136,8 @@ jobs: nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ --out-link coverage-reports cat 
coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY if: ${{ matrix.instrumented }} @@ -240,6 +268,18 @@ jobs: vm_tests: needs: basic-checks + strategy: + fail-fast: false + matrix: + include: + # TODO: remove once curl-based-s3 fully lands + - scenario: legacy s3 + withAWS: true + withCurlS3: false + - scenario: curl s3 + withAWS: false + withCurlS3: true + name: vm_tests (${{ matrix.scenario }}) runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 @@ -250,13 +290,16 @@ jobs: experimental-features = nix-command flakes github_token: ${{ secrets.GITHUB_TOKEN }} - uses: DeterminateSystems/magic-nix-cache-action@main - - run: | + - name: Build VM tests + run: | nix build -L \ - .#hydraJobs.tests.functional_user \ - .#hydraJobs.tests.githubFlakes \ - .#hydraJobs.tests.nix-docker \ - .#hydraJobs.tests.tarballFlakes \ - ; + --file ci/gha/vm-tests/wrapper.nix \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ + functional_user \ + githubFlakes \ + nix-docker \ + tarballFlakes flake_regressions: needs: vm_tests diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index b89d51c76..bbcd7e6b7 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -12,6 +12,8 @@ componentTestsPrefix ? "", withSanitizers ? false, withCoverage ? false, + withAWS ? null, + withCurlS3 ? null, ... }: @@ -65,6 +67,12 @@ rec { # Boehm is incompatible with ASAN. nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; + # Override AWS configuration if specified + nix-store = prev.nix-store.override ( + lib.optionalAttrs (withAWS != null) { inherit withAWS; } + // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } + ); + mesonComponentOverrides = lib.composeManyExtensions componentOverrides; # Unclear how to make Perl bindings work with a dynamically linked ASAN. 
nix-perl-bindings = if withSanitizers then null else prev.nix-perl-bindings; diff --git a/ci/gha/tests/wrapper.nix b/ci/gha/tests/wrapper.nix index dc280ebbb..c1655f8c0 100644 --- a/ci/gha/tests/wrapper.nix +++ b/ci/gha/tests/wrapper.nix @@ -5,6 +5,8 @@ stdenv ? "stdenv", componentTestsPrefix ? "", withInstrumentation ? false, + withAWS ? null, + withCurlS3 ? null, }@args: import ./. ( args @@ -12,5 +14,6 @@ import ./. ( getStdenv = p: p.${stdenv}; withSanitizers = withInstrumentation; withCoverage = withInstrumentation; + inherit withAWS withCurlS3; } ) diff --git a/ci/gha/vm-tests/wrapper.nix b/ci/gha/vm-tests/wrapper.nix new file mode 100644 index 000000000..2ca80974c --- /dev/null +++ b/ci/gha/vm-tests/wrapper.nix @@ -0,0 +1,45 @@ +{ + nixFlake ? builtins.getFlake ("git+file://" + toString ../../..), + system ? "x86_64-linux", + withAWS ? null, + withCurlS3 ? null, +}: + +let + pkgs = nixFlake.inputs.nixpkgs.legacyPackages.${system}; + lib = pkgs.lib; + + # Create base nixComponents using the flake's makeComponents + baseNixComponents = nixFlake.lib.makeComponents { + inherit pkgs; + }; + + # Override nixComponents if AWS parameters are specified + nixComponents = + if (withAWS == null && withCurlS3 == null) then + baseNixComponents + else + baseNixComponents.overrideScope ( + final: prev: { + nix-store = prev.nix-store.override ( + lib.optionalAttrs (withAWS != null) { inherit withAWS; } + // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } + ); + } + ); + + # Import NixOS tests with the overridden nixComponents + tests = import ../../../tests/nixos { + inherit lib pkgs nixComponents; + nixpkgs = nixFlake.inputs.nixpkgs; + inherit (nixFlake.inputs) nixpkgs-23-11; + }; +in +{ + inherit (tests) + functional_user + githubFlakes + nix-docker + tarballFlakes + ; +} From 776038f842d5b4844f9f3411a698733b1d1c0547 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 6 Oct 2025 17:09:34 +0000 Subject: [PATCH 126/373] docs(release-notes): note 
fix for fetchTarball/fetchurl substitution --- doc/manual/source/release-notes/rl-2.32.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index c2f0eb27f..04f06e6b1 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -75,6 +75,10 @@ This can drastically decrease the time invocations take in environments where checks may already be cached (like in CI). +- `fetchTarball` and `fetchurl` now correctly substitute (#14138) + + At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe. + ## Contributors This release was made possible by the following 32 contributors: From 8f71ef7edee5876af20df403d38d5ef7c4d81008 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 19:27:30 +0200 Subject: [PATCH 127/373] Update doc/manual/source/release-notes/rl-2.32.md Co-authored-by: Taeer Bar-Yam --- doc/manual/source/release-notes/rl-2.32.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 04f06e6b1..d85a4c2ea 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -78,7 +78,9 @@ - `fetchTarball` and `fetchurl` now correctly substitute (#14138) At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe. 
+- Started moving AST allocations into a bump allocator [#14088](https://github.com/NixOS/nix/issues/14088) + This leaves smaller, immutable structures in the AST. So far this saves about 2% memory on a NixOS config evaluation. ## Contributors This release was made possible by the following 32 contributors: From 0068ee6ca72b0596b67117823e2c73343bade0c0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 6 Oct 2025 22:16:21 +0300 Subject: [PATCH 128/373] Release note for attrset optimization --- doc/manual/source/release-notes/rl-2.32.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index d85a4c2ea..3a925198d 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -64,6 +64,16 @@ ## Performance improvements +- Optimize memory usage of attribute set merges [#13987](https://github.com/NixOS/nix/pull/13987) + + [Attribute set update operations](@docroot@/language/operators.md#update) have been optimized to + reduce reallocations in cases when the second operand is small. + + For typical evaluations of nixpkgs this optimization leads to ~20% less memory allocated in total + without significantly affecting evaluation performance. + + See [eval-attrset-update-layer-rhs-threshold](@docroot@/command-ref/conf-file.md#conf-eval-attrset-update-layer-rhs-threshold) + - Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`. 
From 242f3625675cc06069edfd0936ad6f42acb068a8 Mon Sep 17 00:00:00 2001 From: Samuel Connelly <140354451+myclevorname@users.noreply.github.com> Date: Fri, 3 Oct 2025 18:41:01 -0400 Subject: [PATCH 129/373] libutil: Throw if `str("contents")` not found This was broken in 7aa3e7e3a5281acf350eff0fe039656cd4986e2c (since 2.25). --- src/libutil-tests/archive.cc | 47 ++++++++++++++++++ .../nars/invalid-tag-instead-of-contents.nar | Bin 0 -> 104 bytes src/libutil-tests/meson.build | 1 + src/libutil/archive.cc | 6 ++- 4 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 src/libutil-tests/archive.cc create mode 100644 src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar diff --git a/src/libutil-tests/archive.cc b/src/libutil-tests/archive.cc new file mode 100644 index 000000000..386f7b857 --- /dev/null +++ b/src/libutil-tests/archive.cc @@ -0,0 +1,47 @@ +#include "nix/util/archive.hh" +#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/gmock-matchers.hh" + +#include + +namespace nix { + +namespace { + +class NarTest : public CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "nars"; + +public: + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / (std::string(testStem) + ".nar"); + } +}; + +class InvalidNarTest : public NarTest, public ::testing::WithParamInterface> +{}; + +} // namespace + +TEST_P(InvalidNarTest, throwsErrorMessage) +{ + const auto & [name, message] = GetParam(); + readTest(name, [&](const std::string & narContents) { + ASSERT_THAT( + [&]() { + StringSource source{narContents}; + NullFileSystemObjectSink sink; + parseDump(sink, source); + }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher(message))); + }); +} + +INSTANTIATE_TEST_SUITE_P( + NarTest, + InvalidNarTest, + ::testing::Values( + std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"})); + +} // namespace nix 
diff --git a/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar b/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar new file mode 100644 index 0000000000000000000000000000000000000000..80dbf5a12ff8cd03fb1cadcc8a827982d1f9d5aa GIT binary patch literal 104 zcmd;OfPlQr3f;t_ Date: Tue, 7 Oct 2025 17:15:28 +0200 Subject: [PATCH 130/373] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 7cca401c7..3afbaeb2b 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.32.0 +2.33.0 From b0f567e18b5bacb0ec2faadad24b321fbb60c08b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 Oct 2025 17:16:57 +0200 Subject: [PATCH 131/373] Update mergify.yml --- .mergify.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 1c220045a..8fdcb05b4 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -172,3 +172,14 @@ pull_request_rules: labels: - automatic backport - merge-queue + + - name: backport patches to 2.32 + conditions: + - label=backport 2.32-maintenance + actions: + backport: + branches: + - "2.32-maintenance" + labels: + - automatic backport + - merge-queue From 63e8b5f94aa6d9a4f3fb68f2b51e3e3a1b1457d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 5 Oct 2025 07:17:15 +0200 Subject: [PATCH 132/373] ci: Switch away from mergify to backport action We want to use github native queues. 
--- .github/workflows/backport.yml | 37 +++++++ .mergify.yml | 185 --------------------------------- 2 files changed, 37 insertions(+), 185 deletions(-) create mode 100644 .github/workflows/backport.yml delete mode 100644 .mergify.yml diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml new file mode 100644 index 000000000..99b75621e --- /dev/null +++ b/.github/workflows/backport.yml @@ -0,0 +1,37 @@ +name: Backport +on: + pull_request_target: + types: [closed, labeled] +permissions: + contents: read +jobs: + backport: + name: Backport Pull Request + permissions: + # for korthout/backport-action + contents: write + pull-requests: write + if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith(github.event.label.name, 'backport')) + runs-on: ubuntu-24.04-arm + steps: + - name: Generate GitHub App token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.CI_APP_ID }} + private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + # required to find all branches + fetch-depth: 0 + - name: Create backport PRs + uses: korthout/backport-action@d07416681cab29bf2661702f925f020aaa962997 # v3.4.1 + id: backport + with: + # Config README: https://github.com/korthout/backport-action#backport-action + github_token: ${{ steps.generate-token.outputs.token }} + github_workspace: ${{ github.workspace }} + auto_merge_enabled: true + pull_description: |- + Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. 
diff --git a/.mergify.yml b/.mergify.yml deleted file mode 100644 index 8fdcb05b4..000000000 --- a/.mergify.yml +++ /dev/null @@ -1,185 +0,0 @@ -queue_rules: - - name: default - # all required tests need to go here - merge_conditions: - - check-success=tests on macos - - check-success=tests on ubuntu - - check-success=installer test on macos - - check-success=installer test on ubuntu - - check-success=vm_tests - batch_size: 5 - -pull_request_rules: - - name: merge using the merge queue - conditions: - - base~=master|.+-maintenance - - label~=merge-queue|dependencies - actions: - queue: {} - -# The rules below will first create backport pull requests and put those in a merge queue. - - - name: backport patches to 2.18 - conditions: - - label=backport 2.18-maintenance - actions: - backport: - branches: - - 2.18-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.19 - conditions: - - label=backport 2.19-maintenance - actions: - backport: - branches: - - 2.19-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.20 - conditions: - - label=backport 2.20-maintenance - actions: - backport: - branches: - - 2.20-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.21 - conditions: - - label=backport 2.21-maintenance - actions: - backport: - branches: - - 2.21-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.22 - conditions: - - label=backport 2.22-maintenance - actions: - backport: - branches: - - 2.22-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.23 - conditions: - - label=backport 2.23-maintenance - actions: - backport: - branches: - - 2.23-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.24 - conditions: - - label=backport 2.24-maintenance - actions: - backport: - branches: - - "2.24-maintenance" - labels: - 
- automatic backport - - merge-queue - - - name: backport patches to 2.25 - conditions: - - label=backport 2.25-maintenance - actions: - backport: - branches: - - "2.25-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.26 - conditions: - - label=backport 2.26-maintenance - actions: - backport: - branches: - - "2.26-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.27 - conditions: - - label=backport 2.27-maintenance - actions: - backport: - branches: - - "2.27-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.28 - conditions: - - label=backport 2.28-maintenance - actions: - backport: - branches: - - "2.28-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.29 - conditions: - - label=backport 2.29-maintenance - actions: - backport: - branches: - - "2.29-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.30 - conditions: - - label=backport 2.30-maintenance - actions: - backport: - branches: - - "2.30-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.31 - conditions: - - label=backport 2.31-maintenance - actions: - backport: - branches: - - "2.31-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.32 - conditions: - - label=backport 2.32-maintenance - actions: - backport: - branches: - - "2.32-maintenance" - labels: - - automatic backport - - merge-queue From 75b18a6e47f30381d838d4f0c8a5d9905452fea2 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 00:51:18 +0300 Subject: [PATCH 133/373] maintainers: Remove mergify note from release-process.md --- maintainers/release-process.md | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/release-process.md b/maintainers/release-process.md index 790618b7f..68de3b677 100644 --- 
a/maintainers/release-process.md +++ b/maintainers/release-process.md @@ -142,7 +142,6 @@ release: $ git pull $ NEW_VERSION=2.13.0 $ echo $NEW_VERSION > .version - $ ... edit .mergify.yml to add the previous version ... $ git checkout -b bump-$NEW_VERSION $ git commit -a -m 'Bump version' $ git push --set-upstream origin bump-$NEW_VERSION From c5b88c22fa2033fb10ee16ee2849ca46847806ea Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 7 Oct 2025 01:35:37 +0300 Subject: [PATCH 134/373] dev-shell: Disable separateDebugInfo This breaks gdb pretty-printers inserted into .debug_gdb_scripts section, because it implies --compress-debug-sections=zlib, -Wa,--compress-debug-sections. This is very unfortunate, because then gdb can't use pretty printers for Boost.Unordered (which are very useful, since boost::unordered_flat_map is impossible to debug). This seems perfectly fine to disable in the dev-shell for the time being. See [1-3] for further references. With this change I'm able to use boost's pretty-printers out of the box: ``` p *importResolutionCache $2 = boost::concurrent_flat_map with 1 elements = {[{accessor = {p = std::shared_ptr (use count 5, weak count 1) = { get() = 0x555555d830a8}}, path = {static root = {static root = , path = "/"}, path = "/derivation-internal.nix"}}] = {accessor = {p = std::shared_ptr (use count 5, weak count 1) = { get() = 0x555555d830a8}}, path = {static root = {static root = , path = "/"}, path = "/derivation-internal.nix"}}} ``` When combined with a simple `add-auto-load-safe-path ~/code` in .gdbinit [1]: https://gerrit.lix.systems/c/lix/+/3880 [2]: https://git.lix.systems/lix-project/lix/issues/1003 [3]: https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html --- packaging/dev-shell.nix | 73 +++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 32 deletions(-) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index ccfb9c4ae..37e92e363 100644 --- 
b/packaging/dev-shell.nix @@ -70,6 +70,9 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( # We use this shell with the local checkout, not unpackPhase. src = null; + # Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html + # Remove when gdb fix is rolled out everywhere. + separateDebugInfo = false; env = { # For `make format`, to work without installing pre-commit @@ -93,38 +96,44 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - ++ pkgs.nixComponents2.nix-util.nativeBuildInputs - ++ pkgs.nixComponents2.nix-store.nativeBuildInputs - ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs - ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs - ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs - ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs - ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs - ++ lib.optional ( - !buildCanExecuteHost - # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 - && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) - && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages - && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) - ) pkgs.buildPackages.mesonEmulatorHook - ++ [ - pkgs.buildPackages.cmake - pkgs.buildPackages.gnused - pkgs.buildPackages.shellcheck - pkgs.buildPackages.changelog-d - modular.pre-commit.settings.package - (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) - pkgs.buildPackages.nixfmt-rfc-style - pkgs.buildPackages.shellcheck - pkgs.buildPackages.gdb - ] - ++ lib.optional 
(stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( - lib.hiPrio pkgs.buildPackages.clang-tools - ) - ++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + let + inputs = + attrs.nativeBuildInputs or [ ] + ++ pkgs.nixComponents2.nix-util.nativeBuildInputs + ++ pkgs.nixComponents2.nix-store.nativeBuildInputs + ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs + ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs + ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs + ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs + ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs + ++ lib.optional ( + !buildCanExecuteHost + # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 + && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) + && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages + && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) + ) pkgs.buildPackages.mesonEmulatorHook + ++ [ + pkgs.buildPackages.cmake + pkgs.buildPackages.gnused + pkgs.buildPackages.shellcheck + pkgs.buildPackages.changelog-d + modular.pre-commit.settings.package + (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) + pkgs.buildPackages.nixfmt-rfc-style + pkgs.buildPackages.shellcheck + pkgs.buildPackages.gdb + ] + ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( + lib.hiPrio pkgs.buildPackages.clang-tools + ) + ++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + in + # FIXME: separateDebugInfo = false doesn't actually prevent -Wa,--compress-debug-sections + # from making its way into NIX_CFLAGS_COMPILE. 
+ lib.filter (p: !lib.hasInfix "separate-debug-info" p) inputs; buildInputs = [ pkgs.gbenchmark From 0619351326bb7b7aa2a05d7e97a71ea61f8a7bff Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 01:59:04 +0300 Subject: [PATCH 135/373] tests: Move invalid nar tests from tests/functional to libutil-tests Since 242f3625675cc06069edfd0936ad6f42acb068a8 we have better infrastructure for this kind of tests. --- src/libutil-tests/archive.cc | 16 +++++++++- .../libutil-tests/data/nars}/dot.nar | Bin .../libutil-tests/data/nars}/dotdot.nar | Bin .../libutil-tests/data/nars}/empty.nar | Bin .../data/nars}/executable-after-contents.nar | Bin .../data/nars}/name-after-node.nar | Bin .../data/nars}/nul-character.nar | Bin .../libutil-tests/data/nars}/slash.nar | Bin tests/functional/nars.sh | 28 ------------------ 9 files changed, 15 insertions(+), 29 deletions(-) rename {tests/functional => src/libutil-tests/data/nars}/dot.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/dotdot.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/empty.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/executable-after-contents.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/name-after-node.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/nul-character.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/slash.nar (100%) diff --git a/src/libutil-tests/archive.cc b/src/libutil-tests/archive.cc index 386f7b857..427b29d41 100644 --- a/src/libutil-tests/archive.cc +++ b/src/libutil-tests/archive.cc @@ -42,6 +42,20 @@ INSTANTIATE_TEST_SUITE_P( NarTest, InvalidNarTest, ::testing::Values( - std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"})); + std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"}, + // Unpacking a NAR with a NUL character in a file name should fail. 
+ std::pair{"nul-character", "bad archive: NAR contains invalid file name 'f"}, + // Likewise for a '.' filename. + std::pair{"dot", "bad archive: NAR contains invalid file name '.'"}, + // Likewise for a '..' filename. + std::pair{"dotdot", "bad archive: NAR contains invalid file name '..'"}, + // Likewise for a filename containing a slash. + std::pair{"slash", "bad archive: NAR contains invalid file name 'x/y'"}, + // Likewise for an empty filename. + std::pair{"empty", "bad archive: NAR contains invalid file name ''"}, + // Test that the 'executable' field cannot come before the 'contents' field. + std::pair{"executable-after-contents", "bad archive: expected tag ')', got 'executable'"}, + // Test that the 'name' field cannot come before the 'node' field in a directory entry. + std::pair{"name-after-node", "bad archive: expected tag 'name'"})); } // namespace nix diff --git a/tests/functional/dot.nar b/src/libutil-tests/data/nars/dot.nar similarity index 100% rename from tests/functional/dot.nar rename to src/libutil-tests/data/nars/dot.nar diff --git a/tests/functional/dotdot.nar b/src/libutil-tests/data/nars/dotdot.nar similarity index 100% rename from tests/functional/dotdot.nar rename to src/libutil-tests/data/nars/dotdot.nar diff --git a/tests/functional/empty.nar b/src/libutil-tests/data/nars/empty.nar similarity index 100% rename from tests/functional/empty.nar rename to src/libutil-tests/data/nars/empty.nar diff --git a/tests/functional/executable-after-contents.nar b/src/libutil-tests/data/nars/executable-after-contents.nar similarity index 100% rename from tests/functional/executable-after-contents.nar rename to src/libutil-tests/data/nars/executable-after-contents.nar diff --git a/tests/functional/name-after-node.nar b/src/libutil-tests/data/nars/name-after-node.nar similarity index 100% rename from tests/functional/name-after-node.nar rename to src/libutil-tests/data/nars/name-after-node.nar diff --git a/tests/functional/nul-character.nar 
b/src/libutil-tests/data/nars/nul-character.nar similarity index 100% rename from tests/functional/nul-character.nar rename to src/libutil-tests/data/nars/nul-character.nar diff --git a/tests/functional/slash.nar b/src/libutil-tests/data/nars/slash.nar similarity index 100% rename from tests/functional/slash.nar rename to src/libutil-tests/data/nars/slash.nar diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index dd90345a6..a52c257bc 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -131,31 +131,3 @@ else fi rm -f "$TEST_ROOT/unicode-*" - -# Unpacking a NAR with a NUL character in a file name should fail. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < nul-character.nar | grepQuiet "NAR contains invalid file name 'f" - -# Likewise for a '.' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dot.nar | grepQuiet "NAR contains invalid file name '.'" - -# Likewise for a '..' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dotdot.nar | grepQuiet "NAR contains invalid file name '..'" - -# Likewise for a filename containing a slash. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < slash.nar | grepQuiet "NAR contains invalid file name 'x/y'" - -# Likewise for an empty filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < empty.nar | grepQuiet "NAR contains invalid file name ''" - -# Test that the 'executable' field cannot come before the 'contents' field. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < executable-after-contents.nar | grepQuiet "expected tag ')', got 'executable'" - -# Test that the 'name' field cannot come before the 'node' field in a directory entry. 
-rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < name-after-node.nar | grepQuiet "expected tag 'name'" From a400ea42575470b1f95d0199a3cc87f788577dcb Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 8 Oct 2025 00:04:37 +0000 Subject: [PATCH 136/373] ci: integrate vm_tests into main tests job This consolidates the separate vm_tests job into the main tests job, simplifying the CI workflow. VM tests now run as part of the regular test matrix. --- .github/workflows/ci.yml | 47 ++++++++----------------------------- ci/gha/tests/default.nix | 28 ++++++++++++++++++++++ ci/gha/vm-tests/wrapper.nix | 45 ----------------------------------- 3 files changed, 38 insertions(+), 82 deletions(-) delete mode 100644 ci/gha/vm-tests/wrapper.nix diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 00a808951..e82e59309 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -126,6 +126,14 @@ jobs: --argstr stdenv "${{ matrix.stdenv }}" \ ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + - name: Run VM tests + run: | + nix build --file ci/gha/tests/wrapper.nix vmTests -L \ + --arg withInstrumentation ${{ matrix.instrumented }} \ + --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball run: | ci/gha/tests/build-checks @@ -213,7 +221,7 @@ jobs: echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT docker_push_image: - needs: [tests, vm_tests, check_secrets] + needs: [tests, check_secrets] permissions: contents: read packages: write @@ -266,43 +274,8 @@ jobs: docker tag nix:$NIX_VERSION $IMAGE_ID:master docker push $IMAGE_ID:master - vm_tests: - needs: basic-checks - strategy: - fail-fast: false - matrix: - include: - # TODO: remove once curl-based-s3 
fully lands - - scenario: legacy s3 - withAWS: true - withCurlS3: false - - scenario: curl s3 - withAWS: false - withCurlS3: true - name: vm_tests (${{ matrix.scenario }}) - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - - name: Build VM tests - run: | - nix build -L \ - --file ci/gha/vm-tests/wrapper.nix \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ - ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ - functional_user \ - githubFlakes \ - nix-docker \ - tarballFlakes - flake_regressions: - needs: vm_tests + needs: tests runs-on: ubuntu-24.04 steps: - name: Checkout nix diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index bbcd7e6b7..d9115f92c 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -79,6 +79,14 @@ rec { } ); + # Import NixOS tests using the instrumented components + nixosTests = import ../../../tests/nixos { + inherit lib pkgs; + nixComponents = nixComponentsInstrumented; + nixpkgs = nixFlake.inputs.nixpkgs; + inherit (nixFlake.inputs) nixpkgs-23-11; + }; + /** Top-level tests for the flake outputs, as they would be built by hydra. These tests generally can't be overridden to run with sanitizers. @@ -229,4 +237,24 @@ rec { { inherit coverageProfileDrvs mergedProfdata coverageReports; }; + + vmTests = { + } + # FIXME: when the curlS3 implementation is complete, it should also enable these tests. 
+ // lib.optionalAttrs (withAWS == true) { + # S3 binary cache store test only runs when S3 support is enabled + inherit (nixosTests) s3-binary-cache-store; + } + // lib.optionalAttrs (!withSanitizers && !withCoverage) { + # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it + # when not testing with sanitizers to avoid rebuilding nix + inherit (hydraJobs.tests) evalNixpkgs; + # FIXME: CI times out when building vm tests instrumented + inherit (nixosTests) + functional_user + githubFlakes + nix-docker + tarballFlakes + ; + }; } diff --git a/ci/gha/vm-tests/wrapper.nix b/ci/gha/vm-tests/wrapper.nix deleted file mode 100644 index 2ca80974c..000000000 --- a/ci/gha/vm-tests/wrapper.nix +++ /dev/null @@ -1,45 +0,0 @@ -{ - nixFlake ? builtins.getFlake ("git+file://" + toString ../../..), - system ? "x86_64-linux", - withAWS ? null, - withCurlS3 ? null, -}: - -let - pkgs = nixFlake.inputs.nixpkgs.legacyPackages.${system}; - lib = pkgs.lib; - - # Create base nixComponents using the flake's makeComponents - baseNixComponents = nixFlake.lib.makeComponents { - inherit pkgs; - }; - - # Override nixComponents if AWS parameters are specified - nixComponents = - if (withAWS == null && withCurlS3 == null) then - baseNixComponents - else - baseNixComponents.overrideScope ( - final: prev: { - nix-store = prev.nix-store.override ( - lib.optionalAttrs (withAWS != null) { inherit withAWS; } - // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } - ); - } - ); - - # Import NixOS tests with the overridden nixComponents - tests = import ../../../tests/nixos { - inherit lib pkgs nixComponents; - nixpkgs = nixFlake.inputs.nixpkgs; - inherit (nixFlake.inputs) nixpkgs-23-11; - }; -in -{ - inherit (tests) - functional_user - githubFlakes - nix-docker - tarballFlakes - ; -} From 1d8dd77e1d71f8cc97e59ee11362e0cb8312bdce Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 22:05:14 +0300 Subject: [PATCH 137/373] libutil: Fix 
renderAuthorityAndPath unreachable for path:/ URLs This was mistakenly triggered by path:/ URL, since the `//` would correspond to 3 empty segments. --- src/libutil-tests/url.cc | 13 +++++++++++++ src/libutil/url.cc | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/libutil-tests/url.cc b/src/libutil-tests/url.cc index 5c7b02248..cd6816096 100644 --- a/src/libutil-tests/url.cc +++ b/src/libutil-tests/url.cc @@ -868,6 +868,12 @@ TEST_P(ParsedURLPathSegmentsTest, segmentsAreCorrect) EXPECT_EQ(encodeUrlPath(segments), testCase.path); } +TEST_P(ParsedURLPathSegmentsTest, to_string) +{ + const auto & testCase = GetParam(); + EXPECT_EQ(testCase.url, parseURL(testCase.url).to_string()); +} + INSTANTIATE_TEST_SUITE_P( ParsedURL, ParsedURLPathSegmentsTest, @@ -886,6 +892,13 @@ INSTANTIATE_TEST_SUITE_P( .skipEmpty = false, .description = "empty_authority_empty_path", }, + ParsedURLPathSegmentsTestCase{ + .url = "path:/", + .segments = {"", ""}, + .path = "/", + .skipEmpty = false, + .description = "empty_authority_root_path", + }, ParsedURLPathSegmentsTestCase{ .url = "scheme:///", .segments = {"", ""}, diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 1c7fd3f0f..a50de0944 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -350,7 +350,7 @@ std::string ParsedURL::renderAuthorityAndPath() const must either be empty or begin with a slash ("/") character. 
*/ assert(path.empty() || path.front().empty()); res += authority->to_string(); - } else if (std::ranges::equal(std::views::take(path, 2), std::views::repeat("", 2))) { + } else if (std::ranges::equal(std::views::take(path, 3), std::views::repeat("", 3))) { /* If a URI does not contain an authority component, then the path cannot begin with two slash characters ("//") */ unreachable(); From 3c1e2e56ea21b975103e227fabc79574b811da15 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 8 Oct 2025 18:37:18 +0000 Subject: [PATCH 138/373] feat(libstore/filetransfer): add username/password authentication support Add a `UsernameAuth` struct and optional `usernameAuth` field to `FileTransferRequest` to support programmatic username/password authentication. This uses curl's `CURLOPT_USERNAME`/`CURLOPT_PASSWORD` options, which works with multiple protocols (HTTP, FTP, etc.) and is not specific to any particular authentication scheme. The primary motivation is to enable S3 authentication refactoring where AWS credentials (access key ID and secret access key) can be passed through this general-purpose mechanism, reducing the amount of S3-specific code behind `#if NIX_WITH_CURL_S3` guards. 
--- src/libstore/filetransfer.cc | 8 ++++++++ src/libstore/include/nix/store/filetransfer.hh | 16 ++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 59fc75ed0..03bf3cda4 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -426,6 +426,14 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf); errbuf[0] = 0; + // Set up username/password authentication if provided + if (request.usernameAuth) { + curl_easy_setopt(req, CURLOPT_USERNAME, request.usernameAuth->username.c_str()); + if (request.usernameAuth->password) { + curl_easy_setopt(req, CURLOPT_PASSWORD, request.usernameAuth->password->c_str()); + } + } + result.data.clear(); result.bodySize = 0; } diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 2f2d59036..abd9ece5b 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -77,6 +77,17 @@ extern FileTransferSettings fileTransferSettings; extern const unsigned int RETRY_TIME_MS_DEFAULT; +/** + * Username and optional password for HTTP basic authentication. + * These are used with curl's CURLOPT_USERNAME and CURLOPT_PASSWORD options + * for various protocols including HTTP, FTP, and others. + */ +struct UsernameAuth +{ + std::string username; + std::optional password; +}; + struct FileTransferRequest { ValidURL uri; @@ -92,6 +103,11 @@ struct FileTransferRequest std::optional data; std::string mimeType; std::function dataCallback; + /** + * Optional username and password for HTTP basic authentication. + * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. 
+ */ + std::optional usernameAuth; FileTransferRequest(ValidURL uri) : uri(std::move(uri)) From 94f410b628ede2ecec6ed06cbb0f62e1f9d9e8cf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 Oct 2025 19:59:04 +0200 Subject: [PATCH 139/373] exportReferencesGraph: Handle heterogeneous arrays This barfed with error: [json.exception.type_error.302] type must be string, but is array on `nix build github:malt3/bazel-env#bazel-env` because it has a `exportReferencesGraph` with a value like `["string",...["string"]]`. --- src/libstore/derivation-options.cc | 20 ++++++++++++++------ tests/functional/structured-attrs.nix | 4 ++++ tests/functional/structured-attrs.sh | 5 ++--- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 844bce840..698485c0d 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -99,6 +99,17 @@ DerivationOptions DerivationOptions::fromStructuredAttrs( return fromStructuredAttrs(env, parsed ? 
&*parsed : nullptr); } +static void flatten(const nlohmann::json & value, StringSet & res) +{ + if (value.is_array()) + for (auto & v : value) + flatten(v, res); + else if (value.is_string()) + res.insert(value); + else + throw Error("'exportReferencesGraph' value is not an array or a string"); +} + DerivationOptions DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAttrs * parsed, bool shouldWarn) { @@ -219,12 +230,9 @@ DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAt if (!e || !e->is_object()) return ret; for (auto & [key, value] : getObject(*e)) { - if (value.is_array()) - ret.insert_or_assign(key, value); - else if (value.is_string()) - ret.insert_or_assign(key, StringSet{value}); - else - throw Error("'exportReferencesGraph' value is not an array or a string"); + StringSet ss; + flatten(value, ss); + ret.insert_or_assign(key, std::move(ss)); } } else { auto s = getOr(env, "exportReferencesGraph", ""); diff --git a/tests/functional/structured-attrs.nix b/tests/functional/structured-attrs.nix index 4e1984517..70ac807ab 100644 --- a/tests/functional/structured-attrs.nix +++ b/tests/functional/structured-attrs.nix @@ -82,4 +82,8 @@ mkDerivation { "foo$" = "BAD"; exportReferencesGraph.refs = [ dep ]; + exportReferencesGraph.refs2 = [ + dep + [ dep ] + ]; # regression test for heterogeneous arrays } diff --git a/tests/functional/structured-attrs.sh b/tests/functional/structured-attrs.sh index dfd5a1412..473a037f9 100755 --- a/tests/functional/structured-attrs.sh +++ b/tests/functional/structured-attrs.sh @@ -2,9 +2,8 @@ source common.sh -# 27ce722638 required some incompatible changes to the nix file, so skip this -# tests for the older versions -requireDaemonNewerThan "2.4pre20210712" +# https://github.com/NixOS/nix/pull/14189 +requireDaemonNewerThan "2.33" clearStoreIfPossible From 00c2a576668cc2eb7f44318c88c1790edfe38438 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Tue, 7 Oct 2025 03:44:46 
+0000 Subject: [PATCH 140/373] feat(libstore/filetransfer): add S3 signing support --- src/libstore/aws-creds.cc | 28 ++++---- src/libstore/filetransfer.cc | 71 +++++++++++++++++-- src/libstore/include/nix/store/aws-creds.hh | 6 -- .../include/nix/store/filetransfer.hh | 19 +++++ 4 files changed, 100 insertions(+), 24 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index dc8584e1b..cd404a554 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -24,6 +24,22 @@ namespace nix { namespace { +// Global credential provider cache using boost's concurrent map +// Key: profile name (empty string for default profile) +using CredentialProviderCache = + boost::concurrent_flat_map>; + +static CredentialProviderCache credentialProviderCache; + +/** + * Clear all cached credential providers. + * Called automatically by CrtWrapper destructor during static destruction. + */ +static void clearAwsCredentialsCache() +{ + credentialProviderCache.clear(); +} + static void initAwsCrt() { struct CrtWrapper @@ -95,13 +111,6 @@ static AwsCredentials getCredentialsFromProvider(std::shared_ptr>; - -static CredentialProviderCache credentialProviderCache; - } // anonymous namespace AwsCredentials getAwsCredentials(const std::string & profile) @@ -160,11 +169,6 @@ void invalidateAwsCredentials(const std::string & profile) credentialProviderCache.erase(profile); } -void clearAwsCredentialsCache() -{ - credentialProviderCache.clear(); -} - AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url) { std::string profile = s3Url.profile.value_or(""); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 03bf3cda4..d6e21f3e6 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -9,9 +9,14 @@ #include "nix/util/signals.hh" #include "store-config-private.hh" +#include #if NIX_WITH_S3_SUPPORT # include #endif +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +# include 
"nix/store/s3-url.hh" +#endif #ifdef __linux__ # include "nix/util/linux-namespaces.hh" @@ -434,6 +439,16 @@ struct curlFileTransfer : public FileTransfer } } +#if NIX_WITH_CURL_S3 + // Set up AWS SigV4 signing if this is an S3 request + // Note: AWS SigV4 support guaranteed available (curl >= 7.75.0 checked at build time) + // The username/password (access key ID and secret key) are set via the general + // usernameAuth mechanism above. + if (request.awsSigV4Provider) { + curl_easy_setopt(req, CURLOPT_AWS_SIGV4, request.awsSigV4Provider->c_str()); + } +#endif + result.data.clear(); result.bodySize = 0; } @@ -808,7 +823,11 @@ struct curlFileTransfer : public FileTransfer void enqueueItem(std::shared_ptr item) { - if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https") + if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https" +#if NIX_WITH_CURL_S3 + && item->request.uri.scheme() != "s3" +#endif + ) throw nix::Error("uploading to '%s' is not supported", item->request.uri.to_string()); { @@ -826,9 +845,15 @@ struct curlFileTransfer : public FileTransfer { /* Ugly hack to support s3:// URIs. 
*/ if (request.uri.scheme() == "s3") { +#if NIX_WITH_CURL_S3 + // New curl-based S3 implementation + auto modifiedRequest = request; + modifiedRequest.setupForS3(); + enqueueItem(std::make_shared(*this, std::move(modifiedRequest), std::move(callback))); +#elif NIX_WITH_S3_SUPPORT + // Old AWS SDK-based implementation // FIXME: do this on a worker thread try { -#if NIX_WITH_S3_SUPPORT auto parsed = ParsedS3URL::parse(request.uri.parsed()); std::string profile = parsed.profile.value_or(""); @@ -846,13 +871,12 @@ struct curlFileTransfer : public FileTransfer res.data = std::move(*s3Res.data); res.urls.push_back(request.uri.to_string()); callback(std::move(res)); -#else - throw nix::Error( - "cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); -#endif } catch (...) { callback.rethrow(); } +#else + throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); +#endif return; } @@ -880,6 +904,41 @@ ref makeFileTransfer() return makeCurlFileTransfer(); } +#if NIX_WITH_CURL_S3 +void FileTransferRequest::setupForS3() +{ + auto parsedS3 = ParsedS3URL::parse(uri.parsed()); + // Update the request URI to use HTTPS + uri = parsedS3.toHttpsUrl(); + // This gets used later in a curl setopt + awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3"; + // check if the request already has pre-resolved credentials + std::optional sessionToken; + if (usernameAuth) { + debug("Using pre-resolved AWS credentials from parent process"); + sessionToken = preResolvedAwsSessionToken; + } else { + std::string profile = parsedS3.profile.value_or(""); + try { + auto creds = getAwsCredentials(profile); + usernameAuth = UsernameAuth{ + .username = creds.accessKeyId, + .password = creds.secretAccessKey, + }; + sessionToken = creds.sessionToken; + } catch (const AwsAuthError & e) { + warn("AWS authentication failed for S3 request %s: %s", uri, e.what()); + // Invalidate the cached credentials so 
next request will retry + invalidateAwsCredentials(profile); + // Continue without authentication - might be a public bucket + return; + } + } + if (sessionToken) + headers.emplace_back("x-amz-security-token", *sessionToken); +} +#endif + std::future FileTransfer::enqueueFileTransfer(const FileTransferRequest & request) { auto promise = std::make_shared>(); diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 16643c555..4930dc9d8 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -57,12 +57,6 @@ AwsCredentials getAwsCredentials(const std::string & profile = ""); */ void invalidateAwsCredentials(const std::string & profile); -/** - * Clear all cached credential providers. - * Typically called during application cleanup. - */ -void clearAwsCredentialsCache(); - /** * Pre-resolve AWS credentials for S3 URLs. * Used to cache credentials in parent process before forking. diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index abd9ece5b..942e05a61 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -11,6 +11,11 @@ #include "nix/util/serialise.hh" #include "nix/util/url.hh" +#include "nix/store/config.hh" +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +#endif + namespace nix { struct FileTransferSettings : Config @@ -108,6 +113,13 @@ struct FileTransferRequest * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. */ std::optional usernameAuth; +#if NIX_WITH_CURL_S3 + /** + * Pre-resolved AWS session token for S3 requests. + * When provided along with usernameAuth, this will be used instead of fetching fresh credentials. 
+ */ + std::optional preResolvedAwsSessionToken; +#endif FileTransferRequest(ValidURL uri) : uri(std::move(uri)) @@ -119,6 +131,13 @@ struct FileTransferRequest { return data ? "upload" : "download"; } + +#if NIX_WITH_CURL_S3 +private: + friend struct curlFileTransfer; + void setupForS3(); + std::optional awsSigV4Provider; +#endif }; struct FileTransferResult From 0f016f9bf55eba195e5a47490e370812f4b0d505 Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Thu, 9 Oct 2025 03:11:56 -0400 Subject: [PATCH 141/373] packaging: only override `toml11` when necessary v4.4.0 hit Nixpkgs in https://github.com/NixOS/nixpkgs/pull/442682. Ideally we'd just use that, but this keeps the fallback behavior until it's more widespread --- packaging/dependencies.nix | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 981c1aa48..7f815f128 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -57,15 +57,20 @@ scope: { prevAttrs.postInstall; }); - toml11 = pkgs.toml11.overrideAttrs rec { - version = "4.4.0"; - src = pkgs.fetchFromGitHub { - owner = "ToruNiina"; - repo = "toml11"; - tag = "v${version}"; - hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; - }; - }; + # TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release + toml11 = + if lib.versionAtLeast pkgs.toml11.version "4.4.0" then + pkgs.toml11 + else + pkgs.toml11.overrideAttrs rec { + version = "4.4.0"; + src = pkgs.fetchFromGitHub { + owner = "ToruNiina"; + repo = "toml11"; + tag = "v${version}"; + hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; + }; + }; # TODO Hack until https://github.com/NixOS/nixpkgs/issues/45462 is fixed. 
boost = From 118acc84ba029a48e58f92cdfab6c3fda5e7f9a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 9 Oct 2025 14:15:33 +0100 Subject: [PATCH 142/373] only build on push to master we have now merge queues for maintainance branches. We still build it for master to have our installer beeing updated. In future this part could go in new workflow instead. --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f23a6c6e1..6e08b5a9e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,6 +4,8 @@ on: pull_request: merge_group: push: + branches: + - master workflow_dispatch: inputs: dogfood: From 0387b7d6db14a682dd8fd2bd2bd3aa5c04b4c06b Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 20:40:40 +0200 Subject: [PATCH 143/373] Move openEvalCache to libflake Most of the eval cache logic is flake-independent and libexpr, but the loading part is not. `nix-flake` is the right component for this, as the eval cache isn't exactly specific to the command line. 
--- .../include/nix/cmd/installable-flake.hh | 2 - src/libcmd/installables.cc | 36 ---------------- src/libflake/flake.cc | 42 ++++++++++++++++++- src/libflake/include/nix/flake/flake.hh | 6 +++ 4 files changed, 46 insertions(+), 40 deletions(-) diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index 935ea8779..f3237c915 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -87,6 +87,4 @@ static inline FlakeRef defaultNixpkgsFlakeRef() return FlakeRef::fromAttrs(fetchSettings, {{"type", "indirect"}, {"id", "nixpkgs"}}); } -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); - } // namespace nix diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 91ad74308..7d6ec5199 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -443,42 +443,6 @@ static StorePath getDeriver(ref store, const Installable & i, const Store return *derivers.begin(); } -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) -{ - auto fingerprint = evalSettings.useEvalCache && evalSettings.pureEval - ? lockedFlake->getFingerprint(state.store, state.fetchSettings) - : std::nullopt; - auto rootLoader = [&state, lockedFlake]() { - /* For testing whether the evaluation cache is - complete. 
*/ - if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") - throw Error("not everything is cached, but evaluation is not allowed"); - - auto vFlake = state.allocValue(); - flake::callFlake(state, *lockedFlake, *vFlake); - - state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); - - auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - return aOutputs->value; - }; - - if (fingerprint) { - auto search = state.evalCaches.find(fingerprint.value()); - if (search == state.evalCaches.end()) { - search = - state.evalCaches - .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) - .first; - } - return search->second; - } else { - return make_ref(std::nullopt, state, rootLoader); - } -} - Installables SourceExprCommand::parseInstallables(ref store, std::vector ss) { Installables result; diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 486118963..b9a2388c7 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -1,6 +1,9 @@ #include "nix/util/terminal.hh" +#include "nix/util/ref.hh" +#include "nix/util/environment-variables.hh" #include "nix/flake/flake.hh" #include "nix/expr/eval.hh" +#include "nix/expr/eval-cache.hh" #include "nix/expr/eval-settings.hh" #include "nix/flake/lockfile.hh" #include "nix/expr/primops.hh" @@ -924,8 +927,6 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) state.callFunction(*vCallFlake, args, vRes, noPos); } -} // namespace flake - std::optional LockedFlake::getFingerprint(ref store, const fetchers::Settings & fetchSettings) const { if (lockFile.isUnlocked(fetchSettings)) @@ -953,4 +954,41 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} +ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) +{ + auto fingerprint = state.settings.useEvalCache && state.settings.pureEval + ? 
lockedFlake->getFingerprint(state.store, state.fetchSettings) + : std::nullopt; + auto rootLoader = [&state, lockedFlake]() { + /* For testing whether the evaluation cache is + complete. */ + if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") + throw Error("not everything is cached, but evaluation is not allowed"); + + auto vFlake = state.allocValue(); + callFlake(state, *lockedFlake, *vFlake); + + state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); + + auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); + assert(aOutputs); + + return aOutputs->value; + }; + + if (fingerprint) { + auto search = state.evalCaches.find(fingerprint.value()); + if (search == state.evalCaches.end()) { + search = state.evalCaches + .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) + .first; + } + return search->second; + } else { + return make_ref(std::nullopt, state, rootLoader); + } +} + +} // namespace flake + } // namespace nix diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index 13002b47c..ba27bd09e 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -5,6 +5,7 @@ #include "nix/flake/flakeref.hh" #include "nix/flake/lockfile.hh" #include "nix/expr/value.hh" +#include "nix/expr/eval-cache.hh" namespace nix { @@ -218,6 +219,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & flakeRe void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); +/** + * Open an evaluation cache for a flake. 
+ */ +ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); + } // namespace flake void emitTreeAttrs( From 42c9cbf9ca6edad0c4beabee137d3ce6384c42e2 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 20:53:48 +0200 Subject: [PATCH 144/373] Use ref where non-null --- src/libcmd/include/nix/cmd/installable-flake.hh | 2 +- src/libcmd/installable-flake.cc | 8 ++++---- src/libcmd/installables.cc | 3 +-- src/libflake/flake.cc | 2 +- src/libflake/include/nix/flake/flake.hh | 2 +- src/nix/flake.cc | 4 ++-- 6 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index f3237c915..9f449ad48 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -69,7 +69,7 @@ struct InstallableFlake : InstallableValue */ std::vector> getCursors(EvalState & state) override; - std::shared_ptr getLockedFlake() const; + ref getLockedFlake() const; FlakeRef nixpkgsFlakeRef() const; }; diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 5431100d3..65f48fa2b 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -185,16 +185,16 @@ std::vector> InstallableFlake::getCursors(EvalState return res; } -std::shared_ptr InstallableFlake::getLockedFlake() const +ref InstallableFlake::getLockedFlake() const { if (!_lockedFlake) { flake::LockFlags lockFlagsApplyConfig = lockFlags; // FIXME why this side effect? 
lockFlagsApplyConfig.applyNixConfig = true; - _lockedFlake = - std::make_shared(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig)); + _lockedFlake = make_ref(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig)); } - return _lockedFlake; + // _lockedFlake is now non-null but still just a shared_ptr + return ref(_lockedFlake); } FlakeRef InstallableFlake::nixpkgsFlakeRef() const diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 7d6ec5199..f0f36378b 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -342,8 +342,7 @@ void completeFlakeRefWithFragment( parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()); auto evalCache = openEvalCache( - *evalState, - std::make_shared(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); + *evalState, make_ref(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); auto root = evalCache->getRoot(); diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index b9a2388c7..26b3ef2a0 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -954,7 +954,7 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) +ref openEvalCache(EvalState & state, ref lockedFlake) { auto fingerprint = state.settings.useEvalCache && state.settings.pureEval ? lockedFlake->getFingerprint(state.store, state.fetchSettings) diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index ba27bd09e..b3168144c 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -222,7 +222,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); /** * Open an evaluation cache for a flake. 
*/ -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); +ref openEvalCache(EvalState & state, ref lockedFlake); } // namespace flake diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 18be64bba..cf05f6943 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -1155,7 +1155,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON evalSettings.enableImportFromDerivation.setDefault(false); auto state = getEvalState(); - auto flake = std::make_shared(lockFlake()); + auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); std::function & attrPath, const Symbol & attr)> @@ -1443,7 +1443,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON return j; }; - auto cache = openEvalCache(*state, flake); + auto cache = openEvalCache(*state, ref(flake)); auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); if (json) From abcceafbce41374b70ed090aeb0627ebdc26d3af Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 21:23:26 +0200 Subject: [PATCH 145/373] Use const for lock in openEvalCache --- src/libflake/flake.cc | 2 +- src/libflake/include/nix/flake/flake.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 26b3ef2a0..147bff820 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -954,7 +954,7 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} -ref openEvalCache(EvalState & state, ref lockedFlake) +ref openEvalCache(EvalState & state, ref lockedFlake) { auto fingerprint = state.settings.useEvalCache && state.settings.pureEval ? 
lockedFlake->getFingerprint(state.store, state.fetchSettings) diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index b3168144c..79a50f0f7 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -222,7 +222,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); /** * Open an evaluation cache for a flake. */ -ref openEvalCache(EvalState & state, ref lockedFlake); +ref openEvalCache(EvalState & state, ref lockedFlake); } // namespace flake From c58acff42afd591762746538f0d2226ee63cbef0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 10 Oct 2025 00:11:25 +0300 Subject: [PATCH 146/373] libfetchers: Remove toRealPath in SourceHutInputScheme::getRevFromRef This code had several issues: 1. Not going through the SourceAccessor means that we can only work with physical paths. 2. It did not actually check that the file exists. (std::ifstream does not check it by default). 
--- src/libfetchers/github.cc | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 3b723d7d8..a905bb384 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -548,13 +548,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme std::string refUri; if (ref == "HEAD") { - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers).storePath); - std::ifstream is(file); - std::string line; - getline(is, line); + auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers); + auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); - auto remoteLine = git::parseLsRemoteLine(line); + auto remoteLine = git::parseLsRemoteLine(getLine(contents).first); if (!remoteLine) { throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref); } @@ -564,9 +561,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme } std::regex refRegex(refUri); - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers).storePath); - std::ifstream is(file); + auto downloadFileResult = + downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers); + auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + std::istringstream is(contents); std::string line; std::optional id; From 0855b715a97a44cbcb23492c94ed91fcf7162c4d Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Thu, 9 Oct 2025 02:42:14 +0000 Subject: [PATCH 147/373] feat(libstore): add curl-based S3 store implementation Add a new S3BinaryCacheStore implementation that inherits from HttpBinaryCacheStore. 
The implementation is activated with NIX_WITH_CURL_S3, keeping the existing NIX_WITH_S3_SUPPORT (AWS SDK) implementation unchanged. --- src/libstore-tests/s3-binary-cache-store.cc | 127 ++++++++++++++++++ .../nix/store/s3-binary-cache-store.hh | 75 +++++++++++ src/libstore/s3-binary-cache-store.cc | 46 +++++++ 3 files changed, 248 insertions(+) diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index 251e96172..8c58b8408 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -15,4 +15,131 @@ TEST(S3BinaryCacheStore, constructConfig) } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include "nix/store/http-binary-cache-store.hh" +# include "nix/store/filetransfer.hh" +# include "nix/store/s3-url.hh" + +# include + +namespace nix { + +TEST(S3BinaryCacheStore, constructConfig) +{ + S3BinaryCacheStoreConfig config{"s3", "foobar", {}}; + + // The bucket name is stored as the host part of the authority in cacheUri + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "foobar"}, + })); +} + +TEST(S3BinaryCacheStore, constructConfigWithRegion) +{ + Store::Config::Params params{{"region", "eu-west-1"}}; + S3BinaryCacheStoreConfig config{"s3", "my-bucket", params}; + + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "my-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}}, + })); + EXPECT_EQ(config.region.get(), "eu-west-1"); +} + +TEST(S3BinaryCacheStore, defaultSettings) +{ + S3BinaryCacheStoreConfig config{"s3", "test-bucket", {}}; + + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + })); + + // Check default values + EXPECT_EQ(config.region.get(), "us-east-1"); + EXPECT_EQ(config.profile.get(), "default"); + EXPECT_EQ(config.scheme.get(), "https"); + 
EXPECT_EQ(config.endpoint.get(), ""); +} + +/** + * Test that S3BinaryCacheStore properly preserves S3-specific parameters + */ +TEST(S3BinaryCacheStore, s3StoreConfigPreservesParameters) +{ + StringMap params; + params["region"] = "eu-west-1"; + params["endpoint"] = "custom.s3.com"; + + S3BinaryCacheStoreConfig config("s3", "test-bucket", params); + + // The config should preserve S3-specific parameters + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "custom.s3.com"}}, + })); +} + +/** + * Test that S3 store scheme is properly registered + */ +TEST(S3BinaryCacheStore, s3SchemeRegistration) +{ + auto schemes = S3BinaryCacheStoreConfig::uriSchemes(); + EXPECT_TRUE(schemes.count("s3") > 0) << "S3 scheme should be supported"; + + // Verify HttpBinaryCacheStoreConfig doesn't directly list S3 + auto httpSchemes = HttpBinaryCacheStoreConfig::uriSchemes(); + EXPECT_FALSE(httpSchemes.count("s3") > 0) << "HTTP store shouldn't directly list S3 scheme"; +} + +/** + * Test that only S3-specific parameters are preserved in cacheUri, + * while non-S3 store parameters are not propagated to the URL + */ +TEST(S3BinaryCacheStore, parameterFiltering) +{ + StringMap params; + params["region"] = "eu-west-1"; + params["endpoint"] = "minio.local"; + params["want-mass-query"] = "true"; // Non-S3 store parameter + params["priority"] = "10"; // Non-S3 store parameter + + S3BinaryCacheStoreConfig config("s3", "test-bucket", params); + + // Only S3-specific params should be in cacheUri.query + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "minio.local"}}, + })); + + // But the non-S3 params should still be set on the config + EXPECT_EQ(config.wantMassQuery.get(), true); + EXPECT_EQ(config.priority.get(), 10); + + // And 
all params (S3 and non-S3) should be returned by getReference() + auto ref = config.getReference(); + EXPECT_EQ(ref.params["region"], "eu-west-1"); + EXPECT_EQ(ref.params["endpoint"], "minio.local"); + EXPECT_EQ(ref.params["want-mass-query"], "true"); + EXPECT_EQ(ref.params["priority"], "10"); +} + +} // namespace nix + #endif diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 2fe66b0ad..0f8fff030 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -134,4 +134,79 @@ struct S3BinaryCacheStore : virtual BinaryCacheStore } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include "nix/store/http-binary-cache-store.hh" + +namespace nix { + +struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig +{ + using HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig; + + S3BinaryCacheStoreConfig(std::string_view uriScheme, std::string_view bucketName, const Params & params); + + const Setting profile{ + this, + "default", + "profile", + R"( + The name of the AWS configuration profile to use. By default + Nix uses the `default` profile. + )"}; + +public: + + const Setting region{ + this, + "us-east-1", + "region", + R"( + The region of the S3 bucket. If your bucket is not in + `us-east-1`, you should always explicitly specify the region + parameter. + )"}; + + const Setting scheme{ + this, + "https", + "scheme", + R"( + The scheme used for S3 requests, `https` (default) or `http`. This + option allows you to disable HTTPS for binary caches which don't + support it. + + > **Note** + > + > HTTPS should be used if the cache might contain sensitive + > information. + )"}; + + const Setting endpoint{ + this, + "", + "endpoint", + R"( + The S3 endpoint to use. When empty (default), uses AWS S3 with + region-specific endpoints (e.g., s3.us-east-1.amazonaws.com). 
+ For S3-compatible services such as MinIO, set this to your service's endpoint. + + > **Note** + > + > Custom endpoints must support HTTPS and use path-based + > addressing instead of virtual host based addressing. + )"}; + + static const std::string name() + { + return "S3 Binary Cache Store"; + } + + static StringSet uriSchemes(); + + static std::string doc(); +}; + +} // namespace nix + #endif diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index b70f04be7..ab0847bb1 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -589,4 +589,50 @@ static RegisterStoreImplementation regS3BinaryCa } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include + +# include "nix/store/s3-binary-cache-store.hh" +# include "nix/store/http-binary-cache-store.hh" +# include "nix/store/store-registration.hh" + +namespace nix { + +StringSet S3BinaryCacheStoreConfig::uriSchemes() +{ + return {"s3"}; +} + +S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig( + std::string_view scheme, std::string_view _cacheUri, const Params & params) + : StoreConfig(params) + , HttpBinaryCacheStoreConfig(scheme, _cacheUri, params) +{ + // For S3 stores, preserve S3-specific query parameters as part of the URL + // These are needed for region specification and other S3-specific settings + assert(cacheUri.query.empty()); + + // Only copy S3-specific parameters to the URL query + static const std::set s3Params = {"region", "endpoint", "profile", "scheme"}; + for (const auto & [key, value] : params) { + if (s3Params.contains(key)) { + cacheUri.query[key] = value; + } + } +} + +std::string S3BinaryCacheStoreConfig::doc() +{ + return R"( + **Store URL format**: `s3://bucket-name` + + This store allows reading and writing a binary cache stored in an AWS S3 bucket. 
+ )"; +} + +static RegisterStoreImplementation registerS3BinaryCacheStore; + +} // namespace nix + #endif From 43b01b6790af2070e6162472bbfa5bbe3bb3ff61 Mon Sep 17 00:00:00 2001 From: Graham Dennis Date: Fri, 10 Oct 2025 14:54:03 +1100 Subject: [PATCH 148/373] Improved backwards compatibility hack for git URLs using dir=... attribute --- src/libfetchers/git.cc | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 7c1630167..a8a52ef30 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -496,6 +496,36 @@ struct GitInputScheme : InputScheme Git interprets them as part of the file name. So get rid of them. */ url.query.clear(); + /* Backward compatibility hack: In old versions of Nix, if you had + a flake input like + + inputs.foo.url = "git+https://foo/bar?dir=subdir"; + + it would result in a lock file entry like + + "original": { + "dir": "subdir", + "type": "git", + "url": "https://foo/bar?dir=subdir" + } + + New versions of Nix remove `?dir=subdir` from the `url` field, + since the subdirectory is intended for `FlakeRef`, not the + fetcher (and specifically the remote server), that is, the + flakeref is parsed into + + "original": { + "dir": "subdir", + "type": "git", + "url": "https://foo/bar" + } + + However, new versions of nix parsing old flake.lock files would pass the dir= + query parameter in the "url" attribute to git, which will then complain. + + For this reason, we filtering the `dir` query parameter from the URL + before passing it to git. 
*/ + url.query.erase("dir"); repoInfo.location = url; } From 8d9e9bc400433faeb9a6edc49327f7700b93b1c2 Mon Sep 17 00:00:00 2001 From: Graham Dennis Date: Fri, 10 Oct 2025 15:00:10 +1100 Subject: [PATCH 149/373] Improve comment --- src/libfetchers/git.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index a8a52ef30..9334dc1cb 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -523,7 +523,7 @@ struct GitInputScheme : InputScheme However, new versions of nix parsing old flake.lock files would pass the dir= query parameter in the "url" attribute to git, which will then complain. - For this reason, we filtering the `dir` query parameter from the URL + For this reason, we are filtering the `dir` query parameter from the URL before passing it to git. */ url.query.erase("dir"); repoInfo.location = url; From b56dd21c311b1ad1e19bfb1180a0b5f94834b85d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 10 Oct 2025 17:18:40 -0400 Subject: [PATCH 150/373] `Settings::ExternalBuilder::systems` make set Nothing cares about the order, actually. 
--- src/libstore/include/nix/store/globals.hh | 2 +- src/libstore/unix/build/external-derivation-builder.cc | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 1b59bd6fc..be3561848 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1375,7 +1375,7 @@ public: struct ExternalBuilder { - std::vector systems; + StringSet systems; Path program; std::vector args; }; diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 71cfd1a62..f20badb85 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -19,10 +19,9 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) { for (auto & handler : settings.externalBuilders.get()) { - for (auto & system : handler.systems) - if (params.drv.platform == system) - return std::make_unique( - store, std::move(miscMethods), std::move(params), handler); + if (handler.systems.contains(params.drv.platform)) + return std::make_unique( + store, std::move(miscMethods), std::move(params), handler); } return {}; } From f30cb8667bab3856f083dde308ec35df7c4adbc3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 10 Oct 2025 23:57:36 +0300 Subject: [PATCH 151/373] libstore: Fix double-quoting of paths in logs std::filesystem::path is already quoted by boost::format with double quotes ("). 
--- src/libstore/local-store.cc | 6 +++--- src/libstore/optimise-store.cc | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ebc987ee0..cbd3fa6d8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1383,7 +1383,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) for (auto & link : DirectoryIterator{linksDir}) { checkInterrupt(); auto name = link.path().filename(); - printMsg(lvlTalkative, "checking contents of '%s'", name); + printMsg(lvlTalkative, "checking contents of %s", name); PosixSourceAccessor accessor; std::string hash = hashPath( PosixSourceAccessor::createAtRoot(link.path()), @@ -1391,10 +1391,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) HashAlgorithm::SHA256) .first.to_string(HashFormat::Nix32, false); if (hash != name.string()) { - printError("link '%s' was modified! expected hash '%s', got '%s'", link.path(), name, hash); + printError("link %s was modified! expected hash %s, got '%s'", link.path(), name, hash); if (repair) { std::filesystem::remove(link.path()); - printInfo("removed link '%s'", link.path()); + printInfo("removed link %s", link.path()); } else { errors = true; } diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 1cf28e022..8f2878136 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -202,7 +202,7 @@ void LocalStore::optimisePath_( full. When that happens, it's fine to ignore it: we just effectively disable deduplication of this file. 
*/ - printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno)); + printInfo("cannot link %s to '%s': %s", linkPath, path, strerror(errno)); return; } @@ -216,11 +216,11 @@ void LocalStore::optimisePath_( auto stLink = lstat(linkPath.string()); if (st.st_ino == stLink.st_ino) { - debug("'%1%' is already linked to '%2%'", path, linkPath); + debug("'%1%' is already linked to %2%", path, linkPath); return; } - printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath); + printMsg(lvlTalkative, "linking '%1%' to %2%", path, linkPath); /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its @@ -245,7 +245,7 @@ void LocalStore::optimisePath_( systems). This is likely to happen with empty files. Just shrug and ignore. */ if (st.st_size) - printInfo("'%1%' has maximum number of links", linkPath); + printInfo("%1% has maximum number of links", linkPath); return; } throw; @@ -256,13 +256,13 @@ void LocalStore::optimisePath_( std::filesystem::rename(tempLink, path); } catch (std::filesystem::filesystem_error & e) { std::filesystem::remove(tempLink); - printError("unable to unlink '%1%'", tempLink); + printError("unable to unlink %1%", tempLink); if (e.code() == std::errc::too_many_links) { /* Some filesystems generate too many links on the rename, rather than on the original link. (Probably it temporarily increases the st_nlink field before decreasing it again.) 
*/ - debug("'%s' has reached maximum number of links", linkPath); + debug("%s has reached maximum number of links", linkPath); return; } throw; From 2ff59ec3e0fc093dcd0064bc5df21c5d62ea2445 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 10 Oct 2025 17:27:41 -0400 Subject: [PATCH 152/373] Use `std::ranges::find_if` for finding external builders Co-authored-by: Sergei Zimmerman --- src/libstore/unix/build/external-derivation-builder.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index f20badb85..ebcaad525 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -18,10 +18,12 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl static std::unique_ptr newIfSupported( LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) { - for (auto & handler : settings.externalBuilders.get()) { - if (handler.systems.contains(params.drv.platform)) - return std::make_unique( - store, std::move(miscMethods), std::move(params), handler); + if (auto it = std::ranges::find_if( + settings.externalBuilders.get(), + [&](const auto & handler) { return handler.systems.contains(params.drv.platform); }); + it != settings.externalBuilders.get().end()) { + return std::make_unique( + store, std::move(miscMethods), std::move(params), *it); } return {}; } From b57caaa1a273323b596097ab5509797b38e2e272 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 16 Aug 2025 14:25:28 -0400 Subject: [PATCH 153/373] Consolidate logic choosing where we can/should build a bit I want to separate "policy" from "mechanism". Now the logic to decide how to build (a policy choice, though with some hard constraints) is all in derivation building goal, and all in the same spot. 
build hook, external builder, or local builder --- the choice between
all three is made in the same spot --- pure policy.

Now, if you want to use the external derivation builder, you simply
provide the `ExternalBuilder` you wish to use, and there is no
additional checking --- pure mechanism. It is the responsibility of
the caller to choose an external builder that works for the
derivation in question.

Also, `checkSystem()` was the only thing throwing `BuildError` from
`startBuilder`. Now that that is gone, we can now remove the
`try...catch` around that.
---
 src/libstore/build/derivation-builder.cc      | 27 ++++++
 .../build/derivation-building-goal.cc         | 94 +++++++++++++------
 src/libstore/globals.cc                       | 11 ++-
 .../nix/store/build/derivation-builder.hh     | 22 +++++
 src/libstore/include/nix/store/globals.hh     | 13 ++-
 src/libstore/meson.build                      |  1 +
 src/libstore/unix/build/derivation-builder.cc | 38 --------
 .../unix/build/external-derivation-builder.cc | 28 +++---
 8 files changed, 140 insertions(+), 94 deletions(-)
 create mode 100644 src/libstore/build/derivation-builder.cc

diff --git a/src/libstore/build/derivation-builder.cc b/src/libstore/build/derivation-builder.cc
new file mode 100644
index 000000000..39ac40175
--- /dev/null
+++ b/src/libstore/build/derivation-builder.cc
@@ -0,0 +1,27 @@
+#include "nix/util/json-utils.hh"
+#include "nix/store/build/derivation-builder.hh"
+
+namespace nlohmann {
+
+using namespace nix;
+
+ExternalBuilder adl_serializer::from_json(const json & json)
+{
+    auto obj = getObject(json);
+    return {
+        .systems = valueAt(obj, "systems"),
+        .program = valueAt(obj, "program"),
+        .args = valueAt(obj, "args"),
+    };
+}
+
+void adl_serializer::to_json(json & json, const ExternalBuilder & eb)
+{
+    json = {
+        {"systems", eb.systems},
+        {"program", eb.program},
+        {"args", eb.args},
+    };
+}
+
+} // namespace nlohmann
diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc
index 001816ca0..e8ee945d9 100644
--- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -491,6 +491,8 @@ Goal::Co DerivationBuildingGoal::tryToBuild() bool useHook; + const ExternalBuilder * externalBuilder = nullptr; + while (true) { trace("trying to build"); @@ -584,7 +586,42 @@ Goal::Co DerivationBuildingGoal::tryToBuild() co_await waitForAWhile(); continue; case rpDecline: - /* We should do it ourselves. */ + /* We should do it ourselves. + + Now that we've decided we can't / won't do a remote build, check + that we can in fact build locally. First see if there is an + external builder for a "semi-local build". If there is, prefer to + use that. If there is not, then check if we can do a "true" local + build. */ + + externalBuilder = settings.findExternalDerivationBuilderIfSupported(*drv); + + if (!externalBuilder && !drvOptions->canBuildLocally(worker.store, *drv)) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL + "\n" + "Required system: '%s' with features {%s}\n" + "Current system: '%s' with features {%s}", + Magenta(worker.store.printStorePath(drvPath)), + Magenta(drv->platform), + concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)), + Magenta(settings.thisSystem), + concatStringsSep(", ", worker.store.Store::config.systemFeatures)); + + // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - + // we should tell them to run the command to install Darwin 2 + if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") + msg += fmt( + "\nNote: run `%s` to run programs for x86_64-darwin", + Magenta( + "/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); + + builder.reset(); + outputLocks.unlock(); + worker.permanentFailure = true; + co_return doneFailure({BuildResult::Failure::InputRejected, std::move(msg)}); + } useHook = false; break; } @@ -771,36 
+808,35 @@ Goal::Co DerivationBuildingGoal::tryToBuild() co_return doneFailure(std::move(e)); } + DerivationBuilderParams params{ + .drvPath = drvPath, + .buildResult = buildResult, + .drv = *drv, + .drvOptions = *drvOptions, + .inputPaths = inputPaths, + .initialOutputs = initialOutputs, + .buildMode = buildMode, + .defaultPathsInChroot = std::move(defaultPathsInChroot), + .systemFeatures = worker.store.config.systemFeatures.get(), + .desugaredEnv = std::move(desugaredEnv), + }; + /* If we have to wait and retry (see below), then `builder` will already be created, so we don't need to create it again. */ - builder = makeDerivationBuilder( - *localStoreP, - std::make_unique(*this, builder), - DerivationBuilderParams{ - .drvPath = drvPath, - .buildResult = buildResult, - .drv = *drv, - .drvOptions = *drvOptions, - .inputPaths = inputPaths, - .initialOutputs = initialOutputs, - .buildMode = buildMode, - .defaultPathsInChroot = std::move(defaultPathsInChroot), - .systemFeatures = worker.store.config.systemFeatures.get(), - .desugaredEnv = std::move(desugaredEnv), - }); + builder = externalBuilder ? makeExternalDerivationBuilder( + *localStoreP, + std::make_unique(*this, builder), + std::move(params), + *externalBuilder) + : makeDerivationBuilder( + *localStoreP, + std::make_unique(*this, builder), + std::move(params)); } - std::optional builderOutOpt; - try { - /* Okay, we have to build. 
*/ - builderOutOpt = builder->startBuild(); - } catch (BuildError & e) { - builder.reset(); - outputLocks.unlock(); - worker.permanentFailure = true; - co_return doneFailure(std::move(e)); // InputRejected - } - if (!builderOutOpt) { + if (auto builderOutOpt = builder->startBuild()) { + builderOut = *std::move(builderOutOpt); + } else { if (!actLock) actLock = std::make_unique( *logger, @@ -809,9 +845,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() fmt("waiting for a free build user ID for '%s'", Magenta(worker.store.printStorePath(drvPath)))); co_await waitForAWhile(); continue; - } else { - builderOut = *std::move(builderOutOpt); - }; + } break; } diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 58a649fc5..4fdb820a9 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -258,6 +258,15 @@ Path Settings::getDefaultSSLCertFile() return ""; } +const ExternalBuilder * Settings::findExternalDerivationBuilderIfSupported(const Derivation & drv) +{ + if (auto it = std::ranges::find_if( + externalBuilders.get(), [&](const auto & handler) { return handler.systems.contains(drv.platform); }); + it != externalBuilders.get().end()) + return &*it; + return nullptr; +} + std::string nixVersion = PACKAGE_VERSION; NLOHMANN_JSON_SERIALIZE_ENUM( @@ -379,8 +388,6 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Settings::ExternalBuilder, systems, program, args); - template<> Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const { diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 63ef2b665..5fad26e83 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -1,12 +1,15 @@ #pragma once ///@file +#include + #include "nix/store/build-result.hh" #include "nix/store/derivation-options.hh" #include 
"nix/store/build/derivation-building-misc.hh" #include "nix/store/derivations.hh" #include "nix/store/parsed-derivations.hh" #include "nix/util/processes.hh" +#include "nix/util/json-impls.hh" #include "nix/store/restricted-store.hh" #include "nix/store/build/derivation-env-desugar.hh" @@ -179,9 +182,28 @@ struct DerivationBuilder : RestrictionContext virtual bool killChild() = 0; }; +struct ExternalBuilder +{ + StringSet systems; + Path program; + std::vector args; +}; + #ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params); + +/** + * @param handler Must be chosen such that it supports the given + * derivation. + */ +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler); #endif } // namespace nix + +JSON_IMPL(nix::ExternalBuilder) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index be3561848..14647c05f 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1373,13 +1373,6 @@ public: Set it to 1 to warn on all paths. )"}; - struct ExternalBuilder - { - StringSet systems; - Path program; - std::vector args; - }; - using ExternalBuilders = std::vector; Setting externalBuilders{ @@ -1443,6 +1436,12 @@ public: // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} // Xp::ExternalBuilders }; + + /** + * Finds the first external derivation builder that supports this + * derivation, or else returns a null pointer. + */ + const ExternalBuilder * findExternalDerivationBuilderIfSupported(const Derivation & drv); }; // FIXME: don't use a global variable. 
diff --git a/src/libstore/meson.build b/src/libstore/meson.build index a3502c2e0..728de2dfd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -298,6 +298,7 @@ sources = files( 'aws-creds.cc', 'binary-cache-store.cc', 'build-result.cc', + 'build/derivation-builder.cc', 'build/derivation-building-goal.cc', 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 5bdd843bd..0158505a5 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -229,12 +229,6 @@ protected: return acquireUserLock(1, false); } - /** - * Throw an exception if we can't do this derivation because of - * missing system features. - */ - virtual void checkSystem(); - /** * Return the paths that should be made available in the sandbox. * This includes: @@ -672,33 +666,6 @@ static bool checkNotWorldWritable(std::filesystem::path path) return true; } -void DerivationBuilderImpl::checkSystem() -{ - /* Right platform? 
*/ - if (!drvOptions.canBuildLocally(store, drv)) { - auto msg = - fmt("Cannot build '%s'.\n" - "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL - "\n" - "Required system: '%s' with features {%s}\n" - "Current system: '%s' with features {%s}", - Magenta(store.printStorePath(drvPath)), - Magenta(drv.platform), - concatStringsSep(", ", drvOptions.getRequiredSystemFeatures(drv)), - Magenta(settings.thisSystem), - concatStringsSep(", ", store.Store::config.systemFeatures)); - - // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should - // tell them to run the command to install Darwin 2 - if (drv.platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") - msg += - fmt("\nNote: run `%s` to run programs for x86_64-darwin", - Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); - - throw BuildError(BuildResult::Failure::InputRejected, msg); - } -} - std::optional DerivationBuilderImpl::startBuild() { if (useBuildUsers()) { @@ -709,8 +676,6 @@ std::optional DerivationBuilderImpl::startBuild() return std::nullopt; } - checkSystem(); - /* Make sure that no other processes are executing under the sandbox uids. This must be done before any chownToBuilder() calls. */ @@ -1922,9 +1887,6 @@ namespace nix { std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) { - if (auto builder = ExternalDerivationBuilder::newIfSupported(store, miscMethods, params)) - return builder; - bool useSandbox = false; /* Are we doing a sandboxed build? 
*/ diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index ebcaad525..7ddb6e093 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -2,32 +2,19 @@ namespace nix { struct ExternalDerivationBuilder : DerivationBuilderImpl { - Settings::ExternalBuilder externalBuilder; + ExternalBuilder externalBuilder; ExternalDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params, - Settings::ExternalBuilder externalBuilder) + ExternalBuilder externalBuilder) : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) , externalBuilder(std::move(externalBuilder)) { experimentalFeatureSettings.require(Xp::ExternalBuilders); } - static std::unique_ptr newIfSupported( - LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) - { - if (auto it = std::ranges::find_if( - settings.externalBuilders.get(), - [&](const auto & handler) { return handler.systems.contains(params.drv.platform); }); - it != settings.externalBuilders.get().end()) { - return std::make_unique( - store, std::move(miscMethods), std::move(params), *it); - } - return {}; - } - Path tmpDirInSandbox() override { /* In a sandbox, for determinism, always use the same temporary @@ -41,8 +28,6 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl createDir(tmpDir, 0700); } - void checkSystem() override {} - void startChild() override { if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) @@ -121,4 +106,13 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl } }; +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler) +{ + return std::make_unique(store, std::move(miscMethods), std::move(params), handler); +} + } // namespace nix From 
f02218873e846d93e079b96de3a2ba1bb369c12a Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 10 Oct 2025 19:39:09 +0000 Subject: [PATCH 154/373] fix(libstore): improve http-binary-cache-store S3 compatibility This commit adds two key fixes to http-binary-cache-store.cc to properly support the new curl-based S3 implementation: 1. **Consistent cache key handling**: Use `getReference().render(withParams=false)` for disk cache keys instead of `cacheUri.to_string()`. This ensures cache keys are consistent with the S3 implementation and don't include query parameters, which matches the behavior expected by Store::queryPathInfo() lookups. 2. **S3 query parameter preservation**: When generating file transfer requests for S3 URLs, preserve query parameters from the base URL (region, endpoint, etc.) when the relative path doesn't have its own query parameters. This ensures S3-specific configuration is propagated to all requests. --- src/libstore/http-binary-cache-store.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 5d4fba163..8d5f427af 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -78,7 +78,11 @@ public: void init() override { // FIXME: do this lazily? 
- if (auto cacheInfo = diskCache->upToDateCacheExists(config->cacheUri.to_string())) { + // For consistent cache key handling, use the reference without parameters + // This matches what's used in Store::queryPathInfo() lookups + auto cacheKey = config->getReference().render(/*withParams=*/false); + + if (auto cacheInfo = diskCache->upToDateCacheExists(cacheKey)) { config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); config->priority.setDefault(cacheInfo->priority); } else { @@ -87,8 +91,7 @@ public: } catch (UploadToHTTP &) { throw Error("'%s' does not appear to be a binary cache", config->cacheUri.to_string()); } - diskCache->createCache( - config->cacheUri.to_string(), config->storeDir, config->wantMassQuery, config->priority); + diskCache->createCache(cacheKey, config->storeDir, config->wantMassQuery, config->priority); } } @@ -184,7 +187,16 @@ protected: field which is `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` (note the query param) and that gets passed here. */ - return FileTransferRequest(parseURLRelative(path, cacheUriWithTrailingSlash)); + auto result = parseURLRelative(path, cacheUriWithTrailingSlash); + + /* For S3 URLs, preserve query parameters from the base URL when the + relative path doesn't have its own query parameters. This is needed + to preserve S3-specific parameters like endpoint and region. */ + if (config->cacheUri.scheme == "s3" && result.query.empty()) { + result.query = config->cacheUri.query; + } + + return FileTransferRequest(result); } void getFile(const std::string & path, Sink & sink) override From 46382ade74bdd811ddeab7da33d57effaa76852a Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 01:30:21 +0300 Subject: [PATCH 155/373] libutil: Print stack trace on assertion failure This change overrides __assert_fail on glibc/musl to instead call std::terminate that we have a custom handler for. 
This ensures that we have more context to diagnose issues encountered by users in the wild. --- .../common/assert-fail/meson.build | 32 +++++++++++++++++++ .../common/assert-fail/wrap-assert-fail.cc | 17 ++++++++++ nix-meson-build-support/common/meson.build | 2 ++ 3 files changed, 51 insertions(+) create mode 100644 nix-meson-build-support/common/assert-fail/meson.build create mode 100644 nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc diff --git a/nix-meson-build-support/common/assert-fail/meson.build b/nix-meson-build-support/common/assert-fail/meson.build new file mode 100644 index 000000000..7539b3921 --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/meson.build @@ -0,0 +1,32 @@ +can_wrap_assert_fail_test_code = ''' +#include +#include + +int main() +{ + assert(0); +} + +extern "C" void * __real___assert_fail(const char *, const char *, unsigned int, const char *); + +extern "C" void * +__wrap___assert_fail(const char *, const char *, unsigned int, const char *) +{ + return __real___assert_fail(nullptr, nullptr, 0, nullptr); +} +''' + +wrap_assert_fail_args = [ '-Wl,--wrap=__assert_fail' ] + +can_wrap_assert_fail = cxx.links( + can_wrap_assert_fail_test_code, + args : wrap_assert_fail_args, + name : 'linker can wrap __assert_fail', +) + +if can_wrap_assert_fail + deps_other += declare_dependency( + sources : 'wrap-assert-fail.cc', + link_args : wrap_assert_fail_args, + ) +endif diff --git a/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc new file mode 100644 index 000000000..d9e34168b --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc @@ -0,0 +1,17 @@ +#include "nix/util/error.hh" + +#include +#include +#include +#include + +extern "C" [[noreturn]] void __attribute__((weak)) +__wrap___assert_fail(const char * assertion, const char * file, unsigned int line, const char * function) +{ + char buf[512]; + int n = + 
snprintf(buf, sizeof(buf), "Assertion '%s' failed in %s at %s:%" PRIuLEAST32, assertion, function, file, line); + if (n < 0) + nix::panic("Assertion failed and could not format error message"); + nix::panic(std::string_view(buf, std::min(static_cast(sizeof(buf)), n))); +} diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index 8c4e98862..2944a733b 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -44,3 +44,5 @@ endif # Darwin ld doesn't like "X.Y.Zpre" nix_soversion = meson.project_version().split('pre')[0] + +subdir('assert-fail') From d26a337c09baf6d5d0c8310efd534f6c806afe20 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 16:08:35 +0300 Subject: [PATCH 156/373] meson: Move asan-options to common This way we don't have to duplicate the subdir everywhere. Less copy-pasta is good. --- nix-meson-build-support/{ => common}/asan-options/meson.build | 0 nix-meson-build-support/common/meson.build | 1 + src/libcmd/meson.build | 1 - src/libexpr-c/meson.build | 1 - src/libexpr-test-support/meson.build | 1 - src/libexpr-tests/meson.build | 1 - src/libexpr/meson.build | 1 - src/libfetchers-c/meson.build | 1 - src/libfetchers-tests/meson.build | 1 - src/libfetchers/meson.build | 1 - src/libflake-c/meson.build | 1 - src/libflake-tests/meson.build | 1 - src/libflake/meson.build | 1 - src/libmain-c/meson.build | 1 - src/libmain/meson.build | 1 - src/libstore-c/meson.build | 1 - src/libstore-test-support/meson.build | 1 - src/libstore-tests/meson.build | 1 - src/libstore/meson.build | 1 - src/libutil-c/meson.build | 1 - src/libutil-test-support/meson.build | 1 - src/libutil-tests/meson.build | 1 - src/libutil/meson.build | 1 - src/nix/meson.build | 1 - tests/functional/test-libstoreconsumer/meson.build | 2 +- 25 files changed, 2 insertions(+), 23 deletions(-) rename nix-meson-build-support/{ => common}/asan-options/meson.build (100%) diff --git 
a/nix-meson-build-support/asan-options/meson.build b/nix-meson-build-support/common/asan-options/meson.build similarity index 100% rename from nix-meson-build-support/asan-options/meson.build rename to nix-meson-build-support/common/asan-options/meson.build diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index 2944a733b..99bfbd486 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -46,3 +46,4 @@ endif nix_soversion = meson.project_version().split('pre')[0] subdir('assert-fail') +subdir('asan-options') diff --git a/src/libcmd/meson.build b/src/libcmd/meson.build index 3833d7e0a..f553afa0b 100644 --- a/src/libcmd/meson.build +++ b/src/libcmd/meson.build @@ -67,7 +67,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'built-path.cc', diff --git a/src/libexpr-c/meson.build b/src/libexpr-c/meson.build index 03cee41a0..c47704ce4 100644 --- a/src/libexpr-c/meson.build +++ b/src/libexpr-c/meson.build @@ -28,7 +28,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_expr.cc', diff --git a/src/libexpr-test-support/meson.build b/src/libexpr-test-support/meson.build index 01a3f3bcb..df28661b7 100644 --- a/src/libexpr-test-support/meson.build +++ b/src/libexpr-test-support/meson.build @@ -31,7 +31,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'tests/value/context.cc', diff --git a/src/libexpr-tests/meson.build b/src/libexpr-tests/meson.build index 7f7c08955..d1700b11d 100644 --- a/src/libexpr-tests/meson.build +++ b/src/libexpr-tests/meson.build @@ -45,7 +45,6 @@ config_priv_h = configure_file( ) 
subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index d24e7fae3..1314ab65b 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -97,7 +97,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') parser_tab = custom_target( input : 'parser.y', diff --git a/src/libfetchers-c/meson.build b/src/libfetchers-c/meson.build index 3761b0df2..db415d917 100644 --- a/src/libfetchers-c/meson.build +++ b/src/libfetchers-c/meson.build @@ -32,7 +32,6 @@ add_project_arguments( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_fetchers.cc', diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 858d7f3af..905e06db0 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -37,7 +37,6 @@ libgit2 = dependency('libgit2') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'access-tokens.cc', diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 5b53a147b..d34dd4f43 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -32,7 +32,6 @@ libgit2 = dependency('libgit2', version : '>= 1.9') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'attrs.cc', diff --git a/src/libflake-c/meson.build b/src/libflake-c/meson.build index d0d45cfa8..fddb39bdf 100644 --- a/src/libflake-c/meson.build +++ b/src/libflake-c/meson.build @@ -32,7 +32,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_flake.cc', 
diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index 41ae6cf3d..a75603970 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -34,7 +34,6 @@ gtest = dependency('gtest', main : true) deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'flakeref.cc', diff --git a/src/libflake/meson.build b/src/libflake/meson.build index 3bd04fcf4..58916ecd9 100644 --- a/src/libflake/meson.build +++ b/src/libflake/meson.build @@ -29,7 +29,6 @@ nlohmann_json = dependency('nlohmann_json', version : '>= 3.9') deps_public += nlohmann_json subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') diff --git a/src/libmain-c/meson.build b/src/libmain-c/meson.build index 2ac2b799b..36332fdb7 100644 --- a/src/libmain-c/meson.build +++ b/src/libmain-c/meson.build @@ -28,7 +28,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_main.cc', diff --git a/src/libmain/meson.build b/src/libmain/meson.build index 21bfbea3e..2ac59924e 100644 --- a/src/libmain/meson.build +++ b/src/libmain/meson.build @@ -53,7 +53,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-args.cc', diff --git a/src/libstore-c/meson.build b/src/libstore-c/meson.build index a92771efc..c6b6174c7 100644 --- a/src/libstore-c/meson.build +++ b/src/libstore-c/meson.build @@ -26,7 +26,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_store.cc', diff --git a/src/libstore-test-support/meson.build 
b/src/libstore-test-support/meson.build index e929ae2b4..8617225d7 100644 --- a/src/libstore-test-support/meson.build +++ b/src/libstore-test-support/meson.build @@ -29,7 +29,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 2784b31dc..399e2abd5 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -52,7 +52,6 @@ gtest = dependency('gmock') deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-protocol.cc', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 728de2dfd..8ec39dac1 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -292,7 +292,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'aws-creds.cc', diff --git a/src/libutil-c/meson.build b/src/libutil-c/meson.build index 54fd53c74..1806dbb6f 100644 --- a/src/libutil-c/meson.build +++ b/src/libutil-c/meson.build @@ -32,7 +32,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_util.cc', diff --git a/src/libutil-test-support/meson.build b/src/libutil-test-support/meson.build index 1ca251ce8..64231107e 100644 --- a/src/libutil-test-support/meson.build +++ b/src/libutil-test-support/meson.build @@ -27,7 +27,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'hash.cc', diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index 83245a73d..87af49933 100644 --- a/src/libutil-tests/meson.build +++ 
b/src/libutil-tests/meson.build @@ -42,7 +42,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'archive.cc', diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 8c9e1f1eb..f4b8dbb61 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -118,7 +118,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = [ config_priv_h ] + files( 'archive.cc', diff --git a/src/nix/meson.build b/src/nix/meson.build index f67a2948f..9bee2d147 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -56,7 +56,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') nix_sources = [ config_priv_h ] + files( diff --git a/tests/functional/test-libstoreconsumer/meson.build b/tests/functional/test-libstoreconsumer/meson.build index 7f619d01b..7c95b0c4a 100644 --- a/tests/functional/test-libstoreconsumer/meson.build +++ b/tests/functional/test-libstoreconsumer/meson.build @@ -1,6 +1,6 @@ cxx = meson.get_compiler('cpp') -subdir('nix-meson-build-support/asan-options') +subdir('nix-meson-build-support/common/asan-options') libstoreconsumer_tester = executable( 'test-libstoreconsumer', From 47705139c92bc8bec9dff316d005f2b152258121 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 16:30:55 +0300 Subject: [PATCH 157/373] packaging: Remove no longer necessary libgit2 patches 25.05 already has 1.9.0 and we don't support older nixpkgs versions. 
--- packaging/dependencies.nix | 34 - .../libgit2-mempack-thin-packfile.patch | 282 ------ ...2-packbuilder-callback-interruptible.patch | 930 ------------------ 3 files changed, 1246 deletions(-) delete mode 100644 packaging/patches/libgit2-mempack-thin-packfile.patch delete mode 100644 packaging/patches/libgit2-packbuilder-callback-interruptible.patch diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 7f815f128..5581719b5 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -89,38 +89,4 @@ scope: { buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase; installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); - - libgit2 = - if lib.versionAtLeast pkgs.libgit2.version "1.9.0" then - pkgs.libgit2 - else - pkgs.libgit2.overrideAttrs (attrs: { - # libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches - nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - # gitMinimal does not build on Windows. See packbuilder patch. 
- ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # Needed for `git apply`; see `prePatch` - pkgs.buildPackages.gitMinimal - ]; - # Only `git apply` can handle git binary patches - prePatch = - attrs.prePatch or "" - + lib.optionalString (!stdenv.hostPlatform.isWindows) '' - patch() { - git apply - } - ''; - patches = - attrs.patches or [ ] - ++ [ - ./patches/libgit2-mempack-thin-packfile.patch - ] - # gitMinimal does not build on Windows, but fortunately this patch only - # impacts interruptibility - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # binary patch; see `prePatch` - ./patches/libgit2-packbuilder-callback-interruptible.patch - ]; - }); } diff --git a/packaging/patches/libgit2-mempack-thin-packfile.patch b/packaging/patches/libgit2-mempack-thin-packfile.patch deleted file mode 100644 index fb74b1683..000000000 --- a/packaging/patches/libgit2-mempack-thin-packfile.patch +++ /dev/null @@ -1,282 +0,0 @@ -commit 9bacade4a3ef4b6b26e2c02f549eef0e9eb9eaa2 -Author: Robert Hensing -Date: Sun Aug 18 20:20:36 2024 +0200 - - Add unoptimized git_mempack_write_thin_pack - -diff --git a/include/git2/sys/mempack.h b/include/git2/sys/mempack.h -index 17da590a3..3688bdd50 100644 ---- a/include/git2/sys/mempack.h -+++ b/include/git2/sys/mempack.h -@@ -44,6 +44,29 @@ GIT_BEGIN_DECL - */ - GIT_EXTERN(int) git_mempack_new(git_odb_backend **out); - -+/** -+ * Write a thin packfile with the objects in the memory store. -+ * -+ * A thin packfile is a packfile that does not contain its transitive closure of -+ * references. This is useful for efficiently distributing additions to a -+ * repository over the network, but also finds use in the efficient bulk -+ * addition of objects to a repository, locally. -+ * -+ * This operation performs the (shallow) insert operations into the -+ * `git_packbuilder`, but does not write the packfile to disk; -+ * see `git_packbuilder_write_buf`. -+ * -+ * It also does not reset the memory store; see `git_mempack_reset`. 
-+ * -+ * @note This function may or may not write trees and blobs that are not -+ * referenced by commits. Currently everything is written, but this -+ * behavior may change in the future as the packer is optimized. -+ * -+ * @param backend The mempack backend -+ * @param pb The packbuilder to use to write the packfile -+ */ -+GIT_EXTERN(int) git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb); -+ - /** - * Dump all the queued in-memory writes to a packfile. - * -diff --git a/src/libgit2/odb_mempack.c b/src/libgit2/odb_mempack.c -index 6f27f45f8..0b61e2b66 100644 ---- a/src/libgit2/odb_mempack.c -+++ b/src/libgit2/odb_mempack.c -@@ -132,6 +132,35 @@ cleanup: - return err; - } - -+int git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb) -+{ -+ struct memory_packer_db *db = (struct memory_packer_db *)backend; -+ const git_oid *oid; -+ size_t iter = 0; -+ int err = -1; -+ -+ /* TODO: Implement the recency heuristics. -+ For this it probably makes sense to only write what's referenced -+ through commits, an option I've carved out for you in the docs. -+ wrt heuristics: ask your favorite LLM to translate https://git-scm.com/docs/pack-heuristics/en -+ to actual normal reference documentation. 
*/ -+ while (true) { -+ err = git_oidmap_iterate(NULL, db->objects, &iter, &oid); -+ if (err == GIT_ITEROVER) { -+ err = 0; -+ break; -+ } -+ if (err != 0) -+ return err; -+ -+ err = git_packbuilder_insert(pb, oid, NULL); -+ if (err != 0) -+ return err; -+ } -+ -+ return 0; -+} -+ - int git_mempack_dump( - git_buf *pack, - git_repository *repo, -diff --git a/tests/libgit2/mempack/thinpack.c b/tests/libgit2/mempack/thinpack.c -new file mode 100644 -index 000000000..604a4dda2 ---- /dev/null -+++ b/tests/libgit2/mempack/thinpack.c -@@ -0,0 +1,196 @@ -+#include "clar_libgit2.h" -+#include "git2/indexer.h" -+#include "git2/odb_backend.h" -+#include "git2/tree.h" -+#include "git2/types.h" -+#include "git2/sys/mempack.h" -+#include "git2/sys/odb_backend.h" -+#include "util.h" -+ -+static git_repository *_repo; -+static git_odb_backend * _mempack_backend; -+ -+void test_mempack_thinpack__initialize(void) -+{ -+ git_odb *odb; -+ -+ _repo = cl_git_sandbox_init_new("mempack_thinpack_repo"); -+ -+ cl_git_pass(git_mempack_new(&_mempack_backend)); -+ cl_git_pass(git_repository_odb(&odb, _repo)); -+ cl_git_pass(git_odb_add_backend(odb, _mempack_backend, 999)); -+ git_odb_free(odb); -+} -+ -+void _mempack_thinpack__cleanup(void) -+{ -+ cl_git_sandbox_cleanup(); -+} -+ -+/* -+ Generating a packfile for an unchanged repo works and produces an empty packfile. -+ Even if we allow this scenario to be detected, it shouldn't misbehave if the -+ application is unaware of it. 
-+*/ -+void test_mempack_thinpack__empty(void) -+{ -+ git_packbuilder *pb; -+ int version; -+ int n; -+ git_buf buf = GIT_BUF_INIT; -+ -+ git_packbuilder_new(&pb, _repo); -+ -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_assert_in_range(12, buf.size, 1024 /* empty packfile is >0 bytes, but certainly not that big */); -+ cl_assert(buf.ptr[0] == 'P'); -+ cl_assert(buf.ptr[1] == 'A'); -+ cl_assert(buf.ptr[2] == 'C'); -+ cl_assert(buf.ptr[3] == 'K'); -+ version = (buf.ptr[4] << 24) | (buf.ptr[5] << 16) | (buf.ptr[6] << 8) | buf.ptr[7]; -+ /* Subject to change. https://git-scm.com/docs/pack-format: Git currently accepts version number 2 or 3 but generates version 2 only.*/ -+ cl_assert_equal_i(2, version); -+ n = (buf.ptr[8] << 24) | (buf.ptr[9] << 16) | (buf.ptr[10] << 8) | buf.ptr[11]; -+ cl_assert_equal_i(0, n); -+ git_buf_dispose(&buf); -+ -+ git_packbuilder_free(pb); -+} -+ -+#define LIT_LEN(x) x, sizeof(x) - 1 -+ -+/* -+ Check that git_mempack_write_thin_pack produces a thin packfile. -+*/ -+void test_mempack_thinpack__thin(void) -+{ -+ /* Outline: -+ - Create tree 1 -+ - Flush to packfile A -+ - Create tree 2 -+ - Flush to packfile B -+ -+ Tree 2 has a new blob and a reference to a blob from tree 1. 
-+ -+ Expectation: -+ - Packfile B is thin and does not contain the objects from packfile A -+ */ -+ -+ -+ git_oid oid_blob_1; -+ git_oid oid_blob_2; -+ git_oid oid_blob_3; -+ git_oid oid_tree_1; -+ git_oid oid_tree_2; -+ git_treebuilder *tb; -+ -+ git_packbuilder *pb; -+ git_buf buf = GIT_BUF_INIT; -+ git_indexer *indexer; -+ git_indexer_progress stats; -+ char pack_dir_path[1024]; -+ -+ char sbuf[1024]; -+ const char * repo_path; -+ const char * pack_name_1; -+ const char * pack_name_2; -+ git_str pack_path_1 = GIT_STR_INIT; -+ git_str pack_path_2 = GIT_STR_INIT; -+ git_odb_backend * pack_odb_backend_1; -+ git_odb_backend * pack_odb_backend_2; -+ -+ -+ cl_assert_in_range(0, snprintf(pack_dir_path, sizeof(pack_dir_path), "%s/objects/pack", git_repository_path(_repo)), sizeof(pack_dir_path)); -+ -+ /* Create tree 1 */ -+ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_2, _repo, LIT_LEN("thinpack blob 2"))); -+ -+ -+ cl_git_pass(git_treebuilder_new(&tb, _repo, NULL)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob2", &oid_blob_2, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_1, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_1 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_1); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ -+ /* Create tree 2 */ -+ -+ cl_git_pass(git_treebuilder_clear(tb)); -+ /* blob 1 
won't be used, but we add it anyway to test that just "declaring" an object doesn't -+ necessarily cause its inclusion in the next thin packfile. It must only be included if new. */ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_3, _repo, LIT_LEN("thinpack blob 3"))); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob3", &oid_blob_3, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_2, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_2 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_2); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ git_treebuilder_free(tb); -+ -+ /* Assertions */ -+ -+ assert(pack_name_1); -+ assert(pack_name_2); -+ -+ repo_path = git_repository_path(_repo); -+ -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_1); -+ git_str_joinpath(&pack_path_1, repo_path, sbuf); -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_2); -+ git_str_joinpath(&pack_path_2, repo_path, sbuf); -+ -+ /* If they're the same, something definitely went wrong. 
*/ -+ cl_assert(strcmp(pack_name_1, pack_name_2) != 0); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_1, pack_path_1.ptr)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_1)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_2)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_3)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_1)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_2)); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_2, pack_path_2.ptr)); -+ /* blob 1 is already in the packfile 1, so packfile 2 must not include it, in order to be _thin_. */ -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_1)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_2)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_3)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_1)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_2)); -+ -+ pack_odb_backend_1->free(pack_odb_backend_1); -+ pack_odb_backend_2->free(pack_odb_backend_2); -+ free((void *)pack_name_1); -+ free((void *)pack_name_2); -+ git_str_dispose(&pack_path_1); -+ git_str_dispose(&pack_path_2); -+ -+} diff --git a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch b/packaging/patches/libgit2-packbuilder-callback-interruptible.patch deleted file mode 100644 index c67822ff7..000000000 --- a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch +++ /dev/null @@ -1,930 +0,0 @@ -commit e9823c5da4fa977c46bcb97167fbdd0d70adb5ff -Author: Robert Hensing -Date: Mon Aug 26 20:07:04 2024 +0200 - - Make packbuilder interruptible using progress callback - - Forward errors from packbuilder->progress_cb - - This allows the callback to terminate long-running operations when - the application is interrupted. 
- -diff --git a/include/git2/pack.h b/include/git2/pack.h -index 0f6bd2ab9..bee72a6c0 100644 ---- a/include/git2/pack.h -+++ b/include/git2/pack.h -@@ -247,6 +247,9 @@ typedef int GIT_CALLBACK(git_packbuilder_progress)( - * @param progress_cb Function to call with progress information during - * pack building. Be aware that this is called inline with pack building - * operations, so performance may be affected. -+ * When progress_cb returns an error, the pack building process will be -+ * aborted and the error will be returned from the invoked function. -+ * `pb` must then be freed. - * @param progress_cb_payload Payload for progress callback. - * @return 0 or an error code - */ -diff --git a/src/libgit2/pack-objects.c b/src/libgit2/pack-objects.c -index b2d80cba9..7c331c2d5 100644 ---- a/src/libgit2/pack-objects.c -+++ b/src/libgit2/pack-objects.c -@@ -932,6 +932,9 @@ static int report_delta_progress( - { - int ret; - -+ if (pb->failure) -+ return pb->failure; -+ - if (pb->progress_cb) { - uint64_t current_time = git_time_monotonic(); - uint64_t elapsed = current_time - pb->last_progress_report_time; -@@ -943,8 +946,10 @@ static int report_delta_progress( - GIT_PACKBUILDER_DELTAFICATION, - count, pb->nr_objects, pb->progress_cb_payload); - -- if (ret) -+ if (ret) { -+ pb->failure = ret; - return git_error_set_after_callback(ret); -+ } - } - } - -@@ -976,7 +981,10 @@ static int find_deltas(git_packbuilder *pb, git_pobject **list, - } - - pb->nr_deltified += 1; -- report_delta_progress(pb, pb->nr_deltified, false); -+ if ((error = report_delta_progress(pb, pb->nr_deltified, false)) < 0) { -+ GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); -+ goto on_error; -+ } - - po = *list++; - (*list_size)--; -@@ -1124,6 +1132,10 @@ struct thread_params { - size_t depth; - size_t working; - size_t data_ready; -+ -+ /* A pb->progress_cb can stop the packing process by returning an error. -+ When that happens, all threads observe the error and stop voluntarily. 
*/ -+ bool stopped; - }; - - static void *threaded_find_deltas(void *arg) -@@ -1133,7 +1145,12 @@ static void *threaded_find_deltas(void *arg) - while (me->remaining) { - if (find_deltas(me->pb, me->list, &me->remaining, - me->window, me->depth) < 0) { -- ; /* TODO */ -+ me->stopped = true; -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -+ me->working = false; -+ git_cond_signal(&me->pb->progress_cond); -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_unlock(me->pb) == 0, NULL); -+ return NULL; - } - - GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -@@ -1175,8 +1192,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - pb->nr_threads = git__online_cpus(); - - if (pb->nr_threads <= 1) { -- find_deltas(pb, list, &list_size, window, depth); -- return 0; -+ return find_deltas(pb, list, &list_size, window, depth); - } - - p = git__mallocarray(pb->nr_threads, sizeof(*p)); -@@ -1195,6 +1211,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - p[i].depth = depth; - p[i].working = 1; - p[i].data_ready = 0; -+ p[i].stopped = 0; - - /* try to split chunks on "path" boundaries */ - while (sub_size && sub_size < list_size && -@@ -1262,7 +1279,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - (!victim || victim->remaining < p[i].remaining)) - victim = &p[i]; - -- if (victim) { -+ if (victim && !target->stopped) { - sub_size = victim->remaining / 2; - list = victim->list + victim->list_size - sub_size; - while (sub_size && list[0]->hash && -@@ -1286,7 +1303,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - target->list_size = sub_size; - target->remaining = sub_size; -- target->working = 1; -+ target->working = 1; /* even when target->stopped, so that we don't process this thread again */ - GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); - - if (git_mutex_lock(&target->mutex)) { -@@ -1299,7 +1316,7 @@ static int 
ll_find_deltas(git_packbuilder *pb, git_pobject **list, - git_cond_signal(&target->cond); - git_mutex_unlock(&target->mutex); - -- if (!sub_size) { -+ if (target->stopped || !sub_size) { - git_thread_join(&target->thread, NULL); - git_cond_free(&target->cond); - git_mutex_free(&target->mutex); -@@ -1308,7 +1325,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - - git__free(p); -- return 0; -+ return pb->failure; - } - - #else -@@ -1319,6 +1336,7 @@ int git_packbuilder__prepare(git_packbuilder *pb) - { - git_pobject **delta_list; - size_t i, n = 0; -+ int error; - - if (pb->nr_objects == 0 || pb->done) - return 0; /* nothing to do */ -@@ -1327,8 +1345,10 @@ int git_packbuilder__prepare(git_packbuilder *pb) - * Although we do not report progress during deltafication, we - * at least report that we are in the deltafication stage - */ -- if (pb->progress_cb) -- pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload); -+ if (pb->progress_cb) { -+ if ((error = pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload)) < 0) -+ return git_error_set_after_callback(error); -+ } - - delta_list = git__mallocarray(pb->nr_objects, sizeof(*delta_list)); - GIT_ERROR_CHECK_ALLOC(delta_list); -@@ -1345,31 +1365,33 @@ int git_packbuilder__prepare(git_packbuilder *pb) - - if (n > 1) { - git__tsort((void **)delta_list, n, type_size_sort); -- if (ll_find_deltas(pb, delta_list, n, -+ if ((error = ll_find_deltas(pb, delta_list, n, - GIT_PACK_WINDOW + 1, -- GIT_PACK_DEPTH) < 0) { -+ GIT_PACK_DEPTH)) < 0) { - git__free(delta_list); -- return -1; -+ return error; - } - } - -- report_delta_progress(pb, pb->nr_objects, true); -+ error = report_delta_progress(pb, pb->nr_objects, true); - - pb->done = true; - git__free(delta_list); -- return 0; -+ return error; - } - --#define PREPARE_PACK if (git_packbuilder__prepare(pb) < 0) { return -1; } -+#define PREPARE_PACK error = git_packbuilder__prepare(pb); 
if (error < 0) { return error; } - - int git_packbuilder_foreach(git_packbuilder *pb, int (*cb)(void *buf, size_t size, void *payload), void *payload) - { -+ int error; - PREPARE_PACK; - return write_pack(pb, cb, payload); - } - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb) - { -+ int error; - PREPARE_PACK; - - return write_pack(pb, &write_pack_buf, buf); -diff --git a/src/libgit2/pack-objects.h b/src/libgit2/pack-objects.h -index bbc8b9430..380a28ebe 100644 ---- a/src/libgit2/pack-objects.h -+++ b/src/libgit2/pack-objects.h -@@ -100,6 +100,10 @@ struct git_packbuilder { - uint64_t last_progress_report_time; - - bool done; -+ -+ /* A non-zero error code in failure causes all threads to shut themselves -+ down. Some functions will return this error code. */ -+ volatile int failure; - }; - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb); -diff --git a/tests/libgit2/pack/cancel.c b/tests/libgit2/pack/cancel.c -new file mode 100644 -index 000000000..a0aa9716a ---- /dev/null -+++ b/tests/libgit2/pack/cancel.c -@@ -0,0 +1,240 @@ -+#include "clar_libgit2.h" -+#include "futils.h" -+#include "pack.h" -+#include "hash.h" -+#include "iterator.h" -+#include "vector.h" -+#include "posix.h" -+#include "hash.h" -+#include "pack-objects.h" -+ -+static git_repository *_repo; -+static git_revwalk *_revwalker; -+static git_packbuilder *_packbuilder; -+static git_indexer *_indexer; -+static git_vector _commits; -+static int _commits_is_initialized; -+static git_indexer_progress _stats; -+ -+extern bool git_disable_pack_keep_file_checks; -+ -+static void pack_packbuilder_init(const char *sandbox) { -+ _repo = cl_git_sandbox_init(sandbox); -+ /* cl_git_pass(p_chdir(sandbox)); */ -+ cl_git_pass(git_revwalk_new(&_revwalker, _repo)); -+ cl_git_pass(git_packbuilder_new(&_packbuilder, _repo)); -+ cl_git_pass(git_vector_init(&_commits, 0, NULL)); -+ _commits_is_initialized = 1; -+ memset(&_stats, 0, sizeof(_stats)); -+ p_fsync__cnt = 0; -+} -+ -+void 
test_pack_cancel__initialize(void) -+{ -+ pack_packbuilder_init("small.git"); -+} -+ -+void test_pack_cancel__cleanup(void) -+{ -+ git_oid *o; -+ unsigned int i; -+ -+ cl_git_pass(git_libgit2_opts(GIT_OPT_ENABLE_FSYNC_GITDIR, 0)); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, false)); -+ -+ if (_commits_is_initialized) { -+ _commits_is_initialized = 0; -+ git_vector_foreach(&_commits, i, o) { -+ git__free(o); -+ } -+ git_vector_free(&_commits); -+ } -+ -+ git_packbuilder_free(_packbuilder); -+ _packbuilder = NULL; -+ -+ git_revwalk_free(_revwalker); -+ _revwalker = NULL; -+ -+ git_indexer_free(_indexer); -+ _indexer = NULL; -+ -+ /* cl_git_pass(p_chdir("..")); */ -+ cl_git_sandbox_cleanup(); -+ _repo = NULL; -+} -+ -+static int seed_packbuilder(void) -+{ -+ int error; -+ git_oid oid, *o; -+ unsigned int i; -+ -+ git_revwalk_sorting(_revwalker, GIT_SORT_TIME); -+ cl_git_pass(git_revwalk_push_ref(_revwalker, "HEAD")); -+ -+ while (git_revwalk_next(&oid, _revwalker) == 0) { -+ o = git__malloc(sizeof(git_oid)); -+ cl_assert(o != NULL); -+ git_oid_cpy(o, &oid); -+ cl_git_pass(git_vector_insert(&_commits, o)); -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ if((error = git_packbuilder_insert(_packbuilder, o, NULL)) < 0) -+ return error; -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ git_object *obj; -+ cl_git_pass(git_object_lookup(&obj, _repo, o, GIT_OBJECT_COMMIT)); -+ error = git_packbuilder_insert_tree(_packbuilder, -+ git_commit_tree_id((git_commit *)obj)); -+ git_object_free(obj); -+ if (error < 0) -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int fail_stage; -+ -+static int packbuilder_cancel_after_n_calls_cb(int stage, uint32_t current, uint32_t total, void *payload) -+{ -+ -+ /* Force the callback to run again on the next opportunity regardless -+ of how fast we're running. 
*/ -+ _packbuilder->last_progress_report_time = 0; -+ -+ if (stage == fail_stage) { -+ int *calls = (int *)payload; -+ int n = *calls; -+ /* Always decrement, including past zero. This way the error is only -+ triggered once, making sure it is picked up immediately. */ -+ --*calls; -+ if (n == 0) -+ return GIT_EUSER; -+ } -+ -+ return 0; -+} -+ -+static void test_cancel(int n) -+{ -+ -+ int calls_remaining = n; -+ int err; -+ git_buf buf = GIT_BUF_INIT; -+ -+ /* Switch to a small repository, so that `packbuilder_cancel_after_n_calls_cb` -+ can hack the time to call the callback on every opportunity. */ -+ -+ cl_git_pass(git_packbuilder_set_callbacks(_packbuilder, &packbuilder_cancel_after_n_calls_cb, &calls_remaining)); -+ err = seed_packbuilder(); -+ if (!err) -+ err = git_packbuilder_write_buf(&buf, _packbuilder); -+ -+ cl_assert_equal_i(GIT_EUSER, err); -+} -+void test_pack_cancel__cancel_after_add_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_add_1(void) -+{ -+ cl_skip(); -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_delta_1(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+#else -+ cl_skip(); -+#endif -+} -+ -+void test_pack_cancel__cancel_after_delta_1_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+#else -+ cl_skip(); -+#endif -+} -+ -+static int foreach_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *) payload; -+ 
cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return 0; -+} -+ -+void test_pack_cancel__foreach(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_pass(git_packbuilder_foreach(_packbuilder, foreach_cb, idx)); -+ cl_git_pass(git_indexer_commit(idx, &_stats)); -+ git_indexer_free(idx); -+} -+ -+static int foreach_cancel_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *)payload; -+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return (_stats.total_objects > 2) ? -1111 : 0; -+} -+ -+void test_pack_cancel__foreach_with_cancel(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_fail_with( -+ git_packbuilder_foreach(_packbuilder, foreach_cancel_cb, idx), -1111); -+ git_indexer_free(idx); -+} -+ -+void test_pack_cancel__keep_file_check(void) -+{ -+ assert(!git_disable_pack_keep_file_checks); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, true)); -+ assert(git_disable_pack_keep_file_checks); -+} -diff --git a/tests/resources/small.git/HEAD b/tests/resources/small.git/HEAD -new file mode 100644 -index 0000000000000000000000000000000000000000..cb089cd89a7d7686d284d8761201649346b5aa1c -GIT binary patch -literal 23 -ecmXR)O|w!cN=+-)&qz&7Db~+TEG|hc;sO9;xClW2 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/config b/tests/resources/small.git/config -new file mode 100644 -index 0000000000000000000000000000000000000000..07d359d07cf1ed0c0074fdad71ffff5942f0adfa -GIT binary patch -literal 66 -zcmaz}&M!)h<>D+#Eyypk5{uv*03B5png9R* - -literal 0 -HcmV?d00001 - -diff --git 
a/tests/resources/small.git/description b/tests/resources/small.git/description -new file mode 100644 -index 0000000000000000000000000000000000000000..498b267a8c7812490d6479839c5577eaaec79d62 -GIT binary patch -literal 73 -zcmWH|%S+5nO;IRHEyyp$t+PQ$;d2LNXyJgRZve!Elw`VEGWs$&r??@ -Q$yWgB0LrH#Y0~2Y0PnOK(EtDd - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/applypatch-msg.sample b/tests/resources/small.git/hooks/applypatch-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..dcbf8167fa503f96ff6a39c68409007eadc9b1f3 -GIT binary patch -literal 535 -zcmY+AX;Q;542A#a6e8^~FyI8r&I~hf2QJ{GO6(?HuvEG*+#R{4EI%zhfA8r{j%sh$ -zHE~E-UtQd8{bq4@*S%jq3@bmxwQDXGv#o!N`o3AHMw3xD)hy0#>&E&zzl%vRffomqo=v6>_2NRa#TwDdYvTVQyueO*15Nlo%=#DXgC0bhF3vTa`LQGaO9;jeD$OP?~ -za$G4Q{z+Q_{5V?5h;a-noM$P{<>Q~j4o7u%#P6^o^16{y*jU=-K8GYD_dUtdj4FSx -zSC0C!DvAnv%S!4dgk -XB^)11aoGMJPCqWs%IS0YSv(eBT&%T6 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/commit-msg.sample b/tests/resources/small.git/hooks/commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..f3780f92349638ebe32f6baf24c7c3027675d7c9 -GIT binary patch -literal 953 -zcmaJy@-{3h^^Cx;#d0zEA@DDc$nY4ez&|=%jTg@_HU*ub=!!y$xW09TSjlj -z(`I@QCsM`!9&80$I98wsQ8yK#)Orb<8re8FjkKh630D$QUDwi~(gkX=RunYm$rDjk -zlp%RUSnzA#6yjdG5?T?2DcYKp+v_lts0ljn&bh3J0bD5@N@1UKZ190O6ZeWr-BuZ^ -zWRebCX%(%=Xoj#(xYk1Cjtr!=tyBesf@m6}8zY6Ijbz9i9ziI_jG9MvR -zDH*e>^ga9IR?2wrSrAVm;eButj4Y>7(E2?b~jsu>& -zRKCJ7bp#19sqYh627wD%D9R$8=Ml$TNlumDypl~$jBu*G>5fIR^FB0h0Ex&TGZNr> -zL5hs1_K>taRb!|ThN9ns7^@4MXKP+6aGI_UK)T-M#rcP$;kN(Vcf#P)+5GzWa{l@J -z>-E{`$1iiNVYxq27}j;uo%;)r3kJI2xCFF~Ux;$Q%) -wjbk6JlDCM`jU&P+UVOvg`|iYl<7~9k>HHB4I;pdlQ=I-^$DrHaN$@lH1?P!0U;qFB - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/fsmonitor-watchman.sample b/tests/resources/small.git/hooks/fsmonitor-watchman.sample -new file mode 100755 -index 
0000000000000000000000000000000000000000..41184ebc318c159f51cd1ebe2290559805df89d8 -GIT binary patch -literal 4777 -zcmbtYYi}F368$Xwipg4lq(BeHMvzvH-4;n7DGJBPqq#tw3aed8+IU5-m)yvL>;Cqh -z8FFRGj$`9CA8aoJ?j^$%==FV``-=rhLcPW`McSytRm~mEO7_&_cAVZrf1fFy*ha@8oe%*-aBYE -zcjzZg>LOkgxuUr-XJnHyD;zmPnRaSc#!k_P*d_BttRdc+J6G7za5#+^Y1nkc2Oowk`ya47uUR3Feu?B(w;S{(VYzxh}q-=#zP@uxSx{wbyPUMFU;K(06)$o{07&3yI?q{GqMcQ1c_^M<0< -zF4acAV)Il-V(rCTC1(;bsZ*}bl8dmejAk~yb`B}!^0;g^(o9kGUfZfDOvyp@x4OQt -zSgWh6T|3eq;9MFs8-#z+FDM1h(IjRUP|``PxupgJ7CUHOH90gbgl^2~97`?_X{P)) -zB*$r1cDlF-%azKND}?Gv`2K8-9v5e`gQoft=j?T<&a13c^!wY_$D`5z-X1g?ty&6- -zQN50{8?bUk9AI->^W@~~nkOghHIC2YN+AXkLQG_2-{Pq3%{`3KUMeG$iIn%%^6*NYb -zn|_BdV#C)n4565VccX;uT8&z3vSi!HXGbUj2B!R -zdz~&#fk#L-&k$fLwo$4?>12g@AXOKFekuo#6EHB%gmpD?1eyh%N8s{2wGoTu -z*@6cEZ^ZW!FAF_|JL`NkV7k}0ow|-2jHwbgH0;c@Dq*o?@&c*HnGdyx6^su8Qk%2{ -z*ye(dxO*6-&>qn1+zw}tc6;=sOX{4WB=VqjTS^))y1jlX2Q;=e!qMmFA5lC$#;BxC -z=Y%tRpWxb+_uQAvAw7Q{HGV#R$xb&udLCzZ+HN?kTyB};1EJ8UlQ5!>5eGW@)RX0n -zkjj>EF!3=0Gl^8dzv$B^NMGRxJoqN4A`xq-@wCbrx*u2NmIJ1xZ%H -zh;{|4T3(!E9sY#Ni(wUJYs1MmIc9bl)(4Nl3_wD_BWB>i<1S(LX7m*{Q7PU$muMS* -zM!%0EZx-Vw=Zey;erC?SNxF;pY@^A%-krqzfLV2meBp1vWdyArFYn`DD19T)Hw(?n -z)}{NP(Lk(o*?gl#B@pP7^*r|=;PIDT4|F#{2Hzh-AL0Rv$6uT;n|WzE4=slK?on@(fZeGhRgQCu56qB -z{+n81Az96qnQjMY*-*r-KV*7;Z#4QuJRJJV$M^KdldiMhj?ImK6~FvwJ*L5a){QoM=L5TYHkGO1$UrO3`a>{?Opw|b -zG(#59NQ#jFL9v~vgOVkM@^^(^A}onOE))yWEwhIlk&{ZyseZ^O0b=w8&O=BK{k<5B -k^Q-B@eG}LeHrquz%(SVEp_N)VhYZikCW__82JXfD17`J9Qvd(} - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-applypatch.sample b/tests/resources/small.git/hooks/pre-applypatch.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..625837e25f91421b8809a097f4a3103dd387ef31 -GIT binary patch -literal 481 -zcmY+ATTa6;5Jms9iouO45IBJXEg&Jm9@v1LPHMM_ZR|;#6tQh$71hSXq*MxP;V& -zj0cY7SCL=x4`a46sF)C>94Gk%=3q$W2s;j6iHtB2$R0%gix4oK@&T~=ALd_o*CKxt -I-`Pv{1Bpzc>;M1& - -literal 0 -HcmV?d00001 - -diff --git 
a/tests/resources/small.git/hooks/pre-commit.sample b/tests/resources/small.git/hooks/pre-commit.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..10b39b2e26981b8f87ea424e735ef87359066dbb -GIT binary patch -literal 1706 -zcmZuxU2ohr5PY_N#pZ0-F<{-v&v-X^RA+u>k}E$4d&uD7=g_fA8+pNNV=4s0|iD3p<=DTXClTS -zXV23tJ;ECmN@M0j@zUAKEYW@3bv!SeYZ8ZH`YQNTApFVNc;F|9r5p4TqGs=>8E?6y -zi|gY{iM#PG1nL?UE9YCnWTk72kgZPG*Usqw!~Qd3c?~@w2?%eg@~)+VlSs6N5Yf2^ -zz;owF#K#r^&KMq1A`oqVGFpD&-!Pv|Rc -zO3KSqA@h9nSc%bm`0)Amk6*J}@14J*1-219l%%7D!Pl}UK>|lVi0Dfgu2jN3WC!uL -z0ej??b2iSehVgdnWHmZV4kUo*QL#aiIp}U=9x)IXk}JJ7VQ;CI9Rtn5e0VcjbYcVt+`x5D+svCGD;Z5hm*E$jSEQZ%SQ(}oLgslTvrKK@9Qf#b!hajVFnp9@oIix;NcI9Wk -xjnh0ya!AWet{I7YpD;y6HXyzI*lfSvH=o6*7mJZPkuaYpm>vzZ`wyGEBtOQPo|pgt - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-push.sample b/tests/resources/small.git/hooks/pre-push.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..02cbd80c287f959fe33975bb66c56293e3f5b396 -GIT binary patch -literal 1431 -zcmaJ>U60!~5PUX&#a1@z9B{IIZkjLT0t5kq9#8~D(I5{+8&J~9;#ndUk~-ZT`r|uG -z$#K$$J{TsKs*LP1}9!GoZ@4I4myMMG_di|of -z%?llx{O8TS-#^;(OioEmPy%kwWQBA1OMzV{hsQ8XFzS1k!~YQoLa5 -zhtP1fA$q6VmMbbAC_9)4I628k*O5J$NR19uHe4QYDK<==I~SQk)Nu%xQ~KH -z53w=!ke(FGb_PpnZfd*+hnXDTn;2*`u^~;?+5C~cn?bRka7NR%06%e6O91{MAgN6J -zmlO8{Biw4&wr&&(z4p3eln`E}XR9m9bNYZ7Ibrg(4yZIXrfgD7N*AFD7L3YSM#j}% -zo__rOS5fr;@8UM<6cl+cv_$YB$PQ&9dv($eM*))g!_cu!QcSh-mqE9i#QDZT)=o#` -z?8!RtE?w6p?GkGZ-6yt_p~5~4ecu|Sf^)6096%h*q-eNiEA1;Xwg)p~Q&iGSG7-IQ -z9aII&`ps$WOojFA`*bjGkFk|E@sHHuD}W^d`7YJ3YE^zrQnqR -zGoq?;YGKe)93o|_=^f%3U1KYZGPOXRRxK7w`UUbMMa3<86OmVH!EKP$8RCrn9mWX+ -zC?9yF!fRVLmud3hF<}x;;sR}f(*r}6Gap3fR6zLHR~kbMgD{98N`L+r&?3p~*0+FX -zcAL%j=(SO}xTJUTvA`&Lf`2mv4koPG9&|;2+68$XxiXKL@ma;l5d2^5Ba_rPh_DHI-u1#&_upttZXp;no03$20|NFiM -zK#D#xQ>!Z3JkX8T-LDVm!B5j7y_{;JDmmTTef+K1oIiPzeEr+Ai*<2PUgnG4^ZB>p -z_fkAvoR1emuf~ri^K$-px=4#D-vY9w& -z`bCv#2zVn=YnJyeNey(Y 
-zRh`9vtLw~A+5zsjp|W0Nsa|29Rm!B>OoG5a+vi;ari8O>KkU!KAWg_fa3btK2x*_@ -z0bEc7J;Ubghm}n9bOi(Sv_B66nQ7U)J7f0fO}8Wuf*uorcIgEG -zOHc|-V6+HlRhOP}?Cn?@5iwSl43abmBA^2lyL$+cpabCGVES+v^j^FO_}?FIp%En%Ll?Z*7*}TwrZyg5OSZ9rY-`aU~Mc-jjv{Ll)FLMgtB4ujktfQ`Xhqrka -zT=P!A;9w^;Z?PqpLwOLu=cj3L>TdUKw2;DMu)`oVkj}#bcDx4tYg=j%D`+i{W~fVM -zVmZ>W9VMyin9c-0KzI_;iZ-g|OyzuG`Yq%(%dvl;ifnVr0;jWE&S`z|rQu=!yHBBO -zx`OJ;oOQ(KKM<$(bC38o>pD0%|HA(E0TRw7qj$fJ_pRN+7Nm>dSC(gLg{(`t+5Z=?o+}wXU4tHy+&%F&aRhFebeEhR2R5|$#Ycbp^w@t -zTl%=f1t=w+WpJzF<|CE@?SCNAz)%9?w33lQ8vrHJqPfH9@}qs*QXOG71W=ylx;wOB -zcx!Bj^)Yy6WX$a^vBkBJ5CobqlaDx_B0c<3b+8)f84LCrt;e;qxc+7>VbwVK{skNv!wvBiTa^9Iu -zkwP;VK)jH$WJ{`MRwAA9fal!y0dtV;FWg8PTkWU>CwnqD>1ZX2B@;$DlX%C5MI+}{ -z9xQVnffR*~v2KAUj*hCdgul~`bk#mk`o>zk9)<2Uc8?hUZAEvd!`9em)~$Z)zev>w^8 -zyAgCP_$&Y)7HSQ84`xG}OeTavaEswwF|8Xpi5iZzZa@hCiv(J-%bfFC&)HLlO+Rhw -zG6g?9eL5&A!SuJnQ6}LxG%tU+@vZ`i+!+Rz6iYvsTdhnPo7lW{m-}{hya@viX4)XZ -zngaw+j;gloB#|UwI@8sOmQpc`h+bicQJnQIB5eifIMQNgD2+oai33m!34~xU|0Azj -zhu$8z+T5^;Pxx@d{N)pzOJLSa^e;aDf$W%N5XcOf!mGC9l9j$Ev2h6N+6ZQC+CJzl -zaM7?S!SrFLS2DASjj(h6y1WN3N?|bmqmyzm!&nLoE|`rKBOc_yDF$a#FsUn!IQf(t -zdC&Us(kQz*7mvH^j*^MC@>wTDb}g%~sx*ng#>{@lR=XG-Z5_ -z#<9*Oh0joMzt;nS)ObAp)347`D=}r-;nV!TbIq&xrGRGsF6fZg+!VkfUei@_&l-M& -zPqQ+Dw)RV}+)I8RuqAxa`Pv8e&!_gXS=e2-un>=Ktn}-;%lLZxaVn?Q>yZCb2R3Wk -z77zr%;Rq&h|2ncqyKYmFI0148JVY7Q$V5p=dWj+Qqpu%i|xp2C=WaOb2Wudn^h0EcD%$p9YVU1fnoRV9`(cy(vv6K>FXS!2jY>1GnU--7)4usH&K -zao*&P^@9~YmUe|ZdLW@C>H;!*Vt3>Nw4M*;=?j(TBD#O@XCv0|MEhA;z}kTFRv@`tPHhp=&Yh -zg%Zhg4i7o_k{a5i&f5;tZ==%}^Sn4aD_6%qs_XAuJt&EumdH4Yu`UjT<-+XHTuHss+b -YOmM2;hq8Egm*4=7_P9T{21QBYH*F=mfB*mh - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/prepare-commit-msg.sample b/tests/resources/small.git/hooks/prepare-commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..b1970da1b6d3f42f00069fd17c325de72cda812e -GIT binary patch -literal 1702 -zcmb_cTW{Mo6n>t6#i?x6xmZ$SFLf{QfG*3r0L?Pg?px55l8$UTGO3bO;spKi{V3XX 
-z))weX0X>M9bNMcZ-6yG%>(n}JI2|25dr}WZBP@ih?JX^+@ -zu#5O48P>yRX(mfDIhYP)doc1&TADZa@ZGpusJ$6G+e$ZMcmC -zoOosDQPS}l{H?YPsq(4;0SGkATa9eeqAaDcjq8n2wALbFwU@2i@FAaRV!=uw-nwx1gKn2SvY -z>Ff>;2sg!+Hxfkwv1lsiii=p6WenF=5)6LZcQaZ=aS_}+-4Y&?!@HWh|<^gJ21!|T@+%On#w6azxPHV}XsRbe*w -zR_TZ2XEsQa1lPK~biYqg@0-RW@5J1@=<87cFzEUABdCoFH2CZo?}l(Z*!OFqUxo>K -z_d`l#4d9|H6;VPT{X?^{VJ>oL|D7K{BJwwqB>`YcPoGk+9hbvHnoQ{EM|kPgD_`wk -zKm4#2xu;-y`RAm!=L_BnLvJ8$AZm8@?)v<%vwvsw8AF2x6!mTT;c72A_~U9nIq0ST -zv)N0!I!^1p=g8-RQfx5)E_Mb_4I2vtQpI30XZ&t-9h5!Hn - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/push-to-checkout.sample b/tests/resources/small.git/hooks/push-to-checkout.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..a80611e18896f212c390d845e49a3f6d5693b41d -GIT binary patch -literal 2840 -zcmai0U31$u5PXh)#YOS7cE^-rw@uolNhe9&aUS|HtvhX>G$45tVUYj>fRdF?|9kfU -zNR~aG=E)WbEbeyq7JTw}ZuHIE2kUtL<AoeCNptd-NM1aZLhESzC;I`+Ns -zfmNNjdAp^W8#Q*}l>CT7RB9F5(BbI8ly2l~+E};JW|>&d1)=epZ-8vm8ppkbEVn#R -zt30a5A-c(YQR8eM5%;|UAnO>rt!&@x@G@yp+92%w-}%(5P_+P&Wf_zb$f-Qrl5(7z -z2ah(bkE;!DK(&aAMuQ%1TS>ai?wSXCOCSj=_}8x4IbCx^$}9q)whwv)SBt| -zg#MX4;;Oau`m=MI9(^&zPbueY@~>3*ixX%mvR5m_1&nAg@ZKvY1E$O}&EtLiG;mhV -z1xhMIm~fGjmf_#{62f`y;09?I7M1W2tWQvz<}i9lR>OpQyUJi45_&*pQus&EkwY<> -zI|ZAx=*3i9a-)g)hXkvO7>UJ5MNgL(Z+-wpXVcgbSgpmFmbf1~DPA(OVGI&FNLeIE -zNH!_aiH$vsif$_j7=T2{cS(!DOI`~bn@)vSd-0d7xL=DF;UNP|tW}4ih>DvHtu9tY_pbJ6x(6E*hxgC -zzNDao%qlr-IE%YGbS4hF!n!on7#W3$bX-_hbZAaws^nHu#)Dx=WzdbJ>AKzAy@T$x -zSWE^x9+|TEHVEPyaPYa0DOChp?AeHSBBDbZNokQpAY{lE!7geZI=jV)G^2@l)&91Zb1+`T+oq9wWF -zRV~kGTGce0O~p^6mj{kT5kL(pv>r;Lvd7VDX*P>A^Th`$3cWO0L81p4Ysdo3ZP1(SrR-peEdTo;-@bkB((G -zPHYQXUL!@Q$e(OQ;R9r%@Afz+50I7>*^^c&&|E*r-jN)LH=pM4AqMwWxSv|nqjddE -Z4{_hwv8!W(T -zYw`X3V>TCdnSD1ru8&`j=2DIPbCT@SnIgUw>$+lEYP}+x8(BMYnr=iT3*ndq)xzaV -z>I+qjv}vC#8_9M+b1p#uNS0M0)q

8!3p_LRQ0MA3M`!2foxzRUjbFY@}O~(ki=S -zqscnq8cU*dY)D$$cqE}n)V0yIk>CNKHCrndOtSP*HbOb;nbwAHSb;R+gs^?^Dve%) -zoW}t(*D}$>O3ab0TS^-;J|u&sb-PkZzo#kn*#xYt(;FGuwzSb^g&RDiGcOz9TB;Hu`nJh)$W=C=XCSm2AY=$w3G3P-V#Oo+N*;#2 -z4ijJ-pBZ=;T(RTgp_HYrD!uW-dTMfkuqY5jwOy)~gM;#=P^i{!l7`pXTS^s(&^{RU -zydaw}OpS#^D1cXM8?FW+fh`t7D(g;yr6|}fdaNtZBx3hlK~IpkTu3!Qq%R+zAo#t}Bs8^3$vHD+-TGT@`F>H1Cc#WAVW;&$S6%fE2d6@kLS0g&ihIM{}0z -z8#XhD>b>3{(BH|Px7}&lJ4%y1v(CihZJx@8MPoGdl*BJGD;usf*iS7%;{Joe; -zNFuBa>*~o&qETDPo~u&~$FxE1xb^x&(CbE`Y3GfsibL2rl+L;>P6j&Y3U>K$mkp*6 -zd`Q{<^+^&;GskGjwD-%!boR&i-TCA9UOR|@=GYb5x#+dhd7fkaVIR^pol`Mv+rUbmZ43dVL6^S7g3{NsPiG$iy$5EDB% -z6KIgnb$H(n&t3e4E6d4V7w^B?JS}JkG)PM6+X3Co`SQs($O*AA+MG~{S7RJ=cy-l& -z>~%3y`tjfx2>uOutB_^s -ziwG=e=ch|FQ0IkN91US7rhdQkXhwwt$gU0WEVDjo=IPb+?6PC=s8}J*ua(Ms))`UL -fi$|vMHn?H_tSE3ettp-hLlsZCxaLX8(nU;bVRB;Ce6@s#eu2|WvLz>- -zvy(&>Gyfp@+BtKnpqWkKi^+v{4jn_pNw_zeuxETifiGO|)w}OANj2n2D^K=o3j6P6uOL70#cbA{uzWXDlk1wr9GV1X(2W{RuTvjXV -zCmd8u -zH%V`94=q3)Dk)PHNrnFC(T1)Om6f{Usj;u1R->&XoCYVK2V3ZlgZuF?N}1+33OER*x -z*9Z=L=zI8CN>A_^jYjt0F$psO$sL=38q5q|SG)qCN6{^>RFh5E&l5GZ$pEahnF&d+ -z5c>64t}uJPkf~_!VUj#&N%nC-gUMj%=@B=!V>&}xtj2%@-mOm#rQUSJ3(ccmc+fza -znZ#uxF>N?QN5UrIEd!5RgHEfW#;(nKYF+D<*rdshJ$X-z2OZ2X;)nn@KSVdVhaA?}@3;6gZxb4v -zozoWSr{{+!h}zGpumG3H`=AvWpm^9kW;J$Jp^Xl*?8ckr`fqN%c|Z;VC0|cM4vSrk -zH_O8Yvh85nvJp^;``wo8=z0f`FWg?`>gO#y1hjX1{}rTlg9rwIKia8eyGexA3GnuR -z`Rg~XZoW;0pA)vI8=p5!+6sIn#C^FCvR>ffv39h6SCNi9v);%WD;WZ`of_MgwyRWy -z-yY%n*Y>X89W-v4`Ff%bx$Vkn}$!Ay}rnY6F$m-Kg*KD_+;Lx#g4|^&N -I02NaX#p`nv=Kufz - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b b/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b -new file mode 100644 -index 0000000000000000000000000000000000000000..822bc151862ec3763cf2d3fa2372b93bbd3a4b65 -GIT binary patch -literal 30 -mcmb>0i}&W3IZ_@1U=^!a~EV1casc=c+{&un1qQN*i9hD|0|m(2n|iwp*q%W -z%N;b$hu%cM`$TMo*~EnC1BFP&Pfj~;jZVKXQ96s_PhV<-XAROi+@-v8dBLUa`!;GB 
-k^iXlEv8$>R)1G>9th&t3j;s7J{?^9n|7U^`%mXoWC24Q^m!3%@{ - -literal 0 -HcmV?d00001 - From ba7bbcd1daf7deb41afeaf1a9d2d1dce3df0044d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:30:27 -0400 Subject: [PATCH 158/373] Cleanup `Derivation*Goal` names --- src/libstore/build/derivation-building-goal.cc | 2 +- src/libstore/build/derivation-goal.cc | 4 +--- src/libstore/build/derivation-trampoline-goal.cc | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index e8ee945d9..50315dede 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -42,7 +42,7 @@ DerivationBuildingGoal::DerivationBuildingGoal( throw; } - name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); + name = fmt("building derivation '%s'", worker.store.printStorePath(drvPath)); trace("created"); /* Prevent the .chroot directory from being diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 2e57c1708..e50ce8f79 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -43,9 +43,7 @@ DerivationGoal::DerivationGoal( { this->drv = std::make_unique(drv); - name = - fmt("building of '%s' from in-memory derivation", - DerivedPath::Built{makeConstantStorePathRef(drvPath), drv.outputNames()}.to_string(worker.store)); + name = fmt("getting output '%s' from derivation '%s'", wantedOutput, worker.store.printStorePath(drvPath)); trace("created"); mcExpectedBuilds = std::make_unique>(worker.expectedBuilds); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 205f5c427..01c1de75b 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -31,7 +31,7 @@ 
DerivationTrampolineGoal::DerivationTrampolineGoal( void DerivationTrampolineGoal::commonInit() { name = - fmt("outer obtaining drv from '%s' and then building outputs %s", + fmt("obtaining derivation from '%s' and then building outputs %s", drvReq->to_string(worker.store), std::visit( overloaded{ From a629ce3dec9dda5019f7acbe6575dac906564095 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:36:29 -0400 Subject: [PATCH 159/373] Use member initializer list for `Derivation*Goal::drv` --- src/libstore/build/derivation-building-goal.cc | 7 +++---- src/libstore/build/derivation-goal.cc | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 50315dede..e0412c3dd 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -27,16 +27,15 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) : Goal(worker, gaveUpOnSubstitution()) , drvPath(drvPath) + , drv{std::make_unique(drv)} , buildMode(buildMode) { - drv = std::make_unique(drv_); - try { drvOptions = - std::make_unique(DerivationOptions::fromStructuredAttrs(drv->env, drv->structuredAttrs)); + std::make_unique(DerivationOptions::fromStructuredAttrs(drv.env, drv.structuredAttrs)); } catch (Error & e) { e.addTrace({}, "while parsing derivation '%s'", worker.store.printStorePath(drvPath)); throw; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index e50ce8f79..1939ddbfe 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -33,6 +33,7 @@ DerivationGoal::DerivationGoal( : Goal(worker, haveDerivation()) , drvPath(drvPath) , wantedOutput(wantedOutput) + , 
drv{std::make_unique(drv)} , outputHash{[&] { auto outputHashes = staticOutputHashes(worker.evalStore, drv); if (auto * mOutputHash = get(outputHashes, wantedOutput)) @@ -41,7 +42,6 @@ DerivationGoal::DerivationGoal( }()} , buildMode(buildMode) { - this->drv = std::make_unique(drv); name = fmt("getting output '%s' from derivation '%s'", wantedOutput, worker.store.printStorePath(drvPath)); trace("created"); From 07df87652c6883ca6198a1cc6a1202685bb92099 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:38:23 -0400 Subject: [PATCH 160/373] Make keys of `Derivation*Goal` more legible The property that substitution goals come first is still preserved --- src/libstore/build/derivation-building-goal.cc | 6 +----- src/libstore/build/derivation-goal.cc | 6 +----- src/libstore/build/derivation-trampoline-goal.cc | 6 +----- src/libstore/build/drv-output-substitution-goal.cc | 2 -- src/libstore/include/nix/store/build/goal.hh | 12 ++++++++++++ .../include/nix/store/build/substitution-goal.hh | 4 ---- 6 files changed, 15 insertions(+), 21 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index e0412c3dd..65500ac2d 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -66,11 +66,7 @@ DerivationBuildingGoal::~DerivationBuildingGoal() std::string DerivationBuildingGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "bd$"). 
*/ - return "bd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); + return "dd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); } void DerivationBuildingGoal::killChild() diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 1939ddbfe..0509d524f 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -52,11 +52,7 @@ DerivationGoal::DerivationGoal( std::string DerivationGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "b$"). */ - return "b$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ + return "db$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), .output = wantedOutput, }.to_string(worker.store); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 01c1de75b..83384c589 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -58,11 +58,7 @@ static StorePath pathPartOfReq(const SingleDerivedPath & req) std::string DerivationTrampolineGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before "baboon". And - substitution goals, derivation goals, and derivation building goals always happen before - derivation goals (due to "bt$"). 
*/ - return "bt$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ + return "da$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ .drvPath = drvReq, .outputs = wantedOutputs, }.to_string(worker.store); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b6ace4784..209d6d542 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -153,8 +153,6 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( std::string DrvOutputSubstitutionGoal::key() { - /* "a$" ensures substitution goals happen before derivation - goals. */ return "a$" + std::string(id.to_string()); } diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh index 52700d12e..4d57afc0f 100644 --- a/src/libstore/include/nix/store/build/goal.hh +++ b/src/libstore/include/nix/store/build/goal.hh @@ -456,6 +456,18 @@ public: */ virtual void timedOut(Error && ex) = 0; + /** + * Used for comparisons. The order matters a bit for scheduling. We + * want: + * + * 1. Substitution + * 2. Derivation administrativia + * 3. Actual building + * + * Also, ensure that derivations get processed in order of their + * name, i.e. a derivation named "aardvark" always comes before + * "baboon". + */ virtual std::string key() = 0; /** diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 5f6cb6a18..5f33b9aa5 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -58,10 +58,6 @@ public: unreachable(); }; - /** - * We prepend "a$" to the key name to ensure substitution goals - * happen before derivation goals. 
- */ std::string key() override { return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); From 0da430be35ea37abc06428359a35f931fbe51ca8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 18 Sep 2025 15:54:43 -0400 Subject: [PATCH 161/373] Split out `DerivationResolutionGoal` This prepares the way for fixing a few issues. Take 2: was landed before in 8f4a739d0fa05e44589d578f1860b45b8a48f1cc. --- .../build/derivation-building-goal.cc | 147 ++------------ src/libstore/build/derivation-goal.cc | 12 -- .../build/derivation-resolution-goal.cc | 191 ++++++++++++++++++ .../build/derivation-trampoline-goal.cc | 2 - src/libstore/build/worker.cc | 9 + .../store/build/derivation-resolution-goal.hh | 82 ++++++++ .../store/build/derivation-trampoline-goal.hh | 2 +- .../include/nix/store/build/worker.hh | 10 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + tests/functional/build.sh | 9 +- 11 files changed, 315 insertions(+), 151 deletions(-) create mode 100644 src/libstore/build/derivation-resolution-goal.cc create mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 65500ac2d..6bda17d37 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,4 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows @@ -88,18 +89,6 @@ void DerivationBuildingGoal::timedOut(Error && ex) [[maybe_unused]] Done _ = doneFailure({BuildResult::Failure::TimedOut, std::move(ex)}); } -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, 
const ref & rhs) const - { - return *lhs < *rhs; - } -}; - std::string showKnownOutputs(const StoreDirConfig & store, const Derivation & drv) { std::string msg; @@ -124,46 +113,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - /* Copy the input sources from the eval store to the build store. @@ -208,88 +157,22 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ - /* First, the input derivations. 
*/ { - auto & fullDrv = *drv; + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. 
*/ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. */ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - Derivation drvResolved{std::move(*attempt)}; - - auto pathResolved = writeDerivation(worker.store, drvResolved); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); /* TODO https://github.com/NixOS/nix/issues/13247 we should let the calling goal do this, so it has a change to pass @@ -378,7 +261,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { + for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry 
about `inputGoals`, because impure derivations are always resolved above. Can diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 0509d524f..dc12ab55a 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -183,18 +183,6 @@ Goal::Co DerivationGoal::haveDerivation() co_return amDone(g->exitCode, g->ex); } -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - Goal::Co DerivationGoal::repairClosure() { assert(!drv->type().isImpure()); diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc new file mode 100644 index 000000000..6cb9702f4 --- /dev/null +++ b/src/libstore/build/derivation-resolution-goal.cc @@ -0,0 +1,191 @@ +#include "nix/store/build/derivation-resolution-goal.hh" +#include "nix/store/build/worker.hh" +#include "nix/util/util.hh" + +#include + +namespace nix { + +DerivationResolutionGoal::DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) + : Goal(worker, resolveDerivation()) + , drvPath(drvPath) + , drv{std::make_unique(drv)} + , buildMode{buildMode} +{ + name = fmt("resolving derivation '%s'", worker.store.printStorePath(drvPath)); + trace("created"); +} + +std::string DerivationResolutionGoal::key() +{ + return "dc$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + +/** + * Used for `inputGoals` local variable below + */ +struct value_comparison +{ + template + bool operator()(const ref & lhs, const ref & rhs) const + { + return *lhs < *rhs; + } +}; + +Goal::Co DerivationResolutionGoal::resolveDerivation() +{ + Goals waitees; + + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + 
addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + + co_await await(std::move(waitees)); + + trace("all inputs realised"); + + if (nrFailed != 0) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", + Magenta(worker.store.printStorePath(drvPath)), + nrFailed, + nrFailed == 1 ? "dependency" : "dependencies"); + msg += showKnownOutputs(worker.store, *drv); + co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); + } + + /* Gather information necessary for computing the closure and/or + running the build hook. */ + + /* Determine the full set of input paths. */ + + /* First, the input derivations. */ + { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. 
*/ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). So we query the store + directly if there's a problem. 
*/ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + + auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + resolvedDrv = + std::make_unique>(std::move(pathResolved), *std::move(attempt)); + } + } + + co_return amDone(ecSuccess, std::nullopt); +} + +} // namespace nix diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 83384c589..310d23d70 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -64,8 +64,6 @@ std::string DerivationTrampolineGoal::key() }.to_string(worker.store); } -void DerivationTrampolineGoal::timedOut(Error && ex) {} - Goal::Co DerivationTrampolineGoal::init() { trace("need to load derivation from file"); diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 3e6e0bef0..f597abb63 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -80,6 +81,12 @@ std::shared_ptr Worker::makeDerivationGoal( return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } +std::shared_ptr +Worker::makeDerivationResolutionGoal(const StorePath & drvPath, 
const Derivation & drv, BuildMode buildMode) +{ + return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); +} + std::shared_ptr Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { @@ -158,6 +165,8 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); + else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh new file mode 100644 index 000000000..a284843f0 --- /dev/null +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -0,0 +1,82 @@ +#pragma once +///@file + +#include "nix/store/derivations.hh" +#include "nix/store/derivation-options.hh" +#include "nix/store/build/derivation-building-misc.hh" +#include "nix/store/store-api.hh" +#include "nix/store/build/goal.hh" + +namespace nix { + +struct BuilderFailureError; + +/** + * A goal for resolving a derivation. Resolving a derivation (@see + * `Derivation::tryResolve`) simplifies its inputs, replacing + * `inputDrvs` with `inputSrcs`. + * + * Conceptually, we resolve all derivations. For input-addressed + * derivations (that don't transtively depend on content-addressed + * derivations), however, we don't actually use the resolved derivation, + * because the output paths would appear invalid (if we tried to verify + * them), since they are computed from the original, unresolved inputs. 
+ * + * That said, if we ever made the new flavor of input-addressing as described + * in issue #9259, then the input-addressing would be based on the resolved + * inputs, and we like the CA case *would* use the output of this goal. + * + * (The point of this discussion is not to randomly stuff information on + * a yet-unimplemented feature (issue #9259) in the codebase, but + * rather, to illustrate that there is no inherent tension between + * explicit derivation resolution and input-addressing in general. That + * tension only exists with the type of input-addressing we've + * historically used.) + */ +struct DerivationResolutionGoal : public Goal +{ + DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + + /** + * If the derivation needed to be resolved, this is resulting + * resolved derivations and its path. + */ + std::unique_ptr> resolvedDrv; + + void timedOut(Error && ex) override {} + +private: + + /** + * The path of the derivation. + */ + StorePath drvPath; + + /** + * The derivation stored at drvPath. + */ + std::unique_ptr drv; + + /** + * The remainder is state held during the build. + */ + + BuildMode buildMode; + + std::unique_ptr act; + + std::string key() override; + + /** + * The states. 
+ */ + Co resolveDerivation(); + + JobCategory jobCategory() const override + { + return JobCategory::Administration; + }; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh index 79b74f4c1..bfed67f63 100644 --- a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh @@ -109,7 +109,7 @@ struct DerivationTrampolineGoal : public Goal virtual ~DerivationTrampolineGoal(); - void timedOut(Error && ex) override; + void timedOut(Error && ex) override {} std::string key() override; diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index a6de780c1..9660d66b2 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,6 +16,7 @@ namespace nix { /* Forward definition. */ struct DerivationTrampolineGoal; struct DerivationGoal; +struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -111,6 +112,7 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; + std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -224,7 +226,13 @@ public: BuildMode buildMode = bmNormal); /** - * @ref DerivationBuildingGoal "derivation goal" + * @ref DerivationResolutionGoal "derivation resolution goal" + */ + std::shared_ptr + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + + /** + * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); diff --git a/src/libstore/include/nix/store/meson.build 
b/src/libstore/include/nix/store/meson.build index c9e4c36dd..1f04e357a 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -18,6 +18,7 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', + 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 8ec39dac1..a50a3f5fd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -302,6 +302,7 @@ sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', + 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0a19ff7da..c9a39438d 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,7 +178,8 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 2 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -186,11 +187,13 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +# Either x2 or x3 could have failed, x4 depends on both symmetrically +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? 
test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 3 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." From 711e738bf9449c384a065c2b98f6585f3da0da42 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 18:04:57 +0300 Subject: [PATCH 162/373] meson: Simplify asan-options handling even more Instead of specifying env variables all the time we can instead embed the __asan_default_options symbol in all executables / shared objects. This reduces code duplication. --- doc/manual/meson.build | 1 - .../common/asan-options}/asan-options.cc | 2 +- nix-meson-build-support/common/asan-options/meson.build | 7 +++---- src/libexpr-tests/meson.build | 2 +- src/libexpr-tests/package.nix | 1 - src/libfetchers-tests/meson.build | 2 +- src/libfetchers-tests/package.nix | 1 - src/libflake-tests/meson.build | 2 +- src/libflake-tests/package.nix | 1 - src/libstore-tests/meson.build | 4 ++-- src/libstore-tests/package.nix | 1 - src/libutil-tests/meson.build | 2 +- src/libutil-tests/package.nix | 1 - src/nix/meson.build | 1 - tests/functional/test-libstoreconsumer/main.cc | 7 ------- tests/functional/test-libstoreconsumer/meson.build | 3 ++- 16 files changed, 12 insertions(+), 26 deletions(-) rename {src/nix => nix-meson-build-support/common/asan-options}/asan-options.cc (71%) diff --git a/doc/manual/meson.build b/doc/manual/meson.build index a5672f0ad..2e372dedd 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -15,7 +15,6 @@ pymod = import('python') python = pymod.find_installation('python3') nix_env_for_docs = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', 'HOME' : '/dummy', 'NIX_CONF_DIR' : '/dummy', 'NIX_SSL_CERT_FILE' : '/dummy/no-ca-bundle.crt', diff --git a/src/nix/asan-options.cc 
b/nix-meson-build-support/common/asan-options/asan-options.cc similarity index 71% rename from src/nix/asan-options.cc rename to nix-meson-build-support/common/asan-options/asan-options.cc index 256f34cbe..651354bac 100644 --- a/src/nix/asan-options.cc +++ b/nix-meson-build-support/common/asan-options/asan-options.cc @@ -1,4 +1,4 @@ -extern "C" [[gnu::retain]] const char * __asan_default_options() +extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options() { // We leak a bunch of memory knowingly on purpose. It's not worthwhile to // diagnose that memory being leaked for now. diff --git a/nix-meson-build-support/common/asan-options/meson.build b/nix-meson-build-support/common/asan-options/meson.build index 17880b0ed..80527b5a9 100644 --- a/nix-meson-build-support/common/asan-options/meson.build +++ b/nix-meson-build-support/common/asan-options/meson.build @@ -1,7 +1,3 @@ -asan_test_options_env = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', -} - # Clang gets grumpy about missing libasan symbols if -shared-libasan is not # passed when building shared libs, at least on Linux if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefined' in get_option( @@ -10,3 +6,6 @@ if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefi add_project_link_arguments('-shared-libasan', language : 'cpp') endif +if 'address' in get_option('b_sanitize') + deps_other += declare_dependency(sources : 'asan-options.cc') +endif diff --git a/src/libexpr-tests/meson.build b/src/libexpr-tests/meson.build index d1700b11d..c5dafe0de 100644 --- a/src/libexpr-tests/meson.build +++ b/src/libexpr-tests/meson.build @@ -82,7 +82,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libexpr-tests/package.nix b/src/libexpr-tests/package.nix index 
c36aa2dc7..51d52e935 100644 --- a/src/libexpr-tests/package.nix +++ b/src/libexpr-tests/package.nix @@ -62,7 +62,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 905e06db0..a18f64d79 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -63,7 +63,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libfetchers-tests/package.nix b/src/libfetchers-tests/package.nix index 8e82430d7..780618725 100644 --- a/src/libfetchers-tests/package.nix +++ b/src/libfetchers-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index a75603970..59094abe8 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -58,7 +58,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'NIX_CONFIG' : 'extra-experimental-features = flakes', 'HOME' : meson.current_build_dir() / 'test-home', diff --git a/src/libflake-tests/package.nix b/src/libflake-tests/package.nix index 09812a57b..397ef4192 100644 --- a/src/libflake-tests/package.nix +++ b/src/libflake-tests/package.nix @@ -59,7 +59,6 @@ mkMesonExecutable 
(finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } ('' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} export NIX_CONFIG="extra-experimental-features = flakes" ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 399e2abd5..e8e90ad81 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -104,7 +104,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'HOME' : meson.current_build_dir() / 'test-home', 'NIX_REMOTE' : meson.current_build_dir() / 'test-home' / 'store', @@ -138,7 +138,7 @@ if get_option('benchmarks') benchmark( 'nix-store-benchmarks', benchmark_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, ) diff --git a/src/libstore-tests/package.nix b/src/libstore-tests/package.nix index d5255f4f9..90e6af519 100644 --- a/src/libstore-tests/package.nix +++ b/src/libstore-tests/package.nix @@ -83,7 +83,6 @@ mkMesonExecutable (finalAttrs: { } ( '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${data + "/src/libstore-tests/data"} export NIX_REMOTE=$HOME/store ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index 87af49933..d84dbbb68 100644 --- a/src/libutil-tests/meson.build +++ b/src/libutil-tests/meson.build @@ -97,7 +97,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libutil-tests/package.nix b/src/libutil-tests/package.nix index 
077d36a4d..c06de6894 100644 --- a/src/libutil-tests/package.nix +++ b/src/libutil-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/nix/meson.build b/src/nix/meson.build index 9bee2d147..e989e8016 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -61,7 +61,6 @@ subdir('nix-meson-build-support/generate-header') nix_sources = [ config_priv_h ] + files( 'add-to-store.cc', 'app.cc', - 'asan-options.cc', 'build.cc', 'bundle.cc', 'cat.cc', diff --git a/tests/functional/test-libstoreconsumer/main.cc b/tests/functional/test-libstoreconsumer/main.cc index 5b0132934..6cfe50047 100644 --- a/tests/functional/test-libstoreconsumer/main.cc +++ b/tests/functional/test-libstoreconsumer/main.cc @@ -5,13 +5,6 @@ using namespace nix; -extern "C" [[gnu::retain]] const char * __asan_default_options() -{ - // We leak a bunch of memory knowingly on purpose. It's not worthwhile to - // diagnose that memory being leaked for now. 
- return "abort_on_error=1:print_summary=1:detect_leaks=0"; -} - int main(int argc, char ** argv) { try { diff --git a/tests/functional/test-libstoreconsumer/meson.build b/tests/functional/test-libstoreconsumer/meson.build index 7c95b0c4a..b2f1c1ca3 100644 --- a/tests/functional/test-libstoreconsumer/meson.build +++ b/tests/functional/test-libstoreconsumer/meson.build @@ -1,11 +1,12 @@ cxx = meson.get_compiler('cpp') +deps_other = [] subdir('nix-meson-build-support/common/asan-options') libstoreconsumer_tester = executable( 'test-libstoreconsumer', 'main.cc', - dependencies : [ + dependencies : deps_other + [ dependency('nix-store'), ], build_by_default : false, From 199b6ff3fb91f7d7c81f5bfaaaea0935bd2fcbea Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 17:36:45 +0300 Subject: [PATCH 163/373] Disable detect_odr_violation for ASan There's some unfortunate ODR violations that get dianosed with GCC but not Clang for static inline constexpr variables defined inside the class body: template struct static_const { static JSON_INLINE_VARIABLE constexpr T value{}; }; This can be ignored pretty much. 
There is the same problem for std::piecewise_construct: http://lists.boost.org/Archives/boost/2007/06/123353.php ==2455704==ERROR: AddressSanitizer: odr-violation (0x7efddc460e20): [1] size=1 'value' /nix/store/235hvgzcbl06fxy53515q8sr6lljvf68-nlohmann_json-3.11.3/include/nlohmann/detail/meta/cpp_future.hpp:156:45 in /nix/store/pkmljfq97a83dbanr0n64zbm8cyhna33-nix-store-2.33.0pre/lib/libnixstore.so.2.33.0 [2] size=1 'value' /nix/store/235hvgzcbl06fxy53515q8sr6lljvf68-nlohmann_json-3.11.3/include/nlohmann/detail/meta/cpp_future.hpp:156:45 in /nix/store/gbjpkjj0g8vk20fzlyrwj491gwp6g1qw-nix-util-2.33.0pre/lib/libnixutil.so.2.33.0 --- nix-meson-build-support/common/asan-options/asan-options.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix-meson-build-support/common/asan-options/asan-options.cc b/nix-meson-build-support/common/asan-options/asan-options.cc index 651354bac..c9782fea0 100644 --- a/nix-meson-build-support/common/asan-options/asan-options.cc +++ b/nix-meson-build-support/common/asan-options/asan-options.cc @@ -2,5 +2,5 @@ extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options() { // We leak a bunch of memory knowingly on purpose. It's not worthwhile to // diagnose that memory being leaked for now. 
- return "abort_on_error=1:print_summary=1:detect_leaks=0"; + return "abort_on_error=1:print_summary=1:detect_leaks=0:detect_odr_violation=0"; } From a491173369c18bd3c079e8180ccb07c6edf49d54 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 17:42:19 +0300 Subject: [PATCH 164/373] packaging: Add withASan,withUBSan options to the scope --- ci/gha/tests/default.nix | 17 ++++------------- packaging/components.nix | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index d9115f92c..09fb6ec23 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -23,16 +23,6 @@ let packages' = nixFlake.packages.${system}; stdenv = (getStdenv pkgs); - enableSanitizersLayer = finalAttrs: prevAttrs: { - mesonFlags = - (prevAttrs.mesonFlags or [ ]) - ++ [ (lib.mesonOption "b_sanitize" "address,undefined") ] - ++ (lib.optionals stdenv.cc.isClang [ - # https://www.github.com/mesonbuild/meson/issues/764 - (lib.mesonBool "b_lundef" false) - ]); - }; - collectCoverageLayer = finalAttrs: prevAttrs: { env = let @@ -55,14 +45,15 @@ let ''; }; - componentOverrides = - (lib.optional withSanitizers enableSanitizersLayer) - ++ (lib.optional withCoverage collectCoverageLayer); + componentOverrides = (lib.optional withCoverage collectCoverageLayer); in rec { nixComponentsInstrumented = nixComponents.overrideScope ( final: prev: { + withASan = withSanitizers; + withUBSan = withSanitizers; + nix-store-tests = prev.nix-store-tests.override { withBenchmarks = true; }; # Boehm is incompatible with ASAN. 
nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; diff --git a/packaging/components.nix b/packaging/components.nix index 2be4fa61d..106e96723 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -204,6 +204,25 @@ let mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ]; }; + enableSanitizersLayer = + finalAttrs: prevAttrs: + let + sanitizers = lib.optional scope.withASan "address" ++ lib.optional scope.withUBSan "undefined"; + in + { + mesonFlags = + (prevAttrs.mesonFlags or [ ]) + ++ lib.optionals (lib.length sanitizers > 0) ( + [ + (lib.mesonOption "b_sanitize" (lib.concatStringsSep "," sanitizers)) + ] + ++ (lib.optionals stdenv.cc.isClang [ + # https://www.github.com/mesonbuild/meson/issues/764 + (lib.mesonBool "b_lundef" false) + ]) + ); + }; + nixDefaultsLayer = finalAttrs: prevAttrs: { strictDeps = prevAttrs.strictDeps or true; enableParallelBuilding = true; @@ -246,6 +265,16 @@ in inherit filesetToSource; + /** + Whether meson components are built with [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html). + */ + withASan = false; + + /** + Whether meson components are built with [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html). + */ + withUBSan = false; + /** A user-provided extension function to apply to each component derivation. 
*/ @@ -332,6 +361,7 @@ in setVersionLayer mesonLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonExecutable = mkPackageBuilder [ @@ -342,6 +372,7 @@ in mesonLayer mesonBuildLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonLibrary = mkPackageBuilder [ @@ -353,6 +384,7 @@ in mesonBuildLayer mesonLibraryLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; From de75a180cbe89d40053b8c4c163df4fbc172c5be Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 19:38:01 +0300 Subject: [PATCH 165/373] packaging: Add buildWithSanitizers to hydraJobs --- packaging/hydra.nix | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 9f9749bde..ae2e6ab98 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -158,6 +158,27 @@ in in forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); + buildWithSanitizers = + let + components = forAllSystems ( + system: + let + pkgs = nixpkgsFor.${system}.native; + in + pkgs.nixComponents2.overrideScope ( + self: super: { + # Boost coroutines fail with ASAN on darwin. + withASan = !pkgs.stdenv.buildPlatform.isDarwin; + withUBSan = true; + nix-expr = super.nix-expr.override { enableGC = false; }; + # Unclear how to make Perl bindings work with a dynamically linked ASAN. + nix-perl-bindings = null; + } + ) + ); + in + forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); + buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); # Toggles some settings for better coverage. Windows needs these From 9150ccb89e1da05ac731f34a860022a69a215edd Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 12 Oct 2025 13:16:50 -0400 Subject: [PATCH 166/373] Fix Windows dev shell (mostly) gbenchmark still has too-narrow supported systems, however. That needs to be fixed in Nixpkgs. 
--- tests/functional/package.nix | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/tests/functional/package.nix b/tests/functional/package.nix index 1f1d10ea8..a36c2e2d3 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -2,16 +2,7 @@ lib, stdenv, mkMesonDerivation, - - meson, - ninja, - pkg-config, - - jq, - git, - mercurial, - util-linux, - unixtools, + buildPackages, nix-store, nix-expr, @@ -46,16 +37,17 @@ mkMesonDerivation ( ./. ]; - # Hack for sake of the dev shell + # Hack for sake of the dev shell. Need to "manually splice" since + # this isn't a specially-recognized list of dependencies. passthru.externalNativeBuildInputs = [ - meson - ninja - pkg-config + buildPackages.meson + buildPackages.ninja + buildPackages.pkg-config - jq - git - mercurial - unixtools.script + buildPackages.jq + buildPackages.git + buildPackages.mercurial + buildPackages.unixtools.script ] ++ lib.optionals stdenv.hostPlatform.isLinux [ # For various sandboxing tests that needs a statically-linked shell, @@ -64,7 +56,7 @@ mkMesonDerivation ( # For Overlay FS tests need `mount`, `umount`, and `unshare`. # For `script` command (ensuring a TTY) # TODO use `unixtools` to be precise over which executables instead? - util-linux + buildPackages.util-linux ]; nativeBuildInputs = finalAttrs.passthru.externalNativeBuildInputs ++ [ From 10223fae8623ce2b919f620878bf7af2a95a8680 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 12 Oct 2025 13:22:14 -0400 Subject: [PATCH 167/373] Fix windows build I forgot to add some CPP in b57caaa1a273323b596097ab5509797b38e2e272. Hopefully, as we rely on RAII more, these explicit resets become unneeded. 
--- src/libstore/build/derivation-building-goal.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 6bda17d37..037401ccb 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -495,7 +495,9 @@ Goal::Co DerivationBuildingGoal::tryToBuild() Magenta( "/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); +#ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows builder.reset(); +#endif outputLocks.unlock(); worker.permanentFailure = true; co_return doneFailure({BuildResult::Failure::InputRejected, std::move(msg)}); From 89b35ec0dce03b41a01899b04b9b116f7cdf85c5 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 22:10:35 +0300 Subject: [PATCH 168/373] packaging/hydra: buildNoGC is the same as buildWithSanitizers This will reduce the load on hydra. It doesn't make sense to build 2 slightly different variations where the difference is only in the nix-perl-bindings and additional sanitizers. --- packaging/hydra.nix | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/packaging/hydra.nix b/packaging/hydra.nix index ae2e6ab98..bc75b5dfb 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -73,7 +73,7 @@ let ] ); in -{ +rec { /** An internal check to make sure our package listing is complete. */ @@ -145,18 +145,9 @@ in ) ); - buildNoGc = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-expr = super.nix-expr.override { enableGC = false; }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); + # Builds with sanitizers already have GC disabled, so this buildNoGc can just + # point to buildWithSanitizers in order to reduce the load on hydra. 
+ buildNoGc = buildWithSanitizers; buildWithSanitizers = let From f0e1f652607a4423ac10393cdb9250f15fead512 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sun, 12 Oct 2025 02:00:27 +0000 Subject: [PATCH 169/373] fix(libstore): fix race condition in AWS credential provider caching The previous implementation had a check-then-create race condition where multiple threads could simultaneously: 1. Check the cache and find no provider (line 122) 2. Create their own providers (lines 126-145) 3. Insert into cache (line 161) This resulted in multiple credential providers being created when downloading multiple packages in parallel, as each .narinfo download would trigger provider creation on its own thread. Fix by using boost::concurrent_flat_map's try_emplace_and_cvisit, which provides atomic get-or-create semantics: - f1 callback: Called atomically during insertion, creates the provider - f2 callback: Called if key exists, returns cached provider - Other threads are blocked during f1, so no nullptr is ever visible --- src/libstore/aws-creds.cc | 85 ++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 38 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index cd404a554..05c11d24a 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -118,48 +118,57 @@ AwsCredentials getAwsCredentials(const std::string & profile) // Get or create credential provider with caching std::shared_ptr provider; - // Try to find existing provider - credentialProviderCache.visit(profile, [&](const auto & pair) { provider = pair.second; }); + // Use try_emplace_and_cvisit for atomic get-or-create + // This prevents race conditions where multiple threads create providers + credentialProviderCache.try_emplace_and_cvisit( + profile, + nullptr, // Placeholder - will be replaced in f1 before any thread can see it + [&](auto & kv) { + // f1: Called atomically during insertion with non-const reference + // Other threads are 
blocked until we finish, so nullptr is never visible + debug( + "[pid=%d] creating new AWS credential provider for profile '%s'", + getpid(), + profile.empty() ? "(default)" : profile.c_str()); - if (!provider) { - // Create new provider if not found - debug( - "[pid=%d] creating new AWS credential provider for profile '%s'", - getpid(), - profile.empty() ? "(default)" : profile.c_str()); + try { + initAwsCrt(); - try { - initAwsCrt(); + if (profile.empty()) { + Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); + } else { + Aws::Crt::Auth::CredentialsProviderProfileConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + // This is safe because the underlying C library will copy this string + // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 + config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); + kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + } - if (profile.empty()) { - Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); - } else { - Aws::Crt::Auth::CredentialsProviderProfileConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - // This is safe because the underlying C library will copy this string - // c.f. 
https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 - config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); - provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + if (!kv.second) { + throw AwsAuthError( + "Failed to create AWS credentials provider for %s", + profile.empty() ? "default profile" : fmt("profile '%s'", profile)); + } + + provider = kv.second; + } catch (Error & e) { + // Exception during creation - remove the entry to allow retry + credentialProviderCache.erase(profile); + e.addTrace({}, "for AWS profile: %s", profile.empty() ? "(default)" : profile); + throw; + } catch (...) { + // Non-Error exception - still need to clean up + credentialProviderCache.erase(profile); + throw; } - } catch (Error & e) { - e.addTrace( - {}, - "while creating AWS credentials provider for %s", - profile.empty() ? "default profile" : fmt("profile '%s'", profile)); - throw; - } - - if (!provider) { - throw AwsAuthError( - "Failed to create AWS credentials provider for %s", - profile.empty() ? "default profile" : fmt("profile '%s'", profile)); - } - - // Insert into cache (try_emplace is thread-safe and won't overwrite if another thread added it) - credentialProviderCache.try_emplace(profile, provider); - } + }, + [&](const auto & kv) { + // f2: Called if key already exists (const reference) + provider = kv.second; + }); return getCredentialsFromProvider(provider); } From 18ec3d1094e821df381dc1b12b13472086bfe021 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 13 Oct 2025 01:44:40 +0300 Subject: [PATCH 170/373] libstore: Avoid copying derivations to the store if they are already valid MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This avoids the quite costly copying of derivations to the daemon over the wire in case it already exists in the eval store. 
For a fresh instantiation (after running nix-collect-garbage) this doesn't significantly slow down eval: taskset -c 2,3 hyperfine --reference "result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" --prepare "nix-collect-garbage --store /tmp/store1111 --no-keep-derivations" "result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" Benchmark 1: result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 388.7 ms ± 10.5 ms [User: 157.0 ms, System: 61.3 ms] Range (min … max): 379.4 ms … 415.9 ms 10 runs Benchmark 2: result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 389.2 ms ± 4.8 ms [User: 158.5 ms, System: 60.7 ms] Range (min … max): 381.2 ms … 397.6 ms 10 runs But if the derivations are already instantiated this shows a pretty neat speedup: taskset -c 2,3 hyperfine --reference "result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" "result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" Benchmark 1: result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 240.4 ms ± 3.1 ms [User: 148.1 ms, System: 57.0 ms] Range (min … max): 233.8 ms … 245.0 ms 12 runs Benchmark 2: result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 226.5 ms ± 4.5 ms [User: 147.8 ms, System: 55.2 ms] Range (min … max): 214.0 ms … 231.2 ms 13 runs Co-authored-by: Sergei Zimmerman --- src/libstore-tests/meson.build | 1 + src/libstore-tests/write-derivation.cc | 57 ++++++++++++++++++++++++++ src/libstore/derivations.cc | 36 ++++++++-------- 3 files changed, 77 insertions(+), 17 deletions(-) create mode 100644 src/libstore-tests/write-derivation.cc diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index e8e90ad81..4d464ad89 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -83,6 +83,7 @@ sources = files( 'store-reference.cc', 
'uds-remote-store.cc', 'worker-protocol.cc', + 'write-derivation.cc', ) include_dirs = [ include_directories('.') ] diff --git a/src/libstore-tests/write-derivation.cc b/src/libstore-tests/write-derivation.cc new file mode 100644 index 000000000..3f7de05d3 --- /dev/null +++ b/src/libstore-tests/write-derivation.cc @@ -0,0 +1,57 @@ +#include +#include + +#include "nix/util/tests/gmock-matchers.hh" +#include "nix/store/derivations.hh" +#include "nix/store/dummy-store-impl.hh" +#include "nix/store/tests/libstore.hh" + +namespace nix { +namespace { + +class WriteDerivationTest : public LibStoreTest +{ +protected: + WriteDerivationTest(ref config_) + : LibStoreTest(config_->openDummyStore()) + , config(std::move(config_)) + { + config->readOnly = false; + } + + WriteDerivationTest() + : WriteDerivationTest(make_ref(DummyStoreConfig::Params{})) + { + } + + ref config; +}; + +static Derivation makeSimpleDrv() +{ + Derivation drv; + drv.name = "simple-derivation"; + drv.platform = "system"; + drv.builder = "foo"; + drv.args = {"bar", "baz"}; + drv.env = StringPairs{{"BIG_BAD", "WOLF"}}; + return drv; +} + +} // namespace + +TEST_F(WriteDerivationTest, addToStoreFromDumpCalledOnce) +{ + auto drv = makeSimpleDrv(); + + auto path1 = writeDerivation(*store, drv, NoRepair); + config->readOnly = true; + auto path2 = writeDerivation(*store, drv, NoRepair); + EXPECT_EQ(path1, path2); + EXPECT_THAT( + [&] { writeDerivation(*store, drv, Repair); }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher( + "operation 'addToStoreFromDump' is not supported by store 'dummy://'"))); +} + +} // namespace nix diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 6d7dbc99c..f634bccfb 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -115,23 +115,25 @@ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repa held during a garbage collection). 
*/ auto suffix = std::string(drv.name) + drvExtension; auto contents = drv.unparse(store, false); - return readOnly || settings.readOnlyMode ? store.makeFixedOutputPathFromCA( - suffix, - TextInfo{ - .hash = hashString(HashAlgorithm::SHA256, contents), - .references = std::move(references), - }) - : ({ - StringSource s{contents}; - store.addToStoreFromDump( - s, - suffix, - FileSerialisationMethod::Flat, - ContentAddressMethod::Raw::Text, - HashAlgorithm::SHA256, - references, - repair); - }); + auto hash = hashString(HashAlgorithm::SHA256, contents); + auto ca = TextInfo{.hash = hash, .references = references}; + auto path = store.makeFixedOutputPathFromCA(suffix, ca); + + if (readOnly || settings.readOnlyMode || (store.isValidPath(path) && !repair)) + return path; + + StringSource s{contents}; + auto path2 = store.addToStoreFromDump( + s, + suffix, + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + references, + repair); + assert(path2 == path); + + return path; } namespace { From 000e6f628221ae94a1e08a0ba4d5b64544ffeb8d Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 10 Oct 2025 14:45:06 +0000 Subject: [PATCH 171/373] feat(libstore): add builtin fetchurl S3 credential pre-resolution Add support for pre-resolving AWS credentials in the parent process before forking for builtin:fetchurl. This avoids recreating credential providers in the forked child process. 
--- src/libstore/builtins/fetchurl.cc | 13 ++++ src/libstore/include/nix/store/builtins.hh | 13 ++++ src/libstore/unix/build/derivation-builder.cc | 68 +++++++++++++++++-- .../unix/build/linux-derivation-builder.cc | 8 ++- 4 files changed, 97 insertions(+), 5 deletions(-) diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 7abfa4495..3b2d5b866 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -33,6 +33,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) /* Note: have to use a fresh fileTransfer here because we're in a forked process. */ + debug("[pid=%d] builtin:fetchurl creating fresh FileTransfer instance", getpid()); auto fileTransfer = makeFileTransfer(); auto fetch = [&](const std::string & url) { @@ -40,6 +41,18 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) FileTransferRequest request(ValidURL{url}); request.decompress = false; +#if NIX_WITH_CURL_S3 + // Use pre-resolved credentials if available + if (ctx.awsCredentials && request.uri.scheme() == "s3") { + debug("[pid=%d] Using pre-resolved AWS credentials from parent process", getpid()); + request.usernameAuth = UsernameAuth{ + .username = ctx.awsCredentials->accessKeyId, + .password = ctx.awsCredentials->secretAccessKey, + }; + request.preResolvedAwsSessionToken = ctx.awsCredentials->sessionToken; + } +#endif + auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? 
"xz" : "none", sink); fileTransfer->download(std::move(request), *decompressor); decompressor->finish(); diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index cc164fe82..5c15b2e9b 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -2,6 +2,11 @@ ///@file #include "nix/store/derivations.hh" +#include "nix/store/config.hh" + +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +#endif namespace nix { @@ -12,6 +17,14 @@ struct BuiltinBuilderContext std::string netrcData; std::string caFileData; Path tmpDirInSandbox; + +#if NIX_WITH_CURL_S3 + /** + * Pre-resolved AWS credentials for S3 URLs in builtin:fetchurl. + * When present, these should be used instead of creating new credential providers. + */ + std::optional awsCredentials; +#endif }; using BuiltinBuilder = std::function; diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 0158505a5..f7bab7057 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -46,6 +46,12 @@ #include "store-config-private.hh" #include "build/derivation-check.hh" +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +# include "nix/store/s3-url.hh" +# include "nix/util/url.hh" +#endif + namespace nix { struct NotDeterministic : BuildError @@ -290,6 +296,15 @@ protected: */ virtual void startChild(); +#if NIX_WITH_CURL_S3 + /** + * Pre-resolve AWS credentials for S3 URLs in builtin:fetchurl. + * This should be called before forking to ensure credentials are available in child. + * Returns the credentials if successfully resolved, or std::nullopt otherwise. + */ + std::optional preResolveAwsCredentials(); +#endif + private: /** @@ -339,10 +354,20 @@ protected: */ void writeBuilderFile(const std::string & name, std::string_view contents); + /** + * Arguments passed to runChild(). 
+ */ + struct RunChildArgs + { +#if NIX_WITH_CURL_S3 + std::optional awsCredentials; +#endif + }; + /** * Run the builder's process. */ - void runChild(); + void runChild(RunChildArgs args); /** * Move the current process into the chroot, if any. Called early @@ -920,11 +945,43 @@ void DerivationBuilderImpl::openSlave() throw SysError("cannot pipe standard error into log file"); } +#if NIX_WITH_CURL_S3 +std::optional DerivationBuilderImpl::preResolveAwsCredentials() +{ + if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { + auto url = drv.env.find("url"); + if (url != drv.env.end()) { + try { + auto parsedUrl = parseURL(url->second); + if (parsedUrl.scheme == "s3") { + debug("Pre-resolving AWS credentials for S3 URL in builtin:fetchurl"); + auto s3Url = ParsedS3URL::parse(parsedUrl); + + // Use the preResolveAwsCredentials from aws-creds + auto credentials = nix::preResolveAwsCredentials(s3Url); + debug("Successfully pre-resolved AWS credentials in parent process"); + return credentials; + } + } catch (const std::exception & e) { + debug("Error pre-resolving S3 credentials: %s", e.what()); + } + } + } + return std::nullopt; +} +#endif + void DerivationBuilderImpl::startChild() { - pid = startProcess([&]() { + RunChildArgs args{ +#if NIX_WITH_CURL_S3 + .awsCredentials = preResolveAwsCredentials(), +#endif + }; + + pid = startProcess([this, args = std::move(args)]() { openSlave(); - runChild(); + runChild(std::move(args)); }); } @@ -1181,7 +1238,7 @@ void DerivationBuilderImpl::writeBuilderFile(const std::string & name, std::stri chownToBuilder(fd.get(), path); } -void DerivationBuilderImpl::runChild() +void DerivationBuilderImpl::runChild(RunChildArgs args) { /* Warning: in the child we should absolutely not make any SQLite calls! 
*/ @@ -1198,6 +1255,9 @@ void DerivationBuilderImpl::runChild() BuiltinBuilderContext ctx{ .drv = drv, .tmpDirInSandbox = tmpDirInSandbox(), +#if NIX_WITH_CURL_S3 + .awsCredentials = args.awsCredentials, +#endif }; if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index f6e910d08..be064566f 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -276,6 +276,12 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void startChild() override { + RunChildArgs args{ +# if NIX_WITH_CURL_S3 + .awsCredentials = preResolveAwsCredentials(), +# endif + }; + /* Set up private namespaces for the build: - The PID namespace causes the build to start as PID 1. @@ -343,7 +349,7 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu if (usingUserNamespace) options.cloneFlags |= CLONE_NEWUSER; - pid_t child = startProcess([&]() { runChild(); }, options); + pid_t child = startProcess([this, args = std::move(args)]() { runChild(std::move(args)); }, options); writeFull(sendPid.writeSide.get(), fmt("%d\n", child)); _exit(0); From 6db86389ce89ac777d297e463021e549d6838d93 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 10 Oct 2025 19:08:38 +0200 Subject: [PATCH 172/373] util/error: Document addTrace params ... and rename e -> pos. That was weird. 
--- src/libutil/include/nix/util/error.hh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/libutil/include/nix/util/error.hh b/src/libutil/include/nix/util/error.hh index e564ca5b9..49dd75991 100644 --- a/src/libutil/include/nix/util/error.hh +++ b/src/libutil/include/nix/util/error.hh @@ -192,13 +192,23 @@ public: err.traces.push_front(trace); } + /** + * @param pos Nullable `shared_ptr` + * @param fs Format string, see `HintFmt` + * @param args... Format string arguments. + */ template - void addTrace(std::shared_ptr && e, std::string_view fs, const Args &... args) + void addTrace(std::shared_ptr && pos, std::string_view fs, const Args &... args) { - addTrace(std::move(e), HintFmt(std::string(fs), args...)); + addTrace(std::move(pos), HintFmt(std::string(fs), args...)); } - void addTrace(std::shared_ptr && e, HintFmt hint, TracePrint print = TracePrint::Default); + /** + * @param pos Nullable `shared_ptr` + * @param hint Formatted error message + * @param print Optional, whether to always print (e.g. `addErrorContext`) + */ + void addTrace(std::shared_ptr && pos, HintFmt hint, TracePrint print = TracePrint::Default); bool hasTrace() const { From 48a5e2dde2625ebb0d7f6aa2e77051e152fb3411 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 13:14:05 +0200 Subject: [PATCH 173/373] EvalState: add doc comment --- src/libexpr/include/nix/expr/eval.hh | 9 ++++++++- src/libutil/include/nix/util/error.hh | 10 +++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index b87c45ce3..76ce62b87 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -508,8 +508,15 @@ private: public: + /** + * @param lookupPath Only used during construction. + * @param store The store to use for instantiation + * @param fetchSettings Must outlive the lifetime of this EvalState! 
+ * @param settings Must outlive the lifetime of this EvalState! + * @param buildStore The store to use for builds ("import from derivation", C API `nix_string_realise`) + */ EvalState( - const LookupPath & _lookupPath, + const LookupPath & lookupPath, ref store, const fetchers::Settings & fetchSettings, const EvalSettings & settings, diff --git a/src/libutil/include/nix/util/error.hh b/src/libutil/include/nix/util/error.hh index 49dd75991..cc8460592 100644 --- a/src/libutil/include/nix/util/error.hh +++ b/src/libutil/include/nix/util/error.hh @@ -193,7 +193,9 @@ public: } /** - * @param pos Nullable `shared_ptr` + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item * @param fs Format string, see `HintFmt` * @param args... Format string arguments. */ @@ -204,9 +206,11 @@ public: } /** - * @param pos Nullable `shared_ptr` + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item * @param hint Formatted error message - * @param print Optional, whether to always print (e.g. 
`addErrorContext`) + * @param print Optional, whether to always print (used by `addErrorContext`) */ void addTrace(std::shared_ptr && pos, HintFmt hint, TracePrint print = TracePrint::Default); From 5dcfddf9972fadf3a188397757eb1727289ab854 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 13:59:39 +0200 Subject: [PATCH 174/373] strings: Add optionalBracket helper --- src/libutil-tests/strings.cc | 59 +++++++++++++++++++++++++ src/libutil/include/nix/util/strings.hh | 39 ++++++++++++++++ src/libutil/strings.cc | 14 ++++++ 3 files changed, 112 insertions(+) diff --git a/src/libutil-tests/strings.cc b/src/libutil-tests/strings.cc index bd740ce0c..dbbecd514 100644 --- a/src/libutil-tests/strings.cc +++ b/src/libutil-tests/strings.cc @@ -494,4 +494,63 @@ TEST(shellSplitString, testUnbalancedQuotes) ASSERT_THROW(shellSplitString("foo\"bar\\\""), Error); } +/* ---------------------------------------------------------------------------- + * optionalBracket + * --------------------------------------------------------------------------*/ + +TEST(optionalBracket, emptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "", ")"), ""); +} + +TEST(optionalBracket, nonEmptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "foo", ")"), " (foo)"); +} + +TEST(optionalBracket, emptyPrefixAndSuffix) +{ + ASSERT_EQ(optionalBracket("", "foo", ""), "foo"); +} + +TEST(optionalBracket, emptyContentEmptyBrackets) +{ + ASSERT_EQ(optionalBracket("", "", ""), ""); +} + +TEST(optionalBracket, complexBrackets) +{ + ASSERT_EQ(optionalBracket(" [[[", "content", "]]]"), " [[[content]]]"); +} + +TEST(optionalBracket, onlyPrefix) +{ + ASSERT_EQ(optionalBracket("prefix", "content", ""), "prefixcontent"); +} + +TEST(optionalBracket, onlySuffix) +{ + ASSERT_EQ(optionalBracket("", "content", "suffix"), "contentsuffix"); +} + +TEST(optionalBracket, optionalWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("foo"), ")"), " (foo)"); +} + +TEST(optionalBracket, optionalNullopt) +{ + 
ASSERT_EQ(optionalBracket(" (", std::optional(std::nullopt), ")"), ""); +} + +TEST(optionalBracket, optionalEmptyString) +{ + ASSERT_EQ(optionalBracket(" (", std::optional(""), ")"), ""); +} + +TEST(optionalBracket, optionalStringViewWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("bar"), ")"), " (bar)"); +} + } // namespace nix diff --git a/src/libutil/include/nix/util/strings.hh b/src/libutil/include/nix/util/strings.hh index ba37ce79f..da6decc31 100644 --- a/src/libutil/include/nix/util/strings.hh +++ b/src/libutil/include/nix/util/strings.hh @@ -3,6 +3,7 @@ #include "nix/util/types.hh" #include +#include #include #include #include @@ -93,6 +94,44 @@ extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, */ std::list shellSplitString(std::string_view s); +/** + * Conditionally wrap a string with prefix and suffix brackets. + * + * If `content` is empty, returns an empty string. + * Otherwise, returns `prefix + content + suffix`. + * + * Example: + * optionalBracket(" (", "foo", ")") == " (foo)" + * optionalBracket(" (", "", ")") == "" + * + * Design note: this would have been called `optionalParentheses`, except this + * function is more general and more explicit. Parentheses typically *also* need + * to be prefixed with a space in order to fit nicely in a piece of natural + * language. + */ +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix); + +/** + * Overload for optional content. + * + * If `content` is nullopt or contains an empty string, returns an empty string. + * Otherwise, returns `prefix + *content + suffix`. 
+ * + * Example: + * optionalBracket(" (", std::optional("foo"), ")") == " (foo)" + * optionalBracket(" (", std::nullopt, ")") == "" + * optionalBracket(" (", std::optional(""), ")") == "" + */ +template + requires std::convertible_to +std::string optionalBracket(std::string_view prefix, const std::optional & content, std::string_view suffix) +{ + if (!content || std::string_view(*content).empty()) { + return ""; + } + return optionalBracket(prefix, std::string_view(*content), suffix); +} + /** * Hash implementation that can be used for zero-copy heterogenous lookup from * P1690R1[1] in unordered containers. diff --git a/src/libutil/strings.cc b/src/libutil/strings.cc index a87567cef..c0c3d6602 100644 --- a/src/libutil/strings.cc +++ b/src/libutil/strings.cc @@ -138,4 +138,18 @@ std::list shellSplitString(std::string_view s) return result; } + +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix) +{ + if (content.empty()) { + return ""; + } + std::string result; + result.reserve(prefix.size() + content.size() + suffix.size()); + result.append(prefix); + result.append(content); + result.append(suffix); + return result; +} + } // namespace nix From 583f5e37fc508e2307fb790188791214fb646b05 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:02:59 +0200 Subject: [PATCH 175/373] Refactor: use optionalBracket in nix search --- src/nix/search.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/search.cc b/src/nix/search.cc index 910450e95..20bb4cd5d 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -159,7 +159,7 @@ struct CmdSearch : InstallableValueCommand, MixJSON logger->cout( "* %s%s", wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - name.version != "" ? 
" (" + name.version + ")" : ""); + optionalBracket(" (", name.version, ")")); if (description != "") logger->cout( " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); From 998f93f267832c672511eed259339dd0fd142464 Mon Sep 17 00:00:00 2001 From: Soumyadip Sarkar Date: Mon, 13 Oct 2025 18:15:52 +0530 Subject: [PATCH 176/373] Fix typos --- doc/manual/source/protocols/json/derivation.md | 4 ++-- doc/manual/source/store/derivation/index.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index 566288962..cc9389f7c 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -25,7 +25,7 @@ is a JSON object with the following fields: - Version 2: Separate `method` and `hashAlgo` fields in output specs - - Verison 3: Drop store dir from store paths, just include base name. + - Version 3: Drop store dir from store paths, just include base name. Note that while this format is experimental, the maintenance of versions is best-effort, and not promised to identify every change. @@ -116,5 +116,5 @@ is a JSON object with the following fields: The environment passed to the `builder`. * `structuredAttrs`: - [Strucutured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. + [Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. Structured attributes are JSON, and thus embedded as-is. 
diff --git a/doc/manual/source/store/derivation/index.md b/doc/manual/source/store/derivation/index.md index 0e12b4d5e..5b179273d 100644 --- a/doc/manual/source/store/derivation/index.md +++ b/doc/manual/source/store/derivation/index.md @@ -106,7 +106,7 @@ The system type on which the [`builder`](#attr-builder) executable is meant to b A necessary condition for Nix to schedule a given derivation on some [Nix instance] is for the "system" of that derivation to match that instance's [`system` configuration option] or [`extra-platforms` configuration option]. -By putting the `system` in each derivation, Nix allows *heterogenous* build plans, where not all steps can be run on the same machine or same sort of machine. +By putting the `system` in each derivation, Nix allows *heterogeneous* build plans, where not all steps can be run on the same machine or same sort of machine. Nix can schedule builds such that it automatically builds on other platforms by [forwarding build requests](@docroot@/advanced-topics/distributed-builds.md) to other Nix instances. [`system` configuration option]: @docroot@/command-ref/conf-file.md#conf-system From 47f427a1723ba36e4f48dc3db6dcdafa206932e6 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 13 Oct 2025 22:05:46 +0300 Subject: [PATCH 177/373] Remove validation of URLs passed to FileTransferRequest verbatim CURL is not very strict about validation of URLs passed to it. We should reflect this in our handling of URLs that we get from the user in or builtins.fetchurl. ValidURL was an attempt to rectify this, but it turned out to be too strict. The only good way to resolve this is to pass (in some cases) the user-provided string verbatim to CURL. Other usages in libfetchers still benefit from using structured ParsedURL and validation though. 
nix store prefetch-file --name foo 'https://cdn.skypack.dev/big.js@^5.2.2' error: 'https://cdn.skypack.dev/big.js@^5.2.2' is not a valid URL: leftover --- src/libfetchers/tarball.cc | 12 ++--- src/libstore/builtins/fetchurl.cc | 2 +- .../include/nix/store/filetransfer.hh | 4 +- src/libutil/include/nix/util/url.hh | 54 +++++++++++-------- src/libutil/url.cc | 2 +- src/nix/prefetch.cc | 2 +- tests/functional/fetchurl.sh | 5 -- 7 files changed, 44 insertions(+), 37 deletions(-) diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 31d5ab460..863a0d680 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -42,7 +42,7 @@ DownloadFileResult downloadFile( if (cached && !cached->expired) return useCached(); - FileTransferRequest request(ValidURL{url}); + FileTransferRequest request(VerbatimURL{url}); request.headers = headers; if (cached) request.expectedETag = getStrAttr(cached->value, "etag"); @@ -107,13 +107,13 @@ DownloadFileResult downloadFile( static DownloadTarballResult downloadTarball_( const Settings & settings, const std::string & urlS, const Headers & headers, const std::string & displayPrefix) { - ValidURL url = urlS; + ParsedURL url = parseURL(urlS); // Some friendly error messages for common mistakes. // Namely lets catch when the url is a local file path, but // it is not in fact a tarball. - if (url.scheme() == "file") { - std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path()); + if (url.scheme == "file") { + std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path); if (!exists(localPath)) { throw Error("tarball '%s' does not exist.", localPath); } @@ -164,7 +164,7 @@ static DownloadTarballResult downloadTarball_( /* Note: if the download is cached, `importTarball()` will receive no data, which causes it to import an empty tarball. */ - auto archive = !url.path().empty() && hasSuffix(toLower(url.path().back()), ".zip") ? 
({ + auto archive = !url.path.empty() && hasSuffix(toLower(url.path.back()), ".zip") ? ({ /* In streaming mode, libarchive doesn't handle symlinks in zip files correctly (#10649). So write the entire file to disk so libarchive can access it @@ -178,7 +178,7 @@ static DownloadTarballResult downloadTarball_( } TarArchive{path}; }) - : TarArchive{*source}; + : TarArchive{*source}; auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 7abfa4495..df056954e 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -37,7 +37,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) auto fetch = [&](const std::string & url) { auto source = sinkToSource([&](Sink & sink) { - FileTransferRequest request(ValidURL{url}); + FileTransferRequest request(VerbatimURL{url}); request.decompress = false; auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? 
"xz" : "none", sink); diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 942e05a61..78ce439ae 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -95,7 +95,7 @@ struct UsernameAuth struct FileTransferRequest { - ValidURL uri; + VerbatimURL uri; Headers headers; std::string expectedETag; bool verifyTLS = true; @@ -121,7 +121,7 @@ struct FileTransferRequest std::optional preResolvedAwsSessionToken; #endif - FileTransferRequest(ValidURL uri) + FileTransferRequest(VerbatimURL uri) : uri(std::move(uri)) , parentAct(getCurActivity()) { diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index f2bd79b08..4ed80feb3 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -6,6 +6,9 @@ #include "nix/util/error.hh" #include "nix/util/canon-path.hh" +#include "nix/util/split.hh" +#include "nix/util/util.hh" +#include "nix/util/variant-wrapper.hh" namespace nix { @@ -342,8 +345,7 @@ ParsedURL fixGitURL(const std::string & url); bool isValidSchemeName(std::string_view scheme); /** - * Either a ParsedURL or a verbatim string, but the string must be a valid - * ParsedURL. This is necessary because in certain cases URI must be passed + * Either a ParsedURL or a verbatim string. This is necessary because in certain cases URI must be passed * verbatim (e.g. in builtin fetchers), since those are specified by the user. * In those cases normalizations performed by the ParsedURL might be surprising * and undesirable, since Nix must be a universal client that has to work with @@ -354,23 +356,23 @@ bool isValidSchemeName(std::string_view scheme); * * Though we perform parsing and validation for internal needs. 
*/ -struct ValidURL : private ParsedURL +struct VerbatimURL { - std::optional<std::string> encoded; + using Raw = std::variant<std::string, ParsedURL>; + Raw raw; - ValidURL(std::string str) - : ParsedURL(parseURL(str, /*lenient=*/false)) - , encoded(std::move(str)) + VerbatimURL(std::string_view s) + : raw(std::string{s}) { } - ValidURL(std::string_view str) - : ValidURL(std::string{str}) + VerbatimURL(std::string s) + : raw(std::move(s)) { } - ValidURL(ParsedURL parsed) - : ParsedURL{std::move(parsed)} + VerbatimURL(ParsedURL url) + : raw(std::move(url)) { } @@ -379,25 +381,35 @@ struct ValidURL : private ParsedURL */ std::string to_string() const { - return encoded.or_else([&]() -> std::optional<std::string> { return ParsedURL::to_string(); }).value(); + return std::visit( + overloaded{ + [](const std::string & str) { return str; }, [](const ParsedURL & url) { return url.to_string(); }}, + raw); } - const ParsedURL & parsed() const & + const ParsedURL parsed() const { - return *this; + return std::visit( + overloaded{ + [](const std::string & str) { return parseURL(str); }, [](const ParsedURL & url) { return url; }}, + raw); } std::string_view scheme() const & { - return ParsedURL::scheme; - } - - const auto & path() const & - { - return ParsedURL::path; + return std::visit( + overloaded{ + [](std::string_view str) { + auto scheme = splitPrefixTo(str, ':'); + if (!scheme) + throw BadURL("URL '%s' doesn't have a scheme", str); + return *scheme; + }, + [](const ParsedURL & url) -> std::string_view { return url.scheme; }}, + raw); } }; -std::ostream & operator<<(std::ostream & os, const ValidURL & url); +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url); } // namespace nix diff --git a/src/libutil/url.cc b/src/libutil/url.cc index a50de0944..7410e4062 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -434,7 +434,7 @@ bool isValidSchemeName(std::string_view s) return std::regex_match(s.begin(), s.end(), regex, std::regex_constants::match_default); } -std::ostream &
operator<<(std::ostream & os, const ValidURL & url) +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url) { os << url.to_string(); return os; diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 26905e34c..18abfa0aa 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -105,7 +105,7 @@ std::tuple prefetchFile( FdSink sink(fd.get()); - FileTransferRequest req(ValidURL{url}); + FileTransferRequest req(VerbatimURL{url}); req.decompress = false; getFileTransfer()->download(std::move(req), sink); } diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index 5bc8ca625..c25ac3216 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -88,8 +88,3 @@ requireDaemonNewerThan "2.20" expected=100 if [[ -v NIX_DAEMON_PACKAGE ]]; then expected=1; fi # work around the daemon not returning a 100 status correctly expectStderr $expected nix-build --expr '{ url }: builtins.derivation { name = "nix-cache-info"; system = "x86_64-linux"; builder = "builtin:fetchurl"; inherit url; outputHashMode = "flat"; }' --argstr url "file://$narxz" 2>&1 | grep 'must be a fixed-output or impure derivation' - -requireDaemonNewerThan "2.32.0pre20250831" - -expect 1 nix-build --expr 'import ' --argstr name 'name' --argstr url "file://authority.not.allowed/fetchurl.sh?a=1&a=2" --no-out-link |& - grepQuiet "error: file:// URL 'file://authority.not.allowed/fetchurl.sh?a=1&a=2' has unexpected authority 'authority.not.allowed'" From 3ba221025f3d5e78e5f5fde22d704b403f2090e9 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 13 Oct 2025 23:50:58 +0300 Subject: [PATCH 178/373] libstore/outputs-spec: Drop usage of std::regex std::regex is a really bad tool for parsing things, since it tends to overflow the stack pretty badly. See the build failure under ASan in [^]. 
[^]: https://hydra.nixos.org/build/310077167/nixlog/5 --- src/libstore/outputs-spec.cc | 38 ++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/src/libstore/outputs-spec.cc b/src/libstore/outputs-spec.cc index aacc964cd..622df5fc3 100644 --- a/src/libstore/outputs-spec.cc +++ b/src/libstore/outputs-spec.cc @@ -1,10 +1,10 @@ -#include #include +#include +#include "nix/store/path.hh" +#include "nix/store/store-dir-config.hh" #include "nix/util/util.hh" -#include "nix/util/regex-combinators.hh" #include "nix/store/outputs-spec.hh" -#include "nix/store/path-regex.hh" #include "nix/util/strings-inline.hh" namespace nix { @@ -19,31 +19,27 @@ bool OutputsSpec::contains(const std::string & outputName) const raw); } -static std::string outputSpecRegexStr = regex::either(regex::group(R"(\*)"), regex::group(regex::list(nameRegexStr))); - std::optional OutputsSpec::parseOpt(std::string_view s) { - static std::regex regex(std::string{outputSpecRegexStr}); - - std::cmatch match; - if (!std::regex_match(s.cbegin(), s.cend(), match, regex)) + try { + return parse(s); + } catch (BadStorePathName &) { return std::nullopt; - - if (match[1].matched) - return {OutputsSpec::All{}}; - - if (match[2].matched) - return OutputsSpec::Names{tokenizeString({match[2].first, match[2].second}, ",")}; - - assert(false); + } } OutputsSpec OutputsSpec::parse(std::string_view s) { - std::optional spec = parseOpt(s); - if (!spec) - throw Error("invalid outputs specifier '%s'", s); - return std::move(*spec); + using namespace std::string_view_literals; + + if (s == "*"sv) + return OutputsSpec::All{}; + + auto names = splitString(s, ","); + for (const auto & name : names) + checkName(name); + + return OutputsSpec::Names{std::move(names)}; } std::optional> ExtendedOutputsSpec::parseOpt(std::string_view s) From 0fd890a8d68b128ff4c1e8eefc063589d7910fe1 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:09:49 +0200 Subject: [PATCH 179/373] 
Add reason string support to MissingExperimentalFeature --- src/libutil/configuration.cc | 4 ++-- src/libutil/experimental-features.cc | 8 +++++--- src/libutil/include/nix/util/configuration.hh | 2 +- src/libutil/include/nix/util/experimental-features.hh | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index dc9d91f63..7a0ed22ea 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -500,10 +500,10 @@ bool ExperimentalFeatureSettings::isEnabled(const ExperimentalFeature & feature) return std::find(f.begin(), f.end(), feature) != f.end(); } -void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature) const +void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature, std::string reason) const { if (!isEnabled(feature)) - throw MissingExperimentalFeature(feature); + throw MissingExperimentalFeature(feature, std::move(reason)); } bool ExperimentalFeatureSettings::isEnabled(const std::optional & feature) const diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 0edd5a585..11b8ceadf 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -1,5 +1,6 @@ #include "nix/util/experimental-features.hh" #include "nix/util/fmt.hh" +#include "nix/util/strings.hh" #include "nix/util/util.hh" #include @@ -376,10 +377,11 @@ std::set parseFeatures(const StringSet & rawFeatures) return res; } -MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature) +MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature, std::string reason) : Error( - "experimental Nix feature '%1%' is disabled; add '--extra-experimental-features %1%' to enable it", - showExperimentalFeature(feature)) + "experimental Nix feature '%1%' is disabled%2%; add '--extra-experimental-features %1%' to enable it", + showExperimentalFeature(feature), + 
Uncolored(optionalBracket(" (", reason, ")"))) , missingFeature(feature) { } diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 65391721c..c8d7b7f24 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -463,7 +463,7 @@ struct ExperimentalFeatureSettings : Config * Require an experimental feature be enabled, throwing an error if it is * not. */ - void require(const ExperimentalFeature &) const; + void require(const ExperimentalFeature &, std::string reason = "") const; /** * `std::nullopt` pointer means no feature, which means there is nothing that could be diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 73c4eeca4..6ffc0e0c0 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -88,7 +88,7 @@ public: */ ExperimentalFeature missingFeature; - MissingExperimentalFeature(ExperimentalFeature missingFeature); + MissingExperimentalFeature(ExperimentalFeature missingFeature, std::string reason = ""); }; /** From 71aa9a479883cdf372ed49e717abd277e58f449e Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:20:08 +0200 Subject: [PATCH 180/373] Add reasons to dyndrv xp messages --- src/libexpr/primops.cc | 3 ++- src/libstore/derivations.cc | 8 ++++---- src/libstore/derived-path.cc | 6 +++++- src/libstore/downstream-placeholder.cc | 2 +- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 86cb00131..5f06bf009 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -1420,7 +1420,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName .debugThrow(); } if (ingestionMethod == ContentAddressMethod::Raw::Text) - experimentalFeatureSettings.require(Xp::DynamicDerivations); + 
experimentalFeatureSettings.require( + Xp::DynamicDerivations, fmt("text-hashed derivation '%s', outputHashMode = \"text\"", drvName)); if (ingestionMethod == ContentAddressMethod::Raw::Git) experimentalFeatureSettings.require(Xp::GitHashing); }; diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 6d7dbc99c..b5d8d1a1c 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -288,7 +288,7 @@ static DerivationOutput parseDerivationOutput( if (!hashAlgoStr.empty()) { ContentAddressMethod method = ContentAddressMethod::parsePrefix(hashAlgoStr); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output"); const auto hashAlgo = parseHashAlgo(hashAlgoStr); if (hashS == "impure"sv) { xpSettings.require(Xp::ImpureDerivations); @@ -426,7 +426,7 @@ Derivation parseDerivation( if (*versionS == "xp-dyn-drv"sv) { // Only version we have so far version = DerivationATermVersion::DynamicDerivations; - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name)); } else { throw FormatError("Unknown derivation ATerm format version '%s'", *versionS); } @@ -1301,7 +1301,7 @@ DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatu auto methodAlgo = [&]() -> std::pair { ContentAddressMethod method = ContentAddressMethod::parse(getString(valueAt(json, "method"))); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output in JSON"); auto hashAlgo = parseHashAlgo(getString(valueAt(json, "hashAlgo"))); return {std::move(method), std::move(hashAlgo)}; @@ -1454,7 +1454,7 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental node.value = getStringSet(valueAt(json, "outputs")); auto drvs 
= getObject(valueAt(json, "dynamicOutputs")); for (auto & [outputId, childNode] : drvs) { - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("dynamic output '%s' in JSON", outputId)); node.childMap[outputId] = doInput(childNode); } return node; diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 2cf720b82..34e591666 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -85,7 +85,11 @@ void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatu [&](const SingleDerivedPath::Opaque &) { // plain drv path; no experimental features required. }, - [&](const SingleDerivedPath::Built &) { xpSettings.require(Xp::DynamicDerivations); }, + [&](const SingleDerivedPath::Built & b) { + xpSettings.require( + Xp::DynamicDerivations, + fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string())); + }, }, drv.raw()); } diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc index b3ac1c8c4..30044501b 100644 --- a/src/libstore/downstream-placeholder.cc +++ b/src/libstore/downstream-placeholder.cc @@ -24,7 +24,7 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation( OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings) { - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("placeholder for unknown derivation output '%s'", outputName)); auto compressed = compressHash(placeholder.hash, 20); auto clearText = "nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName}; From 39c46654880d66a1bdfe107f6726630ff831707e Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 23:48:58 +0200 Subject: [PATCH 181/373] Store reason as a field in MissingExperimentalFeature Store the reason string as a field in the exception class rather than only embedding it in the error message. 
This supports better structured error handling and future JSON error reporting. Suggested by Ericson2314 in PR review. --- src/libutil/experimental-features.cc | 1 + src/libutil/include/nix/util/experimental-features.hh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 11b8ceadf..198d021bb 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -383,6 +383,7 @@ MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature featu showExperimentalFeature(feature), Uncolored(optionalBracket(" (", reason, ")"))) , missingFeature(feature) + , reason{reason} { } diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 6ffc0e0c0..aca14bfbb 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -88,6 +88,8 @@ public: */ ExperimentalFeature missingFeature; + std::string reason; + MissingExperimentalFeature(ExperimentalFeature missingFeature, std::string reason = ""); }; From 962862e9e00a088b27178985153783b0ff3cceed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 22:00:55 +0000 Subject: [PATCH 182/373] build(deps): bump actions/create-github-app-token from 1 to 2 Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1 to 2. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/v1...v2) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-version: '2' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 99b75621e..7785e53c2 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Generate GitHub App token id: generate-token - uses: actions/create-github-app-token@v1 + uses: actions/create-github-app-token@v2 with: app-id: ${{ vars.CI_APP_ID }} private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} From b846f27682d27f8674c586e97a758eced52912da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 22:00:59 +0000 Subject: [PATCH 183/373] build(deps): bump actions/checkout from 4 to 5 Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 99b75621e..b9abc720b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -20,7 +20,7 @@ jobs: with: app-id: ${{ vars.CI_APP_ID }} private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} # required to find all branches From 2ee41976c22f1252a439f1940d6190b82830283b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:13:22 -0400 Subject: [PATCH 184/373] Fix #13247 Resolve the derivation before creating a building goal, in a context where we know what output(s) we want. That way we have a chance just to download the outputs we want. Fix #13247 (cherry picked from commit 39f6fd9b464298f37a08cfe7485271b9294fd278) --- .../build/derivation-building-goal.cc | 103 ------------------ src/libstore/build/derivation-goal.cc | 91 ++++++++++++++++ tests/functional/ca/issue-13247.sh | 5 +- 3 files changed, 92 insertions(+), 107 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 037401ccb..c00123634 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,7 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -158,107 +156,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. 
*/ { - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - /* TODO https://github.com/NixOS/nix/issues/13247 we should - let the calling goal do this, so it has a change to pass - just the output(s) it cares about. */ - auto resolvedDrvGoal = - worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = resolvedResult.tryGetSuccess()) { - auto & success = *successP; - SingleDrvOutputs builtOutputs; - - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - for (auto & outputName : drvResolved.outputNames()) { - auto outputHash = get(outputHashes, outputName); - auto resolvedHash = get(resolvedHashes, outputName); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - outputName); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, outputName); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. 
For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - outputName); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, outputName}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - builtOutputs.emplace(outputName, realisation); - } - - runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(success.status, std::move(builtOutputs)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* If we get this far, we know no dynamic drvs inputs */ for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index dc12ab55a..1f8eb1262 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 
// TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -140,6 +141,96 @@ Goal::Co DerivationGoal::haveDerivation() worker.store.printStorePath(drvPath)); } + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } + + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; + + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); + + auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + auto outputHash = get(outputHashes, wantedOutput); + auto resolvedHash = get(resolvedHashes, wantedOutput); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + worker.store.printStorePath(drvPath), + wantedOutput); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, wantedOutput); + if (take1) + return *take1; + + /* The above `get` should work. But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. 
*/ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + wantedOutput); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(status, std::move(realisation)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* Give up on substitution for the output we want, actually build this derivation */ auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 686d90ced..705919513 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,7 +65,4 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] - -# Output should *not* be here, this is the bug -[[ -e "$(jq -r <"$TEST_ROOT"/a.json 
'.[2].outputs.second')" ]] -skipTest "bug is not yet fixed" +[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] From 06bb1c2f93f73fdfd93c04502fbd59f4489e4378 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 13 Oct 2025 18:40:10 -0400 Subject: [PATCH 185/373] Remove some `buildMode` default parameters Force the internals to be more explicit. --- .../nix/store/build/derivation-building-goal.hh | 3 +-- .../include/nix/store/build/derivation-goal.hh | 2 +- .../store/build/derivation-resolution-goal.hh | 3 +-- src/libstore/include/nix/store/build/worker.hh | 16 +++++----------- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index edb496024..1dd11160f 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,8 +29,7 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { - DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + DerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); ~DerivationBuildingGoal(); private: diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index e05bf1c0b..13369d889 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -45,7 +45,7 @@ struct DerivationGoal : public Goal const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal); + BuildMode buildMode); ~DerivationGoal() = default; void timedOut(Error && ex) override diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh 
b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh index a284843f0..fb4c2a346 100644 --- a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -35,8 +35,7 @@ struct BuilderFailureError; */ struct DerivationResolutionGoal : public Goal { - DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + DerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); /** * If the derivation needed to be resolved, this is resulting diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9660d66b2..542e3ff33 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -210,32 +210,26 @@ private: std::shared_ptr initGoalIfNeeded(std::weak_ptr & goal_weak, Args &&... 
args); std::shared_ptr makeDerivationTrampolineGoal( - ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal); + ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode); public: std::shared_ptr makeDerivationTrampolineGoal( - const StorePath & drvPath, - const OutputsSpec & wantedOutputs, - const Derivation & drv, - BuildMode buildMode = bmNormal); + const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode); std::shared_ptr makeDerivationGoal( - const StorePath & drvPath, - const Derivation & drv, - const OutputName & wantedOutput, - BuildMode buildMode = bmNormal); + const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode); /** * @ref DerivationResolutionGoal "derivation resolution goal" */ std::shared_ptr - makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); /** * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); /** * @ref PathSubstitutionGoal "substitution goal" From ad893acf466ad889fecd459ed0e1554d97c27e97 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:40:00 -0400 Subject: [PATCH 186/373] Fix `ca/eval-store.sh` test The refactor in the last commit fixed the bug it was supposed to fix, but introduced a new bug in that sometimes we tried to write a resolved derivation to a store before all its `inputSrcs` were in that store. The solution is to defer writing the derivation until inside `DerivationBuildingGoal`, just before we do an actual build. At this point, we are sure that all inputs are in the store. 
This does have the side effect of meaning we don't write down the resolved derivation in the substituting case, only the building case, but I think that is actually fine. The store that actually does the building should make a record of what it built by storing the resolved derivation. Other stores that just substitute from that store don't necessarily want that derivation however. They can trust the substituter to keep the record around, or barring that, they can attempt to re-resolve everything, if they need to be audited. (cherry picked from commit c97b050a6c212d0b748303080b5604309b7abdce) --- src/libstore/build/derivation-building-goal.cc | 13 ++++++++++--- src/libstore/build/derivation-goal.cc | 16 +++++++--------- src/libstore/build/derivation-trampoline-goal.cc | 2 +- src/libstore/build/worker.cc | 15 ++++++++++----- .../nix/store/build/derivation-building-goal.hh | 14 ++++++++++++-- .../include/nix/store/build/derivation-goal.hh | 8 ++++++-- src/libstore/include/nix/store/build/worker.hh | 10 +++++++--- 7 files changed, 53 insertions(+), 25 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index c00123634..4230ed465 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -26,8 +26,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) - : Goal(worker, gaveUpOnSubstitution()) + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation) + : Goal(worker, gaveUpOnSubstitution(storeDerivation)) , drvPath(drvPath) , drv{std::make_unique(drv)} , buildMode(buildMode) @@ -107,7 +107,7 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. 
*/ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) { Goals waitees; @@ -155,6 +155,13 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ + if (storeDerivation) { + assert(drv->inputDrvs.map.empty()); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, *drv); + } + { /* If we get this far, we know no dynamic drvs inputs */ diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 1f8eb1262..b0081f709 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -30,8 +30,9 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode) - : Goal(worker, haveDerivation()) + BuildMode buildMode, + bool storeDerivation) + : Goal(worker, haveDerivation(storeDerivation)) , drvPath(drvPath) , wantedOutput(wantedOutput) , drv{std::make_unique(drv)} @@ -59,7 +60,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation() +Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) { trace("have derivation"); @@ -153,11 +154,8 @@ Goal::Co DerivationGoal::haveDerivation() if (resolutionGoal->resolvedDrv) { auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + auto resolvedDrvGoal = + worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); { Goals waitees{resolvedDrvGoal}; co_await await(std::move(waitees)); @@ -233,7 +231,7 @@ Goal::Co DerivationGoal::haveDerivation() 
/* Give up on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); + auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); /* We will finish with it ourselves, as if we were the derivational goal. */ g->preserveException = true; diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 310d23d70..963156aa5 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -145,7 +145,7 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation /* Build this step! */ for (auto & output : resolvedWantedOutputs) { - auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode)); + auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode, false)); g->preserveException = true; /* We will finish with it ourselves, as if we were the derivational goal. 
*/ concreteDrvGoals.insert(std::move(g)); diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index f597abb63..53175a8c4 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -76,9 +76,14 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation) { - return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); + return initGoalIfNeeded( + derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); } std::shared_ptr @@ -87,10 +92,10 @@ Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr Worker::makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) { - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); } std::shared_ptr diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index 1dd11160f..547e533e2 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,7 +29,17 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { - 
DerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); + /** + * @param storeDerivation Whether to store the derivation in + * `worker.store`. This is useful for newly-resolved derivations. In this + * case, the derivation was not created a priori, e.g. purely (or close + * enough) from evaluation of the Nix language, but also depends on the + * exact content produced by upstream builds. It is strongly advised to + * have a permanent record of such a resolved derivation in order to + * faithfully reconstruct the build history. + */ + DerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation); ~DerivationBuildingGoal(); private: @@ -99,7 +109,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(); + Co gaveUpOnSubstitution(bool storeDerivation); Co tryToBuild(); /** diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 13369d889..c5eb2fe79 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,12 +40,16 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; + /** + * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. + */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode); + BuildMode buildMode, + bool storeDerivation); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -80,7 +84,7 @@ private: /** * The states. */ - Co haveDerivation(); + Co haveDerivation(bool storeDerivation); /** * Return `std::nullopt` if the output is unknown, e.g. 
un unbuilt diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 542e3ff33..bb0202dfd 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -217,7 +217,11 @@ public: const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode); std::shared_ptr makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode); + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation); /** * @ref DerivationResolutionGoal "derivation resolution goal" @@ -228,8 +232,8 @@ public: /** * @ref DerivationBuildingGoal "derivation building goal" */ - std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); + std::shared_ptr makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation); /** * @ref PathSubstitutionGoal "substitution goal" From edf9163c2259b7267d9b3fe39347a22744ecdb8b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 02:24:03 +0300 Subject: [PATCH 187/373] libutil: Make CanonPath::root const By all means CanonPath::root must be immutable. Let's enforce this in the code. 
--- src/libutil/canon-path.cc | 2 +- src/libutil/include/nix/util/canon-path.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 07a3a6193..3b4777ef7 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -5,7 +5,7 @@ namespace nix { -CanonPath CanonPath::root = CanonPath("/"); +const CanonPath CanonPath::root = CanonPath("/"); static std::string absPathPure(std::string_view path) { diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index dd07929b4..a9c173d71 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -69,7 +69,7 @@ public: */ CanonPath(const std::vector & elems); - static CanonPath root; + static const CanonPath root; /** * If `raw` starts with a slash, return From 1633ceaff25535de9419d992dd4753c6cc221796 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 02:33:38 +0300 Subject: [PATCH 188/373] libutil: Ensure that CanonPath does not contain NUL bytes This, alongside the other invariants of the CanonPath is important to uphold. std::filesystem happily crashes on NUL bytes in the constructor, as we've seen with `path:%00` prior to c436b7a32afaf01d62f828697ddf5c49d4f8678c. Best to stay clear of NUL bytes when we're talking about syscalls, especially on Unix where strings are null terminated. Very nice to have if we decide to switch over to pascal-style strings. 
--- src/libutil-tests/canon-path.cc | 9 +++++++++ src/libutil/canon-path.cc | 19 +++++++++++++++++++ src/libutil/include/nix/util/canon-path.hh | 10 ++++++---- 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/libutil-tests/canon-path.cc b/src/libutil-tests/canon-path.cc index 971a9cc96..aae9285c4 100644 --- a/src/libutil-tests/canon-path.cc +++ b/src/libutil-tests/canon-path.cc @@ -42,6 +42,15 @@ TEST(CanonPath, basic) } } +TEST(CanonPath, nullBytes) +{ + std::string s = "/hello/world"; + s[8] = '\0'; + ASSERT_THROW(CanonPath("/").push(std::string(1, '\0')), BadCanonPath); + ASSERT_THROW(CanonPath(std::string_view(s)), BadCanonPath); + ASSERT_THROW(CanonPath(s, CanonPath::root), BadCanonPath); +} + TEST(CanonPath, from_existing) { CanonPath p0("foo//bar/"); diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 3b4777ef7..22ca3e066 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -3,6 +3,8 @@ #include "nix/util/file-path-impl.hh" #include "nix/util/strings-inline.hh" +#include + namespace nix { const CanonPath CanonPath::root = CanonPath("/"); @@ -12,14 +14,30 @@ static std::string absPathPure(std::string_view path) return canonPathInner(path, [](auto &, auto &) {}); } +static void ensureNoNullBytes(std::string_view s) +{ + if (std::memchr(s.data(), '\0', s.size())) [[unlikely]] { + using namespace std::string_view_literals; + auto str = replaceStrings(std::string(s), "\0"sv, "␀"sv); + throw BadCanonPath("path segment '%s' must not contain null (\\0) bytes", str); + } +} + CanonPath::CanonPath(std::string_view raw) : path(absPathPure(concatStrings("/", raw))) +{ + ensureNoNullBytes(raw); +} + +CanonPath::CanonPath(const char * raw) + : path(absPathPure(concatStrings("/", raw))) { } CanonPath::CanonPath(std::string_view raw, const CanonPath & root) : path(absPathPure(raw.size() > 0 && raw[0] == '/' ? 
raw : concatStrings(root.abs(), "/", raw))) { + ensureNoNullBytes(raw); } CanonPath::CanonPath(const std::vector & elems) @@ -80,6 +98,7 @@ void CanonPath::push(std::string_view c) { assert(c.find('/') == c.npos); assert(c != "." && c != ".."); + ensureNoNullBytes(c); if (!isRoot()) path += '/'; path += c; diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index a9c173d71..b9b2fff25 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include "nix/util/error.hh" #include #include #include @@ -12,6 +13,8 @@ namespace nix { +MakeError(BadCanonPath, Error); + /** * A canonical representation of a path. It ensures the following: * @@ -23,6 +26,8 @@ namespace nix { * * - There are no components equal to '.' or '..'. * + * - It does not contain NUL bytes. + * * `CanonPath` are "virtual" Nix paths for abstract file system objects; * they are always Unix-style paths, regardless of what OS Nix is * running on. The `/` root doesn't denote the ambient host file system @@ -51,10 +56,7 @@ public: */ CanonPath(std::string_view raw); - explicit CanonPath(const char * raw) - : CanonPath(std::string_view(raw)) - { - } + explicit CanonPath(const char * raw); struct unchecked_t {}; From 1b96a704d38b38804d317a7dac3663630ac599e7 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 14 Oct 2025 16:49:59 +0200 Subject: [PATCH 189/373] Add lazy evaluation for experimental feature reasons Wrap fmt() calls in lambdas to defer string formatting until the feature check fails. This avoids unnecessary string formatting in the common case where the feature is enabled. Addresses performance concern raised by xokdvium in PR review. 
--- src/libstore/derivations.cc | 7 +++++-- src/libstore/derived-path.cc | 6 +++--- src/libstore/downstream-placeholder.cc | 3 ++- src/libutil/include/nix/util/configuration.hh | 13 +++++++++++++ 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index b5d8d1a1c..fa8bc58ac 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -426,7 +426,9 @@ Derivation parseDerivation( if (*versionS == "xp-dyn-drv"sv) { // Only version we have so far version = DerivationATermVersion::DynamicDerivations; - xpSettings.require(Xp::DynamicDerivations, fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name)); + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name); + }); } else { throw FormatError("Unknown derivation ATerm format version '%s'", *versionS); } @@ -1454,7 +1456,8 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental node.value = getStringSet(valueAt(json, "outputs")); auto drvs = getObject(valueAt(json, "dynamicOutputs")); for (auto & [outputId, childNode] : drvs) { - xpSettings.require(Xp::DynamicDerivations, fmt("dynamic output '%s' in JSON", outputId)); + xpSettings.require( + Xp::DynamicDerivations, [&] { return fmt("dynamic output '%s' in JSON", outputId); }); node.childMap[outputId] = doInput(childNode); } return node; diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 34e591666..8d606cb41 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -86,9 +86,9 @@ void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatu // plain drv path; no experimental features required. 
}, [&](const SingleDerivedPath::Built & b) { - xpSettings.require( - Xp::DynamicDerivations, - fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string())); + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string()); + }); }, }, drv.raw()); diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc index 30044501b..780717a62 100644 --- a/src/libstore/downstream-placeholder.cc +++ b/src/libstore/downstream-placeholder.cc @@ -24,7 +24,8 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation( OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings) { - xpSettings.require(Xp::DynamicDerivations, fmt("placeholder for unknown derivation output '%s'", outputName)); + xpSettings.require( + Xp::DynamicDerivations, [&] { return fmt("placeholder for unknown derivation output '%s'", outputName); }); auto compressed = compressHash(placeholder.hash, 20); auto clearText = "nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName}; diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index c8d7b7f24..541febdb5 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -465,6 +465,19 @@ struct ExperimentalFeatureSettings : Config */ void require(const ExperimentalFeature &, std::string reason = "") const; + /** + * Require an experimental feature be enabled, throwing an error if it is + * not. The reason is lazily evaluated only if the feature is disabled. 
+ */ + template + requires std::invocable && std::convertible_to, std::string> + void require(const ExperimentalFeature & feature, GetReason && getReason) const + { + if (isEnabled(feature)) + return; + require(feature, getReason()); + } + /** * `std::nullopt` pointer means no feature, which means there is nothing that could be * disabled, and so the function returns true in that case. From d18f959d4fb381ec4e3a489410fb336731cff7d3 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sun, 12 Oct 2025 02:07:04 +0000 Subject: [PATCH 190/373] test(nixos): add comprehensive curl-based S3 VM tests Add `curl-s3-binary-cache-store.nix` with comprehensive test coverage for the curl-based S3 implementation. Depends-On: #14206, #14222 --- ci/gha/tests/default.nix | 4 + tests/nixos/curl-s3-binary-cache-store.nix | 507 +++++++++++++++++++++ tests/nixos/default.nix | 2 + 3 files changed, 513 insertions(+) create mode 100644 tests/nixos/curl-s3-binary-cache-store.nix diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 09fb6ec23..46310bc36 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -236,6 +236,10 @@ rec { # S3 binary cache store test only runs when S3 support is enabled inherit (nixosTests) s3-binary-cache-store; } + // lib.optionalAttrs (withCurlS3 == true) { + # S3 binary cache store test using curl implementation + inherit (nixosTests) curl-s3-binary-cache-store; + } // lib.optionalAttrs (!withSanitizers && !withCoverage) { # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it # when not testing with sanitizers to avoid rebuilding nix diff --git a/tests/nixos/curl-s3-binary-cache-store.nix b/tests/nixos/curl-s3-binary-cache-store.nix new file mode 100644 index 000000000..53d79689c --- /dev/null +++ b/tests/nixos/curl-s3-binary-cache-store.nix @@ -0,0 +1,507 @@ +{ + lib, + config, + nixpkgs, + ... 
+}: + +let + pkgs = config.nodes.client.nixpkgs.pkgs; + + # Test packages - minimal packages for fast copying + pkgA = pkgs.writeText "test-package-a" "test package a"; + pkgB = pkgs.writeText "test-package-b" "test package b"; + pkgC = pkgs.writeText "test-package-c" "test package c"; + + # S3 configuration + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + +in +{ + name = "curl-s3-binary-cache-store"; + + nodes = { + server = + { + config, + lib, + pkgs, + ... + }: + { + virtualisation.writableStore = true; + virtualisation.cores = 2; + virtualisation.additionalPaths = [ + pkgA + pkgB + pkgC + ]; + environment.systemPackages = [ pkgs.minio-client ]; + nix.extraOptions = '' + experimental-features = nix-command + substituters = + ''; + services.minio = { + enable = true; + region = "eu-west-1"; + rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; + }; + networking.firewall.allowedTCPPorts = [ 9000 ]; + }; + + client = + { config, pkgs, ... 
}: + { + virtualisation.writableStore = true; + virtualisation.cores = 2; + nix.extraOptions = '' + experimental-features = nix-command + substituters = + ''; + }; + }; + + testScript = + { nodes }: + # python + '' + import json + import random + import re + import uuid + + # ============================================================================ + # Configuration + # ============================================================================ + + ACCESS_KEY = '${accessKey}' + SECRET_KEY = '${secretKey}' + ENDPOINT = 'http://server:9000' + REGION = 'eu-west-1' + + PKG_A = '${pkgA}' + PKG_B = '${pkgB}' + PKG_C = '${pkgC}' + + ENV_WITH_CREDS = f"AWS_ACCESS_KEY_ID={ACCESS_KEY} AWS_SECRET_ACCESS_KEY={SECRET_KEY}" + + # ============================================================================ + # Helper Functions + # ============================================================================ + + def make_s3_url(bucket, path="", **params): + """Build S3 URL with optional path and query parameters""" + params.setdefault('endpoint', ENDPOINT) + params.setdefault('region', REGION) + query = '&'.join(f"{k}={v}" for k, v in params.items()) + bucket_and_path = f"{bucket}{path}" if path else bucket + return f"s3://{bucket_and_path}?{query}" + + def make_http_url(path): + """Build HTTP URL for direct S3 access""" + return f"{ENDPOINT}/{path}" + + def get_package_hash(pkg_path): + """Extract store hash from package path""" + return pkg_path.split("/")[-1].split("-")[0] + + def verify_content_encoding(machine, bucket, object_path, expected_encoding): + """Verify S3 object has expected Content-Encoding header""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" not in stat or expected_encoding not in stat: + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Expected Content-Encoding: {expected_encoding} header on {object_path}") + + def verify_no_compression(machine, bucket, object_path): + """Verify S3 
object has no compression headers""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" in stat and ("gzip" in stat or "xz" in stat): + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Object {object_path} should not have compression Content-Encoding") + + def assert_count(output, pattern, expected, error_msg): + """Assert that pattern appears exactly expected times in output""" + actual = output.count(pattern) + if actual != expected: + print("Debug output:") + print(output) + raise Exception(f"{error_msg}: expected {expected}, got {actual}") + + def with_test_bucket(populate_with=[]): + """ + Decorator that creates/destroys a unique bucket for each test. + Optionally pre-populates bucket with specified packages. + + Args: + populate_with: List of packages to upload before test runs + """ + def decorator(test_func): + def wrapper(): + bucket = str(uuid.uuid4()) + server.succeed(f"mc mb minio/{bucket}") + try: + if populate_with: + store_url = make_s3_url(bucket) + for pkg in populate_with: + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {pkg}") + test_func(bucket) + finally: + server.succeed(f"mc rb --force minio/{bucket}") + return wrapper + return decorator + + # ============================================================================ + # Test Functions + # ============================================================================ + + @with_test_bucket() + def test_credential_caching(bucket): + """Verify credential providers are cached and reused""" + print("\n=== Testing Credential Caching ===") + + store_url = make_s3_url(bucket) + output = server.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' " + f"{PKG_A} {PKG_B} {PKG_C} 2>&1" + ) + + assert_count( + output, + "creating new AWS credential provider", + 1, + "Credential provider caching failed" + ) + + print("✓ Credential provider created once and cached") + + @with_test_bucket(populate_with=[PKG_A]) + 
def test_fetchurl_basic(bucket): + """Test builtins.fetchurl works with s3:// URLs""" + print("\n=== Testing builtins.fetchurl ===") + + client.wait_for_unit("network-addresses-eth1.service") + + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{cache_info_url}\"; }}'" + ) + + print("✓ builtins.fetchurl works with s3:// URLs") + + @with_test_bucket() + def test_error_message_formatting(bucket): + """Verify error messages display URLs correctly""" + print("\n=== Testing Error Message Formatting ===") + + nonexistent_url = make_s3_url(bucket, path="/foo-that-does-not-exist") + expected_http_url = make_http_url(f"{bucket}/foo-that-does-not-exist") + + error_msg = client.fail( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{nonexistent_url}\"; }}' 2>&1" + ) + + if f"unable to download '{expected_http_url}': HTTP error 404" not in error_msg: + print("Actual error message:") + print(error_msg) + raise Exception("Error message formatting failed - should show actual URL, not %s placeholder") + + print("✓ Error messages format URLs correctly") + + @with_test_bucket(populate_with=[PKG_A]) + def test_fork_credential_preresolution(bucket): + """Test credential pre-resolution in forked processes""" + print("\n=== Testing Fork Credential Pre-resolution ===") + + # Get hash of nix-cache-info for fixed-output derivation + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Build derivation with unique test ID + test_id = random.randint(0, 10000) + test_url = make_s3_url(bucket, 
path="/nix-cache-info", test_id=test_id) + + fetchurl_expr = """ + import {{ + name = "s3-fork-test-{id}"; + url = "{url}"; + sha256 = "{hash}"; + }} + """.format(id=test_id, url=test_url, hash=cache_info_hash) + + output = client.succeed( + f"{ENV_WITH_CREDS} nix build --debug --impure --expr '{fetchurl_expr}' 2>&1" + ) + + # Verify fork behavior + if "builtin:fetchurl creating fresh FileTransfer instance" not in output: + print("Debug output:") + print(output) + raise Exception("Expected to find FileTransfer creation in forked process") + + print(" ✓ Forked process creates fresh FileTransfer") + + # Verify pre-resolution in parent + required_messages = [ + "Pre-resolving AWS credentials for S3 URL in builtin:fetchurl", + "Successfully pre-resolved AWS credentials in parent process", + ] + + for msg in required_messages: + if msg not in output: + print("Debug output:") + print(output) + raise Exception(f"Missing expected message: {msg}") + + print(" ✓ Parent pre-resolves credentials") + + # Verify child uses pre-resolved credentials + if "Using pre-resolved AWS credentials from parent process" not in output: + print("Debug output:") + print(output) + raise Exception("Child should use pre-resolved credentials") + + # Extract child PID and verify it doesn't create new providers + filetransfer_match = re.search( + r'\[pid=(\d+)\] builtin:fetchurl creating fresh FileTransfer instance', + output + ) + + if not filetransfer_match: + raise Exception("Could not extract child PID from debug output") + + child_pid = filetransfer_match.group(1) + child_provider_creation = f"[pid={child_pid}] creating new AWS credential provider" + + if child_provider_creation in output: + print("Debug output:") + print(output) + raise Exception(f"Child process (pid={child_pid}) should NOT create new credential providers") + + print(" ✓ Child uses pre-resolved credentials (no new providers)") + + @with_test_bucket(populate_with=[PKG_A, PKG_B, PKG_C]) + def test_store_operations(bucket): + 
"""Test nix store info and copy operations""" + print("\n=== Testing Store Operations ===") + + store_url = make_s3_url(bucket) + + # Verify store info works + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{store_url}' >&2") + + # Get and validate store info JSON + info_json = client.succeed(f"{ENV_WITH_CREDS} nix store info --json --store '{store_url}'") + store_info = json.loads(info_json) + + if not store_info.get("url"): + raise Exception("Store should have a URL") + + print(f" ✓ Store URL: {store_info['url']}") + + # Test copy from store + client.fail(f"nix path-info {PKG_A}") + + output = client.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs " + f"--from '{store_url}' {PKG_A} {PKG_B} {PKG_C} 2>&1" + ) + + assert_count( + output, + "creating new AWS credential provider", + 1, + "Client credential provider caching failed" + ) + + client.succeed(f"nix path-info {PKG_A}") + + print(" ✓ nix copy works") + print(" ✓ Credentials cached on client") + + @with_test_bucket(populate_with=[PKG_A]) + def test_url_format_variations(bucket): + """Test different S3 URL parameter combinations""" + print("\n=== Testing URL Format Variations ===") + + # Test parameter order variation (region before endpoint) + url1 = f"s3://{bucket}?region={REGION}&endpoint={ENDPOINT}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url1}' >&2") + print(" ✓ Parameter order: region before endpoint works") + + # Test parameter order variation (endpoint before region) + url2 = f"s3://{bucket}?endpoint={ENDPOINT}&region={REGION}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") + print(" ✓ Parameter order: endpoint before region works") + + @with_test_bucket(populate_with=[PKG_A]) + def test_concurrent_fetches(bucket): + """Validate thread safety with concurrent S3 operations""" + print("\n=== Testing Concurrent Fetches ===") + + # Get hash for test derivations + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + 
cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Create 5 concurrent fetch derivations + # Build base URL for concurrent test (we'll add fetch_id in Nix interpolation) + base_url = make_s3_url(bucket, path="/nix-cache-info") + concurrent_expr = """ + let + mkFetch = i: import {{ + name = "concurrent-s3-fetch-''${{toString i}}"; + url = "{url}&fetch_id=''${{toString i}}"; + sha256 = "{hash}"; + }}; + fetches = builtins.listToAttrs (map (i: {{ + name = "fetch''${{toString i}}"; + value = mkFetch i; + }}) (builtins.genList (i: i) 5)); + in fetches + """.format(url=base_url, hash=cache_info_hash) + + try: + output = client.succeed( + f"{ENV_WITH_CREDS} nix build --debug --impure " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + except: + output = client.fail( + f"{ENV_WITH_CREDS} nix build --debug --impure " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + + if "error:" in output.lower(): + print("Found error during concurrent fetches:") + print(output) + + providers_created = output.count("creating new AWS credential provider") + transfers_created = output.count("builtin:fetchurl creating fresh FileTransfer instance") + + print(f" ✓ {providers_created} credential providers created") + print(f" ✓ {transfers_created} FileTransfer instances created") + + if transfers_created != 5: + print("Debug output:") + print(output) + raise Exception( + f"Expected 5 FileTransfer instances for 5 concurrent fetches, got {transfers_created}" + ) + + @with_test_bucket() + def test_compression_narinfo_gzip(bucket): + """Test narinfo compression with gzip""" + print("\n=== Testing Compression: narinfo (gzip) ===") + + store_url = make_s3_url(bucket, **{'narinfo-compression': 'gzip'}) + server.succeed(f"{ENV_WITH_CREDS} nix 
copy --to '{store_url}' {PKG_B}") + + pkg_hash = get_package_hash(PKG_B) + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "gzip") + + print(" ✓ .narinfo has Content-Encoding: gzip") + + # Verify client can download and decompress + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_B}") + client.succeed(f"nix path-info {PKG_B}") + + print(" ✓ Client decompressed .narinfo successfully") + + @with_test_bucket() + def test_compression_mixed(bucket): + """Test mixed compression (narinfo=xz, ls=gzip)""" + print("\n=== Testing Compression: mixed (narinfo=xz, ls=gzip) ===") + + store_url = make_s3_url( + bucket, + **{'narinfo-compression': 'xz', 'write-nar-listing': 'true', 'ls-compression': 'gzip'} + ) + + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_C}") + + pkg_hash = get_package_hash(PKG_C) + + # Verify .narinfo has xz compression + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "xz") + print(" ✓ .narinfo has Content-Encoding: xz") + + # Verify .ls has gzip compression + verify_content_encoding(server, bucket, f"{pkg_hash}.ls", "gzip") + print(" ✓ .ls has Content-Encoding: gzip") + + # Verify client can download with mixed compression + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_C}") + client.succeed(f"nix path-info {PKG_C}") + + print(" ✓ Client downloaded package with mixed compression") + + @with_test_bucket() + def test_compression_disabled(bucket): + """Verify no compression by default""" + print("\n=== Testing Compression: disabled (default) ===") + + store_url = make_s3_url(bucket) + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_A}") + + pkg_hash = get_package_hash(PKG_A) + verify_no_compression(server, bucket, f"{pkg_hash}.narinfo") + + print(" ✓ No compression applied by default") + + # ============================================================================ + # Main Test Execution + # 
============================================================================ + + print("\n" + "="*80) + print("S3 Binary Cache Store Tests") + print("="*80) + + start_all() + + # Initialize MinIO server + server.wait_for_unit("minio") + server.wait_for_unit("network-addresses-eth1.service") + server.wait_for_open_port(9000) + server.succeed(f"mc config host add minio http://localhost:9000 {ACCESS_KEY} {SECRET_KEY} --api s3v4") + + # Run tests (each gets isolated bucket via decorator) + test_credential_caching() + test_fetchurl_basic() + test_error_message_formatting() + test_fork_credential_preresolution() + test_store_operations() + test_url_format_variations() + test_concurrent_fetches() + test_compression_narinfo_gzip() + test_compression_mixed() + test_compression_disabled() + + print("\n" + "="*80) + print("✓ All S3 Binary Cache Store Tests Passed!") + print("="*80) + ''; +} diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index edfa4124f..ea6a7e914 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -201,6 +201,8 @@ in s3-binary-cache-store = runNixOSTest ./s3-binary-cache-store.nix; + curl-s3-binary-cache-store = runNixOSTest ./curl-s3-binary-cache-store.nix; + fsync = runNixOSTest ./fsync.nix; cgroups = runNixOSTest ./cgroups; From 0c32fb3fa2d66448615744b502d06b6dea21d66e Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 23:58:18 +0300 Subject: [PATCH 191/373] treewide: Add Store::requireStoreObjectAccessor, simplify uses of getFSAccessor This is a simple wrapper around getFSAccessor that throws an InvalidPath error. This simplifies usage in callsites that only care about getting a non-null accessor. 
--- src/libfetchers/fetchers.cc | 3 +-- src/libfetchers/github.cc | 10 ++++++---- src/libfetchers/mercurial.cc | 4 +--- src/libfetchers/path.cc | 2 +- src/libstore/include/nix/store/store-api.hh | 20 +++++++++++++++++++- src/libstore/store-api.cc | 2 +- src/nix/cat.cc | 5 +---- src/nix/ls.cc | 5 +---- src/nix/nix-store/nix-store.cc | 2 +- src/nix/why-depends.cc | 2 +- 10 files changed, 33 insertions(+), 22 deletions(-) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index f697ec6f5..7c741a7a3 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -332,8 +332,7 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); - // We just ensured the store object was there - auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->fingerprint = getFingerprint(store); diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index a905bb384..2479a57d2 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -399,7 +399,8 @@ struct GitHubInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); return RefInfo{ .rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1), @@ -473,7 +474,8 @@ struct GitLabInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = 
nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1)}; @@ -549,7 +551,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme std::string refUri; if (ref == "HEAD") { auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers); - auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); auto remoteLine = git::parseLsRemoteLine(getLine(contents).first); if (!remoteLine) { @@ -563,7 +565,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers); - auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); std::istringstream is(contents); std::string line; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index bf460d9c6..41bf6e2aa 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -329,9 +329,7 @@ struct MercurialInputScheme : InputScheme Input input(_input); auto storePath = fetchToStore(store, input); - - // We just added it, it should be there. 
- auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index aa0411ff9..c4b5e2f1e 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -138,7 +138,7 @@ struct PathInputScheme : InputScheme storePath = store->addToStoreFromDump(*src, "source"); } - auto accessor = ref{store->getFSAccessor(*storePath)}; + auto accessor = store->requireStoreObjectAccessor(*storePath); // To prevent `fetchToStore()` copying the path again to Nix // store, pre-create an entry in the fetcher cache. diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 1131ec975..5c96c5f80 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -724,10 +724,28 @@ public: * the Nix store. * * @return nullptr if the store doesn't contain an object at the - * givine path. + * given path. */ virtual std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) = 0; + /** + * Get an accessor for the store object or throw an Error if it's invalid or + * doesn't exist. + * + * @throws InvalidPath if the store object doesn't exist or (if requireValidPath = true) is + * invalid. + */ + [[nodiscard]] ref requireStoreObjectAccessor(const StorePath & path, bool requireValidPath = true) + { + auto accessor = getFSAccessor(path, requireValidPath); + if (!accessor) { + throw InvalidPath( + requireValidPath ? "path '%1%' is not a valid store path" : "store path '%1%' does not exist", + printStorePath(path)); + } + return ref{accessor}; + } + /** * Repair the contents of the given path by redownloading it using * a substituter (if available). 
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 4ce6b15fa..1335eb76a 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1130,7 +1130,7 @@ Derivation Store::derivationFromPath(const StorePath & drvPath) static Derivation readDerivationCommon(Store & store, const StorePath & drvPath, bool requireValidPath) { - auto accessor = store.getFSAccessor(drvPath, requireValidPath); + auto accessor = store.requireStoreObjectAccessor(drvPath, requireValidPath); try { return parseDerivation(store, accessor->readFile(CanonPath::root), Derivation::nameFromPath(drvPath)); } catch (FormatError & e) { diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 145336723..effe544e6 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -41,10 +41,7 @@ struct CmdCatStore : StoreCommand, MixCat void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - cat(ref{std::move(accessor)}, CanonPath{rest}); + cat(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 4952d5243..5cdfc2c0f 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -115,10 +115,7 @@ struct CmdLsStore : StoreCommand, MixLs void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - list(ref{std::move(accessor)}, CanonPath{rest}); + list(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index f8078426c..313a6398c 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -603,7 +603,7 @@ static void registerValidity(bool reregister, bool 
hashGiven, bool canonicalise) #endif if (!hashGiven) { HashResult hash = hashPath( - {ref{store->getFSAccessor(info->path, false)}}, + {store->requireStoreObjectAccessor(info->path, /*requireValidPath=*/false)}, FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256); info->narHash = hash.hash; diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 473827a93..dc30fabd7 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -207,7 +207,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions contain the reference. */ std::map hits; - auto accessor = store->getFSAccessor(node.path); + auto accessor = store->requireStoreObjectAccessor(node.path); auto visitPath = [&](this auto && recur, const CanonPath & p) -> void { auto st = accessor->maybeLstat(p); From 69c005e805859364bc98061852602d6ea2dd37c3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:15:48 +0300 Subject: [PATCH 192/373] libstore: Use getFSAccessor for store object in Worker::pathContentsGood We only care about the accessor for a single store object anyway, but the validity gets ignored. Also `pathExists(store.printStorePath(path))` is definitely incorrect since it confuses the logical location vs physical location in case of a chroot store. 
--- src/libstore/build/worker.cc | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 53175a8c4..d23c53e77 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -529,15 +529,9 @@ bool Worker::pathContentsGood(const StorePath & path) return i->second; printInfo("checking path '%s'...", store.printStorePath(path)); auto info = store.queryPathInfo(path); - bool res; - if (!pathExists(store.printStorePath(path))) - res = false; - else { - auto current = hashPath( - {store.getFSAccessor(), CanonPath(path.to_string())}, - FileIngestionMethod::NixArchive, - info->narHash.algo) - .first; + bool res = false; + if (auto accessor = store.getFSAccessor(path, /*requireValidPath=*/false)) { + auto current = hashPath({ref{accessor}}, FileIngestionMethod::NixArchive, info->narHash.algo).first; Hash nullHash(HashAlgorithm::SHA256); res = info->narHash == nullHash || info->narHash == current; } From 918a3cebaa439d2baba46c9ca7d0f1fc6da0db2b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:25:14 +0300 Subject: [PATCH 193/373] libexpr: Use Store::requireStoreObjectAccessor instead of toRealPath in fetch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This forces the code to go through proper abstractions instead of the raw filesystem API. 
This issue is evident from this reproducer: nix eval --expr 'builtins.fetchurl { url = "https://example.com"; sha256 = ""; }' --json --eval-store "dummy://?read-only=false" error: … while calling the 'fetchurl' builtin at «string»:1:1: 1| builtins.fetchurl { url = "https://example.com"; sha256 = ""; } | ^ error: opening file '/nix/store/r4f87yrl98f2m6v9z8ai2rbg4qwlcakq-example.com': No such file or directory --- src/libexpr/primops/fetchTree.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 48c03f177..ad76af5b5 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -588,7 +588,11 @@ static void fetch( if (expectedHash) { auto hash = unpack ? state.store->queryPathInfo(storePath)->narHash - : hashFile(HashAlgorithm::SHA256, state.store->toRealPath(storePath)); + : hashPath( + {state.store->requireStoreObjectAccessor(storePath)}, + FileSerialisationMethod::Flat, + HashAlgorithm::SHA256) + .hash; if (hash != *expectedHash) { state .error( From 0347958dd2c53763146e0227a1fbf6ffaa3d2c86 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:48:39 +0300 Subject: [PATCH 194/373] nix/develop: Remove usage of toRealPath, replace with SourceAccessor --- src/nix/develop.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/nix/develop.cc b/src/nix/develop.cc index f78eee59a..28d0a7080 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -299,11 +299,9 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore for (auto & [_0, optPath] : evalStore->queryPartialDerivationOutputMap(shellDrvPath)) { assert(optPath); - auto & outPath = *optPath; - assert(store->isValidPath(outPath)); - auto outPathS = store->toRealPath(outPath); - if (lstat(outPathS).st_size) - return outPath; + auto accessor = evalStore->requireStoreObjectAccessor(*optPath); + if (auto st = 
accessor->maybeLstat(CanonPath::root); st && st->fileSize.value_or(0)) + return *optPath; } throw Error("get-env.sh failed to produce an environment"); @@ -502,7 +500,9 @@ struct Common : InstallableCommand, MixProfile debug("reading environment file '%s'", strPath); - return {BuildEnvironment::parseJSON(readFile(store->toRealPath(shellOutPath))), strPath}; + return { + BuildEnvironment::parseJSON(store->requireStoreObjectAccessor(shellOutPath)->readFile(CanonPath::root)), + strPath}; } }; From 092639709f8cfa6ee2b896bb560ae1b37dfe81cf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 19:25:06 -0700 Subject: [PATCH 195/373] Remove duplicate shellcheck in dev-shell.nix --- packaging/dev-shell.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 37e92e363..bfa219d2d 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -118,7 +118,6 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ [ pkgs.buildPackages.cmake pkgs.buildPackages.gnused - pkgs.buildPackages.shellcheck pkgs.buildPackages.changelog-d modular.pre-commit.settings.package (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) From 7bc3d9b9a9ba1f84a5b6b631143276be767234a4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 22:53:13 -0400 Subject: [PATCH 196/373] First attempt at uwyu for libflake --- packaging/dev-shell.nix | 1 + src/libflake/config.cc | 27 +++++++++- src/libflake/flake-primops.cc | 26 +++++++++ src/libflake/flake.cc | 52 ++++++++++++++++-- src/libflake/flakeref.cc | 20 ++++++- .../include/nix/flake/flake-primops.hh | 6 +++ src/libflake/include/nix/flake/flakeref.hh | 14 +++++ src/libflake/include/nix/flake/settings.hh | 7 ++- src/libflake/include/nix/flake/url-name.hh | 4 ++ src/libflake/lockfile.cc | 54 +++++++++++++++---- src/libflake/settings.cc | 4 ++ src/libflake/url-name.cc | 6 ++- 12 files changed, 201 insertions(+), 20 deletions(-) diff 
--git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 37e92e363..7eec45bfb 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -124,6 +124,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) pkgs.buildPackages.nixfmt-rfc-style pkgs.buildPackages.shellcheck + pkgs.buildPackages.include-what-you-use pkgs.buildPackages.gdb ] ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( diff --git a/src/libflake/config.cc b/src/libflake/config.cc index c9071f601..08e6ff038 100644 --- a/src/libflake/config.cc +++ b/src/libflake/config.cc @@ -1,9 +1,32 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/users.hh" #include "nix/util/config-global.hh" #include "nix/flake/settings.hh" #include "nix/flake/flake.hh" - -#include +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake { diff --git a/src/libflake/flake-primops.cc b/src/libflake/flake-primops.cc index 7c5ce01b2..eeff9a966 100644 --- a/src/libflake/flake-primops.cc +++ b/src/libflake/flake-primops.cc @@ -1,8 +1,34 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flake-primops.hh" #include "nix/expr/eval.hh" #include "nix/flake/flake.hh" #include "nix/flake/flakeref.hh" #include "nix/flake/settings.hh" +#include "nix/expr/attr-set.hh" +#include "nix/expr/eval-error.hh" +#include "nix/expr/eval-inline.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/fetchers/attrs.hh" +#include 
"nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake::primops { diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 147bff820..ae93f2f39 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -1,3 +1,26 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/terminal.hh" #include "nix/util/ref.hh" #include "nix/util/environment-variables.hh" @@ -6,7 +29,6 @@ #include "nix/expr/eval-cache.hh" #include "nix/expr/eval-settings.hh" #include "nix/flake/lockfile.hh" -#include "nix/expr/primops.hh" #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/fetchers/fetchers.hh" @@ -14,14 +36,36 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/settings.hh" #include "nix/expr/value-to-json.hh" -#include "nix/store/local-fs-store.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/fetchers/input-cache.hh" - -#include +#include "nix/expr/attr-set.hh" +#include "nix/expr/eval-error.hh" +#include "nix/expr/nixexpr.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/expr/value/context.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/canon-path.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include 
"nix/util/logging.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/position.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix { +struct SourceAccessor; using namespace flake; diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index 38979783d..a3448c88d 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -1,10 +1,28 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flakeref.hh" -#include "nix/store/store-api.hh" #include "nix/util/url.hh" #include "nix/util/url-parts.hh" #include "nix/fetchers/fetchers.hh" +#include "nix/util/error.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/util.hh" namespace nix { +namespace fetchers { +struct Settings; +} // namespace fetchers #if 0 // 'dir' path elements cannot start with a '.'. 
We also reject diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index 35a7128f4..57a5e3422 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -3,6 +3,12 @@ #include "nix/expr/eval.hh" #include "nix/flake/settings.hh" +namespace nix { +namespace flake { +struct Settings; +} // namespace flake +} // namespace nix + namespace nix::flake::primops { /** diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index c8c536bce..7a26382a7 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -1,16 +1,30 @@ #pragma once ///@file +#include #include +#include +#include +#include +#include +#include +#include +#include #include "nix/util/types.hh" #include "nix/fetchers/fetchers.hh" #include "nix/store/outputs-spec.hh" #include "nix/fetchers/registry.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/util/ref.hh" +#include "nix/util/source-accessor.hh" namespace nix { class Store; +namespace fetchers { +struct Settings; +} // namespace fetchers typedef std::string FlakeId; diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 618ed4d38..7e5d18746 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -1,9 +1,12 @@ #pragma once ///@file -#include "nix/util/configuration.hh" - #include +#include +#include + +#include "nix/util/configuration.hh" +#include "nix/util/experimental-features.hh" namespace nix { // Forward declarations diff --git a/src/libflake/include/nix/flake/url-name.hh b/src/libflake/include/nix/flake/url-name.hh index b95d2dff6..0c79b74aa 100644 --- a/src/libflake/include/nix/flake/url-name.hh +++ b/src/libflake/include/nix/flake/url-name.hh @@ -1,9 +1,13 @@ +#include +#include + #include "nix/util/url.hh" #include 
"nix/util/url-parts.hh" #include "nix/util/util.hh" #include "nix/util/split.hh" namespace nix { +struct ParsedURL; /** * Try to extract a reasonably unique and meaningful, human-readable diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index f381a57e6..421f872cc 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -1,15 +1,51 @@ -#include "nix/fetchers/fetch-settings.hh" -#include "nix/flake/settings.hh" -#include "nix/flake/lockfile.hh" -#include "nix/store/store-api.hh" -#include "nix/util/strings.hh" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include - -#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nix/fetchers/fetch-settings.hh" +#include "nix/flake/lockfile.hh" +#include "nix/util/strings.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include "nix/util/logging.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" + +namespace nix { +class Store; +} // namespace nix namespace nix::flake { diff --git a/src/libflake/settings.cc b/src/libflake/settings.cc index e77bded30..52fa1b49d 100644 --- a/src/libflake/settings.cc +++ b/src/libflake/settings.cc @@ -1,5 +1,9 @@ +#include + #include "nix/flake/settings.hh" #include "nix/flake/flake-primops.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/eval.hh" namespace nix::flake { diff --git a/src/libflake/url-name.cc b/src/libflake/url-name.cc index 3bba3692e..a63b107c3 100644 --- a/src/libflake/url-name.cc +++ b/src/libflake/url-name.cc @@ -1,6 +1,8 @@ -#include "nix/flake/url-name.hh" 
#include -#include + +#include "nix/flake/url-name.hh" +#include "nix/util/strings.hh" +#include "nix/util/url.hh" namespace nix { From 902faf4fe5d0d0b8947f0001c66c4d67e5282e08 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:20:35 -0400 Subject: [PATCH 197/373] More fixes for iwyu --- src/libflake/config.cc | 4 +--- src/libflake/flake.cc | 6 ++---- src/libflake/flakeref.cc | 15 +++++++++++++-- src/libflake/lockfile.cc | 2 -- src/libflake/url-name.cc | 2 ++ 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/libflake/config.cc b/src/libflake/config.cc index 08e6ff038..c248ed0a6 100644 --- a/src/libflake/config.cc +++ b/src/libflake/config.cc @@ -1,19 +1,17 @@ #include #include #include -#include -#include #include #include #include #include #include #include -#include #include #include #include #include +#include #include "nix/util/users.hh" #include "nix/util/config-global.hh" diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index ae93f2f39..8e7e2be26 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -3,8 +3,6 @@ #include #include #include -#include -#include #include #include #include @@ -12,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -20,6 +17,7 @@ #include #include #include +#include #include "nix/util/terminal.hh" #include "nix/util/ref.hh" @@ -909,7 +907,7 @@ static ref makeInternalFS() internalFS->setPathDisplay("«flakes-internal»", ""); internalFS->addFile( CanonPath("call-flake.nix"), -#include "call-flake.nix.gen.hh" +#include "call-flake.nix.gen.hh" // IWYU pragma: keep ); return internalFS; } diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index a3448c88d..b4a5c106e 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -1,12 +1,15 @@ #include #include #include -#include -#include #include #include #include #include +#include +#include +#include +#include +#include #include "nix/flake/flakeref.hh" #include "nix/util/url.hh" 
@@ -18,8 +21,16 @@ #include "nix/util/logging.hh" #include "nix/util/strings.hh" #include "nix/util/util.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/store/outputs-spec.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" namespace nix { +class Store; +struct SourceAccessor; + namespace fetchers { struct Settings; } // namespace fetchers diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index 421f872cc..fbf17a383 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -1,8 +1,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/src/libflake/url-name.cc b/src/libflake/url-name.cc index a63b107c3..f4b5c6a7f 100644 --- a/src/libflake/url-name.cc +++ b/src/libflake/url-name.cc @@ -1,4 +1,6 @@ #include +#include +#include #include "nix/flake/url-name.hh" #include "nix/util/strings.hh" From e8b126fa909e9745cbc0f4cdcc99a2a5d05258d4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:43:33 -0400 Subject: [PATCH 198/373] Remove unnecessary includes --- src/libflake-tests/flakeref.cc | 7 +++++++ src/libflake-tests/nix_api_flake.cc | 12 +++++++----- src/libflake-tests/url-name.cc | 4 +++- src/libflake/include/nix/flake/flake-primops.hh | 1 - src/libflake/include/nix/flake/flakeref.hh | 9 --------- src/libflake/include/nix/flake/settings.hh | 2 -- src/libflake/include/nix/flake/url-name.hh | 5 ----- 7 files changed, 17 insertions(+), 23 deletions(-) diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index e2cb91bb8..34d281c52 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -1,8 +1,15 @@ #include +#include +#include +#include #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/flakeref.hh" #include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include 
"nix/util/experimental-features.hh" namespace nix { diff --git a/src/libflake-tests/nix_api_flake.cc b/src/libflake-tests/nix_api_flake.cc index f7e0cb719..da7f01401 100644 --- a/src/libflake-tests/nix_api_flake.cc +++ b/src/libflake-tests/nix_api_flake.cc @@ -1,15 +1,17 @@ +#include +#include +#include + #include "nix/util/file-system.hh" #include "nix_api_store.h" #include "nix_api_util.h" #include "nix_api_expr.h" #include "nix_api_value.h" #include "nix_api_flake.h" - -#include "nix/expr/tests/nix_api_expr.hh" #include "nix/util/tests/string_callback.hh" - -#include -#include +#include "nix/store/tests/nix_api_store.hh" +#include "nix/util/tests/nix_api_util.hh" +#include "nix_api_fetchers.h" namespace nixC { diff --git a/src/libflake-tests/url-name.cc b/src/libflake-tests/url-name.cc index 81ba516c8..64cbe5c9d 100644 --- a/src/libflake-tests/url-name.cc +++ b/src/libflake-tests/url-name.cc @@ -1,6 +1,8 @@ -#include "nix/flake/url-name.hh" #include +#include "nix/flake/url-name.hh" +#include "nix/util/url.hh" + namespace nix { /* ----------- tests for url-name.hh --------------------------------------------------*/ diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index 57a5e3422..a2a3d1612 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -1,7 +1,6 @@ #pragma once #include "nix/expr/eval.hh" -#include "nix/flake/settings.hh" namespace nix { namespace flake { diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index 7a26382a7..65a2dfed5 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -1,23 +1,14 @@ #pragma once ///@file -#include #include -#include #include -#include #include #include #include -#include -#include "nix/util/types.hh" -#include "nix/fetchers/fetchers.hh" #include "nix/store/outputs-spec.hh" #include 
"nix/fetchers/registry.hh" -#include "nix/fetchers/attrs.hh" -#include "nix/util/ref.hh" -#include "nix/util/source-accessor.hh" namespace nix { diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 7e5d18746..7187a3294 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -2,11 +2,9 @@ ///@file #include -#include #include #include "nix/util/configuration.hh" -#include "nix/util/experimental-features.hh" namespace nix { // Forward declarations diff --git a/src/libflake/include/nix/flake/url-name.hh b/src/libflake/include/nix/flake/url-name.hh index 0c79b74aa..d313db33b 100644 --- a/src/libflake/include/nix/flake/url-name.hh +++ b/src/libflake/include/nix/flake/url-name.hh @@ -1,11 +1,6 @@ #include #include -#include "nix/util/url.hh" -#include "nix/util/url-parts.hh" -#include "nix/util/util.hh" -#include "nix/util/split.hh" - namespace nix { struct ParsedURL; From 01a8499d2f7baede36827cc6138468329757551f Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:51:40 -0400 Subject: [PATCH 199/373] Format cpp files --- src/libflake/flakeref.cc | 2 +- src/libflake/include/nix/flake/flake-primops.hh | 4 ++-- src/libflake/include/nix/flake/flakeref.hh | 3 ++- src/libflake/lockfile.cc | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index b4a5c106e..a26f269c3 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -33,7 +33,7 @@ struct SourceAccessor; namespace fetchers { struct Settings; -} // namespace fetchers +} // namespace fetchers #if 0 // 'dir' path elements cannot start with a '.'. 
We also reject diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index a2a3d1612..b333e33d7 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -5,8 +5,8 @@ namespace nix { namespace flake { struct Settings; -} // namespace flake -} // namespace nix +} // namespace flake +} // namespace nix namespace nix::flake::primops { diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index 65a2dfed5..1af8c5afd 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -13,9 +13,10 @@ namespace nix { class Store; + namespace fetchers { struct Settings; -} // namespace fetchers +} // namespace fetchers typedef std::string FlakeId; diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index fbf17a383..d3dac19c5 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -43,7 +43,7 @@ namespace nix { class Store; -} // namespace nix +} // namespace nix namespace nix::flake { From aace1fb5d698e763c7f4e3ebd04ea737631adc62 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 15 Oct 2025 13:27:09 +0200 Subject: [PATCH 200/373] C API: test nix_store_get_fs_closure --- src/libstore-tests/nix_api_store.cc | 240 ++++++++++++++++++++++++++++ 1 file changed, 240 insertions(+) diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index dfd554ec1..6d6017f1f 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -218,6 +218,66 @@ struct LambdaAdapter } }; +class NixApiStoreTestWithRealisedPath : public nix_api_store_test_base +{ +public: + StorePath * drvPath = nullptr; + nix_derivation * drv = nullptr; + Store * store = nullptr; + StorePath * outPath = nullptr; + + void SetUp() override + { + nix_api_store_test_base::SetUp(); + + 
nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + drv = nix_derivation_from_json(ctx, store, buffer.str().c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath_) { + auto is_valid_path = nix_store_is_valid_path(ctx, store, outPath_); + ASSERT_EQ(is_valid_path, true); + ASSERT_STREQ(outname, "out") << "Expected single 'out' output"; + ASSERT_EQ(outPath, nullptr) << "Output path callback should only be called once"; + outPath = nix_store_path_clone(outPath_); + }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + ASSERT_NE(outPath, nullptr) << "Derivation should have produced an output"; + } + + void TearDown() override + { + if (drvPath) + nix_store_path_free(drvPath); + if (outPath) + nix_store_path_free(outPath); + if (drv) + nix_derivation_free(drv); + if (store) + nix_store_free(store); + + nix_api_store_test_base::TearDown(); + } +}; + TEST_F(nix_api_store_test_base, build_from_json) { // FIXME get rid of these @@ -256,4 +316,184 @@ TEST_F(nix_api_store_test_base, build_from_json) nix_store_free(store); } +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_with_outputs) +{ + // Test closure computation with include_outputs on a derivation path + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + false, // 
flip_direction + true, // include_outputs - include the outputs in the closure + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // The closure should contain the derivation and its outputs + ASSERT_GE(closure_paths.size(), 2); + + // Verify the output path is in the closure + std::string outPathName; + nix_store_path_name(outPath, OBSERVE_STRING(outPathName)); + ASSERT_EQ(closure_paths.count(outPathName), 1); +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_without_outputs) +{ + // Test closure computation WITHOUT include_outputs on a derivation path + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + false, // flip_direction + false, // include_outputs - do NOT include the outputs + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // Verify the output path is NOT in the closure + std::string outPathName; + nix_store_path_name(outPath, OBSERVE_STRING(outPathName)); + ASSERT_EQ(closure_paths.count(outPathName), 0) << "Output path should not be in closure when includeOutputs=false"; +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_flip_direction) +{ + // Test 
closure computation with flip_direction on a derivation path + // When flip_direction=true, we get the reverse dependencies (what depends on this path) + // For a derivation, this should NOT include outputs even with include_outputs=true + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + true, // flip_direction - get reverse dependencies + true, // include_outputs + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // Verify the output path is NOT in the closure when direction is flipped + std::string outPathName; + nix_store_path_name(outPath, OBSERVE_STRING(outPathName)); + ASSERT_EQ(closure_paths.count(outPathName), 0) << "Output path should not be in closure when flip_direction=true"; +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_include_derivers) +{ + // Test closure computation with include_derivers on an output path + // This should include the derivation that produced the output + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + outPath, // Use output path (not derivation) + false, // flip_direction + false, // include_outputs + true, // include_derivers - include the derivation + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = 
data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // Verify the derivation path is in the closure + // Deriver is nasty stateful, and this assertion is only guaranteed because + // we're using an empty store as our starting point. Otherwise, if the + // output happens to exist, the deriver could be anything. + std::string drvPathName; + nix_store_path_name(drvPath, OBSERVE_STRING(drvPathName)); + ASSERT_EQ(closure_paths.count(drvPathName), 1) << "Derivation should be in closure when include_derivers=true"; +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_error_propagation) +{ + // Test that errors in the callback abort the closure computation + struct CallbackData + { + int * count; + }; + + int call_count = 0; + CallbackData data{&call_count}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + false, // flip_direction + true, // include_outputs + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + (*data->count)++; + // Set an error immediately + nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Test error"); + }); + + // Should have aborted with error + ASSERT_EQ(ret, NIX_ERR_UNKNOWN); + ASSERT_EQ(call_count, 1); // Should have been called exactly once, then aborted +} + } // namespace nixC From 3fb943d130868f2290d260bfd7a19cb633519ca9 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 15 Oct 2025 14:55:28 +0200 Subject: [PATCH 201/373] C API: Make store realise tests multi-platform ... and improve assertions. 
--- src/libstore-tests/nix_api_store.cc | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index 6d6017f1f..16d1ac0d8 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -240,7 +240,10 @@ public: std::stringstream buffer; buffer << t.rdbuf(); - drv = nix_derivation_from_json(ctx, store, buffer.str().c_str()); + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); assert_ctx_ok(); ASSERT_NE(drv, nullptr); @@ -249,6 +252,7 @@ public: ASSERT_NE(drvPath, nullptr); auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath_) { + ASSERT_NE(outname, nullptr) << "Output name should not be NULL"; auto is_valid_path = nix_store_is_valid_path(ctx, store, outPath_); ASSERT_EQ(is_valid_path, true); ASSERT_STREQ(outname, "out") << "Expected single 'out' output"; @@ -292,7 +296,10 @@ TEST_F(nix_api_store_test_base, build_from_json) std::stringstream buffer; buffer << t.rdbuf(); - auto * drv = nix_derivation_from_json(ctx, store, buffer.str().c_str()); + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); assert_ctx_ok(); ASSERT_NE(drv, nullptr); @@ -300,15 +307,21 @@ TEST_F(nix_api_store_test_base, build_from_json) assert_ctx_ok(); ASSERT_NE(drv, nullptr); + int callbackCount = 0; auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { + ASSERT_NE(outname, nullptr); + ASSERT_STREQ(outname, "out"); + ASSERT_NE(outPath, nullptr); auto is_valid_path = nix_store_is_valid_path(ctx, store, outPath); ASSERT_EQ(is_valid_path, true); + 
callbackCount++; }}; auto ret = nix_store_realise( ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); assert_ctx_ok(); ASSERT_EQ(ret, NIX_OK); + ASSERT_EQ(callbackCount, 1) << "Callback should have been invoked exactly once"; // Clean up nix_store_path_free(drvPath); From 12293a8b1162bc273f991b098e05c93e5ff32c5f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 15 Oct 2025 15:05:50 +0200 Subject: [PATCH 202/373] C API: Document nix_store_copy_closure flags --- src/libstore-c/nix_api_store.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index fd7ce068a..f477d084a 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -253,9 +253,14 @@ nix_err nix_store_copy_closure(nix_c_context * context, Store * srcStore, Store * @param[out] context Optional, stores error information * @param[in] store nix store reference * @param[in] store_path The path to compute from - * @param[in] flip_direction - * @param[in] include_outputs - * @param[in] include_derivers + * @param[in] flip_direction If false, compute the forward closure (paths referenced by any store path in the closure). + * If true, compute the backward closure (paths that reference any store path in the closure). + * @param[in] include_outputs If flip_direction is false: for any derivation in the closure, include its outputs. + * If flip_direction is true: for any output in the closure, include derivations that produce + * it. + * @param[in] include_derivers If flip_direction is false: for any output in the closure, include the derivation that + * produced it. + * If flip_direction is true: for any derivation in the closure, include its outputs. 
* @param[in] callback The function to call for every store path, in no particular order * @param[in] userdata The userdata to pass to the callback */ From 6fa03765edcce6e5403903cd68a2cc464e67e4d1 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 15 Oct 2025 15:19:40 +0200 Subject: [PATCH 203/373] C API: Propagate nix_store_realise build errors --- src/libstore-c/nix_api_store.cc | 8 ++ src/libstore-c/nix_api_store.h | 2 + src/libstore-tests/nix_api_store.cc | 135 ++++++++++++++++++++++++++++ 3 files changed, 145 insertions(+) diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index 6ee792fc3..e18463192 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -173,6 +173,14 @@ nix_err nix_store_realise( const auto nixStore = store->ptr; auto results = nixStore->buildPathsWithResults(paths, nix::bmNormal, nixStore); + assert(results.size() == 1); + + // Check if any builds failed + for (auto & result : results) { + if (!result.success()) + result.rethrow(); + } + if (callback) { for (const auto & result : results) { for (const auto & [outputName, realisation] : result.builtOutputs) { diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index f477d084a..964f6d6d5 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -186,6 +186,8 @@ nix_err nix_store_real_path( * @param[in] path Path to build * @param[in] userdata data to pass to every callback invocation * @param[in] callback called for every realised output + * @return NIX_OK if the build succeeded, or an error code if the build/scheduling/outputs/copying/etc failed. + * On error, the callback is never invoked and error information is stored in context. 
*/ nix_err nix_store_realise( nix_c_context * context, diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index 16d1ac0d8..045b4ad83 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -329,6 +329,141 @@ TEST_F(nix_api_store_test_base, build_from_json) nix_store_free(store); } +TEST_F(nix_api_store_test_base, nix_store_realise_invalid_system) +{ + // Test that nix_store_realise properly reports errors when the system is invalid + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Use an invalid system that cannot be built + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", "bogus65-bogusos"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build fails"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + ASSERT_NE(errMsg.find("system"), std::string::npos) << "Error should mention system"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + 
+TEST_F(nix_api_store_test_base, nix_store_realise_builder_fails) +{ + // Test that nix_store_realise properly reports errors when the builder fails + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace with current system and make builder command fail + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + jsonStr = nix::replaceStrings(jsonStr, "echo $name foo > $out", "exit 1"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build fails"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + +TEST_F(nix_api_store_test_base, nix_store_realise_builder_no_output) +{ + // Test that nix_store_realise properly reports errors when builder succeeds but produces no output + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path 
unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace with current system and make builder succeed but not produce output + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + jsonStr = nix::replaceStrings(jsonStr, "echo $name foo > $out", "true"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build produces no output"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_with_outputs) { // Test closure computation with include_outputs on a derivation path From b20cebf993780266513adcb176bef8edc8d30d1c Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 14 Oct 2025 14:37:52 -0400 Subject: [PATCH 204/373] Remove unused typedef and field --- .../include/nix/store/build/drv-output-substitution-goal.hh | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index b42336427..3128a719e 100644 --- 
a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -35,8 +35,6 @@ public: RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - typedef void (DrvOutputSubstitutionGoal::*GoalState)(); - GoalState state; Co init(); Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); From 46357468a4681cb722ddf03a550ff08ce7a1e36d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 14 Oct 2025 18:53:03 -0400 Subject: [PATCH 205/373] Remove unused parameters to `DrvOutputSubstitutionGoal` --- src/libstore/build/derivation-goal.cc | 3 +-- src/libstore/build/drv-output-substitution-goal.cc | 3 +-- src/libstore/build/worker.cc | 5 ++--- .../nix/store/build/drv-output-substitution-goal.hh | 7 +------ src/libstore/include/nix/store/build/worker.hh | 3 +-- 5 files changed, 6 insertions(+), 15 deletions(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index b0081f709..dca058fb8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -100,8 +100,7 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) them. */ if (settings.useSubstitutes && drvOptions.substitutesAllowed()) { if (!checkResult) - waitees.insert(upcast_goal(worker.makeDrvOutputSubstitutionGoal( - DrvOutput{outputHash, wantedOutput}, buildMode == bmRepair ? 
Repair : NoRepair))); + waitees.insert(upcast_goal(worker.makeDrvOutputSubstitutionGoal(DrvOutput{outputHash, wantedOutput}))); else { auto * cap = getDerivationCA(*drv); waitees.insert(upcast_goal(worker.makePathSubstitutionGoal( diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index 209d6d542..6635e214d 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -8,8 +8,7 @@ namespace nix { -DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal( - const DrvOutput & id, Worker & worker, RepairFlag repair, std::optional ca) +DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker) : Goal(worker, init()) , id(id) { diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index d23c53e77..3663a2c91 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -104,10 +104,9 @@ Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std: return initGoalIfNeeded(substitutionGoals[path], path, *this, repair, ca); } -std::shared_ptr -Worker::makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair, std::optional ca) +std::shared_ptr Worker::makeDrvOutputSubstitutionGoal(const DrvOutput & id) { - return initGoalIfNeeded(drvOutputSubstitutionGoals[id], id, *this, repair, ca); + return initGoalIfNeeded(drvOutputSubstitutionGoals[id], id, *this); } GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode) diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index 3128a719e..c3ee9019f 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -29,12 +29,7 @@ class DrvOutputSubstitutionGoal : public Goal DrvOutput id; public: - 
DrvOutputSubstitutionGoal( - const DrvOutput & id, - Worker & worker, - RepairFlag repair = NoRepair, - std::optional ca = std::nullopt); - + DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker); Co init(); Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index bb0202dfd..173f7b222 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -240,8 +240,7 @@ public: */ std::shared_ptr makePathSubstitutionGoal( const StorePath & storePath, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - std::shared_ptr makeDrvOutputSubstitutionGoal( - const DrvOutput & id, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); + std::shared_ptr makeDrvOutputSubstitutionGoal(const DrvOutput & id); /** * Make a goal corresponding to the `DerivedPath`. From 632ccfb8c0eac7deb42a0153d28847fe8eb7c0dd Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 15 Oct 2025 11:03:50 -0400 Subject: [PATCH 206/373] Remove dead `outputPaths` variable. 
--- src/libstore/build/derivation-goal.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index dca058fb8..c50054caf 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -170,8 +170,6 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto outputHashes = staticOutputHashes(worker.evalStore, *drv); auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - StorePathSet outputPaths; - auto outputHash = get(outputHashes, wantedOutput); auto resolvedHash = get(resolvedHashes, wantedOutput); if ((!outputHash) || (!resolvedHash)) @@ -211,7 +209,6 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) worker.store.signRealisation(newRealisation); worker.store.registerDrvOutput(newRealisation); } - outputPaths.insert(realisation.outPath); auto status = success.status; if (status == BuildResult::Success::AlreadyValid) From 9295c14a35b07f87351d677fc828bb5800f7b022 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Thu, 21 Aug 2025 05:38:42 +0000 Subject: [PATCH 207/373] refactor(libstore): replace AWS SDK with curl-based S3 implementation This commit replaces the AWS C++ SDK with a lighter curl-based approach for S3 binary cache operations. 
- Removed dependency on the heavy aws-cpp-sdk-s3 and aws-cpp-sdk-transfer - Added lightweight aws-crt-cpp for credential resolution only - Leverages curl's native AWS SigV4 authentication (requires curl >= 7.75.0) - S3BinaryCacheStore now delegates to HttpBinaryCacheStore - Function s3ToHttpsUrl converts ParsedS3URL to ParsedURL - Multipart uploads are no longer supported (may be reimplemented later) - Build now requires curl >= 7.75.0 for AWS SigV4 support Fixes: #13084, #12671, #11748, #12403, #5947 --- .github/workflows/ci.yml | 22 +- ci/gha/tests/default.nix | 9 +- ci/gha/tests/wrapper.nix | 3 +- packaging/components.nix | 2 +- packaging/dependencies.nix | 15 - src/libstore-tests/s3-binary-cache-store.cc | 17 +- src/libstore-tests/s3-url.cc | 2 +- src/libstore/filetransfer.cc | 28 - src/libstore/include/nix/store/meson.build | 1 - .../nix/store/s3-binary-cache-store.hh | 133 +--- src/libstore/include/nix/store/s3-url.hh | 2 +- src/libstore/include/nix/store/s3.hh | 50 -- src/libstore/meson.build | 31 +- src/libstore/package.nix | 8 +- src/libstore/s3-binary-cache-store.cc | 591 +----------------- src/libstore/s3-url.cc | 2 +- tests/nixos/default.nix | 2 - tests/nixos/s3-binary-cache-store.nix | 98 --- 18 files changed, 14 insertions(+), 1002 deletions(-) delete mode 100644 src/libstore/include/nix/store/s3.hh delete mode 100644 tests/nixos/s3-binary-cache-store.nix diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e08b5a9e..00a7ef7a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,8 +67,7 @@ jobs: instrumented: false primary: true stdenv: stdenv - withAWS: true - withCurlS3: false + withCurlS3: true # TODO: remove once curl-based-s3 fully lands - scenario: on ubuntu (no s3) runs-on: ubuntu-24.04 @@ -76,33 +75,21 @@ jobs: instrumented: false primary: false stdenv: stdenv - withAWS: false withCurlS3: false - # TODO: remove once curl-based-s3 fully lands - - scenario: on ubuntu (curl s3) - runs-on: ubuntu-24.04 - 
os: linux - instrumented: false - primary: false - stdenv: stdenv - withAWS: false - withCurlS3: true - scenario: on macos runs-on: macos-14 os: darwin instrumented: false primary: true stdenv: stdenv - withAWS: true - withCurlS3: false + withCurlS3: true - scenario: on ubuntu (with sanitizers / coverage) runs-on: ubuntu-24.04 os: linux instrumented: true primary: false stdenv: clangStdenv - withAWS: true - withCurlS3: false + withCurlS3: true name: tests ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 60 @@ -126,14 +113,12 @@ jobs: nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} - name: Run VM tests run: | nix build --file ci/gha/tests/wrapper.nix vmTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball @@ -146,7 +131,6 @@ jobs: nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ --out-link coverage-reports cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 46310bc36..be634e833 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -12,7 +12,6 @@ componentTestsPrefix ? "", withSanitizers ? false, withCoverage ? false, - withAWS ? null, withCurlS3 ? null, ... 
}: @@ -60,8 +59,7 @@ rec { # Override AWS configuration if specified nix-store = prev.nix-store.override ( - lib.optionalAttrs (withAWS != null) { inherit withAWS; } - // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } + lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } ); mesonComponentOverrides = lib.composeManyExtensions componentOverrides; @@ -231,11 +229,6 @@ rec { vmTests = { } - # FIXME: when the curlS3 implementation is complete, it should also enable these tests. - // lib.optionalAttrs (withAWS == true) { - # S3 binary cache store test only runs when S3 support is enabled - inherit (nixosTests) s3-binary-cache-store; - } // lib.optionalAttrs (withCurlS3 == true) { # S3 binary cache store test using curl implementation inherit (nixosTests) curl-s3-binary-cache-store; diff --git a/ci/gha/tests/wrapper.nix b/ci/gha/tests/wrapper.nix index c1655f8c0..4b1656500 100644 --- a/ci/gha/tests/wrapper.nix +++ b/ci/gha/tests/wrapper.nix @@ -5,7 +5,6 @@ stdenv ? "stdenv", componentTestsPrefix ? "", withInstrumentation ? false, - withAWS ? null, withCurlS3 ? null, }@args: import ./. ( @@ -14,6 +13,6 @@ import ./. 
( getStdenv = p: p.${stdenv}; withSanitizers = withInstrumentation; withCoverage = withInstrumentation; - inherit withAWS withCurlS3; + inherit withCurlS3; } ) diff --git a/packaging/components.nix b/packaging/components.nix index 106e96723..c621b7073 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -490,7 +490,7 @@ in Example: ``` - overrideScope (finalScope: prevScope: { aws-sdk-cpp = null; }) + overrideScope (finalScope: prevScope: { aws-crt-cpp = null; }) ``` */ overrideScope = f: (scope.overrideScope f).nix-everything; diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 5581719b5..6b2dafcfa 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -16,21 +16,6 @@ in scope: { inherit stdenv; - aws-sdk-cpp = - (pkgs.aws-sdk-cpp.override { - apis = [ - "identity-management" - "s3" - "transfer" - ]; - customMemoryManagement = false; - }).overrideAttrs - { - # only a stripped down version is built, which takes a lot less resources - # to build, so we don't need a "big-parallel" machine. 
- requiredSystemFeatures = [ ]; - }; - boehmgc = (pkgs.boehmgc.override { enableLargeConfig = true; diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index 8c58b8408..359c70148 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -1,21 +1,6 @@ #include "nix/store/s3-binary-cache-store.hh" -#if NIX_WITH_S3_SUPPORT - -# include - -namespace nix { - -TEST(S3BinaryCacheStore, constructConfig) -{ - S3BinaryCacheStoreConfig config{"s3", "foobar", {}}; - - EXPECT_EQ(config.bucketName, "foobar"); -} - -} // namespace nix - -#elif NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include "nix/store/http-binary-cache-store.hh" # include "nix/store/filetransfer.hh" diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 60652dd9c..5f3f9702b 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,7 +1,7 @@ #include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include # include diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index d6e21f3e6..2e12470a4 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -2,7 +2,6 @@ #include "nix/store/globals.hh" #include "nix/util/config-global.hh" #include "nix/store/store-api.hh" -#include "nix/store/s3.hh" #include "nix/util/compression.hh" #include "nix/util/finally.hh" #include "nix/util/callback.hh" @@ -10,9 +9,6 @@ #include "store-config-private.hh" #include -#if NIX_WITH_S3_SUPPORT -# include -#endif #if NIX_WITH_CURL_S3 # include "nix/store/aws-creds.hh" # include "nix/store/s3-url.hh" @@ -850,30 +846,6 @@ struct curlFileTransfer : public FileTransfer auto modifiedRequest = request; modifiedRequest.setupForS3(); enqueueItem(std::make_shared(*this, std::move(modifiedRequest), std::move(callback))); -#elif NIX_WITH_S3_SUPPORT - // Old AWS SDK-based 
implementation - // FIXME: do this on a worker thread - try { - auto parsed = ParsedS3URL::parse(request.uri.parsed()); - - std::string profile = parsed.profile.value_or(""); - std::string region = parsed.region.value_or(Aws::Region::US_EAST_1); - std::string scheme = parsed.scheme.value_or(""); - std::string endpoint = parsed.getEncodedEndpoint().value_or(""); - - S3Helper s3Helper(profile, region, scheme, endpoint); - - // FIXME: implement ETag - auto s3Res = s3Helper.getObject(parsed.bucket, encodeUrlPath(parsed.key)); - FileTransferResult res; - if (!s3Res.data) - throw FileTransferError(NotFound, {}, "S3 object '%s' does not exist", request.uri); - res.data = std::move(*s3Res.data); - res.urls.push_back(request.uri.to_string()); - callback(std::move(res)); - } catch (...) { - callback.rethrow(); - } #else throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); #endif diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 1f04e357a..5d6626ff8 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -76,7 +76,6 @@ headers = [ config_pub_h ] + files( 'restricted-store.hh', 's3-binary-cache-store.hh', 's3-url.hh', - 's3.hh', 'serve-protocol-connection.hh', 'serve-protocol-impl.hh', 'serve-protocol.hh', diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 0f8fff030..61ff8cb6c 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -3,138 +3,7 @@ #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT - -# include "nix/store/binary-cache-store.hh" - -# include - -namespace nix { - -struct S3BinaryCacheStoreConfig : std::enable_shared_from_this, virtual BinaryCacheStoreConfig -{ - std::string bucketName; - - using 
BinaryCacheStoreConfig::BinaryCacheStoreConfig; - - S3BinaryCacheStoreConfig(std::string_view uriScheme, std::string_view bucketName, const Params & params); - - const Setting profile{ - this, - "", - "profile", - R"( - The name of the AWS configuration profile to use. By default - Nix uses the `default` profile. - )"}; - -protected: - - constexpr static const char * defaultRegion = "us-east-1"; - -public: - - const Setting region{ - this, - defaultRegion, - "region", - R"( - The region of the S3 bucket. If your bucket is not in - `us-east-1`, you should always explicitly specify the region - parameter. - )"}; - - const Setting scheme{ - this, - "", - "scheme", - R"( - The scheme used for S3 requests, `https` (default) or `http`. This - option allows you to disable HTTPS for binary caches which don't - support it. - - > **Note** - > - > HTTPS should be used if the cache might contain sensitive - > information. - )"}; - - const Setting endpoint{ - this, - "", - "endpoint", - R"( - The URL of the endpoint of an S3-compatible service such as MinIO. - Do not specify this setting if you're using Amazon S3. - - > **Note** - > - > This endpoint must support HTTPS and uses path-based - > addressing instead of virtual host based addressing. - )"}; - - const Setting narinfoCompression{ - this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; - - const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; - - const Setting logCompression{ - this, - "", - "log-compression", - R"( - Compression method for `log/*` files. It is recommended to - use a compression method supported by most web browsers - (e.g. `brotli`). 
- )"}; - - const Setting multipartUpload{this, false, "multipart-upload", "Whether to use multi-part uploads."}; - - const Setting bufferSize{ - this, 5 * 1024 * 1024, "buffer-size", "Size (in bytes) of each part in multi-part uploads."}; - - static const std::string name() - { - return "S3 Binary Cache Store"; - } - - static StringSet uriSchemes() - { - return {"s3"}; - } - - static std::string doc(); - - ref openStore() const override; - - StoreReference getReference() const override; -}; - -struct S3BinaryCacheStore : virtual BinaryCacheStore -{ - using Config = S3BinaryCacheStoreConfig; - - ref config; - - S3BinaryCacheStore(ref); - - struct Stats - { - std::atomic put{0}; - std::atomic putBytes{0}; - std::atomic putTimeMs{0}; - std::atomic get{0}; - std::atomic getBytes{0}; - std::atomic getTimeMs{0}; - std::atomic head{0}; - }; - - virtual const Stats & getS3Stats() = 0; -}; - -} // namespace nix - -#elif NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include "nix/store/http-binary-cache-store.hh" diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index 45c3b2d1c..49dadfbe8 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include "nix/util/url.hh" # include "nix/util/util.hh" diff --git a/src/libstore/include/nix/store/s3.hh b/src/libstore/include/nix/store/s3.hh deleted file mode 100644 index ba3adbc2a..000000000 --- a/src/libstore/include/nix/store/s3.hh +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once -///@file -#include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT - -# include "nix/util/ref.hh" -# include "nix/store/s3-url.hh" - -# include - -namespace Aws { -namespace Client { -struct ClientConfiguration; -} -} // namespace Aws - -namespace Aws { -namespace S3 { -class S3Client; -} -} // namespace Aws - -namespace nix { - -struct S3Helper -{ - 
ref config; - ref client; - - S3Helper( - const std::string & profile, - const std::string & region, - const std::string & scheme, - const std::string & endpoint); - - ref - makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint); - - struct FileTransferResult - { - std::optional data; - unsigned int durationMs; - }; - - FileTransferResult getObject(const std::string & bucketName, const std::string & key); -}; - -} // namespace nix - -#endif diff --git a/src/libstore/meson.build b/src/libstore/meson.build index a50a3f5fd..d691c10bf 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -142,29 +142,7 @@ deps_public += nlohmann_json sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19') deps_private += sqlite -# AWS C++ SDK has bad pkg-config. See -# https://github.com/aws/aws-sdk-cpp/issues/2673 for details. -aws_s3 = dependency('aws-cpp-sdk-s3', required : false) -# The S3 store definitions in the header will be hidden based on this variables. 
-configdata_pub.set('NIX_WITH_S3_SUPPORT', aws_s3.found().to_int()) -if aws_s3.found() - aws_s3 = declare_dependency( - include_directories : include_directories(aws_s3.get_variable('includedir')), - link_args : [ - '-L' + aws_s3.get_variable('libdir'), - '-laws-cpp-sdk-transfer', - '-laws-cpp-sdk-s3', - '-laws-cpp-sdk-identity-management', - '-laws-cpp-sdk-cognito-identity', - '-laws-cpp-sdk-sts', - '-laws-cpp-sdk-core', - '-laws-crt-cpp', - ], - ).as_system('system') -endif -deps_other += aws_s3 - -# Curl-based S3 store support (alternative to AWS SDK) +# Curl-based S3 store support # Check if curl supports AWS SigV4 (requires >= 7.75.0) curl_supports_aws_sigv4 = curl.version().version_compare('>= 7.75.0') # AWS CRT C++ for lightweight credential management @@ -178,13 +156,6 @@ curl_s3_store_opt = get_option('curl-s3-store').require( error_message : 'curl-based S3 support requires aws-crt-cpp', ) -# Make AWS SDK and curl-based S3 mutually exclusive -if aws_s3.found() and curl_s3_store_opt.enabled() - error( - 'Cannot enable both AWS SDK S3 support and curl-based S3 support. Please choose one.', - ) -endif - if curl_s3_store_opt.enabled() deps_other += aws_crt_cpp endif diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 0eb8e3687..846d0f15f 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -23,20 +23,15 @@ embeddedSandboxShell ? stdenv.hostPlatform.isStatic, - withAWS ? + withCurlS3 ? # Default is this way because there have been issues building this dependency stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin), - - withCurlS3 ? 
false, }: let inherit (lib) fileset; in -assert lib.assertMsg (!withAWS || !withCurlS3) - "withAWS and withCurlS3 are mutually exclusive - cannot enable both S3 implementations simultaneously"; - mkMesonLibrary (finalAttrs: { pname = "nix-store"; inherit version; @@ -70,7 +65,6 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-sdk-cpp ++ lib.optional withCurlS3 aws-crt-cpp; propagatedBuildInputs = [ diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index ab0847bb1..16228b9f1 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -1,595 +1,6 @@ #include "nix/store/s3-binary-cache-store.hh" -#if NIX_WITH_S3_SUPPORT - -# include - -# include "nix/store/s3.hh" -# include "nix/store/nar-info.hh" -# include "nix/store/nar-info-disk-cache.hh" -# include "nix/store/globals.hh" -# include "nix/util/compression.hh" -# include "nix/store/filetransfer.hh" -# include "nix/util/signals.hh" -# include "nix/store/store-registration.hh" - -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include - -using namespace Aws::Transfer; - -namespace nix { - -struct S3Error : public Error -{ - Aws::S3::S3Errors err; - Aws::String exceptionName; - - template - S3Error(Aws::S3::S3Errors err, Aws::String exceptionName, const Args &... args) - : Error(args...) - , err(err) - , exceptionName(exceptionName){}; -}; - -/* Helper: given an Outcome, return R in case of success, or - throw an exception in case of an error. 
*/ -template -R && checkAws(std::string_view s, Aws::Utils::Outcome && outcome) -{ - if (!outcome.IsSuccess()) - throw S3Error( - outcome.GetError().GetErrorType(), - outcome.GetError().GetExceptionName(), - fmt("%s: %s (request id: %s)", s, outcome.GetError().GetMessage(), outcome.GetError().GetRequestId())); - return outcome.GetResultWithOwnership(); -} - -class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem -{ - using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem; - - void ProcessFormattedStatement(Aws::String && statement) override - { - debug("AWS: %s", chomp(statement)); - } - -# if !(AWS_SDK_VERSION_MAJOR <= 1 && AWS_SDK_VERSION_MINOR <= 7 && AWS_SDK_VERSION_PATCH <= 115) - void Flush() override {} -# endif -}; - -/* Retrieve the credentials from the list of AWS default providers, with the addition of the STS creds provider. This - last can be used to acquire further permissions with a specific IAM role. - Roughly based on https://github.com/aws/aws-sdk-cpp/issues/150#issuecomment-538548438 -*/ -struct CustomAwsCredentialsProviderChain : public Aws::Auth::AWSCredentialsProviderChain -{ - CustomAwsCredentialsProviderChain(const std::string & profile) - { - if (profile.empty()) { - // Use all the default AWS providers, plus the possibility to acquire a IAM role directly via a profile. - Aws::Auth::DefaultAWSCredentialsProviderChain default_aws_chain; - for (auto provider : default_aws_chain.GetProviders()) - AddProvider(provider); - AddProvider(std::make_shared()); - } else { - // Override the profile name to retrieve from the AWS config and credentials. I believe this option - // comes from the ?profile querystring in nix.conf. 
- AddProvider(std::make_shared(profile.c_str())); - AddProvider(std::make_shared(profile)); - } - } -}; - -static void initAWS() -{ - static std::once_flag flag; - std::call_once(flag, []() { - Aws::SDKOptions options; - - /* We install our own OpenSSL locking function (see - shared.cc), so don't let aws-sdk-cpp override it. */ - options.cryptoOptions.initAndCleanupOpenSSL = false; - - if (verbosity >= lvlDebug) { - options.loggingOptions.logLevel = - verbosity == lvlDebug ? Aws::Utils::Logging::LogLevel::Debug : Aws::Utils::Logging::LogLevel::Trace; - options.loggingOptions.logger_create_fn = [options]() { - return std::make_shared(options.loggingOptions.logLevel); - }; - } - - Aws::InitAPI(options); - }); -} - -S3Helper::S3Helper( - const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint) - : config(makeConfig(region, scheme, endpoint)) - , client( - make_ref( - std::make_shared(profile), - *config, -# if AWS_SDK_VERSION_MAJOR == 1 && AWS_SDK_VERSION_MINOR < 3 - false, -# else - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, -# endif - endpoint.empty())) -{ -} - -/* Log AWS retries. 
*/ -class RetryStrategy : public Aws::Client::DefaultRetryStrategy -{ - bool ShouldRetry(const Aws::Client::AWSError & error, long attemptedRetries) const override - { - checkInterrupt(); - auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries); - if (retry) - printError( - "AWS error '%s' (%s; request id: %s), will retry in %d ms", - error.GetExceptionName(), - error.GetMessage(), - error.GetRequestId(), - CalculateDelayBeforeNextRetry(error, attemptedRetries)); - return retry; - } -}; - -ref -S3Helper::makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint) -{ - initAWS(); - auto res = make_ref(); - res->allowSystemProxy = true; - res->region = region; - if (!scheme.empty()) { - res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str()); - } - if (!endpoint.empty()) { - res->endpointOverride = endpoint; - } - res->requestTimeoutMs = 600 * 1000; - res->connectTimeoutMs = 5 * 1000; - res->retryStrategy = std::make_shared(); - res->caFile = settings.caFile; - return res; -} - -S3Helper::FileTransferResult S3Helper::getObject(const std::string & bucketName, const std::string & key) -{ - std::string uri = "s3://" + bucketName + "/" + key; - Activity act( - *logger, lvlTalkative, actFileTransfer, fmt("downloading '%s'", uri), Logger::Fields{uri}, getCurActivity()); - - auto request = Aws::S3::Model::GetObjectRequest().WithBucket(bucketName).WithKey(key); - - request.SetResponseStreamFactory([&]() { return Aws::New("STRINGSTREAM"); }); - - size_t bytesDone = 0; - size_t bytesExpected = 0; - request.SetDataReceivedEventHandler( - [&](const Aws::Http::HttpRequest * req, Aws::Http::HttpResponse * resp, long long l) { - if (!bytesExpected && resp->HasHeader("Content-Length")) { - if (auto length = string2Int(resp->GetHeader("Content-Length"))) { - bytesExpected = *length; - } - } - bytesDone += l; - act.progress(bytesDone, bytesExpected); - }); - - request.SetContinueRequestHandler([](const 
Aws::Http::HttpRequest *) { return !isInterrupted(); }); - - FileTransferResult res; - - auto now1 = std::chrono::steady_clock::now(); - - try { - - auto result = checkAws(fmt("AWS error fetching '%s'", key), client->GetObject(request)); - - act.progress(result.GetContentLength(), result.GetContentLength()); - - res.data = decompress(result.GetContentEncoding(), dynamic_cast(result.GetBody()).str()); - - } catch (S3Error & e) { - if ((e.err != Aws::S3::S3Errors::NO_SUCH_KEY) && (e.err != Aws::S3::S3Errors::ACCESS_DENIED) && - // Expired tokens are not really an error, more of a caching problem. Should be treated same as 403. - // - // AWS unwilling to provide a specific error type for the situation - // (https://github.com/aws/aws-sdk-cpp/issues/1843) so use this hack - (e.exceptionName != "ExpiredToken")) - throw; - } - - auto now2 = std::chrono::steady_clock::now(); - - res.durationMs = std::chrono::duration_cast(now2 - now1).count(); - - return res; -} - -S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig( - std::string_view uriScheme, std::string_view bucketName, const Params & params) - : StoreConfig(params) - , BinaryCacheStoreConfig(params) - , bucketName(bucketName) -{ - // Don't want to use use AWS SDK in header, so we check the default - // here. TODO do this better after we overhaul the store settings - // system. 
- assert(std::string{defaultRegion} == std::string{Aws::Region::US_EAST_1}); - - if (bucketName.empty()) - throw UsageError("`%s` store requires a bucket name in its Store URI", uriScheme); -} - -S3BinaryCacheStore::S3BinaryCacheStore(ref config) - : BinaryCacheStore(*config) - , config{config} -{ -} - -std::string S3BinaryCacheStoreConfig::doc() -{ - return -# include "s3-binary-cache-store.md" - ; -} - -StoreReference S3BinaryCacheStoreConfig::getReference() const -{ - return { - .variant = - StoreReference::Specified{ - .scheme = *uriSchemes().begin(), - .authority = bucketName, - }, - .params = getQueryParams(), - }; -} - -struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStore -{ - Stats stats; - - S3Helper s3Helper; - - S3BinaryCacheStoreImpl(ref config) - : Store{*config} - , BinaryCacheStore{*config} - , S3BinaryCacheStore{config} - , s3Helper(config->profile, config->region, config->scheme, config->endpoint) - { - diskCache = getNarInfoDiskCache(); - } - - void init() override - { - /* FIXME: The URI (when used as a cache key) must have several parameters rendered (e.g. the endpoint). - This must be represented as a separate opaque string (probably a URI) that has the right query parameters. */ - auto cacheUri = config->getReference().render(/*withParams=*/false); - if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) { - config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); - config->priority.setDefault(cacheInfo->priority); - } else { - BinaryCacheStore::init(); - diskCache->createCache(cacheUri, config->storeDir, config->wantMassQuery, config->priority); - } - } - - const Stats & getS3Stats() override - { - return stats; - } - - /* This is a specialisation of isValidPath() that optimistically - fetches the .narinfo file, rather than first checking for its - existence via a HEAD request. Since .narinfos are small, doing - a GET is unlikely to be slower than HEAD. 
*/ - bool isValidPathUncached(const StorePath & storePath) override - { - try { - queryPathInfo(storePath); - return true; - } catch (InvalidPath & e) { - return false; - } - } - - bool fileExists(const std::string & path) override - { - stats.head++; - - auto res = s3Helper.client->HeadObject( - Aws::S3::Model::HeadObjectRequest().WithBucket(config->bucketName).WithKey(path)); - - if (!res.IsSuccess()) { - auto & error = res.GetError(); - if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND - || error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY - // Expired tokens are not really an error, more of a caching problem. Should be treated same as 403. - // AWS unwilling to provide a specific error type for the situation - // (https://github.com/aws/aws-sdk-cpp/issues/1843) so use this hack - || (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN && error.GetExceptionName() == "ExpiredToken") - // If bucket listing is disabled, 404s turn into 403s - || error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED) - return false; - throw Error("AWS error fetching '%s': %s", path, error.GetMessage()); - } - - return true; - } - - std::shared_ptr transferManager; - std::once_flag transferManagerCreated; - - struct AsyncContext : public Aws::Client::AsyncCallerContext - { - mutable std::mutex mutex; - mutable std::condition_variable cv; - const Activity & act; - - void notify() const - { - cv.notify_one(); - } - - void wait() const - { - std::unique_lock lk(mutex); - cv.wait(lk); - } - - AsyncContext(const Activity & act) - : act(act) - { - } - }; - - void uploadFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType, - const std::string & contentEncoding) - { - std::string uri = "s3://" + config->bucketName + "/" + path; - Activity act( - *logger, lvlTalkative, actFileTransfer, fmt("uploading '%s'", uri), Logger::Fields{uri}, getCurActivity()); - istream->seekg(0, istream->end); - auto size = istream->tellg(); - 
istream->seekg(0, istream->beg); - - auto maxThreads = std::thread::hardware_concurrency(); - - static std::shared_ptr executor = - std::make_shared(maxThreads); - - std::call_once(transferManagerCreated, [&]() { - if (config->multipartUpload) { - TransferManagerConfiguration transferConfig(executor.get()); - - transferConfig.s3Client = s3Helper.client; - transferConfig.bufferSize = config->bufferSize; - - transferConfig.uploadProgressCallback = - [](const TransferManager * transferManager, - const std::shared_ptr & transferHandle) { - auto context = std::dynamic_pointer_cast(transferHandle->GetContext()); - size_t bytesDone = transferHandle->GetBytesTransferred(); - size_t bytesTotal = transferHandle->GetBytesTotalSize(); - try { - checkInterrupt(); - context->act.progress(bytesDone, bytesTotal); - } catch (...) { - context->notify(); - } - }; - transferConfig.transferStatusUpdatedCallback = - [](const TransferManager * transferManager, - const std::shared_ptr & transferHandle) { - auto context = std::dynamic_pointer_cast(transferHandle->GetContext()); - context->notify(); - }; - - transferManager = TransferManager::Create(transferConfig); - } - }); - - auto now1 = std::chrono::steady_clock::now(); - - auto & bucketName = config->bucketName; - - if (transferManager) { - - if (contentEncoding != "") - throw Error("setting a content encoding is not supported with S3 multi-part uploads"); - - auto context = std::make_shared(act); - std::shared_ptr transferHandle = transferManager->UploadFile( - istream, - bucketName, - path, - mimeType, - Aws::Map(), - context /*, contentEncoding */); - - TransferStatus status = transferHandle->GetStatus(); - while (status == TransferStatus::IN_PROGRESS || status == TransferStatus::NOT_STARTED) { - if (!isInterrupted()) { - context->wait(); - } else { - transferHandle->Cancel(); - transferHandle->WaitUntilFinished(); - } - status = transferHandle->GetStatus(); - } - act.progress(transferHandle->GetBytesTransferred(), 
transferHandle->GetBytesTotalSize()); - - if (status == TransferStatus::FAILED) - throw Error( - "AWS error: failed to upload 's3://%s/%s': %s", - bucketName, - path, - transferHandle->GetLastError().GetMessage()); - - if (status != TransferStatus::COMPLETED) - throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state", bucketName, path); - - } else { - act.progress(0, size); - - auto request = Aws::S3::Model::PutObjectRequest().WithBucket(bucketName).WithKey(path); - - size_t bytesSent = 0; - request.SetDataSentEventHandler([&](const Aws::Http::HttpRequest * req, long long l) { - bytesSent += l; - act.progress(bytesSent, size); - }); - - request.SetContinueRequestHandler([](const Aws::Http::HttpRequest *) { return !isInterrupted(); }); - - request.SetContentType(mimeType); - - if (contentEncoding != "") - request.SetContentEncoding(contentEncoding); - - request.SetBody(istream); - - auto result = checkAws(fmt("AWS error uploading '%s'", path), s3Helper.client->PutObject(request)); - - act.progress(size, size); - } - - auto now2 = std::chrono::steady_clock::now(); - - auto duration = std::chrono::duration_cast(now2 - now1).count(); - - printInfo("uploaded 's3://%s/%s' (%d bytes) in %d ms", bucketName, path, size, duration); - - stats.putTimeMs += duration; - stats.putBytes += std::max(size, (decltype(size)) 0); - stats.put++; - } - - void upsertFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType) override - { - auto compress = [&](std::string compression) { - auto compressed = nix::compress(compression, StreamToSourceAdapter(istream).drain()); - return std::make_shared(std::move(compressed)); - }; - - if (config->narinfoCompression != "" && hasSuffix(path, ".narinfo")) - uploadFile(path, compress(config->narinfoCompression), mimeType, config->narinfoCompression); - else if (config->lsCompression != "" && hasSuffix(path, ".ls")) - uploadFile(path, compress(config->lsCompression), mimeType, 
config->lsCompression); - else if (config->logCompression != "" && hasPrefix(path, "log/")) - uploadFile(path, compress(config->logCompression), mimeType, config->logCompression); - else - uploadFile(path, istream, mimeType, ""); - } - - void getFile(const std::string & path, Sink & sink) override - { - stats.get++; - - // FIXME: stream output to sink. - auto res = s3Helper.getObject(config->bucketName, path); - - stats.getBytes += res.data ? res.data->size() : 0; - stats.getTimeMs += res.durationMs; - - if (res.data) { - printTalkative( - "downloaded 's3://%s/%s' (%d bytes) in %d ms", - config->bucketName, - path, - res.data->size(), - res.durationMs); - - sink(*res.data); - } else - throw NoSuchBinaryCacheFile( - "file '%s' does not exist in binary cache '%s'", path, config->getHumanReadableURI()); - } - - StorePathSet queryAllValidPaths() override - { - StorePathSet paths; - std::string marker; - - auto & bucketName = config->bucketName; - - do { - debug("listing bucket 's3://%s' from key '%s'...", bucketName, marker); - - auto res = checkAws( - fmt("AWS error listing bucket '%s'", bucketName), - s3Helper.client->ListObjects( - Aws::S3::Model::ListObjectsRequest().WithBucket(bucketName).WithDelimiter("/").WithMarker(marker))); - - auto & contents = res.GetContents(); - - debug("got %d keys, next marker '%s'", contents.size(), res.GetNextMarker()); - - for (const auto & object : contents) { - auto & key = object.GetKey(); - if (key.size() != 40 || !hasSuffix(key, ".narinfo")) - continue; - paths.insert(parseStorePath(storeDir + "/" + key.substr(0, key.size() - 8) + "-" + MissingName)); - } - - marker = res.GetNextMarker(); - } while (!marker.empty()); - - return paths; - } - - /** - * For now, we conservatively say we don't know. - * - * \todo try to expose our S3 authentication status. 
- */ - std::optional isTrustedClient() override - { - return std::nullopt; - } -}; - -ref S3BinaryCacheStoreImpl::Config::openStore() const -{ - auto store = - make_ref(ref{// FIXME we shouldn't actually need a mutable config - std::const_pointer_cast(shared_from_this())}); - store->init(); - return store; -} - -static RegisterStoreImplementation regS3BinaryCacheStore; - -} // namespace nix - -#elif NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index baefe5cba..478308270 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-url.hh" -#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 +#if NIX_WITH_CURL_S3 # include "nix/util/error.hh" # include "nix/util/split.hh" diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index ea6a7e914..0112d2e2f 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -199,8 +199,6 @@ in user-sandboxing = runNixOSTest ./user-sandboxing; - s3-binary-cache-store = runNixOSTest ./s3-binary-cache-store.nix; - curl-s3-binary-cache-store = runNixOSTest ./curl-s3-binary-cache-store.nix; fsync = runNixOSTest ./fsync.nix; diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix deleted file mode 100644 index a22e4c2c2..000000000 --- a/tests/nixos/s3-binary-cache-store.nix +++ /dev/null @@ -1,98 +0,0 @@ -{ - lib, - config, - nixpkgs, - ... -}: - -let - pkgs = config.nodes.client.nixpkgs.pkgs; - - pkgA = pkgs.cowsay; - - accessKey = "BKIKJAA5BMMU2RHO6IBB"; - secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; - env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}"; - - storeUrl = "s3://my-cache?endpoint=http://server:9000®ion=eu-west-1"; - objectThatDoesNotExist = "s3://my-cache/foo-that-does-not-exist?endpoint=http://server:9000®ion=eu-west-1"; - -in -{ - name = "s3-binary-cache-store"; - - nodes = { - server = - { - config, - lib, - pkgs, - ... 
- }: - { - virtualisation.writableStore = true; - virtualisation.additionalPaths = [ pkgA ]; - environment.systemPackages = [ pkgs.minio-client ]; - nix.extraOptions = '' - experimental-features = nix-command - substituters = - ''; - services.minio = { - enable = true; - region = "eu-west-1"; - rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' - MINIO_ROOT_USER=${accessKey} - MINIO_ROOT_PASSWORD=${secretKey} - ''; - }; - networking.firewall.allowedTCPPorts = [ 9000 ]; - }; - - client = - { config, pkgs, ... }: - { - virtualisation.writableStore = true; - nix.extraOptions = '' - experimental-features = nix-command - substituters = - ''; - }; - }; - - testScript = - { nodes }: - '' - # fmt: off - start_all() - - # Create a binary cache. - server.wait_for_unit("minio") - server.wait_for_unit("network-addresses-eth1.service") - server.wait_for_open_port(9000) - - server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4") - server.succeed("mc mb minio/my-cache") - - server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}") - - client.wait_for_unit("network-addresses-eth1.service") - - # Test fetchurl on s3:// URLs while we're at it. - client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000®ion=eu-west-1\"; }'") - - # Test that the format string in the error message is properly setup and won't display `%s` instead of the failed URI - msg = client.fail("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"${objectThatDoesNotExist}\"; }' 2>&1") - if "S3 object '${objectThatDoesNotExist}' does not exist" not in msg: - print(msg) # So that you can see the message that was improperly formatted - raise Exception("Error message formatting didn't work") - - # Copy a package from the binary cache. 
- client.fail("nix path-info ${pkgA}") - - client.succeed("${env} nix store info --store '${storeUrl}' >&2") - - client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}") - - client.succeed("nix path-info ${pkgA}") - ''; -} From 1f710300c9de0a28b2c09cc95d201fa2ca7e9571 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Tue, 14 Oct 2025 23:58:48 +0000 Subject: [PATCH 208/373] refactor(libstore): withCurlS3 -> withAWS Now that the legacy S3 implementation is gone, we can go back to calling things `NIX_WITH_S3_SUPPORT`. --- .github/workflows/ci.yml | 14 +++++++------- ci/gha/tests/default.nix | 8 +++----- ci/gha/tests/wrapper.nix | 4 ++-- src/libstore-tests/s3-binary-cache-store.cc | 2 +- src/libstore-tests/s3-url.cc | 2 +- src/libstore/aws-creds.cc | 2 +- src/libstore/builtins/fetchurl.cc | 2 +- src/libstore/filetransfer.cc | 10 +++++----- src/libstore/include/nix/store/aws-creds.hh | 2 +- src/libstore/include/nix/store/builtins.hh | 4 ++-- src/libstore/include/nix/store/filetransfer.hh | 6 +++--- .../include/nix/store/s3-binary-cache-store.hh | 2 +- src/libstore/include/nix/store/s3-url.hh | 2 +- src/libstore/meson.build | 2 +- src/libstore/package.nix | 6 +++--- src/libstore/s3-binary-cache-store.cc | 2 +- src/libstore/s3-url.cc | 2 +- src/libstore/unix/build/derivation-builder.cc | 12 ++++++------ .../unix/build/linux-derivation-builder.cc | 2 +- 19 files changed, 42 insertions(+), 44 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 00a7ef7a1..1259bbbb4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,7 +67,7 @@ jobs: instrumented: false primary: true stdenv: stdenv - withCurlS3: true + withAWS: true # TODO: remove once curl-based-s3 fully lands - scenario: on ubuntu (no s3) runs-on: ubuntu-24.04 @@ -75,21 +75,21 @@ jobs: instrumented: false primary: false stdenv: stdenv - withCurlS3: false + withAWS: false - scenario: on macos runs-on: macos-14 os: darwin instrumented: 
false primary: true stdenv: stdenv - withCurlS3: true + withAWS: true - scenario: on ubuntu (with sanitizers / coverage) runs-on: ubuntu-24.04 os: linux instrumented: true primary: false stdenv: clangStdenv - withCurlS3: true + withAWS: true name: tests ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 60 @@ -113,13 +113,13 @@ jobs: nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + ${{ format('--arg withAWS {0}', matrix.withAWS) }} - name: Run VM tests run: | nix build --file ci/gha/tests/wrapper.nix vmTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + ${{ format('--arg withAWS {0}', matrix.withAWS) }} if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball run: | @@ -131,7 +131,7 @@ jobs: nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ --out-link coverage-reports cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY if: ${{ matrix.instrumented }} diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index be634e833..28e305a95 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -12,7 +12,7 @@ componentTestsPrefix ? "", withSanitizers ? false, withCoverage ? false, - withCurlS3 ? null, + withAWS ? null, ... 
}: @@ -58,9 +58,7 @@ rec { nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; # Override AWS configuration if specified - nix-store = prev.nix-store.override ( - lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } - ); + nix-store = prev.nix-store.override (lib.optionalAttrs (withAWS != null) { inherit withAWS; }); mesonComponentOverrides = lib.composeManyExtensions componentOverrides; # Unclear how to make Perl bindings work with a dynamically linked ASAN. @@ -229,7 +227,7 @@ rec { vmTests = { } - // lib.optionalAttrs (withCurlS3 == true) { + // lib.optionalAttrs (withAWS == true) { # S3 binary cache store test using curl implementation inherit (nixosTests) curl-s3-binary-cache-store; } diff --git a/ci/gha/tests/wrapper.nix b/ci/gha/tests/wrapper.nix index 4b1656500..72b1ba7a3 100644 --- a/ci/gha/tests/wrapper.nix +++ b/ci/gha/tests/wrapper.nix @@ -5,7 +5,7 @@ stdenv ? "stdenv", componentTestsPrefix ? "", withInstrumentation ? false, - withCurlS3 ? null, + withAWS ? null, }@args: import ./. ( args @@ -13,6 +13,6 @@ import ./. 
( getStdenv = p: p.${stdenv}; withSanitizers = withInstrumentation; withCoverage = withInstrumentation; - inherit withCurlS3; + inherit withAWS; } ) diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index 359c70148..670000520 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-binary-cache-store.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/http-binary-cache-store.hh" # include "nix/store/filetransfer.hh" diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 5f3f9702b..56ec4e40e 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,7 +1,7 @@ #include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include # include diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 05c11d24a..b0e1b7ed1 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -1,6 +1,6 @@ #include "nix/store/aws-creds.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include # include "nix/store/s3-url.hh" diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 2488e18af..d55915183 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -41,7 +41,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) FileTransferRequest request(VerbatimURL{url}); request.decompress = false; -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT // Use pre-resolved credentials if available if (ctx.awsCredentials && request.uri.scheme() == "s3") { debug("[pid=%d] Using pre-resolved AWS credentials from parent process", getpid()); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 2e12470a4..4f0f89b64 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -9,7 +9,7 
@@ #include "store-config-private.hh" #include -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" # include "nix/store/s3-url.hh" #endif @@ -435,7 +435,7 @@ struct curlFileTransfer : public FileTransfer } } -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT // Set up AWS SigV4 signing if this is an S3 request // Note: AWS SigV4 support guaranteed available (curl >= 7.75.0 checked at build time) // The username/password (access key ID and secret key) are set via the general @@ -820,7 +820,7 @@ struct curlFileTransfer : public FileTransfer void enqueueItem(std::shared_ptr item) { if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT && item->request.uri.scheme() != "s3" #endif ) @@ -841,7 +841,7 @@ struct curlFileTransfer : public FileTransfer { /* Ugly hack to support s3:// URIs. */ if (request.uri.scheme() == "s3") { -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT // New curl-based S3 implementation auto modifiedRequest = request; modifiedRequest.setupForS3(); @@ -876,7 +876,7 @@ ref makeFileTransfer() return makeCurlFileTransfer(); } -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT void FileTransferRequest::setupForS3() { auto parsedS3 = ParsedS3URL::parse(uri.parsed()); diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 4930dc9d8..dcafa9c75 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/s3-url.hh" # include "nix/util/error.hh" diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index 5c15b2e9b..6b9431331 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -4,7 +4,7 @@ #include "nix/store/derivations.hh" 
#include "nix/store/config.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" #endif @@ -18,7 +18,7 @@ struct BuiltinBuilderContext std::string caFileData; Path tmpDirInSandbox; -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT /** * Pre-resolved AWS credentials for S3 URLs in builtin:fetchurl. * When present, these should be used instead of creating new credential providers. diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 78ce439ae..6ca6ffa16 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -12,7 +12,7 @@ #include "nix/util/url.hh" #include "nix/store/config.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" #endif @@ -113,7 +113,7 @@ struct FileTransferRequest * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. */ std::optional usernameAuth; -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT /** * Pre-resolved AWS session token for S3 requests. * When provided along with usernameAuth, this will be used instead of fetching fresh credentials. @@ -132,7 +132,7 @@ struct FileTransferRequest return data ? 
"upload" : "download"; } -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT private: friend struct curlFileTransfer; void setupForS3(); diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 61ff8cb6c..c071d0887 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -3,7 +3,7 @@ #include "nix/store/config.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/http-binary-cache-store.hh" diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index 49dadfbe8..4f0a7b0c2 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/util/url.hh" # include "nix/util/util.hh" diff --git a/src/libstore/meson.build b/src/libstore/meson.build index d691c10bf..c7d3f1600 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -160,7 +160,7 @@ if curl_s3_store_opt.enabled() deps_other += aws_crt_cpp endif -configdata_pub.set('NIX_WITH_CURL_S3', curl_s3_store_opt.enabled().to_int()) +configdata_pub.set('NIX_WITH_S3_SUPPORT', curl_s3_store_opt.enabled().to_int()) subdir('nix-meson-build-support/generate-header') diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 846d0f15f..897662e11 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -23,7 +23,7 @@ embeddedSandboxShell ? stdenv.hostPlatform.isStatic, - withCurlS3 ? + withAWS ? 
# Default is this way because there have been issues building this dependency stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin), }: @@ -65,7 +65,7 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withCurlS3 aws-crt-cpp; + ++ lib.optional withAWS aws-crt-cpp; propagatedBuildInputs = [ nix-util @@ -75,7 +75,7 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) - (lib.mesonEnable "curl-s3-store" withCurlS3) + (lib.mesonEnable "curl-s3-store" withAWS) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 16228b9f1..fb62b9548 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-binary-cache-store.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index 478308270..947de60b0 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-url.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/util/error.hh" # include "nix/util/split.hh" diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index f7bab7057..d0c8cce06 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -46,7 +46,7 @@ #include "store-config-private.hh" #include "build/derivation-check.hh" -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" # include "nix/store/s3-url.hh" # include "nix/util/url.hh" @@ -296,7 +296,7 @@ protected: */ virtual void startChild(); -#if NIX_WITH_CURL_S3 
+#if NIX_WITH_S3_SUPPORT /** * Pre-resolve AWS credentials for S3 URLs in builtin:fetchurl. * This should be called before forking to ensure credentials are available in child. @@ -359,7 +359,7 @@ protected: */ struct RunChildArgs { -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT std::optional awsCredentials; #endif }; @@ -945,7 +945,7 @@ void DerivationBuilderImpl::openSlave() throw SysError("cannot pipe standard error into log file"); } -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT std::optional DerivationBuilderImpl::preResolveAwsCredentials() { if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { @@ -974,7 +974,7 @@ std::optional DerivationBuilderImpl::preResolveAwsCredentials() void DerivationBuilderImpl::startChild() { RunChildArgs args{ -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT .awsCredentials = preResolveAwsCredentials(), #endif }; @@ -1255,7 +1255,7 @@ void DerivationBuilderImpl::runChild(RunChildArgs args) BuiltinBuilderContext ctx{ .drv = drv, .tmpDirInSandbox = tmpDirInSandbox(), -#if NIX_WITH_CURL_S3 +#if NIX_WITH_S3_SUPPORT .awsCredentials = args.awsCredentials, #endif }; diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index be064566f..07e421bef 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -277,7 +277,7 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void startChild() override { RunChildArgs args{ -# if NIX_WITH_CURL_S3 +# if NIX_WITH_S3_SUPPORT .awsCredentials = preResolveAwsCredentials(), # endif }; From bb1f22a8dfb044f8a8b9f931064bcc3721b7ce72 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 15 Oct 2025 18:07:42 +0000 Subject: [PATCH 209/373] refactor(libstore): minimize NIX_WITH_S3_SUPPORT scope to auth only Move S3 URL parsing, store configuration, and public bucket support outside of NIX_WITH_S3_SUPPORT guards. 
Only AWS credential resolution remains gated, allowing builds with withAWS = false to: - Parse s3:// URLs - Register S3 store types - Access public S3 buckets (via HTTPS conversion) - Use S3-compatible services without authentication The setupForS3() function now always performs URL conversion, with authentication code conditionally compiled based on NIX_WITH_S3_SUPPORT. The aws-creds.cc file (only code using AWS CRT SDK) is now conditionally compiled by meson. --- src/libstore-tests/s3-binary-cache-store.cc | 13 +++------ src/libstore-tests/s3-url.cc | 8 ++---- src/libstore/filetransfer.cc | 27 +++++++++---------- .../include/nix/store/filetransfer.hh | 3 ++- .../nix/store/s3-binary-cache-store.hh | 7 +---- src/libstore/include/nix/store/s3-url.hh | 17 +++++------- src/libstore/meson.build | 6 ++++- src/libstore/s3-binary-cache-store.cc | 11 +++----- src/libstore/s3-url.cc | 15 ++++------- 9 files changed, 40 insertions(+), 67 deletions(-) diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index 670000520..f01759771 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -1,12 +1,9 @@ #include "nix/store/s3-binary-cache-store.hh" +#include "nix/store/http-binary-cache-store.hh" +#include "nix/store/filetransfer.hh" +#include "nix/store/s3-url.hh" -#if NIX_WITH_S3_SUPPORT - -# include "nix/store/http-binary-cache-store.hh" -# include "nix/store/filetransfer.hh" -# include "nix/store/s3-url.hh" - -# include +#include namespace nix { @@ -126,5 +123,3 @@ TEST(S3BinaryCacheStore, parameterFiltering) } } // namespace nix - -#endif diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 56ec4e40e..2c384c255 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,10 +1,8 @@ #include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_S3_SUPPORT - -# include -# include +#include +#include 
namespace nix { @@ -228,5 +226,3 @@ INSTANTIATE_TEST_SUITE_P( [](const ::testing::TestParamInfo & info) { return info.param.description; }); } // namespace nix - -#endif diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 4f0f89b64..65fcbea5f 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -8,10 +8,10 @@ #include "nix/util/signals.hh" #include "store-config-private.hh" +#include "nix/store/s3-url.hh" #include #if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" -# include "nix/store/s3-url.hh" #endif #ifdef __linux__ @@ -820,10 +820,7 @@ struct curlFileTransfer : public FileTransfer void enqueueItem(std::shared_ptr item) { if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https" -#if NIX_WITH_S3_SUPPORT - && item->request.uri.scheme() != "s3" -#endif - ) + && item->request.uri.scheme() != "s3") throw nix::Error("uploading to '%s' is not supported", item->request.uri.to_string()); { @@ -839,16 +836,11 @@ struct curlFileTransfer : public FileTransfer void enqueueFileTransfer(const FileTransferRequest & request, Callback callback) override { - /* Ugly hack to support s3:// URIs. 
*/ + /* Handle s3:// URIs by converting to HTTPS and optionally adding auth */ if (request.uri.scheme() == "s3") { -#if NIX_WITH_S3_SUPPORT - // New curl-based S3 implementation auto modifiedRequest = request; modifiedRequest.setupForS3(); enqueueItem(std::make_shared(*this, std::move(modifiedRequest), std::move(callback))); -#else - throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); -#endif return; } @@ -876,14 +868,16 @@ ref makeFileTransfer() return makeCurlFileTransfer(); } -#if NIX_WITH_S3_SUPPORT void FileTransferRequest::setupForS3() { auto parsedS3 = ParsedS3URL::parse(uri.parsed()); - // Update the request URI to use HTTPS + // Update the request URI to use HTTPS (works without AWS SDK) uri = parsedS3.toHttpsUrl(); - // This gets used later in a curl setopt + +#if NIX_WITH_S3_SUPPORT + // Auth-specific code only compiled when AWS support is available awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3"; + // check if the request already has pre-resolved credentials std::optional sessionToken; if (usernameAuth) { @@ -908,8 +902,11 @@ void FileTransferRequest::setupForS3() } if (sessionToken) headers.emplace_back("x-amz-security-token", *sessionToken); -} +#else + // When built without AWS support, just try as public bucket + debug("S3 request without authentication (built without AWS support)"); #endif +} std::future FileTransfer::enqueueFileTransfer(const FileTransferRequest & request) { diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 6ca6ffa16..8b2c7eb31 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -15,6 +15,7 @@ #if NIX_WITH_S3_SUPPORT # include "nix/store/aws-creds.hh" #endif +#include "nix/store/s3-url.hh" namespace nix { @@ -132,10 +133,10 @@ struct FileTransferRequest return data ? 
"upload" : "download"; } -#if NIX_WITH_S3_SUPPORT private: friend struct curlFileTransfer; void setupForS3(); +#if NIX_WITH_S3_SUPPORT std::optional awsSigV4Provider; #endif }; diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index c071d0887..c8cb967c1 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -2,10 +2,7 @@ ///@file #include "nix/store/config.hh" - -#if NIX_WITH_S3_SUPPORT - -# include "nix/store/http-binary-cache-store.hh" +#include "nix/store/http-binary-cache-store.hh" namespace nix { @@ -77,5 +74,3 @@ public: }; } // namespace nix - -#endif diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index 4f0a7b0c2..4ee0c87f9 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -1,16 +1,13 @@ #pragma once ///@file #include "nix/store/config.hh" +#include "nix/util/url.hh" +#include "nix/util/util.hh" -#if NIX_WITH_S3_SUPPORT - -# include "nix/util/url.hh" -# include "nix/util/util.hh" - -# include -# include -# include -# include +#include +#include +#include +#include namespace nix { @@ -56,5 +53,3 @@ struct ParsedS3URL }; } // namespace nix - -#endif diff --git a/src/libstore/meson.build b/src/libstore/meson.build index c7d3f1600..af01c8652 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -265,7 +265,6 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') sources = files( - 'aws-creds.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-builder.cc', @@ -344,6 +343,11 @@ sources = files( 'worker-protocol.cc', ) +# AWS credentials code requires AWS CRT, so only compile when enabled +if curl_s3_store_opt.enabled() + sources += files('aws-creds.cc') +endif + subdir('include/nix/store') if host_machine.system() == 'linux' diff --git 
a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index fb62b9548..a84ea5fcb 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -1,12 +1,9 @@ #include "nix/store/s3-binary-cache-store.hh" -#if NIX_WITH_S3_SUPPORT +#include -# include - -# include "nix/store/s3-binary-cache-store.hh" -# include "nix/store/http-binary-cache-store.hh" -# include "nix/store/store-registration.hh" +#include "nix/store/http-binary-cache-store.hh" +#include "nix/store/store-registration.hh" namespace nix { @@ -45,5 +42,3 @@ std::string S3BinaryCacheStoreConfig::doc() static RegisterStoreImplementation registerS3BinaryCacheStore; } // namespace nix - -#endif diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index 947de60b0..e8fbba8f7 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,13 +1,10 @@ #include "nix/store/s3-url.hh" +#include "nix/util/error.hh" +#include "nix/util/split.hh" +#include "nix/util/strings-inline.hh" -#if NIX_WITH_S3_SUPPORT - -# include "nix/util/error.hh" -# include "nix/util/split.hh" -# include "nix/util/strings-inline.hh" - -# include -# include +#include +#include using namespace std::string_view_literals; @@ -117,5 +114,3 @@ ParsedURL ParsedS3URL::toHttpsUrl() const } } // namespace nix - -#endif From 3224636ab005c03b71b2eb4ac956ddb02355df67 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 15 Oct 2025 18:14:21 +0000 Subject: [PATCH 210/373] refactor(libstore): rename NIX_WITH_S3_SUPPORT to NIX_WITH_AWS_AUTH The macro now accurately reflects its purpose: gating only AWS authentication code, not all S3 functionality. S3 URL parsing, store configuration, and public bucket access work regardless of this flag. 
This rename clarifies that: - S3 support is always available (URL parsing, store registration) - Only AWS credential resolution requires the flag - The flag controls AWS CRT SDK dependency, not S3 protocol support --- src/libstore/aws-creds.cc | 2 +- src/libstore/builtins/fetchurl.cc | 2 +- src/libstore/filetransfer.cc | 6 +++--- src/libstore/include/nix/store/aws-creds.hh | 2 +- src/libstore/include/nix/store/builtins.hh | 4 ++-- src/libstore/include/nix/store/filetransfer.hh | 6 +++--- src/libstore/meson.build | 2 +- src/libstore/unix/build/derivation-builder.cc | 12 ++++++------ src/libstore/unix/build/linux-derivation-builder.cc | 2 +- 9 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index b0e1b7ed1..93fc3da33 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -1,6 +1,6 @@ #include "nix/store/aws-creds.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include # include "nix/store/s3-url.hh" diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index d55915183..126fb922e 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -41,7 +41,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) FileTransferRequest request(VerbatimURL{url}); request.decompress = false; -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH // Use pre-resolved credentials if available if (ctx.awsCredentials && request.uri.scheme() == "s3") { debug("[pid=%d] Using pre-resolved AWS credentials from parent process", getpid()); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 65fcbea5f..981d49d77 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -10,7 +10,7 @@ #include "store-config-private.hh" #include "nix/store/s3-url.hh" #include -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include "nix/store/aws-creds.hh" #endif @@ -435,7 +435,7 @@ struct curlFileTransfer : 
public FileTransfer } } -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH // Set up AWS SigV4 signing if this is an S3 request // Note: AWS SigV4 support guaranteed available (curl >= 7.75.0 checked at build time) // The username/password (access key ID and secret key) are set via the general @@ -874,7 +874,7 @@ void FileTransferRequest::setupForS3() // Update the request URI to use HTTPS (works without AWS SDK) uri = parsedS3.toHttpsUrl(); -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH // Auth-specific code only compiled when AWS support is available awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3"; diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index dcafa9c75..6e653936c 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include "nix/store/s3-url.hh" # include "nix/util/error.hh" diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index 6b9431331..6925e61c1 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -4,7 +4,7 @@ #include "nix/store/derivations.hh" #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include "nix/store/aws-creds.hh" #endif @@ -18,7 +18,7 @@ struct BuiltinBuilderContext std::string caFileData; Path tmpDirInSandbox; -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH /** * Pre-resolved AWS credentials for S3 URLs in builtin:fetchurl. * When present, these should be used instead of creating new credential providers. 
diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 8b2c7eb31..34ec316ef 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -12,7 +12,7 @@ #include "nix/util/url.hh" #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include "nix/store/aws-creds.hh" #endif #include "nix/store/s3-url.hh" @@ -114,7 +114,7 @@ struct FileTransferRequest * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. */ std::optional usernameAuth; -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH /** * Pre-resolved AWS session token for S3 requests. * When provided along with usernameAuth, this will be used instead of fetching fresh credentials. @@ -136,7 +136,7 @@ struct FileTransferRequest private: friend struct curlFileTransfer; void setupForS3(); -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH std::optional awsSigV4Provider; #endif }; diff --git a/src/libstore/meson.build b/src/libstore/meson.build index af01c8652..78a3dd9b3 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -160,7 +160,7 @@ if curl_s3_store_opt.enabled() deps_other += aws_crt_cpp endif -configdata_pub.set('NIX_WITH_S3_SUPPORT', curl_s3_store_opt.enabled().to_int()) +configdata_pub.set('NIX_WITH_AWS_AUTH', curl_s3_store_opt.enabled().to_int()) subdir('nix-meson-build-support/generate-header') diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index d0c8cce06..831f1fa9e 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -46,7 +46,7 @@ #include "store-config-private.hh" #include "build/derivation-check.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH # include "nix/store/aws-creds.hh" # include "nix/store/s3-url.hh" # include "nix/util/url.hh" @@ -296,7 +296,7 @@ protected: */ virtual void 
startChild(); -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH /** * Pre-resolve AWS credentials for S3 URLs in builtin:fetchurl. * This should be called before forking to ensure credentials are available in child. @@ -359,7 +359,7 @@ protected: */ struct RunChildArgs { -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH std::optional awsCredentials; #endif }; @@ -945,7 +945,7 @@ void DerivationBuilderImpl::openSlave() throw SysError("cannot pipe standard error into log file"); } -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH std::optional DerivationBuilderImpl::preResolveAwsCredentials() { if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { @@ -974,7 +974,7 @@ std::optional DerivationBuilderImpl::preResolveAwsCredentials() void DerivationBuilderImpl::startChild() { RunChildArgs args{ -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH .awsCredentials = preResolveAwsCredentials(), #endif }; @@ -1255,7 +1255,7 @@ void DerivationBuilderImpl::runChild(RunChildArgs args) BuiltinBuilderContext ctx{ .drv = drv, .tmpDirInSandbox = tmpDirInSandbox(), -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_AWS_AUTH .awsCredentials = args.awsCredentials, #endif }; diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index 07e421bef..e96f83700 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -277,7 +277,7 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void startChild() override { RunChildArgs args{ -# if NIX_WITH_S3_SUPPORT +# if NIX_WITH_AWS_AUTH .awsCredentials = preResolveAwsCredentials(), # endif }; From 5d1178b817a5c14cca96a57a11a4161ba70d83aa Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 21:54:09 +0300 Subject: [PATCH 211/373] libfetchers/git-utils: Be more correct about validating refnames Turns out there's a much better API for this that doesn't have the footguns of the previous 
method. isLegalRefName is somewhat of a misnomer, since it's mainly used to validate user inputs that can be either references, branch names, psedorefs or tags. --- src/libfetchers-tests/git-utils.cc | 6 ++ src/libfetchers/git-utils.cc | 58 +++++-------------- .../include/nix/fetchers/git-utils.hh | 7 ++- tests/functional/fetchGitRefs.sh | 3 + 4 files changed, 29 insertions(+), 45 deletions(-) diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index f9fae23da..774934d26 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -175,6 +175,12 @@ TEST_F(GitUtilsTest, peel_reference) TEST(GitUtils, isLegalRefName) { + ASSERT_TRUE(isLegalRefName("A/b")); + ASSERT_TRUE(isLegalRefName("AaA/b")); + ASSERT_TRUE(isLegalRefName("FOO/BAR/BAZ")); + ASSERT_TRUE(isLegalRefName("HEAD")); + ASSERT_TRUE(isLegalRefName("refs/tags/1.2.3")); + ASSERT_TRUE(isLegalRefName("refs/heads/master")); ASSERT_TRUE(isLegalRefName("foox")); ASSERT_TRUE(isLegalRefName("1337")); ASSERT_TRUE(isLegalRefName("foo.baz")); diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index a3652e522..215418522 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #include @@ -1323,63 +1325,33 @@ GitRepo::WorkdirInfo GitRepo::getCachedWorkdirInfo(const std::filesystem::path & return workdirInfo; } -/** - * Checks that the git reference is valid and normalizes slash '/' sequences. - * - * Accepts shorthand references (one-level refnames are allowed). - */ -bool isValidRefNameAllowNormalizations(const std::string & refName) -{ - /* Unfortunately libgit2 doesn't expose the limit in headers, but its internal - limit is also 1024. 
*/ - std::array normalizedRefBuffer; - - /* It would be nice to have a better API like git_reference_name_is_valid, but - * with GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND flag. libgit2 uses it internally - * but doesn't expose it in public headers [1]. - * [1]: - * https://github.com/libgit2/libgit2/blob/9d5f1bacc23594c2ba324c8f0d41b88bf0e9ef04/src/libgit2/refs.c#L1362-L1365 - */ - - auto res = git_reference_normalize_name( - normalizedRefBuffer.data(), - normalizedRefBuffer.size(), - refName.c_str(), - GIT_REFERENCE_FORMAT_ALLOW_ONELEVEL | GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND); - - return res == 0; -} - bool isLegalRefName(const std::string & refName) { initLibGit2(); - /* Since `git_reference_normalize_name` is the best API libgit2 has for verifying - * reference names with shorthands (see comment in normalizeRefName), we need to - * ensure that exceptions to the validity checks imposed by normalization [1] are checked - * explicitly. - * [1]: https://git-scm.com/docs/git-check-ref-format#Documentation/git-check-ref-format.txt---normalize - */ - /* Check for cases that don't get rejected by libgit2. * FIXME: libgit2 should reject this. */ if (refName == "@") return false; - /* Leading slashes and consecutive slashes are stripped during normalizatiton. */ - if (refName.starts_with('/') || refName.find("//") != refName.npos) - return false; - - /* Refer to libgit2. */ - if (!isValidRefNameAllowNormalizations(refName)) - return false; - /* libgit2 doesn't barf on DEL symbol. * FIXME: libgit2 should reject this. 
*/ if (refName.find('\177') != refName.npos) return false; - return true; + for (auto * func : { + git_reference_name_is_valid, + git_branch_name_is_valid, + git_tag_name_is_valid, + }) { + int valid = 0; + if (func(&valid, refName.c_str())) + throw Error("checking git reference '%s': %s", refName, git_error_last()->message); + if (valid) + return true; + } + + return false; } } // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 07b985541..8357ce4cd 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -158,9 +158,12 @@ struct Setter }; /** - * Checks that the git reference is valid and normalized. + * Checks that the string can be a valid git reference, branch or tag name. + * Accepts shorthand references (one-level refnames are allowed), pseudorefs + * like `HEAD`. * - * Accepts shorthand references (one-level refnames are allowed). + * @note This is a coarse test to make sure that the refname is at least something + * that Git can make sense of. */ bool isLegalRefName(const std::string & refName); diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh index 288b26591..a7d1a2a29 100755 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -59,6 +59,9 @@ invalid_ref() { } +valid_ref 'A/b' +valid_ref 'AaA/b' +valid_ref 'FOO/BAR/BAZ' valid_ref 'foox' valid_ref '1337' valid_ref 'foo.baz' From 6995d325ef6836fcd2077a280cabc602c90de026 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 00:06:47 -0400 Subject: [PATCH 212/373] Split out `UnkeyedRealisation` from `Realisation` Realisations are conceptually key-value pairs, mapping `DrvOutputs` (the key) to information about that derivation output. This separate the value type, which will be useful in maps, etc., where we don't want to denormalize by including the key twice. 
This matches similar changes for existing types: | keyed | unkeyed | |--------------------|------------------------| | `ValidPathInfo` | `UnkeyedValidPathInfo` | | `KeyedBuildResult` | `BuildResult` | | `Realisation` | `UnkeyedRealisation` | Co-authored-by: Sergei Zimmerman --- src/libcmd/built-path.cc | 5 +- src/libstore-tests/common-protocol.cc | 44 +++--- src/libstore-tests/realisation.cc | 20 +-- src/libstore-tests/serve-protocol.cc | 74 +++++----- src/libstore-tests/worker-protocol.cc | 132 +++++++++--------- src/libstore/binary-cache-store.cc | 21 +-- .../build/derivation-building-goal.cc | 13 +- src/libstore/build/derivation-goal.cc | 59 ++++++-- .../build/drv-output-substitution-goal.cc | 10 +- src/libstore/daemon.cc | 4 +- src/libstore/dummy-store.cc | 4 +- .../include/nix/store/binary-cache-store.hh | 17 ++- .../nix/store/build/derivation-goal.hh | 6 +- .../build/drv-output-substitution-goal.hh | 3 +- .../include/nix/store/legacy-ssh-store.hh | 4 +- .../include/nix/store/local-overlay-store.hh | 2 +- src/libstore/include/nix/store/local-store.hh | 6 +- src/libstore/include/nix/store/realisation.hh | 50 ++++--- .../include/nix/store/remote-store.hh | 2 +- src/libstore/include/nix/store/store-api.hh | 9 +- src/libstore/local-overlay-store.cc | 8 +- src/libstore/local-store.cc | 17 ++- src/libstore/misc.cc | 5 +- src/libstore/realisation.cc | 50 ++++--- src/libstore/remote-store.cc | 18 +-- src/libstore/restricted-store.cc | 4 +- src/libstore/store-api.cc | 20 +-- src/libstore/unix/build/derivation-builder.cc | 7 +- 28 files changed, 363 insertions(+), 251 deletions(-) diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index 4d76dd6da..fc7f18493 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -117,10 +117,11 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)", store.printStorePath(p.drvPath->outPath()), outputName); - auto 
thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName}); + DrvOutput key{*drvOutput, outputName}; + auto thisRealisation = store.queryRealisation(key); assert(thisRealisation); // We’ve built it, so we must // have the realisation - res.insert(*thisRealisation); + res.insert(Realisation{*thisRealisation, std::move(key)}); } else { res.insert(outputPath); } diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 35fca165d..2c001957b 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,32 +112,34 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = 
Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index a5a5bee50..d16049bc5 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P( RealisationJsonTest, ([] { Realisation simple{ - - .id = - { - .drvHash = Hash::parseExplicitFormatUnprefixed( - "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", - HashAlgorithm::SHA256, - HashFormat::Base16), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }, + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, }; return ::testing::Values( std::pair{ diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index a63201164..10aa21e9d 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,32 +95,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - 
.dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -196,25 +198,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 489151c8c..c4afde3bd 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,32 +148,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath 
= StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -214,25 +216,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash 
= Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, @@ -267,25 +269,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, @@ -324,25 +328,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 
badfb4b14..3705f3d4d 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -502,10 +502,15 @@ StorePath BinaryCacheStore::addToStore( ->path; } -void BinaryCacheStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept +std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id) { - auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; + return realisationsPrefix + "/" + id.to_string() + ".doi"; +} + +void BinaryCacheStore::queryRealisationUncached( + const DrvOutput & id, Callback> callback) noexcept +{ + auto outputInfoFilePath = makeRealisationPath(id); auto callbackPtr = std::make_shared(std::move(callback)); @@ -515,11 +520,12 @@ void BinaryCacheStore::queryRealisationUncached( if (!data) return (*callbackPtr)({}); - std::shared_ptr realisation; + std::shared_ptr realisation; try { - realisation = std::make_shared(nlohmann::json::parse(*data)); + realisation = std::make_shared(nlohmann::json::parse(*data)); } catch (Error & e) { - e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath); + e.addTrace( + {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string()); throw; } return (*callbackPtr)(std::move(realisation)); @@ -535,8 +541,7 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) { if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); - auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; - upsertFile(filePath, static_cast(info).dump(), "application/json"); + upsertFile(makeRealisationPath(info.id), static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 4230ed465..a6fe95f3e 100644 --- a/src/libstore/build/derivation-building-goal.cc 
+++ b/src/libstore/build/derivation-building-goal.cc @@ -1111,13 +1111,22 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). worker.store.registerDrvOutput( Realisation{ + { + .outPath = info.known->path, + }, drvOutput, - info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); + validOutputs.emplace( + i.first, + Realisation{ + { + .outPath = info.known->path, + }, + drvOutput, + }); } bool allValid = true; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index c50054caf..4beced6d8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -181,13 +181,17 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto realisation = [&] { auto take1 = get(success.builtOutputs, wantedOutput); if (take1) - return *take1; + return static_cast(*take1); /* The above `get` should work. But stateful tracking of outputs in resolvedResult, this can get out of sync with the store, which is our actual source of truth. For now we just check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + auto take2 = worker.evalStore.queryRealisation( + DrvOutput{ + .drvHash = *resolvedHash, + .outputName = wantedOutput, + }); if (take2) return *take2; @@ -198,8 +202,12 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) }(); if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + Realisation newRealisation{ + realisation, + { + .drvHash = *outputHash, + .outputName = wantedOutput, + }}; newRealisation.signatures.clear(); if (!drv->type().isFixed()) { auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; @@ -248,7 +256,16 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) /* In checking mode, the builder will not register any outputs. So we want to make sure the ones that we wanted to check are properly there. */ - success.builtOutputs = {{wantedOutput, assertPathValidity()}}; + success.builtOutputs = {{ + wantedOutput, + { + assertPathValidity(), + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}; } else { /* Otherwise the builder will give us info for out output, but also for other outputs. Filter down to just our output so as @@ -352,18 +369,20 @@ Goal::Co DerivationGoal::repairClosure() co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } -std::optional> DerivationGoal::checkPathValidity() +std::optional> DerivationGoal::checkPathValidity() { if (drv->type().isImpure()) return std::nullopt; auto drvOutput = DrvOutput{outputHash, wantedOutput}; - std::optional mRealisation; + std::optional mRealisation; if (auto * mOutput = get(drv->outputs, wantedOutput)) { if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) { - mRealisation = Realisation{drvOutput, std::move(*mPath)}; + mRealisation = UnkeyedRealisation{ + .outPath = std::move(*mPath), + }; } } else { throw Error( @@ -391,7 +410,14 @@ std::optional> DerivationGoal::checkPathValid // derivation, and the output path is valid, but we don't have // its realisation stored (probably because it has been built // without the `ca-derivations` experimental flag). 
- worker.store.registerDrvOutput(*mRealisation); + worker.store.registerDrvOutput( + Realisation{ + *mRealisation, + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }); } return {{*mRealisation, status}}; @@ -399,7 +425,7 @@ std::optional> DerivationGoal::checkPathValid return std::nullopt; } -Realisation DerivationGoal::assertPathValidity() +UnkeyedRealisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -407,11 +433,20 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) { buildResult.inner = BuildResult::Success{ .status = status, - .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, + .builtOutputs = {{ + wantedOutput, + { + std::move(builtOutput), + DrvOutput{ + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}, }; mcExpectedBuilds.reset(); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index 6635e214d..568a06201 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -42,10 +42,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -74,7 +74,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. 
*/ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -131,7 +131,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() } Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) + Goals waitees, std::shared_ptr outputInfo, nix::ref sub) { waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); @@ -144,7 +144,7 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( co_return amDone(nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed); } - worker.store.registerDrvOutput(*outputInfo); + worker.store.registerDrvOutput({*outputInfo, id}); trace("finished"); co_return amDone(ecSuccess); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 1fc568e87..00c0a1fdd 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -964,7 +964,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); + store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +986,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert(*info); + realisations.insert({*info, outputId}); WorkerProto::write(*store, wconn, realisations); } break; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 1eb51fe3e..209be3ce9 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -266,8 +266,8 @@ struct DummyStoreImpl : DummyStore throw Error("path '%s' is not valid", printStorePath(path)); } - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, 
Callback> callback) noexcept override { callback(nullptr); } diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index c316b1199..3f4de2bd4 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -80,13 +80,22 @@ private: protected: - // The prefix under which realisation infos will be stored - const std::string realisationsPrefix = "realisations"; + /** + * The prefix under which realisation infos will be stored + */ + constexpr const static std::string realisationsPrefix = "realisations"; - const std::string cacheInfoFile = "nix-cache-info"; + constexpr const static std::string cacheInfoFile = "nix-cache-info"; BinaryCacheStore(Config &); + /** + * Compute the path to the given realisation + * + * It's `${realisationsPrefix}/${drvOutput}.doi`. + */ + std::string makeRealisationPath(const DrvOutput & id); + public: virtual bool fileExists(const std::string & path) = 0; @@ -175,7 +184,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index c5eb2fe79..0fe610987 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -93,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. 
*/ - Realisation assertPathValidity(); + UnkeyedRealisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index c3ee9019f..c01e99dd1 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -32,7 +32,8 @@ public: DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker); Co init(); - Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); + Co + realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); void timedOut(Error && ex) override { diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index c91f88a84..994918f90 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -208,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index b89d0a1a0..1d69d3417 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. 
*/ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index b871aaee2..ab255fba8 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index 3424a39c9..e8a71862e 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); + bool operator==(const DrvOutput &) const = default; + auto operator<=>(const DrvOutput &) const = default; }; -struct Realisation +struct UnkeyedRealisation { - DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,22 +64,35 @@ struct Realisation */ std::map dependentRealisations; - std::string fingerprint() const; - void sign(const Signer &); - bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; - size_t checkSignatures(const PublicKeys & publicKeys) const; + 
std::string fingerprint(const DrvOutput & key) const; - static std::set closure(Store &, const std::set &); - static void closure(Store &, const std::set &, std::set & res); + void sign(const DrvOutput & key, const Signer &); - bool isCompatibleWith(const Realisation & other) const; + bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; - StorePath getPath() const + size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; + + const StorePath & getPath() const { return outPath; } - GENERATE_CMP(Realisation, me->id, me->outPath); + // TODO sketchy that it avoids signatures + GENERATE_CMP(UnkeyedRealisation, me->outPath); +}; + +struct Realisation : UnkeyedRealisation +{ + DrvOutput id; + + bool isCompatibleWith(const UnkeyedRealisation & other) const; + + static std::set closure(Store &, const std::set &); + + static void closure(Store &, const std::set &, std::set & res); + + bool operator==(const Realisation &) const = default; + auto operator<=>(const Realisation &) const = default; }; /** @@ -103,12 +116,13 @@ struct OpaquePath { StorePath path; - StorePath getPath() const + const StorePath & getPath() const & { return path; } - GENERATE_CMP(OpaquePath, me->path); + bool operator==(const OpaquePath &) const = default; + auto operator<=>(const OpaquePath &) const = default; }; /** @@ -116,7 +130,7 @@ struct OpaquePath */ struct RealisedPath { - /* + /** * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -138,13 +152,14 @@ struct RealisedPath /** * Get the raw store path associated to this */ - StorePath path() const; + const StorePath & path() const &; void closure(Store & store, Set & ret) const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - GENERATE_CMP(RealisedPath, me->raw); + bool operator==(const RealisedPath &) const = default; + auto 
operator<=>(const RealisedPath &) const = default; }; class MissingRealisation : public Error @@ -167,4 +182,5 @@ public: } // namespace nix +JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 1aaf29d37..b152e054b 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 5c96c5f80..d03e8e010 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,6 +31,7 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); +struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -398,12 +399,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). 
*/ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -430,8 +431,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; + virtual void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept = 0; public: diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index 2b000b3db..f23feb8fb 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput(*res); + LocalStore::registerDrvOutput({*res, info.id}); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, {[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) 
{ diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index cbd3fa6d8..59d5cc24f 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1586,7 +1586,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,14 +1598,13 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - Realisation{ - .id = id, + UnkeyedRealisation{ .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1631,13 +1630,13 @@ std::optional LocalStore::queryRealisation_(LocalStore::State } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = - retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = retrySQLite>( + [&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + 
callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 7efaa4f86..a31d149c2 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -360,11 +360,12 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); + DrvOutput key{*outputHash, outputName}; + auto thisRealisation = store.queryRealisation(key); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert(*thisRealisation); + inputRealisations.insert({*thisRealisation, std::move(key)}); } } if (!inputNode.value.empty()) { diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index febd67bd2..a7f3b98d6 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert(*currentRealisation); + res.insert({*currentRealisation, currentDep}); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,24 +61,25 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -std::string Realisation::fingerprint() const +std::string UnkeyedRealisation::fingerprint(const DrvOutput & key) const { - nlohmann::json serialized = *this; + nlohmann::json serialized = Realisation{*this, key}; serialized.erase("signatures"); return serialized.dump(); } -void Realisation::sign(const Signer & signer) +void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) { - 
signatures.insert(signer.signDetached(fingerprint())); + signatures.insert(signer.signDetached(fingerprint(key))); } -bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const +bool UnkeyedRealisation::checkSignature( + const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(), sig, publicKeys); + return verifyDetached(fingerprint(key), sig, publicKeys); } -size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const +size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -86,19 +87,18 @@ size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const size_t good = 0; for (auto & sig : signatures) - if (checkSignature(publicKeys, sig)) + if (checkSignature(key, publicKeys, sig)) good++; return good; } -StorePath RealisedPath::path() const +const StorePath & RealisedPath::path() const & { - return std::visit([](auto && arg) { return arg.getPath(); }, raw); + return std::visit([](auto & arg) -> auto & { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const Realisation & other) const +bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const { - assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -Realisation adl_serializer::from_json(const json & json0) +UnkeyedRealisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,25 +157,39 @@ Realisation adl_serializer::from_json(const json & json0) for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) dependentRealisations.insert({DrvOutput::parse(jsonDepId), 
jsonDepOutPath}); - return Realisation{ - .id = DrvOutput::parse(valueAt(json, "id")), + return UnkeyedRealisation{ .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const Realisation & r) +void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { - {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } +Realisation adl_serializer::from_json(const json & json0) +{ + auto json = getObject(json0); + + return Realisation{ + static_cast(json0), + DrvOutput::parse(valueAt(json, "id")), + }; +} + +void adl_serializer::to_json(json & json, const Realisation & r) +{ + json = static_cast(r); + json["id"] = r.id.to_string(); +} + } // namespace nlohmann diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index a6994f844..8dd5bc064 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,7 +501,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +515,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); + return std::make_shared(UnkeyedRealisation{.outPath = 
*outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -626,13 +626,15 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - success.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); } else { success.builtOutputs.emplace( output, Realisation{ - .id = outputId, - .outPath = outputPath, + UnkeyedRealisation{ + .outPath = outputPath, + }, + outputId, }); } } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index a1cb41606..5270f7d10 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 1335eb76a..cdca6a763 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -598,7 +598,8 @@ void 
Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation( + const DrvOutput & id, Callback> callback) noexcept { try { @@ -624,20 +625,20 @@ void Store::queryRealisation(const DrvOutput & id, Callback(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), *info); + config.getReference().render(/*FIXME withParams=*/false), {*info, id}); else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -645,9 +646,9 @@ void Store::queryRealisation(const DrvOutput & id, Callback Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -910,11 +911,12 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto realisation = std::get_if(&path.raw)) { + if (auto * realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } + auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -931,7 +933,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert(*currentChild); + children.insert({*currentChild, drvOutput}); } return children; }, @@ -1199,7 +1201,7 @@ void Store::signRealisation(Realisation & realisation) for (auto & secretKeyFile : 
secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - realisation.sign(signer); + realisation.sign(realisation.id, signer); } } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index f7bab7057..7f8314c91 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1866,7 +1866,12 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; + auto thisRealisation = Realisation{ + { + .outPath = newInfo.path, + }, + DrvOutput{oldinfo->outputHash, outputName}, + }; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); From 266fbebe66f771130efeb9153c49b4a744d995b5 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:30:36 -0400 Subject: [PATCH 213/373] Implement realisation operations on dummy store --- src/libstore-tests/dummy-store.cc | 15 +++++++++++++-- src/libstore/dummy-store.cc | 19 ++++++++++++++++--- .../include/nix/store/dummy-store-impl.hh | 12 ++++++++++++ src/libstore/include/nix/store/dummy-store.hh | 2 ++ src/libutil/include/nix/util/hash.hh | 19 +++++++++++++++++++ 5 files changed, 62 insertions(+), 5 deletions(-) diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc index b841d7890..3dd8137a3 100644 --- a/src/libstore-tests/dummy-store.cc +++ b/src/libstore-tests/dummy-store.cc @@ -1,6 +1,6 @@ #include -#include "nix/store/dummy-store.hh" +#include "nix/store/dummy-store-impl.hh" #include "nix/store/globals.hh" #include "nix/store/realisation.hh" @@ -13,7 +13,7 @@ TEST(DummyStore, realisation_read) auto 
store = [] { auto cfg = make_ref(StoreReference::Params{}); cfg->readOnly = false; - return cfg->openStore(); + return cfg->openDummyStore(); }(); auto drvHash = Hash::parseExplicitFormatUnprefixed( @@ -22,6 +22,17 @@ TEST(DummyStore, realisation_read) auto outputName = "foo"; EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); + + UnkeyedRealisation value{ + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }; + + store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); + + auto value2 = store->queryRealisation({drvHash, outputName}); + + ASSERT_TRUE(value2); + EXPECT_EQ(*value2, value); } } // namespace nix diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 209be3ce9..509b7a0b1 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -3,6 +3,7 @@ #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store-impl.hh" +#include "nix/store/realisation.hh" #include @@ -251,7 +252,10 @@ struct DummyStoreImpl : DummyStore void registerDrvOutput(const Realisation & output) override { - unsupported("registerDrvOutput"); + auto ref = make_ref(output); + buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) { + kv.second.insert_or_assign(output.id.outputName, make_ref(output)); + }); } void narFromPath(const StorePath & path, Sink & sink) override @@ -267,9 +271,18 @@ struct DummyStoreImpl : DummyStore } void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override + const DrvOutput & drvOutput, Callback> callback) noexcept override { - callback(nullptr); + bool visited = false; + buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) { + if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) { + visited = true; + callback(it->second.get_ptr()); + } + }); + + if (!visited) + callback(nullptr); } std::shared_ptr getFSAccessor(const StorePath & 
path, bool requireValidPath) override diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh index e05bb94ff..4c9f54e98 100644 --- a/src/libstore/include/nix/store/dummy-store-impl.hh +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -30,6 +30,18 @@ struct DummyStore : virtual Store */ boost::concurrent_flat_map contents; + /** + * The build trace maps the pair of a content-addressing (fixed or + * floating) derivation and one of its outputs to a + * (content-addressed) store object. + * + * It is [curried](https://en.wikipedia.org/wiki/Currying), so + * instead of having a single map with a `DrvOutput` key, we have an + * outer map for the derivation, and inner maps for the outputs of a + * given derivation. + */ + boost::concurrent_flat_map>> buildTrace; + DummyStore(ref config) : Store{*config} , config(config) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index 95c09078c..d371c4e51 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -3,6 +3,8 @@ #include "nix/store/store-api.hh" +#include + namespace nix { struct DummyStore; diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 571b6acca..0b16b423c 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -222,3 +222,22 @@ public: }; } // namespace nix + +template<> +struct std::hash +{ + std::size_t operator()(const nix::Hash & hash) const noexcept + { + assert(hash.hashSize > sizeof(size_t)); + return *reinterpret_cast(&hash.hash); + } +}; + +namespace nix { + +inline std::size_t hash_value(const Hash & hash) +{ + return std::hash{}(hash); +} + +} // namespace nix From e069c9892eec9ef77752b5d9774a71e485cb77ec Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 15 Oct 2025 18:23:20 +0000 Subject: [PATCH 214/373] docs(rl-next): 
add notes for curl-based s3 --- doc/manual/rl-next/s3-curl-implementation.md | 26 ++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 doc/manual/rl-next/s3-curl-implementation.md diff --git a/doc/manual/rl-next/s3-curl-implementation.md b/doc/manual/rl-next/s3-curl-implementation.md new file mode 100644 index 000000000..fab010010 --- /dev/null +++ b/doc/manual/rl-next/s3-curl-implementation.md @@ -0,0 +1,26 @@ +--- +synopsis: "Improved S3 binary cache support via HTTP" +prs: [13823, 14026, 14120, 14131, 14135, 14144, 14170, 14190, 14198, 14206, 14209, 14222, 14223, 13752] +issues: [13084, 12671, 11748, 12403] +--- + +S3 binary cache operations now happen via HTTP, leveraging `libcurl`'s native +AWS SigV4 authentication instead of the AWS C++ SDK, providing significant +improvements: + +- **Reduced memory usage**: Eliminates memory buffering issues that caused + segfaults with large files +- **Fixed upload reliability**: Resolves AWS SDK chunking errors + (`InvalidChunkSizeError`) +- **Lighter dependencies**: Uses lightweight `aws-crt-cpp` instead of full + `aws-cpp-sdk`, reducing build complexity + +The new implementation requires curl >= 7.75.0 and `aws-crt-cpp` for credential +management. + +All existing S3 URL formats and parameters remain supported, with the notable +exception of multi-part uploads, which are no longer supported. + +Note that this change also means Nix now supports S3 binary cache stores even +if built without `aws-crt-cpp`, but only for public buckets which do not +require auth. 
From 6036aaf798f38a2c1a1d63a16f8566c98a60dbcf Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 15 Oct 2025 22:04:21 +0200 Subject: [PATCH 215/373] C API: Check output callback order --- src/libstore-tests/nix_api_store.cc | 149 ++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index 045b4ad83..228b8069f 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -613,6 +613,155 @@ TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_include_deriver ASSERT_EQ(closure_paths.count(drvPathName), 1) << "Derivation should be in closure when include_derivers=true"; } +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_realise_output_ordering) +{ + // Test that nix_store_realise returns outputs in alphabetical order by output name. + // This test uses a CA derivation with 10 outputs in randomized input order + // to verify that the callback order is deterministic and alphabetical. 
+ nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + // Create a CA derivation with 10 outputs using proper placeholders + auto outa_ph = nix::hashPlaceholder("outa"); + auto outb_ph = nix::hashPlaceholder("outb"); + auto outc_ph = nix::hashPlaceholder("outc"); + auto outd_ph = nix::hashPlaceholder("outd"); + auto oute_ph = nix::hashPlaceholder("oute"); + auto outf_ph = nix::hashPlaceholder("outf"); + auto outg_ph = nix::hashPlaceholder("outg"); + auto outh_ph = nix::hashPlaceholder("outh"); + auto outi_ph = nix::hashPlaceholder("outi"); + auto outj_ph = nix::hashPlaceholder("outj"); + + std::string drvJson = R"({ + "version": 3, + "name": "multi-output-test", + "system": ")" + nix::settings.thisSystem.get() + + R"(", + "builder": "/bin/sh", + "args": ["-c", "echo a > $outa; echo b > $outb; echo c > $outc; echo d > $outd; echo e > $oute; echo f > $outf; echo g > $outg; echo h > $outh; echo i > $outi; echo j > $outj"], + "env": { + "builder": "/bin/sh", + "name": "multi-output-test", + "system": ")" + nix::settings.thisSystem.get() + + R"(", + "outf": ")" + outf_ph + + R"(", + "outd": ")" + outd_ph + + R"(", + "outi": ")" + outi_ph + + R"(", + "oute": ")" + oute_ph + + R"(", + "outh": ")" + outh_ph + + R"(", + "outc": ")" + outc_ph + + R"(", + "outb": ")" + outb_ph + + R"(", + "outg": ")" + outg_ph + + R"(", + "outj": ")" + outj_ph + + R"(", + "outa": ")" + outa_ph + + R"(" + }, + "inputDrvs": {}, + "inputSrcs": [], + "outputs": { + "outd": { "hashAlgo": "sha256", "method": "nar" }, + "outf": { "hashAlgo": "sha256", "method": "nar" }, + "outg": { "hashAlgo": "sha256", "method": "nar" }, + "outb": { "hashAlgo": "sha256", "method": "nar" }, + "outc": { "hashAlgo": "sha256", "method": "nar" }, + "outi": { "hashAlgo": "sha256", "method": "nar" }, + "outj": { "hashAlgo": "sha256", "method": "nar" }, + "outh": { "hashAlgo": "sha256", "method": "nar" }, + 
"outa": { "hashAlgo": "sha256", "method": "nar" }, + "oute": { "hashAlgo": "sha256", "method": "nar" } + } + })"; + + auto * drv = nix_derivation_from_json(ctx, store, drvJson.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + // Realise the derivation - capture the order outputs are returned + std::map outputs; + std::vector output_order; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { + ASSERT_NE(outname, nullptr); + ASSERT_NE(outPath, nullptr); + output_order.push_back(outname); + outputs.emplace(outname, outPath->path); + }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + ASSERT_EQ(outputs.size(), 10); + + // Verify outputs are returned in alphabetical order by output name + std::vector expected_order = { + "outa", "outb", "outc", "outd", "oute", "outf", "outg", "outh", "outi", "outj"}; + ASSERT_EQ(output_order, expected_order) << "Outputs should be returned in alphabetical order by output name"; + + // Now compute closure with include_outputs and collect paths in order + struct CallbackData + { + std::vector * paths; + }; + + std::vector closure_paths; + CallbackData data{&closure_paths}; + + ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, + false, // flip_direction + true, // include_outputs - include the outputs in the closure + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + data->paths->push_back(path_str); + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // Should contain at least the derivation and 10 outputs + ASSERT_GE(closure_paths.size(), 11); + + // Verify all outputs are present in the closure + for 
(const auto & [outname, outPath] : outputs) { + std::string outPathName = store->ptr->printStorePath(outPath); + + bool found = false; + for (const auto & p : closure_paths) { + // nix_store_path_name returns just the name part, so match against full path name + if (outPathName.find(p) != std::string::npos) { + found = true; + break; + } + } + ASSERT_TRUE(found) << "Output " << outname << " (" << outPathName << ") not found in closure"; + } + + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_error_propagation) { // Test that errors in the callback abort the closure computation From fa0d00e6689782348c0e13efa352d6d3741d3c05 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 15 Oct 2025 20:15:19 +0000 Subject: [PATCH 216/373] ci: cleanup s3 tests This cleans up the work done in 8c2828387. Now that #13752 has landed, there's no need to test configurations without AWS auth in CI. 
--- .github/workflows/ci.yml | 18 ++---------------- ci/gha/tests/default.nix | 7 ------- ci/gha/tests/wrapper.nix | 2 -- 3 files changed, 2 insertions(+), 25 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1259bbbb4..1edfcf167 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,29 +67,18 @@ jobs: instrumented: false primary: true stdenv: stdenv - withAWS: true - # TODO: remove once curl-based-s3 fully lands - - scenario: on ubuntu (no s3) - runs-on: ubuntu-24.04 - os: linux - instrumented: false - primary: false - stdenv: stdenv - withAWS: false - scenario: on macos runs-on: macos-14 os: darwin instrumented: false primary: true stdenv: stdenv - withAWS: true - scenario: on ubuntu (with sanitizers / coverage) runs-on: ubuntu-24.04 os: linux instrumented: true primary: false stdenv: clangStdenv - withAWS: true name: tests ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 60 @@ -112,14 +101,12 @@ jobs: run: | nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} + --argstr stdenv "${{ matrix.stdenv }}" - name: Run VM tests run: | nix build --file ci/gha/tests/wrapper.nix vmTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} + --argstr stdenv "${{ matrix.stdenv }}" if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball run: | @@ -131,7 +118,6 @@ jobs: nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ --out-link coverage-reports cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY if: ${{ matrix.instrumented }} 
diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 28e305a95..fac4f9002 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -12,7 +12,6 @@ componentTestsPrefix ? "", withSanitizers ? false, withCoverage ? false, - withAWS ? null, ... }: @@ -57,9 +56,6 @@ rec { # Boehm is incompatible with ASAN. nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; - # Override AWS configuration if specified - nix-store = prev.nix-store.override (lib.optionalAttrs (withAWS != null) { inherit withAWS; }); - mesonComponentOverrides = lib.composeManyExtensions componentOverrides; # Unclear how to make Perl bindings work with a dynamically linked ASAN. nix-perl-bindings = if withSanitizers then null else prev.nix-perl-bindings; @@ -226,9 +222,6 @@ rec { }; vmTests = { - } - // lib.optionalAttrs (withAWS == true) { - # S3 binary cache store test using curl implementation inherit (nixosTests) curl-s3-binary-cache-store; } // lib.optionalAttrs (!withSanitizers && !withCoverage) { diff --git a/ci/gha/tests/wrapper.nix b/ci/gha/tests/wrapper.nix index 72b1ba7a3..dc280ebbb 100644 --- a/ci/gha/tests/wrapper.nix +++ b/ci/gha/tests/wrapper.nix @@ -5,7 +5,6 @@ stdenv ? "stdenv", componentTestsPrefix ? "", withInstrumentation ? false, - withAWS ? null, }@args: import ./. ( args @@ -13,6 +12,5 @@ import ./. ( getStdenv = p: p.${stdenv}; withSanitizers = withInstrumentation; withCoverage = withInstrumentation; - inherit withAWS; } ) From 17b7fb383f04b792b5709e2ed497330c7962c6a3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 01:04:26 +0300 Subject: [PATCH 217/373] tests: Fix splicing in functional tests for nix-cli This is necessary to fix nix-everything-llvm. The problem here is that nix-cli is taken from the previous stage that is built with libstdc++, but this derivation builds plugins with libc++ and the plugin load fails miserably. 
--- tests/functional/package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/functional/package.nix b/tests/functional/package.nix index a36c2e2d3..6830a9e58 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -60,7 +60,10 @@ mkMesonDerivation ( ]; nativeBuildInputs = finalAttrs.passthru.externalNativeBuildInputs ++ [ - nix-cli + # Explicitly splice the hostHost variant to fix LLVM tests. The nix-cli + # has to be in PATH, but must come from the host context where it's built + # with libc++. + (nix-cli.__spliced.hostHost or nix-cli) ]; buildInputs = [ From 0deb492b3d3e311b54fc09ac4ad7c824dfccbc08 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 23:57:06 +0300 Subject: [PATCH 218/373] Restore `ServeProto::Command::ImportPaths` This partially reverts commit 5e46df973f496cbdf375bfa24f27c156d24458e9, partially reversing changes made to 8c789db05b0bb4cd8dccc544b930837da52f423a. We do this because Hydra, while using the newer version of the protocol, still uses this command, even though Nix (as a client) doesn't use it. On that basis, we don't want to remove it (or consider it only part of the older versions of the protocol) until Hydra no longer uses the Legacy SSH Protocol. 
--- .../include/nix/store/serve-protocol-connection.hh | 2 ++ src/libstore/include/nix/store/serve-protocol.hh | 7 +++++++ src/libstore/serve-protocol-connection.cc | 10 ++++++++++ src/nix/nix-store/nix-store.cc | 10 ++++++++++ 4 files changed, 29 insertions(+) diff --git a/src/libstore/include/nix/store/serve-protocol-connection.hh b/src/libstore/include/nix/store/serve-protocol-connection.hh index 873277db9..fa50132c8 100644 --- a/src/libstore/include/nix/store/serve-protocol-connection.hh +++ b/src/libstore/include/nix/store/serve-protocol-connection.hh @@ -82,6 +82,8 @@ struct ServeProto::BasicClientConnection BuildResult getBuildDerivationResponse(const StoreDirConfig & store); void narFromPath(const StoreDirConfig & store, const StorePath & path, std::function fun); + + void importPaths(const StoreDirConfig & store, std::function fun); }; struct ServeProto::BasicServerConnection diff --git a/src/libstore/include/nix/store/serve-protocol.hh b/src/libstore/include/nix/store/serve-protocol.hh index 4c2043f17..974bf42d5 100644 --- a/src/libstore/include/nix/store/serve-protocol.hh +++ b/src/libstore/include/nix/store/serve-protocol.hh @@ -108,6 +108,13 @@ enum struct ServeProto::Command : uint64_t { QueryValidPaths = 1, QueryPathInfos = 2, DumpStorePath = 3, + /** + * @note This is no longer used by Nix (as a client), but it is used + * by Hydra. We should therefore not remove it until Hydra no longer + * uses it either. 
+ */ + ImportPaths = 4, + // ExportPaths = 5, BuildPaths = 6, QueryClosure = 7, BuildDerivation = 8, diff --git a/src/libstore/serve-protocol-connection.cc b/src/libstore/serve-protocol-connection.cc index a90b104a6..baa3bf0ce 100644 --- a/src/libstore/serve-protocol-connection.cc +++ b/src/libstore/serve-protocol-connection.cc @@ -93,4 +93,14 @@ void ServeProto::BasicClientConnection::narFromPath( fun(from); } +void ServeProto::BasicClientConnection::importPaths(const StoreDirConfig & store, std::function fun) +{ + to << ServeProto::Command::ImportPaths; + fun(to); + to.flush(); + + if (readInt(from) != 1) + throw Error("remote machine failed to import closure"); +} + } // namespace nix diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index 313a6398c..3798c7fa0 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -986,6 +986,16 @@ static void opServe(Strings opFlags, Strings opArgs) store->narFromPath(store->parseStorePath(readString(in)), out); break; + case ServeProto::Command::ImportPaths: { + if (!writeAllowed) + throw Error("importing paths is not allowed"); + // FIXME: should we skip sig checking? 
+ importPaths(*store, in, NoCheckSigs); + // indicate success + out << 1; + break; + } + case ServeProto::Command::BuildPaths: { if (!writeAllowed) From 61cb9c4832c6d720cd61d876a7740610427535f3 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 16 Oct 2025 13:21:27 +0200 Subject: [PATCH 219/373] doc/dev/doc: Update local build instructions for manual --- .../source/development/documentation.md | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/manual/source/development/documentation.md b/doc/manual/source/development/documentation.md index 30cc8adc4..a2a54175d 100644 --- a/doc/manual/source/development/documentation.md +++ b/doc/manual/source/development/documentation.md @@ -25,20 +25,31 @@ nix build .#nix-manual and open `./result/share/doc/nix/manual/index.html`. -To build the manual incrementally, [enter the development shell](./building.md) and run: +To build the manual incrementally, [enter the development shell](./building.md) and configure with `doc-gen` enabled: + +**If using interactive `nix develop`:** ```console -make manual-html-open -j $NIX_BUILD_CORES +$ nix develop +$ mesonFlags="$mesonFlags -Ddoc-gen=true" mesonConfigurePhase ``` -In order to reflect changes to the [Makefile for the manual], clear all generated files before re-building: - -[Makefile for the manual]: https://github.com/NixOS/nix/blob/master/doc/manual/local.mk +**If using direnv:** ```console -rm $(git ls-files doc/manual/ -o | grep -F '.md') && rmdir doc/manual/source/command-ref/new-cli && make manual-html -j $NIX_BUILD_CORES +$ direnv allow +$ bash -c 'source $stdenv/setup && mesonFlags="$mesonFlags -Ddoc-gen=true" mesonConfigurePhase' ``` +Then build the manual: + +```console +$ cd build +$ meson compile manual +``` + +The HTML manual will be generated at `build/src/nix-manual/manual/index.html`. 
+ ## Style guide The goal of this style guide is to make it such that From a48a737517af31683d331b0aadc990d15c214b34 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 Oct 2025 16:30:10 +0200 Subject: [PATCH 220/373] Use serializer for std::optional --- src/libstore/worker-protocol.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 4f7c28409..a17d2c028 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -251,11 +251,10 @@ void WorkerProto::Serialise::write( UnkeyedValidPathInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) { - auto deriver = readString(conn.from); + auto deriver = WorkerProto::Serialise>::read(store, conn); auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256); UnkeyedValidPathInfo info(narHash); - if (deriver != "") - info.deriver = store.parseStorePath(deriver); + info.deriver = std::move(deriver); info.references = WorkerProto::Serialise::read(store, conn); conn.from >> info.registrationTime >> info.narSize; if (GET_PROTOCOL_MINOR(conn.version) >= 16) { @@ -269,8 +268,8 @@ UnkeyedValidPathInfo WorkerProto::Serialise::read(const St void WorkerProto::Serialise::write( const StoreDirConfig & store, WriteConn conn, const UnkeyedValidPathInfo & pathInfo) { - conn.to << (pathInfo.deriver ? 
store.printStorePath(*pathInfo.deriver) : "") - << pathInfo.narHash.to_string(HashFormat::Base16, false); + WorkerProto::write(store, conn, pathInfo.deriver); + conn.to << pathInfo.narHash.to_string(HashFormat::Base16, false); WorkerProto::write(store, conn, pathInfo.references); conn.to << pathInfo.registrationTime << pathInfo.narSize; if (GET_PROTOCOL_MINOR(conn.version) >= 16) { From 139df7744014cf46f1194afdb8d797588513f853 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 Oct 2025 12:42:53 +0200 Subject: [PATCH 221/373] Factor out --no-check-sigs --- src/libcmd/include/nix/cmd/command.hh | 14 ++++++++++++++ src/nix/copy.cc | 10 +--------- src/nix/flake.cc | 9 +-------- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh index 20cd1abc1..2bff11dc1 100644 --- a/src/libcmd/include/nix/cmd/command.hh +++ b/src/libcmd/include/nix/cmd/command.hh @@ -350,6 +350,20 @@ struct MixEnvironment : virtual Args void setEnviron(); }; +struct MixNoCheckSigs : virtual Args +{ + CheckSigsFlag checkSigs = CheckSigs; + + MixNoCheckSigs() + { + addFlag({ + .longName = "no-check-sigs", + .description = "Do not require that paths are signed by trusted keys.", + .handler = {&checkSigs, NoCheckSigs}, + }); + } +}; + void completeFlakeInputAttrPath( AddCompletions & completions, ref evalState, diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 62e8b64f5..706edc6c9 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -5,10 +5,9 @@ using namespace nix; -struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand, MixProfile +struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand, MixProfile, MixNoCheckSigs { std::optional outLink; - CheckSigsFlag checkSigs = CheckSigs; SubstituteFlag substitute = NoSubstitute; @@ -24,13 +23,6 @@ struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand, MixProfile .handler = {&outLink}, .completer = completePath, }); - - addFlag({ - 
.longName = "no-check-sigs", - .description = "Do not require that paths are signed by trusted keys.", - .handler = {&checkSigs, NoCheckSigs}, - }); - addFlag({ .longName = "substitute-on-destination", .shortName = 's', diff --git a/src/nix/flake.cc b/src/nix/flake.cc index cf05f6943..04d4ec8eb 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -1032,12 +1032,10 @@ struct CmdFlakeClone : FlakeCommand } }; -struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun +struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs { std::string dstUri; - CheckSigsFlag checkSigs = CheckSigs; - SubstituteFlag substitute = NoSubstitute; CmdFlakeArchive() @@ -1048,11 +1046,6 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun .labels = {"store-uri"}, .handler = {&dstUri}, }); - addFlag({ - .longName = "no-check-sigs", - .description = "Do not require that paths are signed by trusted keys.", - .handler = {&checkSigs, NoCheckSigs}, - }); } std::string description() override From 3bd2b76f6e93beb16405ffd6e98512d75067fda8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 Oct 2025 15:11:47 +0200 Subject: [PATCH 222/373] nix store sign: Use required attribute --- src/nix/sigs.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 470cd3951..142421e9c 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -104,6 +104,7 @@ struct CmdSign : StorePathsCommand .labels = {"file"}, .handler = {&secretKeyFile}, .completer = completePath, + .required = true, }); } @@ -114,9 +115,6 @@ struct CmdSign : StorePathsCommand void run(ref store, StorePaths && storePaths) override { - if (secretKeyFile.empty()) - throw UsageError("you must specify a secret key file using '-k'"); - SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); From d782c5e5863cdcfd3fc8013c84efa1053b3d2e80 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 Oct 2025 16:57:43 +0200 Subject: [PATCH 
223/373] Daemon protocol: Use the WorkerProto serializer for store paths --- src/libstore/daemon.cc | 56 +++++++++++++++++++----------------- src/libstore/remote-store.cc | 45 +++++++++++++++-------------- 2 files changed, 53 insertions(+), 48 deletions(-) diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 00c0a1fdd..d6d2a5781 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -312,7 +312,7 @@ static void performOp( switch (op) { case WorkerProto::Op::IsValidPath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); bool result = store->isValidPath(path); logger->stopWork(); @@ -339,7 +339,7 @@ static void performOp( } case WorkerProto::Op::HasSubstitutes: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); StorePathSet paths; // FIXME paths.insert(path); @@ -359,7 +359,7 @@ static void performOp( } case WorkerProto::Op::QueryPathHash: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto hash = store->queryPathInfo(path)->narHash; logger->stopWork(); @@ -371,7 +371,7 @@ static void performOp( case WorkerProto::Op::QueryReferrers: case WorkerProto::Op::QueryValidDerivers: case WorkerProto::Op::QueryDerivationOutputs: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); StorePathSet paths; if (op == WorkerProto::Op::QueryReferences) @@ -389,7 +389,7 @@ static void performOp( } case WorkerProto::Op::QueryDerivationOutputNames: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto names = store->readDerivation(path).outputNames(); logger->stopWork(); @@ -398,7 +398,7 @@ static void 
performOp( } case WorkerProto::Op::QueryDerivationOutputMap: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto outputs = store->queryPartialDerivationOutputMap(path); logger->stopWork(); @@ -407,11 +407,11 @@ static void performOp( } case WorkerProto::Op::QueryDeriver: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto info = store->queryPathInfo(path); logger->stopWork(); - conn.to << (info->deriver ? store->printStorePath(*info->deriver) : ""); + WorkerProto::write(*store, conn, info->deriver); break; } @@ -420,7 +420,7 @@ static void performOp( logger->startWork(); auto path = store->queryPathFromHashPart(hashPart); logger->stopWork(); - conn.to << (path ? store->printStorePath(*path) : ""); + WorkerProto::write(*store, conn, path); break; } @@ -505,7 +505,7 @@ static void performOp( store->addToStoreFromDump(*dumpSource, baseName, FileSerialisationMethod::NixArchive, method, hashAlgo); logger->stopWork(); - conn.to << store->printStorePath(path); + WorkerProto::write(*store, wconn, path); } break; } @@ -542,7 +542,7 @@ static void performOp( NoRepair); }); logger->stopWork(); - conn.to << store->printStorePath(path); + WorkerProto::write(*store, wconn, path); break; } @@ -591,7 +591,7 @@ static void performOp( } case WorkerProto::Op::BuildDerivation: { - auto drvPath = store->parseStorePath(readString(conn.from)); + auto drvPath = WorkerProto::Serialise::read(*store, rconn); BasicDerivation drv; /* * Note: unlike wopEnsurePath, this operation reads a @@ -668,7 +668,7 @@ static void performOp( } case WorkerProto::Op::EnsurePath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); store->ensurePath(path); logger->stopWork(); @@ -677,7 +677,7 @@ static void performOp( } case 
WorkerProto::Op::AddTempRoot: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); store->addTempRoot(path); logger->stopWork(); @@ -733,8 +733,10 @@ static void performOp( conn.to << size; for (auto & [target, links] : roots) - for (auto & link : links) - conn.to << link << store->printStorePath(target); + for (auto & link : links) { + conn.to << link; + WorkerProto::write(*store, wconn, target); + } break; } @@ -799,7 +801,7 @@ static void performOp( } case WorkerProto::Op::QuerySubstitutablePathInfo: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); SubstitutablePathInfos infos; store->querySubstitutablePathInfos({{path, std::nullopt}}, infos); @@ -808,7 +810,8 @@ static void performOp( if (i == infos.end()) conn.to << 0; else { - conn.to << 1 << (i->second.deriver ? store->printStorePath(*i->second.deriver) : ""); + conn.to << 1; + WorkerProto::write(*store, wconn, i->second.deriver); WorkerProto::write(*store, wconn, i->second.references); conn.to << i->second.downloadSize << i->second.narSize; } @@ -829,8 +832,8 @@ static void performOp( logger->stopWork(); conn.to << infos.size(); for (auto & i : infos) { - conn.to << store->printStorePath(i.first) - << (i.second.deriver ? 
store->printStorePath(*i.second.deriver) : ""); + WorkerProto::write(*store, wconn, i.first); + WorkerProto::write(*store, wconn, i.second.deriver); WorkerProto::write(*store, wconn, i.second.references); conn.to << i.second.downloadSize << i.second.narSize; } @@ -846,7 +849,7 @@ static void performOp( } case WorkerProto::Op::QueryPathInfo: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); std::shared_ptr info; logger->startWork(); info = store->queryPathInfo(path); @@ -880,7 +883,7 @@ static void performOp( } case WorkerProto::Op::AddSignatures: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); StringSet sigs = readStrings(conn.from); logger->startWork(); store->addSignatures(path, sigs); @@ -890,7 +893,7 @@ static void performOp( } case WorkerProto::Op::NarFromPath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); logger->stopWork(); dumpPath(store->toRealPath(path), conn.to); @@ -899,12 +902,11 @@ static void performOp( case WorkerProto::Op::AddToStoreNar: { bool repair, dontCheckSigs; - auto path = store->parseStorePath(readString(conn.from)); - auto deriver = readString(conn.from); + auto path = WorkerProto::Serialise::read(*store, rconn); + auto deriver = WorkerProto::Serialise>::read(*store, rconn); auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256); ValidPathInfo info{path, narHash}; - if (deriver != "") - info.deriver = store->parseStorePath(deriver); + info.deriver = std::move(deriver); info.references = WorkerProto::Serialise::read(*store, rconn); conn.from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(conn.from); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 8dd5bc064..0d83aed4c 100644 --- a/src/libstore/remote-store.cc +++ 
b/src/libstore/remote-store.cc @@ -159,7 +159,8 @@ void RemoteStore::setOptions() bool RemoteStore::isValidPathUncached(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::IsValidPath << printStorePath(path); + conn->to << WorkerProto::Op::IsValidPath; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return readInt(conn->from); } @@ -205,10 +206,8 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S conn.processStderr(); size_t count = readNum(conn->from); for (size_t n = 0; n < count; n++) { - SubstitutablePathInfo & info(infos[parseStorePath(readString(conn->from))]); - auto deriver = readString(conn->from); - if (deriver != "") - info.deriver = parseStorePath(deriver); + SubstitutablePathInfo & info(infos[WorkerProto::Serialise::read(*this, *conn)]); + info.deriver = WorkerProto::Serialise>::read(*this, *conn); info.references = WorkerProto::Serialise::read(*this, *conn); info.downloadSize = readLongLong(conn->from); info.narSize = readLongLong(conn->from); @@ -235,7 +234,8 @@ void RemoteStore::queryPathInfoUncached( void RemoteStore::queryReferrers(const StorePath & path, StorePathSet & referrers) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryReferrers << printStorePath(path); + conn->to << WorkerProto::Op::QueryReferrers; + WorkerProto::write(*this, *conn, path); conn.processStderr(); for (auto & i : WorkerProto::Serialise::read(*this, *conn)) referrers.insert(i); @@ -244,7 +244,8 @@ void RemoteStore::queryReferrers(const StorePath & path, StorePathSet & referrer StorePathSet RemoteStore::queryValidDerivers(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryValidDerivers << printStorePath(path); + conn->to << WorkerProto::Op::QueryValidDerivers; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise::read(*this, *conn); } @@ -255,7 +256,8 @@ StorePathSet 
RemoteStore::queryDerivationOutputs(const StorePath & path) return Store::queryDerivationOutputs(path); } auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryDerivationOutputs << printStorePath(path); + conn->to << WorkerProto::Op::QueryDerivationOutputs; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise::read(*this, *conn); } @@ -266,7 +268,8 @@ RemoteStore::queryPartialDerivationOutputMap(const StorePath & path, Store * eva if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) { if (!evalStore_) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryDerivationOutputMap << printStorePath(path); + conn->to << WorkerProto::Op::QueryDerivationOutputMap; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise>>::read(*this, *conn); } else { @@ -299,10 +302,7 @@ std::optional RemoteStore::queryPathFromHashPart(const std::string & auto conn(getConnection()); conn->to << WorkerProto::Op::QueryPathFromHashPart << hashPart; conn.processStderr(); - Path path = readString(conn->from); - if (path.empty()) - return {}; - return parseStorePath(path); + return WorkerProto::Serialise>::read(*this, *conn); } ref RemoteStore::addCAToStore( @@ -384,7 +384,7 @@ ref RemoteStore::addCAToStore( break; } } - auto path = parseStorePath(readString(conn->from)); + auto path = WorkerProto::Serialise::read(*this, *conn); // Release our connection to prevent a deadlock in queryPathInfo(). conn_.reset(); return queryPathInfo(path); @@ -426,9 +426,10 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, Repair { auto conn(getConnection()); - conn->to << WorkerProto::Op::AddToStoreNar << printStorePath(info.path) - << (info.deriver ? 
printStorePath(*info.deriver) : "") - << info.narHash.to_string(HashFormat::Base16, false); + conn->to << WorkerProto::Op::AddToStoreNar; + WorkerProto::write(*this, *conn, info.path); + WorkerProto::write(*this, *conn, info.deriver); + conn->to << info.narHash.to_string(HashFormat::Base16, false); WorkerProto::write(*this, *conn, info.references); conn->to << info.registrationTime << info.narSize << info.ultimate << info.sigs << renderContentAddress(info.ca) << repair << !checkSigs; @@ -663,7 +664,8 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD void RemoteStore::ensurePath(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::EnsurePath << printStorePath(path); + conn->to << WorkerProto::Op::EnsurePath; + WorkerProto::write(*this, *conn, path); conn.processStderr(); readInt(conn->from); } @@ -683,8 +685,7 @@ Roots RemoteStore::findRoots(bool censor) Roots result; while (count--) { Path link = readString(conn->from); - auto target = parseStorePath(readString(conn->from)); - result[std::move(target)].emplace(link); + result[WorkerProto::Serialise::read(*this, *conn)].emplace(link); } return result; } @@ -728,7 +729,9 @@ bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair) void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { auto conn(getConnection()); - conn->to << WorkerProto::Op::AddSignatures << printStorePath(storePath) << sigs; + conn->to << WorkerProto::Op::AddSignatures; + WorkerProto::write(*this, *conn, storePath); + conn->to << sigs; conn.processStderr(); readInt(conn->from); } From b1d067c9bb33bbe35507872636d5e1c499b4ea7c Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 20:34:15 +0300 Subject: [PATCH 224/373] tests/nixos: Rename back S3 store nixos test --- ci/gha/tests/default.nix | 2 +- tests/nixos/default.nix | 2 +- ...curl-s3-binary-cache-store.nix => s3-binary-cache-store.nix} | 0 3 files changed, 2 
insertions(+), 2 deletions(-) rename tests/nixos/{curl-s3-binary-cache-store.nix => s3-binary-cache-store.nix} (100%) diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index fac4f9002..2bfdae17b 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -222,7 +222,7 @@ rec { }; vmTests = { - inherit (nixosTests) curl-s3-binary-cache-store; + inherit (nixosTests) s3-binary-cache-store; } // lib.optionalAttrs (!withSanitizers && !withCoverage) { # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 0112d2e2f..edfa4124f 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -199,7 +199,7 @@ in user-sandboxing = runNixOSTest ./user-sandboxing; - curl-s3-binary-cache-store = runNixOSTest ./curl-s3-binary-cache-store.nix; + s3-binary-cache-store = runNixOSTest ./s3-binary-cache-store.nix; fsync = runNixOSTest ./fsync.nix; diff --git a/tests/nixos/curl-s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix similarity index 100% rename from tests/nixos/curl-s3-binary-cache-store.nix rename to tests/nixos/s3-binary-cache-store.nix From dc03c6a8121a42f597268dcfbee2087a5a80018d Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 21:13:04 +0300 Subject: [PATCH 225/373] libstore: Put all the AWS credentials logic behind interface class AwsCredentialProvider This makes it so we don't need to rely on global variables and hacky destructors to clean up another global variable. Just putting it in the correct order in the class is more than enough. 
--- src/libstore/aws-creds.cc | 88 +++++++------------ src/libstore/filetransfer.cc | 22 ++--- src/libstore/include/nix/store/aws-creds.hh | 46 ++++++---- src/libstore/unix/build/derivation-builder.cc | 2 +- 4 files changed, 69 insertions(+), 89 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 93fc3da33..4ba5b7dee 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -24,50 +24,6 @@ namespace nix { namespace { -// Global credential provider cache using boost's concurrent map -// Key: profile name (empty string for default profile) -using CredentialProviderCache = - boost::concurrent_flat_map>; - -static CredentialProviderCache credentialProviderCache; - -/** - * Clear all cached credential providers. - * Called automatically by CrtWrapper destructor during static destruction. - */ -static void clearAwsCredentialsCache() -{ - credentialProviderCache.clear(); -} - -static void initAwsCrt() -{ - struct CrtWrapper - { - Aws::Crt::ApiHandle apiHandle; - - CrtWrapper() - { - apiHandle.InitializeLogging(Aws::Crt::LogLevel::Warn, static_cast(nullptr)); - } - - ~CrtWrapper() - { - try { - // CRITICAL: Clear credential provider cache BEFORE AWS CRT shuts down - // This ensures all providers (which hold references to ClientBootstrap) - // are destroyed while AWS CRT is still valid - clearAwsCredentialsCache(); - // Now it's safe for ApiHandle destructor to run - } catch (...) 
{ - ignoreExceptionInDestructor(); - } - } - }; - - static CrtWrapper crt; -} - static AwsCredentials getCredentialsFromProvider(std::shared_ptr provider) { if (!provider || !provider->IsValid()) { @@ -113,7 +69,35 @@ static AwsCredentials getCredentialsFromProvider(std::shared_ptr(nullptr)); + } + + AwsCredentials getCredentialsRaw(const std::string & profile); + + AwsCredentials getCredentials(const ParsedS3URL & url) override + { + auto profile = url.profile.value_or(""); + try { + return getCredentialsRaw(profile); + } catch (AwsAuthError & e) { + warn("AWS authentication failed for S3 request %s: %s", url.toHttpsUrl(), e.what()); + credentialProviderCache.erase(profile); + throw; + } + } + +private: + Aws::Crt::ApiHandle apiHandle; + boost::concurrent_flat_map> + credentialProviderCache; +}; + +AwsCredentials AwsCredentialProviderImpl::getCredentialsRaw(const std::string & profile) { // Get or create credential provider with caching std::shared_ptr provider; @@ -132,8 +116,6 @@ AwsCredentials getAwsCredentials(const std::string & profile) profile.empty() ? 
"(default)" : profile.c_str()); try { - initAwsCrt(); - if (profile.empty()) { Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); @@ -173,17 +155,15 @@ AwsCredentials getAwsCredentials(const std::string & profile) return getCredentialsFromProvider(provider); } -void invalidateAwsCredentials(const std::string & profile) +ref makeAwsCredentialsProvider() { - credentialProviderCache.erase(profile); + return make_ref(); } -AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url) +ref getAwsCredentialsProvider() { - std::string profile = s3Url.profile.value_or(""); - - // Get credentials (automatically cached) - return getAwsCredentials(profile); + static auto instance = makeAwsCredentialsProvider(); + return instance; } } // namespace nix diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 981d49d77..201f2984e 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -883,22 +883,12 @@ void FileTransferRequest::setupForS3() if (usernameAuth) { debug("Using pre-resolved AWS credentials from parent process"); sessionToken = preResolvedAwsSessionToken; - } else { - std::string profile = parsedS3.profile.value_or(""); - try { - auto creds = getAwsCredentials(profile); - usernameAuth = UsernameAuth{ - .username = creds.accessKeyId, - .password = creds.secretAccessKey, - }; - sessionToken = creds.sessionToken; - } catch (const AwsAuthError & e) { - warn("AWS authentication failed for S3 request %s: %s", uri, e.what()); - // Invalidate the cached credentials so next request will retry - invalidateAwsCredentials(profile); - // Continue without authentication - might be a public bucket - return; - } + } else if (auto creds = getAwsCredentialsProvider()->maybeGetCredentials(parsedS3)) { + usernameAuth = UsernameAuth{ + .username = creds->accessKeyId, + .password = creds->secretAccessKey, + }; + sessionToken = 
creds->sessionToken; } if (sessionToken) headers.emplace_back("x-amz-security-token", *sessionToken); diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 6e653936c..d72290ced 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -5,6 +5,7 @@ #if NIX_WITH_AWS_AUTH # include "nix/store/s3-url.hh" +# include "nix/util/ref.hh" # include "nix/util/error.hh" # include @@ -38,30 +39,39 @@ struct AwsCredentials */ MakeError(AwsAuthError, Error); -/** - * Get AWS credentials for the given profile. - * This function automatically caches credential providers to avoid - * creating multiple providers for the same profile. - * - * @param profile The AWS profile name (empty string for default profile) - * @return AWS credentials - * @throws AwsAuthError if credentials cannot be resolved - */ -AwsCredentials getAwsCredentials(const std::string & profile = ""); +class AwsCredentialProvider +{ +public: + /** + * Get AWS credentials for the given URL. + * + * @param url The S3 url to get the credentials for + * @return AWS credentials + * @throws AwsAuthError if credentials cannot be resolved + */ + virtual AwsCredentials getCredentials(const ParsedS3URL & url) = 0; + + std::optional maybeGetCredentials(const ParsedS3URL & url) + { + try { + return getCredentials(url); + } catch (AwsAuthError & e) { + return std::nullopt; + } + } + + virtual ~AwsCredentialProvider() {} +}; /** - * Invalidate cached credentials for a profile (e.g., on authentication failure). - * The next request for this profile will create a new provider. - * - * @param profile The AWS profile name to invalidate + * Create a new instance of AwsCredentialProvider. */ -void invalidateAwsCredentials(const std::string & profile); +ref makeAwsCredentialsProvider(); /** - * Pre-resolve AWS credentials for S3 URLs. - * Used to cache credentials in parent process before forking. 
+ * Get a reference to the global AwsCredentialProvider. */ -AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url); +ref getAwsCredentialsProvider(); } // namespace nix #endif diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 8a0fa5ef7..1246fbf26 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -958,7 +958,7 @@ std::optional DerivationBuilderImpl::preResolveAwsCredentials() auto s3Url = ParsedS3URL::parse(parsedUrl); // Use the preResolveAwsCredentials from aws-creds - auto credentials = nix::preResolveAwsCredentials(s3Url); + auto credentials = getAwsCredentialsProvider()->getCredentials(s3Url); debug("Successfully pre-resolved AWS credentials in parent process"); return credentials; } From 33e94fe19fdedca5dd89fdc0b292938ac58dc81a Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 21:48:13 +0300 Subject: [PATCH 226/373] libstore: Make AwsAuthError more legible Instead of the cryptic: > error: Failed to resolve AWS credentials: error code 6153` We now get more legible: > error: AWS authentication error: 'Valid credentials could not be sourced by the IMDS provider' (6153) --- src/libstore/aws-creds.cc | 9 +++++++-- src/libstore/include/nix/store/aws-creds.hh | 17 +++++++++++++---- src/libstore/meson.build | 2 ++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 4ba5b7dee..6c9bc99b2 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -22,6 +22,12 @@ namespace nix { +AwsAuthError::AwsAuthError(int errorCode) + : Error("AWS authentication error: '%s' (%d)", aws_error_str(errorCode), errorCode) + , errorCode(errorCode) +{ +} + namespace { static AwsCredentials getCredentialsFromProvider(std::shared_ptr provider) @@ -35,8 +41,7 @@ static AwsCredentials 
getCredentialsFromProvider(std::shared_ptrGetCredentials([prom](std::shared_ptr credentials, int errorCode) { if (errorCode != 0 || !credentials) { - prom->set_exception( - std::make_exception_ptr(AwsAuthError("Failed to resolve AWS credentials: error code %d", errorCode))); + prom->set_exception(std::make_exception_ptr(AwsAuthError(errorCode))); } else { auto accessKeyId = Aws::Crt::ByteCursorToStringView(credentials->GetAccessKeyId()); auto secretAccessKey = Aws::Crt::ByteCursorToStringView(credentials->GetSecretAccessKey()); diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index d72290ced..30f6592a0 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -34,10 +34,19 @@ struct AwsCredentials } }; -/** - * Exception thrown when AWS authentication fails - */ -MakeError(AwsAuthError, Error); +class AwsAuthError : public Error +{ + std::optional errorCode; + +public: + using Error::Error; + AwsAuthError(int errorCode); + + std::optional getErrorCode() const + { + return errorCode; + } +}; class AwsCredentialProvider { diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 78a3dd9b3..40da06e6b 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -158,6 +158,8 @@ curl_s3_store_opt = get_option('curl-s3-store').require( if curl_s3_store_opt.enabled() deps_other += aws_crt_cpp + aws_c_common = cxx.find_library('aws-c-common', required : true) + deps_other += aws_c_common endif configdata_pub.set('NIX_WITH_AWS_AUTH', curl_s3_store_opt.enabled().to_int()) From e7047fde2549aa207ebd28cfb67a7eb21471c708 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 21:33:42 +0300 Subject: [PATCH 227/373] libstore: Remove the unnecessary 'error: ' prefix in warning message --- src/libstore/aws-creds.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 
6c9bc99b2..d58293560 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -90,7 +90,7 @@ public: try { return getCredentialsRaw(profile); } catch (AwsAuthError & e) { - warn("AWS authentication failed for S3 request %s: %s", url.toHttpsUrl(), e.what()); + warn("AWS authentication failed for S3 request %s: %s", url.toHttpsUrl(), e.message()); credentialProviderCache.erase(profile); throw; } From 4cbcaad435b18fff1904757b2e035c9033d647eb Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 16 Oct 2025 23:08:30 +0300 Subject: [PATCH 228/373] libstore/registerOutputs: Don't try to optimize a non-existent actualPath Since 3c610df550be35d9696efe9dd3217a6e1ec100f2 this resulted in `getting status of` errors on paths inside the chroot if a path was already valid. Careful inspection of the logic shows that if buildMode != bmCheck actualPath gets reassigned to store.toRealPath(finalDestPath). The only branch that cares about actualPath is the buildMode == bmCheck case, which doesn't lead to optimisePath anyway. --- src/libstore/unix/build/derivation-builder.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 8a0fa5ef7..0efdc14b2 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1742,7 +1742,6 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() if (buildMode == bmRepair) { /* Path already exists, need to replace it */ replaceValidPath(store.toRealPath(finalDestPath), actualPath); - actualPath = store.toRealPath(finalDestPath); } else if (buildMode == bmCheck) { /* Path already exists, and we want to compare, so we leave out new path in place. 
*/ @@ -1756,7 +1755,6 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto destPath = store.toRealPath(finalDestPath); deletePath(destPath); movePath(actualPath, destPath); - actualPath = destPath; } } @@ -1809,7 +1807,9 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() debug("unreferenced input: '%1%'", store.printStorePath(i)); } - store.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences() + if (!store.isValidPath(newInfo.path)) + store.optimisePath( + store.toRealPath(finalDestPath), NoRepair); // FIXME: combine with scanForReferences() newInfo.deriver = drvPath; newInfo.ultimate = true; From 1c02dd5b9c2b65115c49d2dbed43c01d467f77c2 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 16 Oct 2025 15:49:47 -0400 Subject: [PATCH 229/373] Allow for standard nlohmann JSON serializers to take separate XP features I realized that we can actually do this thing, even though it is not what nlohmann expects at all, because the extra parameter has a default argument so nlohmann doesn't need to care. Sneaky! --- src/libstore-tests/derivation.cc | 36 +++++++------------ src/libstore/derivations.cc | 9 ++--- src/libstore/include/nix/store/derivations.hh | 4 +-- .../nix/util/tests/json-characterization.hh | 4 +-- src/libutil/include/nix/util/json-impls.hh | 14 ++++++++ 5 files changed, 35 insertions(+), 32 deletions(-) diff --git a/src/libstore-tests/derivation.cc b/src/libstore-tests/derivation.cc index 65a5d011d..75bf75753 100644 --- a/src/libstore-tests/derivation.cc +++ b/src/libstore-tests/derivation.cc @@ -66,23 +66,17 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) FormatError); } -#define MAKE_OUTPUT_JSON_TEST_P(FIXTURE) \ - TEST_P(FIXTURE, from_json) \ - { \ - const auto & [name, expected] = GetParam(); \ - /* Don't use readJsonTest because we want to check experimental \ - features. 
*/ \ - readTest(Path{"output-"} + name + ".json", [&](const auto & encoded_) { \ - json j = json::parse(encoded_); \ - DerivationOutput got = DerivationOutput::fromJSON(j, mockXpSettings); \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_P(FIXTURE, to_json) \ - { \ - const auto & [name, value] = GetParam(); \ - writeJsonTest("output-" + name, value); \ +#define MAKE_OUTPUT_JSON_TEST_P(FIXTURE) \ + TEST_P(FIXTURE, from_json) \ + { \ + const auto & [name, expected] = GetParam(); \ + readJsonTest(Path{"output-"} + name, expected, mockXpSettings); \ + } \ + \ + TEST_P(FIXTURE, to_json) \ + { \ + const auto & [name, value] = GetParam(); \ + writeJsonTest("output-" + name, value); \ } struct DerivationOutputJsonTest : DerivationTest, @@ -193,13 +187,7 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(FIXTURE, from_json) \ { \ const auto & drv = GetParam(); \ - /* Don't use readJsonTest because we want to check experimental \ - features. */ \ - readTest(drv.name + ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - Derivation got = Derivation::fromJSON(encoded, mockXpSettings); \ - ASSERT_EQ(got, drv); \ - }); \ + readJsonTest(drv.name, drv, mockXpSettings); \ } \ \ TEST_P(FIXTURE, to_json) \ diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 24dd61807..d39080e08 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -1496,9 +1496,10 @@ namespace nlohmann { using namespace nix; -DerivationOutput adl_serializer::from_json(const json & json) +DerivationOutput +adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { - return DerivationOutput::fromJSON(json); + return DerivationOutput::fromJSON(json, xpSettings); } void adl_serializer::to_json(json & json, const DerivationOutput & c) @@ -1506,9 +1507,9 @@ void adl_serializer::to_json(json & json, const DerivationOutp json = c.toJSON(); } -Derivation adl_serializer::from_json(const json & json) +Derivation 
adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { - return Derivation::fromJSON(json); + return Derivation::fromJSON(json, xpSettings); } void adl_serializer::to_json(json & json, const Derivation & c) diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index 0dfb80347..45188d6b3 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -537,5 +537,5 @@ std::string hashPlaceholder(const OutputNameView outputName); } // namespace nix -JSON_IMPL(nix::DerivationOutput) -JSON_IMPL(nix::Derivation) +JSON_IMPL_WITH_XP_FEATURES(nix::DerivationOutput) +JSON_IMPL_WITH_XP_FEATURES(nix::Derivation) diff --git a/src/libutil-test-support/include/nix/util/tests/json-characterization.hh b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh index 5a38b8e2c..d713c615b 100644 --- a/src/libutil-test-support/include/nix/util/tests/json-characterization.hh +++ b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh @@ -24,12 +24,12 @@ struct JsonCharacterizationTest : virtual CharacterizationTest * @param test hook that takes the contents of the file and does the * actual work */ - void readJsonTest(PathView testStem, const T & expected) + void readJsonTest(PathView testStem, const T & expected, auto... 
args) { using namespace nlohmann; readTest(Path{testStem} + ".json", [&](const auto & encodedRaw) { auto encoded = json::parse(encodedRaw); - T decoded = adl_serializer::from_json(encoded); + T decoded = adl_serializer::from_json(encoded, args...); ASSERT_EQ(decoded, expected); }); } diff --git a/src/libutil/include/nix/util/json-impls.hh b/src/libutil/include/nix/util/json-impls.hh index 751fc410f..802c212e1 100644 --- a/src/libutil/include/nix/util/json-impls.hh +++ b/src/libutil/include/nix/util/json-impls.hh @@ -3,6 +3,8 @@ #include +#include "nix/util/experimental-features.hh" + // Following https://github.com/nlohmann/json#how-can-i-use-get-for-non-default-constructiblenon-copyable-types #define JSON_IMPL(TYPE) \ namespace nlohmann { \ @@ -14,3 +16,15 @@ static void to_json(json & json, const TYPE & t); \ }; \ } + +#define JSON_IMPL_WITH_XP_FEATURES(TYPE) \ + namespace nlohmann { \ + using namespace nix; \ + template<> \ + struct adl_serializer \ + { \ + static TYPE \ + from_json(const json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); \ + static void to_json(json & json, const TYPE & t); \ + }; \ + } From a2c6f38e1fd1869be638756549de949c315a845b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 16 Oct 2025 16:08:59 -0400 Subject: [PATCH 230/373] Remove now-redundant methods for JSON on `Derivation` --- .../derivation-advanced-attrs.cc | 78 +++++++++---------- src/libstore-tests/derivation.cc | 3 +- src/libstore/derivations.cc | 72 ++++++----------- src/libstore/include/nix/store/derivations.hh | 11 --- src/nix/derivation-add.cc | 2 +- src/nix/derivation-show.cc | 2 +- 6 files changed, 67 insertions(+), 101 deletions(-) diff --git a/src/libstore-tests/derivation-advanced-attrs.cc b/src/libstore-tests/derivation-advanced-attrs.cc index 9c13bf048..02bc8fa24 100644 --- a/src/libstore-tests/derivation-advanced-attrs.cc +++ b/src/libstore-tests/derivation-advanced-attrs.cc @@ -14,7 +14,7 @@ namespace nix { -using 
nlohmann::json; +using namespace nlohmann; class DerivationAdvancedAttrsTest : public CharacterizationTest, public LibStoreTest { @@ -51,44 +51,44 @@ using BothFixtures = ::testing::TypesreadTest(NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - /* Use DRV file instead of C++ literal as source of truth. */ \ - auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ - auto expected = parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ - Derivation got = Derivation::fromJSON(encoded, this->mockXpSettings); \ - EXPECT_EQ(got, expected); \ - }); \ - } \ - \ - TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_to_json) \ - { \ - this->writeTest( \ - NAME ".json", \ - [&]() -> json { \ - /* Use DRV file instead of C++ literal as source of truth. */ \ - auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ - return parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings).toJSON(); \ - }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ - } \ - \ - TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_aterm) \ - { \ - this->readTest(NAME ".drv", [&](auto encoded) { \ - /* Use JSON file instead of C++ literal as source of truth. */ \ - auto json = json::parse(readFile(this->goldenMaster(NAME ".json"))); \ - auto expected = Derivation::fromJSON(json, this->mockXpSettings); \ - auto got = parseDerivation(*this->store, std::move(encoded), NAME, this->mockXpSettings); \ - EXPECT_EQ(got.toJSON(), expected.toJSON()); \ - EXPECT_EQ(got, expected); \ - }); \ - } \ - \ +#define TEST_ATERM_JSON(STEM, NAME) \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_json) \ + { \ + this->readTest(NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + /* Use DRV file instead of C++ literal as source of truth. 
*/ \ + auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ + auto expected = parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ + Derivation got = adl_serializer::from_json(encoded, this->mockXpSettings); \ + EXPECT_EQ(got, expected); \ + }); \ + } \ + \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_to_json) \ + { \ + this->writeTest( \ + NAME ".json", \ + [&]() -> json { \ + /* Use DRV file instead of C++ literal as source of truth. */ \ + auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ + return parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ + }, \ + [](const auto & file) { return json::parse(readFile(file)); }, \ + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ + } \ + \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_aterm) \ + { \ + this->readTest(NAME ".drv", [&](auto encoded) { \ + /* Use JSON file instead of C++ literal as source of truth. 
*/ \ + auto j = json::parse(readFile(this->goldenMaster(NAME ".json"))); \ + auto expected = adl_serializer::from_json(j, this->mockXpSettings); \ + auto got = parseDerivation(*this->store, std::move(encoded), NAME, this->mockXpSettings); \ + EXPECT_EQ(static_cast(got), static_cast(expected)); \ + EXPECT_EQ(got, expected); \ + }); \ + } \ + \ /* No corresponding write test, because we need to read the drv to write the json file */ TEST_ATERM_JSON(advancedAttributes, "advanced-attributes-defaults"); diff --git a/src/libstore-tests/derivation.cc b/src/libstore-tests/derivation.cc index 75bf75753..6b33e5442 100644 --- a/src/libstore-tests/derivation.cc +++ b/src/libstore-tests/derivation.cc @@ -201,7 +201,8 @@ INSTANTIATE_TEST_SUITE_P( const auto & drv = GetParam(); \ readTest(drv.name + ".drv", [&](auto encoded) { \ auto got = parseDerivation(*store, std::move(encoded), drv.name, mockXpSettings); \ - ASSERT_EQ(got.toJSON(), drv.toJSON()); \ + using nlohmann::json; \ + ASSERT_EQ(static_cast(got), static_cast(drv)); \ ASSERT_EQ(got, drv); \ }); \ } \ diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index d39080e08..f44bf3e70 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -1261,9 +1261,15 @@ void Derivation::checkInvariants(Store & store, const StorePath & drvPath) const const Hash impureOutputHash = hashString(HashAlgorithm::SHA256, "impure"); -nlohmann::json DerivationOutput::toJSON() const +} // namespace nix + +namespace nlohmann { + +using namespace nix; + +void adl_serializer::to_json(json & res, const DerivationOutput & o) { - nlohmann::json res = nlohmann::json::object(); + res = nlohmann::json::object(); std::visit( overloaded{ [&](const DerivationOutput::InputAddressed & doi) { res["path"] = doi.path; }, @@ -1289,12 +1295,11 @@ nlohmann::json DerivationOutput::toJSON() const res["impure"] = true; }, }, - raw); - return res; + o.raw); } DerivationOutput -DerivationOutput::fromJSON(const nlohmann::json & 
_json, const ExperimentalFeatureSettings & xpSettings) +adl_serializer::from_json(const json & _json, const ExperimentalFeatureSettings & xpSettings) { std::set keys; auto & json = getObject(_json); @@ -1362,18 +1367,18 @@ DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatu } } -nlohmann::json Derivation::toJSON() const +void adl_serializer::to_json(json & res, const Derivation & d) { - nlohmann::json res = nlohmann::json::object(); + res = nlohmann::json::object(); - res["name"] = name; + res["name"] = d.name; res["version"] = 3; { nlohmann::json & outputsObj = res["outputs"]; outputsObj = nlohmann::json::object(); - for (auto & [outputName, output] : outputs) { + for (auto & [outputName, output] : d.outputs) { outputsObj[outputName] = output; } } @@ -1381,7 +1386,7 @@ nlohmann::json Derivation::toJSON() const { auto & inputsList = res["inputSrcs"]; inputsList = nlohmann::json ::array(); - for (auto & input : inputSrcs) + for (auto & input : d.inputSrcs) inputsList.emplace_back(input); } @@ -1401,24 +1406,22 @@ nlohmann::json Derivation::toJSON() const { auto & inputDrvsObj = res["inputDrvs"]; inputDrvsObj = nlohmann::json::object(); - for (auto & [inputDrv, inputNode] : inputDrvs.map) { + for (auto & [inputDrv, inputNode] : d.inputDrvs.map) { inputDrvsObj[inputDrv.to_string()] = doInput(inputNode); } } } - res["system"] = platform; - res["builder"] = builder; - res["args"] = args; - res["env"] = env; + res["system"] = d.platform; + res["builder"] = d.builder; + res["args"] = d.args; + res["env"] = d.env; - if (structuredAttrs) - res["structuredAttrs"] = structuredAttrs->structuredAttrs; - - return res; + if (d.structuredAttrs) + res["structuredAttrs"] = d.structuredAttrs->structuredAttrs; } -Derivation Derivation::fromJSON(const nlohmann::json & _json, const ExperimentalFeatureSettings & xpSettings) +Derivation adl_serializer::from_json(const json & _json, const ExperimentalFeatureSettings & xpSettings) { using 
nlohmann::detail::value_t; @@ -1434,7 +1437,7 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental try { auto outputs = getObject(valueAt(json, "outputs")); for (auto & [outputName, output] : outputs) { - res.outputs.insert_or_assign(outputName, DerivationOutput::fromJSON(output, xpSettings)); + res.outputs.insert_or_assign(outputName, adl_serializer::from_json(output, xpSettings)); } } catch (Error & e) { e.addTrace({}, "while reading key 'outputs'"); @@ -1490,31 +1493,4 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental return res; } -} // namespace nix - -namespace nlohmann { - -using namespace nix; - -DerivationOutput -adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) -{ - return DerivationOutput::fromJSON(json, xpSettings); -} - -void adl_serializer::to_json(json & json, const DerivationOutput & c) -{ - json = c.toJSON(); -} - -Derivation adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) -{ - return Derivation::fromJSON(json, xpSettings); -} - -void adl_serializer::to_json(json & json, const Derivation & c) -{ - json = c.toJSON(); -} - } // namespace nlohmann diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index 45188d6b3..4615d8acd 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -134,13 +134,6 @@ struct DerivationOutput */ std::optional path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; - - nlohmann::json toJSON() const; - /** - * @param xpSettings Stop-gap to avoid globals during unit tests. 
- */ - static DerivationOutput - fromJSON(const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; typedef std::map DerivationOutputs; @@ -390,10 +383,6 @@ struct Derivation : BasicDerivation { } - nlohmann::json toJSON() const; - static Derivation - fromJSON(const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); - bool operator==(const Derivation &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. // auto operator <=> (const Derivation &) const = default; diff --git a/src/nix/derivation-add.cc b/src/nix/derivation-add.cc index 2d13aba52..48e935092 100644 --- a/src/nix/derivation-add.cc +++ b/src/nix/derivation-add.cc @@ -33,7 +33,7 @@ struct CmdAddDerivation : MixDryRun, StoreCommand { auto json = nlohmann::json::parse(drainFD(STDIN_FILENO)); - auto drv = Derivation::fromJSON(json); + auto drv = static_cast(json); auto drvPath = writeDerivation(*store, drv, NoRepair, /* read only */ dryRun); diff --git a/src/nix/derivation-show.cc b/src/nix/derivation-show.cc index 20e54bba7..1528f5b51 100644 --- a/src/nix/derivation-show.cc +++ b/src/nix/derivation-show.cc @@ -58,7 +58,7 @@ struct CmdShowDerivation : InstallablesCommand, MixPrintJSON if (!drvPath.isDerivation()) continue; - jsonRoot[drvPath.to_string()] = store->readDerivation(drvPath).toJSON(); + jsonRoot[drvPath.to_string()] = store->readDerivation(drvPath); } printJSON(jsonRoot); } From 1177d65094acd4a7b9d57f2672c08fd72d007d99 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 16 Oct 2025 16:44:27 -0400 Subject: [PATCH 231/373] Properly check xp features when deserializing deriving paths --- src/libstore-tests/derived-path.cc | 74 +++++++++++++------ src/libstore/derived-path.cc | 23 ++++-- .../include/nix/store/derived-path.hh | 8 +- 3 files changed, 69 insertions(+), 36 deletions(-) diff --git a/src/libstore-tests/derived-path.cc 
b/src/libstore-tests/derived-path.cc index 6e7648f25..70e789c0c 100644 --- a/src/libstore-tests/derived-path.cc +++ b/src/libstore-tests/derived-path.cc @@ -3,13 +3,13 @@ #include #include -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" #include "nix/store/tests/derived-path.hh" #include "nix/store/tests/libstore.hh" namespace nix { -class DerivedPathTest : public CharacterizationTest, public LibStoreTest +class DerivedPathTest : public virtual CharacterizationTest, public LibStoreTest { std::filesystem::path unitTestData = getUnitTestData() / "derived-path"; @@ -123,25 +123,51 @@ RC_GTEST_FIXTURE_PROP(DerivedPathTest, prop_round_rip, (const DerivedPath & o)) using nlohmann::json; -#define TEST_JSON(TYPE, NAME, VAL) \ - static const TYPE NAME = VAL; \ - \ - TEST_F(DerivedPathTest, NAME##_from_json) \ - { \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - TYPE got = static_cast(encoded); \ - ASSERT_EQ(got, NAME); \ - }); \ - } \ - \ - TEST_F(DerivedPathTest, NAME##_to_json) \ - { \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return static_cast(NAME); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ +struct SingleDerivedPathJsonTest : DerivedPathTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; + +struct DerivedPathJsonTest : DerivedPathTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; + +#define TEST_JSON(TYPE, NAME, VAL) \ + static const TYPE NAME = VAL; \ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json) \ + { \ + readJsonTest(#NAME, NAME); \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_to_json) \ + { \ + writeJsonTest(#NAME, NAME); \ + } + +#define TEST_JSON_XP_DYN(TYPE, NAME, VAL) \ + static const TYPE NAME = VAL; \ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json_throws_without_xp) \ + { \ + std::optional 
ret; \ + readTest(#NAME ".json", [&](const auto & encoded_) { ret = json::parse(encoded_); }); \ + if (ret) { \ + EXPECT_THROW(nlohmann::adl_serializer::from_json(*ret), MissingExperimentalFeature); \ + } \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json) \ + { \ + ExperimentalFeatureSettings xpSettings; \ + xpSettings.set("experimental-features", "dynamic-derivations"); \ + readJsonTest(#NAME, NAME, xpSettings); \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_to_json) \ + { \ + writeJsonTest(#NAME, NAME); \ } TEST_JSON( @@ -156,7 +182,7 @@ TEST_JSON( .output = "bar", })); -TEST_JSON( +TEST_JSON_XP_DYN( SingleDerivedPath, single_built_built, (SingleDerivedPath::Built{ @@ -179,7 +205,7 @@ TEST_JSON( .outputs = OutputsSpec::Names{"bar", "baz"}, })); -TEST_JSON( +TEST_JSON_XP_DYN( DerivedPath, multi_built_built, (DerivedPath::Built{ @@ -191,7 +217,7 @@ TEST_JSON( .outputs = OutputsSpec::Names{"baz", "quux"}, })); -TEST_JSON( +TEST_JSON_XP_DYN( DerivedPath, multi_built_built_wildcard, (DerivedPath::Built{ diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 8d606cb41..251e11251 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -252,20 +252,26 @@ void adl_serializer::to_json(json & json, const DerivedPath: }; } -SingleDerivedPath::Built adl_serializer::from_json(const json & json0) +SingleDerivedPath::Built +adl_serializer::from_json(const json & json0, const ExperimentalFeatureSettings & xpSettings) { auto & json = getObject(json0); + auto drvPath = make_ref(static_cast(valueAt(json, "drvPath"))); + drvRequireExperiment(*drvPath, xpSettings); return { - .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .drvPath = std::move(drvPath), .output = getString(valueAt(json, "output")), }; } -DerivedPath::Built adl_serializer::from_json(const json & json0) +DerivedPath::Built +adl_serializer::from_json(const json & json0, const ExperimentalFeatureSettings & xpSettings) { auto & json = getObject(json0); + auto 
drvPath = make_ref(static_cast(valueAt(json, "drvPath"))); + drvRequireExperiment(*drvPath, xpSettings); return { - .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .drvPath = std::move(drvPath), .outputs = adl_serializer::from_json(valueAt(json, "outputs")), }; } @@ -280,20 +286,21 @@ void adl_serializer::to_json(json & json, const DerivedPath & sdp) std::visit([&](const auto & buildable) { json = buildable; }, sdp.raw()); } -SingleDerivedPath adl_serializer::from_json(const json & json) +SingleDerivedPath +adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { if (json.is_string()) return static_cast(json); else - return static_cast(json); + return adl_serializer::from_json(json, xpSettings); } -DerivedPath adl_serializer::from_json(const json & json) +DerivedPath adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { if (json.is_string()) return static_cast(json); else - return static_cast(json); + return adl_serializer::from_json(json, xpSettings); } } // namespace nlohmann diff --git a/src/libstore/include/nix/store/derived-path.hh b/src/libstore/include/nix/store/derived-path.hh index 47b29b2d6..70074ea40 100644 --- a/src/libstore/include/nix/store/derived-path.hh +++ b/src/libstore/include/nix/store/derived-path.hh @@ -299,7 +299,7 @@ void drvRequireExperiment( } // namespace nix JSON_IMPL(nix::SingleDerivedPath::Opaque) -JSON_IMPL(nix::SingleDerivedPath::Built) -JSON_IMPL(nix::SingleDerivedPath) -JSON_IMPL(nix::DerivedPath::Built) -JSON_IMPL(nix::DerivedPath) +JSON_IMPL_WITH_XP_FEATURES(nix::SingleDerivedPath::Built) +JSON_IMPL_WITH_XP_FEATURES(nix::SingleDerivedPath) +JSON_IMPL_WITH_XP_FEATURES(nix::DerivedPath::Built) +JSON_IMPL_WITH_XP_FEATURES(nix::DerivedPath) From 01b001d5ba710637a24ab1432533acdb7bc1292a Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 16 Oct 2025 15:32:23 +0200 Subject: [PATCH 232/373] Add JSON Schema infrastructure, use for 
Derivation For manual, and testing formats --- ci/gha/tests/default.nix | 1 + doc/manual/meson.build | 1 + doc/manual/package.nix | 2 + doc/manual/source/SUMMARY.md.in | 1 + doc/manual/source/meson.build | 3 + .../source/protocols/json/derivation.md | 123 +------------ .../json/fixup-json-schema-generated-doc.sed | 14 ++ doc/manual/source/protocols/json/hash.md | 7 + .../json/json-schema-for-humans-config.yaml | 17 ++ doc/manual/source/protocols/json/meson.build | 74 ++++++++ .../protocols/json/schema/derivation-v3.yaml | 164 ++++++++++++++++++ .../source/protocols/json/schema/hash-v1.yaml | 29 ++++ doc/manual/source/protocols/meson.build | 2 + flake.nix | 4 + meson.build | 1 + packaging/components.nix | 5 + packaging/dev-shell.nix | 1 + packaging/hydra.nix | 1 + src/json-schema-checks/.version | 1 + src/json-schema-checks/derivation | 1 + src/json-schema-checks/meson.build | 76 ++++++++ src/json-schema-checks/package.nix | 50 ++++++ src/json-schema-checks/schema | 1 + src/nix/derivation-add.md | 5 +- src/nix/derivation-show.md | 5 +- 25 files changed, 465 insertions(+), 124 deletions(-) create mode 100644 doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed create mode 100644 doc/manual/source/protocols/json/hash.md create mode 100644 doc/manual/source/protocols/json/json-schema-for-humans-config.yaml create mode 100644 doc/manual/source/protocols/json/meson.build create mode 100644 doc/manual/source/protocols/json/schema/derivation-v3.yaml create mode 100644 doc/manual/source/protocols/json/schema/hash-v1.yaml create mode 100644 doc/manual/source/protocols/meson.build create mode 120000 src/json-schema-checks/.version create mode 120000 src/json-schema-checks/derivation create mode 100644 src/json-schema-checks/meson.build create mode 100644 src/json-schema-checks/package.nix create mode 120000 src/json-schema-checks/schema diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 2bfdae17b..0c5c103bf 100644 --- 
a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -116,6 +116,7 @@ rec { ) nixComponentsInstrumented) // lib.optionalAttrs (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) { "${componentTestsPrefix}nix-functional-tests" = nixComponentsInstrumented.nix-functional-tests; + "${componentTestsPrefix}nix-json-schema-checks" = nixComponentsInstrumented.nix-json-schema-checks; }; codeCoverage = diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 2e372dedd..7090c949c 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -115,6 +115,7 @@ manual = custom_target( builtins_md, rl_next_generated, summary_rl_next, + json_schema_generated_files, nix_input, ], output : [ diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 69b7c0e49..7b94721ae 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -12,6 +12,7 @@ rsync, nix-cli, changelog-d, + json-schema-for-humans, officialRelease, # Configuration Options @@ -55,6 +56,7 @@ mkMesonDerivation (finalAttrs: { jq python3 rsync + json-schema-for-humans changelog-d ] ++ lib.optionals (!officialRelease) [ diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 25e68811d..f74ed7043 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -117,6 +117,7 @@ - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) + - [Hash](protocols/json/hash.md) - [Store Object Info](protocols/json/store-object-info.md) - [Derivation](protocols/json/derivation.md) - [Serving Tarball Flakes](protocols/tarball-fetcher.md) diff --git a/doc/manual/source/meson.build b/doc/manual/source/meson.build index 949d26526..294d57ad9 100644 --- a/doc/manual/source/meson.build +++ b/doc/manual/source/meson.build @@ -1,3 +1,6 @@ +# Process JSON schema documentation +subdir('protocols') + summary_rl_next = custom_target( command : [ bash, diff --git 
a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index cc9389f7c..602ab67e4 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -1,120 +1,7 @@ -# Derivation JSON Format +{{#include derivation-v3-fixed.md}} -> **Warning** -> -> This JSON format is currently -> [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> and subject to change. + diff --git a/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed new file mode 100644 index 000000000..126e666e9 --- /dev/null +++ b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed @@ -0,0 +1,14 @@ +# For some reason, backticks in the JSON schema are being escaped rather +# than being kept as intentional code spans. This removes all backtick +# escaping, which is an ugly solution, but one that is fine, because we +# are not using backticks for any other purpose. +s/\\`/`/g + +# The way that semi-external references are rendered (i.e. ones to +# sibling schema files, as opposed to separate website ones, is not nice +# for humans. Replace it with a nice relative link within the manual +# instead. +# +# As we have more such relative links, more replacements of this nature +# should appear below. 
+s^\(./hash-v1.yaml\)\?#/$defs/algorithm^[JSON format for `Hash`](./hash.html#algorithm)^g diff --git a/doc/manual/source/protocols/json/hash.md b/doc/manual/source/protocols/json/hash.md new file mode 100644 index 000000000..d2bdf1062 --- /dev/null +++ b/doc/manual/source/protocols/json/hash.md @@ -0,0 +1,7 @@ +{{#include hash-v1-fixed.md}} + + diff --git a/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml b/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml new file mode 100644 index 000000000..cad098053 --- /dev/null +++ b/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml @@ -0,0 +1,17 @@ +# Configuration file for json-schema-for-humans +# +# https://github.com/coveooss/json-schema-for-humans/blob/main/docs/examples/examples_md_default/Configuration.md + +template_name: md +show_toc: true +# impure timestamp and distracting +with_footer: false +recursive_detection_depth: 3 +show_breadcrumbs: false +description_is_markdown: true +template_md_options: + properties_table_columns: + - Property + - Type + - Pattern + - Title/Description diff --git a/doc/manual/source/protocols/json/meson.build b/doc/manual/source/protocols/json/meson.build new file mode 100644 index 000000000..44795599c --- /dev/null +++ b/doc/manual/source/protocols/json/meson.build @@ -0,0 +1,74 @@ +# Tests in: ../../../../src/json-schema-checks + +fs = import('fs') + +# Find json-schema-for-humans if available +json_schema_for_humans = find_program('generate-schema-doc', required : false) + +# Configuration for json-schema-for-humans +json_schema_config = files('json-schema-for-humans-config.yaml') + +schemas = [ + 'hash-v1', + 'derivation-v3', +] + +schema_files = files() +foreach schema_name : schemas + schema_files += files('schema' / schema_name + '.yaml') +endforeach + + +schema_outputs = [] +foreach schema_name : schemas + schema_outputs += schema_name + '.md' +endforeach + +json_schema_generated_files = [] + +# Generate markdown 
documentation from JSON schema +# Note: output must be just a filename, not a path +gen_file = custom_target( + schema_name + '-schema-docs.tmp', + command : [ + json_schema_for_humans, + '--config-file', + json_schema_config, + meson.current_source_dir() / 'schema', + meson.current_build_dir(), + ], + input : schema_files + [ + json_schema_config, + ], + output : schema_outputs, + capture : false, + build_by_default : true, +) + +idx = 0 +if json_schema_for_humans.found() + foreach schema_name : schemas + #schema_file = 'schema' / schema_name + '.yaml' + + # There is one so-so hack, and one horrible hack being done here. + sedded_file = custom_target( + schema_name + '-schema-docs', + command : [ + 'sed', + '-f', + # Out of line to avoid https://github.com/mesonbuild/meson/issues/1564 + files('fixup-json-schema-generated-doc.sed'), + '@INPUT@', + ], + capture : true, + input : gen_file[idx], + output : schema_name + '-fixed.md', + ) + idx += 1 + json_schema_generated_files += [ sedded_file ] + endforeach +else + warning( + 'json-schema-for-humans not found, skipping JSON schema documentation generation', + ) +endif diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml new file mode 100644 index 000000000..e80f24e9f --- /dev/null +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -0,0 +1,164 @@ +"$schema": http://json-schema.org/draft-04/schema# +"$id": https://nix.dev/manual/nix/latest/protocols/json/schema/derivation-v3.json +title: Derivation +description: | + Experimental JSON representation of a Nix derivation (version 3). + + This schema describes the JSON representation of Nix's `Derivation` type. + + > **Warning** + > + > This JSON format is currently + > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > and subject to change. 
+ +type: object +required: + - name + - version + - outputs + - inputSrcs + - inputDrvs + - system + - builder + - args + - env +properties: + name: + type: string + description: | + The name of the derivation. + Used when calculating store paths for the derivation’s outputs. + + version: + const: 3 + description: | + Must be `3`. + This is a guard that allows us to continue evolving this format. + The choice of `3` is fairly arbitrary, but corresponds to this informal version: + + - Version 0: A-Term format + + - Version 1: Original JSON format, with ugly `"r:sha256"` inherited from A-Term format. + + - Version 2: Separate `method` and `hashAlgo` fields in output specs + + - Version 3: Drop store dir from store paths, just include base name. + + Note that while this format is experimental, the maintenance of versions is best-effort, and not promised to identify every change. + + outputs: + type: object + description: | + Information about the output paths of the derivation. + This is a JSON object with one member per output, where the key is the output name and the value is a JSON object as described. + + > **Example** + > + > ```json + > "outputs": { + > "out": { + > "method": "nar", + > "hashAlgo": "sha256", + > "hash": "6fc80dcc62179dbc12fc0b5881275898f93444833d21b89dfe5f7fbcbb1d0d62" + > } + > } + > ``` + additionalProperties: + "$ref": "#/$defs/output" + + inputSrcs: + type: array + description: | + List of store paths on which this derivation depends. + + > **Example** + > + > ```json + > "inputSrcs": [ + > "47y241wqdhac3jm5l7nv0x4975mb1975-separate-debug-info.sh", + > "56d0w71pjj9bdr363ym3wj1zkwyqq97j-fix-pop-var-context-error.patch" + > ] + > ``` + items: + type: string + + inputDrvs: + type: object + description: | + Mapping of derivation paths to lists of output names they provide. 
+ + > **Example** + > + > ```json + > "inputDrvs": { + > "6lkh5yi7nlb7l6dr8fljlli5zfd9hq58-curl-7.73.0.drv": ["dev"], + > "fn3kgnfzl5dzym26j8g907gq3kbm8bfh-unzip-6.0.drv": ["out"] + > } + > ``` + > + > specifies that this derivation depends on the `dev` output of `curl`, and the `out` output of `unzip`. + + system: + type: string + description: | + The system type on which this derivation is to be built + (e.g. `x86_64-linux`). + + builder: + type: string + description: | + Absolute path of the program used to perform the build. + Typically this is the `bash` shell + (e.g. `/nix/store/r3j288vpmczbl500w6zz89gyfa4nr0b1-bash-4.4-p23/bin/bash`). + + args: + type: array + description: | + Command-line arguments passed to the `builder`. + items: + type: string + + env: + type: object + description: | + Environment variables passed to the `builder`. + additionalProperties: + type: string + + structuredAttrs: + description: | + [Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. + Structured attributes are JSON, and thus embedded as-is. + type: object + additionalProperties: true + +"$defs": + output: + type: object + properties: + path: + type: string + description: | + The output path, if known in advance. + + method: + type: string + enum: [flat, nar, text, git] + description: | + For an output which will be [content addressed](@docroot@/store/derivation/outputs/content-address.md), a string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. 
+ + Valid method strings are: + + - [`flat`](@docroot@/store/store-object/content-address.md#method-flat) + - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive) + - [`text`](@docroot@/store/store-object/content-address.md#method-text) + - [`git`](@docroot@/store/store-object/content-address.md#method-git) + + hashAlgo: + "$ref": "./hash-v1.yaml#/$defs/algorithm" + + hash: + type: string + description: | + For fixed-output derivations, the expected content hash in base-16. diff --git a/doc/manual/source/protocols/json/schema/hash-v1.yaml b/doc/manual/source/protocols/json/schema/hash-v1.yaml new file mode 100644 index 000000000..b258a90c7 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/hash-v1.yaml @@ -0,0 +1,29 @@ +"$schema": http://json-schema.org/draft-04/schema# +"$id": https://nix.dev/manual/nix/latest/protocols/json/schema/hash-v1.json +title: Hash +description: | + A cryptographic hash value used throughout Nix for content addressing and integrity verification. + + This schema describes the JSON representation of Nix's `Hash` type. + + TODO Work in progress +type: object +properties: + algorithm: + "$ref": "#/$defs/algorithm" +required: +- algorithm +additionalProperties: false +"$defs": + algorithm: + type: string + enum: + - blake3 + - md5 + - sha1 + - sha256 + - sha512 + description: | + The hash algorithm used to compute the hash value. + + `blake3` is currently experimental and requires the [`blake-hashing`](@docroot@/development/experimental-features.md#xp-feature-blake-hashing) experimental feature. 
diff --git a/doc/manual/source/protocols/meson.build b/doc/manual/source/protocols/meson.build new file mode 100644 index 000000000..5b5eb900d --- /dev/null +++ b/doc/manual/source/protocols/meson.build @@ -0,0 +1,2 @@ +# Process JSON schema documentation +subdir('json') diff --git a/flake.nix b/flake.nix index fd623c807..8d3d963be 100644 --- a/flake.nix +++ b/flake.nix @@ -413,6 +413,10 @@ supportsCross = false; }; + "nix-json-schema-checks" = { + supportsCross = false; + }; + "nix-perl-bindings" = { supportsCross = false; }; diff --git a/meson.build b/meson.build index 736756157..f3158ea6d 100644 --- a/meson.build +++ b/meson.build @@ -60,3 +60,4 @@ if get_option('unit-tests') subproject('libflake-tests') endif subproject('nix-functional-tests') +subproject('json-schema-checks') diff --git a/packaging/components.nix b/packaging/components.nix index c621b7073..f9d7b109a 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -438,6 +438,11 @@ in */ nix-external-api-docs = callPackage ../src/external-api-docs/package.nix { version = fineVersion; }; + /** + JSON schema validation checks + */ + nix-json-schema-checks = callPackage ../src/json-schema-checks/package.nix { }; + nix-perl-bindings = callPackage ../src/perl/package.nix { }; /** diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 5fb4f14d2..153e7a3eb 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -108,6 +108,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs + ++ pkgs.nixComponents2.nix-json-schema-checks.externalNativeBuildInputs ++ lib.optional ( !buildCanExecuteHost # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 diff --git a/packaging/hydra.nix b/packaging/hydra.nix index bc75b5dfb..3bbb6c15b 100644 --- 
a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -62,6 +62,7 @@ let "nix-cmd" "nix-cli" "nix-functional-tests" + "nix-json-schema-checks" ] ++ lib.optionals enableBindings [ "nix-perl-bindings" diff --git a/src/json-schema-checks/.version b/src/json-schema-checks/.version new file mode 120000 index 000000000..b7badcd0c --- /dev/null +++ b/src/json-schema-checks/.version @@ -0,0 +1 @@ +../../.version \ No newline at end of file diff --git a/src/json-schema-checks/derivation b/src/json-schema-checks/derivation new file mode 120000 index 000000000..3dc1cbe06 --- /dev/null +++ b/src/json-schema-checks/derivation @@ -0,0 +1 @@ +../../src/libstore-tests/data/derivation \ No newline at end of file diff --git a/src/json-schema-checks/meson.build b/src/json-schema-checks/meson.build new file mode 100644 index 000000000..5326ad4c6 --- /dev/null +++ b/src/json-schema-checks/meson.build @@ -0,0 +1,76 @@ +# Run with: +# meson test --suite json-schema +# Run with: (without shell / configure) +# nix build .#nix-json-schema-checks + +project( + 'nix-json-schema-checks', + version : files('.version'), + meson_version : '>= 1.1', + license : 'LGPL-2.1-or-later', +) + +fs = import('fs') + +# Note: The 'jsonschema' package provides the 'jv' command +jv = find_program('jv', required : true) + +# The schema directory is a committed symlink to the actual schema location +schema_dir = meson.current_source_dir() / 'schema' + +# Get all example files +schemas = [ + { + 'stem' : 'derivation', + 'schema' : schema_dir / 'derivation-v3.yaml', + 'files' : [ + 'dyn-dep-derivation.json', + 'simple-derivation.json', + ], + }, + # # Not sure how to make subschema work + # { + # 'stem': 'derivation', + # 'schema': schema_dir / 'derivation-v3.yaml#output', + # 'files' : [ + # 'output-caFixedFlat.json', + # 'output-caFixedNAR.json', + # 'output-caFixedText.json', + # 'output-caFloating.json', + # 'output-deferred.json', + # 'output-impure.json', + # 'output-inputAddressed.json', + # ], + # }, +] + +# 
Validate each example against the schema +foreach schema : schemas + stem = schema['stem'] + schema_file = schema['schema'] + if '#' not in schema_file + # Validate the schema itself against JSON Schema Draft 04 + test( + stem + '-schema-valid', + jv, + args : [ + '--map', + './hash-v1.yaml=' + schema_dir / 'hash-v1.yaml', + 'http://json-schema.org/draft-04/schema', + schema_file, + ], + suite : 'json-schema', + ) + endif + foreach example : schema['files'] + test( + stem + '-example-' + fs.stem(example), + jv, + args : [ + schema_file, + files(stem / example), + ], + suite : 'json-schema', + ) + endforeach +endforeach diff --git a/src/json-schema-checks/package.nix b/src/json-schema-checks/package.nix new file mode 100644 index 000000000..2061672cd --- /dev/null +++ b/src/json-schema-checks/package.nix @@ -0,0 +1,50 @@ +# Run with: nix build .#nix-json-schema-checks +{ + lib, + mkMesonDerivation, + + meson, + ninja, + jsonschema, + + # Configuration Options + + version, +}: + +mkMesonDerivation (finalAttrs: { + pname = "nix-json-schema-checks"; + inherit version; + + workDir = ./.; + fileset = lib.fileset.unions [ + ../../.version + ../../doc/manual/source/protocols/json/schema + ../../src/libstore-tests/data/derivation + ./. 
+ ]; + + outputs = [ "out" ]; + + passthru.externalNativeBuildInputs = [ + jsonschema + ]; + + nativeBuildInputs = [ + meson + ninja + ] + ++ finalAttrs.passthru.externalNativeBuildInputs; + + doCheck = true; + + mesonCheckFlags = [ "--print-errorlogs" ]; + + postInstall = '' + touch $out + ''; + + meta = { + platforms = lib.platforms.all; + }; +}) diff --git a/src/json-schema-checks/schema b/src/json-schema-checks/schema new file mode 120000 index 000000000..473e47b1b --- /dev/null +++ b/src/json-schema-checks/schema @@ -0,0 +1 @@ +../../doc/manual/source/protocols/json/schema \ No newline at end of file diff --git a/src/nix/derivation-add.md b/src/nix/derivation-add.md index 35507d9ad..4e37c4e6f 100644 --- a/src/nix/derivation-add.md +++ b/src/nix/derivation-add.md @@ -12,8 +12,7 @@ a Nix expression evaluates. [store derivation]: @docroot@/glossary.md#gloss-store-derivation -`nix derivation add` takes a single derivation in the following format: - -{{#include ../../protocols/json/derivation.md}} +`nix derivation add` takes a single derivation in the JSON format. +See [the manual](@docroot@/protocols/json/derivation.md) for a documentation of this format. )"" diff --git a/src/nix/derivation-show.md b/src/nix/derivation-show.md index 9fff58ef9..1784be44c 100644 --- a/src/nix/derivation-show.md +++ b/src/nix/derivation-show.md @@ -48,10 +48,9 @@ By default, this command only shows top-level derivations, but with [store derivation]: @docroot@/glossary.md#gloss-store-derivation -`nix derivation show` outputs a JSON map of [store path]s to derivations in the following format: +`nix derivation show` outputs a JSON map of [store path]s to derivations in JSON format. +See [the manual](@docroot@/protocols/json/derivation.md) for a documentation of this format. 
[store path]: @docroot@/store/store-path.md -{{#include ../../protocols/json/derivation.md}} - )"" From bcd5a9d05ce6faf8da520e8423ad832c332eed35 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 17 Oct 2025 00:56:53 +0300 Subject: [PATCH 233/373] libutil: Drop unused SubdirSourceAccessor --- .../include/nix/util/source-accessor.hh | 6 -- src/libutil/meson.build | 1 - src/libutil/subdir-source-accessor.cc | 59 ------------------- 3 files changed, 66 deletions(-) delete mode 100644 src/libutil/subdir-source-accessor.cc diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 671444e6f..1006895b3 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -241,10 +241,4 @@ ref makeFSSourceAccessor(std::filesystem::path root); */ ref makeUnionSourceAccessor(std::vector> && accessors); -/** - * Creates a new source accessor which is confined to the subdirectory - * of the given source accessor. 
- */ -ref projectSubdirSourceAccessor(ref, CanonPath subdirectory); - } // namespace nix diff --git a/src/libutil/meson.build b/src/libutil/meson.build index f4b8dbb61..acba0b81b 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -156,7 +156,6 @@ sources = [ config_priv_h ] + files( 'source-accessor.cc', 'source-path.cc', 'strings.cc', - 'subdir-source-accessor.cc', 'suggestions.cc', 'tarfile.cc', 'tee-logger.cc', diff --git a/src/libutil/subdir-source-accessor.cc b/src/libutil/subdir-source-accessor.cc deleted file mode 100644 index d4f57e2f7..000000000 --- a/src/libutil/subdir-source-accessor.cc +++ /dev/null @@ -1,59 +0,0 @@ -#include "nix/util/source-accessor.hh" - -namespace nix { - -struct SubdirSourceAccessor : SourceAccessor -{ - ref parent; - - CanonPath subdirectory; - - SubdirSourceAccessor(ref && parent, CanonPath && subdirectory) - : parent(std::move(parent)) - , subdirectory(std::move(subdirectory)) - { - displayPrefix.clear(); - } - - std::string readFile(const CanonPath & path) override - { - return parent->readFile(subdirectory / path); - } - - void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override - { - return parent->readFile(subdirectory / path, sink, sizeCallback); - } - - bool pathExists(const CanonPath & path) override - { - return parent->pathExists(subdirectory / path); - } - - std::optional maybeLstat(const CanonPath & path) override - { - return parent->maybeLstat(subdirectory / path); - } - - DirEntries readDirectory(const CanonPath & path) override - { - return parent->readDirectory(subdirectory / path); - } - - std::string readLink(const CanonPath & path) override - { - return parent->readLink(subdirectory / path); - } - - std::string showPath(const CanonPath & path) override - { - return displayPrefix + parent->showPath(subdirectory / path) + displaySuffix; - } -}; - -ref projectSubdirSourceAccessor(ref parent, CanonPath subdirectory) -{ - return make_ref(std::move(parent), 
std::move(subdirectory)); -} - -} // namespace nix From a80fc252e8dc56e991322f0871490ef264071c3e Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 17 Oct 2025 01:10:46 +0300 Subject: [PATCH 234/373] libstore/meson: Require curl >= 7.75.0 This version has been released a long time ago in 2021 and it's doubtful that anybody actually uses it still, since it's full of vulnerabilities [^] [^]: https://curl.se/docs/vuln-7.75.0.html --- src/libstore/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 40da06e6b..a0061eb9b 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -113,7 +113,7 @@ boost = dependency( # put in `deps_other`. deps_other += boost -curl = dependency('libcurl', 'curl') +curl = dependency('libcurl', 'curl', version : '>= 7.75.0') deps_private += curl # seccomp only makes sense on Linux From ffbc33fec610fe97a795edc2bee481252dbc902b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 17 Oct 2025 01:18:46 +0300 Subject: [PATCH 235/373] libstore/meson: Rename curl-s3-store to s3-aws-auth We now unconditionally compile support for s3:// URLs and stores without authentication. The whole curl version check can be greatly simplified by the previous commit, which bumps the minimum required curl version. 
--- src/libstore/meson.build | 21 +++++---------------- src/libstore/meson.options | 5 ++--- src/libstore/package.nix | 2 +- 3 files changed, 8 insertions(+), 20 deletions(-) diff --git a/src/libstore/meson.build b/src/libstore/meson.build index a0061eb9b..d1b3666cc 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -142,27 +142,16 @@ deps_public += nlohmann_json sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19') deps_private += sqlite -# Curl-based S3 store support -# Check if curl supports AWS SigV4 (requires >= 7.75.0) -curl_supports_aws_sigv4 = curl.version().version_compare('>= 7.75.0') -# AWS CRT C++ for lightweight credential management -aws_crt_cpp = cxx.find_library('aws-crt-cpp', required : false) +s3_aws_auth = get_option('s3-aws-auth') +aws_crt_cpp = cxx.find_library('aws-crt-cpp', required : s3_aws_auth) -curl_s3_store_opt = get_option('curl-s3-store').require( - curl_supports_aws_sigv4, - error_message : 'curl-based S3 support requires curl >= 7.75.0', -).require( - aws_crt_cpp.found(), - error_message : 'curl-based S3 support requires aws-crt-cpp', -) - -if curl_s3_store_opt.enabled() +if s3_aws_auth.enabled() deps_other += aws_crt_cpp aws_c_common = cxx.find_library('aws-c-common', required : true) deps_other += aws_c_common endif -configdata_pub.set('NIX_WITH_AWS_AUTH', curl_s3_store_opt.enabled().to_int()) +configdata_pub.set('NIX_WITH_AWS_AUTH', s3_aws_auth.enabled().to_int()) subdir('nix-meson-build-support/generate-header') @@ -346,7 +335,7 @@ sources = files( ) # AWS credentials code requires AWS CRT, so only compile when enabled -if curl_s3_store_opt.enabled() +if s3_aws_auth.enabled() sources += files('aws-creds.cc') endif diff --git a/src/libstore/meson.options b/src/libstore/meson.options index edc43bd45..c822133df 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -35,8 +35,7 @@ option( ) option( - 'curl-s3-store', + 's3-aws-auth', type : 'feature', - value : 'disabled', - 
description : 'Enable curl-based S3 binary cache store support (requires aws-crt-cpp and curl >= 7.75.0)', + description : 'build support for AWS authentication with S3', ) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 897662e11..ddad077ce 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -75,7 +75,7 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) - (lib.mesonEnable "curl-s3-store" withAWS) + (lib.mesonEnable "s3-aws-auth" withAWS) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") From 64c55961eb52d298e0643481c31fd178a59f5cd4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 16 Oct 2025 16:16:54 -0700 Subject: [PATCH 236/373] Merge pull request #14273 from fzakaria/fzakaria/issue-13944 Make `nix nar [cat|ls]` lazy --- .../include/nix/store/nar-accessor.hh | 7 +++- src/libstore/nar-accessor.cc | 34 +++++++++++++++---- src/libstore/remote-fs-accessor.cc | 22 ++---------- src/nix/cat.cc | 10 +++++- src/nix/ls.cc | 6 +++- 5 files changed, 50 insertions(+), 29 deletions(-) diff --git a/src/libstore/include/nix/store/nar-accessor.hh b/src/libstore/include/nix/store/nar-accessor.hh index 0e69d436e..bfba5da73 100644 --- a/src/libstore/include/nix/store/nar-accessor.hh +++ b/src/libstore/include/nix/store/nar-accessor.hh @@ -27,7 +27,12 @@ ref makeNarAccessor(Source & source); */ using GetNarBytes = std::function; -ref makeLazyNarAccessor(const std::string & listing, GetNarBytes getNarBytes); +/** + * The canonical GetNarBytes function for a seekable Source. 
+ */ +GetNarBytes seekableGetNarBytes(const Path & path); + +ref makeLazyNarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes); /** * Write a JSON representation of the contents of a NAR (except file diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 63fe774c9..f0882d52d 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -141,14 +141,14 @@ struct NarAccessor : public SourceAccessor parseDump(indexer, indexer); } - NarAccessor(const std::string & listing, GetNarBytes getNarBytes) + NarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes) : getNarBytes(getNarBytes) { using json = nlohmann::json; - std::function recurse; + std::function recurse; - recurse = [&](NarMember & member, json & v) { + recurse = [&](NarMember & member, const json & v) { std::string type = v["type"]; if (type == "directory") { @@ -169,8 +169,7 @@ struct NarAccessor : public SourceAccessor return; }; - json v = json::parse(listing); - recurse(root, v); + recurse(root, listing); } NarMember * find(const CanonPath & path) @@ -251,11 +250,34 @@ ref makeNarAccessor(Source & source) return make_ref(source); } -ref makeLazyNarAccessor(const std::string & listing, GetNarBytes getNarBytes) +ref makeLazyNarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes) { return make_ref(listing, getNarBytes); } +GetNarBytes seekableGetNarBytes(const Path & path) +{ + return [path](uint64_t offset, uint64_t length) { + AutoCloseFD fd = toDescriptor(open( + path.c_str(), + O_RDONLY +#ifndef _WIN32 + | O_CLOEXEC +#endif + )); + if (!fd) + throw SysError("opening NAR cache file '%s'", path); + + if (lseek(fromDescriptorReadOnly(fd.get()), offset, SEEK_SET) != (off_t) offset) + throw SysError("seeking in '%s'", path); + + std::string buf(length, 0); + readFull(fd.get(), buf.data(), length); + + return buf; + }; +} + using nlohmann::json; json listNar(ref accessor, const CanonPath & path, bool recurse) diff --git 
a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc index e6715cbdf..f7ca28ae2 100644 --- a/src/libstore/remote-fs-accessor.cc +++ b/src/libstore/remote-fs-accessor.cc @@ -70,26 +70,8 @@ std::shared_ptr RemoteFSAccessor::accessObject(const StorePath & try { listing = nix::readFile(makeCacheFile(storePath.hashPart(), "ls")); - - auto narAccessor = makeLazyNarAccessor(listing, [cacheFile](uint64_t offset, uint64_t length) { - AutoCloseFD fd = toDescriptor(open( - cacheFile.c_str(), - O_RDONLY -#ifndef _WIN32 - | O_CLOEXEC -#endif - )); - if (!fd) - throw SysError("opening NAR cache file '%s'", cacheFile); - - if (lseek(fromDescriptorReadOnly(fd.get()), offset, SEEK_SET) != (off_t) offset) - throw SysError("seeking in '%s'", cacheFile); - - std::string buf(length, 0); - readFull(fd.get(), buf.data(), length); - - return buf; - }); + auto listingJson = nlohmann::json::parse(listing); + auto narAccessor = makeLazyNarAccessor(listingJson, seekableGetNarBytes(cacheFile)); nars.emplace(storePath.hashPart(), narAccessor); return narAccessor; diff --git a/src/nix/cat.cc b/src/nix/cat.cc index effe544e6..5b93d560b 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -1,6 +1,10 @@ #include "nix/cmd/command.hh" #include "nix/store/store-api.hh" #include "nix/store/nar-accessor.hh" +#include "nix/util/serialise.hh" +#include "nix/util/source-accessor.hh" + +#include using namespace nix; @@ -71,7 +75,11 @@ struct CmdCatNar : StoreCommand, MixCat void run(ref store) override { - cat(makeNarAccessor(readFile(narPath)), CanonPath{path}); + AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + auto source = FdSource{fd.get()}; + auto narAccessor = makeNarAccessor(source); + auto listing = listNar(narAccessor, CanonPath::root, true); + cat(makeLazyNarAccessor(listing, seekableGetNarBytes(narPath)), CanonPath{path}); } }; diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 5cdfc2c0f..846af246d 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -145,7 +145,11 @@ struct 
CmdLsNar : Command, MixLs void run() override { - list(makeNarAccessor(readFile(narPath)), CanonPath{path}); + AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + auto source = FdSource{fd.get()}; + auto narAccessor = makeNarAccessor(source); + auto listing = listNar(narAccessor, CanonPath::root, true); + list(makeLazyNarAccessor(listing, seekableGetNarBytes(narPath)), CanonPath{path}); } }; From e457ea768880149b259429c836b2260f0a2c6b8f Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 17 Oct 2025 02:26:24 +0300 Subject: [PATCH 237/373] nix {cat,ls}: Add back missing checks for file descriptors I didn't catch this during the review of https://github.com/NixOS/nix/pull/14273. This fixes that mistake. --- src/nix/cat.cc | 2 ++ src/nix/ls.cc | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 5b93d560b..1284b50fd 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -76,6 +76,8 @@ struct CmdCatNar : StoreCommand, MixCat void run(ref store) override { AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + if (!fd) + throw SysError("opening NAR file '%s'", narPath); auto source = FdSource{fd.get()}; auto narAccessor = makeNarAccessor(source); auto listing = listNar(narAccessor, CanonPath::root, true); diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 846af246d..82721222e 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -146,6 +146,8 @@ struct CmdLsNar : Command, MixLs void run() override { AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + if (!fd) + throw SysError("opening NAR file '%s'", narPath); auto source = FdSource{fd.get()}; auto narAccessor = makeNarAccessor(source); auto listing = listNar(narAccessor, CanonPath::root, true); From 20c7c551bfdd5d99c477c4c04b4f5271bec4e285 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 17 Oct 2025 16:42:37 +0000 Subject: [PATCH 238/373] fix(tests/functional/repl): skip test if stack size limit is insufficient Nix attempts to set the stack size to 64 MB during 
initialization, which is required for the repl tests to run successfully. Skip the tests on systems where the hard stack limit is less than this value rather than failing. --- tests/functional/repl.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index bfe18c9e5..aeff43d30 100755 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -25,6 +25,13 @@ import $testDir/undefined-variable.nix TODO_NixOS +# FIXME: repl tests fail on systems with stack limits +stack_ulimit="$(ulimit -Hs)" +stack_required="$((64 * 1024 * 1024))" +if [[ "$stack_ulimit" != "unlimited" ]]; then + ((stack_ulimit < stack_required)) && skipTest "repl tests cannot run on systems with stack size <$stack_required ($stack_ulimit)" +fi + testRepl () { local nixArgs nixArgs=("$@") From 109f6449cc782efae84f71bc68fe5ead1c752e3e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 17 Oct 2025 20:23:20 +0200 Subject: [PATCH 239/373] nix store dump-path: Refuse to write NARs to the terminal --- src/nix/dump-path.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index 8475655e9..f375b0ac8 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -4,6 +4,14 @@ using namespace nix; +static FdSink getNarSink() +{ + auto fd = getStandardOutput(); + if (isatty(fd)) + throw UsageError("refusing to write NAR to a terminal"); + return FdSink(std::move(fd)); +} + struct CmdDumpPath : StorePathCommand { std::string description() override @@ -20,7 +28,7 @@ struct CmdDumpPath : StorePathCommand void run(ref store, const StorePath & storePath) override { - FdSink sink(getStandardOutput()); + auto sink = getNarSink(); store->narFromPath(storePath, sink); sink.flush(); } @@ -51,7 +59,7 @@ struct CmdDumpPath2 : Command void run() override { - FdSink sink(getStandardOutput()); + auto sink = getNarSink(); dumpPath(path, sink); sink.flush(); } From 
daa7e0d2e967f455cbf6285f31c5307be49545cd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 17 Oct 2025 13:56:17 +0200 Subject: [PATCH 240/373] Source: Add skip() method This allows FdSource to efficiently skip data we don't care about. --- src/libutil/include/nix/util/serialise.hh | 5 +++ src/libutil/serialise.cc | 48 +++++++++++++++++++++-- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/src/libutil/include/nix/util/serialise.hh b/src/libutil/include/nix/util/serialise.hh index 16e0d0fa5..8799e128f 100644 --- a/src/libutil/include/nix/util/serialise.hh +++ b/src/libutil/include/nix/util/serialise.hh @@ -97,6 +97,8 @@ struct Source void drainInto(Sink & sink); std::string drain(); + + virtual void skip(size_t len); }; /** @@ -177,6 +179,7 @@ struct FdSource : BufferedSource Descriptor fd; size_t read = 0; BackedStringView endOfFileError{"unexpected end-of-file"}; + bool isSeekable = true; FdSource() : fd(INVALID_DESCRIPTOR) @@ -200,6 +203,8 @@ struct FdSource : BufferedSource */ bool hasData(); + void skip(size_t len) override; + protected: size_t readUnbuffered(char * data, size_t len) override; private: diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 15629935e..bdce956f3 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -94,9 +94,8 @@ void Source::drainInto(Sink & sink) { std::array buf; while (true) { - size_t n; try { - n = read(buf.data(), buf.size()); + auto n = read(buf.data(), buf.size()); sink({buf.data(), n}); } catch (EndOfFile &) { break; @@ -111,6 +110,16 @@ std::string Source::drain() return std::move(s.s); } +void Source::skip(size_t len) +{ + std::array buf; + while (len) { + auto n = read(buf.data(), std::min(len, buf.size())); + assert(n <= len); + len -= n; + } +} + size_t BufferedSource::read(char * data, size_t len) { if (!buffer) @@ -120,7 +129,7 @@ size_t BufferedSource::read(char * data, size_t len) bufPosIn = readUnbuffered(buffer.get(), bufSize); /* Copy out the data in the 
buffer. */ - size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len; + auto n = std::min(len, bufPosIn - bufPosOut); memcpy(data, buffer.get() + bufPosOut, n); bufPosOut += n; if (bufPosIn == bufPosOut) @@ -191,6 +200,39 @@ bool FdSource::hasData() } } +void FdSource::skip(size_t len) +{ + /* Discard data in the buffer. */ + if (len && buffer && bufPosIn - bufPosOut) { + if (len >= bufPosIn - bufPosOut) { + len -= bufPosIn - bufPosOut; + bufPosIn = bufPosOut = 0; + } else { + bufPosOut += len; + len = 0; + } + } + +#ifndef _WIN32 + /* If we can, seek forward in the file to skip the rest. */ + if (isSeekable && len) { + if (lseek(fd, len, SEEK_CUR) == -1) { + if (errno == ESPIPE) + isSeekable = false; + else + throw SysError("seeking forward in file"); + } else { + read += len; + return; + } + } +#endif + + /* Otherwise, skip by reading. */ + if (len) + BufferedSource::skip(len); +} + size_t StringSource::read(char * data, size_t len) { if (pos == s.size()) From 67bffa19a533c5d2b562db367d1823166ca714b2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 17 Oct 2025 18:32:47 +0200 Subject: [PATCH 241/373] NullFileSystemObjectSink: Skip over file contents --- src/libutil/archive.cc | 7 ++++++- src/libutil/fs-sink.cc | 2 ++ src/libutil/include/nix/util/fs-sink.hh | 8 ++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 3d96df75e..b8fef9ef3 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -132,6 +132,11 @@ static void parseContents(CreateRegularFileSink & sink, Source & source) sink.preallocateContents(size); + if (sink.skipContents) { + source.skip(size + (size % 8 ? 
8 - (size % 8) : 0)); + return; + } + uint64_t left = size; std::array buf; @@ -166,7 +171,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath auto expectTag = [&](std::string_view expected) { auto tag = getString(); if (tag != expected) - throw badArchive("expected tag '%s', got '%s'", expected, tag); + throw badArchive("expected tag '%s', got '%s'", expected, tag.substr(0, 1024)); }; expectTag("("); diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index 6efd5e0c7..45ef57a9f 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -196,6 +196,8 @@ void NullFileSystemObjectSink::createRegularFile( void isExecutable() override {} } crf; + crf.skipContents = true; + // Even though `NullFileSystemObjectSink` doesn't do anything, it's important // that we call the function, to e.g. advance the parser using this // sink. diff --git a/src/libutil/include/nix/util/fs-sink.hh b/src/libutil/include/nix/util/fs-sink.hh index f96fe3ef9..bd2db7f53 100644 --- a/src/libutil/include/nix/util/fs-sink.hh +++ b/src/libutil/include/nix/util/fs-sink.hh @@ -14,6 +14,14 @@ namespace nix { */ struct CreateRegularFileSink : Sink { + /** + * If set to true, the sink will not be called with the contents + * of the file. `preallocateContents()` will still be called to + * convey the file size. Useful for sinks that want to efficiently + * discard the contents of the file. + */ + bool skipContents = false; + virtual void isExecutable() = 0; /** From c92ba4b9b7b63f24050c8e769a102380ea079e4f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 17 Oct 2025 21:50:31 +0200 Subject: [PATCH 242/373] Add titles in JSON schemas This way, the description isn't rendered in the tables of contents, leading to no more formatting errors. 
--- .../protocols/json/schema/derivation-v3.yaml | 14 ++++++++++++++ .../source/protocols/json/schema/hash-v1.yaml | 1 + 2 files changed, 15 insertions(+) diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml index e80f24e9f..7c92d475d 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v3.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -26,12 +26,14 @@ required: properties: name: type: string + title: Derivation name description: | The name of the derivation. Used when calculating store paths for the derivation’s outputs. version: const: 3 + title: Format version (must be 3) description: | Must be `3`. This is a guard that allows us to continue evolving this format. @@ -49,6 +51,7 @@ properties: outputs: type: object + title: Output specifications description: | Information about the output paths of the derivation. This is a JSON object with one member per output, where the key is the output name and the value is a JSON object as described. @@ -69,6 +72,7 @@ properties: inputSrcs: type: array + title: Input source paths description: | List of store paths on which this derivation depends. @@ -85,6 +89,7 @@ properties: inputDrvs: type: object + title: Input derivations description: | Mapping of derivation paths to lists of output names they provide. @@ -101,12 +106,14 @@ properties: system: type: string + title: Build system type description: | The system type on which this derivation is to be built (e.g. `x86_64-linux`). builder: type: string + title: Build program path description: | Absolute path of the program used to perform the build. Typically this is the `bash` shell @@ -114,6 +121,7 @@ properties: args: type: array + title: Builder arguments description: | Command-line arguments passed to the `builder`. 
items: @@ -121,12 +129,14 @@ properties: env: type: object + title: Environment variables description: | Environment variables passed to the `builder`. additionalProperties: type: string structuredAttrs: + title: Structured attributes description: | [Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. Structured attributes are JSON, and thus embedded as-is. @@ -139,11 +149,13 @@ properties: properties: path: type: string + title: Output path description: | The output path, if known in advance. method: type: string + title: Content addressing method enum: [flat, nar, text, git] description: | For an output which will be [content addressed](@docroot@/store/derivation/outputs/content-address.md), a string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. @@ -156,9 +168,11 @@ properties: - [`git`](@docroot@/store/store-object/content-address.md#method-git) hashAlgo: + title: Hash algorithm "$ref": "./hash-v1.yaml#/$defs/algorithm" hash: type: string + title: Expected hash value description: | For fixed-output derivations, the expected content hash in base-16. diff --git a/doc/manual/source/protocols/json/schema/hash-v1.yaml b/doc/manual/source/protocols/json/schema/hash-v1.yaml index b258a90c7..44a59541b 100644 --- a/doc/manual/source/protocols/json/schema/hash-v1.yaml +++ b/doc/manual/source/protocols/json/schema/hash-v1.yaml @@ -10,6 +10,7 @@ description: | type: object properties: algorithm: + title: Hash algorithm "$ref": "#/$defs/algorithm" required: - algorithm From 61fbef42a6eeae7553f148f1759c5a770a2f65aa Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 18 Oct 2025 18:47:27 +0300 Subject: [PATCH 243/373] libstore: Simplify check for S3-specific URI query parameters Instead of hardcoding strings we should instead use the setting objects to determine the query names that should be preserved. 
--- .../include/nix/store/s3-binary-cache-store.hh | 8 ++++++-- src/libstore/s3-binary-cache-store.cc | 15 +++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index c8cb967c1..e5fcbeda3 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -21,8 +21,6 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig Nix uses the `default` profile. )"}; -public: - const Setting region{ this, "us-east-1", @@ -63,6 +61,12 @@ public: > addressing instead of virtual host based addressing. )"}; + /** + * Set of settings that are part of the S3 URI itself. + * These are needed for region specification and other S3-specific settings. + */ + const std::set s3UriSettings = {&profile, ®ion, &scheme, &endpoint}; + static const std::string name() { return "S3 Binary Cache Store"; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index a84ea5fcb..ac08a4982 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -1,10 +1,10 @@ #include "nix/store/s3-binary-cache-store.hh" - -#include - #include "nix/store/http-binary-cache-store.hh" #include "nix/store/store-registration.hh" +#include +#include + namespace nix { StringSet S3BinaryCacheStoreConfig::uriSchemes() @@ -17,14 +17,13 @@ S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig( : StoreConfig(params) , HttpBinaryCacheStoreConfig(scheme, _cacheUri, params) { - // For S3 stores, preserve S3-specific query parameters as part of the URL - // These are needed for region specification and other S3-specific settings assert(cacheUri.query.empty()); + assert(cacheUri.scheme == "s3"); - // Only copy S3-specific parameters to the URL query - static const std::set s3Params = {"region", "endpoint", "profile", "scheme"}; for (const auto & [key, 
value] : params) { - if (s3Params.contains(key)) { + auto s3Params = + std::views::transform(s3UriSettings, [](const AbstractSetting * setting) { return setting->name; }); + if (std::ranges::contains(s3Params, key)) { cacheUri.query[key] = value; } } From 3d147c04a5f9d03e1696fb25b495a077885d2cf7 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 18 Oct 2025 19:11:39 +0300 Subject: [PATCH 244/373] libstore: Implement getHumanReadableURI for S3BinaryCacheStoreConfig This slightly improves the logs situation by including the region/profile/endpoint in the logs when S3 store references get printed. Instead of: copying path '/nix/store/lxnp9cs4cfh2g9r2bs4z7gwwz9kdj2r9-test-package-c' to 's3://bucketname'... This now includes: copying path '/nix/store/lxnp9cs4cfh2g9r2bs4z7gwwz9kdj2r9-test-package-c' to 's3://bucketname?endpoint=http://server:9000&region=eu-west-1'... --- .../include/nix/store/s3-binary-cache-store.hh | 2 ++ src/libstore/s3-binary-cache-store.cc | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index e5fcbeda3..288ca41a0 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -75,6 +75,8 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig static StringSet uriSchemes(); static std::string doc(); + + std::string getHumanReadableURI() const override; }; } // namespace nix diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index ac08a4982..0b37ac5d7 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -29,6 +29,19 @@ S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig( } } +std::string S3BinaryCacheStoreConfig::getHumanReadableURI() const +{ + auto reference = getReference(); + reference.params = [&]() { + Params relevantParams; + for (auto & setting :
s3UriSettings) + if (setting->overridden) + relevantParams.insert({setting->name, reference.params.at(setting->name)}); + return relevantParams; + }(); + return reference.render(); +} + std::string S3BinaryCacheStoreConfig::doc() { return R"( From 22f4cccc716abbb2ce58622bed699d3259bdd724 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:15:53 +0000 Subject: [PATCH 245/373] refactor(tests/nixos/s3-binary-cache-store): use a PKGS dict Replace individual PKG_A, PKG_B, and PKG_C variables with a PKGS dictionary. This will enable `@with_clean_client_store` in the future. --- tests/nixos/s3-binary-cache-store.nix | 46 ++++++++++++++------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 53d79689c..2d5c6c1c1 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -83,9 +83,11 @@ in ENDPOINT = 'http://server:9000' REGION = 'eu-west-1' - PKG_A = '${pkgA}' - PKG_B = '${pkgB}' - PKG_C = '${pkgC}' + PKGS = { + 'A': '${pkgA}', + 'B': '${pkgB}', + 'C': '${pkgC}', + } ENV_WITH_CREDS = f"AWS_ACCESS_KEY_ID={ACCESS_KEY} AWS_SECRET_ACCESS_KEY={SECRET_KEY}" @@ -168,7 +170,7 @@ in store_url = make_s3_url(bucket) output = server.succeed( f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' " - f"{PKG_A} {PKG_B} {PKG_C} 2>&1" + f"{PKGS['A']} {PKGS['B']} {PKGS['C']} 2>&1" ) assert_count( @@ -180,7 +182,7 @@ in print("✓ Credential provider created once and cached") - @with_test_bucket(populate_with=[PKG_A]) + @with_test_bucket(populate_with=[PKGS['A']]) def test_fetchurl_basic(bucket): """Test builtins.fetchurl works with s3:// URLs""" print("\n=== Testing builtins.fetchurl ===") @@ -216,7 +218,7 @@ in print("✓ Error messages format URLs correctly") - @with_test_bucket(populate_with=[PKG_A]) + @with_test_bucket(populate_with=[PKGS['A']]) def test_fork_credential_preresolution(bucket): """Test credential 
pre-resolution in forked processes""" print("\n=== Testing Fork Credential Pre-resolution ===") @@ -296,7 +298,7 @@ in print(" ✓ Child uses pre-resolved credentials (no new providers)") - @with_test_bucket(populate_with=[PKG_A, PKG_B, PKG_C]) + @with_test_bucket(populate_with=[PKGS['A'], PKGS['B'], PKGS['C']]) def test_store_operations(bucket): """Test nix store info and copy operations""" print("\n=== Testing Store Operations ===") @@ -316,11 +318,11 @@ in print(f" ✓ Store URL: {store_info['url']}") # Test copy from store - client.fail(f"nix path-info {PKG_A}") + client.fail(f"nix path-info {PKGS['A']}") output = client.succeed( f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs " - f"--from '{store_url}' {PKG_A} {PKG_B} {PKG_C} 2>&1" + f"--from '{store_url}' {PKGS['A']} {PKGS['B']} {PKGS['C']} 2>&1" ) assert_count( @@ -330,12 +332,12 @@ in "Client credential provider caching failed" ) - client.succeed(f"nix path-info {PKG_A}") + client.succeed(f"nix path-info {PKGS['A']}") print(" ✓ nix copy works") print(" ✓ Credentials cached on client") - @with_test_bucket(populate_with=[PKG_A]) + @with_test_bucket(populate_with=[PKGS['A']]) def test_url_format_variations(bucket): """Test different S3 URL parameter combinations""" print("\n=== Testing URL Format Variations ===") @@ -350,7 +352,7 @@ in client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") print(" ✓ Parameter order: endpoint before region works") - @with_test_bucket(populate_with=[PKG_A]) + @with_test_bucket(populate_with=[PKGS['A']]) def test_concurrent_fetches(bucket): """Validate thread safety with concurrent S3 operations""" print("\n=== Testing Concurrent Fetches ===") @@ -418,16 +420,16 @@ in print("\n=== Testing Compression: narinfo (gzip) ===") store_url = make_s3_url(bucket, **{'narinfo-compression': 'gzip'}) - server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_B}") + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['B']}") - pkg_hash = 
get_package_hash(PKG_B) + pkg_hash = get_package_hash(PKGS['B']) verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "gzip") print(" ✓ .narinfo has Content-Encoding: gzip") # Verify client can download and decompress - client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_B}") - client.succeed(f"nix path-info {PKG_B}") + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['B']}") + client.succeed(f"nix path-info {PKGS['B']}") print(" ✓ Client decompressed .narinfo successfully") @@ -441,9 +443,9 @@ in **{'narinfo-compression': 'xz', 'write-nar-listing': 'true', 'ls-compression': 'gzip'} ) - server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_C}") + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['C']}") - pkg_hash = get_package_hash(PKG_C) + pkg_hash = get_package_hash(PKGS['C']) # Verify .narinfo has xz compression verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "xz") @@ -454,8 +456,8 @@ in print(" ✓ .ls has Content-Encoding: gzip") # Verify client can download with mixed compression - client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_C}") - client.succeed(f"nix path-info {PKG_C}") + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['C']}") + client.succeed(f"nix path-info {PKGS['C']}") print(" ✓ Client downloaded package with mixed compression") @@ -465,9 +467,9 @@ in print("\n=== Testing Compression: disabled (default) ===") store_url = make_s3_url(bucket) - server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_A}") + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['A']}") - pkg_hash = get_package_hash(PKG_A) + pkg_hash = get_package_hash(PKGS['A']) verify_no_compression(server, bucket, f"{pkg_hash}.narinfo") print(" ✓ No compression applied by default") From c1a15d1a26479380069c6ba14be9a23573e32c0f Mon Sep 17 00:00:00 2001 From: 
Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:24:29 +0000 Subject: [PATCH 246/373] refactor(tests/nixos/s3-binary-cache-store): rename with_test_bucket to setup_s3 --- tests/nixos/s3-binary-cache-store.nix | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 2d5c6c1c1..5ed543d89 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -135,7 +135,7 @@ in print(output) raise Exception(f"{error_msg}: expected {expected}, got {actual}") - def with_test_bucket(populate_with=[]): + def setup_s3(populate_with=[]): """ Decorator that creates/destroys a unique bucket for each test. Optionally pre-populates bucket with specified packages. @@ -162,7 +162,7 @@ in # Test Functions # ============================================================================ - @with_test_bucket() + @setup_s3() def test_credential_caching(bucket): """Verify credential providers are cached and reused""" print("\n=== Testing Credential Caching ===") @@ -182,7 +182,7 @@ in print("✓ Credential provider created once and cached") - @with_test_bucket(populate_with=[PKGS['A']]) + @setup_s3(populate_with=[PKGS['A']]) def test_fetchurl_basic(bucket): """Test builtins.fetchurl works with s3:// URLs""" print("\n=== Testing builtins.fetchurl ===") @@ -198,7 +198,7 @@ in print("✓ builtins.fetchurl works with s3:// URLs") - @with_test_bucket() + @setup_s3() def test_error_message_formatting(bucket): """Verify error messages display URLs correctly""" print("\n=== Testing Error Message Formatting ===") @@ -218,7 +218,7 @@ in print("✓ Error messages format URLs correctly") - @with_test_bucket(populate_with=[PKGS['A']]) + @setup_s3(populate_with=[PKGS['A']]) def test_fork_credential_preresolution(bucket): """Test credential pre-resolution in forked processes""" print("\n=== Testing Fork Credential Pre-resolution ===") @@ -298,7 +298,7 @@ in print(" ✓ 
Child uses pre-resolved credentials (no new providers)") - @with_test_bucket(populate_with=[PKGS['A'], PKGS['B'], PKGS['C']]) + @setup_s3(populate_with=[PKGS['A'], PKGS['B'], PKGS['C']]) def test_store_operations(bucket): """Test nix store info and copy operations""" print("\n=== Testing Store Operations ===") @@ -337,7 +337,7 @@ in print(" ✓ nix copy works") print(" ✓ Credentials cached on client") - @with_test_bucket(populate_with=[PKGS['A']]) + @setup_s3(populate_with=[PKGS['A']]) def test_url_format_variations(bucket): """Test different S3 URL parameter combinations""" print("\n=== Testing URL Format Variations ===") @@ -352,7 +352,7 @@ in client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") print(" ✓ Parameter order: endpoint before region works") - @with_test_bucket(populate_with=[PKGS['A']]) + @setup_s3(populate_with=[PKGS['A']]) def test_concurrent_fetches(bucket): """Validate thread safety with concurrent S3 operations""" print("\n=== Testing Concurrent Fetches ===") @@ -414,7 +414,7 @@ in f"Expected 5 FileTransfer instances for 5 concurrent fetches, got {transfers_created}" ) - @with_test_bucket() + @setup_s3() def test_compression_narinfo_gzip(bucket): """Test narinfo compression with gzip""" print("\n=== Testing Compression: narinfo (gzip) ===") @@ -433,7 +433,7 @@ in print(" ✓ Client decompressed .narinfo successfully") - @with_test_bucket() + @setup_s3() def test_compression_mixed(bucket): """Test mixed compression (narinfo=xz, ls=gzip)""" print("\n=== Testing Compression: mixed (narinfo=xz, ls=gzip) ===") @@ -461,7 +461,7 @@ in print(" ✓ Client downloaded package with mixed compression") - @with_test_bucket() + @setup_s3() def test_compression_disabled(bucket): """Verify no compression by default""" print("\n=== Testing Compression: disabled (default) ===") From 9058d90ab2b81f77bbb844f9c783bb5a4c238d1d Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:27:03 +0000 Subject: [PATCH 247/373] 
refactor(tests/nixos/s3-binary-cache-store): rename populate_with to populate_bucket --- tests/nixos/s3-binary-cache-store.nix | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 5ed543d89..e7fcadb45 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -135,22 +135,22 @@ in print(output) raise Exception(f"{error_msg}: expected {expected}, got {actual}") - def setup_s3(populate_with=[]): + def setup_s3(populate_bucket=[]): """ Decorator that creates/destroys a unique bucket for each test. Optionally pre-populates bucket with specified packages. Args: - populate_with: List of packages to upload before test runs + populate_bucket: List of packages to upload before test runs """ def decorator(test_func): def wrapper(): bucket = str(uuid.uuid4()) server.succeed(f"mc mb minio/{bucket}") try: - if populate_with: + if populate_bucket: store_url = make_s3_url(bucket) - for pkg in populate_with: + for pkg in populate_bucket: server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {pkg}") test_func(bucket) finally: @@ -182,7 +182,7 @@ in print("✓ Credential provider created once and cached") - @setup_s3(populate_with=[PKGS['A']]) + @setup_s3(populate_bucket=[PKGS['A']]) def test_fetchurl_basic(bucket): """Test builtins.fetchurl works with s3:// URLs""" print("\n=== Testing builtins.fetchurl ===") @@ -218,7 +218,7 @@ in print("✓ Error messages format URLs correctly") - @setup_s3(populate_with=[PKGS['A']]) + @setup_s3(populate_bucket=[PKGS['A']]) def test_fork_credential_preresolution(bucket): """Test credential pre-resolution in forked processes""" print("\n=== Testing Fork Credential Pre-resolution ===") @@ -298,7 +298,7 @@ in print(" ✓ Child uses pre-resolved credentials (no new providers)") - @setup_s3(populate_with=[PKGS['A'], PKGS['B'], PKGS['C']]) + @setup_s3(populate_bucket=[PKGS['A'], PKGS['B'], 
PKGS['C']]) def test_store_operations(bucket): """Test nix store info and copy operations""" print("\n=== Testing Store Operations ===") @@ -337,7 +337,7 @@ in print(" ✓ nix copy works") print(" ✓ Credentials cached on client") - @setup_s3(populate_with=[PKGS['A']]) + @setup_s3(populate_bucket=[PKGS['A']]) def test_url_format_variations(bucket): """Test different S3 URL parameter combinations""" print("\n=== Testing URL Format Variations ===") @@ -352,7 +352,7 @@ in client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") print(" ✓ Parameter order: endpoint before region works") - @setup_s3(populate_with=[PKGS['A']]) + @setup_s3(populate_bucket=[PKGS['A']]) def test_concurrent_fetches(bucket): """Validate thread safety with concurrent S3 operations""" print("\n=== Testing Concurrent Fetches ===") From f88c3055f8b3786f0f95ffe26813d4f3cf093247 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:36:42 +0000 Subject: [PATCH 248/373] refactor(tests/nixos/s3-binary-cache-store): clean client store in setup_s3 Add cleanup of client store in the finally block of setup_s3 decorator. Uses `nix store delete --ignore-liveness` to properly handle GC roots and only attempts deletion if the path exists. --- tests/nixos/s3-binary-cache-store.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index e7fcadb45..68d123b51 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -139,6 +139,7 @@ in """ Decorator that creates/destroys a unique bucket for each test. Optionally pre-populates bucket with specified packages. + Cleans up client store after test completion. 
Args: populate_bucket: List of packages to upload before test runs @@ -155,6 +156,9 @@ in test_func(bucket) finally: server.succeed(f"mc rb --force minio/{bucket}") + # Clean up client store - only delete if path exists + for pkg in PKGS.values(): + client.succeed(f"[ ! -e {pkg} ] || nix store delete --ignore-liveness {pkg}") return wrapper return decorator From 4f19e63a8fa3f8079adce11a87374fa3fb8d2709 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:44:10 +0000 Subject: [PATCH 249/373] refactor(tests/nixos/s3-binary-cache-store): add --no-link to nix build commands Prevent creation of result symlinks in all nix build commands by adding the --no-link flag. --- tests/nixos/s3-binary-cache-store.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 68d123b51..d47273196 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -252,7 +252,7 @@ in """.format(id=test_id, url=test_url, hash=cache_info_hash) output = client.succeed( - f"{ENV_WITH_CREDS} nix build --debug --impure --expr '{fetchurl_expr}' 2>&1" + f"{ENV_WITH_CREDS} nix build --debug --impure --no-link --expr '{fetchurl_expr}' 2>&1" ) # Verify fork behavior @@ -392,12 +392,12 @@ in try: output = client.succeed( - f"{ENV_WITH_CREDS} nix build --debug --impure " + f"{ENV_WITH_CREDS} nix build --debug --impure --no-link " f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" ) except: output = client.fail( - f"{ENV_WITH_CREDS} nix build --debug --impure " + f"{ENV_WITH_CREDS} nix build --debug --impure --no-link " f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" ) From 4ae6c65bc589807c787be4b31809c15649a468f4 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:48:54 +0000 Subject: [PATCH 250/373] test(tests/nixos/s3-binary-cache-store): verify credential caching in concurrent fetches Add assertion to 
test_concurrent_fetches to verify that only one credential provider is created even with 5 concurrent fetches. --- tests/nixos/s3-binary-cache-store.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index d47273196..9a0975eed 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -418,6 +418,13 @@ in f"Expected 5 FileTransfer instances for 5 concurrent fetches, got {transfers_created}" ) + if providers_created != 1: + print("Debug output:") + print(output) + raise Exception( + f"Expected 1 credential provider for concurrent fetches, got {providers_created}" + ) + @setup_s3() def test_compression_narinfo_gzip(bucket): """Test narinfo compression with gzip""" From 5b4bd5bcb854f44b53119be58269f50430c55137 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:51:39 +0000 Subject: [PATCH 251/373] refactor(tests/nixos/s3-binary-cache-store): inline make_http_url fn Remove make_http_url helper function and inline its single usage. 
--- tests/nixos/s3-binary-cache-store.nix | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 9a0975eed..4f5632724 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -103,10 +103,6 @@ in bucket_and_path = f"{bucket}{path}" if path else bucket return f"s3://{bucket_and_path}?{query}" - def make_http_url(path): - """Build HTTP URL for direct S3 access""" - return f"{ENDPOINT}/{path}" - def get_package_hash(pkg_path): """Extract store hash from package path""" return pkg_path.split("/")[-1].split("-")[0] @@ -208,7 +204,7 @@ in print("\n=== Testing Error Message Formatting ===") nonexistent_url = make_s3_url(bucket, path="/foo-that-does-not-exist") - expected_http_url = make_http_url(f"{bucket}/foo-that-does-not-exist") + expected_http_url = f"{ENDPOINT}/{bucket}/foo-that-does-not-exist" error_msg = client.fail( f"{ENV_WITH_CREDS} nix eval --impure --expr " From 7d0c06f921a37c85dd98dfdcd077e5aad2e9ab3e Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 23:57:51 +0000 Subject: [PATCH 252/373] feat(tests/nixos/s3-binary-cache-store): add public parameter to setup_s3 Add optional 'public' parameter to setup_s3 decorator. When set to True, the bucket will be made publicly accessible using mc anonymous set. --- tests/nixos/s3-binary-cache-store.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 4f5632724..96ca37f19 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -131,7 +131,7 @@ in print(output) raise Exception(f"{error_msg}: expected {expected}, got {actual}") - def setup_s3(populate_bucket=[]): + def setup_s3(populate_bucket=[], public=False): """ Decorator that creates/destroys a unique bucket for each test. 
Optionally pre-populates bucket with specified packages. @@ -139,11 +139,14 @@ in Args: populate_bucket: List of packages to upload before test runs + public: If True, make the bucket publicly accessible """ def decorator(test_func): def wrapper(): bucket = str(uuid.uuid4()) server.succeed(f"mc mb minio/{bucket}") + if public: + server.succeed(f"mc anonymous set download minio/{bucket}") try: if populate_bucket: store_url = make_s3_url(bucket) From 55ea3d3476101ef1dce6d6e88770b0b0fb12c7c3 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sun, 19 Oct 2025 00:04:30 +0000 Subject: [PATCH 253/373] test(tests/nixos/s3-binary-cache-store): test public bucket operations Add `test_public_bucket_operations` to validate that store operations work correctly on public S3 buckets without requiring credentials. Tests nix store info and nix copy operations. --- tests/nixos/s3-binary-cache-store.nix | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 96ca37f19..40804f599 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -340,6 +340,42 @@ in print(" ✓ nix copy works") print(" ✓ Credentials cached on client") + @setup_s3(populate_bucket=[PKGS['A'], PKGS['B']], public=True) + def test_public_bucket_operations(bucket): + """Test store operations on public bucket without credentials""" + print("\n=== Testing Public Bucket Operations ===") + + store_url = make_s3_url(bucket) + + # Verify store info works without credentials + client.succeed(f"nix store info --store '{store_url}' >&2") + print(" ✓ nix store info works without credentials") + + # Get and validate store info JSON + info_json = client.succeed(f"nix store info --json --store '{store_url}'") + store_info = json.loads(info_json) + + if not store_info.get("url"): + raise Exception("Store should have a URL") + + print(f" ✓ Store URL: {store_info['url']}") + + # 
Verify packages are not yet in client store + client.fail(f"nix path-info {PKGS['A']}") + client.fail(f"nix path-info {PKGS['B']}") + + # Test copy from public bucket without credentials + client.succeed( + f"nix copy --debug --no-check-sigs " + f"--from '{store_url}' {PKGS['A']} {PKGS['B']} 2>&1" + ) + + # Verify packages were copied successfully + client.succeed(f"nix path-info {PKGS['A']}") + client.succeed(f"nix path-info {PKGS['B']}") + + print(" ✓ nix copy from public bucket works without credentials") + @setup_s3(populate_bucket=[PKGS['A']]) def test_url_format_variations(bucket): """Test different S3 URL parameter combinations""" @@ -506,6 +542,7 @@ in test_error_message_formatting() test_fork_credential_preresolution() test_store_operations() + test_public_bucket_operations() test_url_format_variations() test_concurrent_fetches() test_compression_narinfo_gzip() From d9c808f8a76ef35050d3aa65e1973ab7d69ed48e Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sun, 19 Oct 2025 00:21:46 +0000 Subject: [PATCH 254/373] refactor(tests/nixos/s3-binary-cache-store): add verify_packages_in_store helper --- tests/nixos/s3-binary-cache-store.nix | 30 ++++++++++++++++++++------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 40804f599..b1995bd3a 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -131,6 +131,22 @@ in print(output) raise Exception(f"{error_msg}: expected {expected}, got {actual}") + def verify_packages_in_store(machine, pkg_paths, should_exist=True): + """ + Verify whether packages exist in the store. 
+ + Args: + machine: The machine to check on + pkg_paths: List of package paths to check (or single path) + should_exist: If True, verify packages exist; if False, verify they don't + """ + paths = [pkg_paths] if isinstance(pkg_paths, str) else pkg_paths + for pkg in paths: + if should_exist: + machine.succeed(f"nix path-info {pkg}") + else: + machine.fail(f"nix path-info {pkg}") + def setup_s3(populate_bucket=[], public=False): """ Decorator that creates/destroys a unique bucket for each test. @@ -321,7 +337,7 @@ in print(f" ✓ Store URL: {store_info['url']}") # Test copy from store - client.fail(f"nix path-info {PKGS['A']}") + verify_packages_in_store(client, PKGS['A'], should_exist=False) output = client.succeed( f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs " @@ -335,7 +351,7 @@ in "Client credential provider caching failed" ) - client.succeed(f"nix path-info {PKGS['A']}") + verify_packages_in_store(client, [PKGS['A'], PKGS['B'], PKGS['C']]) print(" ✓ nix copy works") print(" ✓ Credentials cached on client") @@ -361,8 +377,7 @@ in print(f" ✓ Store URL: {store_info['url']}") # Verify packages are not yet in client store - client.fail(f"nix path-info {PKGS['A']}") - client.fail(f"nix path-info {PKGS['B']}") + verify_packages_in_store(client, [PKGS['A'], PKGS['B']], should_exist=False) # Test copy from public bucket without credentials client.succeed( @@ -371,8 +386,7 @@ in ) # Verify packages were copied successfully - client.succeed(f"nix path-info {PKGS['A']}") - client.succeed(f"nix path-info {PKGS['B']}") + verify_packages_in_store(client, [PKGS['A'], PKGS['B']]) print(" ✓ nix copy from public bucket works without credentials") @@ -475,7 +489,7 @@ in # Verify client can download and decompress client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['B']}") - client.succeed(f"nix path-info {PKGS['B']}") + verify_packages_in_store(client, PKGS['B']) print(" ✓ Client decompressed .narinfo successfully") @@ -503,7 +517,7 @@ in # 
Verify client can download with mixed compression client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['C']}") - client.succeed(f"nix path-info {PKGS['C']}") + verify_packages_in_store(client, PKGS['C']) print(" ✓ Client downloaded package with mixed compression") From e33cd5aa38c5760b5c0cda08aad2b5e9eb7768ff Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sun, 19 Oct 2025 14:08:34 +0200 Subject: [PATCH 255/373] Clarify unlocked input warning message The previous message was vague about what "deprecated" meant and why unlocked inputs with NAR hashes "may not be reproducible". It also used "verifiable" which was confusing. The new message makes it clear that the NAR hash provides verification (is checked by NAR hash) and explicitly states the failure modes: garbage collection and sharing. --- src/libexpr/primops/fetchTree.cc | 4 ++-- src/libflake/lockfile.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index ad76af5b5..b49bd02e7 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -199,8 +199,8 @@ static void fetchTree( if (state.settings.pureEval && !input.isLocked()) { if (input.getNarHash()) warn( - "Input '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " - "This is deprecated since such inputs are verifiable but may not be reproducible.", + "Input '%s' is unlocked (e.g. lacks a Git revision) but is checked by NAR hash. 
" + "This is not reproducible and will break after garbage collection or when shared.", input.to_string()); else state diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index d3dac19c5..d0d339f9f 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -77,8 +77,8 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: if (!lockedRef.input.isLocked() && !lockedRef.input.isRelative()) { if (lockedRef.input.getNarHash()) warn( - "Lock file entry '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " - "This is deprecated since such inputs are verifiable but may not be reproducible.", + "Lock file entry '%s' is unlocked (e.g. lacks a Git revision) but is checked by NAR hash. " + "This is not reproducible and will break after garbage collection or when shared.", lockedRef.to_string()); else throw Error( From c663f7ec79aa21037dabc0df130eb6f3e98d10c4 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 19 Oct 2025 21:03:13 +0300 Subject: [PATCH 256/373] libstore: Fix reentrancy in AwsCredentialProviderImpl::getCredentialsRaw Old code would do very much incorrect reentrancy crimes (trying to do an erase inside the emplace callback). 
This would fail miserably with an assertion in Boost: terminating due to unexpected unrecoverable internal error: Assertion '(!find(px))&&("reentrancy not allowed")' failed in boost::unordered::detail::foa::entry_trace::entry_trace(const void *) at include/boost/unordered/detail/foa/reentrancy_check.hpp:33 This is trivially reproduced by using any S3 URL with a non-empty profile: nix-prefetch-url "s3://happy/crash?profile=default" --- src/libstore/aws-creds.cc | 82 +++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index d58293560..ff7b0f0ef 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -96,67 +96,57 @@ public: } } + std::shared_ptr createProviderForProfile(const std::string & profile); + private: Aws::Crt::ApiHandle apiHandle; boost::concurrent_flat_map> credentialProviderCache; }; +std::shared_ptr +AwsCredentialProviderImpl::createProviderForProfile(const std::string & profile) +{ + debug( + "[pid=%d] creating new AWS credential provider for profile '%s'", + getpid(), + profile.empty() ? "(default)" : profile.c_str()); + + if (profile.empty()) { + Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + return Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); + } + + Aws::Crt::Auth::CredentialsProviderProfileConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + // This is safe because the underlying C library will copy this string + // c.f. 
https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 + config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); + return Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); +} + AwsCredentials AwsCredentialProviderImpl::getCredentialsRaw(const std::string & profile) { - // Get or create credential provider with caching std::shared_ptr provider; - // Use try_emplace_and_cvisit for atomic get-or-create - // This prevents race conditions where multiple threads create providers credentialProviderCache.try_emplace_and_cvisit( profile, - nullptr, // Placeholder - will be replaced in f1 before any thread can see it - [&](auto & kv) { - // f1: Called atomically during insertion with non-const reference - // Other threads are blocked until we finish, so nullptr is never visible - debug( - "[pid=%d] creating new AWS credential provider for profile '%s'", - getpid(), - profile.empty() ? "(default)" : profile.c_str()); + nullptr, + [&](auto & kv) { provider = kv.second = createProviderForProfile(profile); }, + [&](const auto & kv) { provider = kv.second; }); - try { - if (profile.empty()) { - Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); - } else { - Aws::Crt::Auth::CredentialsProviderProfileConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - // This is safe because the underlying C library will copy this string - // c.f. 
https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 - config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); - kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); - } - - if (!kv.second) { - throw AwsAuthError( - "Failed to create AWS credentials provider for %s", - profile.empty() ? "default profile" : fmt("profile '%s'", profile)); - } - - provider = kv.second; - } catch (Error & e) { - // Exception during creation - remove the entry to allow retry - credentialProviderCache.erase(profile); - e.addTrace({}, "for AWS profile: %s", profile.empty() ? "(default)" : profile); - throw; - } catch (...) { - // Non-Error exception - still need to clean up - credentialProviderCache.erase(profile); - throw; - } - }, - [&](const auto & kv) { - // f2: Called if key already exists (const reference) - provider = kv.second; + if (!provider) { + credentialProviderCache.erase_if(profile, [](const auto & kv) { + [[maybe_unused]] auto [_, provider] = kv; + return !provider; }); + throw AwsAuthError( + "Failed to create AWS credentials provider for %s", + profile.empty() ? 
"default profile" : fmt("profile '%s'", profile)); + } + return getCredentialsFromProvider(provider); } From 6c9083db2c4d68c3a3719a816da48b68541a4a72 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 20 Oct 2025 13:40:19 +0200 Subject: [PATCH 257/373] Use a smaller buffer --- src/libutil/serialise.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index bdce956f3..47a00c8d6 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -112,7 +112,7 @@ std::string Source::drain() void Source::skip(size_t len) { - std::array buf; + std::array buf; while (len) { auto n = read(buf.data(), std::min(len, buf.size())); assert(n <= len); From a91b7875249e84cb0b3a836b5fd59267481fa0cd Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 20 Oct 2025 21:08:49 +0300 Subject: [PATCH 258/373] libutil: Add alignUp helper function --- src/libutil-tests/alignment.cc | 18 ++++++++++++++++++ src/libutil-tests/meson.build | 1 + src/libutil/include/nix/util/alignment.hh | 23 +++++++++++++++++++++++ src/libutil/include/nix/util/meson.build | 1 + 4 files changed, 43 insertions(+) create mode 100644 src/libutil-tests/alignment.cc create mode 100644 src/libutil/include/nix/util/alignment.hh diff --git a/src/libutil-tests/alignment.cc b/src/libutil-tests/alignment.cc new file mode 100644 index 000000000..bef0c435d --- /dev/null +++ b/src/libutil-tests/alignment.cc @@ -0,0 +1,18 @@ +#include "nix/util/alignment.hh" + +#include + +namespace nix { + +TEST(alignUp, value) +{ + for (uint64_t i = 1; i <= 8; ++i) + EXPECT_EQ(alignUp(i, 8), 8); +} + +TEST(alignUp, notAPowerOf2) +{ + ASSERT_DEATH({ alignUp(1u, 42); }, "alignment must be a power of 2"); +} + +} // namespace nix diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index d84dbbb68..c75f4d90a 100644 --- a/src/libutil-tests/meson.build +++ b/src/libutil-tests/meson.build @@ -44,6 +44,7 @@ config_priv_h = configure_file( 
subdir('nix-meson-build-support/common') sources = files( + 'alignment.cc', 'archive.cc', 'args.cc', 'base-n.cc', diff --git a/src/libutil/include/nix/util/alignment.hh b/src/libutil/include/nix/util/alignment.hh new file mode 100644 index 000000000..a4e5af4d6 --- /dev/null +++ b/src/libutil/include/nix/util/alignment.hh @@ -0,0 +1,23 @@ +#pragma once +///@file + +#include +#include +#include +#include + +namespace nix { + +/// Aligns val upwards to be a multiple of alignment. +/// +/// @pre alignment must be a power of 2. +template + requires std::is_unsigned_v +constexpr T alignUp(T val, unsigned alignment) +{ + assert(std::has_single_bit(alignment) && "alignment must be a power of 2"); + T mask = ~(T{alignment} - 1u); + return (val + alignment - 1) & mask; +} + +} // namespace nix diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index dcfaa8e3f..9a606e15d 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -4,6 +4,7 @@ include_dirs = [ include_directories('../..') ] headers = files( 'abstract-setting-to-json.hh', + 'alignment.hh', 'ansicolor.hh', 'archive.hh', 'args.hh', From 22c73868c396ceb189ff2638768b3eea30120ded Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 20 Oct 2025 21:15:11 +0300 Subject: [PATCH 259/373] libutil/archive: Use alignUp With this change it's much more apparent what's going on. 
--- src/libutil/archive.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index b8fef9ef3..73ec0cab7 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -6,6 +6,7 @@ #include // for strcasecmp #include "nix/util/archive.hh" +#include "nix/util/alignment.hh" #include "nix/util/config-global.hh" #include "nix/util/posix-source-accessor.hh" #include "nix/util/source-path.hh" @@ -133,7 +134,7 @@ static void parseContents(CreateRegularFileSink & sink, Source & source) sink.preallocateContents(size); if (sink.skipContents) { - source.skip(size + (size % 8 ? 8 - (size % 8) : 0)); + source.skip(alignUp(size, 8)); return; } From e3b3f05e5d0ee92ccee4e8679af45609e786d149 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 20:45:43 +0000 Subject: [PATCH 260/373] fix(nix-prefetch-url): correctly extract filename from URLs with query parameters Previously, `prefetchFile()` used `baseNameOf()` directly on the URL string to extract the filename. This caused issues with URLs containing query parameters that include slashes, such as S3 URLs with custom endpoints: ``` s3://bucket/file.txt?endpoint=http://server:9000 ``` The `baseNameOf()` function naively searches for the rightmost `/` in the entire string, which would find the `/` in `http://server:9000` and extract `server:9000&region=...` as the filename. This resulted in invalid store path names containing illegal characters like `:`. This commit fixes the issue by: 1. Adding a `VerbatimURL::lastPathSegment()` method that extracts the last non-empty path segment from a URL, using `pathSegments(true)` to filter empty segments 2. Changing `prefetchFile()` to accept `const VerbatimURL &` and use the new `lastPathSegment()` method instead of manual path parsing 3. Adding early validation with `checkName()` to fail quickly on invalid filenames 4. 
Maintains backward compatibility by falling back to `baseNameOf()` for unparsable `VerbatimURL`s --- src/libutil/include/nix/util/url.hh | 11 +++++++++++ src/libutil/url.cc | 18 ++++++++++++++++++ src/nix/prefetch.cc | 22 +++++++++++++++------- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index 4ed80feb3..1fc8c3f2b 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -408,6 +408,17 @@ struct VerbatimURL [](const ParsedURL & url) -> std::string_view { return url.scheme; }}, raw); } + + /** + * Get the last non-empty path segment from the URL. + * + * This is useful for extracting filenames from URLs. + * For example, "https://example.com/path/to/file.txt?query=value" + * returns "file.txt". + * + * @return The last non-empty path segment, or std::nullopt if no such segment exists. + */ + std::optional lastPathSegment() const; }; std::ostream & operator<<(std::ostream & os, const VerbatimURL & url); diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 7410e4062..538792463 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -4,6 +4,7 @@ #include "nix/util/split.hh" #include "nix/util/canon-path.hh" #include "nix/util/strings-inline.hh" +#include "nix/util/file-system.hh" #include @@ -440,4 +441,21 @@ std::ostream & operator<<(std::ostream & os, const VerbatimURL & url) return os; } +std::optional VerbatimURL::lastPathSegment() const +{ + try { + auto parsedUrl = parsed(); + auto segments = parsedUrl.pathSegments(/*skipEmpty=*/true); + if (std::ranges::empty(segments)) + return std::nullopt; + return segments.back(); + } catch (BadURL &) { + // Fall back to baseNameOf for unparsable URLs + auto name = baseNameOf(to_string()); + if (name.empty()) + return std::nullopt; + return std::string{name}; + } +} + } // namespace nix diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 18abfa0aa..d875f8e4b 100644 --- 
a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -13,6 +13,8 @@ #include "nix/cmd/misc-store-flags.hh" #include "nix/util/terminal.hh" #include "nix/util/environment-variables.hh" +#include "nix/util/url.hh" +#include "nix/store/path.hh" #include "man-pages.hh" @@ -56,7 +58,7 @@ std::string resolveMirrorUrl(EvalState & state, const std::string & url) std::tuple prefetchFile( ref store, - std::string_view url, + const VerbatimURL & url, std::optional name, HashAlgorithm hashAlgo, std::optional expectedHash, @@ -68,9 +70,15 @@ std::tuple prefetchFile( /* Figure out a name in the Nix store. */ if (!name) { - name = baseNameOf(url); - if (name->empty()) - throw Error("cannot figure out file name for '%s'", url); + name = url.lastPathSegment(); + if (!name || name->empty()) + throw Error("cannot figure out file name for '%s'", url.to_string()); + } + try { + checkName(*name); + } catch (BadStorePathName & e) { + e.addTrace({}, "file name '%s' was extracted from URL '%s'", *name, url.to_string()); + throw; } std::optional storePath; @@ -105,14 +113,14 @@ std::tuple prefetchFile( FdSink sink(fd.get()); - FileTransferRequest req(VerbatimURL{url}); + FileTransferRequest req(url); req.decompress = false; getFileTransfer()->download(std::move(req), sink); } /* Optionally unpack the file. 
*/ if (unpack) { - Activity act(*logger, lvlChatty, actUnknown, fmt("unpacking '%s'", url)); + Activity act(*logger, lvlChatty, actUnknown, fmt("unpacking '%s'", url.to_string())); auto unpacked = (tmpDir.path() / "unpacked").string(); createDirs(unpacked); unpackTarfile(tmpFile.string(), unpacked); @@ -128,7 +136,7 @@ std::tuple prefetchFile( } } - Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url)); + Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url.to_string())); auto info = store->addToStoreSlow( *name, PosixSourceAccessor::createAtRoot(tmpFile), method, hashAlgo, {}, expectedHash); From 0f28c76a4471b20e2257cff408ae0f25a055d283 Mon Sep 17 00:00:00 2001 From: David McFarland Date: Mon, 20 Oct 2025 15:40:05 -0300 Subject: [PATCH 261/373] nix/develop: Strip outputChecks when structuredAttrs is enabled --- src/nix/develop.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 28d0a7080..d23dce10b 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -254,10 +254,15 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore drv.args = {store->printStorePath(getEnvShPath)}; /* Remove derivation checks. */ - drv.env.erase("allowedReferences"); - drv.env.erase("allowedRequisites"); - drv.env.erase("disallowedReferences"); - drv.env.erase("disallowedRequisites"); + if (drv.structuredAttrs) { + drv.structuredAttrs->structuredAttrs.erase("outputChecks"); + } else { + drv.env.erase("allowedReferences"); + drv.env.erase("allowedRequisites"); + drv.env.erase("disallowedReferences"); + drv.env.erase("disallowedRequisites"); + } + drv.env.erase("name"); /* Rehash and write the derivation. 
FIXME: would be nice to use From 1b1d7e30470e2979674214b021e8d13fd0e7df93 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 18 Oct 2025 20:48:50 +0000 Subject: [PATCH 262/373] test(nixos): add nix-prefetch-url test for S3 URLs with query parameters Adds a comprehensive test to verify that `nix-prefetch-url` correctly handles S3 URLs with query parameters (e.g., custom endpoints and regions). Previously, nix-prefetch-url would fail with "invalid store path" errors when given S3 URLs with query parameters like `?endpoint=http://server:9000&region=eu-west-1`, because it incorrectly extracted the filename from the query parameters instead of the path. --- tests/nixos/s3-binary-cache-store.nix | 64 +++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index b1995bd3a..981fab868 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -534,6 +534,69 @@ in print(" ✓ No compression applied by default") + @setup_s3() + def test_nix_prefetch_url(bucket): + """Test that nix-prefetch-url retrieves actual file content from S3, not empty files (issue #8862)""" + print("\n=== Testing nix-prefetch-url S3 Content Retrieval (issue #8862) ===") + + # Create a test file with known content + test_content = "This is test content to verify S3 downloads work correctly!\n" + test_file_size = len(test_content) + + server.succeed(f"echo -n '{test_content}' > /tmp/test-file.txt") + + # Upload to S3 + server.succeed(f"mc cp /tmp/test-file.txt minio/{bucket}/test-file.txt") + + # Calculate expected hash + expected_hash = server.succeed( + "nix hash file --type sha256 --base32 /tmp/test-file.txt" + ).strip() + + print(f" ✓ Uploaded test file to S3 ({test_file_size} bytes)") + + # Use nix-prefetch-url to download from S3 + s3_url = make_s3_url(bucket, path="/test-file.txt") + + prefetch_output = client.succeed( + f"{ENV_WITH_CREDS} nix-prefetch-url 
--print-path '{s3_url}'" + ) + + # Extract hash and store path + # With --print-path, output is: \n + lines = prefetch_output.strip().split('\n') + prefetch_hash = lines[0] # First line is the hash + store_path = lines[1] # Second line is the store path + + # Verify hash matches + if prefetch_hash != expected_hash: + raise Exception( + f"Hash mismatch: expected {expected_hash}, got {prefetch_hash}" + ) + + print(" ✓ nix-prefetch-url completed with correct hash") + + # Verify the downloaded file is NOT empty (the bug in #8862) + file_size = int(client.succeed(f"stat -c %s {store_path}").strip()) + + if file_size == 0: + raise Exception("Downloaded file is EMPTY - issue #8862 regression detected!") + + if file_size != test_file_size: + raise Exception( + f"File size mismatch: expected {test_file_size}, got {file_size}" + ) + + print(f" ✓ File has correct size ({file_size} bytes, not empty)") + + # Verify actual content matches by comparing hashes instead of printing entire file + downloaded_hash = client.succeed(f"nix hash file --type sha256 --base32 {store_path}").strip() + + if downloaded_hash != expected_hash: + raise Exception(f"Content hash mismatch: expected {expected_hash}, got {downloaded_hash}") + + print(" ✓ File content verified correct (hash matches)") + # ============================================================================ # Main Test Execution # ============================================================================ @@ -562,6 +625,7 @@ in test_compression_narinfo_gzip() test_compression_mixed() test_compression_disabled() + test_nix_prefetch_url() print("\n" + "="*80) print("✓ All S3 Binary Cache Store Tests Passed!") From 5e7ee808de8bdc353f80401e8fd8a310a2622f4b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 13 Sep 2025 08:32:26 -0400 Subject: [PATCH 263/373] `nlohmann::json` instance and JSON Schema for `Hash` Improving and codifying our experimental JSON interfacing. 
Co-Authored-By: Robert Hensing --- doc/manual/meson.build | 2 +- doc/manual/package.nix | 2 + doc/manual/source/protocols/json/hash.md | 26 ++++ .../source/protocols/json/schema/hash-v1 | 1 + .../source/protocols/json/schema/hash-v1.yaml | 30 ++++- src/json-schema-checks/hash | 1 + src/json-schema-checks/meson.build | 10 ++ src/json-schema-checks/package.nix | 1 + .../data/hash/blake3-base64.json | 5 + .../data/hash/sha256-base16.json | 5 + .../data/hash/sha256-base64.json | 5 + src/libutil-tests/data/hash/sha256-nix32.json | 5 + src/libutil-tests/data/hash/simple.json | 5 + src/libutil-tests/hash.cc | 111 ++++++++++++++++-- src/libutil/hash.cc | 48 ++++++-- src/libutil/include/nix/util/hash.hh | 19 ++- 16 files changed, 252 insertions(+), 24 deletions(-) create mode 120000 doc/manual/source/protocols/json/schema/hash-v1 create mode 120000 src/json-schema-checks/hash create mode 100644 src/libutil-tests/data/hash/blake3-base64.json create mode 100644 src/libutil-tests/data/hash/sha256-base16.json create mode 100644 src/libutil-tests/data/hash/sha256-base64.json create mode 100644 src/libutil-tests/data/hash/sha256-nix32.json create mode 100644 src/libutil-tests/data/hash/simple.json diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 7090c949c..fdea40098 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -88,7 +88,7 @@ manual = custom_target( @0@ @INPUT0@ @CURRENT_SOURCE_DIR@ > @DEPFILE@ @0@ @INPUT1@ summary @2@ < @CURRENT_SOURCE_DIR@/source/SUMMARY.md.in > @2@/source/SUMMARY.md sed -e 's|@version@|@3@|g' < @INPUT2@ > @2@/book.toml - @4@ -r --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/ + @4@ -r -L --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/ (cd @2@; RUST_LOG=warn @1@ build -d @2@ 3>&2 2>&1 1>&3) | { grep -Fv "because fragment resolution isn't implemented" || :; } 3>&2 2>&1 1>&3 rm -rf @2@/manual mv @2@/html @2@/manual diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 7b94721ae..30486869e 100644 --- 
a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -33,6 +33,8 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + # For example JSON + ../../src/libutil-tests/data/hash # Too many different types of files to filter for now ../../doc/manual ./. diff --git a/doc/manual/source/protocols/json/hash.md b/doc/manual/source/protocols/json/hash.md index d2bdf1062..efd920086 100644 --- a/doc/manual/source/protocols/json/hash.md +++ b/doc/manual/source/protocols/json/hash.md @@ -1,5 +1,31 @@ {{#include hash-v1-fixed.md}} +## Examples + +### SHA-256 with Base64 encoding + +```json +{{#include schema/hash-v1/sha256-base64.json}} +``` + +### SHA-256 with Base16 (hexadecimal) encoding + +```json +{{#include schema/hash-v1/sha256-base16.json}} +``` + +### SHA-256 with Nix32 encoding + +```json +{{#include schema/hash-v1/sha256-nix32.json}} +``` + +### BLAKE3 with Base64 encoding + +```json +{{#include schema/hash-v1/blake3-base64.json}} +``` + diff --git a/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed index 126e666e9..27895d42a 100644 --- a/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed +++ b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed @@ -12,3 +12,6 @@ s/\\`/`/g # As we have more such relative links, more replacements of this nature # should appear below. 
s^\(./hash-v1.yaml\)\?#/$defs/algorithm^[JSON format for `Hash`](./hash.html#algorithm)^g +s^\(./hash-v1.yaml\)^[JSON format for `Hash`](./hash.html)^g +s^\(./content-address-v1.yaml\)\?#/$defs/method^[JSON format for `ContentAddress`](./content-address.html#method)^g +s^\(./content-address-v1.yaml\)^[JSON format for `ContentAddress`](./content-address.html)^g diff --git a/doc/manual/source/protocols/json/meson.build b/doc/manual/source/protocols/json/meson.build index 191ec6dbe..f79667961 100644 --- a/doc/manual/source/protocols/json/meson.build +++ b/doc/manual/source/protocols/json/meson.build @@ -10,6 +10,7 @@ json_schema_config = files('json-schema-for-humans-config.yaml') schemas = [ 'hash-v1', + 'content-address-v1', 'derivation-v3', 'deriving-path-v1', ] diff --git a/doc/manual/source/protocols/json/schema/content-address-v1 b/doc/manual/source/protocols/json/schema/content-address-v1 new file mode 120000 index 000000000..35a0dd865 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/content-address-v1 @@ -0,0 +1 @@ +../../../../../../src/libstore-tests/data/content-address \ No newline at end of file diff --git a/doc/manual/source/protocols/json/schema/content-address-v1.yaml b/doc/manual/source/protocols/json/schema/content-address-v1.yaml new file mode 100644 index 000000000..d0f759201 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/content-address-v1.yaml @@ -0,0 +1,55 @@ +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/content-address-v1.json" +title: Content Address +description: | + This schema describes the JSON representation of Nix's `ContentAddress` type, which conveys information about [content-addressing store objects](@docroot@/store/store-object/content-address.md). 
+ + > **Note** + > + > For current methods of content addressing, this data type is a bit suspicious, because it is neither simply a content address of a file system object (the `method` is richer), nor simply a content address of a store object (the `hash` doesn't account for the references). + > It should thus only be used in contexts where the references are also known / otherwise made tamper-resistant. + + + +type: object +properties: + method: + "$ref": "#/$defs/method" + hash: + title: Content Address + description: | + This would be the content-address itself. + + For all current methods, this is just a content address of the file system object of the store object, [as described in the store chapter](@docroot@/store/file-system-object/content-address.md), and not of the store object as a whole. + In particular, the references of the store object are *not* taken into account with this hash (and currently-supported methods). + "$ref": "./hash-v1.yaml" +required: +- method +- hash +additionalProperties: false +"$defs": + method: + type: string + enum: [flat, nar, text, git] + title: Content-Addressing Method + description: | + A string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. 
+ + Valid method strings are: + + - [`flat`](@docroot@/store/store-object/content-address.md#method-flat) (provided the contents are a single file) + - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive) + - [`text`](@docroot@/store/store-object/content-address.md#method-text) + - [`git`](@docroot@/store/store-object/content-address.md#method-git) diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml index 7c92d475d..c950b839f 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v3.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -1,5 +1,5 @@ -"$schema": http://json-schema.org/draft-04/schema# -"$id": https://nix.dev/manual/nix/latest/protocols/json/schema/derivation-v3.json +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/derivation-v3.json" title: Derivation description: | Experimental JSON representation of a Nix derivation (version 3). @@ -154,19 +154,10 @@ properties: The output path, if known in advance. method: - type: string - title: Content addressing method - enum: [flat, nar, text, git] + "$ref": "./content-address-v1.yaml#/$defs/method" description: | For an output which will be [content addressed](@docroot@/store/derivation/outputs/content-address.md), a string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. - - Valid method strings are: - - - [`flat`](@docroot@/store/store-object/content-address.md#method-flat) - - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive) - - [`text`](@docroot@/store/store-object/content-address.md#method-text) - - [`git`](@docroot@/store/store-object/content-address.md#method-git) - + See the linked original definition for further details. 
hashAlgo: title: Hash algorithm "$ref": "./hash-v1.yaml#/$defs/algorithm" diff --git a/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml index 9c0350d3d..7fd74941e 100644 --- a/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml +++ b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml @@ -1,5 +1,5 @@ -"$schema": http://json-schema.org/draft-04/schema# -"$id": https://nix.dev/manual/nix/latest/protocols/json/schema/deriving-path-v1.json +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/deriving-path-v1.json" title: Deriving Path description: | This schema describes the JSON representation of Nix's [Deriving Path](@docroot@/store/derivation/index.md#deriving-path). diff --git a/doc/manual/source/protocols/json/schema/hash-v1.yaml b/doc/manual/source/protocols/json/schema/hash-v1.yaml index 844959bcd..316fb6d73 100644 --- a/doc/manual/source/protocols/json/schema/hash-v1.yaml +++ b/doc/manual/source/protocols/json/schema/hash-v1.yaml @@ -1,5 +1,5 @@ -"$schema": http://json-schema.org/draft-04/schema# -"$id": https://nix.dev/manual/nix/latest/protocols/json/schema/hash-v1.json +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/hash-v1.json" title: Hash description: | A cryptographic hash value used throughout Nix for content addressing and integrity verification. 
diff --git a/src/json-schema-checks/content-address b/src/json-schema-checks/content-address new file mode 120000 index 000000000..194a265a1 --- /dev/null +++ b/src/json-schema-checks/content-address @@ -0,0 +1 @@ +../../src/libstore-tests/data/content-address \ No newline at end of file diff --git a/src/json-schema-checks/meson.build b/src/json-schema-checks/meson.build index 09da8770b..745fb5ffa 100644 --- a/src/json-schema-checks/meson.build +++ b/src/json-schema-checks/meson.build @@ -30,6 +30,14 @@ schemas = [ 'blake3-base64.json', ], }, + { + 'stem' : 'content-address', + 'schema' : schema_dir / 'content-address-v1.yaml', + 'files' : [ + 'text.json', + 'nar.json', + ], + }, { 'stem' : 'derivation', 'schema' : schema_dir / 'derivation-v3.yaml', @@ -73,8 +81,6 @@ foreach schema : schemas stem + '-schema-valid', jv, args : [ - '--map', - './hash-v1.yaml=' + schema_dir / 'hash-v1.yaml', 'http://json-schema.org/draft-04/schema', schema_file, ], diff --git a/src/json-schema-checks/package.nix b/src/json-schema-checks/package.nix index cf4e4cb19..6a76c8b28 100644 --- a/src/json-schema-checks/package.nix +++ b/src/json-schema-checks/package.nix @@ -21,6 +21,7 @@ mkMesonDerivation (finalAttrs: { ../../.version ../../doc/manual/source/protocols/json/schema ../../src/libutil-tests/data/hash + ../../src/libstore-tests/data/content-address ../../src/libstore-tests/data/derivation ../../src/libstore-tests/data/derived-path ./. 
diff --git a/src/libstore-tests/content-address.cc b/src/libstore-tests/content-address.cc index 51d591c38..0474fb2e0 100644 --- a/src/libstore-tests/content-address.cc +++ b/src/libstore-tests/content-address.cc @@ -1,6 +1,7 @@ #include #include "nix/store/content-address.hh" +#include "nix/util/tests/json-characterization.hh" namespace nix { @@ -8,33 +9,93 @@ namespace nix { * ContentAddressMethod::parse, ContentAddressMethod::render * --------------------------------------------------------------------------*/ -TEST(ContentAddressMethod, testRoundTripPrintParse_1) +static auto methods = ::testing::Values( + std::pair{ContentAddressMethod::Raw::Text, "text"}, + std::pair{ContentAddressMethod::Raw::Flat, "flat"}, + std::pair{ContentAddressMethod::Raw::NixArchive, "nar"}, + std::pair{ContentAddressMethod::Raw::Git, "git"}); + +struct ContentAddressMethodTest : ::testing::Test, + ::testing::WithParamInterface> +{}; + +TEST_P(ContentAddressMethodTest, testRoundTripPrintParse_1) { - for (ContentAddressMethod cam : { - ContentAddressMethod::Raw::Text, - ContentAddressMethod::Raw::Flat, - ContentAddressMethod::Raw::NixArchive, - ContentAddressMethod::Raw::Git, - }) { - EXPECT_EQ(ContentAddressMethod::parse(cam.render()), cam); - } + auto & [cam, _] = GetParam(); + EXPECT_EQ(ContentAddressMethod::parse(cam.render()), cam); } -TEST(ContentAddressMethod, testRoundTripPrintParse_2) +TEST_P(ContentAddressMethodTest, testRoundTripPrintParse_2) { - for (const std::string_view camS : { - "text", - "flat", - "nar", - "git", - }) { - EXPECT_EQ(ContentAddressMethod::parse(camS).render(), camS); - } + auto & [cam, camS] = GetParam(); + EXPECT_EQ(ContentAddressMethod::parse(camS).render(), camS); } +INSTANTIATE_TEST_SUITE_P(ContentAddressMethod, ContentAddressMethodTest, methods); + TEST(ContentAddressMethod, testParseContentAddressMethodOptException) { EXPECT_THROW(ContentAddressMethod::parse("narwhal"), UsageError); } +/* 
---------------------------------------------------------------------------- + * JSON + * --------------------------------------------------------------------------*/ + +class ContentAddressTest : public virtual CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "content-address"; + +public: + + /** + * We set these in tests rather than the regular globals so we don't have + * to worry about race conditions if the tests run concurrently. + */ + ExperimentalFeatureSettings mockXpSettings; + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; + +using nlohmann::json; + +struct ContentAddressJsonTest : ContentAddressTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +TEST_P(ContentAddressJsonTest, from_json) +{ + auto & [name, expected] = GetParam(); + readJsonTest(name, expected); +} + +TEST_P(ContentAddressJsonTest, to_json) +{ + auto & [name, value] = GetParam(); + writeJsonTest(name, value); +} + +INSTANTIATE_TEST_SUITE_P( + ContentAddressJSON, + ContentAddressJsonTest, + ::testing::Values( + std::pair{ + "text", + ContentAddress{ + .method = ContentAddressMethod::Raw::Text, + .hash = hashString(HashAlgorithm::SHA256, "asdf"), + }, + }, + std::pair{ + "nar", + ContentAddress{ + .method = ContentAddressMethod::Raw::NixArchive, + .hash = hashString(HashAlgorithm::SHA256, "qwer"), + }, + })); + } // namespace nix diff --git a/src/libstore-tests/data/content-address/nar.json b/src/libstore-tests/data/content-address/nar.json new file mode 100644 index 000000000..21e065cd3 --- /dev/null +++ b/src/libstore-tests/data/content-address/nar.json @@ -0,0 +1,8 @@ +{ + "hash": { + "algorithm": "sha256", + "format": "base64", + "hash": "9vLqj0XYoFfJVmoz+ZR02i5camYE1zYSFlDicwxvsKM=" + }, + "method": "nar" +} diff --git a/src/libstore-tests/data/content-address/text.json b/src/libstore-tests/data/content-address/text.json new file mode 100644 index 
000000000..04bc8ac20 --- /dev/null +++ b/src/libstore-tests/data/content-address/text.json @@ -0,0 +1,8 @@ +{ + "hash": { + "algorithm": "sha256", + "format": "base64", + "hash": "8OTC92xYkW7CWPJGhRvqCR0U1CR6L8PhhpRGGxgW4Ts=" + }, + "method": "text" +} diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc index 9a57e3aa6..497c2c5b4 100644 --- a/src/libstore/content-address.cc +++ b/src/libstore/content-address.cc @@ -1,6 +1,7 @@ #include "nix/util/args.hh" #include "nix/store/content-address.hh" #include "nix/util/split.hh" +#include "nix/util/json-utils.hh" namespace nix { @@ -300,3 +301,36 @@ Hash ContentAddressWithReferences::getHash() const } } // namespace nix + +namespace nlohmann { + +using namespace nix; + +ContentAddressMethod adl_serializer::from_json(const json & json) +{ + return ContentAddressMethod::parse(getString(json)); +} + +void adl_serializer::to_json(json & json, const ContentAddressMethod & m) +{ + json = m.render(); +} + +ContentAddress adl_serializer::from_json(const json & json) +{ + auto obj = getObject(json); + return { + .method = adl_serializer::from_json(valueAt(obj, "method")), + .hash = valueAt(obj, "hash"), + }; +} + +void adl_serializer::to_json(json & json, const ContentAddress & ca) +{ + json = { + {"method", ca.method}, + {"hash", ca.hash}, + }; +} + +} // namespace nlohmann diff --git a/src/libstore/include/nix/store/content-address.hh b/src/libstore/include/nix/store/content-address.hh index 0a3dc79bd..41ccc69ae 100644 --- a/src/libstore/include/nix/store/content-address.hh +++ b/src/libstore/include/nix/store/content-address.hh @@ -6,6 +6,7 @@ #include "nix/store/path.hh" #include "nix/util/file-content-address.hh" #include "nix/util/variant-wrapper.hh" +#include "nix/util/json-impls.hh" namespace nix { @@ -308,4 +309,15 @@ struct ContentAddressWithReferences Hash getHash() const; }; +template<> +struct json_avoids_null : std::true_type +{}; + +template<> +struct json_avoids_null : std::true_type +{}; 
+ } // namespace nix + +JSON_IMPL(nix::ContentAddressMethod) +JSON_IMPL(nix::ContentAddress) From 3915b3a111ffe42d1ac9c8162b5506fa7678464f Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 22 Oct 2025 08:10:20 +0000 Subject: [PATCH 321/373] feat(libstore/s3-binary-cache-store): implement `abortMultipartUpload()` Implement `abortMultipartUpload()` for cleaning up incomplete multipart uploads on error: - Constructs URL with `?uploadId=ID` query parameter - Issues `DELETE` request to abort the multipart upload --- src/libstore/s3-binary-cache-store.cc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 5d97fb0fd..98f742c70 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -26,6 +26,14 @@ public: private: ref s3Config; + + /** + * Abort a multipart upload + * + * @see + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html#API_AbortMultipartUpload_RequestSyntax + */ + void abortMultipartUpload(std::string_view key, std::string_view uploadId); }; void S3BinaryCacheStore::upsertFile( @@ -37,6 +45,19 @@ void S3BinaryCacheStore::upsertFile( HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint); } +void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_view uploadId) +{ + auto req = makeRequest(key); + req.setupForS3(); + + auto url = req.uri.parsed(); + url.query["uploadId"] = uploadId; + req.uri = VerbatimURL(url); + req.method = HttpMethod::DELETE; + + getFileTransfer()->enqueueFileTransfer(req).get(); +} + StringSet S3BinaryCacheStoreConfig::uriSchemes() { return {"s3"}; From 5e220271e2dbafb5205684354057aeaa4a58a5c6 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 25 Oct 2025 22:38:43 +0000 Subject: [PATCH 322/373] feat(libstore): add scanForReferencesDeep for per-file reference tracking Introduces `scanForReferencesDeep` to 
provide per-file granularity when scanning for store path references, enabling better diagnostics for cycle detection and `nix why-depends --precise`. --- src/libstore-tests/references.cc | 143 ++++++++++++++++++ .../include/nix/store/path-references.hh | 57 +++++++ src/libstore/path-references.cc | 90 +++++++++++ 3 files changed, 290 insertions(+) diff --git a/src/libstore-tests/references.cc b/src/libstore-tests/references.cc index 27ecad08f..9cecd573e 100644 --- a/src/libstore-tests/references.cc +++ b/src/libstore-tests/references.cc @@ -1,4 +1,6 @@ #include "nix/store/references.hh" +#include "nix/store/path-references.hh" +#include "nix/util/memory-source-accessor.hh" #include @@ -79,4 +81,145 @@ TEST(references, scan) } } +TEST(references, scanForReferencesDeep) +{ + using File = MemorySourceAccessor::File; + + // Create store paths to search for + StorePath path1{"dc04vv14dak1c1r48qa0m23vr9jy8sm0-foo"}; + StorePath path2{"zc842j0rz61mjsp3h3wp5ly71ak6qgdn-bar"}; + StorePath path3{"a5cn2i4b83gnsm60d38l3kgb8qfplm11-baz"}; + + StorePathSet refs{path1, path2, path3}; + + std::string_view hash1 = path1.hashPart(); + std::string_view hash2 = path2.hashPart(); + std::string_view hash3 = path3.hashPart(); + + // Create an in-memory file system with various reference patterns + auto accessor = make_ref(); + accessor->root = File::Directory{ + .contents{ + { + // file1.txt: contains hash1 + "file1.txt", + File::Regular{ + .contents = "This file references " + hash1 + " in its content", + }, + }, + { + // file2.txt: contains hash2 and hash3 + "file2.txt", + File::Regular{ + .contents = "Multiple refs: " + hash2 + " and also " + hash3, + }, + }, + { + // file3.txt: contains no references + "file3.txt", + File::Regular{ + .contents = "This file has no store path references at all", + }, + }, + { + // subdir: a subdirectory + "subdir", + File::Directory{ + .contents{ + { + // subdir/file4.txt: contains hash1 again + "file4.txt", + File::Regular{ + .contents = 
"Subdirectory file with " + hash1, + }, + }, + }, + }, + }, + { + // link1: a symlink that contains a reference in its target + "link1", + File::Symlink{ + .target = hash2 + "-target", + }, + }, + }, + }; + + // Test the callback-based API + { + std::map foundRefs; + + scanForReferencesDeep(*accessor, CanonPath::root, refs, [&](FileRefScanResult result) { + foundRefs[std::move(result.filePath)] = std::move(result.foundRefs); + }); + + // Verify we found the expected references + EXPECT_EQ(foundRefs.size(), 4); // file1, file2, file4, link1 + + // Check file1.txt found path1 + { + CanonPath f1Path("/file1.txt"); + auto it = foundRefs.find(f1Path); + ASSERT_TRUE(it != foundRefs.end()); + EXPECT_EQ(it->second.size(), 1); + EXPECT_TRUE(it->second.count(path1)); + } + + // Check file2.txt found path2 and path3 + { + CanonPath f2Path("/file2.txt"); + auto it = foundRefs.find(f2Path); + ASSERT_TRUE(it != foundRefs.end()); + EXPECT_EQ(it->second.size(), 2); + EXPECT_TRUE(it->second.count(path2)); + EXPECT_TRUE(it->second.count(path3)); + } + + // Check file3.txt is not in results (no refs) + { + CanonPath f3Path("/file3.txt"); + EXPECT_FALSE(foundRefs.count(f3Path)); + } + + // Check subdir/file4.txt found path1 + { + CanonPath f4Path("/subdir/file4.txt"); + auto it = foundRefs.find(f4Path); + ASSERT_TRUE(it != foundRefs.end()); + EXPECT_EQ(it->second.size(), 1); + EXPECT_TRUE(it->second.count(path1)); + } + + // Check symlink found path2 + { + CanonPath linkPath("/link1"); + auto it = foundRefs.find(linkPath); + ASSERT_TRUE(it != foundRefs.end()); + EXPECT_EQ(it->second.size(), 1); + EXPECT_TRUE(it->second.count(path2)); + } + } + + // Test the map-based convenience API + { + auto results = scanForReferencesDeep(*accessor, CanonPath::root, refs); + + EXPECT_EQ(results.size(), 4); // file1, file2, file4, link1 + + // Verify all expected files are in the results + EXPECT_TRUE(results.count(CanonPath("/file1.txt"))); + EXPECT_TRUE(results.count(CanonPath("/file2.txt"))); + 
EXPECT_TRUE(results.count(CanonPath("/subdir/file4.txt"))); + EXPECT_TRUE(results.count(CanonPath("/link1"))); + EXPECT_FALSE(results.count(CanonPath("/file3.txt"))); + + // Verify the references found in each file are correct + EXPECT_EQ(results.at(CanonPath("/file1.txt")), StorePathSet{path1}); + EXPECT_EQ(results.at(CanonPath("/file2.txt")), StorePathSet({path2, path3})); + EXPECT_EQ(results.at(CanonPath("/subdir/file4.txt")), StorePathSet{path1}); + EXPECT_EQ(results.at(CanonPath("/link1")), StorePathSet{path2}); + } +} + } // namespace nix diff --git a/src/libstore/include/nix/store/path-references.hh b/src/libstore/include/nix/store/path-references.hh index 66d0da268..6aa506da4 100644 --- a/src/libstore/include/nix/store/path-references.hh +++ b/src/libstore/include/nix/store/path-references.hh @@ -3,6 +3,10 @@ #include "nix/store/references.hh" #include "nix/store/path.hh" +#include "nix/util/source-accessor.hh" + +#include +#include namespace nix { @@ -21,4 +25,57 @@ public: StorePathSet getResultPaths(); }; +/** + * Result of scanning a single file for references. + */ +struct FileRefScanResult +{ + CanonPath filePath; ///< The file that was scanned + StorePathSet foundRefs; ///< Which store paths were found in this file +}; + +/** + * Scan a store path tree and report which references appear in which files. + * + * This is like scanForReferences() but provides per-file granularity. + * Useful for cycle detection and detailed dependency analysis like `nix why-depends --precise`. + * + * The function walks the tree using the provided accessor and streams each file's + * contents through a RefScanSink to detect hash references. For each file that + * contains at least one reference, a callback is invoked with the file path and + * the set of references found. + * + * Note: This function only searches for the hash part of store paths (e.g., + * "dc04vv14dak1c1r48qa0m23vr9jy8sm0"), not the name part. 
A store path like + * "/nix/store/dc04vv14dak1c1r48qa0m23vr9jy8sm0-foo" will be detected if the + * hash appears anywhere in the scanned content, regardless of the "-foo" suffix. + * + * @param accessor Source accessor to read the tree + * @param rootPath Root path to scan + * @param refs Set of store paths to search for + * @param callback Called for each file that contains at least one reference + */ +void scanForReferencesDeep( + SourceAccessor & accessor, + const CanonPath & rootPath, + const StorePathSet & refs, + std::function callback); + +/** + * Scan a store path tree and return which references appear in which files. + * + * This is a convenience wrapper around the callback-based scanForReferencesDeep() + * that collects all results into a map for efficient lookups. + * + * Note: This function only searches for the hash part of store paths, not the name part. + * See the callback-based overload for details. + * + * @param accessor Source accessor to read the tree + * @param rootPath Root path to scan + * @param refs Set of store paths to search for + * @return Map from file paths to the set of references found in each file + */ +std::map +scanForReferencesDeep(SourceAccessor & accessor, const CanonPath & rootPath, const StorePathSet & refs); + } // namespace nix diff --git a/src/libstore/path-references.cc b/src/libstore/path-references.cc index 8b167e902..3d783bbe4 100644 --- a/src/libstore/path-references.cc +++ b/src/libstore/path-references.cc @@ -1,11 +1,15 @@ #include "nix/store/path-references.hh" #include "nix/util/hash.hh" #include "nix/util/archive.hh" +#include "nix/util/source-accessor.hh" +#include "nix/util/canon-path.hh" +#include "nix/util/logging.hh" #include #include #include #include +#include namespace nix { @@ -54,4 +58,90 @@ StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathS return refsSink.getResultPaths(); } +void scanForReferencesDeep( + SourceAccessor & accessor, + const CanonPath & rootPath, + const 
StorePathSet & refs, + std::function callback) +{ + // Recursive tree walker + auto walk = [&](this auto & self, const CanonPath & path) -> void { + auto stat = accessor.lstat(path); + + switch (stat.type) { + case SourceAccessor::tRegular: { + // Create a fresh sink for each file to independently detect references. + // RefScanSink accumulates found hashes globally - once a hash is found, + // it remains in the result set. If we reused the same sink across files, + // we couldn't distinguish which files contain which references, as a hash + // found in an earlier file wouldn't be reported when found in later files. + PathRefScanSink sink = PathRefScanSink::fromPaths(refs); + + // Scan this file by streaming its contents through the sink + accessor.readFile(path, sink); + + // Get the references found in this file + auto foundRefs = sink.getResultPaths(); + + // Report if we found anything in this file + if (!foundRefs.empty()) { + debug("scanForReferencesDeep: found %d references in %s", foundRefs.size(), path.abs()); + callback(FileRefScanResult{.filePath = path, .foundRefs = std::move(foundRefs)}); + } + break; + } + + case SourceAccessor::tDirectory: { + // Recursively scan directory contents + auto entries = accessor.readDirectory(path); + for (const auto & [name, entryType] : entries) { + self(path / name); + } + break; + } + + case SourceAccessor::tSymlink: { + // Create a fresh sink for the symlink target (same reason as regular files) + PathRefScanSink sink = PathRefScanSink::fromPaths(refs); + + // Scan symlink target for references + auto target = accessor.readLink(path); + sink(std::string_view(target)); + + // Get the references found in this symlink target + auto foundRefs = sink.getResultPaths(); + + if (!foundRefs.empty()) { + debug("scanForReferencesDeep: found %d references in symlink %s", foundRefs.size(), path.abs()); + callback(FileRefScanResult{.filePath = path, .foundRefs = std::move(foundRefs)}); + } + break; + } + + case 
SourceAccessor::tChar: + case SourceAccessor::tBlock: + case SourceAccessor::tSocket: + case SourceAccessor::tFifo: + case SourceAccessor::tUnknown: + default: + throw Error("file '%s' has an unsupported type", path.abs()); + } + }; + + // Start the recursive walk from the root + walk(rootPath); +} + +std::map +scanForReferencesDeep(SourceAccessor & accessor, const CanonPath & rootPath, const StorePathSet & refs) +{ + std::map results; + + scanForReferencesDeep(accessor, rootPath, refs, [&](FileRefScanResult result) { + results[std::move(result.filePath)] = std::move(result.foundRefs); + }); + + return results; +} + } // namespace nix From 6129aee988132742837d36fd4cf995bfe85b3198 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sat, 25 Oct 2025 22:55:14 +0000 Subject: [PATCH 323/373] refactor(nix/why-depends): use scanForReferencesDeep for --precise mode Replaces manual tree-walking and reference scanning with the new scanForReferencesDeep function. --- src/nix/why-depends.cc | 79 +++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 43 deletions(-) diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index dc30fabd7..29da9e953 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -1,5 +1,6 @@ #include "nix/cmd/command.hh" #include "nix/store/store-api.hh" +#include "nix/store/path-references.hh" #include "nix/util/source-accessor.hh" #include "nix/main/shared.hh" @@ -191,7 +192,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions /* Sort the references by distance to `dependency` to ensure that the shortest path is printed first. 
*/ std::multimap refs; - StringSet hashes; + StorePathSet refPaths; for (auto & ref : node.refs) { if (ref == node.path && packagePath != dependencyPath) @@ -200,7 +201,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions if (node2.dist == inf) continue; refs.emplace(node2.dist, &node2); - hashes.insert(std::string(node2.path.hashPart())); + refPaths.insert(node2.path); } /* For each reference, find the files and symlinks that @@ -209,58 +210,50 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions auto accessor = store->requireStoreObjectAccessor(node.path); - auto visitPath = [&](this auto && recur, const CanonPath & p) -> void { - auto st = accessor->maybeLstat(p); - assert(st); + auto getColour = [&](const std::string & hash) { + return hash == dependencyPathHash ? ANSI_GREEN : ANSI_BLUE; + }; - auto p2 = p.isRoot() ? p.abs() : p.rel(); + if (precise) { + // Use scanForReferencesDeep to find files containing references + scanForReferencesDeep(*accessor, CanonPath::root, refPaths, [&](FileRefScanResult result) { + auto p2 = result.filePath.isRoot() ? result.filePath.abs() : result.filePath.rel(); + auto st = accessor->lstat(result.filePath); - auto getColour = [&](const std::string & hash) { - return hash == dependencyPathHash ? ANSI_GREEN : ANSI_BLUE; - }; + if (st.type == SourceAccessor::Type::tRegular) { + auto contents = accessor->readFile(result.filePath); - if (st->type == SourceAccessor::Type::tDirectory) { - auto names = accessor->readDirectory(p); - for (auto & [name, type] : names) - recur(p / name); - } - - else if (st->type == SourceAccessor::Type::tRegular) { - auto contents = accessor->readFile(p); - - for (auto & hash : hashes) { - auto pos = contents.find(hash); - if (pos != std::string::npos) { - size_t margin = 32; - auto pos2 = pos >= margin ? 
pos - margin : 0; - hits[hash].emplace_back( - fmt("%s: …%s…", + // For each reference found in this file, extract context + for (auto & foundRef : result.foundRefs) { + std::string hash(foundRef.hashPart()); + auto pos = contents.find(hash); + if (pos != std::string::npos) { + size_t margin = 32; + auto pos2 = pos >= margin ? pos - margin : 0; + hits[hash].emplace_back(fmt( + "%s: …%s…", p2, hilite( filterPrintable(std::string(contents, pos2, pos - pos2 + hash.size() + margin)), pos - pos2, StorePath::HashLen, getColour(hash)))); + } + } + } else if (st.type == SourceAccessor::Type::tSymlink) { + auto target = accessor->readLink(result.filePath); + + // For each reference found in this symlink, show it + for (auto & foundRef : result.foundRefs) { + std::string hash(foundRef.hashPart()); + auto pos = target.find(hash); + if (pos != std::string::npos) + hits[hash].emplace_back( + fmt("%s -> %s", p2, hilite(target, pos, StorePath::HashLen, getColour(hash)))); } } - } - - else if (st->type == SourceAccessor::Type::tSymlink) { - auto target = accessor->readLink(p); - - for (auto & hash : hashes) { - auto pos = target.find(hash); - if (pos != std::string::npos) - hits[hash].emplace_back( - fmt("%s -> %s", p2, hilite(target, pos, StorePath::HashLen, getColour(hash)))); - } - } - }; - - // FIXME: should use scanForReferences(). - - if (precise) - visitPath(CanonPath::root); + }); + } for (auto & ref : refs) { std::string hash(ref.second->path.hashPart()); From ad664ce64e90234e6a0349b7b14f00bc9c82bf8e Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 27 Oct 2025 20:56:54 +0000 Subject: [PATCH 324/373] ci: cancel previous workflow runs on PR updates Add concurrency group configuration to the CI workflow to automatically cancel outdated runs when a PR receives new commits or is force-pushed. This prevents wasting CI resources on superseded code. 
--- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8a0820903..67e97b188 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,6 +14,10 @@ on: default: true type: boolean +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + permissions: read-all jobs: From dd716dc9be9d54df959b951d97c51c9eafa37d4d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 27 Oct 2025 15:48:07 -0400 Subject: [PATCH 325/373] Create default `Store::narFromPath` implementation in terms of `getFSAccessor` This is a good default (the methods that allow for an arbitrary choice of source accessor are generally preferable both to implement and to use). And it also pays its way by allowing us to delete *both* the `DummyStore` and `LocalStore` implementations. --- src/libstore/dummy-store.cc | 12 ------------ src/libstore/include/nix/store/local-fs-store.hh | 1 - src/libstore/include/nix/store/store-api.hh | 2 +- src/libstore/include/nix/store/uds-remote-store.hh | 2 +- src/libstore/local-fs-store.cc | 7 ------- src/libstore/restricted-store.cc | 2 +- src/libstore/ssh-store.cc | 2 +- src/libstore/store-api.cc | 7 +++++++ 8 files changed, 11 insertions(+), 24 deletions(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 6c8cb3480..1333e0aed 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -258,18 +258,6 @@ struct DummyStoreImpl : DummyStore }); } - void narFromPath(const StorePath & path, Sink & sink) override - { - bool visited = contents.cvisit(path, [&](const auto & kv) { - const auto & [info, accessor] = kv.second; - SourcePath sourcePath(accessor); - dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); - }); - - if (!visited) - throw Error("path '%s' is not valid", printStorePath(path)); - } - void queryRealisationUncached( const DrvOutput & 
drvOutput, Callback> callback) noexcept override { diff --git a/src/libstore/include/nix/store/local-fs-store.hh b/src/libstore/include/nix/store/local-fs-store.hh index 08f8e1656..100a4110d 100644 --- a/src/libstore/include/nix/store/local-fs-store.hh +++ b/src/libstore/include/nix/store/local-fs-store.hh @@ -78,7 +78,6 @@ struct LocalFSStore : virtual Store, virtual GcStore, virtual LogStore LocalFSStore(const Config & params); - void narFromPath(const StorePath & path, Sink & sink) override; ref getFSAccessor(bool requireValidPath = true) override; std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index d03e8e010..8fa13de34 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -609,7 +609,7 @@ public: /** * Write a NAR dump of a store path. */ - virtual void narFromPath(const StorePath & path, Sink & sink) = 0; + virtual void narFromPath(const StorePath & path, Sink & sink); /** * For each path, if it's a derivation, build it. 
Building a diff --git a/src/libstore/include/nix/store/uds-remote-store.hh b/src/libstore/include/nix/store/uds-remote-store.hh index fe6e486f4..764e8768a 100644 --- a/src/libstore/include/nix/store/uds-remote-store.hh +++ b/src/libstore/include/nix/store/uds-remote-store.hh @@ -68,7 +68,7 @@ struct UDSRemoteStore : virtual IndirectRootStore, virtual RemoteStore void narFromPath(const StorePath & path, Sink & sink) override { - LocalFSStore::narFromPath(path, sink); + Store::narFromPath(path, sink); } /** diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 28069dcaf..1a38cac3b 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -112,13 +112,6 @@ std::shared_ptr LocalFSStore::getFSAccessor(const StorePath & pa return std::make_shared(std::move(absPath)); } -void LocalFSStore::narFromPath(const StorePath & path, Sink & sink) -{ - if (!isValidPath(path)) - throw Error("path '%s' is not valid", printStorePath(path)); - dumpPath(getRealStoreDir() + std::string(printStorePath(path), storeDir.size()), sink); -} - const std::string LocalFSStore::drvsLogDir = "drvs"; std::optional LocalFSStore::getBuildLogExact(const StorePath & path) diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index 5270f7d10..ef8aaa380 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -226,7 +226,7 @@ void RestrictedStore::narFromPath(const StorePath & path, Sink & sink) { if (!goal.isAllowed(path)) throw InvalidPath("cannot dump unknown path '%s' in recursive Nix", printStorePath(path)); - LocalFSStore::narFromPath(path, sink); + Store::narFromPath(path, sink); } void RestrictedStore::ensurePath(const StorePath & path) diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index a7e28017f..ce973e734 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -143,7 +143,7 @@ struct MountedSSHStore : virtual SSHStore, virtual LocalFSStore void 
narFromPath(const StorePath & path, Sink & sink) override { - return LocalFSStore::narFromPath(path, sink); + return Store::narFromPath(path, sink); } ref getFSAccessor(bool requireValidPath) override diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index cdca6a763..08b75c8fa 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -300,6 +300,13 @@ ValidPathInfo Store::addToStoreSlow( return info; } +void Store::narFromPath(const StorePath & path, Sink & sink) +{ + auto accessor = requireStoreObjectAccessor(path); + SourcePath sourcePath{accessor}; + dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); +} + StringSet Store::Config::getDefaultSystemFeatures() { auto res = settings.systemFeatures.get(); From 234f029940ce9bfa86f6f49604a47561400d9e27 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 27 Oct 2025 15:39:58 -0400 Subject: [PATCH 326/373] Add consuming `ref` <-> `std::share_ptr` methods/ctrs This can help churning ref counts when we don't need to. 
--- src/libutil/include/nix/util/ref.hh | 32 ++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/src/libutil/include/nix/util/ref.hh b/src/libutil/include/nix/util/ref.hh index 7cf5ef25e..7ba5349a6 100644 --- a/src/libutil/include/nix/util/ref.hh +++ b/src/libutil/include/nix/util/ref.hh @@ -17,6 +17,12 @@ private: std::shared_ptr p; + void assertNonNull() + { + if (!p) + throw std::invalid_argument("null pointer cast to ref"); + } + public: using element_type = T; @@ -24,15 +30,19 @@ public: explicit ref(const std::shared_ptr & p) : p(p) { - if (!p) - throw std::invalid_argument("null pointer cast to ref"); + assertNonNull(); + } + + explicit ref(std::shared_ptr && p) + : p(std::move(p)) + { + assertNonNull(); } explicit ref(T * p) : p(p) { - if (!p) - throw std::invalid_argument("null pointer cast to ref"); + assertNonNull(); } T * operator->() const @@ -45,14 +55,22 @@ public: return *p; } - operator std::shared_ptr() const + std::shared_ptr get_ptr() const & { return p; } - std::shared_ptr get_ptr() const + std::shared_ptr get_ptr() && { - return p; + return std::move(p); + } + + /** + * Convenience to avoid explicit `get_ptr()` call in some cases. 
+     */
+    operator std::shared_ptr<T>(this auto && self)
+    {
+        return std::forward<decltype(self)>(self).get_ptr();
+    }
 
     template

From 4b6d07d64299e539ba4f421a6589abc4e630c36f Mon Sep 17 00:00:00 2001
From: Bernardo Meurer Costa
Date: Fri, 24 Oct 2025 23:53:39 +0000
Subject: [PATCH 327/373] feat(libstore/s3-binary-cache-store): implement `createMultipartUpload()`

POST to key with `?uploads` query parameter, optionally set
`Content-Encoding` header, parse `uploadId` from XML response using regex
---
 src/libstore/s3-binary-cache-store.cc | 43 +++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 98f742c70..58cb72776 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -4,6 +4,7 @@
 #include
 #include
+#include <regex>
 
 namespace nix {
 
@@ -27,6 +28,15 @@ public:
 private:
     ref s3Config;
 
+    /**
+     * Creates a multipart upload for large objects to S3.
+     *
+     * @see
+     * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html#API_CreateMultipartUpload_RequestSyntax
+     */
+    std::string createMultipartUpload(
+        std::string_view key, std::string_view mimeType, std::optional contentEncoding);
+
     /**
      * Abort a multipart upload
      *
@@ -45,6 +55,39 @@ void S3BinaryCacheStore::upsertFile(
     HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint);
 }
 
+std::string S3BinaryCacheStore::createMultipartUpload(
+    std::string_view key, std::string_view mimeType, std::optional contentEncoding)
+{
+    auto req = makeRequest(key);
+
+    // setupForS3() converts s3:// to https:// but strips query parameters
+    // So we call it first, then add our multipart parameters
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploads"] = "";
+    req.uri = VerbatimURL(url);
+
+    req.method = HttpMethod::POST;
+    req.data = "";
+    req.mimeType = mimeType;
+
+    if (contentEncoding) {
+        req.headers.emplace_back("Content-Encoding", *contentEncoding);
+    }
+
+    auto
    result = getFileTransfer()->enqueueFileTransfer(req).get();
+
+    std::regex uploadIdRegex("<UploadId>([^<]+)</UploadId>");
+    std::smatch match;
+
+    if (std::regex_search(result.data, match, uploadIdRegex)) {
+        return match[1];
+    }
+
+    throw Error("S3 CreateMultipartUpload response missing <UploadId>");
+}
+
 void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_view uploadId)
 {
     auto req = makeRequest(key);

From c592090fffde2fc107dec0bfd398ae7a9c0b4f35 Mon Sep 17 00:00:00 2001
From: Bernardo Meurer Costa
Date: Wed, 22 Oct 2025 08:02:25 +0000
Subject: [PATCH 328/373] feat(libstore/s3-binary-cache-store): implement `uploadPart()`

Implement `uploadPart()` for uploading individual parts in S3 multipart
uploads:

- Constructs URL with `?partNumber=N&uploadId=ID` query parameters
- Uploads chunk data with `application/octet-stream` mime type
- Extracts and returns `ETag` from response
---
 src/libstore/s3-binary-cache-store.cc | 31 +++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 58cb72776..828e75b7c 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -37,6 +37,15 @@ private:
     std::string createMultipartUpload(
         std::string_view key, std::string_view mimeType, std::optional contentEncoding);
 
+    /**
+     * Uploads a single part of a multipart upload
+     *
+     * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html#API_UploadPart_RequestSyntax
+     *
+     * @returns the [ETag](https://en.wikipedia.org/wiki/HTTP_ETag)
+     */
+    std::string uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string data);
+
     /**
      * Abort a multipart upload
      *
@@ -88,6 +97,28 @@ std::string S3BinaryCacheStore::createMultipartUpload(
     throw Error("S3 CreateMultipartUpload response missing <UploadId>");
 }
 
+std::string
+S3BinaryCacheStore::uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string
data) +{ + auto req = makeRequest(key); + req.setupForS3(); + + auto url = req.uri.parsed(); + url.query["partNumber"] = std::to_string(partNumber); + url.query["uploadId"] = uploadId; + req.uri = VerbatimURL(url); + req.data = std::move(data); + req.mimeType = "application/octet-stream"; + + auto result = getFileTransfer()->enqueueFileTransfer(req).get(); + + if (result.etag.empty()) { + throw Error("S3 UploadPart response missing ETag for part %d", partNumber); + } + + return std::move(result.etag); +} + void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_view uploadId) { auto req = makeRequest(key); From c77317b1a9086b9aa8ff1b22da051e520febe871 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 24 Oct 2025 23:54:49 +0000 Subject: [PATCH 329/373] feat(libstore/s3-binary-cache-store): implement `completeMultipartUpload()` `completeMultipartUpload()`: Build XML with part numbers and `ETags`, POST to key with `?uploadId` to finalize the multipart upload --- src/libstore/s3-binary-cache-store.cc | 42 +++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 828e75b7c..178373778 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -5,6 +5,7 @@ #include #include #include +#include namespace nix { @@ -46,6 +47,19 @@ private: */ std::string uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string data); + struct UploadedPart + { + uint64_t partNumber; + std::string etag; + }; + + /** + * Completes a multipart upload by combining all uploaded parts. 
+     * @see
+     * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html#API_CompleteMultipartUpload_RequestSyntax
+     */
+    void completeMultipartUpload(std::string_view key, std::string_view uploadId, std::span<const UploadedPart> parts);
+
     /**
      * Abort a multipart upload
      *
@@ -132,6 +146,34 @@ void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_
     getFileTransfer()->enqueueFileTransfer(req).get();
 }
 
+void S3BinaryCacheStore::completeMultipartUpload(
+    std::string_view key, std::string_view uploadId, std::span<const UploadedPart> parts)
+{
+    auto req = makeRequest(key);
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploadId"] = uploadId;
+    req.uri = VerbatimURL(url);
+    req.method = HttpMethod::POST;
+
+    std::string xml = "<CompleteMultipartUpload>";
+    for (const auto & part : parts) {
+        xml += "<Part>";
+        xml += "<PartNumber>" + std::to_string(part.partNumber) + "</PartNumber>";
+        xml += "<ETag>" + part.etag + "</ETag>";
+        xml += "</Part>";
+    }
+    xml += "</CompleteMultipartUpload>";
+
+    debug("S3 CompleteMultipartUpload XML (%d parts): %s", parts.size(), xml);
+
+    req.data = xml;
+    req.mimeType = "text/xml";
+
+    getFileTransfer()->enqueueFileTransfer(req).get();
+}
+
 StringSet S3BinaryCacheStoreConfig::uriSchemes()
 {
     return {"s3"};

From 94965a3a3eeac6574a06a36760e6470977a7c1f9 Mon Sep 17 00:00:00 2001
From: Bernardo Meurer Costa
Date: Wed, 22 Oct 2025 20:15:25 +0000
Subject: [PATCH 330/373] test(nixos): add S3 multipart upload integration tests
---
 tests/nixos/s3-binary-cache-store.nix | 129 ++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)

diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix
index a2ede4572..a07375489 100644
--- a/tests/nixos/s3-binary-cache-store.nix
+++ b/tests/nixos/s3-binary-cache-store.nix
@@ -34,8 +34,10 @@ in
         pkgA
         pkgB
         pkgC
+        pkgs.coreutils
       ];
       environment.systemPackages = [ pkgs.minio-client ];
+      nix.nixPath = [ "nixpkgs=${pkgs.path}" ];
       nix.extraOptions = ''
         experimental-features = nix-command
         substituters =
@@ -639,6 +641,129 @@ in
             )
             print(" ✓ Fetch with versionId
parameter works") + @setup_s3() + def test_multipart_upload_basic(bucket): + """Test basic multipart upload with a large file""" + print("\n--- Test: Multipart Upload Basic ---") + + large_file_size = 10 * 1024 * 1024 + large_pkg = server.succeed( + "nix-store --add $(dd if=/dev/urandom of=/tmp/large-file bs=1M count=10 2>/dev/null && echo /tmp/large-file)" + ).strip() + + chunk_size = 5 * 1024 * 1024 + expected_parts = 3 # 10 MB raw becomes ~10.5 MB compressed (NAR + xz overhead) + + store_url = make_s3_url( + bucket, + **{ + "multipart-upload": "true", + "multipart-threshold": str(5 * 1024 * 1024), + "multipart-chunk-size": str(chunk_size), + } + ) + + print(f" Uploading {large_file_size} byte file (expect {expected_parts} parts)") + output = server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {large_pkg} --debug 2>&1") + + if "using S3 multipart upload" not in output: + raise Exception("Expected multipart upload to be used") + + expected_msg = f"{expected_parts} parts uploaded" + if expected_msg not in output: + print("Debug output:") + print(output) + raise Exception(f"Expected '{expected_msg}' in output") + + print(f" ✓ Multipart upload used with {expected_parts} parts") + + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' {large_pkg} --no-check-sigs") + verify_packages_in_store(client, large_pkg, should_exist=True) + + print(" ✓ Large file downloaded and verified") + + @setup_s3() + def test_multipart_threshold(bucket): + """Test that files below threshold use regular upload""" + print("\n--- Test: Multipart Threshold Behavior ---") + + store_url = make_s3_url( + bucket, + **{ + "multipart-upload": "true", + "multipart-threshold": str(1024 * 1024 * 1024), + } + ) + + print(" Uploading small file with high threshold") + output = server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['A']} --debug 2>&1") + + if "using S3 multipart upload" in output: + raise Exception("Should not use multipart for file below threshold") + + 
if "using S3 regular upload" not in output: + raise Exception("Expected regular upload to be used") + + print(" ✓ Regular upload used for file below threshold") + + client.succeed(f"{ENV_WITH_CREDS} nix copy --no-check-sigs --from '{store_url}' {PKGS['A']}") + verify_packages_in_store(client, PKGS['A'], should_exist=True) + + print(" ✓ Small file uploaded and verified") + + @setup_s3() + def test_multipart_with_log_compression(bucket): + """Test multipart upload with compressed build logs""" + print("\n--- Test: Multipart Upload with Log Compression ---") + + # Create a derivation that produces a large text log (12 MB of base64 output) + drv_path = server.succeed( + """ + nix-instantiate --expr ' + let pkgs = import {}; + in derivation { + name = "large-log-builder"; + builder = "/bin/sh"; + args = ["-c" "$coreutils/bin/dd if=/dev/urandom bs=1M count=12 | $coreutils/bin/base64; echo success > $out"]; + coreutils = pkgs.coreutils; + system = builtins.currentSystem; + } + ' + """ + ).strip() + + print(" Building derivation to generate large log") + server.succeed(f"nix-store --realize {drv_path} &>/dev/null") + + # Upload logs with compression and multipart + store_url = make_s3_url( + bucket, + **{ + "multipart-upload": "true", + "multipart-threshold": str(5 * 1024 * 1024), + "multipart-chunk-size": str(5 * 1024 * 1024), + "log-compression": "xz", + } + ) + + print(" Uploading build log with compression and multipart") + output = server.succeed( + f"{ENV_WITH_CREDS} nix store copy-log --to '{store_url}' {drv_path} --debug 2>&1" + ) + + # Should use multipart for the compressed log + if "using S3 multipart upload" not in output or "log/" not in output: + print("Debug output:") + print(output) + raise Exception("Expected multipart upload to be used for compressed log") + + if "parts uploaded" not in output: + print("Debug output:") + print(output) + raise Exception("Expected multipart completion message") + + print(" ✓ Compressed log uploaded with multipart") + # 
============================================================================ # Main Test Execution # ============================================================================ @@ -669,6 +794,10 @@ in test_compression_disabled() test_nix_prefetch_url() test_versioned_urls() + # FIXME: enable when multipart fully lands + # test_multipart_upload_basic() + # test_multipart_threshold() + # test_multipart_with_log_compression() print("\n" + "="*80) print("✓ All S3 Binary Cache Store Tests Passed!") From 972915cabd772c4056fc4d08abd0579f1c252147 Mon Sep 17 00:00:00 2001 From: Adam Dinwoodie Date: Tue, 28 Oct 2025 09:36:46 +0000 Subject: [PATCH 331/373] docs: remove incorrect claim re gc --print-dead Per #7591, the `nix-store --gc --print-dead` command does not provide any feedback about the amount of disk space that is used by dead store paths. It looks like this has been the case since 7ab68961e (* Garbage collector: added an option `--use-atime' to delete paths in..., 2008-09-17). Update the nix-store documentation to remove the claim that this is function that `nix-store --gc --print-dead` performs. --- doc/manual/source/command-ref/nix-store/gc.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/manual/source/command-ref/nix-store/gc.md b/doc/manual/source/command-ref/nix-store/gc.md index f432e00eb..8ec59d906 100644 --- a/doc/manual/source/command-ref/nix-store/gc.md +++ b/doc/manual/source/command-ref/nix-store/gc.md @@ -48,8 +48,7 @@ The behaviour of the collector is also influenced by the configuration file. By default, the collector prints the total number of freed bytes when it -finishes (or when it is interrupted). With `--print-dead`, it prints the -number of bytes that would be freed. +finishes (or when it is interrupted). 
{{#include ./opt-common.md}} From 5fc0c4f1027f673f76768b2e8659321cedda6834 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 14:07:03 +0200 Subject: [PATCH 332/373] doc: Improve libexpr-c docs - Uses the more explicit `@ingroup` most of the time, to avoid problems with nested groups, and to make group membership more explicit. The division into headers is not great for documentation purposes, so this helps. - More attention for memory management details - Various other improvements to doc comments --- .../source/development/documentation.md | 6 + src/external-api-docs/README.md | 2 +- src/libexpr-c/nix_api_expr.h | 43 ++- src/libexpr-c/nix_api_external.h | 12 +- src/libexpr-c/nix_api_value.h | 250 +++++++++++++----- src/libutil-c/nix_api_util.h | 2 + 6 files changed, 237 insertions(+), 78 deletions(-) diff --git a/doc/manual/source/development/documentation.md b/doc/manual/source/development/documentation.md index a2a54175d..6823780cc 100644 --- a/doc/manual/source/development/documentation.md +++ b/doc/manual/source/development/documentation.md @@ -240,3 +240,9 @@ $ configurePhase $ ninja src/external-api-docs/html $ xdg-open src/external-api-docs/html/index.html ``` + +If you use direnv, or otherwise want to run `configurePhase` in a transient shell, use: + +```bash +nix-shell -A devShells.x86_64-linux.native-clangStdenv --command 'mesonFlags="$mesonFlags -Ddoc-gen=true"; mesonConfigurePhase' +``` diff --git a/src/external-api-docs/README.md b/src/external-api-docs/README.md index 8760ac88b..1940cc1c0 100644 --- a/src/external-api-docs/README.md +++ b/src/external-api-docs/README.md @@ -15,7 +15,7 @@ programmatically: 1. Embedding the evaluator 2. Writing language plug-ins -Embedding means you link the Nix C libraries in your program and use them from +Embedding means you link the Nix C API libraries in your program and use them from there. 
Adding a plug-in means you make a library that gets loaded by the Nix language evaluator, specified through a configuration option. diff --git a/src/libexpr-c/nix_api_expr.h b/src/libexpr-c/nix_api_expr.h index 2be739955..3623ee076 100644 --- a/src/libexpr-c/nix_api_expr.h +++ b/src/libexpr-c/nix_api_expr.h @@ -4,11 +4,14 @@ * @brief Bindings to the Nix language evaluator * * See *[Embedding the Nix Evaluator](@ref nix_evaluator_example)* for an example. - * @{ */ /** @file * @brief Main entry for the libexpr C bindings */ +/** @defgroup libexpr_init Initialization + * @ingroup libexpr + * @{ + */ #include "nix_api_store.h" #include "nix_api_util.h" @@ -45,7 +48,10 @@ typedef struct nix_eval_state_builder nix_eval_state_builder; */ typedef struct EvalState EvalState; // nix::EvalState +/** @} */ + /** @brief A Nix language value, or thunk that may evaluate to a value. + * @ingroup value * * Values are the primary objects manipulated in the Nix language. * They are considered to be immutable from a user's perspective, but the process of evaluating a value changes its @@ -56,7 +62,8 @@ typedef struct EvalState EvalState; // nix::EvalState * * The evaluator manages its own memory, but your use of the C API must follow the reference counting rules. * - * @see value_manip + * @struct nix_value + * @see value_create, value_extract * @see nix_value_incref, nix_value_decref */ typedef struct nix_value nix_value; @@ -65,6 +72,7 @@ NIX_DEPRECATED("use nix_value instead") typedef nix_value Value; // Function prototypes /** * @brief Initialize the Nix language evaluator. + * @ingroup libexpr_init * * This function must be called at least once, * at some point before constructing a EvalState for the first time. @@ -77,6 +85,7 @@ nix_err nix_libexpr_init(nix_c_context * context); /** * @brief Parses and evaluates a Nix expression from a string. + * @ingroup value_create * * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. 
@@ -93,6 +102,7 @@ nix_err nix_expr_eval_from_string( /** * @brief Calls a Nix function with an argument. + * @ingroup value_create * * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. @@ -107,6 +117,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, nix_value * f /** * @brief Calls a Nix function with multiple arguments. + * @ingroup value_create * * Technically these are functions that return functions. It is common for Nix * functions to be curried, so this function is useful for calling them. @@ -126,10 +137,12 @@ nix_err nix_value_call_multi( /** * @brief Calls a Nix function with multiple arguments. + * @ingroup value_create * * Technically these are functions that return functions. It is common for Nix * functions to be curried, so this function is useful for calling them. * + * @def NIX_VALUE_CALL * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. * @param[out] value The result of the function call. @@ -147,6 +160,7 @@ nix_err nix_value_call_multi( /** * @brief Forces the evaluation of a Nix value. + * @ingroup value_create * * The Nix interpreter is lazy, and not-yet-evaluated values can be * of type NIX_TYPE_THUNK instead of their actual value. @@ -180,18 +194,20 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val /** * @brief Create a new nix_eval_state_builder + * @ingroup libexpr_init * * The settings are initialized to their default value. * Values can be sourced elsewhere with nix_eval_state_builder_load. * * @param[out] context Optional, stores error information * @param[in] store The Nix store to use. - * @return A new nix_eval_state_builder or NULL on failure. + * @return A new nix_eval_state_builder or NULL on failure. Call nix_eval_state_builder_free() when you're done. 
*/ nix_eval_state_builder * nix_eval_state_builder_new(nix_c_context * context, Store * store); /** * @brief Read settings from the ambient environment + * @ingroup libexpr_init * * Settings are sourced from environment variables and configuration files, * as documented in the Nix manual. @@ -204,6 +220,7 @@ nix_err nix_eval_state_builder_load(nix_c_context * context, nix_eval_state_buil /** * @brief Set the lookup path for `<...>` expressions + * @ingroup libexpr_init * * @param[in] context Optional, stores error information * @param[in] builder The builder to modify. @@ -214,18 +231,21 @@ nix_err nix_eval_state_builder_set_lookup_path( /** * @brief Create a new Nix language evaluator state + * @ingroup libexpr_init * - * Remember to nix_eval_state_builder_free after building the state. + * The builder becomes unusable after this call. Remember to call nix_eval_state_builder_free() + * after building the state. * * @param[out] context Optional, stores error information * @param[in] builder The builder to use and free - * @return A new Nix state or NULL on failure. + * @return A new Nix state or NULL on failure. Call nix_state_free() when you're done. * @see nix_eval_state_builder_new, nix_eval_state_builder_free */ EvalState * nix_eval_state_build(nix_c_context * context, nix_eval_state_builder * builder); /** * @brief Free a nix_eval_state_builder + * @ingroup libexpr_init * * Does not fail. * @@ -235,19 +255,21 @@ void nix_eval_state_builder_free(nix_eval_state_builder * builder); /** * @brief Create a new Nix language evaluator state + * @ingroup libexpr_init * * For more control, use nix_eval_state_builder * * @param[out] context Optional, stores error information * @param[in] lookupPath Null-terminated array of strings corresponding to entries in NIX_PATH. * @param[in] store The Nix store to use. - * @return A new Nix state or NULL on failure. + * @return A new Nix state or NULL on failure. Call nix_state_free() when you're done. 
* @see nix_state_builder_new */ EvalState * nix_state_create(nix_c_context * context, const char ** lookupPath, Store * store); /** * @brief Frees a Nix state. + * @ingroup libexpr_init * * Does not fail. * @@ -256,6 +278,7 @@ EvalState * nix_state_create(nix_c_context * context, const char ** lookupPath, void nix_state_free(EvalState * state); /** @addtogroup GC + * @ingroup libexpr * @brief Reference counting and garbage collector operations * * The Nix language evaluator uses a garbage collector. To ease C interop, we implement @@ -286,6 +309,9 @@ nix_err nix_gc_incref(nix_c_context * context, const void * object); /** * @brief Decrement the garbage collector reference counter for the given object * + * @deprecated We are phasing out the general nix_gc_decref() in favor of type-specified free functions, such as + * nix_value_decref(). + * * We also provide typed `nix_*_decref` functions, which are * - safer to use * - easier to integrate when deriving bindings @@ -314,12 +340,11 @@ void nix_gc_now(); */ void nix_gc_register_finalizer(void * obj, void * cd, void (*finalizer)(void * obj, void * cd)); -/** @} */ +/** @} */ // doxygen group GC + // cffi end #ifdef __cplusplus } #endif -/** @} */ - #endif // NIX_API_EXPR_H diff --git a/src/libexpr-c/nix_api_external.h b/src/libexpr-c/nix_api_external.h index f4a327281..96c479d57 100644 --- a/src/libexpr-c/nix_api_external.h +++ b/src/libexpr-c/nix_api_external.h @@ -2,11 +2,12 @@ #define NIX_API_EXTERNAL_H /** @ingroup libexpr * @addtogroup Externals - * @brief Deal with external values + * @brief Externals let Nix expressions work with foreign values that aren't part of the normal Nix value data model * @{ */ /** @file * @brief libexpr C bindings dealing with external values + * @see Externals */ #include "nix_api_expr.h" @@ -115,7 +116,7 @@ typedef struct NixCExternalValueDesc * @brief Try to compare two external values * * Optional, the default is always false. 
- * If the other object was not a Nix C external value, this comparison will + * If the other object was not a Nix C API external value, this comparison will * also return false * @param[in] self the void* passed to nix_create_external_value * @param[in] other the void* passed to the other object's @@ -168,7 +169,7 @@ typedef struct NixCExternalValueDesc /** * @brief Create an external value, that can be given to nix_init_external * - * Owned by the GC. Use nix_gc_decref when you're done with the pointer. + * Call nix_gc_decref() when you're done with the pointer. * * @param[out] context Optional, stores error information * @param[in] desc a NixCExternalValueDesc, you should keep this alive as long @@ -180,10 +181,11 @@ typedef struct NixCExternalValueDesc ExternalValue * nix_create_external_value(nix_c_context * context, NixCExternalValueDesc * desc, void * v); /** - * @brief Extract the pointer from a nix c external value. + * @brief Extract the pointer from a Nix C API external value. * @param[out] context Optional, stores error information * @param[in] b The external value - * @returns The pointer, or null if the external value was not from nix c. + * @returns The pointer, valid while the external value is valid, or null if the external value was not from the Nix C + * API. 
* @see nix_get_external */ void * nix_get_external_value_content(nix_c_context * context, ExternalValue * b); diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 835eaec6e..5bd45da90 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -1,9 +1,6 @@ #ifndef NIX_API_VALUE_H #define NIX_API_VALUE_H -/** @addtogroup libexpr - * @{ - */ /** @file * @brief libexpr C bindings dealing with values */ @@ -20,18 +17,89 @@ extern "C" { #endif // cffi start +/** @defgroup value Value + * @ingroup libexpr + * @brief nix_value type and core operations for working with Nix values + * @see value_create + * @see value_extract + */ + +/** @defgroup value_create Value Creation + * @ingroup libexpr + * @brief Functions for allocating and initializing Nix values + * + * Values are usually created with `nix_alloc_value` followed by `nix_init_*` functions. + * In primop callbacks, allocation is already done and only initialization is needed. + */ + +/** @defgroup value_extract Value Extraction + * @ingroup libexpr + * @brief Functions for extracting data from Nix values + */ + +/** @defgroup primops PrimOps and Builtins + * @ingroup libexpr + */ + // Type definitions +/** @brief Represents the state of a Nix value + * + * Thunk values (NIX_TYPE_THUNK) change to their final, unchanging type when forced. + * + * @see https://nix.dev/manual/nix/latest/language/evaluation.html + * @enum ValueType + * @ingroup value + */ typedef enum { + /** Unevaluated expression + * + * Thunks often contain an expression and closure, but may contain other + * representations too. + * + * Their state is mutable, unlike that of the other types. + */ NIX_TYPE_THUNK, + /** + * A 64 bit signed integer. 
+ */ NIX_TYPE_INT, + /** @brief IEEE 754 double precision floating point number + * @see https://nix.dev/manual/nix/latest/language/types.html#type-float + */ NIX_TYPE_FLOAT, + /** @brief Boolean true or false value + * @see https://nix.dev/manual/nix/latest/language/types.html#type-bool + */ NIX_TYPE_BOOL, + /** @brief String value with context + * + * String content may contain arbitrary bytes, not necessarily UTF-8. + * @see https://nix.dev/manual/nix/latest/language/types.html#type-string + */ NIX_TYPE_STRING, + /** @brief Filesystem path + * @see https://nix.dev/manual/nix/latest/language/types.html#type-path + */ NIX_TYPE_PATH, + /** @brief Null value + * @see https://nix.dev/manual/nix/latest/language/types.html#type-null + */ NIX_TYPE_NULL, + /** @brief Attribute set (key-value mapping) + * @see https://nix.dev/manual/nix/latest/language/types.html#type-attrs + */ NIX_TYPE_ATTRS, + /** @brief Ordered list of values + * @see https://nix.dev/manual/nix/latest/language/types.html#type-list + */ NIX_TYPE_LIST, + /** @brief Function (lambda or builtin) + * @see https://nix.dev/manual/nix/latest/language/types.html#type-function + */ NIX_TYPE_FUNCTION, + /** @brief External value from C++ plugins or C API + * @see Externals + */ NIX_TYPE_EXTERNAL } ValueType; @@ -39,22 +107,41 @@ typedef enum { typedef struct nix_value nix_value; typedef struct EvalState EvalState; +/** @deprecated Use nix_value instead */ [[deprecated("use nix_value instead")]] typedef nix_value Value; // type defs /** @brief Stores an under-construction set of bindings - * @ingroup value_manip + * @ingroup value_create * - * Do not reuse. + * Each builder can only be used once. After calling nix_make_attrs(), the builder + * becomes invalid and must not be used again. Call nix_bindings_builder_free() to release it. + * + * Typical usage pattern: + * 1. Create with nix_make_bindings_builder() + * 2. Insert attributes with nix_bindings_builder_insert() + * 3. 
Create final attribute set with nix_make_attrs() + * 4. Free builder with nix_bindings_builder_free() + * + * @struct BindingsBuilder * @see nix_make_bindings_builder, nix_bindings_builder_free, nix_make_attrs * @see nix_bindings_builder_insert */ typedef struct BindingsBuilder BindingsBuilder; /** @brief Stores an under-construction list - * @ingroup value_manip + * @ingroup value_create * - * Do not reuse. + * Each builder can only be used once. After calling nix_make_list(), the builder + * becomes invalid and must not be used again. Call nix_list_builder_free() to release it. + * + * Typical usage pattern: + * 1. Create with nix_make_list_builder() + * 2. Insert elements with nix_list_builder_insert() + * 3. Create final list with nix_make_list() + * 4. Free builder with nix_list_builder_free() + * + * @struct ListBuilder * @see nix_make_list_builder, nix_list_builder_free, nix_make_list * @see nix_list_builder_insert */ @@ -63,25 +150,28 @@ typedef struct ListBuilder ListBuilder; /** @brief PrimOp function * @ingroup primops * - * Owned by the GC - * @see nix_alloc_primop, nix_init_primop + * Can be released with nix_gc_decref() when necessary. + * @struct PrimOp + * @see nix_alloc_primop, nix_init_primop, nix_register_primop */ typedef struct PrimOp PrimOp; /** @brief External Value * @ingroup Externals * - * Owned by the GC + * Can be released with nix_gc_decref() when necessary. + * @struct ExternalValue + * @see nix_create_external_value, nix_init_external, nix_get_external */ typedef struct ExternalValue ExternalValue; /** @brief String without placeholders, and realised store paths + * @struct nix_realised_string + * @see nix_string_realise, nix_realised_string_free */ typedef struct nix_realised_string nix_realised_string; -/** @defgroup primops Adding primops - * @{ - */ /** @brief Function pointer for primops + * @ingroup primops * * When you want to return an error, call nix_set_err_msg(context, NIX_ERR_UNKNOWN, "your error message here"). 
* @@ -97,9 +187,9 @@ typedef void (*PrimOpFun)( void * user_data, nix_c_context * context, EvalState * state, nix_value ** args, nix_value * ret); /** @brief Allocate a PrimOp + * @ingroup primops * - * Owned by the garbage collector. - * Use nix_gc_decref() when you're done with the returned PrimOp. + * Call nix_gc_decref() when you're done with the returned PrimOp. * * @param[out] context Optional, stores error information * @param[in] fun callback @@ -121,35 +211,38 @@ PrimOp * nix_alloc_primop( void * user_data); /** @brief add a primop to the `builtins` attribute set + * @ingroup primops * * Only applies to States created after this call. * - * Moves your PrimOp content into the global evaluator - * registry, meaning your input PrimOp pointer is no longer usable. - * You are free to remove your references to it, - * after which it will be garbage collected. + * Moves your PrimOp content into the global evaluator registry, meaning + * your input PrimOp pointer becomes invalid. The PrimOp must not be used + * with nix_init_primop() before or after this call, as this would cause + * undefined behavior. + * You must call nix_gc_decref() on the original PrimOp pointer + * after this call to release your reference. * * @param[out] context Optional, stores error information - * @return primop, or null in case of errors - * + * @param[in] primOp PrimOp to register + * @return error code, NIX_OK on success */ nix_err nix_register_primop(nix_c_context * context, PrimOp * primOp); -/** @} */ // Function prototypes /** @brief Allocate a Nix value + * @ingroup value_create * - * Owned by the GC. 
Use nix_gc_decref() when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] state nix evaluator state * @return value, or null in case of errors - * */ nix_value * nix_alloc_value(nix_c_context * context, EvalState * state); /** * @brief Increment the garbage collector reference counter for the given `nix_value`. + * @ingroup value * * The Nix language evaluator C API keeps track of alive objects by reference counting. * When you're done with a refcounted pointer, call nix_value_decref(). @@ -161,21 +254,19 @@ nix_err nix_value_incref(nix_c_context * context, nix_value * value); /** * @brief Decrement the garbage collector reference counter for the given object + * @ingroup value + * + * When the counter reaches zero, the `nix_value` object becomes invalid. + * The data referenced by `nix_value` may not be deallocated until the memory + * garbage collector has run, but deallocation is not guaranteed. * * @param[out] context Optional, stores error information * @param[in] value The object to stop referencing */ nix_err nix_value_decref(nix_c_context * context, nix_value * value); -/** @addtogroup value_manip Manipulating values - * @brief Functions to inspect and change Nix language values, represented by nix_value. 
- * @{ - */ -/** @anchor getters - * @name Getters - */ -/**@{*/ /** @brief Get value type + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return type of nix value @@ -183,14 +274,15 @@ nix_err nix_value_decref(nix_c_context * context, nix_value * value); ValueType nix_get_type(nix_c_context * context, const nix_value * value); /** @brief Get type name of value as defined in the evaluator + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return type name, owned string - * @todo way to free the result + * @return type name string, free with free() */ const char * nix_get_typename(nix_c_context * context, const nix_value * value); /** @brief Get boolean value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return true or false, error info via context @@ -198,6 +290,7 @@ const char * nix_get_typename(nix_c_context * context, const nix_value * value); bool nix_get_bool(nix_c_context * context, const nix_value * value); /** @brief Get the raw string + * @ingroup value_extract * * This may contain placeholders. * @@ -205,21 +298,21 @@ bool nix_get_bool(nix_c_context * context, const nix_value * value); * @param[in] value Nix value to inspect * @param[in] callback Called with the string value. * @param[in] user_data optional, arbitrary data, passed to the callback when it's called. - * @return string * @return error code, NIX_OK on success. */ nix_err nix_get_string(nix_c_context * context, const nix_value * value, nix_get_string_callback callback, void * user_data); /** @brief Get path as string + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return string, if the type is NIX_TYPE_PATH - * @return NULL in case of error. 
+ * @return string valid while value is valid, NULL in case of error */ const char * nix_get_path_string(nix_c_context * context, const nix_value * value); /** @brief Get the length of a list + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return length of list, error info via context @@ -227,6 +320,7 @@ const char * nix_get_path_string(nix_c_context * context, const nix_value * valu unsigned int nix_get_list_size(nix_c_context * context, const nix_value * value); /** @brief Get the element count of an attrset + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return attrset element count, error info via context @@ -234,6 +328,7 @@ unsigned int nix_get_list_size(nix_c_context * context, const nix_value * value) unsigned int nix_get_attrs_size(nix_c_context * context, const nix_value * value); /** @brief Get float value in 64 bits + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return float contents, error info via context @@ -241,6 +336,7 @@ unsigned int nix_get_attrs_size(nix_c_context * context, const nix_value * value double nix_get_float(nix_c_context * context, const nix_value * value); /** @brief Get int value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return int contents, error info via context @@ -248,15 +344,18 @@ double nix_get_float(nix_c_context * context, const nix_value * value); int64_t nix_get_int(nix_c_context * context, const nix_value * value); /** @brief Get external reference + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return reference to external, NULL in case of error + * @return reference valid while value is valid. 
Call nix_gc_incref() if you need it to live longer, then only in that + * case call nix_gc_decref() when done. NULL in case of error */ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value); /** @brief Get the ix'th element of a list + * @ingroup value_extract * - * Owned by the GC. Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -266,11 +365,12 @@ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value); nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); /** @brief Get the ix'th element of a list without forcing evaluation of the element + * @ingroup value_extract * * Returns the list element without forcing its evaluation, allowing access to lazy values. * The list value itself must already be evaluated. * - * Owned by the GC. 
Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated list) * @param[in] state nix evaluator state @@ -281,8 +381,9 @@ nix_value * nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); /** @brief Get an attr by name + * @ingroup value_extract * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -292,11 +393,12 @@ nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalSt nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Get an attribute value by attribute name, without forcing evaluation of the attribute's value + * @ingroup value_extract * * Returns the attribute value without forcing its evaluation, allowing access to lazy values. * The attribute set value itself must already be evaluated. 
* - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated attribute set) * @param[in] state nix evaluator state @@ -307,6 +409,7 @@ nix_value * nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Check if an attribute name exists on a value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -316,6 +419,7 @@ nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalS bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Get an attribute by index + * @ingroup value_extract * * Also gives you the name. * @@ -329,18 +433,19 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state * @param[in] i attribute index - * @param[out] name will store a pointer to the attribute name + * @param[out] name will store a pointer to the attribute name, valid until state is freed * @return value, NULL in case of errors */ nix_value * nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); /** @brief Get an attribute by index, without forcing evaluation of the attribute's value + * @ingroup value_extract * * Also gives you the name. 
* @@ -357,18 +462,19 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated attribute set) * @param[in] state nix evaluator state * @param[in] i attribute index - * @param[out] name will store a pointer to the attribute name + * @param[out] name will store a pointer to the attribute name, valid until state is freed * @return value, NULL in case of errors */ nix_value * nix_get_attr_byidx_lazy( nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); /** @brief Get an attribute name by index + * @ingroup value_extract * * Returns the attribute name without forcing evaluation of the attribute's value. * @@ -382,16 +488,14 @@ nix_value * nix_get_attr_byidx_lazy( * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. * - * Owned by the nix EvalState * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state * @param[in] i attribute index - * @return name, NULL in case of errors + * @return name string valid until state is freed, NULL in case of errors */ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i); -/**@}*/ /** @name Initializers * * Values are typically "returned" by initializing already allocated memory that serves as the return value. 
@@ -401,6 +505,7 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, */ /**@{*/ /** @brief Set boolean value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] b the boolean value @@ -409,6 +514,7 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, nix_err nix_init_bool(nix_c_context * context, nix_value * value, bool b); /** @brief Set a string + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] str the string, copied @@ -417,6 +523,7 @@ nix_err nix_init_bool(nix_c_context * context, nix_value * value, bool b); nix_err nix_init_string(nix_c_context * context, nix_value * value, const char * str); /** @brief Set a path + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] str the path string, copied @@ -425,6 +532,7 @@ nix_err nix_init_string(nix_c_context * context, nix_value * value, const char * nix_err nix_init_path_string(nix_c_context * context, EvalState * s, nix_value * value, const char * str); /** @brief Set a float + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] d the float, 64-bits @@ -433,6 +541,7 @@ nix_err nix_init_path_string(nix_c_context * context, EvalState * s, nix_value * nix_err nix_init_float(nix_c_context * context, nix_value * value, double d); /** @brief Set an int + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] i the int @@ -441,6 +550,7 @@ nix_err nix_init_float(nix_c_context * context, nix_value * value, double d); nix_err nix_init_int(nix_c_context * context, nix_value * value, int64_t i); /** @brief Set null + * @ingroup value_create * @param[out] 
context Optional, stores error information * @param[out] value Nix value to modify * @return error code, NIX_OK on success. @@ -448,6 +558,7 @@ nix_err nix_init_int(nix_c_context * context, nix_value * value, int64_t i); nix_err nix_init_null(nix_c_context * context, nix_value * value); /** @brief Set the value to a thunk that will perform a function application when needed. + * @ingroup value_create * * Thunks may be put into attribute sets and lists to perform some computation lazily; on demand. * However, note that in some places, a thunk must not be returned, such as in the return value of a PrimOp. @@ -464,6 +575,7 @@ nix_err nix_init_null(nix_c_context * context, nix_value * value); nix_err nix_init_apply(nix_c_context * context, nix_value * value, nix_value * fn, nix_value * arg); /** @brief Set an external value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] val the external value to set. Will be GC-referenced by the value. @@ -472,18 +584,25 @@ nix_err nix_init_apply(nix_c_context * context, nix_value * value, nix_value * f nix_err nix_init_external(nix_c_context * context, nix_value * value, ExternalValue * val); /** @brief Create a list from a list builder + * @ingroup value_create + * + * After this call, the list builder becomes invalid and cannot be used again. + * The only necessary next step is to free it with nix_list_builder_free(). + * * @param[out] context Optional, stores error information - * @param[in] list_builder list builder to use. Make sure to unref this afterwards. + * @param[in] list_builder list builder to use * @param[out] value Nix value to modify * @return error code, NIX_OK on success. 
+ * @see nix_list_builder_free */ nix_err nix_make_list(nix_c_context * context, ListBuilder * list_builder, nix_value * value); /** @brief Create a list builder + * @ingroup value_create * @param[out] context Optional, stores error information * @param[in] state nix evaluator state * @param[in] capacity how many bindings you'll add. Don't exceed. - * @return owned reference to a list builder. Make sure to unref when you're done. + * @return list builder. Call nix_list_builder_free() when you're done. */ ListBuilder * nix_make_list_builder(nix_c_context * context, EvalState * state, size_t capacity); @@ -505,14 +624,21 @@ nix_list_builder_insert(nix_c_context * context, ListBuilder * list_builder, uns void nix_list_builder_free(ListBuilder * list_builder); /** @brief Create an attribute set from a bindings builder + * @ingroup value_create + * + * After this call, the bindings builder becomes invalid and cannot be used again. + * The only necessary next step is to free it with nix_bindings_builder_free(). + * * @param[out] context Optional, stores error information * @param[out] value Nix value to modify - * @param[in] b bindings builder to use. Make sure to unref this afterwards. + * @param[in] b bindings builder to use * @return error code, NIX_OK on success. 
+ * @see nix_bindings_builder_free */ nix_err nix_make_attrs(nix_c_context * context, nix_value * value, BindingsBuilder * b); /** @brief Set primop + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] op primop, will be gc-referenced by the value @@ -521,6 +647,7 @@ nix_err nix_make_attrs(nix_c_context * context, nix_value * value, BindingsBuild */ nix_err nix_init_primop(nix_c_context * context, nix_value * value, PrimOp * op); /** @brief Copy from another value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] source value to copy from @@ -530,12 +657,11 @@ nix_err nix_copy_value(nix_c_context * context, nix_value * value, const nix_val /**@}*/ /** @brief Create a bindings builder -* @param[out] context Optional, stores error information -* @param[in] state nix evaluator state -* @param[in] capacity how many bindings you'll add. Don't exceed. -* @return owned reference to a bindings builder. Make sure to unref when you're -done. -*/ + * @param[out] context Optional, stores error information + * @param[in] state nix evaluator state + * @param[in] capacity how many bindings you'll add. Don't exceed. + * @return bindings builder. Call nix_bindings_builder_free() when you're done. + */ BindingsBuilder * nix_make_bindings_builder(nix_c_context * context, EvalState * state, size_t capacity); /** @brief Insert bindings into a builder @@ -554,7 +680,6 @@ nix_bindings_builder_insert(nix_c_context * context, BindingsBuilder * builder, * @param[in] builder the builder to free */ void nix_bindings_builder_free(BindingsBuilder * builder); -/**@}*/ /** @brief Realise a string context. * @@ -571,13 +696,13 @@ void nix_bindings_builder_free(BindingsBuilder * builder); * @param[in] isIFD If true, disallow derivation outputs if setting `allow-import-from-derivation` is false. 
You should set this to true when this call is part of a primop. You should set this to false when building for your application's purpose. - * @return NULL if failed, are a new nix_realised_string, which must be freed with nix_realised_string_free + * @return NULL if failed, or a new nix_realised_string, which must be freed with nix_realised_string_free */ nix_realised_string * nix_string_realise(nix_c_context * context, EvalState * state, nix_value * value, bool isIFD); /** @brief Start of the string * @param[in] realised_string - * @return pointer to the start of the string. It may not be null-terminated. + * @return pointer to the start of the string, valid until realised_string is freed. It may not be null-terminated. */ const char * nix_realised_string_get_buffer_start(nix_realised_string * realised_string); @@ -596,7 +721,7 @@ size_t nix_realised_string_get_store_path_count(nix_realised_string * realised_s /** @brief Get a store path. The store paths are stored in an arbitrary order. * @param[in] realised_string * @param[in] index index of the store path, must be less than the count - * @return store path + * @return store path valid until realised_string is freed */ const StorePath * nix_realised_string_get_store_path(nix_realised_string * realised_string, size_t index); @@ -610,5 +735,4 @@ void nix_realised_string_free(nix_realised_string * realised_string); } #endif -/** @} */ #endif // NIX_API_VALUE_H diff --git a/src/libutil-c/nix_api_util.h b/src/libutil-c/nix_api_util.h index 4d7f394fa..d301e5743 100644 --- a/src/libutil-c/nix_api_util.h +++ b/src/libutil-c/nix_api_util.h @@ -155,6 +155,8 @@ typedef struct nix_c_context nix_c_context; /** * @brief Called to get the value of a string owned by Nix. * + * The `start` data is borrowed and the function must not assume that the buffer persists after it returns. + * * @param[in] start the string to copy. * @param[in] n the string length. 
* @param[in] user_data optional, arbitrary data, passed to the nix_get_string_callback when it's called. From 883860c7ff6638f8069d8a6bb1be6ba2065c4608 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 28 Oct 2025 11:14:31 -0700 Subject: [PATCH 333/373] Move docker documentation to docker.io --- doc/manual/source/installation/installing-docker.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/manual/source/installation/installing-docker.md b/doc/manual/source/installation/installing-docker.md index 9354c1a72..92fa55e1c 100644 --- a/doc/manual/source/installation/installing-docker.md +++ b/doc/manual/source/installation/installing-docker.md @@ -3,14 +3,14 @@ To run the latest stable release of Nix with Docker run the following command: ```console -$ docker run -ti ghcr.io/nixos/nix -Unable to find image 'ghcr.io/nixos/nix:latest' locally -latest: Pulling from ghcr.io/nixos/nix +$ docker run -ti docker.io/nixos/nix +Unable to find image 'docker.io/nixos/nix:latest' locally +latest: Pulling from docker.io/nixos/nix 5843afab3874: Pull complete b52bf13f109c: Pull complete 1e2415612aa3: Pull complete Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff -Status: Downloaded newer image for ghcr.io/nixos/nix:latest +Status: Downloaded newer image for docker.io/nixos/nix:latest 35ca4ada6e96:/# nix --version nix (Nix) 2.3.12 35ca4ada6e96:/# exit From 943788754fc695dbe1b8cb3057f7fc1a16858e2c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 28 Oct 2025 11:16:37 -0700 Subject: [PATCH 334/373] Add ghcr for pre-release --- doc/manual/source/installation/installing-docker.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/manual/source/installation/installing-docker.md b/doc/manual/source/installation/installing-docker.md index 92fa55e1c..ccc75be5a 100644 --- a/doc/manual/source/installation/installing-docker.md +++ b/doc/manual/source/installation/installing-docker.md @@ -16,6 +16,8 @@ nix (Nix) 2.3.12 
35ca4ada6e96:/# exit ``` +> If you want the latest pre-release, you can use ghcr.io/nixos/nix; the available pre-release images can be viewed at https://github.com/nixos/nix/pkgs/container/nix + # What is included in Nix's Docker image? The official Docker image is created using `pkgs.dockerTools.buildLayeredImage` From f5aafbd6ed5ea7a38d27d51cf82d77634d341a05 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 28 Oct 2025 19:39:04 +0100 Subject: [PATCH 335/373] .coderabbit.yaml: Disable auto-review --- .coderabbit.yaml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .coderabbit.yaml diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 000000000..5122f01e0 --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,6 @@ +# Disable CodeRabbit auto-review to prevent verbose comments on PRs. +# When enabled: false, CodeRabbit won't attempt reviews and won't post +# "Review skipped" or other automated comments. +reviews: + auto_review: + enabled: false From e3246301a6dcd2c722241f4756484d40bc06f48a Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 28 Oct 2025 14:49:04 -0400 Subject: [PATCH 336/373] Enable JSON schema testing for derivation outputs I figured out what the problem was: the fragment needs to start with a `/`.
--- src/json-schema-checks/meson.build | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/json-schema-checks/meson.build b/src/json-schema-checks/meson.build index 745fb5ffa..8e8ac57c4 100644 --- a/src/json-schema-checks/meson.build +++ b/src/json-schema-checks/meson.build @@ -46,20 +46,19 @@ schemas = [ 'simple-derivation.json', ], }, - # # Not sure how to make subschema work - # { - # 'stem': 'derivation', - # 'schema': schema_dir / 'derivation-v3.yaml#output', - # 'files' : [ - # 'output-caFixedFlat.json', - # 'output-caFixedNAR.json', - # 'output-caFixedText.json', - # 'output-caFloating.json', - # 'output-deferred.json', - # 'output-impure.json', - # 'output-inputAddressed.json', - # ], - # }, + { + 'stem' : 'derivation', + 'schema' : schema_dir / 'derivation-v3.yaml#/$defs/output', + 'files' : [ + 'output-caFixedFlat.json', + 'output-caFixedNAR.json', + 'output-caFixedText.json', + 'output-caFloating.json', + 'output-deferred.json', + 'output-impure.json', + 'output-inputAddressed.json', + ], + }, { 'stem' : 'deriving-path', 'schema' : schema_dir / 'deriving-path-v1.yaml', From 84a5bee424ab25bd0dbc89b3abc6adb208142396 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 28 Oct 2025 21:41:20 +0100 Subject: [PATCH 337/373] coderabbit: disable reporting review status --- .coderabbit.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.coderabbit.yaml b/.coderabbit.yaml index 5122f01e0..815dc27a5 100644 --- a/.coderabbit.yaml +++ b/.coderabbit.yaml @@ -4,3 +4,4 @@ reviews: auto_review: enabled: false + review_status: false From fe8cdbc3e41ecab02d451c8864e6309507d3c7ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 28 Oct 2025 21:48:33 +0100 Subject: [PATCH 338/373] coderabbit: disable high_level_summary/poem/github status/sequence_diagrams --- .coderabbit.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.coderabbit.yaml b/.coderabbit.yaml index 
815dc27a5..00244700a 100644 --- a/.coderabbit.yaml +++ b/.coderabbit.yaml @@ -5,3 +5,10 @@ reviews: auto_review: enabled: false review_status: false + high_level_summary: false + poem: false + sequence_diagrams: false + changed_files_summary: false + tools: + github-checks: + enabled: false From be2572ed8d0c9dd626462229436ba7aaf2369690 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 28 Oct 2025 17:16:38 -0400 Subject: [PATCH 339/373] Make `inputDrvs` JSON schema more precise It now captures the stable non-recursive format (just an output set) and the unstable recursive form for dynamic derivations. --- .../protocols/json/schema/derivation-v3.yaml | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml index c950b839f..30fddf699 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v3.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -103,6 +103,13 @@ properties: > ``` > > specifies that this derivation depends on the `dev` output of `curl`, and the `out` output of `unzip`. + additionalProperties: + title: Store Path + description: | + A store path to a derivation, mapped to the outputs of that derivation. + oneOf: + - "$ref": "#/$defs/outputNames" + - "$ref": "#/$defs/dynamicOutputs" system: type: string @@ -167,3 +174,28 @@ properties: title: Expected hash value description: | For fixed-output derivations, the expected content hash in base-16. 
+ + outputName: + type: string + title: Output name + description: Name of the derivation output to depend on + + outputNames: + type: array + title: Output Names + description: Set of names of derivation outputs to depend on + items: + "$ref": "#/$defs/outputName" + + dynamicOutputs: + type: object + title: Dynamic Outputs + description: | + **Experimental feature**: [`dynamic-derivations`](@docroot@/development/experimental-features.md#xp-feature-dynamic-derivations) + + This recursive data type allows for depending on outputs of outputs. + properties: + outputs: + "$ref": "#/$defs/outputNames" + dynamicOutputs: + "$ref": "#/$defs/dynamicOutputs" From c67966418f99120a31e3d15c58a0aa253abfb151 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 28 Oct 2025 16:59:35 -0400 Subject: [PATCH 340/373] Create JSON Schema for Store Paths We immediately use this in the JSON schemas for Derivation and Deriving Path, but we cannot yet use it in Store Object Info because those paths *do* include the store dir currently. 
--- doc/manual/package.nix | 1 + doc/manual/source/SUMMARY.md.in | 1 + doc/manual/source/protocols/json/meson.build | 1 + .../protocols/json/schema/derivation-v3.yaml | 20 ++++++------ .../json/schema/deriving-path-v1.yaml | 2 +- .../protocols/json/schema/store-path-v1 | 1 + .../protocols/json/schema/store-path-v1.yaml | 32 +++++++++++++++++++ .../source/protocols/json/store-path.md | 15 +++++++++ src/json-schema-checks/meson.build | 7 ++++ src/json-schema-checks/package.nix | 1 + src/json-schema-checks/store-path | 1 + 11 files changed, 72 insertions(+), 10 deletions(-) create mode 120000 doc/manual/source/protocols/json/schema/store-path-v1 create mode 100644 doc/manual/source/protocols/json/schema/store-path-v1.yaml create mode 100644 doc/manual/source/protocols/json/store-path.md create mode 120000 src/json-schema-checks/store-path diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 140fa9849..b7c9503ef 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -36,6 +36,7 @@ mkMesonDerivation (finalAttrs: { # For example JSON ../../src/libutil-tests/data/hash ../../src/libstore-tests/data/content-address + ../../src/libstore-tests/data/store-path ../../src/libstore-tests/data/derived-path # Too many different types of files to filter for now ../../doc/manual diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index abd9422cd..7f3b1a103 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -119,6 +119,7 @@ - [JSON Formats](protocols/json/index.md) - [Hash](protocols/json/hash.md) - [Content Address](protocols/json/content-address.md) + - [Store Path](protocols/json/store-path.md) - [Store Object Info](protocols/json/store-object-info.md) - [Derivation](protocols/json/derivation.md) - [Deriving Path](protocols/json/deriving-path.md) diff --git a/doc/manual/source/protocols/json/meson.build b/doc/manual/source/protocols/json/meson.build index f79667961..e8546d813 100644 --- 
a/doc/manual/source/protocols/json/meson.build +++ b/doc/manual/source/protocols/json/meson.build @@ -11,6 +11,7 @@ json_schema_config = files('json-schema-for-humans-config.yaml') schemas = [ 'hash-v1', 'content-address-v1', + 'store-path-v1', 'derivation-v3', 'deriving-path-v1', ] diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml index 30fddf699..3275bcdd9 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v3.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -85,7 +85,7 @@ properties: > ] > ``` items: - type: string + $ref: "store-path-v1.yaml" inputDrvs: type: object @@ -103,13 +103,15 @@ properties: > ``` > > specifies that this derivation depends on the `dev` output of `curl`, and the `out` output of `unzip`. - additionalProperties: - title: Store Path - description: | - A store path to a derivation, mapped to the outputs of that derivation. - oneOf: - - "$ref": "#/$defs/outputNames" - - "$ref": "#/$defs/dynamicOutputs" + patternProperties: + "^[0123456789abcdfghijklmnpqrsvwxyz]{32}-.+\\.drv$": + title: Store Path + description: | + A store path to a derivation, mapped to the outputs of that derivation. + oneOf: + - "$ref": "#/$defs/outputNames" + - "$ref": "#/$defs/dynamicOutputs" + additionalProperties: false system: type: string @@ -155,7 +157,7 @@ properties: type: object properties: path: - type: string + $ref: "store-path-v1.yaml" title: Output path description: | The output path, if known in advance. 
diff --git a/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml index 7fd74941e..11a784d06 100644 --- a/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml +++ b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml @@ -7,7 +7,7 @@ oneOf: - title: Constant description: | See [Constant](@docroot@/store/derivation/index.md#deriving-path-constant) deriving path. - type: string + $ref: "store-path-v1.yaml" - title: Output description: | See [Output](@docroot@/store/derivation/index.md#deriving-path-output) deriving path. diff --git a/doc/manual/source/protocols/json/schema/store-path-v1 b/doc/manual/source/protocols/json/schema/store-path-v1 new file mode 120000 index 000000000..31e7a6b2a --- /dev/null +++ b/doc/manual/source/protocols/json/schema/store-path-v1 @@ -0,0 +1 @@ +../../../../../../src/libstore-tests/data/store-path \ No newline at end of file diff --git a/doc/manual/source/protocols/json/schema/store-path-v1.yaml b/doc/manual/source/protocols/json/schema/store-path-v1.yaml new file mode 100644 index 000000000..2012aab99 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/store-path-v1.yaml @@ -0,0 +1,32 @@ +"$schema": "http://json-schema.org/draft-07/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/store-path-v1.json" +title: Store Path +description: | + A [store path](@docroot@/store/store-path.md) identifying a store object. + + This schema describes the JSON representation of store paths as used in various Nix JSON APIs. + + > **Warning** + > + > This JSON format is currently + > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > and subject to change. + + ## Format + + Store paths in JSON are represented as strings containing just the hash and name portion, without the store directory prefix. 
+ + For example: `"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"` + + (If the store dir is `/nix/store`, then this corresponds to the path `/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv`.) + + ## Structure + + The format follows this pattern: `${digest}-${name}` + + - **digest**: Digest rendered in a custom variant of [Base32](https://en.wikipedia.org/wiki/Base32) (20 arbitrary bytes become 32 ASCII characters) + - **name**: The package name and optional version/suffix information + +type: string +pattern: "^[0123456789abcdfghijklmnpqrsvwxyz]{32}-.+$" +minLength: 34 diff --git a/doc/manual/source/protocols/json/store-path.md b/doc/manual/source/protocols/json/store-path.md new file mode 100644 index 000000000..02ecc8068 --- /dev/null +++ b/doc/manual/source/protocols/json/store-path.md @@ -0,0 +1,15 @@ +{{#include store-path-v1-fixed.md}} + +## Examples + +### Simple store path + +```json +{{#include schema/store-path-v1/simple.json}} +``` + + diff --git a/src/json-schema-checks/meson.build b/src/json-schema-checks/meson.build index 745fb5ffa..f3e52e544 100644 --- a/src/json-schema-checks/meson.build +++ b/src/json-schema-checks/meson.build @@ -38,6 +38,13 @@ schemas = [ 'nar.json', ], }, + { + 'stem' : 'store-path', + 'schema' : schema_dir / 'store-path-v1.yaml', + 'files' : [ + 'simple.json', + ], + }, { 'stem' : 'derivation', 'schema' : schema_dir / 'derivation-v3.yaml', diff --git a/src/json-schema-checks/package.nix b/src/json-schema-checks/package.nix index 6a76c8b28..0122b5493 100644 --- a/src/json-schema-checks/package.nix +++ b/src/json-schema-checks/package.nix @@ -22,6 +22,7 @@ mkMesonDerivation (finalAttrs: { ../../doc/manual/source/protocols/json/schema ../../src/libutil-tests/data/hash ../../src/libstore-tests/data/content-address + ../../src/libstore-tests/data/store-path ../../src/libstore-tests/data/derivation ../../src/libstore-tests/data/derived-path ./.
diff --git a/src/json-schema-checks/store-path b/src/json-schema-checks/store-path new file mode 120000 index 000000000..003b1dbbb --- /dev/null +++ b/src/json-schema-checks/store-path @@ -0,0 +1 @@ +../../src/libstore-tests/data/store-path \ No newline at end of file From c874e7071b0f81406a4078e5ce0aec50770ccd53 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 29 Oct 2025 01:47:18 +0300 Subject: [PATCH 341/373] libstore/http-binary-cache-store: Improve error messages in HttpBinaryCacheStore::upsertFile MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now the error message doesn't cram everything into a single line and we now instead get: error: … while uploading to HTTP binary cache at 's3://my-cache?endpoint=http://localhost:9000?compression%3Dzstd®ion=eu-west-1' error: unable to download 'http://localhost:9000/my-cache/nar/1125zqba8cx8wbfa632vy458a3j3xja0qpcqafsfdildyl9dqa7x.nar.xz': Operation was aborted by an application callback (42) --- src/libstore/http-binary-cache-store.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 738db132d..1f9ee4100 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -157,7 +157,9 @@ void HttpBinaryCacheStore::upsertFile( try { getFileTransfer()->upload(req); } catch (FileTransferError & e) { - throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", config->cacheUri.to_string(), e.msg()); + UploadToHTTP err(e.message()); + err.addTrace({}, "while uploading to HTTP binary cache at '%s'", config->cacheUri.to_string()); + throw err; } } From ae49074548bb3485a0a263ca862f6aee95cfb09f Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 29 Oct 2025 02:48:26 +0300 Subject: [PATCH 342/373] libstore/filetransfer: Add HttpMethod::PUT This got lost in f1968ea38e51201b37962a9cfd80775989a56d46 and now we had 
incorrect logs that confused "downloading" when we were in fact "uploading" things. --- src/libstore/filetransfer.cc | 4 +++- src/libstore/http-binary-cache-store.cc | 2 +- src/libstore/include/nix/store/filetransfer.hh | 3 +++ src/libstore/s3-binary-cache-store.cc | 1 + 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 6b9c6602b..304984d99 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -394,9 +394,11 @@ struct curlFileTransfer : public FileTransfer if (request.method == HttpMethod::POST) { curl_easy_setopt(req, CURLOPT_POST, 1L); curl_easy_setopt(req, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) request.data->length()); - } else { + } else if (request.method == HttpMethod::PUT) { curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); + } else { + unreachable(); } curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); curl_easy_setopt(req, CURLOPT_READDATA, this); diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 738db132d..089c7873a 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -141,7 +141,7 @@ void HttpBinaryCacheStore::upsertFile( uint64_t sizeHint) { auto req = makeRequest(path); - + req.method = HttpMethod::PUT; auto data = StreamToSourceAdapter(istream).drain(); auto compressionMethod = getCompressionMethod(path); diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 305c33af1..08a2b6329 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -88,6 +88,7 @@ extern const unsigned int RETRY_TIME_MS_DEFAULT; */ enum struct HttpMethod { GET, + PUT, HEAD, POST, DELETE, @@ -147,7 +148,9 @@ struct FileTransferRequest case HttpMethod::HEAD: case 
HttpMethod::GET: return "download"; + case HttpMethod::PUT: case HttpMethod::POST: + assert(data); return "upload"; case HttpMethod::DELETE: return "delet"; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 828e75b7c..417355b68 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -101,6 +101,7 @@ std::string S3BinaryCacheStore::uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string data) { auto req = makeRequest(key); + req.method = HttpMethod::PUT; req.setupForS3(); auto url = req.uri.parsed(); From 6280905638aac9d15c09fc4d38aa469ee63d17be Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 28 Oct 2025 13:24:05 -0400 Subject: [PATCH 343/373] Convert store path info JSON docs to formal JSON Schema, and test This continues the work for formalizing our current JSON docs. Note that in the process, a few bugs were caught: - `closureSize` was repeated twice, forgot `closureDownloadSize` - `file*` fields should be `download*`. They are in fact called that in the line-oriented `.narinfo` file, but were renamed in the JSON format. 
--- doc/manual/package.nix | 2 + .../source/protocols/json/derivation.md | 2 +- doc/manual/source/protocols/json/hash.md | 2 +- doc/manual/source/protocols/json/meson.build | 1 + .../source/protocols/json/schema/nar-info-v1 | 1 + .../json/schema/store-object-info-v1 | 1 + .../json/schema/store-object-info-v1.yaml | 235 ++++++++++++++++++ .../protocols/json/store-object-info.md | 117 +++------ .../source/protocols/json/store-path.md | 2 +- src/json-schema-checks/meson.build | 50 ++++ src/json-schema-checks/nar-info | 1 + src/json-schema-checks/package.nix | 2 + src/json-schema-checks/store-object-info | 1 + 13 files changed, 327 insertions(+), 90 deletions(-) create mode 120000 doc/manual/source/protocols/json/schema/nar-info-v1 create mode 120000 doc/manual/source/protocols/json/schema/store-object-info-v1 create mode 100644 doc/manual/source/protocols/json/schema/store-object-info-v1.yaml create mode 120000 src/json-schema-checks/nar-info create mode 120000 src/json-schema-checks/store-object-info diff --git a/doc/manual/package.nix b/doc/manual/package.nix index b7c9503ef..7d29df3c3 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -38,6 +38,8 @@ mkMesonDerivation (finalAttrs: { ../../src/libstore-tests/data/content-address ../../src/libstore-tests/data/store-path ../../src/libstore-tests/data/derived-path + ../../src/libstore-tests/data/path-info + ../../src/libstore-tests/data/nar-info # Too many different types of files to filter for now ../../doc/manual ./. 
diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index 602ab67e4..a4a4ea79d 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -1,6 +1,6 @@ {{#include derivation-v3-fixed.md}} - diff --git a/doc/manual/source/protocols/json/store-path.md b/doc/manual/source/protocols/json/store-path.md index 02ecc8068..cd18f6595 100644 --- a/doc/manual/source/protocols/json/store-path.md +++ b/doc/manual/source/protocols/json/store-path.md @@ -8,7 +8,7 @@ {{#include schema/store-path-v1/simple.json}} ``` - For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it passes the `-j${NIX_BUILD_CORES}` flag to GNU Make. - If set to `0`, nix will detect the number of CPU cores and pass this number via NIX_BUILD_CORES. + If set to `0`, nix will detect the number of CPU cores and pass this number via `NIX_BUILD_CORES`. > **Note** > diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index cb105a385..33ad8757a 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -87,7 +87,7 @@ struct AuthorizationSettings : Config {"*"}, "allowed-users", R"( - A list user names, separated by whitespace. + A list of user names, separated by whitespace. These users are allowed to connect to the Nix daemon. You can specify groups by prefixing names with `@`. From 4ea32d0b03f04143c54344363affea50fc804681 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sun, 2 Nov 2025 14:00:07 +0100 Subject: [PATCH 373/373] Improve "resolution failed" error Previously: error: Cannot build '/nix/store/cqc798lwy2njwbdzgd0319z4r19j2d1w-nix-manual-2.33.0pre20251101_e4e4063.drv'. Reason: 1 dependency failed. 
Output paths: /nix/store/f1kln1c6z9r7rlhj0h9shcpch7j5g1fj-nix-manual-2.33.0pre20251101_e4e4063-man /nix/store/k65203rx5g1kcagpcz3c3a09bghcj92a-nix-manual-2.33.0pre20251101_e4e4063 error: Cannot build '/nix/store/ajk2fb6r7ijn2fc5c3h85n6zdi36xlfl-nixops-manual.drv'. Reason: 1 dependency failed. Output paths: /nix/store/0anr0998as8ry4hr5g3f3iarszx5aisx-nixops-manual error: resolution failed Now: error: Cannot build '/nix/store/cqc798lwy2njwbdzgd0319z4r19j2d1w-nix-manual-2.33.0pre20251101_e4e4063.drv'. Reason: 1 dependency failed. Output paths: /nix/store/f1kln1c6z9r7rlhj0h9shcpch7j5g1fj-nix-manual-2.33.0pre20251101_e4e4063-man /nix/store/k65203rx5g1kcagpcz3c3a09bghcj92a-nix-manual-2.33.0pre20251101_e4e4063 error: Cannot build '/nix/store/ajk2fb6r7ijn2fc5c3h85n6zdi36xlfl-nixops-manual.drv'. Reason: 1 dependency failed. Output paths: /nix/store/0anr0998as8ry4hr5g3f3iarszx5aisx-nixops-manual error: Build failed due to failed dependency --- src/libstore/build/derivation-goal.cc | 2 +- tests/functional/build.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 717d6890a..14aa044ea 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -147,7 +147,7 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) co_await await(std::move(waitees)); } if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + co_return doneFailure({BuildResult::Failure::DependencyFailed, "Build failed due to failed dependency"}); } if (resolutionGoal->resolvedDrv) { diff --git a/tests/functional/build.sh b/tests/functional/build.sh index c9a39438d..0b06dcd91 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -184,6 +184,7 @@ test "$status" = 1 if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 1 
dependency failed." + <<<"$out" grepQuiet -E "Build failed due to failed dependency" else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi